diff --git "a/5733.jsonl" "b/5733.jsonl" new file mode 100644--- /dev/null +++ "b/5733.jsonl" @@ -0,0 +1,671 @@ +{"seq_id":"442107856","text":"# Written by Philip M. White \n# Copyright 2009.\n# Licensed under the BSD license.\n\nfrom CreditCard import *\n\nclass CreditCardAmexBlueCash(CreditCard):\n\tdef __init__(self, annual_everyday=None):\n\t\tself.name = \"American Express Blue Cash\"\n\t\tself.url = \"http://www201.americanexpress.com/getthecard/learn-about/BlueCash\"\n\t\tself.annual_fee = Money(0)\n\t\tself.reward_types = set(['cash'])\n\n\t\tself.changepoint = Money(6500*100)\n\t\tif annual_everyday is not None:\n\t\t\tself.annual_everyday = annual_everyday\n\n\tdef getAnnualRewardsEarned(self, s):\n\t\tannual_therest = s - self.annual_everyday\n\t\tr = min(self.annual_everyday, self.changepoint)*0.01 + max(0, self.annual_everyday-self.changepoint)*0.05\n\t\tr += min(annual_therest, self.changepoint)*0.005 + max(0, annual_therest-self.changepoint)*0.015\n\t\treturn r\n","sub_path":"cards/CreditCardAmexBlueCash.py","file_name":"CreditCardAmexBlueCash.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"299943433","text":"#!/usr/bin/env python3\n\nimport sys\n#import fileinput\n\n\ndef mistake_type(tag, new_tags, line, columns):\n matched = []\n\n #no tag\n if tag == \"\":\n return(\"no tag\", None)\n \n #no match\n if new_tags == []:\n return (\"no match\", None)\n\n #prefix\n l = len(tag)\n for t in new_tags:\n if len(t) > l and t[:l] == tag:\n matched.append(t)\n if matched != []:\n return (\"prefix\", matched)\n \n #order\n tag_list = list(tag)\n tag_list.sort()\n for t in new_tags:\n t_list = list(t)\n t_list.sort()\n if tag_list == t_list:\n return (\"order\", t)\n\n #different \n return (\"different\", None)\n\n\n#for line in fileinput.input():\nfor line in sys.stdin:\n columns = line.rstrip('\\r\\n').split(\"\\t\")\n if len(columns) < 4:\n continue\n tag = columns[2]\n if len(columns[3]) == 1:\n new_tags = columns[3]\n else:\n new_tags = columns[3].split(\":\")[2::2]\n if tag not in new_tags:\n #print(line, end = \"\")\n (mistake, result) = mistake_type(tag, new_tags, line, columns)\n if mistake == \"no tag\":\n print(\"no tag\\t\", line, end = \"\", sep = \"\")\n elif mistake == \"no match\":\n print(\"no match\\t\", line, end = \"\", sep = \"\")\n elif mistake == \"prefix\":\n print(\"prefix\", \"\\t\", columns[0], \"\\t\", columns[1], \"\\t\", tag, \"\\t\", columns[0], \":\",columns[1], \":\", \":\".join(result), sep = \"\")\n elif mistake == \"order\":\n print(\"order\", \"\\t\", columns[0], \"\\t\", columns[1], \"\\t\", tag, \"\\t\", columns[0], \":\",columns[1], \":\", result, sep = \"\")\n else:\n print(\"different\\t\", line, end = \"\", sep = \"\")\n \n\n\n\n \n \n\n\n \n","sub_path":"nlp1/check_majka.py","file_name":"check_majka.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101779126","text":"# always seem to need this\nimport context\nimport sys\n#import comm\nimport time\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport paho.mqtt.client as mqtt\nimport mainwindow\n\nfrom PyQt5.QtCore import QThread, pyqtSignal, Qt\nfrom PyQt5.QtCore import pyqtSlot\n\n# Define Variables\nMQTT_BROKER = \"192.168.7.107\"\nMQTT_PORT = 1883\nMQTT_KEEPALIVE_INTERVAL = 5\nMQTT_TOPIC = \"#\"\n\n\n# This gets the Qt stuff\nimport PyQt5\nfrom PyQt5.QtWidgets import *\n\n\nclass 
Worker(QtCore.QObject):\n\t\tset_box_signal = QtCore.pyqtSignal(str)\n\t\tdef __init__(self, parent=None):\n\t\t\tsuper(WorkerThread,self).__init__(parent)\n\n\t\t\tvalue = 1\n\t\t\twhile True:\n\t\t\t\tvalue += 1\n\t\t\t\tself.sig.emit(boxNumber,str(value))\n\t\t\t\ttime.sleep(1)\n\n\n'''\n\n\t\t@QtCore.pyqtSlot(int,str)\n\t\tdef startCounting(self,boxNumber,boxText):\n\t\t\tprint(\"Begin MQTT from counting class\")\n\n\t\t\ttime.sleep(1)\n\t\t\tprint (\"WOrkerThread running..\")\n\t\t\t# Define Variables\n\t\t\tMQTT_BROKER = \"192.168.7.107\"\n\t\t\tMQTT_PORT = 1883\n\t\t\tMQTT_KEEPALIVE_INTERVAL = 5\n\t\t\tMQTT_TOPIC = \"#\"\n\n\t\t\t# Connect with MQTT Broker\n\t\t\tmqttc.connect(MQTT_BROKER, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n\t\t\t# Continue the network loop\n\t\t\tmqttc.loop_forever()\n\n\n\t\t# Define on_connect event Handler\n\t\tdef on_connect(mosq, obj, rc,x):\n\t\t\t#Subscribe to a the Topic\n\t\t\tprint(\"Connected - subscribing\")\n\t\t\tmqttc.subscribe(MQTT_TOPIC, 0)\n\n\t\t# Define on_subscribe event Handler\n\t\tdef on_subscribe(mosq, obj, mid, granted_qos):\n\t\t\tprint (\"Subscribed to MQTT Topic\")\n\n\t\t# Define on_message event Handler\n\t\tdef on_message(mosq, obj, msg):\n\t\t\tprint (msg.payload)\n\n\t\t#explicit slot that takes input from the start_counting_signal\n#\t\t@QtCore.pyqtSlot(int,str)\n#\t\tdef startCounting(self,boxNumber,boxText):\n#\t\t\tprint(\"Begin MQTT\")\n\n##\t\t\tMQTT_BROKER = \"192.168.7.107\"\n##\t\t\tMQTT_PORT = 1883\n##\t\t\tMQTT_KEEPALIVE_INTERVAL = 5\n##\t\t\tMQTT_TOPIC = \"#\"\n\t\t\t# Initiate MQTT Client\n#\t\t\tmqttc = mqtt.Client()\n\n\t\t\t# Register Event Handlers\n##\t\t\tmqttc.on_message = self.on_message\n##\t\t\tmqttc.on_connect = self.on_connect\n##\t\t\tmqttc.on_subscribe = self.on_subscribe\n\n\t\t\t# Connect with MQTT Broker\n#\t\t\tmqttc.connect(MQTT_BROKER, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n\t\t\t# Continue the network loop\n#\t\t\tmqttc.loop_forever()\n'''\n\n\n\n# create class for our Raspberry Pi GUI\nclass MainWindow(QMainWindow, mainwindow.Ui_MainWindow):\n\n\t\tstart_counting_signal=QtCore.pyqtSignal(int, str)\n\t\t\n\t \t# access variables inside of the UI's file\n\t\tdef __init__(self):\n\t\t\tsuper(self.__class__, self).__init__()\n\t\t\tself.setupUi(self) # gets defined in the UI file\n\t\t\tself.worker= Worker()\n\t\t\tself.threads=QtCore.QThread()\n\t\t\tself.workers.moveToThread(self.threads)\n\n\t\t\tself.start_counting_signal.connect(self.workers.startCounting)\n\t\t\t\n\t\t\t#self.threads.start()\n\t\t\t\n\t\t\tself.pushButton.clicked.connect(self.test)\n\t\t\tself.pushButton.clicked.connect(self.start_counting_emitter)\n\n\t\tdef start_counting_emitter(self):\n\t\t self.threads.start()\n\t\t print('thread started')\n\t\t self.start_counting_signal.emit(0,'8')\n\t\t self.start_counting_signal.disconnect(self.workers.startCounting)\n\n\t\tdef updateStatus(self, n):\n\t\t\tprint (\"Kerry Brenner\")\n\t\t\tself.label.setText(n)\n\n\t\tdef test(self):\n#\t\t\tself.worker = WorkerThread()\n\t\t\tself.workers.set_box_signal.connect(self.updateStatus)\n\t\t\tself.start_counting_signal.emit(1,\"A\")\n\t\t\tprint(\"\\n\\n\\nJonWas here\\n\\n\\n\")\n\t\t\tself.label.setText(\"HOWDY\")\n\t\t\tself.worker = WorkerThread()\n#\t\t\tself.worker.sig.connect(self.updateStatus)\n\t\t\tself.worker.start()\n\n\t#\t\tmqttc.connect(\"192.168.7.107\", 1883, 60)\n\t#\t\tmqttc.subscribe(\"#\", 0)\n\t#\t\tmqttc.loop_forever()\n\n\n\n\nclass WorkerThread(QThread):\n\n\n\t\t# Added a signal\n\t\tsig = 
QtCore.pyqtSignal(str)\n\t\t# Register Event Handlers\n\t\t#set_box_signal = QtCore.pyqtSignal(str)\n\n\n\t\tdef run(self):\n\t\t\ttime.sleep(1)\n\t\t\tprint (\"WOrkerThread running..\")\n\n# I feel better having one of these\ndef main():\n\t\tapp = QApplication(sys.argv)\n\t\tform = MainWindow()\n\n\n\t\tform.show()\n# without this, the script exits immediately.\n\t\tsys.exit(app.exec_())\n \n# python bit to figure how who started This\nif __name__ == \"__main__\":\n main()\n","sub_path":"main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"316489522","text":"\n\t\t\n\nclass EDID(object):\n\t\"\"\"A model object of all the data stored in the EDID\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(EDID, self).__init__()\n\t\tself.arg = arg\n\t\tself.header = Header();\n\t\tself.header.Fix","sub_path":"model/edid.py","file_name":"edid.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"156128634","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport yfinance as yf\nimport investpy as inv\n#import time\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport seaborn as sns\n#import cufflinks as cf\nimport datetime\nfrom datetime import date\nimport math\nimport statsmodels.api\nimport statsmodels as sm\n\nretornos = ''\npreco = ''\nticker=''\nlista = ''\n\ndef sazonalidade():\n global retornos\n global preco\n global ticker\n st.header('Análise de Sazonalidade')\n st.write('Escolha o País')\n pais = st.radio('', ('Brasil', 'Estados Unidos'))\n\n st.write('Escolha entre Ações ou Indices')\n opcao = st.radio('', ('Ações', 'Indices'))\n\n if pais == 'Brasil' and opcao == 'Ações':\n #lista = inv.get_stocks_list(country='brazil')\n lista = st.session_state.lista_tickers\n if pais == 'Brasil' and opcao == 'Indices':\n lista = inv.get_indices_list(country='brazil')\n if pais == 'Estados Unidos' and opcao == 'Ações':\n lista = inv.get_stocks_list(country='united states')\n if pais == 'Estados Unidos' and opcao == 'Indices':\n lista = inv.get_indices_list(country='united states')\n\n with st.form(key='Analise_Sazonalidade'):\n ticker = st.selectbox('Selecione a Ação ou Indice desejado', lista)\n if st.form_submit_button(label='Analisar Sazonalidade'):\n\n\n # ticker = st.selectbox('Selecione a Ação ou Indice desejado', lista)\n #\n # if st.button('Analisar Sazonalidade'):\n\n try:\n data_inicial = '01/12/1999'\n data_final = date.today().strftime('%d/%m/%Y')\n\n # Dados do Investing - Pegar dados periodo Mensal\n if pais == 'Brasil' and opcao == 'Ações':\n # retornos = \\\n # inv.get_stock_historical_data(ticker, country='brazil', from_date=data_inicial, to_date=data_final,\n # interval='Monthly')['Close'].pct_change(1)\n # preco = \\\n # inv.get_stock_historical_data(ticker, country='brazil', from_date=data_inicial, to_date=data_final,\n # interval='Daily')['Close']\n data_inicial = '1999-12-01'\n data_final = date.today().strftime('%Y-%m-%d')\n retornos = yf.download(ticker + '.SA', start= data_inicial, end=data_final, interval='1mo', progress=False)[\"Adj Close\"].pct_change()\n preco = yf.download(ticker + '.SA', start= data_inicial, end=data_final, progress=False)[\"Adj Close\"]\n data_inicial = '01/12/1999'\n data_final = date.today().strftime('%d/%m/%Y')\n # retornos = \\\n # inv.get_stock_historical_data(ticker, country='brazil', 
from_date=data_inicial, to_date=data_final,\n # interval='Monthly')['Close'].pct_change()\n\n\n if pais == 'Brasil' and opcao == 'Indices':\n retornos = \\\n inv.get_index_historical_data(ticker, country='brazil', from_date=data_inicial, to_date=data_final,\n interval='Monthly')['Close'].pct_change(1)\n preco = \\\n inv.get_index_historical_data(ticker, country='brazil', from_date=data_inicial, to_date=data_final,\n interval='Daily')['Close']\n if pais == 'Estados Unidos' and opcao == 'Ações':\n retornos = \\\n inv.get_stock_historical_data(ticker, country='united states', from_date=data_inicial,\n to_date=data_final,\n interval='Monthly')['Close'].pct_change(1)\n preco = \\\n inv.get_stock_historical_data(ticker, country='united states', from_date=data_inicial,\n to_date=data_final,\n interval='Daily')['Close']\n if pais == 'Estados Unidos' and opcao == 'Indices':\n retornos = \\\n inv.get_index_historical_data(ticker, country='united states', from_date=data_inicial,\n to_date=data_final,\n interval='Monthly')['Close'].pct_change(1)\n preco = \\\n inv.get_index_historical_data(ticker, country='united states', from_date=data_inicial,\n to_date=data_final,\n interval='Daily')['Close']\n preco = preco.fillna(method='bfill')\n except:\n st.error('Algo errado com o ativo escolhido! Provavelmente seus dados históricos apresentaram algum problema. Escolha outro Ativo.')\n\n\n if len(retornos) != 0:\n analise_sazonalidade()\n\ndef analise_sazonalidade():\n with st.expander(\"Retornos Mensais\", expanded=True):\n if st.checkbox('Mapa Retornos Mensais', help='Analisar os retornos mensais do ativo escolhido', value=False):\n # Separar e agrupar os anos e meses\n retorno_mensal = retornos.groupby([retornos.index.year.rename('Year'), retornos.index.month.rename('Month')]).mean()\n # Criar e formatar a tabela pivot table\n tabela_retornos = pd.DataFrame(retorno_mensal)\n try:\n tabela_retornos = pd.pivot_table(tabela_retornos, values='Close', index='Year', columns='Month')\n except:\n tabela_retornos = pd.pivot_table(tabela_retornos, values='Adj Close', index='Year', columns='Month')\n tabela_retornos.columns = ['Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun', 'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez']\n\n # HeatMap Seaborn\n fig, ax = plt.subplots(figsize=(12, 9))\n cmap = sns.color_palette('RdYlGn', 50)\n sns.heatmap(tabela_retornos, cmap=cmap, annot=True, fmt='.2%', center=0, vmax=0.02, vmin=-0.02, cbar=False,\n linewidths=1, xticklabels=True, yticklabels=True, ax=ax)\n ax.set_title(ticker, fontsize=18)\n ax.set_yticklabels(ax.get_yticklabels(), rotation=0, verticalalignment='center', fontsize='12')\n ax.set_xticklabels(ax.get_xticklabels(), fontsize='12')\n ax.xaxis.tick_top() # x axis on top\n plt.ylabel('')\n st.pyplot()\n\n # Media das rentabilidades\n media = pd.DataFrame(tabela_retornos.mean())\n media.columns = ['Média']\n media = media.transpose()\n fig, ax = plt.subplots(figsize=(12, 0.5))\n sns.heatmap(media, cmap=cmap, annot=True, fmt='.2%', center=0, vmax=0.02, vmin=-0.02, cbar=False,\n linewidths=1, xticklabels=True, yticklabels=True, ax=ax)\n ax.set_yticklabels(ax.get_yticklabels(), rotation=0, verticalalignment='center', fontsize='11')\n st.pyplot()\n\n with st.expander(\"Sazonalidade Anual\", expanded=True):\n if st.checkbox('Gráfico de Sazonalidade', help='Analisar o comportamento da sazonalidade ao longo dos meses do ano', value=False):\n st.subheader('Sazonalidade Anual')\n mostrar_anos = st.checkbox('Mostrar Anos')\n\n\n decomposicao = sm.tsa.seasonal.seasonal_decompose(preco, 
model='additive', period=251)\n\n Monthly_seasonal = pd.DataFrame(decomposicao.seasonal.groupby([decomposicao.seasonal.index.year.rename('year'),\n decomposicao.seasonal.index.month.rename(\n 'month')]).mean())\n Monthly_seasonal = pd.pivot_table(Monthly_seasonal, values='seasonal', index='year', columns='month')\n Monthly_seasonal.columns = ['Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun', 'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez']\n Monthly_seasonal = Monthly_seasonal.transpose()\n Monthly_seasonal['Media'] = Monthly_seasonal.mean(axis=1)\n\n if mostrar_anos :\n fig = Monthly_seasonal.iplot(asFigure=True, xTitle='Meses', yTitle='Sazonalidade',\n title='Sazonalidade Anual - ' + ticker, dimensions = [710,500])\n fig.update_layout(plot_bgcolor=\"white\", paper_bgcolor=\"white\", legend_bgcolor=\"white\")\n st.plotly_chart(fig)\n else:\n fig = Monthly_seasonal['Media'].iplot(asFigure=True, xTitle='Meses', yTitle='Sazonalidade',\n title='Sazonalidade Anual - ' + ticker, dimensions=[725, 500])\n fig.update_layout(plot_bgcolor=\"white\", paper_bgcolor=\"white\", legend_bgcolor=\"white\")\n st.plotly_chart(fig)\n\n # # Gráfico de Ranking dos Meses\n # tabela_rank_anos = tabela_retornos.rank(axis=1)\n # tabela_rank_meses = tabela_rank_anos.transpose()\n # tabela_descricao = tabela_rank_anos.describe()\n # tabela_descricao = tabela_descricao.transpose()\n # tabela_rank_meses['Media'] = tabela_descricao['mean']\n # fig = tabela_rank_meses.iplot(asFigure=True, xTitle='Meses', yTitle='Ranking', dimensions=(1000, 600),\n # title='Ranking dos meses por ano - ' + ticker)\n # st.write(\n # 'Gráfico - Rankings dos meses (Classificação dos meses do menor para o maior rendimento naquele ano) - Clique 2x no item da Legenda para selecionar')\n # st.plotly_chart(fig)\n","sub_path":"quant_app_sazonalidade_backup.py","file_name":"quant_app_sazonalidade_backup.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"276044811","text":"from collections import namedtuple\n\n\nTagDetails = namedtuple(\"TagDetails\", (\"sha\", \"tag\"))\n\n\nclass TagsMixin():\n\n def get_tags(self, remote=None, reverse=False):\n \"\"\"\n Return a list of TagDetails object. These objects correspond\n to all tags found in the repository, containing abbreviated\n hashes and reference names.\n \"\"\"\n stdout = self.git(\n \"ls-remote\" if remote else \"show-ref\",\n \"--tags\",\n remote if remote else None,\n throw_on_stderr=False\n )\n porcelain_entries = stdout.split(\"\\n\")\n if reverse:\n porcelain_entries.reverse()\n\n entries = [TagDetails(entry[:40], entry[51:]) for entry in iter(porcelain_entries) if entry]\n\n return entries\n\n def get_lastest_local_tag(self):\n \"\"\"\n Return the latest tag of the current branch. 
get_tags() fails to return an ordered list.\n \"\"\"\n\n tag = self.git(\"describe\", \"--tags\", \"--abbrev=0\", throw_on_stderr=False).strip()\n return tag\n","sub_path":"core/git_mixins/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"525754843","text":"# Data augmented\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nimport cv2\r\nimport h5py\r\nimport numpy as np\r\nimport sys\r\nimport os\r\nimport subprocess as sp\r\nfrom skimage.measure import compare_ssim as ssim\r\nfrom skimage.measure import compare_psnr as psnr\r\nfrom mylib import *\r\n\r\nbatch_size = 1024 \r\nepochs = 1000\r\n\r\n\r\ndef tf_build_model(module_name, input_tensor, output_tensor, test=False, freq=False, params=None, _weights_name=None):\r\n with tf.variable_scope('main_full', reuse=tf.AUTO_REUSE):\r\n model_module = __import__(module_name)\r\n if test:\r\n satd_op, mse_op, pred = model_module.build_model(\r\n input_tensor, output_tensor, params=None, freq=freq, test=test)\r\n return satd_op, mse_op, pred\r\n else:\r\n train_op, satd_op, mse_op, pred = model_module.build_model(\r\n input_tensor, output_tensor, params=params, freq=freq, test=test)\r\n return train_op, satd_op, mse_op, pred\r\n\r\ndef drive():\r\n print(sys.argv)\r\n global batch_size\r\n block_size = 8\r\n model_module_name = sys.argv[2]\r\n weights_name = None\r\n train_mode = sys.argv[3]\r\n init_lr = float(sys.argv[4])\r\n batch_size = int(sys.argv[5])\r\n if len(sys.argv) == 7:\r\n weights_name = sys.argv[6]\r\n print(weights_name)\r\n\r\n h5_path = '../../train/' + train_mode + '.h5'\r\n # load data\r\n\r\n hf = None\r\n \r\n hf = h5py.File(h5_path)\r\n \r\n print(\"Loading data\")\r\n x = np.array(hf['data'], dtype=np.float32)\r\n y = np.array(hf['label'], dtype=np.float32)\r\n\r\n length = x.shape[0]\r\n array_list = list(range(0, length))\r\n np.random.shuffle(array_list)\r\n bar = int(length*0.95)\r\n print('-------print the length of bar: %d, and length %d' %(bar, length))\r\n train_data = x[array_list[:bar], :, :, :]\r\n val_data = x[array_list[bar:], :, :, :]\r\n train_label = y[array_list[:bar], :, :, :]\r\n val_label = y[array_list[bar:], :, :, :]\r\n\r\n print(bar)\r\n\r\n def train_generator():\r\n while True:\r\n for i in range(0, bar, batch_size)[:-1]:\r\n yield train_data[i:i+batch_size, :, :, :], train_label[i:i+batch_size, :, :, :]\r\n # np.random.shuffle(train_data)\r\n\r\n def val_generator():\r\n for i in range(0, length-bar, batch_size)[:-1]:\r\n yield val_data[i:i+batch_size, :, :, :], val_label[i:i+batch_size, :, :, :]\r\n\r\n inputs = tf.placeholder(tf.float32, [batch_size, 3072, 1, 1])\r\n targets = tf.placeholder(tf.float32, [batch_size, 1024, 1, 1])\r\n\r\n # build model\r\n train_op, satd_loss, mse_loss, pred = tf_build_model(model_module_name,\r\n inputs,\r\n targets,\r\n test=False,\r\n freq=True,\r\n params=\r\n {'learning_rate': init_lr,\r\n 'batch_size': batch_size\r\n },\r\n _weights_name=weights_name\r\n )\r\n \r\n tensorboard_train_dir = '../../freq_32_tensorboard/' + train_mode + '/train'\r\n tensorboard_valid_dir = '../../freq_32_tensorboard/' + train_mode + '/valid'\r\n if not os.path.exists(tensorboard_train_dir):\r\n os.makedirs(tensorboard_train_dir)\r\n if not os.path.exists(tensorboard_valid_dir):\r\n os.makedirs(tensorboard_valid_dir)\r\n\r\n saver = tf.train.Saver(max_to_keep=30)\r\n checkpoint_dir = '../../freq_32_model/' + train_mode + '/'\r\n if 
not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n with tf.Session() as sess:\r\n if weights_name is not None:\r\n saver.restore(sess, weights_name)\r\n print('-----------Sucesfully restoring weights from: ', weights_name)\r\n else:\r\n sess.run(tf.global_variables_initializer())\r\n print('-----------No weights defined, run initializer')\r\n total_var = 0\r\n for var in tf.trainable_variables():\r\n shape = var.get_shape()\r\n par_num = 1\r\n for dim in shape:\r\n par_num *= dim.value\r\n total_var += par_num\r\n print(\"----------------Number of total variables: %d\" %(total_var))\r\n options = tf.RunOptions() # trace_level=tf.RunOptions.FULL_TRACE)\r\n run_metadata = tf.RunMetadata()\r\n data_gen = train_generator()\r\n interval = 500\r\n metrics = np.zeros((interval,3))\r\n\r\n # --------------- part for tensorboard----------------\r\n train_writer = tf.summary.FileWriter(tensorboard_train_dir, sess.graph)\r\n valid_writer = tf.summary.FileWriter(tensorboard_valid_dir, sess.graph)\r\n train_satd_summary = tf.summary.scalar(train_mode + ' SATD loss', satd_loss)\r\n train_mse_summary = tf.summary.scalar(train_mode + ' MSE loss', mse_loss)\r\n merged = tf.summary.merge([train_satd_summary, train_mse_summary])\r\n\r\n #sub1--------------------------------here for valid mean\r\n valid_size = int(len(range(0, length - bar, batch_size)[:-1]))\r\n print(valid_size)\r\n valid_mse_input = tf.placeholder(tf.float32, [valid_size])\r\n valid_satd_input = tf.placeholder(tf.float32, [valid_size])\r\n valid_mse_mean = tf.reduce_mean(valid_mse_input)\r\n valid_satd_mean = tf.reduce_mean(valid_satd_input)\r\n valid_mse_summary = tf.summary.scalar(train_mode + ' MSE loss', valid_mse_mean)\r\n valid_satd_summary = tf.summary.scalar(train_mode + ' SATD loss', valid_satd_mean)\r\n valid_merged = tf.summary.merge([valid_mse_summary, valid_satd_summary])\r\n #sub1--------------------------------for valid mean\r\n\r\n # --------------- part for tensorboard----------------\r\n\r\n for i in range(200000):\r\n if i % interval == 0:\r\n val_satd_s = []\r\n val_mse_s = []\r\n val_gen = val_generator()\r\n psnr_s = []\r\n ssim_s = []\r\n for v_data, v_label in val_gen:\r\n val_satd, val_mse, recon = sess.run([satd_loss, mse_loss, pred], feed_dict={\r\n inputs: v_data, targets: v_label})\r\n val_satd_s.append(float(val_satd))\r\n val_mse_s.append(float(val_mse))\r\n tmp_psnr, tmp_ssim = test_quality(v_label.reshape([-1, 32, 32])[0] * 255.0, recon.reshape([-1, 32, 32])[0] * 255.0)\r\n psnr_s.append(tmp_psnr)\r\n ssim_s.append(tmp_ssim)\r\n # print('#########tmp: ', tmp_psnr, tmp_ssim)\r\n\r\n # Here is about the tensorboard\r\n rs = sess.run(valid_merged, feed_dict={\r\n valid_mse_input: val_mse_s, valid_satd_input: val_satd_s\r\n })\r\n valid_writer.add_summary(rs, i)\r\n # Here is about the tensorboard\r\n\r\n # now test for psnr\r\n print('------------->now show the info of PSNR and SSIM')\r\n print('PSNR is: %f, SSIM is: %f'%(np.mean(psnr_s), np.mean(ssim_s)))\r\n\r\n\r\n # print(val_satd_s)\r\n print(\"Model name: %s, step %8d, Train SATD %.4f, Train MSE %.4f, Val SATD %.4f, Val MSE %.6f\" % (\r\n model_module_name, i, np.mean(metrics[:,0]), np.mean(metrics[:,1]), np.mean(val_satd_s), np.mean(val_mse_s)))\r\n \r\n # ------------------- Here is the training part ---------------\r\n iter_data, iter_label = next(data_gen)\r\n # print(iter_data.shape)\r\n feed_dict = {inputs: iter_data, targets: iter_label}\r\n _, satd, mse, rs = sess.run([train_op, satd_loss, mse_loss, merged],\r\n 
feed_dict=feed_dict,\r\n options=options,\r\n run_metadata=run_metadata)\r\n if i % interval == 0:\r\n train_writer.add_summary(rs, i)\r\n\r\n metrics[i%interval,0] = satd\r\n metrics[i%interval,1] = mse\r\n \r\n if i % 10000 == 0:\r\n save_path = saver.save(sess, os.path.join(\r\n checkpoint_dir, \"%s_%06d.ckpt\" % (model_module_name,i)))\r\n\r\ndef run_test():\r\n print(sys.argv)\r\n global batch_size\r\n block_size = 8\r\n batch_size = 64\r\n model_module_name = sys.argv[2]\r\n train_mode = sys.argv[3]\r\n weights_name = sys.argv[4]\r\n print(weights_name, train_mode, model_module_name)\r\n inputs = tf.placeholder(tf.float32, [batch_size, 64, 64, 1])\r\n targets = tf.placeholder(tf.float32, [batch_size, 32, 32, 1])\r\n\r\n h5_path = '../../train/' + train_mode + '.h5'\r\n\r\n hf = None\r\n \r\n hf = h5py.File(h5_path)\r\n \r\n print(\"Loading data\")\r\n x = np.array(hf['data'], dtype=np.float32)\r\n y = np.array(hf['label'], dtype=np.float32)\r\n length = x.shape[0]\r\n print(\"Finishing loading data\")\r\n print(weights_name)\r\n satd_loss, mse_loss, pred = tf_build_model(model_module_name,\r\n inputs,\r\n targets,\r\n test=True,\r\n freq=True,\r\n _weights_name=weights_name\r\n )\r\n print('finish build network')\r\n def val_generator():\r\n for i in range(0, length, batch_size)[:-1]:\r\n yield x[i:i+batch_size, :, :, :], y[i:i+batch_size, :, :, :]\r\n\r\n saver = tf.train.Saver()\r\n \r\n\r\n # Test reshape\r\n tmp_input = np.zeros([batch_size, 64, 64, 1])\r\n tmp_label = np.zeros([batch_size, 32, 32, 1])\r\n # Test reshape\r\n with tf.Session() as sess:\r\n if weights_name is None:\r\n print('error!, no weights_name')\r\n exit(0)\r\n else:\r\n saver.restore(sess, weights_name)\r\n print('Successfully restore weights from file: ', weights_name)\r\n \r\n psnr_s = []\r\n ssim_s = []\r\n val_gen = val_generator()\r\n val_cnt = 0\r\n for v_data, v_label in val_gen:\r\n tmp_input[:,0:32,0:64,:] = v_data[:,:2048,:,:].reshape([-1,32,64,1])\r\n tmp_input[:,32:64,0:32,:] = v_data[:,2048:,:,:].reshape([-1,32,32,1])\r\n tmp_label = v_label.reshape([-1,32,32,1])\r\n val_satd, val_mse, recon = sess.run([satd_loss, mse_loss, pred], feed_dict={\r\n inputs: tmp_input, targets: tmp_label})\r\n\r\n recon = recon.reshape([-1, 32, 32]) * 255.0\r\n gt = v_label.reshape([-1, 32, 32]) * 255.0\r\n val_psnr, val_ssim = test_quality(gt, recon)\r\n val_cnt = val_cnt + batch_size\r\n print('-----------> Step %d, Total: %d, psnr: %f, ssim: %f, mse loss: %f, satd_loss: %f<------------'%(val_cnt, length, val_psnr, val_ssim, np.mean(val_mse), np.mean(val_satd)))\r\n psnr_s.append(val_psnr)\r\n ssim_s.append(val_ssim)\r\n print('Finish testing, now psnr is: %f, and ssim is: %f'%(np.mean(psnr_s), np.mean(ssim_s)))\r\n\r\n\r\ndef dump_img(filename, targetpath):\r\n model_module_name = sys.argv[2]\r\n weights_name = sys.argv[3]\r\n filename = sys.argv[4]\r\n print(weights_name, model_module_name, filename)\r\n \r\n img = skimage.imread(filename) / 255.0\r\n input, gt = img2input(filename)\r\n\r\n\r\n inputs = tf.placeholder(tf.float32, [1, 3072, 1, 1])\r\n targets = tf.placeholder(tf.float32, [1, 1024, 1, 1])\r\n satd_loss, mse_loss, pred = tf_build_model(model_module_name,\r\n inputs,\r\n targets,\r\n test=True,\r\n freq=False,\r\n _weights_name=weights_name\r\n )\r\n\r\n saver = tf.train.Saver()\r\n \r\n with tf.Session() as sess:\r\n if weights_name is None:\r\n print('error!, no weights_name')\r\n exit(0)\r\n else:\r\n saver.restore(sess, weights_name)\r\n print('Successfully restore weights from file: ', 
weights_name)\r\n \r\n recon = sess.run(pred, feed_dict={inputs: input.reshape(1,3072,1,1), targets: gt.reshape(1,1024,1,1)})\r\n img[32:,32:] = recon.reshape([32,32]) * 255.0\r\n skimage.imwrite(targetpath, img)\r\n\r\n \r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n tasks = {'train': drive, 'test': run_test, 'dump': dump_img}\r\n task = sys.argv[1]\r\n print('-------------begin task', task)\r\n tasks[task]()\r\n","sub_path":"tf/engine_tornado.py","file_name":"engine_tornado.py","file_ext":"py","file_size_in_byte":12794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230562829","text":"#!/usr/bin/env python\n\nimport os\nimport logging\nimport json\n\nclass Base(object):\n\tdef __init__(self, configDir):\n\t\tself.products = {}\n\t\tself.logger = logging.getLogger(type(self).__name__)\n\t\tself.configDir = configDir\n\n\tdef log(self, msg):\n\t\tself.logger.info(msg)\n\n\tdef trace(self, product):\n\t\tself.logger.debug(product)\n\n\nclass EtcHostsRenderer(Base):\n\tdef __init__(self, configDir):\n\t\tsuper(EtcHostsRenderer, self).__init__(configDir)\n\t\tself.fp = open(\"/tmp/fakehosts_updating.txt\", \"w\")\n\n\tdef printProduct(self, product):\n\t\tpass\n\n\tdef printService(self, product, service, nodes):\n\t\tsize = len(nodes)\n\t\tfor x in range(4):\n\t\t\tprint>>self.fp, nodes[x % size][\"addr\"] + \"\\tsvc-\" + product + \"-\" + service + \"-\" + str(x)\n\n\tdef close(self):\n\t\tself.fp.close()\n\t\tos.system(\"mv /tmp/fakehosts_updating.txt %sfakehosts.txt\" % self.configDir)\n\n\nclass YamlServiceRenderer(Base):\n\tdef __init__(self, configDir):\n\t\tsuper(YamlServiceRenderer, self).__init__(configDir)\n\t\tself.fp = open(\"/tmp/services_updating.yml\", \"w\")\n\n\tdef printProduct(self, product):\n\t\tprint>>self.fp, product + \" :\"\n\n\tdef printService(self, product, service, nodes):\n\t\tprint>>self.fp, \" \" + service + \" :\"\n\t\tfor n in nodes:\n\t\t\tprint>>self.fp, \" - name : \" + n[\"name\"]\n\t\t\tprint>>self.fp, \" port : \" + str(n[\"port\"])\n\t\t\tprint>>self.fp, \" addr : \" + n[\"addr\"]\n\n\tdef close(self):\n\t\tself.fp.close()\n\t\tos.system(\"mv /tmp/services_updating.yml %sservices.yml\" % self.configDir)\n\n\nclass HAProxyConfigRenderer(Base):\n\tdef __init__(self, configDir):\n\t\tsuper(HAProxyConfigRenderer, self).__init__(configDir)\n\t\tself.fp = {}\n\n\tdef printProduct(self, product):\n\t\tself.fp[product] = open(\"/tmp/haproxy_%s_updating.cnf\" % product, \"w\")\n\n\tdef printService(self, product, service, nodes):\n\t\tprint>>self.fp[product], service + \" :\"\n\t\tfor n in nodes:\n\t\t\tprint>>self.fp[product], \" - name : \" + n[\"name\"]\n\t\t\tprint>>self.fp[product], \" port : \" + str(n[\"port\"])\n\t\t\tprint>>self.fp[product], \" addr : \" + n[\"addr\"]\n\n\tdef close(self):\n\t\tfor p in self.fp:\n\t\t\tself.fp[p].close()\n\t\t\tos.system(\"mv /tmp/haproxy_%s_updating.cnf %shaproxy_%s.cnf\" % (p, self.configDir, p))\n\n\nclass SimpleRenderer(Base):\n\tdef getRenderers(self):\n\t\treturn [EtcHostsRenderer(self.configDir), YamlServiceRenderer(self.configDir)]\n\n\tdef doRender(self, products, renderers):\n\t\tself.log(\"Writing new config files...\")\n\t\tself.trace(str(products))\n\t\tcfp = open(\"/tmp/services_updating.json\", \"w\")\n\t\tprint>>cfp, json.dumps(products, sort_keys=True, indent=4, separators=(',', ': '))\n\t\tfor p in products:\n\t\t\tfor r in renderers:\n\t\t\t\tr.printProduct(p)\n\t\t\tservices = products[p]\n\t\t\tfor s in services:\n\t\t\t\tfor r in 
renderers:\n\t\t\t\t\tr.printService(p, s, services[s])\n\t\tself.log(\"Replacing the old set of config files with the fresh one...\")\n\t\tfor r in renderers:\n\t\t\tr.close()\n\t\tcfp.close()\n\t\tos.system(\"mv /tmp/services_updating.json %sservices.json\" % self.configDir)\n\n\tdef render(self, products):\n\t\tself.doRender(products, self.getRenderers())\n\n\nclass HAProxyRenderer(SimpleRenderer):\n\tdef render(self, products):\n\t\trenderers = self.getRenderers()\n\t\trenderers.append(HAProxyConfigRenderer(self.configDir))\n\t\tself.doRender(products, renderers)\n\n\nclass Members(Base):\n\tdef __init__(self, renderer, configDir):\n\t\tsuper(Members, self).__init__(configDir)\n\t\tself.renderer = renderer\n\n\tdef getMemberTable(self, products):\n\t\tself.log(\"Parsing the members table\")\n\t\thandle = os.popen(\"serf members -format=json -status=alive -tag products='.*(%s).*'\" % \"|\".join(products))\n\t\t# handle = open(\"fixtures/serf_members_fake.json\", \"r\")\n\t\tmembers = json.load(handle)\n\t\treturn members[\"members\"]\n\n\tdef parseMemberTable(self, members, observed, collaborators):\n\t\tproducts = {}\n\t\tfor member in members:\n\t\t\thost = member[\"addr\"].split(\":\")\n\t\t\tname = member[\"name\"]\n\t\t\tproductsInNode = member[\"tags\"][\"products\"].split(\":\")\n\t\t\tfor p in productsInNode:\n\t\t\t\tif p not in observed:\n\t\t\t\t\tcontinue\n\t\t\t\tproduct = products.get(p, {})\n\t\t\t\tservices = member[\"tags\"][p + \".service_type\"].split(\":\")\n\t\t\t\tfor s in services:\n\t\t\t\t\tif p in collaborators and s != \"public\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnodes = product.get(s, [])\n\t\t\t\t\tnodes.append({ 'addr': host[0], 'port': int(member[\"tags\"][p + \".\" + s + \".service_port\"]), 'name': name })\n\t\t\t\t\tproduct[s] = nodes\n\t\t\t\tproducts[p] = product\n\t\treturn products\n\n\tdef update(self, products):\n\t\tmustUpdate = False\n\t\ttry:\n\t\t\thandle = open(\"conf/services.json\", \"r\")\n\t\t\tpreviousVersion = json.load(handle)\n\t\t\tmustUpdate = previousVersion != products\n\t\t\thandle.close()\n\t\texcept:\n\t\t\tmustUpdate = True\n\n\t\tif (mustUpdate):\n\t\t\tself.renderer.render(products)\n\t\telse:\n\t\t\tself.log(\"Nothing to update\")\n\t\treturn products\n\n\tdef run(self, suscribed, observed):\n\t\tself.log(\"Checking the membership table\")\n\t\tunion = list(set(suscribed + observed))\n\t\tdiff = list(set(observed) - set(suscribed))\n\t\tproducts = self.parseMemberTable(self.getMemberTable(union), union, diff)\n\t\tself.update(products)\n\t\tself.log(\"Done\")\n\t\treturn products\n\nif __name__ == '__main__':\n\tconfigDir = \"conf/\"\n\tmembers_handler = Members(SimpleRenderer(configDir), configDir)\n\tmembers_handler.run()","sub_path":"members.py","file_name":"members.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640102834","text":"\"\"\"\nmain\n\"\"\"\nimport sys\nimport unittest\nfrom modules import tocases\n\n\ndef main(mpp):\n \"\"\"\n main\n :param mpp:\n :return:\n \"\"\"\n sheet = 'ui'\n level = 'L1'\n excel = tocases.ToCases()\n excel.case_suite(sheet, level)\n from testcase import case_suite\n suite = case_suite.CaseSuite()\n if mpp[1] == 'text':\n suite.text_result() # 不生成html\n elif mpp[1] == 'html':\n suite.html_result(sheet) # 生成html\n\n\nif __name__ == '__main__':\n 
main(sys.argv)\n","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"81745404","text":"import os\n\nimport shm\n\nimport cv2\n\nfrom datetime import datetime\n\n# log_base_path = os.path.join(os.environ['CUAUV_SOFTWARE'], 'vision', 'video_logs')\nlog_base_path = '/var/log/auv/current'\n\nclass VideoWriter:\n def __init__(self, direction):\n self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n self.video_writer = None\n self.frame_count = 0\n self.direction = direction\n\n # YEAR-MONTH-DAY-HOUR-MINUTES-SECONDS\n time_str = datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.log_path = os.path.join(log_base_path,\n '{}_{}.avi'.format(direction, time_str))\n\n frame_var_name = 'frame_num_{}'.format(self.direction)\n if not hasattr(shm.camera, frame_var_name):\n print(\"WARNING: no frame number variable in shm for direction {}!\".format(self.direction))\n self.frame_var = None\n else:\n self.frame_var = getattr(shm.camera, frame_var_name)\n\n def log_image(self, mat):\n # write the current frame number to shm. this is useful for shm logging,\n # and shm log playback\n if self.frame_var is not None:\n self.frame_var.set(self.frame_count)\n\n self.frame_count += 1\n if self.video_writer is None:\n self.video_writer = cv2.VideoWriter(self.log_path, self.fourcc, 10.,\n (mat.shape[1], mat.shape[0]))\n\n self.video_writer.write(mat)\n","sub_path":"vision/modules/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"63286669","text":"#!/usr/bin/env python3\nimport sys\nimport random\nimport csv\n\nrows = list()\n# Open the csv file containing the data\nwith open(sys.argv[1], 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n # Store the csv data in a list\n rows = list(csv_reader)\n\ndata = rows[1:]\n\n# Separate the columns\ntime, ra_data, dec_data = zip(*data)\n\nra_in_sec = list()\n\n# Parse the right ascension data in seconds\nfor ra in ra_data:\n hour = int(ra[0:2])\n minute = int(ra[3:5])\n sec = float(ra[6:])\n ra_in_sec.append(hour * 60 * 60 + minute * 60 + sec)\n\n# Estimate the standard deviation for the gaussian noise\naverage_ra_diff = 0\nfor i in range(len(ra_in_sec) - 1):\n average_ra_diff += ra_in_sec[i + 1] - ra_in_sec[i]\naverage_ra_diff /= (len(ra_in_sec) - 1)\n\n\nsigma_ra = 0.5 * average_ra_diff\n\n# Add gaussian noise with 0 mean and sigma_ra std to the right ascension values\nnoisy_ra_in_sec = list()\nfor ra in ra_in_sec:\n noisy_ra = round(ra + random.gauss(0, sigma_ra), 2)\n if noisy_ra < 0:\n noisy_ra_in_sec.append(24 * 60 * 60 + noisy_ra)\n elif noisy_ra >= 24 * 60 * 60:\n noisy_ra_in_sec.append(noisy_ra - 24 * 60 * 60)\n else:\n noisy_ra_in_sec.append(noisy_ra)\n\n\ndec_in_arcsec = list()\n# Parse the declination data in arc seconds\nfor dec in dec_data:\n positive = dec[0] == '+'\n degree = int(dec[1:3])\n minute = int(dec[4:6])\n sec = float(dec[7:])\n if positive:\n dec_in_arcsec.append(degree * 3600 + minute * 60 + sec)\n else:\n dec_in_arcsec.append(-1 * (degree * 3600 + minute * 60 + sec))\n\n# Estimate the standard deviation for the gaussian noise\naverage_dec_diff = 0\nfor i in range(len(dec_in_arcsec) - 1):\n average_dec_diff += dec_in_arcsec[i + 1] - dec_in_arcsec[i]\naverage_dec_diff /= (len(dec_in_arcsec) - 1)\n\n\nsigma_dec = 0.5 * average_dec_diff\n\n# Add gaussian noise with 0 mean and 
sigma_dec std to the declination values\nnoisy_dec_in_arcsec = list()\nfor dec in dec_in_arcsec:\n noisy_dec = round(dec + random.gauss(0, sigma_dec), 1)\n noisy_dec_in_arcsec.append(noisy_dec)\n\n\ncsv_list = zip(time, noisy_ra_in_sec, noisy_dec_in_arcsec)\n\n\n# Set the output csv file name\noutputFileName = sys.path[0] + '/' + time[0][0:4] + '.csv'\n\n# Write the output list in csv format to a new csv file\nwith open(outputFileName, mode='w') as outputFile:\n writer = csv.writer(outputFile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['Date', 'Right ascension (in seconds)', 'Declination (in arc seconds)'])\n writer.writerows(csv_list)\n\nprint('Output file name: ' + outputFileName)","sub_path":"code/add_noise.py","file_name":"add_noise.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"445664483","text":"'''\nCode developed for colour detection bot as an activity by Astra Robotics Club in RV College of Engineering\nThis is for a simple 2 wheeled bot to:\n1) Move forward if green colour is seen\n2) Stop if red colour is seen\n3) Move backward if blue colour is seen\nAll code lines commented below are for testing and debugging purposes and may be kindly ignored.\nCode is also further commented to explain a few key code lines\n'''\nimport cv2\nimport numpy as np\nfrom gpiozero import Robot\nfrom collections import Counter as ctr\n\nclass CDbot:\n \n #contructor of class\n def __init__(self):\n \n window = np.zeros((100,100)) #initialising a black 100x100 window for using escape to quit\n self.colour = {'red':0,'green':0,'blue':0,-1:0} #control dictionary for logic\n left = (4,14) #left wheel gpio pins\n right = (17,18) #right wheel gpio pins\n self.robot = Robot(left,right) #robot initialisation class call\n self.cap = cv2.VideoCapture(0) #recording video from camera 0 (i.e. 
the camera on rpi)\n \n while(True):\n ret, image = self.cap.read() #read from camera\n if cv2.waitKey(1) != 27 : \n cv2.imshow('window',window) #show the window previously made\n self.botmove(image) #main bot moving method call\n else:\n break #exit if escape key is pressed \n \n #bot movement mechanics\n def botmove(self,image):\n self.colour = {'red':0, 'blue':0, 'green':0,-1:0}\n self.colour[self.colour_detect(image)]=1\n \n if self.colour['red'] == 1:\n #print('stop')\n self.robot.stop() #stops robot if red is verified\n elif self.colour['green'] == 1:\n #print('forward')\n self.robot.forward() #moves robot forward if green is verified\n elif self.colour['blue'] == 1:\n #print('reverse')\n self.robot.reverse() #moves robot reverse if blue is verified\n else:\n pass #don't do anything\n\n #colour detection code\n def colour_detect(image):\n\n hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\n\n lower_blue = np.array([100,100,100])\n upper_blue = np.array([140,255,255])\n\n lower_red = np.array([140,100,100])\n upper_red = np.array([180,255,255])\n\n lower_green = np.array([60,100,100])\n upper_green = np.array([100,255,255])\n\n blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)\n red_mask = cv2.inRange(hsv, lower_red, upper_red)\n green_mask = cv2.inRange(hsv, lower_green, upper_green)\n \n (w,h,c)=hsv.shape\n image_area = w*h\n\n blue_area = 0\n red_area = 0\n green_area = 0\n res = cv2.bitwise_and(image,image, mask = green_mask)\n cv2.imshow('res',res)\n\n _,contours_blue,_ = cv2.findContours(blue_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n _,contours_red,_ = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n _,contours_green,_ = cv2.findContours(green_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n for cnt_b in contours_blue:\n blue_area = blue_area + cv2.contourArea(cnt_b)\n\n for cnt_r in contours_red:\n red_area = red_area + cv2.contourArea(cnt_r)\n\n for cnt_g in contours_green: \n green_area = green_area + cv2.contourArea(cnt_g)\n \n blue_ratio = blue_area/image_area\n red_ratio = red_area/image_area\n green_ratio = green_area/image_area\n\n colour_ratios = [blue_ratio, red_ratio, green_ratio]\n if(max(colour_ratios)>0.2):\n if(max(colour_ratios) == colour_ratios[0]):\n return 'blue'\n elif(max(colour_ratios) == colour_ratios[1]):\n return 'red'\n else:\n return 'green'\n else:\n return -1\n \n #destructor of class\n def __del__(self):\n print('Program terminated')\n self.cap.release()\n cv2.destroyAllWindows() \n \nif __name__ == '__main__':\n bot = CDbot() #creation of object for doing the task\n\n \n","sub_path":"Colour_detection_bot/main_code.py","file_name":"main_code.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"187833149","text":"# Copyright 2017 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\n\nfrom mock import patch\n\nfrom rules_python import whl\n\n\ndef TestData(name):\n return os.path.join(os.environ['TEST_SRCDIR'], name)\n\n\nclass WheelTest(unittest.TestCase):\n\n def test_grpc_whl(self):\n td = TestData('grpc_whl/file/grpcio-1.6.0-cp27-cp27m-manylinux1_i686.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(wheel.name(), 'grpcio')\n self.assertEqual(wheel.distribution(), 'grpcio')\n self.assertEqual(wheel.version(), '1.6.0')\n self.assertEqual(set(wheel.dependencies()),\n set(['enum34', 'futures', 'protobuf', 'six']))\n self.assertEqual('pypi__grpcio_1_6_0', wheel.repository_name())\n self.assertEqual([], wheel.extras())\n\n def test_futures_whl(self):\n td = TestData('futures_3_1_1_whl/file/futures-3.1.1-py2-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(wheel.name(), 'futures')\n self.assertEqual(wheel.distribution(), 'futures')\n self.assertEqual(wheel.version(), '3.1.1')\n self.assertEqual(set(wheel.dependencies()), set())\n self.assertEqual('pypi__futures_3_1_1', wheel.repository_name())\n self.assertEqual([], wheel.extras())\n\n def test_whl_with_METADATA_file(self):\n td = TestData('futures_2_2_0_whl/file/futures-2.2.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(wheel.name(), 'futures')\n self.assertEqual(wheel.distribution(), 'futures')\n self.assertEqual(wheel.version(), '2.2.0')\n self.assertEqual(set(wheel.dependencies()), set())\n self.assertEqual('pypi__futures_2_2_0', wheel.repository_name())\n\n @patch('platform.python_version', return_value='2.7.13')\n def test_mock_whl(self, *args):\n td = TestData('mock_whl/file/mock-2.0.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(wheel.name(), 'mock')\n self.assertEqual(wheel.distribution(), 'mock')\n self.assertEqual(wheel.version(), '2.0.0')\n self.assertEqual(set(wheel.dependencies()),\n set(['funcsigs', 'pbr', 'six']))\n self.assertEqual('pypi__mock_2_0_0', wheel.repository_name())\n\n @patch('platform.python_version', return_value='3.3.0')\n def test_mock_whl_3_3(self, *args):\n td = TestData('mock_whl/file/mock-2.0.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(set(wheel.dependencies()),\n set(['pbr', 'six']))\n\n @patch('platform.python_version', return_value='2.7.13')\n def test_mock_whl_extras(self, *args):\n td = TestData('mock_whl/file/mock-2.0.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(['docs', 'test'], wheel.extras())\n self.assertEqual(set(wheel.dependencies(extra='docs')), set(['sphinx']))\n self.assertEqual(set(wheel.dependencies(extra='test')), set(['unittest2']))\n\n @patch('platform.python_version', return_value='3.0.0')\n def test_mock_whl_extras_3_0(self, *args):\n td = TestData('mock_whl/file/mock-2.0.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(['docs', 'test'], wheel.extras())\n self.assertEqual(set(wheel.dependencies(extra='docs')), set(['sphinx', 'Pygments', 'jinja2']))\n 
self.assertEqual(set(wheel.dependencies(extra='test')), set(['unittest2']))\n\n @patch('platform.python_version', return_value='2.7.13')\n def test_google_cloud_language_whl(self, *args):\n td = TestData('google_cloud_language_whl/file/' +\n 'google_cloud_language-0.29.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n self.assertEqual(wheel.name(), 'google-cloud-language')\n self.assertEqual(wheel.distribution(), 'google_cloud_language')\n self.assertEqual(wheel.version(), '0.29.0')\n expected_deps = ['google-gax', 'google-cloud-core',\n 'googleapis-common-protos[grpc]', 'enum34']\n self.assertEqual(set(wheel.dependencies()),\n set(expected_deps))\n self.assertEqual('pypi__google_cloud_language_0_29_0',\n wheel.repository_name())\n self.assertEqual([], wheel.extras())\n\n @patch('platform.python_version', return_value='3.4.0')\n def test_google_cloud_language_whl_3_4(self, *args):\n td = TestData('google_cloud_language_whl/file/' +\n 'google_cloud_language-0.29.0-py2.py3-none-any.whl')\n wheel = whl.Wheel(td)\n expected_deps = ['google-gax', 'google-cloud-core',\n 'googleapis-common-protos[grpc]']\n self.assertEqual(set(wheel.dependencies()),\n set(expected_deps))\n\n def test_parse_metadata(self):\n content = '''Metadata-Version: 2.1\nName: tensorflow\nVersion: 1.12.0\nRequires-Dist: absl-py (>=0.1.6)\nRequires-Dist: astor (>=0.6.0)\nRequires-Dist: gast (>=0.2.0)\nRequires-Dist: keras-applications (>=1.0.6)\nRequires-Dist: keras-preprocessing (>=1.0.5)\nRequires-Dist: numpy (>=1.13.3)\nRequires-Dist: six (>=1.10.0)\nRequires-Dist: protobuf (>=3.6.1)\nRequires-Dist: tensorboard (<1.13.0,>=1.12.0)\nRequires-Dist: termcolor (>=1.1.0)\nRequires-Dist: grpcio (>=1.8.6)\nRequires-Dist: wheel (>=0.26)\n'''\n self.assertEqual(whl.Wheel._parse_metadata(content), {\n 'extras': [],\n 'name': 'tensorflow',\n 'run_requires': [{'extra': None,\n 'marker': None,\n 'requires': ['absl-py',\n 'astor',\n 'gast',\n 'grpcio',\n 'keras-applications',\n 'keras-preprocessing',\n 'numpy',\n 'protobuf',\n 'six',\n 'tensorboard',\n 'termcolor',\n 'wheel']}],\n })\n content = '''Metadata-Version: 2.0\nName: Werkzeug\nProvides-Extra: dev\nRequires-Dist: coverage; extra == 'dev'\nRequires-Dist: pytest; extra == 'dev'\nRequires-Dist: sphinx; extra == 'dev'\nRequires-Dist: tox; extra == 'dev'\nProvides-Extra: termcolor\nRequires-Dist: termcolor; extra == 'termcolor'\nProvides-Extra: watchdog\nRequires-Dist: watchdog; extra == 'watchdog'\n'''\n self.assertEqual(whl.Wheel._parse_metadata(content), {\n 'extras': ['dev', 'termcolor', 'watchdog'],\n 'name': 'Werkzeug',\n 'run_requires': [{'extra': 'dev',\n 'marker': 'extra == \"dev\"',\n 'requires': ['coverage', 'pytest', 'sphinx', 'tox']},\n {'extra': 'termcolor',\n 'marker': 'extra == \"termcolor\"',\n 'requires': ['termcolor']},\n {'extra': 'watchdog',\n 'marker': 'extra == \"watchdog\"',\n 'requires': ['watchdog']}]\n })\n content = '''Metadata-Version: 2.1\nName: librosa\nProvides-Extra: docs\nProvides-Extra: tests\nProvides-Extra: display\nRequires-Dist: audioread (>=2.0.0)\nRequires-Dist: numpy (>=1.8.0)\nRequires-Dist: scipy (>=0.14.0)\nRequires-Dist: scikit-learn (!=0.19.0,>=0.14.0)\nRequires-Dist: joblib (>=0.12)\nRequires-Dist: decorator (>=3.0.0)\nRequires-Dist: six (>=1.3)\nRequires-Dist: resampy (>=0.2.0)\nRequires-Dist: numba (>=0.38.0)\nProvides-Extra: display\nRequires-Dist: matplotlib (>=1.5); extra == 'display'\nProvides-Extra: docs\nRequires-Dist: numpydoc; extra == 'docs'\nRequires-Dist: sphinx (!=1.3.1); extra == 'docs'\nRequires-Dist: sphinx-rtd-theme; 
extra == 'docs'\nRequires-Dist: matplotlib (>=2.0.0); extra == 'docs'\nRequires-Dist: sphinxcontrib-versioning (>=2.2.1); extra == 'docs'\nRequires-Dist: sphinx-gallery; extra == 'docs'\nProvides-Extra: tests\nRequires-Dist: matplotlib (>=2.1); extra == 'tests'\n'''\n self.assertEqual(whl.Wheel._parse_metadata(content), {\n 'extras': ['display', 'docs', 'tests'],\n 'name': 'librosa',\n 'run_requires': [{'extra': None,\n 'marker': None,\n 'requires': ['audioread',\n 'decorator',\n 'joblib',\n 'numba',\n 'numpy',\n 'resampy',\n 'scikit-learn',\n 'scipy',\n 'six']},\n {'extra': 'display',\n 'marker': 'extra == \"display\"',\n 'requires': ['matplotlib']},\n {'extra': 'docs',\n 'marker': 'extra == \"docs\"',\n 'requires': ['matplotlib',\n 'numpydoc',\n 'sphinx',\n 'sphinx-gallery',\n 'sphinx-rtd-theme',\n 'sphinxcontrib-versioning']},\n {'extra': 'tests',\n 'marker': 'extra == \"tests\"',\n 'requires': ['matplotlib']}]\n })\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"rules_python/whl_test.py","file_name":"whl_test.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"200675373","text":"# -*- coding: utf-8 -*-\nimport abc\nfrom watson.common.contextmanagers import ignored\n\n\nclass Base(metaclass=abc.ABCMeta):\n \"\"\"The base command that outlines the required structure for a console\n command.\n\n Help is automatically invoked when the `-h` or `--help` option is used.\n\n http://docs.python.org/dev/library/argparse.html#the-add-argument-method\n\n Example:\n\n .. code-block:: python\n\n # can be executed by `script.py mycommand`\n class MyCommand(BaseCommand):\n name = 'mycommand'\n\n def execute(self):\n return True\n\n # can be executed by `script.py mycommand -t something`\n class MyCommand(BaseCommand):\n name = 'mycommand'\n arguments = [\n (['-t', '--test'], {'help': 'Do something with -t'})\n ]\n\n def execute(self):\n return True if self.parsed_args.t else False\n\n # can be executed by `script.py mycommand something`\n class MyCommand(BaseCommand):\n name = 'mycommand'\n arguments = [\n {'dest': 'argument1', 'help': 'This is the help for the argument'}\n ]\n\n def execute(self):\n return True if self.parsed_args.argument1 else False\n \"\"\"\n name = None\n arguments = []\n help = 'Missing help.'\n _parsed_args = None\n\n @property\n def parsed_args(self):\n \"\"\"Returns the parsed arguments.\n\n Returns:\n list|dict depending on whether or not there have been named arguments.\n \"\"\"\n return self._parsed_args\n\n @parsed_args.setter\n def parsed_args(self, args):\n \"\"\"Set the parsed arguments.\n \"\"\"\n self._parsed_args = args\n\n @abc.abstractmethod\n def execute(self):\n raise NotImplementedError('execute() must be implemented.') # pragma: no cover\n\n def __call__(self):\n return self.execute()\n\n\ndef find_commands_in_module(module):\n \"\"\"Retrieves a list of all commands within a module.\n\n Returns:\n A list of commands from the module.\n \"\"\"\n commands = []\n for key in dir(module):\n item = getattr(module, key)\n with ignored(Exception):\n if issubclass(item, Base) and item != Base:\n commands.append(item)\n return commands\n","sub_path":"watson/console/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"466477087","text":"import os\nimport unittest\n\nfrom biolinkml import LOCAL_YAML_PATH, METAMODEL_URI, 
METAMODEL_LOCAL_NAME, LOCAL_CONTEXT_PATH, METAMODEL_NAMESPACE, \\\n METATYPE_NAMESPACE, METATYPE_LOCAL_NAME, METATYPE_URI, LOCAL_TYPES_PATH\nfrom biolinkml.utils.rawloader import load_raw_schema\n\n\nclass ModelURITestCase(unittest.TestCase):\n\n def validate_yaml_content(self, meta_yaml, access_by_uri: bool) -> None:\n self.assertEqual(METAMODEL_URI, meta_yaml.id)\n self.assertEqual(METAMODEL_LOCAL_NAME, meta_yaml.default_prefix)\n self.assertEqual(METAMODEL_NAMESPACE, meta_yaml.prefixes[meta_yaml.default_prefix].prefix_reference)\n self.assertEqual(METAMODEL_URI if access_by_uri else LOCAL_YAML_PATH, meta_yaml.source_file)\n\n def test_model_uris(self):\n \"\"\" Test that the variables in meta.yaml match the contents of biolinkml/__init__.py \"\"\"\n self.assertTrue(os.path.exists(LOCAL_YAML_PATH))\n self.assertTrue(os.path.exists(LOCAL_CONTEXT_PATH))\n meta_yaml = load_raw_schema(LOCAL_YAML_PATH)\n self.validate_yaml_content(meta_yaml, False)\n\n types_yaml = load_raw_schema(LOCAL_TYPES_PATH)\n self.assertEqual(METATYPE_LOCAL_NAME, types_yaml.default_prefix)\n self.assertEqual(METATYPE_URI, types_yaml.id)\n self.assertEqual(METATYPE_LOCAL_NAME, types_yaml.default_prefix)\n self.assertEqual(METATYPE_NAMESPACE, types_yaml.prefixes[types_yaml.default_prefix].prefix_reference)\n\n def test_model_access(self):\n \"\"\" Make sure that the law loader can dereference a URL and that the data matches \"\"\"\n online_meta_yaml = load_raw_schema(METAMODEL_URI)\n self.validate_yaml_content(online_meta_yaml, True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_base/test_model_uris.py","file_name":"test_model_uris.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"458548097","text":"from sys import argv\n\nscript, arg = argv\nn = int(arg)\n\nfor i in range( 1, n ):\n if n//i == i and n%i == 0:\n print(n//i)\n exit(0)\n \nprint(False)","sub_path":"ex37.py","file_name":"ex37.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"224081495","text":"# Opus/UrbanSim urban simulation software.\r\n# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington\r\n# See opus_core/LICENSE \r\n\r\nfrom opus_core.variables.variable import Variable\r\nfrom variable_functions import my_attribute_label\r\nfrom numpy import ma\r\nfrom numpy import float32\r\n\r\nclass lot_sf_unit(Variable):\r\n ''' (lot_sf) / residential_units.'''\r\n \r\n lot_sf = 'lot_sf'\r\n residential_units = 'residential_units'\r\n \r\n def dependencies(self):\r\n return [my_attribute_label(self.lot_sf), \\\r\n my_attribute_label(self.residential_units)]\r\n \r\n def compute(self, dataset_pool):\r\n parcels = self.get_dataset()\r\n residential_units = parcels.get_attribute(self.residential_units)\r\n return ma.filled(parcels.get_attribute(self.lot_sf) / \\\r\n ma.masked_where(residential_units==0, residential_units.astype(float32)), 0.0)\r\n\r\n def post_check(self, values, dataset_pool):\r\n self.do_check('x >= 0', values)\r\n \r\n\r\nfrom opus_core.tests import opus_unittest\r\nfrom urbansim.variable_test_toolbox import VariableTestToolbox\r\nfrom numpy import array\r\nfrom psrc.datasets.parcel_dataset import ParcelDataset\r\nfrom opus_core.storage_factory import StorageFactory\r\n\r\nclass Tests(opus_unittest.OpusTestCase):\r\n variable_name = 'psrc.parcel.lot_sf_unit'\r\n\r\n def 
test_my_inputs(self):\r\n storage = StorageFactory().get_storage('dict_storage')\r\n \r\n parcels_table_name = 'parcels'\r\n \r\n storage.write_table(\r\n table_name=parcels_table_name,\r\n table_data={\r\n 'parcel_id':array([1,2,3,4,5]),\r\n 'residential_units':array([2,0,1,4,7]),\r\n 'lot_sf':array([1000,0,2000,1000,7000]),\r\n },\r\n )\r\n\r\n parcels = ParcelDataset(in_storage=storage, in_table_name=parcels_table_name)\r\n\r\n values = VariableTestToolbox().compute_variable(self.variable_name,\r\n data_dictionary = {\r\n 'parcel':parcels\r\n }, \r\n dataset = 'parcel'\r\n )\r\n \r\n should_be = array([500, 0, 2000, 250, 1000])\r\n \r\n self.assert_(ma.allclose(values, should_be),\r\n 'Error in ' + self.variable_name)\r\n\r\n\r\nif __name__=='__main__': \r\n opus_unittest.main()","sub_path":"psrc/parcel/lot_sf_unit.py","file_name":"lot_sf_unit.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"319216692","text":"import os\nimport numpy as np\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\n\ndef load_mnist(x_style='gray', normalize_x=True, categorize_y=True):\n \"\"\"load_mnist\n\n x_style in ['no_channel', 'one_channel', 'three_channels', 'flat']\n 'no_channel': x_train.shape = (n_samples, 28, 28)\n 'one_channel': x_train.shape = (n_samples, 28, 28, 1)\n 'three_channels': x_train.shape = (n_samples, 28, 28, 3): the last axis is generated by simple duplication\n 'flat': x_train.shape = (n_samples, 28 * 28)\n normalize_x: if True, the range of x_train and x_test will be from 0 to 1\n categorize_y: if True, y_train and y_test will be one-hot vectors\n \"\"\"\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.astype(np.float32)\n x_test = x_test.astype(np.float32)\n if x_style == 'no_channel':\n pass\n elif x_style == 'flat':\n x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])\n x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2])\n elif x_style == 'one_channel':\n x_train = x_train[:, :, :, np.newaxis]\n x_test = x_test[:, :, :, np.newaxis]\n elif x_style == 'three_channels':\n x_train = x_train[:, :, :, np.newaxis]\n x_test = x_test[:, :, :, np.newaxis]\n x_train = np.repeat(x_train, 3, axis=3)\n x_test = np.repeat(x_test, 3, axis=3)\n else:\n raise ValueError('x_style must be in [\\'no_channel\\', \\'one_channel\\', \\'three_channels\\', \\'flat\\']: {} assigned'.format(x_style))\n if normalize_x:\n x_train /= 255\n x_test /= 255\n if categorize_y:\n n_classes = 10\n y_train = np_utils.to_categorical(y_train, n_classes)\n y_test = np_utils.to_categorical(y_test, n_classes)\n return (x_train, y_train), (x_test, y_test)\n","sub_path":"keras_test/load_mnist.py","file_name":"load_mnist.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"19072750","text":"import pygame\nfrom time import *\nfrom helper import *\nfrom introFunctions import *\n\n#start menu \nrunning = startMenu() \n\n#main game loop begins\nwhile running: \n#Draw #screen.fill(BG) #Background\n background(season(day))\n statBar(day, age, money, exp,jobs[jobIndex]) #stats\n \n #conditional buttons#\n colour = hovering(school, pygame.mouse.get_pos())\n if age <= 18: \n createButton(\"School\", colour, schoolX, schoolY, button1X, button1Y)\n elif not educated:\n createButton(\"\", colour, schoolX, schoolY, button1X, button1Y)\n 
screen.blit(schoolText,(schoolX + button1X // 8, schoolY + button1Y // 4))\n screen.blit(schoolText2,(schoolX + button1X // 8, schoolY + button1Y // 4 + 25))\n screen.blit(schoolText3,(schoolX + button1X // 8, schoolY + button1Y // 4 + 50))\n \n if exp >= jobExps[1]:\n colour = hovering(jobOffer,pygame.mouse.get_pos())\n createButton(\"Job Offer\", colour, jobOfferX, jobOfferY, button2X,button2Y)\n jobTitle = myFont.render(jobs[jobIndex+1],1,BLACK) #change title depending on exp\n screen.blit(jobTitle,(jobOfferX + button1X // 2, jobOfferY + button1Y // 4+25))\n \n #constant button #check hovered\n colour = hovering(work, pygame.mouse.get_pos())\n createButton(\"Work\", colour, workX, workY, button1X, button1Y)\n\n pygame.display.flip() #actually draw\n\n\n #collect player action \n for event in pygame.event.get(): \n if event.type == pygame.QUIT:\n running = False #quit if player wants\n\n if event.type == pygame.MOUSEBUTTONDOWN: #collect clicked coords\n buttonClick = pygame.mouse.get_pressed() #check what button\n mouse_pos = event.pos\n \n #check collisions on buttons\n #work button click\n if work.collidepoint(mouse_pos) and buttonClick[0] ==1: \n #print('work')\n buttonPressed(mouse_pos, workX, workY, button1X, button1Y)\n money += pay(jobWages,jobIndex, workHour, mouse_pos)\n exp += jobExpFactors[jobIndex]\n click = True\n\n #school button click\n if age <= 18 and school.collidepoint(mouse_pos)and buttonClick[0] ==1: \n #print('School')\n buttonPressed(mouse_pos, schoolX, schoolY, button1X, button1Y)\n exp += learnExp\n click = True\n elif school.collidepoint(mouse_pos)and buttonClick[0] ==1 and not educated:\n #print('educated')#post secondary\n createButton(\"\", GREEN, schoolX, schoolY, button1X,button1Y)\n educated, age, money, exp = education(age,money,exp,schoolCost, schoolExp)\n click = True\n\n #job offer to level up when enough exp\n if exp >= jobExps[1] and jobOffer.collidepoint(mouse_pos) and buttonClick[0] ==1: \n #print('applied')\n del jobExps[0] #dont need anymore\n jobIndex += 1 #update job\n createButton(\"\", GREEN, jobOfferX, jobOfferY,button2X, button2Y)\n click = True\n \n if click == True: #check if any button pressed to update stats\n time.sleep(0.1)\n day, age, money, exp = timePass(day, age, money, exp, intrest)\n click = False\n \n clock.tick(60)\n pygame.display.flip()#draw changes\n #print(day, age, money, exp) #temporary \n\n \n\n#end game screen?\npygame.quit()#end of program","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"63919914","text":"import os\nimport inspect\n\n# /afs/cern.ch/user/n/ntrevisa/work/latinos/unblinding/CMSSW_10_6_4/src/PlotsConfigurations/Configurations/ControlRegions/DYtt/Full2018_v9\n\nconfigurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file\nconfigurations = os.path.dirname(configurations) # Full2018_v9\nconfigurations = os.path.dirname(configurations) # DYtt\nconfigurations = os.path.dirname(configurations) # ControlRegions\nconfigurations = os.path.dirname(configurations) # Configurations\n\nfrom LatinoAnalysis.Tools.commonTools import getSampleFiles, getBaseW, addSampleWeight\n\ndef nanoGetSampleFiles(inputDir, sample):\n try:\n if _samples_noload:\n return []\n except NameError:\n pass\n\n return getSampleFiles(inputDir, sample, True, 'nanoLatino_')\n\n# samples\n\ntry:\n len(samples)\nexcept NameError:\n import collections\n samples = 
collections.OrderedDict()\n\n################################################\n################# SKIMS ########################\n################################################\n\n# MC: /eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/Summer20UL18_106x_nAODv9_Full2018v9/MCl1loose2018v9__MCCorr2018v9NoJERInHorn__l2tightOR2018v9/\n# DATA: /eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/Run2018_UL2018_nAODv9_Full2018v9/DATAl1loose2018v9__l2loose__l2tightOR2018v9/\n# FAKE: /eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/Run2018_UL2018_nAODv9_Full2018v9/DATAl1loose2018v9__l2loose__fakeW/\n\nmcProduction = 'Summer20UL18_106x_nAODv9_Full2018v9'\n\ndataReco = 'Run2018_UL2018_nAODv9_Full2018v9'\n\nmcSteps = 'MCl1loose2018v9__MCCorr2018v9NoJERInHorn__l2tightOR2018v9'\n\nfakeSteps = 'DATAl1loose2018v9__l2loose__fakeW'\n\ndataSteps = 'DATAl1loose2018v9__l2loose__l2tightOR2018v9'\n\n# embedReco = 'Embedding2016_102X_nAODv7_Full2016v7'\n# embedSteps = 'DATAl1loose2016v7__l2loose__l2tightOR2016v7__Embedding'\n\n##############################################\n###### Tree base directory for the site ######\n##############################################\n\nSITE=os.uname()[1]\nif 'iihe' in SITE:\n treeBaseDir = '/pnfs/iihe/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano'\nelif 'cern' in SITE:\n treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano'\n\ndef makeMCDirectory(var=''):\n if var:\n return os.path.join(treeBaseDir, mcProduction, mcSteps.format(var='__' + var))\n else:\n return os.path.join(treeBaseDir, mcProduction, mcSteps.format(var=''))\n\nmcDirectory = makeMCDirectory()\nfakeDirectory = os.path.join(treeBaseDir, dataReco, fakeSteps)\ndataDirectory = os.path.join(treeBaseDir, dataReco, dataSteps)\n# embedDirectory = os.path.join(treeBaseDir, embedReco, embedSteps)\n\n################################################\n############ DATA DECLARATION ##################\n################################################\n\nDataRun = [\n ['A','Run2018A-UL2018-v1'],\n ['B','Run2018B-UL2018-v1'],\n ['C','Run2018C-UL2018-v1'],\n ['D','Run2018D-UL2018-v1'],\n]\n\nDataSets = ['MuonEG','SingleMuon','EGamma','DoubleMuon']\n\nDataTrig = {\n 'MuonEG' : 'Trigger_ElMu' ,\n 'DoubleMuon' : '!Trigger_ElMu && Trigger_dblMu' ,\n 'SingleMuon' : '!Trigger_ElMu && !Trigger_dblMu && Trigger_sngMu' ,\n 'EGamma' : '!Trigger_ElMu && !Trigger_dblMu && !Trigger_sngMu && (Trigger_sngEl || Trigger_dblEl)' ,\n}\n\n#########################################\n############ MC COMMON ##################\n#########################################\n\n# SFweight does not include btag weights\nmcCommonWeightNoMatch = 'XSWeight*METFilter_MC*SFweight'\nmcCommonWeight = 'XSWeight*METFilter_MC*PromptGenLepMatch2l*SFweight'\n\n###########################################\n############# BACKGROUNDS ###############\n###########################################\n\n###### DY #######\nfiles = nanoGetSampleFiles(mcDirectory, 'DYJetsToTT_MuEle_M-50') + \\\n nanoGetSampleFiles(mcDirectory, 'DYJetsToLL_M-10to50-LO')\n\nsamples['DY'] = {\n 'name' : files,\n 'weight' : mcCommonWeight + '*( !(Sum$(PhotonGen_isPrompt==1 && PhotonGen_pt>15 && abs(PhotonGen_eta)<2.6) > 0))',\n 'FilesPerJob' : 2,\n}\n\n##### Top #######\nfiles = nanoGetSampleFiles(mcDirectory, 'TTTo2L2Nu') + \\\n nanoGetSampleFiles(mcDirectory, 'ST_s-channel') + \\\n nanoGetSampleFiles(mcDirectory, 'ST_t-channel_top') + \\\n nanoGetSampleFiles(mcDirectory, 'ST_t-channel_antitop') + \\\n nanoGetSampleFiles(mcDirectory, 'ST_tW_antitop') + \\\n 
nanoGetSampleFiles(mcDirectory, 'ST_tW_top')\n\nsamples['top'] = {\n 'name' : files,\n 'weight' : mcCommonWeight,\n 'FilesPerJob' : 1,\n}\naddSampleWeight(samples,'top','TTTo2L2Nu','Top_pTrw')\n\n###### WW ########\nsamples['WW'] = {\n 'name' : nanoGetSampleFiles(mcDirectory, 'WWTo2L2Nu'),\n 'weight' : mcCommonWeight + '*nllW*ewknloW',\n 'FilesPerJob' : 1\n}\n\n###########################################\n################## FAKE ###################\n###########################################\n\nsamples['Fake'] = {\n 'name': [],\n 'weight': 'METFilter_DATA*fakeW',\n 'weights': [],\n 'isData': ['all'],\n 'FilesPerJob': 50\n}\n\nfor _, sd in DataRun:\n for pd in DataSets:\n tag = pd + '_' + sd\n\n if ( ('DoubleMuon' in pd and 'Run2018B' in sd)\n or ('DoubleMuon' in pd and 'Run2018D' in sd)\n or ('DoubleMuon' in pd and 'Run2018D' in sd) \n or ('SingleMuon' in pd and 'Run2018A' in sd)\n or ('SingleMuon' in pd and 'Run2018B' in sd)\n or ('SingleMuon' in pd and 'Run2018C' in sd)):\n print(\"sd = {}\".format(sd))\n print(\"pd = {}\".format(pd))\n print(\"Old tag = {}\".format(tag))\n tag = tag.replace('v1','v2')\n print(\"New tag = {}\".format(tag))\n\n files = nanoGetSampleFiles(fakeDirectory, tag)\n\n samples['Fake']['name'].extend(files)\n samples['Fake']['weights'].extend([DataTrig[pd]] * len(files))\n\n# samples['Fake']['subsamples'] = {\n# 'em' : 'Lepton_pdgId[0]*Lepton_pdgId[1] == 11*13',\n# 'mm' : 'Lepton_pdgId[0]*Lepton_pdgId[1] == 13*13',\n# 'ee' : 'Lepton_pdgId[0]*Lepton_pdgId[1] == 11*11'\n# }\n\n###########################################\n################## DATA ###################\n###########################################\n\nsamples['DATA'] = {\n 'name': [],\n 'weight': 'LepWPCut*METFilter_DATA',\n 'weights': [],\n 'isData': ['all'],\n 'FilesPerJob': 50\n}\n\nfor _, sd in DataRun:\n for pd in DataSets:\n tag = pd + '_' + sd\n\n if ( ('DoubleMuon' in pd and 'Run2018B' in sd)\n or ('DoubleMuon' in pd and 'Run2018D' in sd)\n or ('DoubleMuon' in pd and 'Run2018D' in sd)\n or ('SingleMuon' in pd and 'Run2018A' in sd)\n or ('SingleMuon' in pd and 'Run2018B' in sd)\n or ('SingleMuon' in pd and 'Run2018C' in sd)):\n print(\"sd = {}\".format(sd))\n print(\"pd = {}\".format(pd))\n print(\"Old tag = {}\".format(tag))\n tag = tag.replace('v1','v2')\n print(\"New tag = {}\".format(tag))\n\n files = nanoGetSampleFiles(dataDirectory, tag)\n\n samples['DATA']['name'].extend(files)\n samples['DATA']['weights'].extend([DataTrig[pd]] * len(files))\n","sub_path":"Configurations/ControlRegions/DYtt/Full2018_v9/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326754190","text":"# cython: language_level=3\n# distutils: language = c++\n# -*- coding: utf-8 -*-\n# *****************************************************************************\n# Copyright (c) 2016-2020, Intel Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
\"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\nInterface of the Mathematical part of the DPNP\n\nNotes\n-----\nThis module is a face or public interface file for the library\nit contains:\n - Interface functions\n - documentation for the functions\n - The functions parameters check\n\n\"\"\"\n\n\nimport numpy\n\nfrom dpnp.backend import *\nfrom dpnp.dparray import dparray\nfrom dpnp.dpnp_utils import *\nimport dpnp\n\n\n__all__ = [\n \"abs\",\n \"absolute\",\n \"add\",\n \"ceil\",\n \"copysign\",\n \"divide\",\n \"fabs\",\n \"floor\",\n \"floor_divide\",\n \"fmax\",\n \"fmin\",\n \"fmod\",\n \"maximum\",\n \"minimum\",\n \"mod\",\n \"modf\",\n \"multiply\",\n \"nanprod\",\n \"nansum\",\n \"negative\",\n \"power\",\n \"prod\",\n \"remainder\",\n \"sign\",\n \"subtract\",\n \"sum\",\n \"true_divide\",\n \"trunc\"\n]\n\n\ndef abs(*args, **kwargs):\n \"\"\"\n Calculate the absolute value element-wise.\n\n .. seealso:: :obj:`numpy.add`\n\n \"\"\"\n\n return dpnp.absolute(*args, **kwargs)\n\n\ndef absolute(x1, **kwargs):\n \"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x1 : array_like\n Input array.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n \"\"\"\n\n is_input_dparray = isinstance(x1, dparray)\n\n if not use_origin_backend(x1) and is_input_dparray and x1.ndim != 0 and not kwargs:\n result = dpnp_absolute(x1)\n\n return result\n\n return call_origin(numpy.absolute, x1, **kwargs)\n\n\ndef add(x1, x2, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : array_like\n The arrays to be added.\n **kwargs\n For other keyword arguments.\n\n Returns\n -------\n out : dparray:\n The sum of `x1` and `x2`, element-wise.\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n :obj:`numpy.add`\n\n Notes\n -----\n Equivalent to `x1 + x2` in terms of array broadcasting.\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n if (x1.size != x2.size):\n checker_throw_value_error(\"add\", \"size\", x1.size, x2.size)\n\n if (x1.shape != x2.shape):\n checker_throw_value_error(\"add\", \"shape\", x1.shape, x2.shape)\n\n return dpnp_add(x1, x2)\n\n return call_origin(numpy.add, x1, x2, **kwargs)\n\n\ndef ceil(x1, **kwargs):\n \"\"\"\n Compute the ceiling of the input, element-wise.\n\n .. 
seealso:: :obj:`numpy.ceil`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n return dpnp_ceil(x1)\n\n return call_origin(numpy.ceil, x1, **kwargs)\n\n\ndef copysign(x1, x2, **kwargs):\n \"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n Parameters\n ----------\n x1 : array_like\n Values to change the sign of.\n x2 : array_like\n The sign of x2 is copied to x1.\n kwargs : dict\n Remaining input parameters of the function.\n\n Returns\n -------\n out: ndarray or scalar\n The values of x1 with the sign of x2.\n \"\"\"\n if not use_origin_backend(x1) and not kwargs:\n if not isinstance(x1, dparray):\n pass\n elif not isinstance(x2, dparray):\n pass\n elif x1.size != x2.size:\n pass\n elif x1.shape != x2.shape:\n pass\n else:\n return dpnp_copysign(x1, x2)\n\n return call_origin(numpy.copysign, x1, x2, **kwargs)\n\n\ndef divide(x1, x2, **kwargs):\n \"\"\"\n Divide arguments element-wise.\n\n .. note::\n The 'out' parameter is currently not supported.\n\n Args:\n x1 (dpnp.dparray): The left argument.\n x2 (dpnp.dparray): The right argument.\n out (dpnp.dparray): Output array.\n\n Returns:\n dpnp.dparray: The division of x1 and x2, element-wise.\n This is a scalar if both x1 and x2 are scalars.\n\n .. seealso:: :obj:`numpy.divide`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n if (x1.size != x2.size):\n checker_throw_value_error(\"divide\", \"size\", x1.size, x2.size)\n\n if (x1.shape != x2.shape):\n checker_throw_value_error(\"divide\", \"shape\", x1.shape, x2.shape)\n\n return dpnp_divide(x1, x2)\n\n return call_origin(numpy.divide, x1, x2, **kwargs)\n\n\ndef fabs(x1, **kwargs):\n \"\"\"\n Compute the absolute values element-wise.\n\n .. seealso:: :obj:`numpy.fabs`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray):\n return dpnp_fabs(x1)\n\n return call_origin(numpy.fabs, x1, **kwargs)\n\n\ndef floor(x1, **kwargs):\n \"\"\"\n Compute the floor of the input, element-wise.\n\n Some spreadsheet programs calculate the “floor-towards-zero”, in other words floor(-2.5) == -2.\n dpNP instead uses the definition of floor where floor(-2.5) == -3.\n\n .. seealso:: :obj:`numpy.floor`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n return dpnp_floor(x1)\n\n return call_origin(numpy.floor, x1, **kwargs)\n\n\ndef floor_divide(x1, x2, **kwargs):\n \"\"\"\n Compute the largest integer smaller or equal to the division of the inputs.\n\n .. seealso:: :obj:`numpy.floor_divide`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n if not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs:\n return dpnp_floor_divide(x1, x2)\n\n return call_origin(numpy.floor_divide, x1, x2, **kwargs)\n\n\ndef fmax(*args, **kwargs):\n \"\"\"\n Element-wise maximum of array elements.\n\n .. seealso:: :obj:`numpy.fmax`\n\n \"\"\"\n\n return dpnp.maximum(*args, **kwargs)\n\n\ndef fmin(*args, **kwargs):\n \"\"\"\n Element-wise minimum of array elements.\n\n .. seealso:: :obj:`numpy.fmin`\n\n \"\"\"\n\n return dpnp.minimum(*args, **kwargs)\n\n\ndef fmod(x1, x2, **kwargs):\n \"\"\"\n Calculate the element-wise remainder of division.\n\n .. 
seealso:: :obj:`numpy.fmod`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n # \"dtype is None\" is important here because we have no kernels with runtime dependent output type\n # kernels are (use \"python example4.py\" to investigate):\n # input1:float64 : input2:float64 : output:float64 : name:\n # input1:float64 : input2:float32 : output:float64 : name:\n # input1:float64 : input2:int64 : output:float64 : name:\n # input1:float64 : input2:int32 : output:float64 : name:\n # input1:float64 : input2:bool : output:float64 : name:\n # input1:float32 : input2:float64 : output:float64 : name:\n # input1:float32 : input2:float32 : output:float32 : name:\n # input1:float32 : input2:int64 : output:float64 : name:\n # input1:float32 : input2:int32 : output:float64 : name:\n # input1:float32 : input2:bool : output:float32 : name:\n # input1:int64 : input2:float64 : output:float64 : name:\n # input1:int64 : input2:float32 : output:float64 : name:\n # input1:int64 : input2:int64 : output:int64 : name:\n # input1:int64 : input2:int32 : output:int64 : name:\n # input1:int64 : input2:bool : output:int64 : name:\n # input1:int32 : input2:float64 : output:float64 : name:\n # input1:int32 : input2:float32 : output:float64 : name:\n # input1:int32 : input2:int64 : output:int64 : name:\n # input1:int32 : input2:int32 : output:int32 : name:\n # input1:int32 : input2:bool : output:int32 : name:\n # input1:bool : input2:float64 : output:float64 : name:\n # input1:bool : input2:float32 : output:float32 : name:\n # input1:bool : input2:int64 : output:int64 : name:\n # input1:bool : input2:int32 : output:int32 : name:\n # input1:bool : input2:bool : output:int8 : name:\n if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n if (x1.size != x2.size):\n checker_throw_value_error(\"fmod\", \"size\", x1.size, x2.size)\n\n if (x1.shape != x2.shape):\n checker_throw_value_error(\"fmod\", \"shape\", x1.shape, x2.shape)\n\n return dpnp_fmod(x1, x2)\n\n return call_origin(numpy.fmod, x1, x2, **kwargs)\n\n\ndef maximum(x1, x2, **kwargs):\n \"\"\"\n Element-wise maximum of array elements.\n\n .. seealso:: :obj:`numpy.maximum`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n if (x1.size != x2.size):\n checker_throw_value_error(\"maximum\", \"size\", x1.size, x2.size)\n\n if (x1.shape != x2.shape):\n checker_throw_value_error(\"maximum\", \"shape\", x1.shape, x2.shape)\n\n return dpnp_maximum(x1, x2)\n\n return call_origin(numpy.maximum, x1, x2, **kwargs)\n\n\ndef minimum(x1, x2, **kwargs):\n \"\"\"\n Element-wise minimum of array elements.\n\n .. seealso:: :obj:`numpy.minimum`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n is_x2_dparray = isinstance(x2, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n if (x1.size != x2.size):\n checker_throw_value_error(\"minimum\", \"size\", x1.size, x2.size)\n\n if (x1.shape != x2.shape):\n checker_throw_value_error(\"minimum\", \"shape\", x1.shape, x2.shape)\n\n return dpnp_minimum(x1, x2)\n\n return call_origin(numpy.minimum, x1, x2, **kwargs)\n\n\ndef mod(*args, **kwargs):\n \"\"\"\n Compute element-wise remainder of division.\n\n Alias for :obj:`dpnp.remainder`\n\n .. 
seealso:: :obj:`numpy.mod`\n\n    \"\"\"\n\n    return dpnp.remainder(*args, **kwargs)\n\n\ndef modf(x, **kwargs):\n    \"\"\"\n    Return the fractional and integral parts of an array, element-wise.\n\n    The fractional and integral parts are negative if the given number is negative.\n\n    Parameters\n    ----------\n    x : array_like\n        Input array.\n    kwargs : dict\n        Remaining input parameters of the function.\n\n    Returns\n    -------\n    y1 : ndarray or scalar\n        Fractional part of x. This is a scalar if x is a scalar.\n    y2 : ndarray or scalar\n        Integral part of x. This is a scalar if x is a scalar.\n\n    See Also\n    --------\n    :obj:`numpy.modf`\n\n    \"\"\"\n    if not use_origin_backend(x) and not kwargs:\n        if not isinstance(x, dparray):\n            pass\n        else:\n            return dpnp_modf(x)\n\n    return call_origin(numpy.modf, x, **kwargs)\n\n\ndef multiply(x1, x2, **kwargs):\n    \"\"\"\n    Multiply arguments element-wise.\n\n    .. note::\n        The 'out' parameter is currently not supported.\n\n    Args:\n        x1 (dpnp.dparray): The left argument.\n        x2 (dpnp.dparray): The right argument.\n        out (dpnp.dparray): Output array.\n\n    Returns:\n        dpnp.dparray: The product of x1 and x2, element-wise.\n        This is a scalar if both x1 and x2 are scalars.\n\n    .. seealso:: :obj:`numpy.multiply`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n    is_x2_dparray = isinstance(x2, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n        if (x1.size != x2.size):\n            checker_throw_value_error(\"multiply\", \"size\", x1.size, x2.size)\n\n        if (x1.shape != x2.shape):\n            checker_throw_value_error(\"multiply\", \"shape\", x1.shape, x2.shape)\n\n        return dpnp_multiply(x1, x2)\n\n    return call_origin(numpy.multiply, x1, x2, **kwargs)\n\n\ndef nanprod(x1, **kwargs):\n    \"\"\"\n    Calculate prod() function treating 'Not a Numbers' (NaN) as ones.\n\n    .. seealso:: :obj:`numpy.nanprod`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n        return dpnp_nanprod(x1)\n\n    return call_origin(numpy.nanprod, x1, **kwargs)\n\n\ndef nansum(x1, **kwargs):\n    \"\"\"\n    Calculate sum() function treating 'Not a Numbers' (NaN) as zero.\n\n    .. seealso:: :obj:`numpy.nansum`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n        return dpnp_nansum(x1)\n\n    return call_origin(numpy.nansum, x1, **kwargs)\n\n\ndef negative(x1, **kwargs):\n    \"\"\"\n    Negative element-wise.\n\n    .. seealso:: :obj:`numpy.negative`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n        return dpnp_negative(x1)\n\n    return call_origin(numpy.negative, x1, **kwargs)\n\n\n
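# NOTE (editorial, not part of the dpnp API): every binary wrapper in this\n# module repeats the same dispatch pattern -- use the dpnp backend when both\n# inputs are dparrays of matching size/shape and no extra kwargs were given,\n# otherwise fall back to numpy via call_origin. A minimal sketch of that\n# shared pattern (the helper name _dispatch_binary is hypothetical):\n#\n#     def _dispatch_binary(name, dpnp_func, numpy_func, x1, x2, **kwargs):\n#         if not use_origin_backend(x1) and isinstance(x1, dparray) \\\n#                 and isinstance(x2, dparray) and not kwargs:\n#             if x1.size != x2.size:\n#                 checker_throw_value_error(name, \"size\", x1.size, x2.size)\n#             if x1.shape != x2.shape:\n#                 checker_throw_value_error(name, \"shape\", x1.shape, x2.shape)\n#             return dpnp_func(x1, x2)\n#         return call_origin(numpy_func, x1, x2, **kwargs)\n\n\n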
def power(x1, x2, **kwargs):\n    \"\"\"\n    First array elements raised to powers from second array, element-wise.\n\n    .. note::\n        The 'out' parameter is currently not supported.\n\n    Args:\n        x1 (dpnp.dparray): array.\n        x2 (dpnp.dparray): array.\n        out (dpnp.dparray): Output array.\n\n    Returns:\n        dpnp.dparray: The bases in x1 raised to the exponents in x2.\n        This is a scalar if both x1 and x2 are scalars.\n\n    .. seealso:: :obj:`numpy.power`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n    is_x2_dparray = isinstance(x2, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n        if (x1.size != x2.size):\n            checker_throw_value_error(\"power\", \"size\", x1.size, x2.size)\n\n        if (x1.shape != x2.shape):\n            checker_throw_value_error(\"power\", \"shape\", x1.shape, x2.shape)\n\n        return dpnp_power(x1, x2)\n\n    return call_origin(numpy.power, x1, x2, **kwargs)\n\n\ndef prod(x1, **kwargs):\n    \"\"\"\n    Calculate product of array elements over a given axis.\n\n    .. seealso:: :obj:`numpy.prod`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n        return dpnp_prod(x1)\n\n    return call_origin(numpy.prod, x1, **kwargs)\n\n\ndef remainder(x1, x2, **kwargs):\n    \"\"\"\n    Return element-wise remainder of division.\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n    is_x2_dparray = isinstance(x2, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n        if (x1.size != x2.size):\n            checker_throw_value_error(\"remainder\", \"size\", x1.size, x2.size)\n\n        if (x1.shape != x2.shape):\n            checker_throw_value_error(\"remainder\", \"shape\", x1.shape, x2.shape)\n\n        return dpnp_remainder(x1, x2)\n\n    return call_origin(numpy.remainder, x1, x2, **kwargs)\n\n\ndef sign(x1, **kwargs):\n    \"\"\"\n    Compute the sign of the elements, element-wise.\n\n    .. seealso:: :obj:`numpy.sign`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n        return dpnp_sign(x1)\n\n    return call_origin(numpy.sign, x1, **kwargs)\n\n\ndef subtract(x1, x2, **kwargs):\n    \"\"\"\n    Subtract arguments, element-wise.\n\n    .. note::\n        The 'out' parameter is currently not supported.\n\n    Args:\n        x1 (dpnp.dparray): array.\n        x2 (dpnp.dparray): array.\n        out (dpnp.dparray): Output array.\n\n    Returns:\n        dpnp.dparray: The difference of x1 and x2, element-wise.\n        This is a scalar if both x1 and x2 are scalars.\n\n    .. seealso:: :obj:`numpy.subtract`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n    is_x2_dparray = isinstance(x2, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray and is_x2_dparray and not kwargs):\n        if (x1.dtype != numpy.bool) and (x2.dtype != numpy.bool):\n            if (x1.size != x2.size):\n                checker_throw_value_error(\"subtract\", \"size\", x1.size, x2.size)\n\n            if (x1.shape != x2.shape):\n                checker_throw_value_error(\"subtract\", \"shape\", x1.shape, x2.shape)\n\n        return dpnp_subtract(x1, x2)\n\n    return call_origin(numpy.subtract, x1, x2, **kwargs)\n\n\ndef sum(x1, **kwargs):\n    \"\"\"\n    Sum of array elements over a given axis.\n\n    .. seealso:: :obj:`numpy.sum`\n\n    \"\"\"\n\n    is_x1_dparray = isinstance(x1, dparray)\n\n    if (not use_origin_backend(x1) and is_x1_dparray):\n        axis = kwargs.get('axis')\n\n        result = dpnp_sum(x1, axis)\n\n        # scalar returned\n        if result.shape == (1,):\n            return result.dtype.type(result[0])\n\n        return result\n\n    return call_origin(numpy.sum, x1, **kwargs)\n\n\ndef true_divide(*args, **kwargs):\n    \"\"\"\n    Provide a true division of the inputs, element-wise.\n\n    .. seealso:: :obj:`numpy.true_divide`\n\n    \"\"\"\n\n    return dpnp.divide(*args, **kwargs)\n\n\ndef trunc(x1, **kwargs):\n    \"\"\"\n    Compute the truncated value of the input, element-wise.\n\n    .. 
seealso:: :obj:`numpy.trunc`\n\n \"\"\"\n\n is_x1_dparray = isinstance(x1, dparray)\n\n if (not use_origin_backend(x1) and is_x1_dparray and not kwargs):\n return dpnp_trunc(x1)\n\n return call_origin(numpy.trunc, x1, **kwargs)\n","sub_path":"dpnp/dpnp_iface_mathematical.py","file_name":"dpnp_iface_mathematical.py","file_ext":"py","file_size_in_byte":19136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75057454","text":"\"\"\"Platform for MagicHome switches integration.\"\"\"\nfrom homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchDevice\n\nfrom . import MAGICHOME_API, MagicHomeDevice\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up MagicHome Switch device.\"\"\"\n if discovery_info is None:\n return\n magichome = hass.data[MAGICHOME_API]\n dev_ids = discovery_info.get(\"dev_ids\")\n devices = []\n for dev_id in dev_ids:\n device = magichome.get_device_by_id(dev_id)\n if device is None:\n continue\n devices.append(MagicHomeSwitch(device))\n add_entities(devices)\n\n\nclass MagicHomeSwitch(MagicHomeDevice, SwitchDevice):\n \"\"\"MagicHome Switch Device.\"\"\"\n\n def __init__(self, magichome):\n \"\"\"Init MagicHome switch device.\"\"\"\n super().__init__(magichome)\n self.entity_id = ENTITY_ID_FORMAT.format(magichome.object_id())\n\n @property\n def is_on(self):\n \"\"\"Return true if switch is on.\"\"\"\n return self.magichome.state()\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the switch on.\"\"\"\n self.magichome.turn_on()\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the device off.\"\"\"\n self.magichome.turn_off()\n","sub_path":"homeassistant/components/magichome/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560403422","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Your App secret key\nSECRET_KEY = '\\2\\1thisismyscretkey\\1\\2\\e\\y\\y\\h'\n\n# Flask-WTF flag for CSRF\nCSRF_ENABLED = True\n\n#---------------------------------------------------\n# Image and file configuration\n#---------------------------------------------------\n# The file upload folder, when using models with files\nUPLOAD_FOLDER = basedir + '/uploads/'\n","sub_path":"flask/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"206109567","text":"def solution(n, m):\n answer = []\n answer.append(gcd(n,m))\n answer.append((n*m/gcd(n,m)))\n return answer\n\ndef gcd(a,b):\n md = a%b\n while md >0:\n a = b\n b = md\n md = a%b\n return b","sub_path":"programers/level1/gcdLcd.py","file_name":"gcdLcd.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"249214051","text":"'''\nComputer Vision Midterm Project\nProject group members: \n 1. Aniket Bote (N12824308)\n 2. 
Sindhu Harish (N19806874)\n'''\n\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom utils import Operator, apply_discrete_convolution\n\n\ndef perform_gradient_operation(args, image_name, image):\n '''\n Args:\n image : An image on which gradient operation will happen\n Returns:\n Magnitude : Magnitude of the gradient\n Theta : Gradient Angle\n '''\n # Compute horizontal gradients\n dfdx = apply_discrete_convolution(image, Operator.gx)\n\n #Copy Image to Output folder after horizontal gradient\n cv2.imwrite(os.path.join(args.output_folder, image_name + '_Gx_normalized.bmp'), dfdx)\n\n # Compute vertical gradients\n dfdy = apply_discrete_convolution(image, Operator.gy)\n\n #Copy Image to Output folder after vertical gradient\n cv2.imwrite(os.path.join(args.output_folder, image_name + '_Gy_normalized.bmp'), dfdy)\n\n # Compute magnitude of the gradient\n m = np.sqrt(np.square(dfdx) + np.square(dfdy))\n \n # Normalize gradient magnitude\n m = np.absolute(m) / 3\n\n #Copy Image to Output folder with gradient magnitude value\n cv2.imwrite(os.path.join(args.output_folder, image_name + '_gradient_magnitude_normalized.bmp'), m)\n\n # Compute gradient angle\n theta = np.degrees(np.arctan2(dfdy, dfdx))\n\n return m, theta\n","sub_path":"gradient_operation.py","file_name":"gradient_operation.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157869370","text":"import logging\nfrom time import sleep\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.common import exceptions\n\n\nclass BaseWebPage(object):\n logger = logging.getLogger('test')\n\n def __init__(self, driver, url):\n self.driver = driver\n if driver.current_url is not url:\n self.driver.get(url)\n\n def find_element(self, locator):\n driver = self.driver\n element = WebDriverWait(driver, 10).until(expected_conditions.presence_of_element_located(locator))\n return element\n\n def clear(self, locator):\n self.find_element(locator)\\\n .clear()\n return self\n\n def type(self, locator, text):\n self.find_element(locator)\\\n .send_keys(text)\n return self\n\n def click(self, locator, max_retry=2):\n retry = 0\n while(retry < max_retry):\n try:\n element = WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(locator))\n element.click()\n return self\n except (exceptions.WebDriverException, exceptions.StaleElementReferenceException) as ex:\n if retry >= max_retry:\n self.logger.error(\"Locator not found\")\n self.logger.warning(\"Locator cannot be clicker (retry {})\".format(retry))\n retry += 1\n sleep(1)\n\n def is_element_visible(self, locator):\n try:\n WebDriverWait(self.driver, 10)\\\n .until(expected_conditions.visibility_of_element_located(locator))\n return True\n except TimeoutException:\n return False\n","sub_path":"framework/page_object/base_pages/baseWebPage.py","file_name":"baseWebPage.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173045532","text":"'''\n*******************************************************************************\n Copyright 2013 EMC Inc.\n\nFilename]: tc_uefi_ms_IoInfoInPost.py\nAuthor ]: Jane.Jin@emc.com\nPurpose ]: Check IO information of slot\nContains]: \n tc_uefi_ms_IoInfoInPost - class\n __init__\n test\nHistory 
]:\n******************************************************************************\n VER NAME DATE COMMENT\n******************************************************************************\n R00 JaneJin 03/10/2014 Initial edition\n R01 JaneJin 03/21/2014 Second edition\n******************************************************************************\n'''\n\nfrom case.CBaseCase import *\n\nclass T3508_uefi_IoInfoInPost(CBaseCase):\n \"\"\"\n******************************************************************************\nPurpose ]: Check IO information of slot\nAuthor ]: Jane.Jin@emc.com\nMethod ]:\nReqID ]: \nSprint ]: ATOM 2.0.16\nTicket ]: ATOM-987\nPlatform]: All\nType ]: Auto\n******************************************************************************\n \"\"\"\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n\n \n def test(self):\n if self.enclosure.sp.go_to_post() != 0:\n self.result(FAIL, 'Fail to go to post')\n return\n \n obj_slic_glacier = None\n for each_slic in self.enclosure.sp.slic:\n if each_slic == None:\n self.log('ERROR', 'slic is not inserted')\n continue\n elif each_slic.get_slic_pci_type() == '':\n self.log('ERROR', 'There is no slic on the current slot')\n continue\n else:\n obj_slic_glacier = each_slic\n if not each_slic.check_slic_pci_i2c_type():\n self.result(FAIL, 'PCI and I2C type of %dth slic are different' %each_slic.int_slot)\n else:\n if not each_slic.check_slic_pci_type_in_config():\n self.result(FAIL, 'Error PCI type of %dth slic in configuration'%each_slic.int_slot)\n continue\n \n if obj_slic_glacier is None:\n self.result(NA, 'No SLIC found')\n return\n\n\n \n ","sub_path":"case/OUT_OF_DATE/T3508_uefi_IoInfoInPost.py","file_name":"T3508_uefi_IoInfoInPost.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"226171026","text":"from builtins import range\nfrom random import choice, shuffle, random, randint\nimport logging\nimport operator\nfrom functools import reduce\nfrom itertools import chain, combinations\n\n# Utils logger\nlogger = logging.getLogger(__name__)\n\n\ndef reservoir_sample(items, k):\n \"\"\" Select k items from items.\n\n Inputs:\n items - an iterable to choose k from\n k - number to choose\n\n Returns:\n The k randomly chosen samples\n\n Assumptions:\n len(items) >= k\n\n \"\"\"\n samples = list(items[:k])\n n = k\n for i in range(k, len(items)):\n n += 1\n index = randint(0, n)\n if index < k:\n samples[index] = items[i]\n shuffle(samples) # There are better ways to get a random order\n return samples\n\n\ndef ncr(n, r):\n r = min(r, n - r)\n numer = reduce(operator.mul, range(n, n - r, -1), 1)\n denom = reduce(operator.mul, range(1, r + 1), 1)\n return numer / denom\n\n\ndef tri_num(n):\n num = 0\n for i in range(n):\n num += i + 1\n return num\n\n\ndef iterate_recursively(it, allowed_iterables=(list, tuple)):\n if isinstance(it, allowed_iterables):\n for item in it:\n for value in iterate_recursively(item, allowed_iterables):\n yield value\n else:\n yield it\n\n\ndef count_dicts(din, dout=None):\n \"\"\"Count values in dictionary.\n\n For the set of keys in d1 and d2 make a dictionary. 
For each such dictionary add each value from d1[key], d2[key] as\n    keys and set the value to the number of occurrences of the value.\n\n    \"\"\"\n    if dout is None:\n        dout = {}\n\n    for key, value in din.iteritems():\n        # Make the entry for the key if needed\n        if key not in dout:\n            dout[key] = {}\n        # Add value as key if needed\n        if value not in dout[key]:\n            dout[key][value] = 1\n        else:\n            dout[key][value] += 1\n\n    return dout\n\n\ndef powerset(it):\n    # Copy to list\n    it2 = list(it)\n    return chain.from_iterable(combinations(it2, r) for r in range(len(it2) + 1))\n\n\nclass ConfigMixin(object):\n    def __init__(self):\n        super(ConfigMixin, self).__init__()\n        self._allowed_config_keys = []\n\n    def update_config(self, **kwargs):\n        \"\"\" Set the sensors mode.\n\n        Inputs:\n            **kwargs - other keyword arguments for derived classes\n\n        \"\"\"\n        for key, value in kwargs.iteritems():\n            if key not in self._allowed_config_keys:\n                raise KeyError('{} is not allowed to be changed'.format(key))\n            setattr(self, key, value)\n","sub_path":"srp_md/src/srp_md/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"643339421","text":"from Asset import AssetBase\n\nclass RatingBase(AssetBase):\n    '''\n    classdocs\n    '''\n    def __init__(self, rating=None, createdon=None, user=None, updatedon=None):\n        AssetBase.__init__(self)\n        self._attrSpecs = getattr(self, '_attrSpecs', {})\n        self._attrSpecs.update({'rating': {'type': 'int', 'name': 'rating', 'native': True}, 'createdOn': {'type': 'date', 'name': 'createdon', 'minOccurs': '0', 'native': True}, 'user': {'type': 'Link', 'name': 'user', 'minOccurs': '0', 'native': False}, 'updatedOn': {'type': 'date', 'name': 'updatedon', 'minOccurs': '0', 'native': True}})\n        self.rating = rating\n        self.createdon = createdon\n        self.user = user\n        self.updatedon = updatedon 	 \n","sub_path":"core/restclient/generator/v3_0/agilitymodel/base/Rating.py","file_name":"Rating.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"140231126","text":"import os\nimport torch.utils.data as data\nfrom PIL import Image\nfrom data.transforms import get_params, get_transform\nfrom data.image_folder import make_dataset\n\n\nclass BDD100KDataset(data.Dataset):\n    def __init__(self, cfg):\n        super(BDD100KDataset, self).__init__()\n        self.cfg = cfg\n\n        image_dir = os.path.join(cfg.dataroot, 'images', cfg.phase)\n        self.image_paths = sorted(make_dataset(image_dir))\n\n        label_dir = os.path.join(cfg.dataroot, 'labels', cfg.phase)\n        self.label_paths = sorted(make_dataset(label_dir))\n\n    def __getitem__(self, index):\n        # load image\n        image_path = self.image_paths[index]\n        image = Image.open(image_path).convert('RGB')\n        params = get_params(self.cfg, image.size)\n        transform_image = get_transform(self.cfg, params, self.cfg.image_preprocess)\n        image_tensor = transform_image(image)\n\n        # load label\n        label_path = self.label_paths[index]\n        label = Image.open(label_path).convert('L')\n        transform_label = get_transform(self.cfg, params, self.cfg.label_preprocess, Image.NEAREST, False)\n        label_tensor = transform_label(label) * 255\n        label_tensor[label_tensor == 255] = self.cfg.num_classes  # ignore label is cfg.num_classes\n\n        input_dict = {'image': image_tensor, 'label': label_tensor}\n        return input_dict\n
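\n    # Editorial note: get_params is computed once from the input image and the\n    # same params are reused for the label transform, so any random crop/flip\n    # is applied identically to the image and its segmentation mask.\n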
\n    def __len__(self):\n        return len(self.image_paths)\n","sub_path":"data/bdd100k_dataset.py","file_name":"bdd100k_dataset.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"595129670","text":"\"\"\"\nExample demonstrating clipping planes on a mesh.\n\"\"\"\n\nimport pygfx as gfx\n\nfrom PySide6 import QtWidgets, QtCore\nfrom wgpu.gui.qt import WgpuCanvas\n\n\nclass WgpuCanvasWithInputEvents(WgpuCanvas):\n    _drag_modes = {QtCore.Qt.RightButton: \"pan\", QtCore.Qt.LeftButton: \"rotate\"}\n    _mode = None\n\n    def wheelEvent(self, event):  # noqa: N802\n        controls.zoom(2 ** (event.angleDelta().y() * 0.0015))\n\n    def mousePressEvent(self, event):  # noqa: N802\n        mode = self._drag_modes.get(event.button(), None)\n        if self._mode or not mode:\n            return\n        self._mode = mode\n        drag_start = (\n            controls.pan_start if self._mode == \"pan\" else controls.rotate_start\n        )\n        drag_start(\n            (event.position().x(), event.position().y()),\n            self.get_logical_size(),\n            camera,\n        )\n        app.setOverrideCursor(QtCore.Qt.ClosedHandCursor)\n\n    def mouseReleaseEvent(self, event):  # noqa: N802\n        if self._mode and self._mode == self._drag_modes.get(event.button(), None):\n            # pick the stop handler before clearing the mode, otherwise the\n            # comparison below would always see None and call rotate_stop\n            drag_stop = (\n                controls.pan_stop if self._mode == \"pan\" else controls.rotate_stop\n            )\n            self._mode = None\n            drag_stop()\n            app.restoreOverrideCursor()\n\n    def mouseMoveEvent(self, event):  # noqa: N802\n        if self._mode is not None:\n            drag_move = (\n                controls.pan_move if self._mode == \"pan\" else controls.rotate_move\n            )\n            drag_move((event.position().x(), event.position().y()))\n\n\n# Create a canvas and a renderer\n\napp = QtWidgets.QApplication([])\ncanvas = WgpuCanvasWithInputEvents(size=(800, 400))\nrenderer = gfx.renderers.WgpuRenderer(canvas)\n\n# Compose two of the same scenes\n\n\ndef create_scene(clipping_planes, clipping_mode):\n\n    maxsize = 221\n    scene = gfx.Scene()\n    for n in range(20, maxsize, 50):\n        material = gfx.MeshPhongMaterial(\n            color=(n / maxsize, 1, 0, 1),\n            clipping_planes=clipping_planes,\n            clipping_mode=clipping_mode,\n        )\n        geometry = gfx.BoxGeometry(n, n, n)\n        cube = gfx.Mesh(geometry, material)\n        scene.add(cube)\n\n    return scene\n\n\nclipping_planes = [(-1, 0, 0, 0), (0, 0, -1, 0)]\nscene1 = create_scene(clipping_planes, \"any\")\nscene2 = create_scene(clipping_planes, \"all\")\n\ncamera = gfx.PerspectiveCamera(70, 16 / 9)\ncamera.position.z = 250\n\ncontrols = gfx.OrbitControls(camera.position.clone())\n\n\ndef animate():\n\n    controls.update_camera(camera)\n\n    w, h = canvas.get_logical_size()\n    renderer.render(scene1, camera, flush=False, viewport=(0, 0, w / 2, h))\n    renderer.render(scene2, camera, flush=False, viewport=(w / 2, 0, w / 2, h))\n    renderer.flush()\n\n    canvas.request_draw()\n\n\nif __name__ == \"__main__\":\n    canvas.request_draw(animate)\n    app.exec()\n","sub_path":"examples/clipping_planes.py","file_name":"clipping_planes.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"356741977","text":"import multiprocessing\nimport requests\nimport time\n\n\ndef func(number):\n    url = \"https://docs.python.org/3/library/concurrent.futures.html\"\n    for i in range(number):\n        response = requests.get(url)\n        with open(\"processes_example.com.txt\", \"w\") as output:\n            output.write(response.text)\n\n\nif __name__ == \"__main__\":\n    starttime = time.time()\n    number = 50\n    process1 = multiprocessing.Process(target=func, args=(number,))\n    process2 = 
multiprocessing.Process(target=func, args=(number,))\n\n process1.start()\n process2.start()\n\n process1.join()\n process2.join()\n\n print(\"That took {} seconds\".format(time.time() - starttime))\n","sub_path":"Yurii_Khomych/l_13_gil/threads_vs_processes/ex_2_processes_io.py","file_name":"ex_2_processes_io.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211367377","text":"#!/usr/bin/python3\n#-*-coding: utf-8-*-\nfrom socket import *\nimport os\nfrom sys import argv\n\n\ndef h():\n\tprint(\"Este programa fue creado con fines didacticos, el mal uso dado es responsabilidad del usuario.\")\n\tprint(\"-i: Aquí debes de poner la ip de a donde se va a conectar..\")\n\tprint(\"-p: Puerto por el que se va a conectar.\")\n\texit()\n\nif __name__ == '__main__':\n\tif len(argv) < 5 and argv[1] != \"-h\" or len(argv) > 5:\n\t print(\"{} -h para ver las opciones.\".format(argv[0]))\n\t exit()\n\telse:\n\t\targcount = 0\n\t\tip = str()\n\t\tport = 0\n\t\tfor arg in argv:\n\t\t\tif arg[0] != \"-\":\n\t\t\t\targcount += 1\n\t\t\t\tcontinue\n\t\t\telif arg == \"-h\":\n\t\t\t\th()\n\t\t\telif arg == \"-p\":\n\t\t\t\ttry:\n\t\t\t\t\tport = int(argv[argcount + 1])\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"{} no es un numero valido.\".format(argv[argcount + 1]))\n\t\t\t\t\texit()\n\t\t\telif arg == \"-i\":\n\t\t\t\tip = str(argv[argcount + 1])\n\t\t\telse:\n\t\t\t\tprint(\"{} no se reconoce como una bandera valida.\".format(arg))\n\t\t\t\texit()\n\t\t\targcount += 1\n\t\ttry:\n\t\t\tsock = socket(AF_INET, SOCK_STREAM)\n\t\t\tsock.connect((ip, port))\n\t\texcept Exception as e:\n\t\t\tprint(\"No se ha podido conectar a {} por el puerto {}.\\n\".format(ip, port, e))\n\t\t\texit()\n\t\telse:\n\t\t\twhile True:\n\t\t\t\tcmd = sock.recv(1024).decode()\n\t\t\t\tif cmd[:2] == \"cd\":\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.chdir(cmd[2:])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tansw = \"El sistema no pudo reconocer la ruta especificada.\\n{}\".format(os.getcwd())\n\t\t\t\t\telse:\n\t\t\t\t\t\tansw = \"{}\".format(os.getcwd())\n\t\t\t\telif cmd[:2] == \"ls\" or cmd[:3] == \"dir\":\n\t\t\t\t\ttry:\n\t\t\t\t\t\tansw = \"{}\\n{}\".format(os.getcwd(), os.listdir())\n\t\t\t\t\texcept:\n\t\t\t\t\t\tansw = os.getcwd()\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\tansw = \"Se ha ejecutado el comando correctamente.\"\n\t\t\t\t\texcept:\n\t\t\t\t\t\tansw = \"No se ha podido ejecutar el comando.\"\t\t\n\t\t\t\tsock.send(answ.encode())\n\n\n","sub_path":"backdoors_de_prueba/backdoor4client.py","file_name":"backdoor4client.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555393277","text":"import unittest\r\nfrom selenium import webdriver\r\nfrom WelcomePage import Welcome\r\n\r\n\r\nclass TestGetToCreateAccount(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.driver=webdriver.Chrome()\r\n self.driver.get(\"http://localhost:8080/\")\r\n \r\n def test_getToSignUp(self):\r\n welcome=Welcome(self.driver)\r\n welcome.SignIn()\r\n self.driver.find_element_by_xpath('//a[@data-locale-item=\"createAccount\"]').click()\r\n assert \"EasyPay :: Registration\" in self.driver.title\r\n\r\n def TearDown(self):\r\n self.driver.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
unittest.main()\n\n\n","sub_path":"TestGetToCreateAccount.py","file_name":"TestGetToCreateAccount.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"30319934","text":"\"\"\" \nDatalog class definition : Data logging class\n\nMethods:\n    Datalog(sim) : constructor\n\n    start(acbatch, dt) : select traffic and start the datalogging\n    update(sim) : add a line to the datalogging buffer\n    save() : save data to file\n    \nCreated by : Jacco M. Hoekstra (TU Delft)\nDate : October 2013\n\nModification : - added start method including command processing\n             - added CSV-style saving\nBy : P. Danneels\nDate : April 2016\n\n\"\"\"\nimport os\nimport numpy as np\nfrom misc import tim2txt\nfrom time import strftime, gmtime\n\n#-----------------------------------------------------------------\n\n\nclass Datalog():\n    def __init__(self, sim):\n        self.sim = sim\n        # Create a buffer and save filename\n        self.fname = os.path.dirname(__file__) + \"/../../data/output/\" \\\n            + strftime(\"%Y-%m-%d-%H-%M-%S-BlueSky.txt\", gmtime())\n        self.buffer = [\"time;acid;gs;vs;track;lat;long\\n\"]  # Log data\n        self.aclist = np.zeros(1)  # AC list to log\n        self.dt = 1.  # Default logging interval\n        self.t0 = -9999  # Timer\n        self.swlog = False  # Logging started\n        return\n\n    def start(self, acbatch, dt):\n        if len(self.sim.traf.id) == 0:\n            return False, \"LOG: No traffic present, log not started.\"\n        self.id2idx = np.vectorize(self.sim.traf.id2idx)  # vectorize function\n        if acbatch is None:  # No batch defined, log all\n            self.aclist = self.id2idx(self.sim.traf.id)\n        elif acbatch == 'AREA':\n            if self.sim.traf.swarea:\n                self.aclist = self.id2idx(self.sim.traf.id)\n            else:\n                return False, \"LOG: AREA DISABLED, LOG NOT STARTED\"\n        else:\n            idx = self.id2idx(acbatch)\n            if idx < 0:  # not an acid or ac does not exist\n                return False, \"LOG: ACID \" + acbatch + \" NOT FOUND\"\n            else:\n                self.aclist = idx\n        self.dt = dt\n        if self.aclist.any():\n            self.swlog = True\n        return True, \"LOG started.\"\n\n    def update(self, sim):\n        if not self.swlog:  # Only update when logging started and traffic is selected\n            return\n        if abs(sim.simt - self.t0) < self.dt:  # Only do something when time is there\n            return\n        self.t0 = sim.simt  # Update time for scheduler\n\n        t = tim2txt(sim.simt)  # Nicely formatted time\n\n        if self.aclist.ndim < 1:  # Write to buffer for one AC\n            self.writebuffer(sim, t, self.aclist)\n        else:  # Write to buffer for multiple AC\n            for i in self.aclist:\n                self.writebuffer(sim, t, self.aclist[i])\n        return\n\n    def writebuffer(self, sim, t, idx):\n        self.buffer.append(t + \";\" +\n                           str(sim.traf.id[idx]) + \";\" +\n                           str(sim.traf.gs[idx]) + \";\" +\n                           str(sim.traf.vs[idx]) + \";\" +\n                           str(sim.traf.trk[idx]) + \";\" +\n                           str(sim.traf.lat[idx]) + \";\" +\n                           str(sim.traf.lon[idx]) + \"\\n\")\n        return\n\n    def save(self):  # Write buffer to file\n        f = open(self.fname, \"w\")\n        f.writelines(self.buffer)\n        f.close()\n        return\n","sub_path":"bluesky/tools/datalog.py","file_name":"datalog.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"454595347","text":"import csv\nimport sqlalchemy\nimport logging\nimport datetime\nimport pdb\nfrom sqlalchemy import inspect, create_engine\nfrom sqlalchemy import exc\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy import MetaData\n# import 
database_config\n# import database_credentials\n\n\n# def validate_header1(header1):\n# print('need to validate')\n#\n# def validate_header2(header2):\n# #make sure it's there\n# print('need to validate')\n#\n# def validate_footer(footer):\n# print('need to validate')\n\ndef get_headers_footer(filename):\n with open(filename) as f:\n reader = csv.reader(f, delimiter='\\t')\n header_1 = next(reader)\n header_2 = next(reader)\n for line in f:\n pass\n footer = line.strip().split('\\t')\n hhf_dict = {'header_1': header_1, 'header_2': header_2, 'footer': footer}\n return hhf_dict\n\ndef parse_tsv_filename(filename, hhf_dict):\n # tsv filename is the fourth field in header 1. ignore subdirectory name\n tsv_name = hhf_dict['header_1'][3][9:]\n tsv_name_metadata = {}\n parts = tsv_name.split('_')\n tsv_name_metadata = {\n 'library':parts[0],\n 'table':parts[1],\n 'datetime':parts[2]+parts[3]\n }\n #the counter doesn't exist in the current filename\n return tsv_name_metadata\n\n'''\nalex's process id codes added for testing\n'''\ndef set_dw_process_metadata(row_dict):\n # add dummy em_create_dw_prcsng_cycle_id bc it's a PK\n row_dict['em_create_dw_prcsng_cycle_id'] = 9999\n row_dict['em_create_dw_job_exectn_id'] = 9999\n row_dict['em_create_dw_job_name'] = \"load FET\"\n row_dict['em_create_dw_job_version_no'] = \"0.0\"\n row_dict['em_create_user_id'] = \"thschone\"\n row_dict['em_create_tmstmp'] = datetime.datetime.now()\n return row_dict\n\n\n# determine if csv row is a footer\ndef row_is_footer(row):\n return row[0] == 'T'\n\n# parse values from csv row into dict with column names as keys\ndef parse_row(row, header_2):\n if row_is_footer(row):\n # validate footer here\n raise StopIteration()\n else:\n row_dict = {}\n # added code to use column headers as number of rows, ignore if more fields than that\n for i, field in enumerate(row):\n # don't continue farther than the number of header columns (temp fix)\n if i < len(header_2):\n row_dict[header_2[i]] = field\n else:\n logging.warning(\"\\nNot enough column names after \" + header_2[i-1] + ': ' + row[i-1] +\".... 
skipping extra value\\n\\n\")\n break\n return row_dict\n\n\n\n\n\ndef load_file_equivalent_table(filename, engine, bib_rec_stg1_tables):\n\n '''\n header pre processing\n '''\n hhf_dict = get_headers_footer(filename)\n\n tsv_name_metadata = parse_tsv_filename(filename, hhf_dict)\n\n\n\n '''\n load tsv into file-equivalent table\n '''\n\n #create session\n Session = sessionmaker(bind=engine)\n session = Session()\n\n '''\n write to FET\n '''\n # get name of the base table in sqlalchemy based on filename\n filename_only= filename.split(\"/\")[2]\n lib_table = filename_only.split(\"_\")[0:2]\n table_base_class = \"_\".join(lib_table)\n\n # read each line of the csv ignoring 2 headers and last line and write to the db\n with open(filename) as f:\n reader = csv.reader(f, delimiter='\\t')\n header_1 = next(reader) # row 0\n header_2 = next(reader) # row 1\n\n #read all lines after lines one and two\n try:\n while True:\n row = next(reader)\n row_dict = parse_row(row, header_2)\n set_dw_process_metadata(row_dict)\n try:\n # insert the row into SQLAlchemy table base class\n record = bib_rec_stg1_tables[table_base_class](**row_dict)\n session.add(record)\n session.commit()\n except exc.SQLAlchemyError as e:\n print(e)\n session.rollback()\n except StopIteration:\n pass\n","sub_path":"dwetl/loadstg1.py","file_name":"loadstg1.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192546330","text":"#!/usr/bin/env python\n\"\"\"DOC STRING\"\"\"\nfrom flask import render_template, current_app, request, flash, \\\n url_for, redirect, jsonify\nfrom flask.ext.login import current_user\nfrom ..db_models import WorkTime, Fprint, Stroker, DiDwells\nfrom ..log_auth.views import login_confirmed\nfrom ..log_auth.fprint_handling2 import RunCompare2\nfrom . 
import proj\nfrom .views import build_request_dict, gen_stroke_dynamics\nfrom code_list import code_li\nfrom forms import TimeEntry\n__author__ = 'timesheet'\n__project__ = 'donal'\n\n\n# ========================\n# SIMPLE STUFF\n# ========================\n@proj.route('/worktime', methods=['GET', 'POST'])\n@login_confirmed\ndef worktime():\n form = TimeEntry()\n # Row click activates delete routine\n try:\n return redirect(url_for('.del_row', c_id=request.args.get('c')))\n except:\n pass\n if request.method == 'POST':\n di = dict([(k, v) for k, v in request.form.items() if k != 'csrf_token'])\n # adjusts\n di['member_id'] = current_user.id\n if di['quantity'] == '': di['quantity'] = 0\n try:\n WorkTime.create(**di)\n except:\n flash('That didn\\'t save properly.')\n if not current_user.multi:\n your_time = WorkTime.query.filter_by(member_id=current_user.id) \\\n .order_by(WorkTime.id).all()\n else:\n your_time = WorkTime.query.order_by(WorkTime.id).all()\n \"\"\"\n return set_template('panelbuilder.html', [your_time, code_di], '.worktime',\n panel_args=dict(\n patex=current_app.config['PAHDS']['timer'],\n tadata=current_app.config['TADATA']['timer'],\n wid=8\n ))\n \"\"\"\n return render_template('proj/worktime.html', form=your_time,\n endpoint='.worktime',\n panel_args=dict(\n patex=current_app.config['PAHDS']['timer'],\n tadata=current_app.config['TADATA']['timer'],\n wid=8),\n kwargs={},\n code_li=code_li\n )\n\n\n@proj.route('/del_row/')\n@login_confirmed\ndef del_row(c_id):\n return render_template('./proj/yousure.html', c_id=c_id)\n\n\n@proj.route('/deleter/')\n@login_confirmed\ndef deleter(c_id):\n row = WorkTime.query.filter_by(id=c_id).first()\n row.delete()\n return redirect(url_for('.worktime'))\n\n\n@proj.route('/_captureFP4', methods=['GET', 'POST'])\ndef fprint4():\n \"\"\"Processes our js data (FP, Keystroker) into database.\n :return: truncated data for visual display.\n \"\"\"\n datapack = None\n cur_host = [('None', 'None')]\n cur_gues = [('None', 'None')]\n di = build_request_dict(request.get_json()['fp'].items())\n rc = RunCompare2(di)\n # now filtering on bigH so the idea of similarity is nonsense\n # everything will be a ping\n # previously filtering on member_id\n # got rid of for the beta test\n # find previous case with same bigH\n simFPs = Fprint.query.filter_by(bigH=di['bigH']).\\\n order_by(Fprint.id.desc()).all()\n # moved up here; there is no 'password' in our case\n # multiple users can use same machine\n if simFPs:\n sameFP = [ro for ro in simFPs if ro.dyno == di['dyno']]\n if sameFP:\n lastFP = sameFP[0]\n # rc.run_comparison(lastFP) NOT NEEDED\n msg = 'Nice to see you again. #{}'.format(di['bigH'])\n lastFP.ping()\n datapack = lastFP.last_log\n # get guests, host\n # todo rules should only allow you to select one host\n cur_g = lastFP.host_of.all()\n cur_h = lastFP.guest_of.first()\n if cur_g:\n cur_gues = [(ro.guest.dyno, ro.guest.bigH) for ro in cur_g]\n if cur_h:\n cur_host = [(ro.host.dyno, ro.host.bigH) for ro in cur_h]\n else:\n lastFP = Fprint.create(**rc.new_di)\n msg = 'HalfFresh/newemail Profile created. #{}'.format(di['bigH'])\n else:\n lastFP = Fprint.create(**rc.new_di)\n msg = 'Fresh Profile created. 
#{}'.format(di['bigH'])\n # ==============================\n # KEYSTROKE BUILDING\n # Only successful re- or new-logins reach here.\n # ==============================\n ks_packs = [(k, v) for k, v in request.get_json()['ksb'].items()\n if k.startswith('stroker') and v['kblob']]\n for (k, v) in ks_packs:\n ks_pack = v['kblob']\n dwells, digraphs = gen_stroke_dynamics(ks_pack)\n # Raw Keystrokes (we might drop this and just retain Features)\n for ro in ks_pack:\n Stroker.create(**dict(zip([\n 'ktype', 'kcode', 'ktime', 'klab', 'fp_id', 'fp_logins'],\n ro + [k, lastFP.id, lastFP.logins])\n ))\n # Feature Extractions\n for ro in digraphs + dwells:\n DiDwells.create(**dict(zip([\n 'didwells', 'klab', 'fp_id', 'fp_logins'],\n [ro, k, lastFP.id, lastFP.logins])\n ))\n msg = '... ' + msg\n return jsonify(msg=msg, datapack=datapack, cur_gues=cur_gues,\n cur_host=cur_host)\n","sub_path":"app/proj/views2.py","file_name":"views2.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217382382","text":"import sys\nimport os\n\nfrom util import database\nfrom models.Player import Player\nfrom models.Category import Category\n\nimport json\nfrom glom import glom\nfrom pprint import pprint\n\n\ndef import_json_file(name):\n with open('{}/json/{}.json'.format('/'.join(os.path.realpath(__file__).split('/')[:-2]), name)) as f:\n data = json.load(f)\n f.close()\n return data\n\ndef import_json():\n category_data = import_json_file('categories')\n players = import_json_file('players2019')\n data = {'category_data':category_data,\n 'player_data':players}\n return data\n\ndef load_data_into_models(data):\n categories = []\n for item in data['category_data']:\n c = Category(**item)\n categories.append(c)\n\n players = []\n for player in data['player_data']:\n p = Player(**player)\n players.append(p)\n\n populated_models = {'players': players,\n 'categories': categories}\n return populated_models\n\n\ndef check_tables_empty(session):\n players = session.query(Player).first()\n categories = session.query(Category).first()\n if (players == None) and (categories == None):\n return True\n else:\n return False\n\n\n\ndef load_data_into_tables(session, populated_models):\n session.add_all(populated_models['categories'])\n session.add_all(populated_models['players'])\n session.commit()\n return\n\n\ndef main():\n engine, Session = database.connect()\n s = Session()\n tables_empty = check_tables_empty(s)\n\n if tables_empty:\n data = import_json()\n populated_models = load_data_into_models(data)\n load_data_into_tables(s, populated_models)\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"util/load_json.py","file_name":"load_json.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609386863","text":"import networkx as nx\nfrom string import ascii_lowercase as lowercase\n\ndef edit_distance_one(word,words):\n \"\"\"\n for i in range(len(word)):\n left, c, right = word[0:i], word[i], word[i+1:]\n j = lookup[c] # lowercase.index(c)\n for cc in lowercase[j+1:]:\n yield left + cc + right\"\"\"\n adjs= set()\n for w in words:\n if dif_by_one(word,w):\n adjs.add(w)\n return adjs\ndef dif_by_one(word, word2):\n for i in range(len(word)):\n if word[i] in word2:\n word2=word2.replace(word[i],\"\",1)\n return len(word2) is 1\n\n\ndef generate_graph(words):\n G = nx.Graph(name=\"words\")\n lookup = dict((c,lowercase.index(c)) for 
c in lowercase)\n candgen = ((word, cand) for word in sorted(words)\n for cand in edit_distance_one(word,words))\n G.add_nodes_from(words)\n for word, cand in candgen:\n G.add_edge(word, cand)\n return G\n\ndef words_graph(isFour):\n \"\"\"Return the words example graph from the Stanford GraphBase\"\"\"\n words=set()\n filename = 'words.dat'\n if isFour:\n filename = 'words4.dat'\n for line in open(filename, 'r'):\n w=\"\"\n for c in line:\n if c in lowercase:\n w = w + c\n else:\n break\n if len(w.strip()) is not 0:\n words.add(w)\n return generate_graph(words)\n\nif __name__ == '__main__':\n from networkx import *\n start=input('Enter a starting word: ')\n end=input('Enter an ending word: ')\n isFour=False\n if len(start) is 4:\n isFour=True\n G=words_graph(isFour)\n print(\"Loaded words_dat.txt containing 5757 five-letter English words.\")\n print(\"Two words are connected if they differ in one letter.\")\n print(\"Graph has %d nodes with %d edges\"\n %(number_of_nodes(G),number_of_edges(G)))\n print(\"%d connected components\" % number_connected_components(G))\n print(\"Shortest path between %s and %s is\"%(start,end))\n try:\n sp=shortest_path(G, start, end)\n for n in sp:\n print(n)\n except nx.NetworkXNoPath:\n print(\"None\")\n\n\n\n","sub_path":"lab7/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602761030","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 11 20:59:16 2021\r\n\r\n@author: Pastor\r\n\"\"\"\r\n\r\n## The data is taken using EST time\r\n\r\nimport requests # for \"get\" request to API\r\nimport json # parse json into a list\r\nimport pandas as pd # working with data frames\r\nimport datetime as dt # working with dates\r\nimport matplotlib.pyplot as plt # plot data\r\nimport qgrid # display dataframe in notebooks\r\nimport os\r\nimport time\r\nfrom threading import Thread\r\n\r\n# Global variables\r\n\r\nBASE_URL = 'https://api.binance.com'\r\nsymbols = []\r\npair = 'USDT'\r\ntimeframe = '1d' # timeframe use to get the data\r\nfile_out = pair # Folder that contains the data\r\nn_pair = len(pair)\r\n\r\n\r\n# This function allow to get all symbols name pairs with USDT we can change it to use different pairs like BTC\r\n\r\nresp = requests.get(BASE_URL + '/api/v1/ticker/allBookTickers')\r\ntickers_list = json.loads(resp.content)\r\nfor ticker in tickers_list:\r\n if str(ticker['symbol'])[-n_pair:] == pair:\r\n symbols.append(ticker['symbol'])\r\n \r\n\r\n# this function allow to get the data from binance on EST time\r\n\r\ndef get_binance_bars(symbol, interval, startTime, endTime):\r\n \r\n url = \"https://api.binance.com/api/v3/klines\"\r\n \r\n startTime = str(int(startTime.timestamp() * 1000))\r\n endTime = str(int(endTime.timestamp() * 1000))\r\n limit = '1000'\r\n \r\n req_params = {\"symbol\" : symbol, 'interval' : interval, 'startTime' : startTime, 'endTime' : endTime, 'limit' : limit}\r\n \r\n df = pd.DataFrame(json.loads(requests.get(url, params = req_params).text))\r\n \r\n if (len(df.index) == 0):\r\n return None\r\n \r\n df = df.iloc[:, 0:6]\r\n df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']\r\n \r\n df.open = df.open.astype(\"float\")\r\n df.high = df.high.astype(\"float\")\r\n df.low = df.low.astype(\"float\")\r\n df.close = df.close.astype(\"float\")\r\n df.volume = df.volume.astype(\"float\")\r\n \r\n df['adj_close'] = df['close']\r\n \r\n df.index = [dt.datetime.fromtimestamp(x 
/ 1000.0) for x in df.datetime]\r\n \r\n return df\r\n\r\n# Get the last year, month and day using in the main file to get the data only after that time\r\n# Ada is only use to get the last day available in the file\r\n\r\n\r\n# loop to get the data for all symbols and combine them into a single file\r\n\r\nfor i in symbols:\r\n df_list = []\r\n \r\n # path_crypto needs to be updated with your working directory and folder where you want store the data\r\n path_crypto = r'C:\\Users\\Pastor\\Dropbox\\Pastor\\data\\binance_data_{0}\\{1}.csv'.format(file_out, i)\r\n if os.path.isfile(path_crypto) == True:\r\n crypto = pd.read_csv(path_crypto)\r\n t = crypto.Date.tail(1)\r\n year = int(t.str[0:4])\r\n month = int(t.str[5:7])\r\n day = int(t.str[8:10]) \r\n \r\n else:\r\n year = 2000\r\n month = 1\r\n day = 1\r\n \r\n last_datetime = dt.datetime(year, month, day) # year, month, day\r\n while True:\r\n print(last_datetime, i)\r\n new_df = get_binance_bars(i, timeframe, last_datetime, dt.datetime.now())\r\n if new_df is None:\r\n break\r\n df_list.append(new_df)\r\n last_datetime = max(new_df.index) + dt.timedelta(1, 0)\r\n df = pd.concat(df_list)\r\n df.reset_index(level=0, inplace=True)\r\n df.columns = ['Date', 'datetime', 'Open', 'High', 'Low', 'Close', 'Volume', 'adj_close']\r\n \r\n if os.path.isfile(path_crypto) == True:\r\n df_main = pd.read_csv(path_crypto)\r\n df_update = pd.concat([df_main, df], sort = False)\r\n df = df_update.drop_duplicates(subset = [\"Date\"])\r\n print(\"-------------Update----------\")\r\n \r\n df.to_csv(path_crypto, index = False)\r\n ","sub_path":"Binance_USDT.py","file_name":"Binance_USDT.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"318319357","text":"import cv2\nimport os\nimport traceback\n\nfrom partypi.utils.inference import get_labels\n\nOPACITY = 0.4\nremote_API = False\nBLUE = (232, 167, 35)\nGREEN = (62, 184, 144)\nYELLOW = (0, 255, 255)\nPURPLE = (68, 54, 66)\nVERSION = \"0.1.6\"\nhat_path = 'images/hat.png'\n\n# Parameters for loading data and images\ndetection_model_path = './face.xml'\n# Get input model shapes for inference\n\nemotion_labels = get_labels()\nEMOTIONS = list(get_labels().values())\n\n# Hyperparameters for bounding box\nemotion_offsets = (20, 40)\n\n\ndef print_traceback():\n print(\"Exception:\")\n print('-' * 60)\n traceback.print_exc()\n print('-' * 60)\n pass\n\n\ndef preprocess_input(x, v2=True):\n x = x.astype('float32')\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x\n\n\ndef draw_text(\n coordinates,\n image_array,\n text,\n color=(255, 255, 255),\n x_offset=0,\n y_offset=0,\n font_scale=2,\n thickness=1,\n):\n x, y = coordinates[:2]\n cv2.putText(\n image_array,\n text,\n (int(x + x_offset), int(y + y_offset)),\n cv2.FONT_HERSHEY_SIMPLEX,\n font_scale,\n color,\n thickness,\n cv2.LINE_AA,\n )\n\n\ndef new_image_path():\n \"\"\"\n Get path for saving a new image.\n \"\"\"\n img_prefix = 'img_'\n extension = '.png'\n nr = 0\n photos_path = os.path.abspath('../photos')\n if not os.path.exists(photos_path):\n os.mkdir(photos_path)\n for file in os.listdir(photos_path):\n if file.endswith(extension):\n file = file.replace(img_prefix, '')\n file = file.replace(extension, '')\n # print file\n file_nr = int(file)\n nr = max(nr, file_nr)\n img_nr = nr + 1\n image_path = os.path.join(photos_path,\n str(img_prefix) + str(img_nr) + str(extension))\n return 
image_path\n","sub_path":"partypi/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"484142520","text":"import sys\nfrom player_class import *\nfrom enemy_class import *\nfrom pygame import mixer\n\n# Initialising pygame\npygame.init()\n\n\n# Class for the pick Ups in the game\n# The class inherits from the pygame sprite class\n\n\nclass PickUp(pygame.sprite.Sprite):\n # the constructor of the class, passing the game class, the x and y position of the object and path to the representing sprite\n def __init__(self, game, positionX, positionY, pickUpSprite):\n # inheriting the methods from the parent class\n super().__init__()\n # Class variables\n # Game is for the main game class that holds the information for the player and all the lists that this class cares about\n self.Game = game\n # image loads the sprite that is passed in the constructor\n self.image = pygame.image.load(pickUpSprite)\n # rect creates a rectangle around the sprite to allow movement and manipulation of the sprite\n self.rect = self.image.get_rect()\n # rect center, centers the sprite to the passed coordinates\n self.rect.center = [positionX, positionY]\n # sound is the sound effect for when the player collects the object\n self.sound = mixer.Sound(\"Presets/SoundFX/PickUpSound.wav\")\n\n # update is function that is called on every sprite every second\n\n def update(self):\n # check if the player is colliding with the pick up object and if so, delete the object\n if pygame.sprite.collide_rect_ratio(.3)(self.Game.player, self):\n self.kill()\n\n\nclass Coin(pygame.sprite.Sprite):\n # the constructor of the class, passing the game class, the x and y position of the object and path to the representing sprite\n def __init__(self, game, positionX, positionY, coinSprite):\n # inheriting the methods from the parent class\n super().__init__()\n # Class variables\n # Game is for the main game class that holds the information for the player and all the lists that this class cares about\n self.Game = game\n # image loads the sprite that is passed in the constructor\n self.image = pygame.image.load(coinSprite)\n # rect creates a rectangle around the sprite to allow movement and manipulation of the sprite\n self.rect = self.image.get_rect()\n # rect center, centers the sprite to the passed coordinates\n self.rect.center = [positionX, positionY]\n # sound is the sound effect for when the player collects the object\n self.sound = mixer.Sound(\"Presets/SoundFX/CoinSound.wav\")\n\n # update is function that is called on every sprite every second\n\n def update(self):\n # check if the player is colliding with the pick up object and if so, delete the object\n if pygame.sprite.collide_rect_ratio(.3)(self.Game.player, self):\n self.kill()\n\n\n# Main game class\n\nclass Game:\n def __init__(self):\n # set the screen size\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n #set the title of the window\n self.title = pygame.display.set_caption(\"Dungeon Runaway\")\n #set the icon\n self.iconImage = pygame.image.load(\"Presets/PlayerSprites/Walk/WalkRight/tile_5.png\")\n self.icon = pygame.display.set_icon(self.iconImage)\n # set up a clock to control the frames showed per second\n self.clock = pygame.time.Clock()\n # running is a state variable that makes the game run until is not false\n self.running = True\n # game state variable that keeps the current state of the game, this is used to trigger Game 
Over screen and Main Menu screen\n self.state = \"Start\"\n # set the game grid's cell width and height to the preset in the settings class\n self.cellWidth = CELL_WIDTH\n self.cellHeight = CELL_HEIGHT\n # set the player position\n self.playerPosition = None\n # list that will hold the position of all the walls\n self.walls = []\n # list that will hold the position of all coins\n self.coins = []\n # list for the coins' sprites\n self.coinsSprites = []\n # list for all the enemies\n self.enemies = []\n # list that will hold all the positions of the enemies\n self.enemiesPosition = []\n # list for the pick ups positions\n self.pickUps = []\n # list for the pick ups' sprites\n self.pickUpSprites = []\n # execute the load level function, that read from the wall.txt file and loads the data\n self.loadLevel()\n # variable that will hold the initially spawned loot number\n self.spawnedLoot = len(self.coins)\n # create an instance of the player and passing the position\n self.player = Player(self, Vector2(self.playerPosition))\n # load the enemies\n self.loadEnemies()\n # create a group for all the sprites in the game. This is not used for all of them but instead only for the collectibles\n self.allSpritesGroup = pygame.sprite.Group()\n # group for the player sprites\n self.playerSpriteGroup = pygame.sprite.Group()\n # group for the enemy sprites\n self.enemySpriteGroup = pygame.sprite.Group()\n # separate group for the collectibles sprites\n self.coinsGroup = pygame.sprite.Group()\n self.pickUpsGroup = pygame.sprite.Group()\n # list of the background sound files, just for ease than having to browse through the code\n self.backgroundMusic = [\"Presets/Music/BackgroundMusic.flac\", \"Presets/Music/beatsMe.wav\", \"Presets/Music/BackgroundMusic1.wav\", \"Presets/Music/BackgroundMusic2.mp3\", \"Presets/Music/BackgroundMusic3.mp3\"]\n # event for when a song end\n self.SONG_END = pygame.USEREVENT + 1\n # subscribe to the event\n mixer.music.set_endevent(self.SONG_END)\n # load the game over sound\n self.gameOverSound = mixer.Sound(\"Presets/SoundFX/GameOver.wav\")\n # highscore variable\n self.highScore = 0\n # 2 prefabs of the collectibles, used mainly to play the sound after they are picked up by the player.\n self.CoinPrefab = Coin(self, -100, 0, \"Presets/CoinSprite.png\")\n self.PickUpPrefab = PickUp(self, -100, 0, \"Presets/SpeedPickUp.png\")\n # load the first song and play it\n mixer.music.load(self.backgroundMusic[1])\n mixer.music.play()\n # function that makes the game loop\n self.currentlyPlayingSong = None\n\n def run(self):\n # set the highscore to the number that is written in the hs.txt file\n self.highScore = self.readHighScore()\n # draw the collectibles sprites\n self.drawCoins()\n self.drawPickUps()\n #self.queueMusic()\n # Game loop\n # check for the running variable\n while self.running:\n # check for the state of the game, by default its start\n if self.state == \"Start\":\n # run all the start state functions\n # every state has 3 functions\n # Events, Update and Draw\n # Events is for the input that is given to pygame\n self.startEvents()\n # Update is for the logic that is running on the background\n self.startUpdate()\n # Draw is displaying everything that is meant to be shown\n self.startDraw()\n\n elif self.state == \"Play\":\n self.playEvents()\n self.playUpdate()\n self.playDraw()\n\n elif self.state == \"Level Cleared\":\n self.LCEvents()\n self.LCUpdate()\n self.LCDraw()\n\n elif self.state == \"Game Over\":\n self.GOEvents()\n self.GOUpdate()\n self.GODraw()\n else:\n 
# if its none of the states above, stop the game\n self.running = False\n # clock sets the framerate of the game\n self.clock.tick(FPS)\n # close the game and the window\n pygame.quit()\n sys.exit()\n\n ##################################### HELPER FUNCTIONS #################################################################\n\n # function that displays text on the screen\n def drawText(self, _text, screen, _position, size, colour, font_name, centered=False):\n # set the font to the passed font\n font = pygame.font.SysFont(font_name, size)\n # set the text to the passed text\n text = font.render(_text, False, colour)\n # set the size of the text\n textSize = text.get_size()\n\n # position the text if specified\n if centered:\n _position[0] = _position[0] - textSize[0] // 2\n _position[1] = _position[1] - textSize[1] // 2\n\n # Display the text on the screen\n screen.blit(text, _position)\n\n # function that reads from the walls.txt and loads the passed data\n\n def loadLevel(self):\n # set the background image\n self.background = pygame.image.load(\"Presets/background.png\")\n # scale the image based on the screen's width and height\n self.background = pygame.transform.scale(self.background, (MAZE_WIDTH, MAZE_HEIGHT))\n\n # reading walls file and writing the data into walls list\n with open(\"Presets/Files/walls.txt\", 'r') as file:\n for y, line in enumerate(file):\n for x, char in enumerate(line):\n # appends a Vector2 to the walls list with the position of the wall\n if char == \"1\":\n self.walls.append(Vector2(x, y))\n # appends a Vector2 to the coins list with the position of the coins\n elif char == \"C\":\n self.coins.append(Vector2(x, y))\n # sets the player's position to the coordinates\n elif char == \"P\":\n self.playerPosition = [x, y]\n # appends a Vector2 to the pick ups list with the position of the pickup\n elif char == \"U\":\n self.pickUps.append(Vector2(x, y))\n # appends the position of the enemies to the enemiesPosition list\n elif char in [\"2\", \"3\", \"4\", \"5\"]:\n self.enemiesPosition.append([x, y])\n # visually representing the entrance of the enemy's spawn else it does not makes sense\n elif char == \"B\":\n pygame.draw.rect(self.background, BLACK,\n (x * self.cellWidth, y * self.cellHeight, self.cellWidth, self.cellHeight))\n\n # Function that loads enemies based on what the loadLevel() has written in the enemiesPosition list\n\n def loadEnemies(self):\n for index, enemy in enumerate(self.enemiesPosition):\n self.enemies.append(Enemy(self, Vector2(enemy), index))\n\n # Function that adds coin sprite object to the coinsSprites list and then adds all of the objects to the allSpritesGroup to be displayed\n\n def drawCoins(self):\n for coin in self.coins:\n self.coinsSprites.append(\n Coin(self, int(coin.x * self.cellWidth) + self.cellWidth // 2 + TOP_BOTTOM_PADDING // 2,\n int(coin.y * self.cellHeight) + self.cellHeight // 2 + TOP_BOTTOM_PADDING // 2,\n \"Presets/CoinSprite.png\"))\n\n for coinSprite in self.coinsSprites:\n self.allSpritesGroup.add(coinSprite)\n\n # Function that adds pickUp sprite object to the pickUpsSprites list and then adds all of the objects to the allSpritesGroup to be displayed\n\n def drawPickUps(self):\n for pickUp in self.pickUps:\n self.pickUpSprites.append(\n PickUp(self, int(pickUp.x * self.cellWidth) + self.cellWidth // 2 + TOP_BOTTOM_PADDING // 2,\n int(pickUp.y * self.cellHeight) + self.cellHeight // 2 + TOP_BOTTOM_PADDING // 2,\n \"Presets/SpeedPickUp.png\"))\n\n for pickUpSprite in self.pickUpSprites:\n 
self.allSpritesGroup.add(pickUpSprite)\n\n # Reset function for when the game ends and the player wants to restart the game.\n\n def reset(self, resetScore=True, resetLives=True, resetCollectedLoot=True):\n # reset the amount of collected loot if the player dies\n if resetCollectedLoot:\n self.player.collectedLoot = 0\n # set the lives back to 3\n if resetLives:\n self.player.lives = 3\n # reset the score\n if resetScore:\n self.player.currentScore = 0\n # return the player to the starting position\n self.player.gridPosition = Vector2(self.player.startPosition)\n self.player.pixelPosition = self.player.getPixelPosition()\n # set the direction of the player to 0,0 so it does not move in any direction\n self.player.direction *= 0\n # empty the collectibles lists\n self.coins = []\n self.coinsSprites = []\n self.pickUps = []\n self.pickUpSprites = []\n # reset the allSpritesGroup\n self.allSpritesGroup = pygame.sprite.Group()\n # When reset, fill the lists once again from the file\n with open(\"Presets/Files/walls.txt\", 'r') as file:\n for y, line in enumerate(file):\n for x, char in enumerate(line):\n if char == \"C\":\n self.coins.append(Vector2(x, y))\n elif char == \"U\":\n self.pickUps.append(Vector2(x, y))\n\n # change the state to play\n self.state = \"Play\"\n # change the highscore if the player has managed to beat the last given\n self.highScore = self.readHighScore()\n # draw the collectibles\n self.drawCoins()\n self.drawPickUps()\n\n # write to a file the last currentScore\n def saveHighScore(self, highScore):\n file = open(\"Presets/Files/hs.txt\", \"w\")\n file.write(str(highScore))\n file.close()\n\n # Read from the file with the highscore\n\n def readHighScore(self):\n file = open(\"Presets/Files/hs.txt\", \"r\")\n return int(file.readline())\n\n def queueMusic(self):\n nextSong = random.choice(self.backgroundMusic)\n while nextSong == self.currentlyPlayingSong:\n nextSong = random.choice(self.backgroundMusic)\n self.currentlyPlayingSong = nextSong\n mixer.music.load(nextSong)\n mixer.music.play()\n\n ##################################### START FUNCTIONS ##################################################################\n # start events as explained above, contain the input that is passed to the game\n\n def startEvents(self):\n\n for event in pygame.event.get():\n if event.type == self.SONG_END:\n self.queueMusic()\n\n if event.type == pygame.QUIT:\n self.running = False\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n # if space was pressed, change the state to play\n self.state = \"Play\"\n\n # this function is empty because there are none animations or anything that requires updating while on the main menu\n\n def startUpdate(self):\n pass\n\n # Draw function\n\n def startDraw(self):\n # draw the whole window's screen black\n self.screen.fill(BLACK)\n # use the drawText function and display the text\n self.drawText(\"Press Space to Start\", self.screen, [WIDTH // 2, HEIGHT // 2 - 50], START_TEXT_SIZE,\n (170, 132, 58),\n START_TEXT_FONT, centered=True)\n self.drawText(\"1 Player Only\", self.screen, [WIDTH // 2, HEIGHT // 2 + 50], START_TEXT_SIZE, (32, 150, 79),\n START_TEXT_FONT, centered=True)\n self.drawText(\"HIGH SCORE\", self.screen, [3, 5], START_TEXT_SIZE, WHITE,\n START_TEXT_FONT)\n\n # update the display\n pygame.display.update()\n\n ##################################### PLAY FUNCTIONS #################################################################\n # Play events, function that listens to the input that the player gives.\n\n def 
playEvents(self):\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.running = False\n\n            # if the player inputs any of the arrows, the direction of the player changes and so does the animation\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_LEFT:\n                    self.player.move(Vector2(-1, 0))\n                    animationBool(self.player, False, False, True, False)\n\n                if event.key == pygame.K_RIGHT:\n                    self.player.move(Vector2(1, 0))\n                    animationBool(self.player, False, False, False, True)\n\n                if event.key == pygame.K_UP:\n                    self.player.move(Vector2(0, -1))\n                    animationBool(self.player, True, False, False, False)\n\n                if event.key == pygame.K_DOWN:\n                    self.player.move(Vector2(0, 1))\n                    animationBool(self.player, False, True, False, False)\n            # queue the next background track when the current one ends\n            if event.type == self.SONG_END:\n                self.queueMusic()\n\n    # Update runs once per frame.\n\n    def playUpdate(self):\n        # run the player update function\n        if not pygame.mixer.get_busy():\n            mixer.music.unpause()\n        self.player.Update()\n        # run all the enemies update functions\n        for enemy in self.enemies:\n            enemy.Update()\n            # if enemy collides with the player, the player loses a life\n            if enemy.gridPosition == self.player.gridPosition:\n                self.playerLoseLife()\n\n        if self.player.collectedLoot >= self.spawnedLoot:\n            self.state = \"Level Cleared\"\n            self.player.collectedLoot = 0\n\n    # Function that decreases the life of the player.\n    def playerLoseLife(self):\n        # reduce the life\n        mixer.music.pause()\n        self.player.deathSound.play()\n\n        self.player.lives -= 1\n        # read again if the player has managed to beat the highscore and store it for a later check\n        bestHighScore = self.readHighScore()\n        # if the player's lives reach 0, change the state to game over\n        if self.player.lives == 0:\n            self.state = \"Game Over\"\n            self.gameOverSound.play()\n            # if the current score of the player is better than the best highscore\n            if self.player.currentScore >= bestHighScore:\n                # update the highscore in case the player decides to reset the game\n                self.saveHighScore(self.player.currentScore)\n\n        else:\n            # if however the player has more lives, reset the position\n            self.player.gridPosition = Vector2(self.player.startPosition)\n            self.player.pixelPosition = self.player.getPixelPosition()\n            # change the direction to 0, 0\n            self.player.direction *= 0\n            # reset the position of the enemies\n            for enemy in self.enemies:\n                enemy.gridPosition = Vector2(enemy.startingPosition)\n                enemy.pixelPosition = enemy.getPixelPosition()\n                enemy.direction *= 0\n\n    # Draw function\n\n    def playDraw(self):\n        # make the whole screen black\n        self.screen.fill(BLACK)\n\n        # draw all the enemies and add them to their respective spriteGroup\n        for enemy in self.enemies:\n            enemy.update()\n            self.enemySpriteGroup.add(enemy)\n\n        # add the player to the playerSpriteGroup\n        self.playerSpriteGroup.add(self.player)\n        # display the padding on the screen\n        self.screen.blit(self.background, (TOP_BOTTOM_PADDING // 2, TOP_BOTTOM_PADDING // 2))\n        # draw all the coins\n        self.coinsGroup.draw(self.screen)\n        # draw all the pick ups\n        self.pickUpsGroup.draw(self.screen)\n        # draw all the sprites in the group; not entirely sure why they need to be drawn separately and then all together, but it works\n        self.allSpritesGroup.draw(self.screen)\n        # draw all the enemies\n        self.enemySpriteGroup.draw(self.screen)\n        # draw the player\n        self.playerSpriteGroup.draw(self.screen)\n\n        # draw the text to show the current and best highscore\n        self.drawText(\"CURRENT SCORE: 
{}\".format(self.player.currentScore), self.screen, (5, 0), 16, WHITE,\n START_TEXT_FONT)\n self.drawText(\"HIGH SCORE: {}\".format(self.highScore), self.screen, (WIDTH / 1.4, 0), 16, WHITE,\n START_TEXT_FONT)\n self.player.draw()\n\n # update the screen\n pygame.display.update()\n\n ##################################### Game Over FUNCTIONS #################################################################\n # Events for the game over screen\n\n def GOEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n # if the player presses space, reset the game\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n self.reset()\n # if the playe presses the escape, quit the game\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n self.running = False\n\n def GOUpdate(self):\n pass\n\n # Game over draw\n\n def GODraw(self):\n # make the screen black\n self.screen.fill(BLACK)\n # two variables for the text to make it easier to track down in case of possible typos\n quitText = \"Press the escape button to QUIT\"\n retryText = \"Press SPACE bar to PLAY AGAIN\"\n # Display the text on the screen\n self.drawText(\"GAME OVER\", self.screen, [WIDTH // 2, 100], 25, RED, START_TEXT_FONT, centered=True)\n self.drawText(retryText, self.screen, [\n WIDTH // 2, HEIGHT // 2], 25, (190, 190, 190), START_TEXT_FONT, centered=True)\n self.drawText(quitText, self.screen, [\n WIDTH // 2, HEIGHT // 1.5], 25, (190, 190, 190), START_TEXT_FONT, centered=True)\n # update the screen\n pygame.display.update()\n\n ##################################### Level Cleared FUNCTIONS #################################################################\n\n def LCEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n self.reset(False, False, False)\n\n def LCUpdate(self):\n pass\n\n def LCDraw(self):\n self.screen.fill(BLACK)\n # two variables for the text to make it easier to track down in case of possible typos\n congratsText = \"Congratulations!\"\n additionalCongratsText = \"You cleared the dungeon without getting caught!\"\n evenMoreCongratsText = \"You are a true goblin!\"\n retryText = \"Press Space to continue \"\n # Display the text on the screen\n self.drawText(\"Level Cleared\", self.screen, [WIDTH // 2, 100], 25, RED, START_TEXT_FONT, centered=True)\n self.drawText(congratsText, self.screen, [\n WIDTH // 2, HEIGHT // 3], 20, (190, 190, 190), START_TEXT_FONT, centered=True)\n self.drawText(additionalCongratsText, self.screen, [\n WIDTH // 2, HEIGHT // 2.5], 20, (190, 190, 190), START_TEXT_FONT, centered=True)\n self.drawText(evenMoreCongratsText, self.screen, [\n WIDTH // 2, HEIGHT // 2], 20, (190, 190, 190), START_TEXT_FONT, centered=True)\n\n self.drawText(retryText, self.screen, [\n WIDTH // 2, HEIGHT // 1.5], 25, (190, 190, 190), START_TEXT_FONT, centered=True)\n # update the screen\n pygame.display.update()\n","sub_path":"DungeonRunaway/game_class.py","file_name":"game_class.py","file_ext":"py","file_size_in_byte":23953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"259969387","text":"import string\nfrom cStringIO import StringIO\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.DirectoryView import addDirectoryViews\n\nfrom Products.qPloneTabs.config import *\n\nconfiglets = ({'id':PROJECT_NAME,\n 'name':'Plone Tabs',\n 
'action':'string:${portal_url}/prefs_tabs_form',\n 'condition':'',\n 'category':'Products',\n 'visible':1,\n 'appId':PROJECT_NAME,\n 'permission':VIEW_PERMISSION,\n 'imageUrl':'qplonetabs.gif' },)\n\ndef setupSkin(self, out, skinFolder):\n \"\"\" Setup product skin layer \"\"\"\n\n skinstool=getToolByName(self, 'portal_skins')\n for skin in skinstool.getSkinSelections():\n path = skinstool.getSkinPath(skin)\n path = map(string.strip, string.split(path,','))\n if not skinFolder in path:\n try:\n path.insert( path.index('custom')+1, skinFolder)\n except ValueError:\n path.append(skinFolder)\n path = string.join(path, ', ')\n skinstool.addSkinSelection(skin, path)\n out.write(' %s layer sucessfully installed into skin %s.\\n' % (skinFolder, skin))\n else:\n out.write(' %s layer was already installed into skin %s.\\n' % (skinFolder, skin))\n\ndef removeSkin(self, skins=[]):\n \"\"\" Setup product skin layer \"\"\"\n\n if skins:\n skinstool = getToolByName(self, 'portal_skins')\n for skinName in skinstool.getSkinSelections():\n path = skinstool.getSkinPath(skinName)\n path = [i.strip() for i in path.split(',')]\n for s in skins:\n if s in path:\n path.remove(s)\n s += '/'\n for layer in path:\n if layer.startswith(s):\n path.remove(layer)\n path = ','.join(path)\n skinstool.addSkinSelection(skinName, path)\n\ndef addConfiglet(self, out):\n \"\"\" Add tabs configlet to portal control panel \"\"\"\n\n configTool = getToolByName(self, 'portal_controlpanel', None)\n if configTool:\n for conf in configlets:\n configTool.registerConfiglet(**conf)\n out.write('Added configlet %s\\n' % conf['id'])\n\ndef removeConfiglet(self, out):\n \"\"\" Remove tabs configlet from portal control panel \"\"\"\n\n configTool = getToolByName(self, 'portal_controlpanel', None)\n if configTool:\n for conf in configlets:\n configTool.unregisterConfiglet(conf['id'])\n out.write('Removed configlet %s\\n' % conf['id'])\n\ndef install(self):\n \"\"\" Product installation \"\"\"\n\n out = StringIO()\n\n out.write('setupSkin... \\n')\n skinstool = getToolByName(self, 'portal_skins')\n addDirectoryViews(skinstool, SKINS_DIR, GLOBALS)\n setupSkin(self, out, PROJECT_NAME)\n\n mtool = getToolByName(self, 'portal_migration')\n plone_version = mtool.getFileSystemVersion()\n if plone_version == '2.0.5':\n setupSkin(self, out, PROJECT_NAME+'/2.0.5')\n out.write('Added %s/2.0.5 Layer to portal_skins\\n' % PROJECT_NAME)\n\n addConfiglet(self, out)\n\n return out.getvalue()\n\ndef uninstall(self):\n \"\"\" Product uninstallation \"\"\"\n\n out = StringIO()\n\n removeConfiglet(self, out)\n\n out.write('removeSkin... 
\\n')\n    removeSkin(self, [PROJECT_NAME, PROJECT_NAME+'/2.0.5'])\n\n    return out.getvalue()\n","sub_path":"qPloneTabs/tags/0.2/Extensions/Install.py","file_name":"Install.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"326319743","text":"#\n##############################################\n# Faizan Syed (20571514)\n# CS 116 Winter 2015\n# Assignment 7 Problem 2 (minimum difference)\n##############################################\n#\n\nimport check\n\n# list_of_diff(diffs_so_far, numbers_left) produces a list of the differences of all\n# adjacent integers in numbers_left when called with diffs_so_far = []\n# list_of_diff: (listof Int) (listof Int) -> (listof Int)\n# requires: diffs_so_far = []\ndef list_of_diff(diffs_so_far, numbers_left):\n    if len(numbers_left) == 1:\n        return diffs_so_far\n    diffs_so_far.append(numbers_left[1]-numbers_left[0])\n    if len(numbers_left) != 1:\n        return list_of_diff(diffs_so_far, numbers_left[1:])\n    \n\n# mergesort(L) consumes a list of numbers L and produces them sorted in ascending order\n# mergesort: (listof Int) -> (listof Int)\ndef mergesort(L):\n    if len(L) < 2:\n        return L\n    mid = len(L)//2\n    L1 = L[:mid]\n    L2 = L[mid:]\n    mergesort(L1)\n    mergesort(L2)\n    R = []\n    merge(L1, L2, 0, 0, R)\n    copy_values(L, R, 0)\n    return R\n\n# merge(L1,L2,i,j,R) merges the sorted lists L1 and L2, starting at indices i and j,\n# and mutates R into the merged ordered list\n# merge: (listof Int) (listof Int) Nat Nat (listof Int) -> None\n# Effects: Mutates R to be ordered list of L1 and L2 elements\ndef merge(L1,L2,i,j,R):\n    if i>len(L1)-1:\n        R.extend(L2[j:])\n    elif j>len(L2)-1:\n        R.extend(L1[i:])\n    elif L1[i] < L2[j]:\n        R.append(L1[i])\n        merge(L1,L2,i+1,j,R)\n    else:\n        R.append(L2[j])\n        merge(L1,L2,i,j+1,R)\n\n# copy_values(to, original, pos) copies all the values from original into to when pos is 0\n# copy_values: (listof Any) (listof Any) Nat -> None\n# Effects: Mutates to by setting each element equal to original\ndef copy_values(to,original,pos):\n    if pos < len(original):\n        to[pos] = original[pos]\n        copy_values(to, original,pos+1)\n\n# smallest_diff(numbers) produces the smallest difference between 2 adjacent integers\n# in the list numbers\n# smallest_diff: (listof Int) -> Int\n# requires: numbers to be in non-decreasing order\n\n# Examples:\n# smallest_diff([1,2,3,4,5,6]) => 1\n# smallest_diff([1,2,445,5678,54332,53453243,43456455353]) => 1\n\ndef smallest_diff(numbers):\n    list_diffs = list_of_diff([], numbers)\n    return mergesort(list_diffs)[0]\n\ncheck.expect(\"T1\", smallest_diff([1,2,3,4,5,6]), 1)\ncheck.expect(\"T2\", smallest_diff([1,2,445,5678,54332,53453243,43456455353]), 1)\ncheck.expect(\"T3\", smallest_diff([1,22,445,5678,54332,54333]), 1)","sub_path":"Mergesort.py","file_name":"Mergesort.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"470460312","text":"'''\nplot stable/unstable manifolds\n'''\nfrom utils import solve_ic, roots, to_cart, to_ang, get_dydt, H, get_grids\nfrom scipy.interpolate import interp1d\nimport os, pickle, lzma\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif', size=20)\nplt.rc('lines', lw=2.5)\nplt.rc('xtick', direction='in', top=True, bottom=True)\nplt.rc('ytick', direction='in', left=True, right=True)\n\nfrom scipy.optimize import root\ndef plot_manifolds(eta):\n    plt_fn = '6manifolds%s' % ('%.2f' % eta).replace('.', 
'_')\n\n fig = plt.figure(figsize=(6, 6))\n I = np.radians(20)\n tide = 1e-3\n q, _ = roots(I, eta)\n max_step = 0.05\n # improve CS4 location numerically\n _cs4 = np.array(to_cart(q[3], 0)) + \\\n np.array([0, np.sin(tide / (eta * np.sin(I))), 0])\n dydt = lambda s: get_dydt(I, eta, tide)(0, s)\n cs4 = root(dydt, _cs4).x\n cs4_q, cs4_phi = to_ang(*cs4)\n\n def get_displaced(sign_q, sign_phi):\n # sign of sign_q is backwards from what is expected\n small = 0.001\n eigen = np.sqrt(eta * np.sin(I))\n return to_cart(cs4_q - small * sign_q,\n cs4_phi + small * eigen * sign_phi)\n\n # backwards from CS4^0\n init = get_displaced(1, 1)\n pkl_fn = plt_fn + '.pkl'\n if not os.path.exists(pkl_fn):\n print('Running %s' % pkl_fn)\n _, t1, s1 = solve_ic(I, eta, tide, init,\n -20000, rtol=1e-6, max_step=max_step)\n with lzma.open(pkl_fn, 'wb') as f:\n pickle.dump((t1, s1), f)\n else:\n with lzma.open(pkl_fn, 'rb') as f:\n print('Loading %s' % pkl_fn)\n t1, s1 = pickle.load(f)\n q1, _phi1 = to_ang(*s1)\n phi1 = np.unwrap(_phi1 + np.pi) - 2 * np.pi\n term1 = np.where(phi1 < 0)[0][0]\n plt.plot(phi1[ :term1], s1[2, :term1], 'b--',\n label=r'CS4$_{\\phi=0}^-$', linewidth=1.5, alpha=0.7)\n\n # forwards from CS4^0\n init = get_displaced(-1, 1)\n _, t2, s2 = solve_ic(I, eta, tide, init, 200, rtol=1e-6, max_step=max_step)\n q2, _phi2 = to_ang(*s2)\n phi2 = np.unwrap(_phi2 + np.pi) - 2 * np.pi\n phi2_grad = np.gradient(phi2) / np.gradient(t2)\n term2 = np.where(np.logical_and(\n abs(phi2_grad) < 5 * min(abs(phi2_grad)),\n phi2 < 1,\n ))[0][0]\n plt.plot(phi2[ :term2], s2[2, :term2], 'b',\n label=r'CS4$_{\\phi=0}^+$', linewidth=1.5, alpha=0.7)\n\n # backwards from CS4^1\n init = get_displaced(-1, -1)\n _, t3, s3 = solve_ic(I, eta, tide, init, -200, rtol=1e-6, max_step=max_step)\n q3, _phi3 = to_ang(*s3)\n phi3 = np.unwrap(_phi3 + np.pi)\n term3 = np.where(phi3 < 0)[0][0]\n plt.plot(phi3[ :term3], s3[2, :term3], 'k--',\n label=r'CS4$_{\\phi=360}^-$', linewidth=1.5, alpha=0.7)\n\n # forwards from CS4^1\n init = get_displaced(1, -1)\n _, t4, s4 = solve_ic(I, eta, tide, init, 200, rtol=1e-6, max_step=max_step)\n q4, _phi4 = to_ang(*s4)\n phi4 = np.unwrap(_phi4 + np.pi)\n term4 = np.where(phi4 < 0)[0][0]\n plt.plot(phi4[ :term4], s4[2, :term4], 'k',\n label=r'CS4$_{\\phi=360}^+$', linewidth=1.5, alpha=0.7)\n\n plt.xlim([0, 2 * np.pi])\n plt.xticks([0, np.pi, 2 * np.pi],\n ['0', r'$180$', r'$360$'])\n\n ylim = plt.ylim()\n a1 = 0.4 # inner alpha\n a2 = 0.2 # Z3 alpha\n # fill Zone I with yellow\n plt.fill_between(phi4[ :term4], s4[2, :term4], np.ones_like(s4[2, :term4]),\n facecolor='darkorange', alpha=a1)\n top_interp = interp1d(phi4[ :term4], s4[2, :term4])\n # fill Zone III with yellow for now\n plt.fill_between(phi3[ :term3], s3[2, :term3], -np.ones_like(s3[2, :term3]),\n color='darkorange', alpha=a2)\n bot_interp = interp1d(phi3[ :term3], s3[2, :term3])\n # fill flow into zone I with yellow\n phi_inner = phi1[ :term1]\n mu_inner_outboundary = s1[2, :term1]\n rightmost = np.argmax(phi_inner)\n plt.fill_between(phi_inner[ :rightmost], mu_inner_outboundary[ :rightmost],\n top_interp(phi_inner[ :rightmost]),\n facecolor='darkorange', alpha=a1)\n plt.fill_between(phi_inner[rightmost: ], mu_inner_outboundary[rightmost: ],\n bot_interp(phi_inner[rightmost: ]),\n facecolor='darkorange', alpha=a1)\n phi_farright = np.linspace(phi_inner[rightmost], 2 * np.pi, 20)\n top_farright = top_interp(phi_farright)\n bot_farright = bot_interp(phi_farright)\n plt.fill_between(phi_farright, top_farright, bot_farright,\n 
facecolor='darkorange', alpha=a1)\n # fill Zone II with red\n inner_interp_above = interp1d(phi_inner[ :rightmost],\n mu_inner_outboundary[ :rightmost])\n inner_interp_below = interp1d(phi_inner[rightmost: ],\n mu_inner_outboundary[rightmost: ])\n phi_zone2 = phi_inner[ :rightmost]\n plt.fill_between(phi_zone2, inner_interp_above(phi_zone2),\n inner_interp_below(phi_zone2),\n facecolor='tab:green', alpha=a1)\n # plot extension of probabilistic region\n # phi_shift = phi1 + 2 * np.pi\n # next_capture_line = np.where(np.logical_and(\n # phi_shift > 0,\n # phi_shift < 2 * np.pi))\n # plt.plot(phi_shift[next_capture_line], s1[2][next_capture_line],\n # 'b--', linewidth=1.5, alpha=0.7)\n # first, white it out so no double coloring\n # plt.fill_between(phi_shift[next_capture_line][1: -1],\n # s1[2][next_capture_line][1: -1],\n # bot_interp(phi_shift[next_capture_line][1: -1]),\n # facecolor='white')\n # plt.fill_between(phi_shift[next_capture_line][1: -1],\n # s1[2][next_capture_line][1: -1],\n # bot_interp(phi_shift[next_capture_line][1: -1]),\n # facecolor='tab:green', alpha=a2)\n # just plot the line evolved backwards in time in Zone III, should be plenty\n # convincing\n for idx in range(1, 100):\n phi_shift = phi1 + 2 * idx * np.pi\n next_capture_line = np.where(np.logical_and(\n phi_shift > 0,\n phi_shift < 2 * np.pi))\n plt.plot(phi_shift[next_capture_line], s1[2][next_capture_line],\n c='tab:green', linewidth=np.sqrt(2 / idx),\n alpha=a1)\n\n # plot separatrix\n # x_grid, phi_grid = get_grids()\n # H_grid = H(I, eta, x_grid, phi_grid)\n # H_sep = H(I, eta, np.cos(cs4_q), cs4_phi - np.pi)\n # plt.contour(phi_grid,\n # x_grid,\n # H_grid,\n # levels=[H_sep],\n # colors=['r'],\n # linewidths=3,\n # alpha=0.5,\n # linestyles='solid')\n # overplot CS4 and location of H = H_sep - \\Delta H_-\n plt.plot(0, np.cos(cs4_q), marker='o', mfc='purple', mec='k',\n markersize=10)\n plt.plot(2 * np.pi, np.cos(cs4_q), marker='o', mfc='purple',\n mec='k', markersize=10)\n plt.plot(phi1[term1], s1[2, term1], 'bo', markersize=10)\n plt.plot(phi3[term3], s3[2, term3], 'ko', markersize=10)\n\n # label three zones\n qs, phis = roots(I, eta)\n sepwidth = 2 * np.sqrt(eta * np.sin(I))\n plt.text(np.pi, np.cos(qs[1]) + 0.6 * sepwidth, 'II', backgroundcolor=(1, 1, 1, 0.9),\n ha='center', va='center')\n plt.text(1.0, np.cos(qs[1]) + 0.8 * sepwidth, 'I',\n backgroundcolor=(1, 1, 1, 0.9), ha='center', va='center')\n plt.text(1.0, np.cos(qs[1]) - 0.8 * sepwidth, 'III',\n backgroundcolor=(1, 1, 1, 0.9), ha='center', va='center')\n\n plt.xlabel(r'$\\phi$ (deg)')\n plt.ylabel(r'$\\cos \\theta$')\n\n plt.ylim(ylim)\n plt.legend(fontsize=14, loc='center', ncol=2)\n plt.tight_layout()\n plt.savefig(plt_fn, dpi=300)\n plt.clf()\n\n\nif __name__ == '__main__':\n # plot_manifolds(0.05)\n # plot_manifolds(0.1)\n plot_manifolds(0.2)\n","sub_path":"initial/0_eta/6manifolds.py","file_name":"6manifolds.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"313899690","text":"import json\nimport sqlalchemy as sa\n\n\ndef get_redshift_connection(creds_path):\n\n \"\"\"\n Create a sqlalchemy connection to the redshift backup db\n :param creds_path: a string path to a creds json file for creating a sa connection string\n :return: a sqlalchemy engine\n \"\"\"\n\n with open(creds_path) as f:\n rs_creds = json.load(f)\n\n engine_connect_string = 'redshift+psycopg2://{db_user}:{db_pw}@{db_host}:{port}/{db_name}'.format(\n 
db_user=rs_creds[\"username\"],\n db_pw=rs_creds[\"pw\"],\n db_host=rs_creds[\"host\"],\n port=rs_creds[\"port\"],\n db_name=rs_creds[\"db_name\"]\n )\n\n return sa.create_engine(engine_connect_string)","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"282153517","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport shutil\nimport unittest\nimport tempfile\n\nfrom . import temp_repos\n\nfrom ..common import temp_dir\nfrom ..snapshot_repo import snapshot_repo\n\n\nclass SnapshotRepoTestCase(unittest.TestCase):\n\n def test_snapshot(self):\n with temp_repos.temp_repos_steps(repo_change_steps=[{\n 'dog': temp_repos.SAMPLE_STEPS[0]['dog'],\n }]) as repos_root, temp_dir() as td:\n with open(td / 'fake_gpg_key', 'w'):\n pass\n\n whitelist_dir = td / 'gpg_whitelist'\n os.mkdir(whitelist_dir)\n shutil.copy(td / 'fake_gpg_key', whitelist_dir)\n\n snapshot_repo([\n '--repo-name', 'dog',\n '--repo-url', (repos_root / '0/dog').file_url(),\n '--gpg-key-whitelist-dir', whitelist_dir.decode(),\n '--gpg-url', (td / 'fake_gpg_key').file_url(),\n '--snapshot-dir', (td / 'snap').decode(),\n '--storage', json.dumps({\n 'key': 'test',\n 'kind': 'filesystem',\n 'base_dir': (td / 'storage').decode(),\n }),\n '--db', json.dumps({\n 'kind': 'sqlite',\n 'db_path': (td / 'db.sqlite3').decode(),\n }),\n ])\n # This test simply checks the overall integration, so we don't\n # bother looking inside the DB or Storage, or inspecting the\n # details of the snapshot -- those should all be covered by\n # lower-level tests.\n with open(td / 'snap/rpm.json') as rpm_path:\n self.assertEqual({\n 'dog-pkgs/rpm-test-carrot-2-rc0.x86_64.rpm',\n 'dog-pkgs/rpm-test-mice-0.1-a.x86_64.rpm',\n 'dog-pkgs/rpm-test-milk-1.41-42.x86_64.rpm',\n }, set(json.load(rpm_path).keys()))\n","sub_path":"fs_image/rpm/tests/test_snapshot_repo.py","file_name":"test_snapshot_repo.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"635276577","text":"from infrastructure.switchlang import switch\nimport program_hosts as hosts\nimport infrastructure.state as state\nimport services.db_services as svc\nfrom dateutil import parser\n\ndef run():\n print(' ****************** Welcome guest **************** ')\n print()\n\n show_commands()\n\n while True:\n action = hosts.get_action()\n\n with switch(action) as s:\n s.case('c', hosts.create_account)\n s.case('l', hosts.log_into_account)\n\n s.case('a', add_a_snake)\n s.case('y', view_your_snakes)\n s.case('b', book_a_cage)\n s.case('v', view_bookings)\n s.case('m', lambda: 'change_mode')\n s.case('o', hosts.logout)\n\n s.case('?', show_commands)\n s.case('', lambda: None)\n s.case(['x', 'bye', 'exit', 'exit()'], hosts.exit_app)\n\n s.default(hosts.unknown_command)\n\n state.reload_account()\n\n if action:\n print()\n\n if s.result == 'change_mode':\n return\n\n\ndef show_commands():\n print('What action would you like to take:')\n print('[C]reate an account')\n print('[L]ogin to your account')\n print('[B]ook a cage')\n print('[A]dd a snake')\n print('View [y]our snakes')\n print('[V]iew your bookings')\n print('L[O]gout')\n print('[M]ain menu')\n print('e[X]it app')\n print('[?] 
Help (this info)')\n    print()\n\n\ndef add_a_snake():\n    print(' ****************** Add a snake **************** ')\n    \n\n    if not state.active_account:\n        print(\"Login needed to register snake\")\n        return\n    \n    name = input(\"Enter the name of snake: \")\n    species = input(\"Enter the species: \")\n    length = float(input(\"Enter length of snake: \"))\n    is_venom = input(\"Is the snake venomous: \").lower().startswith('y')\n\n    s = svc.create_snake(state.active_account,name,species,length,is_venom)\n\n    state.reload_account() # Reload the active account object with the modified data\n\n    print(\"Snake registered with id\", s.id)\n\n\n\ndef view_your_snakes():\n    print(' ****************** Your snakes **************** ')\n\n    if not state.active_account:\n        print(\"Login needed to view snakes\")\n        return\n\n    snakes = svc.get_snake_list(state.active_account)\n\n\n    for i,s in enumerate(snakes):\n        print(i+1,\"Snake name is \",s.name)\n    \n\n\n\ndef book_a_cage():\n    print(' ****************** Book a cage **************** ')\n    \n\n    if not state.active_account:\n        print(\"Login needed to book a cage\")\n        return\n    \n    snakes = svc.get_snake_list(state.active_account)\n\n    if snakes == []:\n        print(\"Please add a snake to proceed\")\n        return\n    \n    else:\n        for i,s in enumerate(snakes):\n            print(i+1,\"Snake name is \",s.name)\n    \n    snake_number = int(input(\"Select your snake: \"))\n    selected_snake = snakes[snake_number-1]\n\n    start_date = parser.parse(\n        input(\"Enter check-in date [yyyy-mm-dd]: \")\n    )\n\n    end_date = parser.parse(\n        input(\"Enter check-out date [yyyy-mm-dd]: \")\n    )\n    \n    if start_date>end_date:\n        print(\"Check-out date is before check-in date\")\n        return\n\n    cages = svc.get_available_cages(selected_snake,start_date,end_date)\n\n    \n    if cages == []:\n        print(\"No available cages for those dates\")\n        return\n\n    for i,c in enumerate(cages):\n        print(i+1,c.name,\"is available\",\"is carpeted: \",c.is_carpeted,\"has toys: \",c.has_toys)\n\n    cage_number = int(input(\"Which cage do you want? 
\"))\n\n cage = cages[cage_number-1]\n svc.book_cage_for_snake(state.active_account,selected_snake,cage,start_date,end_date)\n\n print(\"Booking Done for\",selected_snake.name,\"in cage\",cage.name)\n \n\n\n\ndef view_bookings():\n print(' ****************** Your bookings **************** ')\n\n if not state.active_account:\n print(\"Login needed to view bookings\")\n return\n \n snakes = svc.get_snake_list(state.active_account)\n\n snakes_in_cages = svc.get_bookings(snakes)\n\n for s,cages in snakes_in_cages:\n for c in cages:\n for b in c.bookings:\n if b.guest_snake_id==s.id:\n print(\"Snake is {} in cage {} from {} to {}\".format(s.name,c.name,b.check_in_date.strftime('%Y-%m-%d'),b.check_out_date.strftime('%Y-%m-%d')))\n","sub_path":"src/program_guests.py","file_name":"program_guests.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"288678612","text":"from typing import Dict\nfrom dataclasses import dataclass, field\nfrom .util import get_url\n\n\n@dataclass(frozen=True)\nclass Emote:\n id: int\n code: str\n set: int = field(default=0, repr=False)\n\n\nGLOBAL_EMOTE_API = 'https://twitchemotes.com/api_cache/v3/global.json'\nemotes: Dict[str, Emote] = {}\n\n\nasync def update_global_emotes():\n data = await get_url(GLOBAL_EMOTE_API)\n emotes.clear()\n\n for k, v in data.items():\n emotes[k] = Emote(int(v['id']), v['code'], v['emoticon_set'])\n","sub_path":"twitchbot/emote.py","file_name":"emote.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"648863036","text":"import asyncio\nimport logging\n\nfrom sirbot import SirBot\nfrom slack.events import Event\n\nfrom pybot.endpoints.slack.utils.event_utils import (\n build_messages,\n get_backend_auth_headers,\n link_backend_user,\n send_community_notification,\n send_user_greetings,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_endpoints(plugin):\n plugin.on_event(\"team_join\", team_join, wait=False)\n\n\nasync def team_join(event: Event, app: SirBot) -> None:\n \"\"\"\n Handler for when the Slack workspace has a new member join.\n\n After 30 seconds sends the new user a greeting, some resource links, and\n notifies the community channel of the new member.\n \"\"\"\n slack_api = app.plugins[\"slack\"].api\n user_id = event[\"user\"][\"id\"]\n\n *user_messages, community_message, outreach_team_message = build_messages(user_id)\n futures = [\n send_user_greetings(user_messages, slack_api),\n send_community_notification(community_message, slack_api),\n send_community_notification(outreach_team_message, slack_api),\n ]\n\n logger.info(f\"New team join event: {event}\")\n await asyncio.sleep(30)\n await asyncio.wait(futures)\n\n headers = await get_backend_auth_headers(app.http_session)\n if headers:\n await link_backend_user(user_id, headers, slack_api, app.http_session)\n","sub_path":"pybot/endpoints/slack/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606569251","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nrelative_path_to_processor = '..app.processors'\n\nprocessor_conf_list = [\n {\n \"name\": \"common_offline_preprocess\",\n \"module\": \"common_offline_preprocess\",\n \"comment\": \"通用的离线预处理流程,包括保存field_config,清理老的model field_config、data数据,拷贝\\3\\4数据等\",\n \"args\": {}\n },\n 
{\n \"name\": \"train_data_preprocess\",\n \"module\": \"train_data_preprocess\",\n \"comment\": \"离线模块数据预处理\",\n \"args\": {\n }\n },\n {\n \"name\": \"train_recognize_similarity_estimator\",\n \"module\": \"train_recognize_similarity_estimator\",\n \"comment\": \"基于相似度表格识别模型离线训练\",\n \"args\": {}\n },\n {\n \"name\": \"train_extract_search_estimator\",\n \"module\": \"train_extract_search_estimator\",\n \"comment\": \"search抽取离线训练\",\n \"args\": {}\n },\n {\n \"name\": \"train_extract_svm_estimator\",\n \"module\": \"train_extract_svm_estimator\",\n \"comment\": \"svm抽取��线训练\",\n \"args\": {}\n },\n {\n \"name\": \"predict_data_preprocess\",\n \"module\": \"predict_data_preprocess\",\n \"comment\": \"在线预测模块数据预处理\",\n \"args\": {\n }\n },\n {\n \"name\": \"predict_recognize_similarity_estimator\",\n \"module\": \"predict_recognize_similarity_estimator\",\n \"comment\": \"基于相似度的表格识别在线预测\",\n \"args\": {\n \"estimator\": {\"min_confidence\": 0.6}\n }\n },\n {\n \"name\": \"predict_extract_search_estimator\",\n \"module\": \"predict_extract_search_estimator\",\n \"comment\": \"基于search模型抽取的在线预测\",\n \"args\": {\n \"estimator\": {\"min_confidence\": 0.6}\n }\n },\n {\n \"name\": \"predict_extract_svm_estimator\",\n \"module\": \"predict_extract_svm_estimator\",\n \"comment\": \"基于svm模型抽取的在线预测\",\n \"args\": {\n \"estimator\": {\"min_confidence\": 0.15}\n }\n },\n {\n \"name\": \"save_models\",\n \"module\": \"save_models\",\n \"comment\": \"通用的模型存储\",\n \"args\": {}\n },\n {\n \"name\": \"model_result_merge\",\n \"module\": \"model_result_merge\",\n \"comment\": \"合并各类estimator的抽取结果\",\n \"args\": {}\n },\n {\n \"name\": \"model_extract\",\n \"module\": \"model_extract\",\n \"comment\": \"表格模型抽取\",\n \"args\": {}\n },\n {\n \"name\": \"script\",\n \"module\": \"script\",\n \"comment\": \"表格脚本抽取\",\n \"args\": {\n \"script_package\": \"text_classification_code.app.scripts\"\n }\n },\n {\n \"name\": \"model_script_result_merge\",\n \"module\": \"model_script_result_merge\",\n \"comment\": \"合并模型和表格抽取结果\",\n \"args\": {}\n },\n {\n \"name\": \"merge_field_config\",\n \"module\": \"merge_field_config\",\n \"comment\": \"获取字段训练参数\",\n \"args\": {}\n }\n]\n\nworkflow_conf_dict = {\n \"offline\": # 先表格判别,再表格抽取离线训练workflow\n [\n \"merge_field_config\",\n \"common_offline_preprocess\",\n \"train_data_preprocess\",\n \"train_recognize_similarity_estimator\",\n \"train_extract_search_estimator\",\n \"train_extract_svm_estimator\",\n \"save_models\"\n ],\n \"model_online\": # 先表格判别,再表格抽取在线抽取workflow\n [\n \"predict_data_preprocess\",\n \"predict_recognize_similarity_estimator\",\n # \"predict_extract_search_estimator\",\n \"predict_extract_svm_estimator\",\n \"model_result_merge\"\n ],\n \"online\": # 综合模型抽取和脚本抽取结果\n [\n \"model_extract\",\n \"script\",\n \"model_script_result_merge\"\n ]\n}\n","sub_path":"text_classification/conf/u_shape_framework_conf.py","file_name":"u_shape_framework_conf.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"124780244","text":"#########################################\n# DEPENDENCIES:\n#########################################\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\n\n\n#########################################\n# FUNCTION TO INITIALIZE BROWSER USING CHROME DRIVER \n# - PLEASE MAKE SURE YOUR CHROMEDRIVER IS LOCATED IN THE SAME FOLDER\n#########################################\ndef init_browser():\n executable_path = 
{\"executable_path\": \"chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=True)\n\n\n#########################################\n# FUNCTION TO SCRAPE THE PREDEFINED WEBSITES \n#########################################\ndef scrape():\n\n # Define Variables:\n #####################################################################################\n # Nasa Data:\n nasa_url = \"https://mars.nasa.gov/news/\"\n nasa_dict = {\n \"news_title\": \"\",\n \"news_p\": \"\"\n }\n\n # JPL Data:\n jpl_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n featured_image_url = \"\"\n\n # Mars Facts:\n mfact_url = \"https://space-facts.com/mars/\"\n\n # Mars Hemisphere Data:\n mhemi_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n hemisphere_image_urls = []\n\n \n \n # Initialize browser:\n #####################################################################################\n browser = init_browser()\n\n\n\n # Nasa Data Scraping:\n #####################################################################################\n browser.visit(nasa_url)\n\n time.sleep(1)\n\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n\n nasa_dict[\"news_title\"] = soup.find(\"div\", class_=\"content_title\").get_text()\n nasa_dict[\"news_p\"] = soup.find(\"div\", class_=\"article_teaser_body\").get_text()\n\n\n\n # JPL Data Scraping:\n #####################################################################################\n browser.visit(jpl_url)\n\n time.sleep(1)\n\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n\n results = soup.find(\"div\", class_=\"carousel_items\").find(\"article\")[\"style\"] \n ending_url_list = results.split(\"'\")\n\n base_url = \"https://www.jpl.nasa.gov\"\n\n featured_image_url = base_url + ending_url_list[1]\n \n\n\n # Mars Facts Data Scraping:\n #####################################################################################\n # Use PANDAS to scrape:\n tableslist = pd.read_html(mfact_url)\n\n table_df = tableslist[1]\n\n # if necessary: named the columns:\n final_table_df = table_df.rename(columns={\n 0: 'Fact',\n 1: 'Value'\n })\n\n # Use PANDAS to convert data to HTML table string: \n html_table = final_table_df.to_html(index=False, justify=\"left\")\n # To remove new line characters:\n html_table.replace('\\n', '')\n\n\n\n # Mars Hemisphere Data Scraping:\n #####################################################################################\n browser.visit(mhemi_url)\n\n time.sleep(1)\n\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n\n linkresults = soup.find_all('a', class_='itemLink')\n\n #################################\n # Pull out the image partial links and their titles:\n #################################\n for x in linkresults:\n \n raw_title = x.find('img')\n link = x['href']\n \n if raw_title and link:\n title = raw_title['alt']\n final_title = title.replace(\" Enhanced thumbnail\", \"\")\n \n # click on the link to retrieve the the full res image url:\n browser.click_link_by_partial_text(final_title)\n time.sleep(1)\n html2 = browser.html\n soup2 = BeautifulSoup(html2, \"html.parser\")\n \n \n newresults = soup2.find_all('a')\n # loop through the a tags to find the right link:\n for hemi in newresults:\n if hemi.text == 'Sample':\n newlink = hemi['href']\n \n \n #Use a Python dictionary to store the data using the keys img_url and title:\n temp_dict = {\n \"title\": final_title,\n \"img_url\": newlink\n }\n \n # Append the dictionary 
\n    table_df = tableslist[1]\n\n    # Name the columns:\n    final_table_df = table_df.rename(columns={\n        0: 'Fact',\n        1: 'Value'\n    })\n\n    # Use PANDAS to convert data to HTML table string:\n    html_table = final_table_df.to_html(index=False, justify=\"left\")\n    # Remove newline characters (str.replace returns a new string, so reassign):\n    html_table = html_table.replace('\n', '')\n\n\n    # Mars Hemisphere Data Scraping:\n    #####################################################################################\n    browser.visit(mhemi_url)\n\n    time.sleep(1)\n\n    html = browser.html\n    soup = BeautifulSoup(html, \"html.parser\")\n\n    linkresults = soup.find_all('a', class_='itemLink')\n\n    #################################\n    # Pull out the image partial links and their titles:\n    #################################\n    for x in linkresults:\n\n        raw_title = x.find('img')\n        link = x['href']\n\n        if raw_title and link:\n            title = raw_title['alt']\n            final_title = title.replace(\" Enhanced thumbnail\", \"\")\n\n            # click on the link to retrieve the full res image url:\n            browser.click_link_by_partial_text(final_title)\n            time.sleep(1)\n            html2 = browser.html\n            soup2 = BeautifulSoup(html2, \"html.parser\")\n\n            newresults = soup2.find_all('a')\n            # loop through the a tags to find the right link:\n            for hemi in newresults:\n                if hemi.text == 'Sample':\n                    newlink = hemi['href']\n\n            # Use a Python dictionary to store the data using the keys img_url and title:\n            temp_dict = {\n                \"title\": final_title,\n                \"img_url\": newlink\n            }\n\n            # Append the dictionary with the image url string and the hemisphere title to a list\n            hemisphere_image_urls.append(temp_dict)\n\n\n    # As of 08/18/2019 the original link stopped working (error 404). Here is my workaround:\n    if not hemisphere_image_urls:\n        temp_dict = {\n            \"title\": \"Cerberus Hemisphere\",\n            \"img_url\": \"https://astrogeology.usgs.gov/cache/images/cfa62af2557222a02478f1fcd781d445_cerberus_enhanced.tif_full.jpg\"\n        }\n        hemisphere_image_urls.append(temp_dict)\n\n        temp_dict = {\n            \"title\": \"Valles Marineris Hemisphere\",\n            \"img_url\": \"https://astrogeology.usgs.gov/cache/images/7cf2da4bf549ed01c17f206327be4db7_valles_marineris_enhanced.tif_full.jpg\"\n        }\n        hemisphere_image_urls.append(temp_dict)\n\n        temp_dict = {\n            \"title\": \"Syrtis Major Hemisphere\",\n            \"img_url\": \"https://astrogeology.usgs.gov/cache/images/ae209b4e408bb6c3e67b6af38168cf28_syrtis_major_enhanced.tif_full.jpg\"\n        }\n        hemisphere_image_urls.append(temp_dict)\n\n        temp_dict = {\n            \"title\": \"Schiaparelli Hemisphere\",\n            \"img_url\": \"https://astrogeology.usgs.gov/cache/images/3cdd1cbf5e0813bba925c9030d13b62e_schiaparelli_enhanced.tif_full.jpg\"\n        }\n        hemisphere_image_urls.append(temp_dict)\n\n\n    # Quit the browser after scraping\n    #####################################################################################\n    browser.quit()\n\n\n    mars_dict = {\n        \"nasa\" : nasa_dict,\n        \"jpl\" : featured_image_url,\n        \"mfacts\" : html_table,\n        \"mhemi\" : hemisphere_image_urls\n    }\n\n\n    # Return results\n    #####################################################################################\n    return mars_dict\n","sub_path":"12_WebScrapeMongo_Homework/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"93030516","text":"# realpython.com/python-strings/\n# docs.python.org/3.1/library/string.html\n# www.digitalocean.com/community/tutorials/how-to-use-string-formatters-in-python-3\n\ndef main():\n    toPrint = \"\"\n    i = get_input(\"Height: \")\n    rAlign = \"{:>\"+str(i)+\"s}\"\n    for y in range(i):\n        toPrint = toPrint + \"#\"\n        print(rAlign.format(toPrint) + \" \" + toPrint)\n\ndef get_input(prompt):\n    while True:\n        userIp = input(prompt)\n        if userIp.isdigit() and 0 <= int(userIp) < 24:\n            break\n    return int(userIp)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"week0/codefiles/mariomore.py","file_name":"mariomore.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"152627968","text":"from functools import reduce\nfrom operator import xor\n\nA, B = map(int,input().split())\n\n\ndef calc(A):\n    A = A+1\n    cnt = 0\n    ans = 0\n    for i in range(0,50):\n        # i=0: the 1-bit appears with period 2\n        # i=1: the 1-bit appears with period 4\n        loop = pow(2,i+1)\n\n        # occurrence count, excluding the remainder part\n        # when A is 6 and i is 1, loop=4:\n        # 7//4 = 1 * 2^1 = 2\n        cnt = (A//loop)*pow(2,i)\n\n        # handle the remainder\n        # 7%4 - 4/2 = 3-2 = 1\n        # the remainder is 1, so add it\n        if((A%loop - loop//2) > 0):\n            cnt = cnt + (A%loop - loop//2)\n\n        # if cnt is odd, add 2^i to ans\n        if(cnt%2 == 1):\n            ans = ans + pow(2,i)\n\n    return ans\n\n\n\ndef main():\n    return calc(A-1)^calc(B)\n\nprint(main())\n","sub_path":"abc121/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"394445205","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 21 23:15:46 2018\r\n\r\n@author: Donghoon\r\n\"\"\"\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\n\"\"\"Line graph\"\"\"\r\nvar=[1,2,4,8,16,32,64,128,256]\r\nbias_squared=[256,128,64,32,16,8,4,2,1]\r\ntotal_error=[x+y for x,y in zip(var,bias_squared)]\r\nxs=[i for i,_ in enumerate(var)]\r\n\r\nplt.plot(xs,var,'g-',label='variance')\r\nplt.plot(xs,bias_squared,'r-.',label='bias^2')\r\nplt.plot(xs,total_error,'b:',label='total error')\r\n\r\nplt.legend(loc=9)\r\nplt.xlabel(\"model complexity\")\r\nplt.title(\"The Bias Variance Trade-off\")\r\nplt.show()\r\n\r\n\"\"\"Scatter plot\"\"\"\r\nfriends=[70,65,72,63,71,64,60,64,67]\r\nminutes=[175,179,205,120,220,130,105,145,190]\r\nlabels=['a','b','c','d','e','f','g','h','i']\r\n\r\nplt.scatter(friends, minutes)\r\nplt.title(\"Daily Minutes vs. Number of Friends\")\r\nplt.xlabel(\"# of friends\")\r\nplt.ylabel(\"daily minutes spent on the site\")\r\nfor label, friend_count, minute_count in zip(labels, friends, minutes):\r\n    plt.annotate(label,\r\n                 xy=(friend_count, minute_count),\r\n                 xytext=(5,-5),\r\n                 textcoords='offset points'\r\n                 )\r\nplt.show()\r\n\r\n\"\"\"Fair scatter plot with comparable x, y axes\"\"\"\r\ntest_1_grades=[99,90,85,97,80]\r\ntest_2_grades=[100,85,60,90,70]\r\n\r\nplt.scatter(test_1_grades,test_2_grades)\r\nplt.title(\"Axes Aren't Comparable\")\r\nplt.xlabel(\"test 1 grade\")\r\nplt.ylabel(\"test 2 grade\")\r\nplt.axis(\"equal\")\r\nplt.show()\r\n","sub_path":"Ds Ch.3-2.py","file_name":"Ds Ch.3-2.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"160386991","text":"import twitter_service\nimport datetime\nimport pickle\nimport statistic_service\nimport json\n\n\ntas = twitter_service.TweetService()\n\ndef get_representative():\n\n    #local_tweet = pickle.load(open(\"local_tweets.pickle\", 'rb'))\n    #results = tas.processWithLocalData(local_tweet)\n    #pickle.dump(results, open('local_analyzed_tweets.pickle', 'wb'))\n    local_analyzed_tweets = pickle.load(open(\"../local_analyzed_tweets.pickle\", 'rb'))\n    stat_service = statistic_service.StatisticService()\n    results = stat_service.process(local_analyzed_tweets,10)\n    #print(json.dumps(results[0]._json, indent=2))\n\n    for r in results:\n        print(json.dumps(r._json,indent=2))\n        #print(json.dumps(r.analysis))\n        #print(\"%f, %s\" % (r.representativeness['score'],r.text))\n    return results\n\n\ndef jsonfiy_tweet(tweets):\n    print(type(tweets[0].analysis))\n\n\nif __name__ == '__main__':\n    t1 = datetime.datetime.now()\n    get_representative()\n\n\n    t2 = datetime.datetime.now()\n    print(t2 - t1)","sub_path":"test/StatistcTest.py","file_name":"StatistcTest.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"594950090","text":"#eoscms ls -l /eos/cms/store/group/dpg_hcal/comm_hcal/USC/run327785/USC_327785.root\n# choose run in /store/group/dpg_hcal/comm_hcal/USC/\n#how to run: cmsRun remoteMonitoring_LASER_era2018_cfg.py 324144 /store/group/dpg_hcal/comm_hcal/USC/ /afs/cern.ch/work/z/zhokin/hcal/voc2/CMSSW_11_1_0_pre3/src/DPGAnalysis/HcalTools/scripts/rmt\n\nimport sys\nimport FWCore.ParameterSet.Config as cms\nfrom Configuration.StandardSequences.Eras import eras\nprocess = cms.Process(\"TEST\", eras.Run2_2018)\n#process = cms.Process(\"TEST\", 
eras.Run3)\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"CondCore.CondDB.CondDB_cfi\")\nprocess.load(\"EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi\")\nprocess.l1GtUnpack.DaqGtInputTag = 'source'\n# from RelValAlCaPedestal_cfg_2018.py\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.Reconstruction_Data_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\n#process.load('RecoLocalCalo.Configuration.hcalLocalReco_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\n\n##runnumber = sys.argv[2][4:-5] \nrunnumber = sys.argv[2]\nrundir = sys.argv[3]\nhistodir = sys.argv[4]\n\n#print 'RUN = '+runnumber\n#print 'Input file = '+rundir+'/run'+runnumber+'/USC_'+runnumber+'.root'\n##print 'Input file = '+rundir+'/USC_'+runnumber+'.root'\n#print 'Output file = '+histodir+'/LASER_'+runnumber+'.root'\n\nprocess.maxEvents = cms.untracked.PSet(\n# input = cms.untracked.int32(100)\n input = cms.untracked.int32(-1)\n )\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(histodir+'/LASER_'+runnumber+'.root')\n# ,closeFileFast = cms.untracked.bool(True)\n )\n\n#process.source = cms.Source(\"PoolSource\",\nprocess.source = cms.Source(\"HcalTBSource\",\n skipBadFiles=cms.untracked.bool(True),\n firstLuminosityBlockForEachRun = cms.untracked.VLuminosityBlockID([]),\n firstRun = cms.untracked.uint32(316584),\n# firstRun = cms.untracked.uint32(330153),\n# firstRun = cms.untracked.uint32(329416),\n fileNames = cms.untracked.vstring(\nrundir+'/run'+runnumber+'/USC_'+runnumber+'.root'\n#rundir+'/USC_'+runnumber+'.root'\n# '/store/group/dpg_hcal/comm_hcal/USC/run331370/USC_331370.root'\n\n), \n secondaryFileNames = cms.untracked.vstring()\n )\n\nprocess.Analyzer = cms.EDAnalyzer(\"CMTRawAnalyzer\",\n #\n Verbosity = cms.untracked.int32(0),\n #Verbosity = cms.untracked.int32(-9062),\n #Verbosity = cms.untracked.int32(-9063),\n #Verbosity = cms.untracked.int32(-9064),\n #Verbosity = cms.untracked.int32(-9065),\n #Verbosity = cms.untracked.int32(-84),\n #Verbosity = cms.untracked.int32(-91),\n #Verbosity = cms.untracked.int32(-92),\n #\n MapCreation = cms.untracked.int32(1),\n #\n recordNtuples = cms.untracked.bool(False),\n #recordNtuples = cms.untracked.bool(True),\n maxNeventsInNtuple = cms.int32(1),\n #\n #recordHistoes = cms.untracked.bool(False),\n recordHistoes = cms.untracked.bool(True),\n #\n ##scripts: zRunRatio34.C, zRunNbadchan.C\n studyRunDependenceHist = cms.untracked.bool(True),\n #studyRunDependenceHist = cms.untracked.bool(False),\n #\n ##scripts: zerrors.C\n studyCapIDErrorsHist = cms.untracked.bool(True),\n #studyCapIDErrorsHist = cms.untracked.bool(False),\n #\n ##scripts: zrms.C\n studyRMSshapeHist = cms.untracked.bool(True),\n #studyRMSshapeHist = cms.untracked.bool(False),\n #\n ##scripts: zratio34.C\n studyRatioShapeHist = cms.untracked.bool(True),\n #studyRatioShapeHist = cms.untracked.bool(False),\n #\n ##scripts: zadcamplitude.C\n studyADCAmplHist = cms.untracked.bool(True),\n #studyADCAmplHist = cms.untracked.bool(False),\n #\n ##scripts: ztsmean.C\n studyTSmeanShapeHist = cms.untracked.bool(True),\n 
#studyTSmeanShapeHist = cms.untracked.bool(False),\n #\n ##scripts: ztsmaxa.C\n studyTSmaxShapeHist = cms.untracked.bool(True),\n #studyTSmaxShapeHist = cms.untracked.bool(False),\n #\n ##scripts: zcalib....C\n studyCalibCellsHist = cms.untracked.bool(True),\n #studyCalibCellsHist = cms.untracked.bool(False),\n #\n ##scripts: zdifampl.C\n studyDiffAmplHist = cms.untracked.bool(True),\n #studyDiffAmplHist = cms.untracked.bool(False),\n #\n ##scripts: zadcamplitude.C\n studyPedestalsHist = cms.untracked.bool(True),\n #studyPedestalsHist = cms.untracked.bool(False),\n #\n ##scripts: zamplpedcorr.C\n studyPedestalCorrelations = cms.untracked.bool(True),\n #studyPedestalsHist = cms.untracked.bool(False),\n #\n #\n ##DigiCollectionLabel = cms.untracked.InputTag(\"hcalDigis\"),\n #Verbosity = cms.untracked.int32(-54),\n #Verbosity = cms.untracked.int32(-22),\n #Verbosity = cms.untracked.int32(-11),\n #Verbosity = cms.untracked.int32(-12),\n #Verbosity = cms.untracked.int32(-13),\n #Verbosity = cms.untracked.int32(-51),\n #Verbosity = cms.untracked.int32(-24),\n #Verbosity = cms.untracked.int32(-244),\n #Verbosity = cms.untracked.int32(-233),\n #\n #\n # Normal channels:\n #\n # -53 for BAD HBHEHF channels from study on shape Ratio\n #Verbosity = cms.untracked.int32(-53),\n ratioHBMin = cms.double(0.50),\n ratioHBMax = cms.double(1.00),\n ratioHEMin = cms.double(0.55),\n ratioHEMax = cms.double(1.00),\n ratioHFMin = cms.double(0.60),\n ratioHFMax = cms.double(1.02),\n ratioHOMin = cms.double(0.55),\n ratioHOMax = cms.double(1.04),\n # -54 for BAD HBHEHF channels from study on RMS of shapes\n #Verbosity = cms.untracked.int32(-54),\n rmsHBMin = cms.double(0.3),\n rmsHBMax = cms.double(2.5),\n rmsHEMin = cms.double(0.9),\n rmsHEMax = cms.double(3.6),\n rmsHFMin = cms.double(0.2),\n rmsHFMax = cms.double(2.1),\n rmsHOMin = cms.double(0.2),\n rmsHOMax = cms.double(2.6),\n # -55 for BAD HBHEHF channels from study on TSmean of shapes\n #Verbosity = cms.untracked.int32(-55),\n TSmeanHBMin = cms.double(5.0),\n TSmeanHBMax = cms.double(7.5),\n TSmeanHEMin = cms.double(2.5),\n TSmeanHEMax = cms.double(6.5),\n TSmeanHFMin = cms.double(5.5),\n TSmeanHFMax = cms.double(8.5),\n TSmeanHOMin = cms.double(1.5),\n TSmeanHOMax = cms.double(4.4),\n # -55 for BAD HBHEHF channels from study on TSmax of shapes\n #Verbosity = cms.untracked.int32(-55),\n TSpeakHBMin = cms.double(1.8),\n TSpeakHBMax = cms.double(8.5),\n TSpeakHEMin = cms.double(1.5),\n TSpeakHEMax = cms.double(8.5),\n TSpeakHFMin = cms.double(1.5),\n TSpeakHFMax = cms.double(8.5),\n TSpeakHOMin = cms.double(0.5),\n TSpeakHOMax = cms.double(6.5),\n # -56 for BAD HBHEHOHF channels from study on ADC Amplitude\n #Verbosity = cms.untracked.int32(-56),\n ADCAmplHBMin = cms.double(10000.),\n ADCAmplHBMax = cms.double(300000.),\n ADCAmplHEMin = cms.double(20000.), \n ADCAmplHEMax = cms.double(300000.),\n ADCAmplHFMin = cms.double(50.),\n ADCAmplHFMax = cms.double(9000.),\n ADCAmplHOMin = cms.double(50.),\n ADCAmplHOMax = cms.double(9000.),\n #\n # to see channels w/ PedestalSigma < cut\n #Verbosity = cms.untracked.int32(-57),\n pedestalwHBMax = cms.double(0.1),\n pedestalwHEMax = cms.double(0.1),\n pedestalwHFMax = cms.double(0.4),\n pedestalwHOMax = cms.double(0.1),\n #\n # to see channels for pedestal < cut\n pedestalHBMax = cms.double(0.1),\n pedestalHEMax = cms.double(0.6),\n pedestalHFMax = cms.double(0.8),\n pedestalHOMax = cms.double(0.1),\n #\n #\n # CALIBRATION channels:\n #\n # for BAD HBHEHOHF CALIBRATION channels from study on ADC amplitude\n # cuts for 
Laser runs:\n #calibrADCHBMin = cms.double(15.0),\n #calibrADCHEMin = cms.double(15.0),\n #calibrADCHOMin = cms.double(15.0),\n #calibrADCHFMin = cms.double(15.0),\n # cuts for LASER runs:\n calibrADCHBMin = cms.double(1000.),\n\t\t\t\t calibrADCHBMax = cms.double(100000000.),\n calibrADCHEMin = cms.double(1000.),\n\t\t\t\t calibrADCHEMax = cms.double(100000000.),\n calibrADCHOMin = cms.double(1000.),\n\t\t\t\t calibrADCHOMax = cms.double(100000000.),\n calibrADCHFMin = cms.double(100.),\n\t\t\t\t calibrADCHFMax = cms.double(100000000.),\n\t\t\t\t \n # for BAD HBHEHOHF CALIBRATION channels from study on shape Ratio\n calibrRatioHBMin = cms.double(0.76),\n\t\t\t\t calibrRatioHBMax = cms.double(0.94),\n calibrRatioHEMin = cms.double(0.76),\n\t\t\t\t calibrRatioHEMax = cms.double(0.94),\n calibrRatioHOMin = cms.double(0.85),\n\t\t\t\t calibrRatioHOMax = cms.double(0.99),\n calibrRatioHFMin = cms.double(0.5),\n\t\t\t\t calibrRatioHFMax = cms.double(0.8),\n # for BAD HBHEHOHF CALIBRATION channels from study on TSmax\n calibrTSmaxHBMin = cms.double(1.50),\n calibrTSmaxHBMax = cms.double(2.50),\n calibrTSmaxHEMin = cms.double(1.50),\n calibrTSmaxHEMax = cms.double(2.50),\n calibrTSmaxHOMin = cms.double(1.50),\n calibrTSmaxHOMax = cms.double(2.50),\n calibrTSmaxHFMin = cms.double(3.50),\n calibrTSmaxHFMax = cms.double(4.50),\n # for BAD HBHEHOHF CALIBRATION channels from study on TSmean\n calibrTSmeanHBMin = cms.double(2.40),\n calibrTSmeanHBMax = cms.double(3.70),\n calibrTSmeanHEMin = cms.double(2.40),\n calibrTSmeanHEMax = cms.double(3.70),\n calibrTSmeanHOMin = cms.double(1.50),\n calibrTSmeanHOMax = cms.double(2.70),\n calibrTSmeanHFMin = cms.double(3.50),\n calibrTSmeanHFMax = cms.double(4.50),\n # for BAD HBHEHOHF CALIBRATION channels from study on Width\n calibrWidthHBMin = cms.double(1.30),\n calibrWidthHBMax = cms.double(1.90),\n calibrWidthHEMin = cms.double(1.30),\n calibrWidthHEMax = cms.double(1.90),\n calibrWidthHOMin = cms.double(0.70),\n calibrWidthHOMax = cms.double(1.65),\n calibrWidthHFMin = cms.double(0.30),\n calibrWidthHFMax = cms.double(1.50),\n #\n # Special task of run or LS quality:\n #\n # flag for ask runs of LSs for RMT & CMT accordingly:\n #=0-runs, =1-LSs\n # keep for LASER runs this flags =0 always\n flagtoaskrunsorls = cms.int32(0),\n #\n # flag for choice of criterion of bad channels:\n #=0-CapIdErr, =1-Ratio, =2-Width, =3-TSmax, =4-TSmean, =5-adcAmplitud\n # keep for CMT (global runs) this flags =0 always\n flagtodefinebadchannel = cms.int32(0),\n #how many bins you want on the plots:better to choice (#LS+1)\n howmanybinsonplots = cms.int32(25),\n #\n # ls - range for RBX study (and ??? 
perhaps for gain stability via abort gap):\n lsmin = cms.int32(1),\n #lsmax = cms.int32(620),\n lsmax = cms.int32(2600),\n #\n flagabortgaprejected = cms.int32(1),\n bcnrejectedlow = cms.int32(3446),\n bcnrejectedhigh= cms.int32(3564),\n #\n # flag cpu time reducing\n #=0-all plots, =1-optimized number of plots (for Global runs)\n flagcpuoptimization = cms.int32(0),\n #\n # flag for ask type of Normalization for CMT estimators:\n #=0-normalizationOn#evOfLS; =1-averageVariable-normalizationOn#entriesInLS;\n flagestimatornormalization = cms.int32(1),\n #\n #\n # cuts on Nbadchannels to see LS dependences:\n # Verbosity = cms.untracked.int32(-77),\n # to select abnormal events,for which Nbcs > this limits\n lsdep_cut1_peak_HBdepth1 = cms.int32(20),\n lsdep_cut1_peak_HBdepth2 = cms.int32(7),\n lsdep_cut1_peak_HEdepth1 = cms.int32(16),\n lsdep_cut1_peak_HEdepth2 = cms.int32(13),\n lsdep_cut1_peak_HEdepth3 = cms.int32(4),\n lsdep_cut1_peak_HFdepth1 = cms.int32(10),\n lsdep_cut1_peak_HFdepth2 = cms.int32(5),\n lsdep_cut1_peak_HOdepth4 = cms.int32(45),\n # to select events with Nbcs > this limits\n lsdep_cut3_max_HBdepth1 = cms.int32(19),\n lsdep_cut3_max_HBdepth2 = cms.int32(6),\n lsdep_cut3_max_HEdepth1 = cms.int32(15),\n lsdep_cut3_max_HEdepth2 = cms.int32(12),\n lsdep_cut3_max_HEdepth3 = cms.int32(3),\n lsdep_cut3_max_HFdepth1 = cms.int32(9),\n lsdep_cut3_max_HFdepth2 = cms.int32(4),\n lsdep_cut3_max_HOdepth4 = cms.int32(40),\n #\n #\n #old was for runs:\n # nbadchannels1 = cms.int32(7),\n # nbadchannels2 = cms.int32(12),\n # nbadchannels3 = cms.int32(50),\n #\n #Verbosity = cms.untracked.int32(-79),\n # cuts on Estimator1 to see LS dependences:\n lsdep_estimator1_HBdepth1 = cms.double(2500.),\n lsdep_estimator1_HBdepth2 = cms.double(2500.),\n lsdep_estimator1_HBdepth3 = cms.double(2500.),\n lsdep_estimator1_HBdepth4 = cms.double(2500.),\n lsdep_estimator1_HEdepth1 = cms.double(2500.),\n lsdep_estimator1_HEdepth2 = cms.double(2500.),\n lsdep_estimator1_HEdepth3 = cms.double(2500.),\n lsdep_estimator1_HEdepth4 = cms.double(2500.),\n lsdep_estimator1_HEdepth5 = cms.double(2500.),\n lsdep_estimator1_HEdepth6 = cms.double(2500.),\n lsdep_estimator1_HEdepth7 = cms.double(2500.),\n lsdep_estimator1_HFdepth1 = cms.double(2500.),\n lsdep_estimator1_HFdepth2 = cms.double(2500.),\n lsdep_estimator1_HFdepth3 = cms.double(2500.),\n lsdep_estimator1_HFdepth4 = cms.double(2500.),\n lsdep_estimator1_HOdepth4 = cms.double(2500.),\n # cuts on Estimator2 to see LS dependences:\n lsdep_estimator2_HBdepth1 = cms.double(7.),\n lsdep_estimator2_HBdepth2 = cms.double(7.),\n lsdep_estimator2_HEdepth1 = cms.double(7.),\n lsdep_estimator2_HEdepth2 = cms.double(7.),\n lsdep_estimator2_HEdepth3 = cms.double(7.),\n lsdep_estimator2_HFdepth1 = cms.double(7.),\n lsdep_estimator2_HFdepth2 = cms.double(7.),\n lsdep_estimator2_HOdepth4 = cms.double(7.),\n # cuts on Estimator3 to see LS dependences:\n lsdep_estimator3_HBdepth1 = cms.double(7.),\n lsdep_estimator3_HBdepth2 = cms.double(7.),\n lsdep_estimator3_HEdepth1 = cms.double(7.),\n lsdep_estimator3_HEdepth2 = cms.double(7.),\n lsdep_estimator3_HEdepth3 = cms.double(7.),\n lsdep_estimator3_HFdepth1 = cms.double(7.),\n lsdep_estimator3_HFdepth2 = cms.double(7.),\n lsdep_estimator3_HOdepth4 = cms.double(7.),\n # cuts on Estimator4 to see LS dependences:\n lsdep_estimator4_HBdepth1 = cms.double(5.),\n lsdep_estimator4_HBdepth2 = cms.double(5.),\n lsdep_estimator4_HEdepth1 = cms.double(5.),\n lsdep_estimator4_HEdepth2 = cms.double(5.),\n lsdep_estimator4_HEdepth3 = 
cms.double(5.),\n lsdep_estimator4_HFdepth1 = cms.double(5.),\n lsdep_estimator4_HFdepth2 = cms.double(5.),\n lsdep_estimator4_HOdepth4 = cms.double(5.),\n # cuts on Estimator5 to see LS dependences:\n lsdep_estimator5_HBdepth1 = cms.double(1.8),\n lsdep_estimator5_HBdepth2 = cms.double(1.8),\n lsdep_estimator5_HEdepth1 = cms.double(1.8),\n lsdep_estimator5_HEdepth2 = cms.double(1.8),\n lsdep_estimator5_HEdepth3 = cms.double(1.8),\n lsdep_estimator5_HFdepth1 = cms.double(1.8),\n lsdep_estimator5_HFdepth2 = cms.double(1.8),\n lsdep_estimator5_HOdepth4 = cms.double(1.8),\n #\n # \n #Verbosity = cms.untracked.int32(-81),\n #Verbosity = cms.untracked.int32(-82),\n #Verbosity = cms.untracked.int32(-83),\n # \n # use ADC amplitude:\n useADCmassive = cms.untracked.bool(True),\n useADCfC = cms.untracked.bool(False),\n useADCcounts = cms.untracked.bool(False),\n # \n # Pedestals in fC\n #usePedestalSubtraction = cms.untracked.bool(True),\n usePedestalSubtraction = cms.untracked.bool(False),\n #\n # for possible ignoring of channels w/o signal, apply same cut for\n # HBHEHFHO on Amplitude, usable for all Estimators 1,2,3,4,5:\n # forallestimators_amplitude_bigger = cms.double(10.),\n forallestimators_amplitude_bigger = cms.double(-100.),\n #\n #\n # if 0 - do not use digis at all\n flagToUseDigiCollectionsORNot = cms.int32(1),\n #\n #usecontinuousnumbering = cms.untracked.bool(False),\n usecontinuousnumbering = cms.untracked.bool(True),\n #\n #\n #\n hcalCalibDigiCollectionTag = cms.InputTag('hcalDigis'),\n hbheDigiCollectionTag = cms.InputTag('hcalDigis'),\n hoDigiCollectionTag = cms.InputTag('hcalDigis'),\n hfDigiCollectionTag = cms.InputTag('hcalDigis'),\n #\n #\n #\n #\n #for upgrade: ---------------------------------------------------------\n hbheQIE11DigiCollectionTag = cms.InputTag('hcalDigis'),\n hbheQIE10DigiCollectionTag = cms.InputTag('hcalDigis'),\n # flag to use either only old QIE8 digiCollections or only new QIE10,11 digiCollections\n #=0-all digiCollections(default for normal running), =1-only old QIE8 digiCollections, \n #=2-only new QIE1011 digiCollections, =3-only new QIE1011 digiCollections w/o new high depthes\n #=4-2016fall, =5-2016fall w/o new high depthes, =6-2017bebin, =7-2017bebin w/o new high depthes in HEonly\n #=8--2017bebin w/o new high depthes, =9-all digiCollections w/o new high depthes\n # flag HBHE8 HBHE11 HF8 HF10 comments:\n # 0 + + + + all\n # 1 + - + - old\n # 2 - + - + new\n # 3 - + - + new w/o high depthes\n # 4 + - + + 2016fall\n # 5 + - + + 2016fall w/o high depthes\n # 6 + + - + 2017 && 2018 && 2021\n # 7 + + - + 2017begin w/o high depthes in HEonly\n # 8 + + - + 2017begin w/o high depthes\n # 9 + + + + all w/o high depthes\n # 10 + - - + 2017 w/o HEP17\n # \n flagupgradeqie1011 = cms.int32(6),\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n # flaguseshunt = 1 or 6 (6 is default for global runs) \n flaguseshunt = cms.int32(6),\n # flagsipmcorrection: != 0 yes,apply; = 0 do not use;\n flagsipmcorrection = cms.int32(1),\n #end upgrade: --------------------------------------------------------- end upgrade\n #\n #\n # for local LASER runs ONLY!!! to be > 0 (,else = 0)\n flagLaserRaddam = cms.int32(1),\n # for gaussian fit for local shunt1 (Gsel0) led low-intensity or ped ONLY!!! to be > 0 (,else = 0)\n flagfitshunt1pedorledlowintensity = cms.int32(0),\n #\n splashesUpperLimit = cms.int32(10000),\n #\n #\n # for use in IterativeMethod of CalibrationGroup!!! 
to be > 1 (,else = 0)\n flagIterativeMethodCalibrationGroupDigi = cms.int32(1),\n #\n # for use in IterativeMethod of CalibrationGroup!!! to be > 1 (,else = 0)\n flagIterativeMethodCalibrationGroupReco = cms.int32(1),\n #\n hbheInputSignalTag = cms.InputTag('hbherecoMBNZS'),\n hbheInputNoiseTag = cms.InputTag('hbherecoNoise'),\n hfInputSignalTag = cms.InputTag('hfrecoMBNZS'),\n hfInputNoiseTag = cms.InputTag('hfrecoNoise'),\n #\n #\n #\n #\n #\n #\n #HistOutFile = cms.untracked.string('LASER_331370.root'),\n #HistOutFile = cms.untracked.string(histodir+'/LASER_'+runnumber+'.root'),\n #MAPOutFile = cms.untracked.string('LogEleMapdb.h')\n #\n ##OutputFilePath = cms.string('/tmp/zhokin/'), \n ##OutputFileExt = cms.string(''),\n #\n )\t\t\n\nprocess.hcal_db_producer = cms.ESProducer(\"HcalDbProducer\",\n dump = cms.untracked.vstring(''),\n file = cms.untracked.string('')\n)\nprocess.es_hardcode = cms.ESSource(\"HcalHardcodeCalibrations\",\n toGet = cms.untracked.vstring('QIEShape',\n 'QIEData',\n 'ChannelQuality',\n 'HcalQIEData',\n 'Pedestals',\n 'PedestalWidths',\n 'Gains',\n 'GainWidths',\n 'ZSThresholds',\n 'RespCorrs')\n)\n\n## Jula's recipe for too many files \n#process.options = cms.untracked.PSet(\n# wantSummary = cms.untracked.bool(False),\n# Rethrow = cms.untracked.vstring(\"ProductNotFound\"), # make this exception fatal\n# fileMode = cms.untracked.string('NOMERGE') # no ordering needed, but calls endRun/beginRun etc. at file boundaries\n#)\n\n######################################################################################## Global Tags for 2018 data taking :\n# use twiki site to specify HLT reconstruction Global tags:\n# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideFrontierConditions\n#\n# 100X_dataRun2_HLT_v2 for CMSSW_10_0_3 onwards CRUZET 2018 update of 0T templates for SiPixels\n# 100X_dataRun2_HLT_v1 for CMSSW_10_0_0 onwards MWGRs 2018 first HLT GT for 2018 \n#\n#\n############################################################################ GlobalTag :1+ good as 5\n#from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\n#process.GlobalTag = GlobalTag(process.GlobalTag, '100X_dataRun2_HLT_v2', '')\n\n#from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\n#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data_FULL', '')\n\n\n#from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\n#process.GlobalTag = GlobalTag(process.GlobalTag, '101X_dataRun2_HLT_v7', '')\n\n# 2019 Ultra Legacy 2017\n#process.GlobalTag.globaltag = '106X_dataRun2_trackerAlignment2017_v1'\n# 2019 Ultra Legacy 2018 test TkAl\n#process.GlobalTag.globaltag = '106X_dataRun2_v17'\n# 2019 Ultra Legacy 2018 \n#process.GlobalTag.globaltag = '106X_dataRun2_newTkAl_v18'\n# 2019 Ultra Legacy 2016\n#process.GlobalTag.globaltag = '106X_dataRun2_UL2016TkAl_v24'\n#process.GlobalTag.globaltag = '105X_dataRun2_v8'\n\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nfrom Configuration.AlCa.autoCond import autoCond\nprocess.GlobalTag.globaltag = '104X_dataRun2_v1'\n#process.GlobalTag.globaltag = '105X_postLS2_design_v4'\n#process.GlobalTag.globaltag = '106X_dataRun3_HLT_v3'\n\n\n############################################################################\n# V.EPSHTEIN:\n#process.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\n#process.GlobalTag.globaltag = '100X_dataRun2_Prompt_Candidate_2018_01_31_16_01_36'\n###\n#process.hcal_db_producer = cms.ESProducer(\"HcalDbProducer\",\n# dump = cms.untracked.vstring(''),\n# file = 
cms.untracked.string('')\n#)\n#\n#process.hcalDigis= cms.EDProducer(\"HcalRawToDigi\",\n# FilterDataQuality = cms.bool(True),\n# HcalFirstFED = cms.untracked.int32(700),\n# InputLabel = cms.InputTag(\"source\"),\n# UnpackCalib = cms.untracked.bool(True),\n# FEDs = cms.untracked.vint32(1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117),\n#)\n###\n############################################################################\nprocess.load('Configuration.StandardSequences.RawToDigi_Data_cff')\nprocess.hcalDigis.FilterDataQuality = cms.bool(False)\nprocess.hcalDigis.InputLabel = cms.InputTag(\"source\")\n############################################################################\nprocess.load('EventFilter.HcalRawToDigi.hcalRawToDigi_cfi')\nprocess.hcalDigis= process.hcalRawToDigi.clone(\n FilterDataQuality = False,\n InputLabel = \"source\",\n #InputLabel = \"rawDataCollector\",\n)\n############################################################################\n##process.load(\"Calibration.HcalAlCaRecoProducers.ALCARECOHcalCalPedestal_cff\")\nprocess.load(\"Calibration.HcalAlCaRecoProducers.ALCARECOHcalCalPedestalLocal_cff\")\n##process.load(\"Calibration.HcalAlCaRecoProducers.ALCARECOHcalCalMinBias_cff\")\n#process.load(\"ALCARECOHcalCalPedestalLocal_cff\")\n############################################################################\n#process.p = cms.Path(process.hcalDigis*process.Analyzer)\n#process.p = cms.Path(process.seqALCARECOHcalCalMinBiasDigiNoHLT*process.seqALCARECOHcalCalMinBias*process.minbiasana)\n\nprocess.p = cms.Path(process.hcalDigis*process.seqALCARECOHcalCalMinBiasDigiNoHLT*process.seqALCARECOHcalCalMinBias*process.Analyzer)\n#process.p = cms.Path(process.seqALCARECOHcalCalMinBiasDigiNoHLT*process.seqALCARECOHcalCalMinBias*process.Analyzer)\n\n# see /afs/cern.ch/work/z/zhokin/public/CMSSW_10_4_0_patch1/src/Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalMinBias_cff.py\n############################################################################\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n categories = cms.untracked.vstring(''),\n destinations = cms.untracked.vstring('cout'),\n debugModules = cms.untracked.vstring('*'),\n cout = cms.untracked.PSet(\n threshold = cms.untracked.string('WARNING'),\n\t WARNING = cms.untracked.PSet(limit = cms.untracked.int32(0))\n )\n )\n############################################################################\n\n\n\n","sub_path":"DPGAnalysis/HcalTools/python/remoteMonitoring_LASER_era2018_cfg.py","file_name":"remoteMonitoring_LASER_era2018_cfg.py","file_ext":"py","file_size_in_byte":36836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"92777100","text":"__author__ = 'DRL'\n\nfrom matplotlib import pyplot as _plt, backend_bases as _plt_bases\n\nfrom abc import ABCMeta as _ABCMeta, abstractproperty as _abs_prop\nimport os as _os\nfrom itertools import izip as _izip\nfrom datetime import datetime as _dt, timedelta as _td\n\nfrom . 
import errors_common as _err, errors_fs as _err_fs\nfrom .file_time import file_dt_local_tz\n\nimport tzlocal as _tzlocal # $ pip install tzlocal\n_local_tz = _tzlocal.get_localzone()\n\n_delta_1h = _td(hours=1)\n_delta_1d = _td(days=1)\n_delta_mini = _td(milliseconds=10)\n\n# region Service functions/classes\n\n\nclass _DateCounter(object):\n\t\"\"\"\n\tService class that tracks the data required for counting the number of full day cycles a given log file covers.\n\t\"\"\"\n\tdef __init__(self):\n\t\tsuper(_DateCounter, self).__init__()\n\t\tself.file_date = _dt.now(tz=_local_tz)\n\t\tself.last_time = _dt.now(tz=_local_tz)\n\t\tself.days = 0\n\n\tdef reset_for_file(self, file_path):\n\t\tf_dt = file_dt_local_tz(file_path)\n\t\t\t# local, since time in the file is expressed in this timezone,\n\t\t\t# so we need to do all the calculations in it, for proper day-cycle shift detection\n\n\t\tself.file_date = f_dt\n\t\tself.last_time = _dt(f_dt.year, f_dt.month, f_dt.day, tzinfo=_local_tz)\n\t\t# we don't need an \"unset\" state for this ^ because the file time\n\t\t# is guaranteed to be later than or equal to 00:00\n\t\tself.days = 0\n\t\treturn self\n\n\nclass GraphData(object):\n\tdef __init__(self, sequence_pairs=None, label_y=None, grid=True):\n\t\tsuper(GraphData, self).__init__()\n\t\tself.sequence_pairs = sequence_pairs or list()\n\t\tself.label_y = label_y\n\t\tself.grid = grid\n\n\tdef add_graph(self):\n\t\t_plt.plot(*self.sequence_pairs)\n\t\tlabel = self.label_y\n\t\tif label:\n\t\t\t_plt.ylabel(label)\n\t\t_plt.grid(self.grid)\n\n\ndef _out_dict_append(dict_o, gpu_id, nu_val):\n\t\"\"\"\n\tService function, appending the new logged value to the resulting dict,\n\tfor the corresponding GPU.\n\n\t:param dict_o: resulting dict\n\t:param gpu_id: gpu ID\n\t:param nu_val: the appended value\n\t\"\"\"\n\tif gpu_id not in dict_o:\n\t\tdict_o[gpu_id] = list()\n\tdict_o[gpu_id].append(nu_val)\n\n\ndef _out_dict_extend(dict_o, extra_dict):\n\tfor gpu_id, extra_list in extra_dict.iteritems():\n\t\tif not extra_list:\n\t\t\tcontinue\n\n\t\tif gpu_id not in dict_o:\n\t\t\tdict_o[gpu_id] = extra_list\n\t\t\tcontinue\n\n\t\tdict_o[gpu_id].extend(extra_list)\n\n\ndef _out_dict_sort(dict_o):\n\tfor gpu_id, data_list in dict_o.iteritems():\n\t\tassert isinstance(data_list, list)\n\t\tdata_list.sort(key=lambda x: x[0])\n\n# endregion\n\n\nclass MiningGraphBase(object):\n\t__metaclass__ = _ABCMeta\n\n\tdef __init__(\n\t\tself, dir_path,\n\t\thash_rate=True, gpu_temp=True, gpu_fan=True,\n\t\tweeks=0, days=0, hours=0, minutes=0\n\t):\n\t\tsuper(MiningGraphBase, self).__init__()\n\n\t\tself.__dir_path = ''\n\t\tself.__set_dir_path(dir_path)\n\n\t\tself.__days_counter = _DateCounter() # private - only used by the service methods parsing a single file\n\n\t\t# ensure each timedelta segment is of a proper type:\n\t\tall_time_segs = (weeks, days, hours, minutes)\n\t\tfor t_s in all_time_segs:\n\t\t\t_err.WrongTypeError(t_s, (int, bool), 'time range').raise_if_needed()\n\t\tself.max_timedelta = _td(weeks=weeks, days=days, hours=hours, minutes=minutes)\n\t\tself.filter_by_timedelta = any(all_time_segs)\n\n\t\tself.do_hash = bool(hash_rate)\n\t\tself.do_gpu = gpu_temp or gpu_fan\n\t\tself.do_gpu_temp = bool(gpu_temp)\n\t\tself.do_gpu_fan = bool(gpu_fan)\n\n\t\tself.out_hashes = dict()\n\t\tself.out_gpu = dict()\n\n\t\tself.__file_out_hashes = dict()\n\t\tself.__file_out_gpu = dict()\n\t\tself.__graph_data = list()\n\n# region dir_path property\n\n\tdef __set_dir_path(self, dir_path):\n\t\t# ensure the provided path is a proper directory.\n\t\t# yeah, it's not following the pythonic \"better ask for forgiveness than for permission\" idea,\n\t\t# but it avoids unnecessary calculations and stops evaluation at an early stage\n\t\tdir_path = _err.NotStringError(dir_path, 'dir_path').raise_if_needed_or_empty()\n\t\tif not _os.path.exists(dir_path):\n\t\t\traise _err_fs.NoFileOrDirError(dir_path)\n\t\tif not _os.path.isdir(dir_path):\n\t\t\traise _err_fs.NotDirError(dir_path)\n\t\tself.__dir_path = dir_path\n\n\t@property\n\tdef dir_path(self):\n\t\treturn self.__dir_path\n\n\t@dir_path.setter\n\tdef dir_path(self, value):\n\t\tself.__set_dir_path(value)\n\n# endregion\n\n# region Abstract properties\n\t@_abs_prop\n\tdef _hash_matches(self):\n\t\t\"\"\"\n\t\tNeeds to be defined in a child class.\n\n\t\t:return: Tuple of (compiled) match methods.\n\t\t\"\"\"\n\t\tpass\n\n\t@_abs_prop\n\tdef _gpu_matches(self):\n\t\t\"\"\"\n\t\tNeeds to be defined in a child class.\n\n\t\t:return: Tuple of (compiled) match methods.\n\t\t\"\"\"\n\t\tpass\n# endregion\n\n# region Match line functions\n\n\tdef match_hash(self, line):\n\t\t\"\"\"\n\t\tChecks whether the given line describes the current hash rate.\n\t\tEffectively a pure function; it only depends on the abstract _hash_matches property.\n\n\t\t:param line: the log line to test.\n\t\t:return:\n\t\t\tNone (no match) or tuple of strings:\n\t\t\t\t* timestamp\n\t\t\t\t* GPU number (id)\n\t\t\t\t* hash rate (H/s)\n\t\t\"\"\"\n\t\tmatches_functions = self._hash_matches\n\t\tfor match_f in matches_functions:\n\t\t\tm = match_f(line)\n\t\t\tif m:\n\t\t\t\treturn m.groups()\n\t\treturn None\n\t\t# ('17:18:50:069', '0', '302.196')\n\n\tdef match_gpu(self, line):\n\t\t\"\"\"\n\t\tChecks whether the given line describes the GPU cooling state.\n\t\tEffectively a pure function; it only depends on the abstract _gpu_matches property.\n\n\t\t:param line: the log line to test.\n\t\t:return:\n\t\t\tNone (no match) or tuple of strings:\n\t\t\t\t* timestamp\n\t\t\t\t* GPU number (id)\n\t\t\t\t* temp (in C)\n\t\t\t\t* fan speed (in %)\n\t\t\"\"\"\n\t\tmatches_functions = self._gpu_matches\n\t\tfor match_f in matches_functions:\n\t\t\tm = match_f(line)\n\t\t\tif m:\n\t\t\t\treturn m.groups()\n\t\treturn None\n\t\t# ('17:18:58:716', '0', '60', '50')\n\n# endregion\n\n# region Service methods for parsing single file\n\tdef __process_time_stamp(self, time_str):\n\t\t\"\"\"\n\t\tThis function generates a proper datetime object from the parsed timestamp string\n\t\tand also detects the moments where switching from 23h to 00h occurs, updating self.__days_counter accordingly.\n\n\t\tPrior to calling this method,\n\t\tthe __days_counter object needs to be reset for each file (via the reset_for_file() method)\n\t\tand the per-file dicts need to be reset, too.\n\n\t\tThe resulting datetime value is not offset by the calculated delta days,\n\t\tso you need to do that manually, after the entire file is parsed.\n\t\tThe days to offset are stored in the self.__days_counter.days field (obviously).\n\n\t\t:param time_str: the timestamp from the log file. 
e.g., '09:21:14:714'\n\t\t:return: datetime object in the local timezone\n\t\t\"\"\"\n\t\t# time_str = '09:21:14:714'\n\t\tdays_counter = self.__days_counter\n\n\t\tt = _dt.strptime(time_str + '000', '%H:%M:%S:%f').replace(tzinfo=_local_tz)\n\t\tf_d = days_counter.file_date\n\t\tt = t.replace(f_d.year, f_d.month, f_d.day)\n\t\tassert t.tzinfo == _local_tz\n\n\t\tlast_t = days_counter.last_time\n\n\t\tif t < last_t:\n\t\t\t# we could be on a 23->0 hour switch case\n\t\t\tif t < (last_t - _delta_1h) and t.hour == 0 and last_t.hour == 23:\n\t\t\t\t# the second check ^ here is just to avoid possible errors where the current time is just a few seconds in the past\n\t\t\t\t# last_t = _dt.strptime('23:59:59:613' + '000', '%H:%M:%S:%f')\n\t\t\t\t# t = _dt.strptime('00:00:01:120' + '000', '%H:%M:%S:%f')\n\t\t\t\tdays_counter.days += 1\n\t\t\t\tt += _delta_1d\n\t\t\t\tdays_counter.last_time = t\n\t\t\t\tdays_counter.file_date = t\n\t\t\t# otherwise we're here as a result of some error. Don't change any days_counter data.\n\t\t\treturn t\n\n\t\t# the current time is higher than the previous one. As expected. But...\n\t\tif t.hour == 23 and last_t.hour == 0:\n\t\t\t# most likely, we're just jumping back and forth.\n\t\t\t# Fix t accordingly and don't store it into last_time\n\t\t\treturn t - _delta_1d\n\n\t\t# the regular case:\n\t\tdays_counter.last_time = t\n\t\treturn t\n
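\t# Worked example (added) for the rollover logic above: last_time 23:59:59 followed by a\n\t# '00:00:01:120' stamp increments the day counter and shifts t forward a day, while\n\t# 00:00:05 followed by '23:59:58:900' is treated as jitter back into the previous day.\n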
\n\tdef __append_to_file_hashes(self, gpu_n, time_stamp, hash_val):\n\t\t\"\"\"\n\t\tAppend the parsed data to the per-file out-dict.\n\n\t\tAll the input parameters are strings (parsed sub-strings).\n\n\t\t:param gpu_n: GPU #\n\t\t:param time_stamp: logged time (without date, in the local timezone)\n\t\t:param hash_val: the actual hash value, in H/s (float-number only)\n\t\t\"\"\"\n\t\t_out_dict_append(\n\t\t\tself.__file_out_hashes, int(gpu_n),\n\t\t\t(self.__process_time_stamp(time_stamp), float(hash_val))\n\t\t)\n\n\tdef __append_to_file_gpu(self, gpu_n, time_stamp, temp, fan):\n\t\t\"\"\"\n\t\tAppend the parsed data to the per-file out-dict.\n\n\t\tAll the input parameters are strings (parsed sub-strings).\n\n\t\t:param gpu_n: GPU #\n\t\t:param time_stamp: logged time (without date, in the local timezone)\n\t\t:param temp: temperature in C (digits only)\n\t\t:param fan: fan speed in % (digits only)\n\t\t\"\"\"\n\t\t_out_dict_append(\n\t\t\tself.__file_out_gpu, int(gpu_n),\n\t\t\t(self.__process_time_stamp(time_stamp), int(temp), int(fan))\n\t\t)\n\n\tdef __parse_line_hash_only(self, line):\n\t\t\"\"\"\n\t\tParse a single line from the file.\n\t\tIf the line matches the pattern, store it into the hashes per-file output dict only.\n\n\t\t:param line: the log line to parse.\n\t\t\"\"\"\n\t\tassert isinstance(line, (str, unicode))\n\t\tmatch = self.match_hash(line)\n\t\tif not match:\n\t\t\treturn\n\t\ttime_stamp, gpu_n, hash_val = match\n\t\tself.__append_to_file_hashes(gpu_n, time_stamp, hash_val)\n\n\tdef __parse_line_gpu_only(self, line):\n\t\t\"\"\"\n\t\tParse a single line from the file.\n\t\tIf the line matches the pattern, store it into the GPU per-file output dict only.\n\n\t\t:param line: the log line to parse.\n\t\t\"\"\"\n\t\tassert isinstance(line, (str, unicode))\n\t\tmatch = self.match_gpu(line)\n\t\tif not match:\n\t\t\treturn\n\t\ttime_stamp, gpu_n, temp, fan = match\n\t\tself.__append_to_file_gpu(gpu_n, time_stamp, temp, fan)\n\n\tdef __parse_line_both(self, line):\n\t\t\"\"\"\n\t\tParse a single line from the file.\n\t\tIf the line matches either pattern, store it into the corresponding per-file output dict.\n\n\t\t:param line: the log line to parse.\n\t\t\"\"\"\n\t\tassert isinstance(line, (str, unicode))\n\t\tmatch = self.match_hash(line)\n\t\tif match:\n\t\t\ttime_stamp, gpu_n, hash_val = match\n\t\t\tself.__append_to_file_hashes(gpu_n, time_stamp, hash_val)\n\t\t\treturn\n\t\tmatch = self.match_gpu(line)\n\t\tif not match:\n\t\t\treturn\n\t\ttime_stamp, gpu_n, temp, fan = match\n\t\tself.__append_to_file_gpu(gpu_n, time_stamp, temp, fan)\n\n\tdef __reset_per_file_data(self, file_path):\n\t\t\"\"\"\n\t\tPrepare the per-file data for parsing the next log file.\n\n\t\tI.e.:\n\t\t\t* reset the self.__days_counter object\n\t\t\t* reset the per-file out-dictionaries\n\n\t\t:param file_path: path to the log file.\n\t\t:return: the given file path. It's checked for file existence.\n\t\t\"\"\"\n\t\t# file_path = r's:\\0-Programs\\3-Coins\\Zcash\\Claymore-AMD\\1497594056_log.txt'\n\t\tfile_path = _err.NotStringError(file_path, 'file_path').raise_if_needed()\n\t\tif not _os.path.exists(file_path):\n\t\t\traise _err_fs.NoFileOrDirError(file_path)\n\t\tif not _os.path.isfile(file_path):\n\t\t\traise _err_fs.NotFileError(file_path)\n\n\t\t# resetting our day calculation and stored data:\n\t\tself.__days_counter.reset_for_file(file_path)\n\t\tself.__file_out_hashes = dict()\n\t\tself.__file_out_gpu = dict()\n\t\treturn file_path\n\n\tdef __per_file_data_to_output(self, sort_outputs=True):\n\t\t\"\"\"\n\t\tTransfer the parsed file's outputs to the common ones.\n\n\t\t:param sort_outputs:\n\t\t\tWhen True (default), the output list is re-sorted after\n\t\t\tthe current file's data is passed to the common output.\n\t\t\"\"\"\n\t\thashes_from_to = (self.__file_out_hashes, self.out_hashes)\n\t\tgpu_from_to = (self.__file_out_gpu, self.out_gpu)\n\n\t\tfor per_file, out in (hashes_from_to, gpu_from_to):\n\t\t\t_out_dict_extend(out, per_file)\n\t\t\tif sort_outputs:\n\t\t\t\t_out_dict_sort(out)\n\n\t\tself.__file_out_hashes = dict()\n\t\tself.__file_out_gpu = dict()\n\n\t@staticmethod\n\tdef __offset_data_list_by_days(data_list, days_offset_td):\n\t\t\"\"\"\n\t\tFor logs that cover multiple day cycles,\n\t\trestore the dates to the guessed ones.\n\n\t\t:param data_list: the actual \"data\": a list of tuples\n\t\t:param days_offset_td:\n\t\t\ttimedelta of the counted (positive) days offset.\n\n\t\t\tI.e., if we know that the file covers 3 days: dt.timedelta(days=3)\n\n\t\t\tIn other words, \"how many days have gone by since the log was started till the file was last changed\"\n\t\t\"\"\"\n\t\tassert isinstance(data_list, list)\n\t\tfor i, values in enumerate(data_list):\n\t\t\tassert isinstance(values, tuple)\n\t\t\tout_dt = values[0] - days_offset_td\n\t\t\tdata_list[i] = (out_dt,) + values[1:]\n\n\t@staticmethod\n\tdef __surround_data_list_with_zeroes(data_list):\n\t\t\"\"\"\n\t\tInsert zero values at the start/end of the data sequence for the currently parsed file.\n\n\t\t:param data_list: the actual \"data\": a list of tuples\n\t\t\"\"\"\n\t\tassert isinstance(data_list, list)\n\t\tif not data_list:\n\t\t\treturn\n\n\t\tfirst = data_list[0][0] # dt\n\t\tlast = data_list[-1][0]\n\t\tfirst -= _delta_mini\n\t\tlast += _delta_mini\n\t\tnum_val = len(data_list[0]) - 1\n\t\tzero_values = [0] * num_val\n\t\tfirst = tuple([first] + zero_values)\n\t\tlast = tuple([last] + zero_values)\n\n\t\tdata_list.insert(0, first)\n\t\tdata_list.append(last)\n\n\t# -------------------------------------------------------\n\n\tdef __parse_single_file(self, file_path, sort_outputs=True):\n\t\t\"\"\"\n\t\tThis function parses the given file and appends the resulting lists\n\t\tto the main output dictionaries.\n\n\t\t:param file_path: Path to the log file (absolute recommended).\n\t\t:param sort_outputs:\n\t\t\tWhen True (default), the output list is re-sorted after\n\t\t\tthe current file's data is passed to the common output.\n\t\t\"\"\"\n\t\t# file_path = r's:\\0-Programs\\3-Coins\\Zcash\\Claymore-AMD\\1497594056_log.txt'\n\t\tfile_path = self.__reset_per_file_data(file_path)\n\n\t\tdo_hash = self.do_hash\n\t\tdo_gpu = self.do_gpu\n\n\t\tif do_hash and do_gpu:\n\t\t\tparse_line = self.__parse_line_both\n\t\telif do_hash:\n\t\t\tparse_line = self.__parse_line_hash_only\n\t\telif do_gpu:\n\t\t\tparse_line = self.__parse_line_gpu_only\n\t\telse:\n\t\t\treturn\n\n\t\twith open(file_path) as file_obj:\n\t\t\tfor l in file_obj:\n\t\t\t\tparse_line(l)\n\n\t\t# OK, we've parsed the file.\n\t\t# But now we need to handle day offsets.\n\n\t\tdelta_days = self.__days_counter.days\n\t\toffset_days_td = _td(days=delta_days)\n\n\t\tfor out in (self.__file_out_hashes, self.__file_out_gpu):\n\t\t\tfor gpu_id, data_list in out.iteritems():\n\t\t\t\tif delta_days != 0:\n\t\t\t\t\tself.__offset_data_list_by_days(data_list, offset_days_td)\n\t\t\t\tself.__surround_data_list_with_zeroes(data_list)\n\n\t\tself.__per_file_data_to_output(sort_outputs)\n\n# endregion\n\n\tdef parse_single_file(self, file_path):\n\t\t\"\"\"\n\t\tThis function parses the given file and appends the resulting lists\n\t\tto the main output dictionaries.\n\n\t\t:param file_path: Path to the log file (absolute recommended).\n\t\t:return: self\n\t\t\"\"\"\n\t\tself.__parse_single_file(file_path, True)\n\t\treturn self\n\n# region Service methods for entire folder parsing\n\n\tdef filter_out_data_beyond_time_range(self, earliest_dt):\n\t\t\"\"\"\n\t\tKeeps in the output sequence lists only those entries whose time is later than / equal to the given moment.\n\n\t\t:param earliest_dt: datetime object, in the local timezone.\n\t\t\"\"\"\n\t\tearliest_dt = _err.WrongTypeError(earliest_dt, _dt, 'earliest_dt').raise_if_needed()\n\t\tassert isinstance(earliest_dt, _dt)\n\t\tif str(earliest_dt.tzinfo) != str(_local_tz):\n\t\t\traise _err.WrongValueError(earliest_dt, 'earliest_dt', 'dt.datetime object in local timezone')\n\n\t\tfor out_dict in (self.out_hashes, self.out_gpu):\n\t\t\tfor gpu_id, data_list in out_dict.iteritems():\n\t\t\t\tassert isinstance(data_list, list)\n\t\t\t\tout_dict[gpu_id] = filter(\n\t\t\t\t\tlambda x: x[0] >= earliest_dt,\n\t\t\t\t\tdata_list\n\t\t\t\t)\n\n\tdef get_log_file_paths(self):\n\t\t\"\"\"\n\t\tGet the list of file paths for the logs that will be processed.\n\n\t\t:return:\n\t\t\ttwo values:\n\t\t\t\t* list - file paths (absolute)\n\t\t\t\t* datetime - time of the last file in the local timezone. Current time if no log files found.\n\t\t\"\"\"\n\t\tdir_path = self.dir_path\n\t\tfull_path_f = lambda file_path: _os.path.join(dir_path, file_path)\n\t\t# f = '1497589922_log.txt'\n\n\t\tfiles = map(full_path_f, _os.listdir(dir_path)) # all the files in the dir, as full paths\n\t\tfiles = [ # path-time pairs\n\t\t\t(f, file_dt_local_tz(f)) for f in files\n\t\t\tif (\n\t\t\t\tf.lower().endswith('_log.txt') and\n\t\t\t\t_os.path.isfile(f)\n\t\t\t)\n\t\t]\n\t\tfiles = sorted(files, key=lambda el: el[1])\n\n\t\tlast_file_dt = _dt.now(tz=_local_tz)\n\t\tif files:\n\t\t\tlast_file_dt = files[-1][1]\n\t\t\tif self.filter_by_timedelta:\n\t\t\t\t# keep only files within the given time range\n\t\t\t\tearliest_dt = last_file_dt - self.max_timedelta\n\t\t\t\tfiles = filter(lambda x: x[1] >= earliest_dt, files)\n\n\t\tfiles = map(lambda x: x[0], files) # tuples (path, dt) to just paths\n\t\tassert isinstance(files, list)\n\t\tassert isinstance(last_file_dt, _dt)\n\t\treturn files, last_file_dt\n\n# endregion\n\n\tdef parse_logs_dir(self):\n\t\t\"\"\"\n\t\tParse the entire folder of log files.\n\t\tStore the parsed sequences to the out dictionaries.\n\t\tThe resulting sequences are sorted by date, even if the files themselves weren't.\n\n\t\tIf a time filter was provided, only the entries within the range are kept.\n\n\t\t:return: self\n\t\t\"\"\"\n\t\tfile_paths, last_file_dt = self.get_log_file_paths()\n\t\tif not file_paths:\n\t\t\treturn self\n\n\t\tself.out_hashes = dict()\n\t\tself.out_gpu = dict()\n\n\t\tdo_filter = self.filter_by_timedelta\n\t\tfor i, f in enumerate(file_paths):\n\t\t\tself.__parse_single_file(f, sort_outputs=False)\n\t\t\tif do_filter and i == 0:\n\t\t\t\t# for the 1st file, keep only entries that match the given time range\n\t\t\t\tself.filter_out_data_beyond_time_range(last_file_dt - self.max_timedelta)\n\n\t\tfor out_dict in (self.out_hashes, self.out_gpu):\n\t\t\t_out_dict_sort(out_dict) # sort both outputs\n\t\t\t# and remove empty GPU data sets\n\t\t\t# (collect the keys first: popping while iterating over iteritems() raises a RuntimeError):\n\t\t\tfor gpu_id in [k for k, v in out_dict.items() if not v]:\n\t\t\t\tout_dict.pop(gpu_id)\n\n\t\treturn self\n\n\n\n\t@staticmethod\n\tdef __extend_graph_pairs_list(out_list, times_list, values_list):\n\t\tassert isinstance(out_list, list)\n\t\tif values_list[0] == 0:\n\t\t\ttimes_list = times_list[1:]\n\t\t\tvalues_list = values_list[1:]\n\t\tif values_list[-1] == 0:\n\t\t\ttimes_list = times_list[:-1]\n\t\t\tvalues_list = values_list[:-1]\n\t\tout_list.extend([times_list, values_list])\n\n\tdef __graph_data_append_hashes(self):\n\t\t\"\"\"\n\t\tGenerate a GraphData object from the hashes and attach it to the main graphs list.\n\n\t\tThis is a very low-level method.\n\t\tYou should already ensure that the hashes Graph needs to be shown\n\t\tand the output hashes dict is not empty.\n\t\t\"\"\"\n\t\tseq_pairs = list()\n\t\tfor gpu_id, hashes_list in sorted(self.out_hashes.iteritems(), key=lambda x: x[0]):\n\t\t\ttimes, hashes = _izip(*hashes_list)\n\t\t\tself.__extend_graph_pairs_list(seq_pairs, times, hashes)\n\t\tself.__graph_data.append(\n\t\t\tGraphData(seq_pairs, 'Hash rate, H/s')\n\t\t)\n\n\tdef __graph_data_append_gpu(self, do_temp=True, do_fan=True):\n\t\t\"\"\"\n\t\tGenerate GraphData object(s) from the GPU cooling data and attach them to the main graphs list.\n\n\t\tThis is a very low-level method.\n\t\tYou should already ensure that the corresponding GPU Graph needs to be shown\n\t\tand the output GPU dict is not empty.\n\t\t\"\"\"\n\t\ttemp_pairs = list()\n\t\tfan_pairs = list()\n\n\t\tfor gpu_id, gpu_data_list in 
sorted(self.out_gpu.iteritems(), key=lambda x: x[0]):\n\t\t\ttimes, temps_seq, fans_seq = _izip(*gpu_data_list)\n\t\t\tif not times:\n\t\t\t\tcontinue\n\t\t\tif do_temp and temps_seq:\n\t\t\t\tself.__extend_graph_pairs_list(temp_pairs, times, temps_seq)\n\t\t\tif do_fan and fans_seq:\n\t\t\t\tself.__extend_graph_pairs_list(fan_pairs, times, fans_seq)\n\n\t\tgraphs_data_append = self.__graph_data.append\n\t\tif do_temp and temp_pairs:\n\t\t\tgraphs_data_append(\n\t\t\t\tGraphData(temp_pairs, 'GPU Temperature, C')\n\t\t\t)\n\t\tif do_fan and fan_pairs:\n\t\t\tgraphs_data_append(\n\t\t\t\tGraphData(fan_pairs, 'Fan Speed, %')\n\t\t\t)\n\n\tdef __prepare_graph_data(self):\n\t\t\"\"\"\n\t\tGenerate the entire list of GraphData items, ready to be shown.\n\n\t\tThe added graphs are detected automatically, when both conditions are met:\n\t\t\t* The corresponding do_* field is True.\n\t\t\t* There is an actual data to add.\n\t\t\"\"\"\n\t\tdo_hash = self.do_hash\n\t\tdo_gpu = self.do_gpu\n\t\tdo_gpu_temp = self.do_gpu_temp\n\t\tdo_gpu_fan = self.do_gpu_fan\n\n\t\thash_data = self.out_hashes\n\t\tgpu_data = self.out_gpu\n\n\t\tif not hash_data:\n\t\t\tdo_hash = False\n\t\tif not gpu_data:\n\t\t\tdo_gpu = False\n\t\t\tdo_gpu_temp = False\n\t\t\tdo_gpu_fan = False\n\t\tif not(any([do_hash, do_gpu])):\n\t\t\treturn\n\n\t\tself.__graph_data = list()\n\t\tif do_hash:\n\t\t\tself.__graph_data_append_hashes()\n\t\tif do_gpu:\n\t\t\tself.__graph_data_append_gpu(do_gpu_temp, do_gpu_fan)\n\n\n\n\tdef display_graph(self):\n\t\t\"\"\"\n\t\tShows the graphs window.\n\n\t\tDepending on what graphs was enabled in object instance and what output data it actually contains,\n\t\tthere could be any combination of , and graphs.\n\t\t\"\"\"\n\t\tself.__prepare_graph_data()\n\t\tgraphs_data = self.__graph_data\n\t\tif not graphs_data:\n\t\t\treturn\n\n\t\ttotal_plots = len(graphs_data)\n\t\tcur_plot = [1] # as array - to modify from inner function\n\n\t\tdef prepare_subplot():\n\t\t\t_plt.subplot(total_plots, 1, cur_plot[0])\n\t\t\tcur_plot[0] += 1\n\n\t\twindow_title = 'GPU mining graph'\n\t\tif total_plots > 1:\n\t\t\t_plt.subplots_adjust(wspace=0.5)\n\t\t\tprep_subplot_if_needed = prepare_subplot\n\t\t\twindow_title += 's'\n\t\telse:\n\t\t\tprep_subplot_if_needed = lambda: None\n\n\t\tfor graph in graphs_data:\n\t\t\tprep_subplot_if_needed()\n\t\t\tassert isinstance(graph, GraphData)\n\t\t\tgraph.add_graph()\n\n\t\tcanvas = _plt.gcf().canvas\n\t\tassert isinstance(canvas, _plt_bases.FigureCanvasBase)\n\t\tcanvas.set_window_title(window_title)\n\t\t_plt.show()\n","sub_path":"mining_graph/__base_graph_class.py","file_name":"__base_graph_class.py","file_ext":"py","file_size_in_byte":20879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"201896519","text":"from configparser import RawConfigParser\nimport configparser\nimport logging\n\n\ndef read_config_values(config_file, term_nm_colors):\n config_parser = RawConfigParser()\n config_parser.read(config_file)\n\n logging.debug(\"Reading config file \\\"\" + config_file + \"\\\"\")\n\n config_values = {}\n\n\n def __config_get_value(config_fn, section, value_name, default_value):\n ret_value = None\n write_default_value = False\n\n try:\n ret_value = config_fn(section, value_name)\n except configparser.NoSectionError:\n # Section could not be found -> write it to the config file.\n config_parser[section] = {}\n write_default_value = True\n logging.debug(\"Config file section [\\\"\" + section + \"\\\"] not found -> 
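In `display_graph` above, `cur_plot = [1]` is the classic Python 2 workaround for the missing `nonlocal` keyword: the inner function mutates a list element instead of rebinding a name. With `matplotlib.pyplot.subplots` no counter is needed at all; note also that recent Matplotlib deprecates `canvas.set_window_title` in favor of `canvas.manager.set_window_title`. A sketch with illustrative data:

```python
import matplotlib.pyplot as plt

def show_graphs(graphs):
    """One stacked axes per (label, xs, ys) series; no subplot counter."""
    fig, axes = plt.subplots(nrows=len(graphs), ncols=1, squeeze=False)
    for ax, (label, xs, ys) in zip(axes[:, 0], graphs):
        ax.plot(xs, ys)
        ax.set_ylabel(label)
    title = 'GPU mining graph' + ('s' if len(graphs) > 1 else '')
    fig.canvas.manager.set_window_title(title)  # newer Matplotlib API
    plt.show()

show_graphs([('Hash rate, H/s', [0, 1, 2], [180, 182, 179]),
             ('GPU Temperature, C', [0, 1, 2], [61, 63, 62])])
```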
writing it to the config file\") \n except configparser.NoOptionError:\n write_default_value = True\n\n if write_default_value:\n # Option could not be found -> write it to the config file.\n ret_value = default_value\n config_parser[section][value_name] = ret_value\n logging.debug(\"Config file option [\\\"\" + section + \"\\\"].\\\"\" + value_name + \"\\\" not found -> writing it to the config file\")\n\n config_values[value_name] = config_fn(section, value_name)\n\n\n # Default values are specified here.\n __config_get_value(config_parser.getint, \"SETTINGS\", \"NumberOfSubprocesses\", \"4\")\n __config_get_value(config_parser.getint, \"SETTINGS\", \"NumberOfLinesReqToEnableSubprocs\", \"1024\")\n __config_get_value(config_parser.get, \"SETTINGS\", \"DisableSubprocesses\", \"False\")\n __config_get_value(config_parser.get, \"SETTINGS\", \"GlobalRegexps\", \"False\")\n\n with open(config_file, \"w\") as config_file:\n config_parser.write(config_file)\n\n return config_values\n","sub_path":"afsal/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326378772","text":"from datetime import datetime\nimport time\nfrom bluepy.btle import UUID, Peripheral, DefaultDelegate\nimport struct\nfrom datetime import datetime\nfrom dateutil import tz\nimport pandas as pd\nimport sqlalchemy as sql\nimport pymysql\n\nhost=\"twofast-rpi3-0\" # your host\nuser='reader' # username\npw='heiko' # password\ndb=\"NG_twofast_DB\" # name of the database\nconnect_string = 'mysql+pymysql://%(user)s:%(pw)s@%(host)s:3306/%(db)s'% {\"user\": user, \"pw\": pw, \"host\": host, \"db\": db}\nsql_engine = sql.create_engine(connect_string)\n\nclass SHT31Delegate(DefaultDelegate):\n def __init__(self, parent):\n DefaultDelegate.__init__(self)\n self.parent = parent\n self.sustainedNotifications = { 'Temp' : 0, 'Humi' : 0 }\n self.enabledNotifications = { 'Temp' : False, 'Humi' : False }\n self.offset = 0\n\n def prepareLoggerReadout(self, loggerInterval, newestTimeStampMs):\n self.__loggerInterval = loggerInterval\n self.__newestTimeStampMs = newestTimeStampMs\n self.loggingReadout = True\n\n def handleNotification(self, cHandle, data):\n dataTypes = {55: 'Temp', 50: 'Humi'}\n typeData = dataTypes[cHandle]\n\n if 4 < len(data):\n # data format for on device logged data: runnumber (4 bytes (unsigned int)) + N * value (N * 4 bytes (float32); while: 1 <= N <=4 )\n unpackedData = list(struct.unpack('I'+str(int((len(data)-4)/4))+'f', data))\n runnumber = unpackedData.pop(0)\n self.sustainedNotifications[typeData] = 0\n for x in unpackedData:\n self.parent.loggedDataReadout[typeData][self.__newestTimeStampMs-(runnumber-self.offset)*self.__loggerInterval] = x\n runnumber = runnumber+1\n else:\n # data format for non device logged data: value (4 bytes (float32))\n self.sustainedNotifications[typeData] = self.sustainedNotifications[typeData] + 1\n if 1 < self.sustainedNotifications[typeData]:\n # logging data transmission done for this datatype\n self.sustainedNotifications[typeData] = 2\n if sum(self.sustainedNotifications.values())/len(self.sustainedNotifications) >= 2:\n # logging data transmission done for all datatypes\n self.loggingReadout = False\n for dataType, enabled in self.enabledNotifications.items():\n if dataType is 'Temp' and not enabled:\n self.parent.setTemperatureNotification(False)\n elif dataType is 'Humi' and not enabled:\n self.parent.setHumidityNotification(False)\n\n if 
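The `config.py` record above back-fills missing sections and options with defaults and rewrites the file, which is why even the integer defaults are passed as strings (`"4"`): `ConfigParser` stores every value as a string and only converts on read via `getint`/`getboolean`. A compact sketch of the same write-back pattern; the file name and keys are illustrative:

```python
from configparser import ConfigParser

def read_with_defaults(path, defaults):
    """Read an INI file, back-fill missing options, and write it back.

    `defaults` maps (section, option) -> default value as a string,
    because ConfigParser stores strings only.
    """
    parser = ConfigParser()
    parser.read(path)

    for (section, option), value in defaults.items():
        if not parser.has_section(section):
            parser.add_section(section)
        if not parser.has_option(section, option):
            parser.set(section, option, value)

    with open(path, 'w') as fh:
        parser.write(fh)
    return {opt: parser.get(sec, opt) for (sec, opt) in defaults}

# e.g. read_with_defaults('app.ini',
#                         {('SETTINGS', 'NumberOfSubprocesses'): '4'})
```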
self.enabledNotifications[typeData]:\n self.parent.loggedData[typeData][int(round(time.time() * 1000))] = struct.unpack('f', data)[0]\n\nclass SHT31():\n def __init__(self, addr = None, iface = None):\n self.loggedDataReadout = {'Temp' : {}, 'Humi': {}}\n self.loggedData = {'Temp' : {}, 'Humi': {}}\n self.__loggerInterval = 0\n self.__loggingReadout = False\n\n self.__peripheral = Peripheral(addr, 'random', iface)\n if addr is not None:\n self.__peripheral.setDelegate(SHT31Delegate(self))\n self.__prepareGadget()\n\n def __prepareGadget(self):\n self.__characteristics = {}\n\n # READ\n self.__characteristics['SystemId'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A23))[0]\n # READ\n self.__characteristics['ManufacturerNameString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A29))[0]\n # READ\n self.__characteristics['ModelNumberString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A24))[0]\n # READ\n self.__characteristics['SerialNumberString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A25))[0]\n # READ\n self.__characteristics['HardwareRevisionString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A27))[0]\n # READ\n self.__characteristics['FirmwareRevisionString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A26))[0]\n # READ\n self.__characteristics['SoftwareRevisionString'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A28))[0]\n # READ WRITE\n self.__characteristics['DeviceName'] = self.__peripheral.getCharacteristics(uuid=UUID(\"00002a00-0000-1000-8000-00805f9b34fb\"))[0]\n # READ NOTIFY\n self.__characteristics['Battery'] = self.__peripheral.getCharacteristics(uuid=UUID(0x2A19))[0]\n # WRITE\n self.__characteristics['SyncTimeMs'] = self.__peripheral.getCharacteristics(uuid=UUID(\"0000f235-b38d-4985-720e-0f993a68ee41\"))[0]\n # READ WRITE\n self.__characteristics['OldestTimeStampMs'] = self.__peripheral.getCharacteristics(uuid=UUID(\"0000f236-b38d-4985-720e-0f993a68ee41\"))[0]\n # READ WRITE\n self.__characteristics['NewestTimeStampMs'] = self.__peripheral.getCharacteristics(uuid=UUID(\"0000f237-b38d-4985-720e-0f993a68ee41\"))[0]\n # WRITE NOTIFY\n self.__characteristics['StartLoggerDownload'] = self.__peripheral.getCharacteristics(uuid=UUID(\"0000f238-b38d-4985-720e-0f993a68ee41\"))[0]\n # READ WRITE\n self.__characteristics['LoggerIntervalMs'] = self.__peripheral.getCharacteristics(uuid=UUID(\"0000f239-b38d-4985-720e-0f993a68ee41\"))[0]\n # READ NOTIFY\n self.__characteristics['Humidity'] = self.__peripheral.getCharacteristics(uuid=UUID(\"00001235-b38d-4985-720e-0f993a68ee41\"))[0]\n # READ NOTIFY\n self.__characteristics['Temperature'] = self.__peripheral.getCharacteristics(uuid=UUID(\"00002235-b38d-4985-720e-0f993a68ee41\"))[0]\n\n if self.readFirmwareRevisionString() == '1.3':\n # Error in the documentation/firmware of 1.3 runnumber does not start with 0 it starts with 1, therefore insert an offset here\n self.__peripheral.delegate.offset = 1\n\n def connect(self, addr, iface=None):\n self.__peripheral.setDelegate(SHT31Delegate(self))\n self.__peripheral.connect(addr, 'random', iface)\n self.__prepareGadget()\n\n def disconnect(self):\n self.__peripheral.disconnect()\n\n def __readCharacteristcAscii(self, name):\n return self.__characteristics[name].read().decode('ascii')\n\n def readDeviceName(self):\n return self.__readCharacteristcAscii('DeviceName')\n\n def setDeviceName(self, name):\n return self.__characteristics['DeviceName'].write(name.encode('ascii'))\n\n def readTemperature(self):\n return struct.unpack('f', 
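`handleNotification` above distinguishes two payload shapes: live readings arrive as a single float32, while downloads of on-device logged data arrive as a uint32 run number followed by one to four float32 samples. A standalone decoder sketch; explicit little-endian order is assumed here (the original unpacks in native byte order):

```python
import struct

def decode_logger_packet(data: bytes):
    """Return (run_number, samples) for logged data, (None, [value]) live."""
    if len(data) <= 4:
        # live (non-logged) reading: a single float32
        return None, [struct.unpack('<f', data)[0]]
    n_floats = (len(data) - 4) // 4
    fields = struct.unpack('<I' + 'f' * n_floats, data)
    return fields[0], list(fields[1:])

# Run number 7 followed by two logged samples:
packet = struct.pack('<I2f', 7, 21.5, 21.6)
print(decode_logger_packet(packet))   # (7, [21.5, 21.600000381469727])
```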
self.__characteristics['Temperature'].read())[0]\n\n def setTemperatureNotification(self, enabled):\n tmp = 1 if enabled else 0\n self.__peripheral.delegate.enabledNotifications['Temp'] = enabled\n self.__setTemperatureNotification(tmp)\n\n def __setTemperatureNotification(self, byte):\n self.__peripheral.writeCharacteristic(self.__characteristics['Temperature'].valHandle+2, int(byte).to_bytes(1, byteorder = 'little'))\n\n def readHumidity(self):\n return struct.unpack('f', self.__characteristics['Humidity'].read())[0]\n\n def setHumidityNotification(self, enabled):\n tmp = 1 if enabled else 0\n self.__peripheral.delegate.enabledNotifications['Humi'] = enabled\n self.__setHumidityNotification(tmp)\n\n def __setHumidityNotification(self, byte):\n self.__peripheral.writeCharacteristic(self.__characteristics['Humidity'].valHandle+2, int(byte).to_bytes(1, byteorder = 'little'))\n\n def readBattery(self):\n return int.from_bytes(self.__characteristics['Battery'].read(), byteorder='little')\n\n def setSyncTimeMs(self, timestamp = None):\n timestampMs = timestamp if timestamp else int(round(time.time() * 1000))\n self.__characteristics['SyncTimeMs'].write(timestampMs.to_bytes(8, byteorder='little'))\n\n def readOldestTimestampMs(self):\n return int.from_bytes(self.__characteristics['OldestTimeStampMs'].read(), byteorder='little')\n\n def setOldestTimestampMs(self, value):\n self.__characteristics['OldestTimeStampMs'].write(value.to_bytes(8, byteorder='little'))\n\n def readNewestTimestampMs(self):\n return int.from_bytes(self.__characteristics['NewestTimeStampMs'].read(), byteorder='little')\n\n def setNewestTimestampMs(self, value):\n self.__characteristics['NewestTimeStampMs'].write(value.to_bytes(8, byteorder='little'))\n\n def readLoggerIntervalMs(self):\n return int.from_bytes(self.__characteristics['LoggerIntervalMs'].read(), byteorder='little')\n\n def setLoggerIntervalMs(self, interval):\n oneMonthInMs = (30 * 24 * 60 * 60 * 1000)\n interval = 1000 if interval < 1000 else oneMonthInMs if interval > oneMonthInMs else interval\n self.__characteristics['LoggerIntervalMs'].write((int(interval)).to_bytes(4, byteorder='little'))\n\n def readLoggedDataInterval(self, startMs = None, stopMs = None):\n self.setSyncTimeMs()\n time.sleep(0.1) # Sleep a bit to enable the gadget to set the SyncTime; otherwise 0 is read when readNewestTimestampMs is used\n self.__setTemperatureNotification(1)\n self.__setHumidityNotification(1)\n\n if startMs is not None:\n self.setOldestTimestampMs(startMs)\n else:\n self.setOldestTimestampMs(0)\n\n if stopMs is not None:\n self.setNewestTimestampMs(stopMs)\n# else:\n# self.setNewestTimestampMs(0)\n\n tmpNewestTimestamp = self.readNewestTimestampMs()\n #print(tmpNewestTimestamp)\n self.__peripheral.delegate.prepareLoggerReadout(self.readLoggerIntervalMs(), tmpNewestTimestamp)\n self.__characteristics['StartLoggerDownload'].write((1).to_bytes(1, byteorder='little'))\n\n def waitForNotifications(self, timeout):\n return self.__peripheral.waitForNotifications(timeout)\n\n def isLogReadoutInProgress(self):\n return self.__peripheral.delegate.loggingReadout\n\n def readSystemId(self):\n return self.__characteristics['SystemId'].read()\n\n def readManufacturerNameString(self):\n return self.__readCharacteristcAscii('ManufacturerNameString')\n\n def readModelNumberString(self):\n return self.__readCharacteristcAscii('ModelNumberString')\n\n def readSerialNumberString(self):\n return self.__readCharacteristcAscii('SerialNumberString')\n\n def 
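Two details of the characteristic I/O above are easy to miss: every multi-byte value crosses the BLE link little-endian (hence all the `int.to_bytes(..., byteorder='little')` calls), and `setLoggerIntervalMs` clamps its argument to the range 1 second to 30 days before writing. A quick check of both:

```python
def clamp_interval_ms(interval_ms):
    """Mirror of the clamping above: 1 s minimum, 30 days maximum."""
    one_month_ms = 30 * 24 * 60 * 60 * 1000
    return max(1000, min(int(interval_ms), one_month_ms))

# Characteristic payloads travel as little-endian byte strings:
payload = clamp_interval_ms(500).to_bytes(4, byteorder='little')
print(payload)                                      # b'\xe8\x03\x00\x00'
print(int.from_bytes(payload, byteorder='little'))  # 1000
```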
readHardwareRevisionString(self):\n return self.__readCharacteristcAscii('HardwareRevisionString')\n\n def readFirmwareRevisionString(self):\n return self.__readCharacteristcAscii('FirmwareRevisionString')\n\n def readSoftwareRevisionString(self):\n return self.__readCharacteristcAscii('SoftwareRevisionString')\n\ndef utc_to_local_time(timestamp):\n # METHOD 1: Hardcode zones:\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz('Europe/Zurich')\n\n # utc = datetime.utcnow()\n # utc = datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')\n\n # Tell the datetime object that it's in UTC time zone since\n # datetime objects are 'naive' by default\n utc = datetime.utcfromtimestamp(timestamp/1000)\n utc = utc.replace(tzinfo=from_zone)\n\n # Convert time zone\n my_time = utc.astimezone(to_zone)\n return my_time\n\ndef main():\n while True:\n # start = time.time()\n bleAddress = 'C5:BB:A6:86:0E:64'\n print('Connecting to:', bleAddress)\n gadget = SHT31(bleAddress)\n print('Connected')\n\n # print('Device name:', gadget.readDeviceName())\n\n # print('System ID: ', gadget.readSystemId())\n # print('Model number string:', gadget.readModelNumberString())\n # print('Serial number string:', gadget.readSerialNumberString())\n # print('Firmware revision string:', gadget.readFirmwareRevisionString())\n # print('Hardware revision string:', gadget.readHardwareRevisionString())\n # print('Software revision string:', gadget.readSoftwareRevisionString())\n # print('Manufacturer name string:', gadget.readManufacturerNameString())\n\n print('Battery level [%]:', gadget.readBattery())\n # print('Temperature [°C]:', '{:.2f}'.format(gadget.readTemperature()))\n # print('Humidity [%]:', '{:.2f}'.format(gadget.readHumidity()))\n\n # print('LoggerInterval [ms]: ', gadget.readLoggerIntervalMs())\n gadget.setSyncTimeMs()\n time.sleep(0.1) # Sleep a bit to enable the gadget to set the SyncTime; otherwise 0 is read when readNewestTimestampMs is used\n print('OldestTimestampMs [µs]:', gadget.readOldestTimestampMs(), datetime.utcfromtimestamp(gadget.readOldestTimestampMs()/1000).strftime('%Y-%m-%d %H:%M:%S'))\n print('NewestTimeStampMs [µs]:', gadget.readNewestTimestampMs(), datetime.utcfromtimestamp(gadget.readNewestTimestampMs()/1000).strftime('%Y-%m-%d %H:%M:%S'))\n\n gadget.readLoggedDataInterval()\n gadget.setTemperatureNotification(True) # enable notifications for humidity values; the object will log incoming data into the loggedData variable\n gadget.setHumidityNotification(True) # enable notifications for humidity values; the object will log incoming data into the loggedData variable\n\n try:\n while True:\n if False is gadget.waitForNotifications(5) or False is gadget.isLogReadoutInProgress():\n print('Done reading data')\n break\n # print('Read dataset')\n finally:\n data = gadget.loggedDataReadout # contains the data logged by the smartgadget\n data = pd.DataFrame(data)\n data.reset_index(inplace=True)\n data.rename(columns={\"index\": \"utc_time\", 'Temp': 'temp', 'Humi': 'humid'}, inplace=True)\n data['time'] = data['utc_time'].apply(lambda x: utc_to_local_time(x))\n data['time'] = data['time'].astype(pd.Timestamp)\n data['time'] = data['time'].dt.tz_localize(None)\n data = data[['time', 'temp', 'humid']]\n data = data.fillna(-1)\n # print(data.tail())\n # print(gadget.loggedData) # contains the data sent via notifications\n gadget.setLoggerIntervalMs(1000) # setting a new logger interval will clear all the logged data on the device\n gadget.disconnect()\n print('read values ', 
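`utc_to_local_time` above builds a naive datetime with `utcfromtimestamp` and then stamps the UTC zone on with `replace`. On Python 3.9+ the same conversion is a one-liner with the stdlib `zoneinfo` module (`dateutil.tz`, used above, behaves equivalently), and passing `tz=` to `fromtimestamp` avoids the naive intermediate entirely:

```python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+; dateutil.tz works similarly

def utc_ms_to_local(timestamp_ms, tz_name='Europe/Zurich'):
    """Epoch milliseconds -> aware datetime in the named zone."""
    utc = datetime.fromtimestamp(timestamp_ms / 1000, tz=timezone.utc)
    return utc.astimezone(ZoneInfo(tz_name))

print(utc_ms_to_local(1_500_000_000_000))  # 2017-07-14 04:40:00+02:00
```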
len(gadget.loggedDataReadout['Temp']), len(gadget.loggedDataReadout['Humi']))\n print('Disconnected')\n end = time.time()\n # print(end - start)\n # select only relevant\n data.to_sql('temp_humid_sensor', con=sql_engine, if_exists='append', index=False)\n time.sleep(5)\n\nif __name__ == \"__main__\":\n main()","sub_path":"01_neutron_generator_contol/sensirionsmartgadget/read_temp_humid_to_db.py","file_name":"read_temp_humid_to_db.py","file_ext":"py","file_size_in_byte":14661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"518223076","text":"# %pylab\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport csv\n\ndef read_data(filename):\n rows = []\n with open(filename, 'rb') as fp:\n for row in csv.reader(fp):\n rows.append(map(float, row))\n return rows[1:]\n\ndata0 = read_data('./55a6aad1b4c50673b900025e_items.csv')\ndata1 = read_data('./55a6ac88c1f3aa45db000239_items.csv')\ndata2 = read_data('./55a7ef8dc1f3aa4f610002e7_items.csv')\n\ndf0 = pd.DataFrame(data0)\ndf1 = pd.DataFrame(data1)\ndf2 = pd.DataFrame(data2)\n\ndef get_bins():\n return [i*5 for i in range(61)]\n\ndef plot_hist(ax, x, label):\n ax.hist(x, bins=get_bins(), label=label)\n\nfig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=True)\nax0, ax1, ax2 = axes\nplot_hist(ax0, df0.icol(0), 'Tue')\nplot_hist(ax1, df1.icol(0), 'Wed')\nplot_hist(ax2, df2.icol(0), 'Thu')\n","sub_path":"python/matplotlib/ordertimes/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596244109","text":"# create convenient type hint\nimport numpy as _np\n\nfrom pandas_ml_common import Typing\n\n\ndef ta_ewma_covariance(df: Typing.PatchedPandas, convert_to='returns', alpha=0.97):\n data = df.copy()\n\n if convert_to == 'returns':\n data = df.pct_change()\n if convert_to == 'log-returns':\n data = _np.log(df) - _np.log(df.shift(1))\n\n data.columns = data.columns.to_list()\n return data.ewm(com=alpha).cov()\n\n","sub_path":"pandas-ml-quant/pandas_ml_quant/analysis/covariances.py","file_name":"covariances.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"496922182","text":"from cv2 import cv2 as cv\n\n# Test Previous\n# Server (ip : 192.168.0.12)\n# ~$ ffmpeg -f v4l2 -i /dev/video0 -preset ultrafast -vcodec libx264 -vsync 2 -tune zerolatency -b 900k -f h264 udp://0.0.0.:5000\n# ffmpeg -re -i /dev/video0 -f rtsp -rtsp_transport udp rtsp://localhost:5000/live.sdp\n# ~$ ffmpeg -i /dev/video0 -vsync 2 -f h264 udp://0.0.0.0:5000 \n# Client (ip : 192.168.0.6)\n# ~$ ffplay udp://127.0.0.1:5000\n# ffmpeg -i udp://192.168.0.12:5000 -vcodec copy output.h264\ncap = cv.VideoCapture('udp://192.168.0.12:5000', cv.CAP_FFMPEG)\nif not cap.isOpened():\n print('VideoCapture not opened')\n exit(-1)\n\nwhile True:\n ret, frame = cap.read()\n\n if not ret:\n print('frame empty')\n break\n\n cv.imshow('image', frame)\n\n if cv.waitKey(1) & 0XFF == ord('q'):\n break\n\ncap.release()\ncv.destroyAllWindows()\n","sub_path":"OpenCV_py/ReadFromUDPProtocol.py","file_name":"ReadFromUDPProtocol.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185424433","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as 
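A caveat on the `ta_ewma_covariance` record above: the argument is named `alpha` but is passed to pandas as `ewm(com=alpha)`, and `com` (center of mass) is a different parameterization from the smoothing factor, related by alpha = 1/(1 + com). With the default of 0.97 that yields an effective smoothing factor of about 0.51, so the name is misleading. A self-contained run with toy prices:

```python
import numpy as np
import pandas as pd

prices = pd.DataFrame({'A': [100.0, 101.0, 99.5, 100.5],
                       'B': [50.0, 50.5, 50.2, 50.8]})

returns = prices.pct_change()                            # 'returns' mode
log_returns = np.log(prices) - np.log(prices.shift(1))   # 'log-returns' mode

# ewm(com=c) weights observations with alpha = 1 / (1 + c):
cov = returns.ewm(com=0.97).cov()
print(cov)
```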
tf\n#tf.enable_eager_execution()\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\nfrom tensorflow.keras import layers\nfrom tensorflow import keras\nfrom pathlib import Path\nimport tensorflow.keras.backend as K\nhome = str(Path.home())\n\ntf.keras.backend.clear_session() # For easy reset of notebook state.\nprint(cv2.__version__)\nprint(tf.__version__)\n#assert tf.executing_eagerly() == True\n\nclass_names = [\"cloudy\" ,\"sunny\"]\nNUM_OF_CLASSES = len(class_names)\nIMG_HEIGHT = 256\nIMG_WIDTH = 256\nIMG_CHN = 3\nRGB_MEAN = [0.485, 0.456, 0.406]\nRGB_STD = [0.229, 0.224, 0.225]\nlr = 1\nRF = 1e-4\ndef generate_generator(generator, path, batch_size = 16, img_height = IMG_HEIGHT, img_width = IMG_WIDTH):\n\n gen = generator.flow_from_directory(path,\n classes = class_names,\n target_size = (img_height,img_width),\n batch_size = batch_size,\n shuffle=True, \n seed=7)\n while True:\n X,y = gen.next() \n yield X, y #Yield both images and their mutual label\n \n\ndef weighted_categorical_crossentropy(weights):\n \"\"\"\n A weighted version of keras.objectives.categorical_crossentropy\n \n Variables:\n weights: numpy array of shape (C,) where C is the number of classes\n \n Usage:\n weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.\n loss = weighted_categorical_crossentropy(weights)\n model.compile(loss=loss,optimizer='adam')\n \"\"\"\n \n weights = K.variable(weights)\n \n def loss(y_true, y_pred):\n # scale predictions so that the class probas of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n # clip to prevent NaN's and Inf's\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n # calc\n loss = y_true * K.log(y_pred) * weights\n loss = -K.sum(loss, -1)\n return loss\n \n return loss \n \ndef Classifier(): \n \n input_img = layers.Input(shape = (IMG_HEIGHT,IMG_WIDTH,IMG_CHN), dtype = 'float32', name = \"input_img\" )\n x = layers.Conv2D(16, 3, strides=(2, 2), name = \"conv1\",\\\n padding='valid', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(input_img)\n x = layers.Conv2D(32, 3, strides=(2, 2), name = \"conv2\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n x = layers.MaxPool2D()(x)\n x = layers.Conv2D(64, 3, strides=(2, 2), name = \"conv3\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n x = layers.Conv2D(128, 3, strides=(2, 2), name = \"conv4\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n x = layers.MaxPool2D()(x)\n x = layers.Conv2D(128, 3, strides=(1, 1), name = \"conv5\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n x = layers.Conv2D(128, 3, strides=(1, 1), name = \"conv6\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n x = layers.Conv2D(128, 3, strides=(1, 1), name = \"conv7\",\\\n padding='same', activation=\"relu\", kernel_initializer='glorot_uniform', kernel_regularizer = keras.regularizers.l2(RF))(x)\n #lf = layers.MaxPool2D(pool_size=(4, 4))(conv2_out)\n #x = layers.Concatenate(axis = -1)([x,lf])\n x = layers.Flatten()(x)\n x = layers.Dense(32, activation= 'relu', name = \"fc1\", kernel_initializer 
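The `weighted_categorical_crossentropy` closure above renormalizes predictions, clips them away from 0 and 1, and scales each class's log-probability by its weight before summing. A plain NumPy mirror is handy for sanity-checking the Keras backend version on a single batch:

```python
import numpy as np

def weighted_cce(y_true, y_pred, weights, eps=1e-7):
    """NumPy mirror of the Keras loss: normalize, clip, weighted NLL."""
    y_pred = y_pred / y_pred.sum(axis=-1, keepdims=True)
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return -(y_true * np.log(y_pred) * weights).sum(axis=-1)

y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
y_pred = np.array([[0.8, 0.2], [0.4, 0.6]])
print(weighted_cce(y_true, y_pred, weights=np.array([1.0, 2.0])))
# [0.22314... 1.02165...]  == [-log(0.8), -2*log(0.6)]
```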
= 'glorot_uniform')(x)\n output = layers.Dense(NUM_OF_CLASSES, activation= 'softmax', name = \"output\", kernel_initializer = 'glorot_uniform')(x)\n \n\n model = keras.Model(inputs=[input_img], outputs=output)\n model.compile(optimizer = keras.optimizers.Adam(learning_rate=lr),\n loss=tf.keras.losses.categorical_crossentropy,#weighted_categorical_crossentropy([1, 1/0.1, 1/0.65]),\n metrics=[tf.keras.metrics.CategoricalAccuracy()] )\n \n return model\n \n\nclass Dataloader:\n def __init__(self, data_path, batch_size = 16):\n \n test_imgen = keras.preprocessing.image.ImageDataGenerator(rescale = 1/255.0)\n self.test_generator = generate_generator(test_imgen,\n path = str(data_path)+ \"val/\",\n batch_size=batch_size) \n def load_dl(self):\n return self.test_generator\n \nbatch_size = 16\ncheckpoint_path = \"weights_car_sun_8722/cp.ckpt\"\ntestset_size = 180\ndata_path = \"/home/charles/dataset/weather_car_sun/\"\ndl = Dataloader(data_path)\ntest_generator = dl.load_dl()\nmodel = Classifier()\nmodel.load_weights(checkpoint_path)\nprint(\"Weights Loaded\")\nloss, acc = model.evaluate_generator(test_generator,steps = testset_size/batch_size,use_multiprocessing = False)\nprint(\"Restored model, loss: {}, accuracy: {:5.2f}%\".format(loss, acc*100))\n\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n \"\"\"\n Freezes the state of a session into a pruned computation graph.\n\n Creates a new computation graph where variable nodes are replaced by\n constants taking their current value in the session. The new graph will be\n pruned so subgraphs that are not necessary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen,\n or None to freeze all the variables in the graph.\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = tf.graph_util.convert_variables_to_constants(\n session, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n\n\nfrom keras import backend as K\n\n# Create, compile and train model...\n\nfrozen_graph = freeze_session(K.get_session(),\n output_names=[out.op.name for out in model.outputs])\n\nprint([out.op.name for out in model.outputs])\nprint([out.op.name for out in model.inputs])\n\ntf.train.write_graph(frozen_graph, \"./\", \"model_sun_low.pb\", as_text=False)\n[n.name for n in tf.get_default_graph().as_graph_def().node]\n\nprint(\"model output to pb successful.\")\n\n","sub_path":"weather_classification/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145063718","text":"# coding: utf8\nfrom __future__ import unicode_literals\n\nimport os\nimport time\nimport sys\n\n\nclass Subscriber(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\t\n\tdef increase(self, message):\n\t\tprint('{} noticed that the file size increased. 
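`freeze_session` above is TF1-era: it walks a `Session` graph and bakes variables into constants via `tf.graph_util.convert_variables_to_constants`, an API that survives in TF2 only under `tf.compat.v1`. On TF2 the usual replacement for a frozen `.pb` is a SavedModel export; a minimal sketch, assuming TF 2.x:

```python
import numpy as np
import tensorflow as tf

# SavedModel export, the TF2 counterpart of freezing a graph to .pb.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(2, activation='softmax', input_shape=(4,)),
])
model(np.zeros((1, 4), dtype=np.float32))   # run once so the model is built
tf.saved_model.save(model, '/tmp/exported_model')
```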
It is now {:,} bytes'.format(self.name, message))\n\t\t\n\tdef decrease(self, message):\n\t\tprint('{} noticed that the file size decreased. It is now {:,} bytes'.format(self.name, message))\n\n\n\nclass Publisher(object):\n\tdef __init__(self, channels, path_of_file_to_watch):\n\t\tself.path = path_of_file_to_watch\n\t\tself.channels = {\tchannel : dict() \n\t\t\t\t\t\t\tfor channel \n\t\t\t\t\t\t\tin channels\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\tdef register(self, channel, who, callback=None):\n\t\tif callback == None:\n\t\t\tcallback = getattr(who, 'update')\n\t\tself.subscribers(channel)[who] = callback\n\t\t\n\tdef subscribers(self, channel):\n\t\treturn self.channels[channel]\n\t\t\n\tdef dispatch(self, channel, message):\n\t\tsubs = self.subscribers(channel)\n\t\tfor subscriber, callback in subs.items():\n\t\t\tcallback(message)\n\n\tdef start_watching(self):\n\t\tOldSize = os.stat(self.path).st_size\n\t\twhile True:\n\t\t\tNewSize = os.stat(self.path).st_size\n\t\t\tif NewSize>OldSize:\n\t\t\t\tpub.dispatch(\"up\", NewSize*1024)\n\t\t\telif NewSize', params=params)\n self.tester = Tester(topo)\n self.tester.run_for(60)\n self.tester.tuple_count(test_op.stream, num_result_tuples, exact=True)\n\n cfg = {}\n job_config = streamsx.topology.context.JobConfig(tracing='error')\n job_config.add(cfg)\n\n # Run the test\n self.tester.test(self.test_ctxtype, cfg, assert_on_fail=True, always_collect_logs=True)\n print (str(self.tester.result))\n result = th.parseApplicationTrace(self.tester.result[\"application_logs\"], \"object_storage_test\")\n print (\"RESULT \"+str(result))\n\n def _check_created_objects(self, n_objects, s3_client, bucket_name):\n test_object_names = []\n for num in range(n_objects):\n test_object_names.append('test_data_'+str(num)) # expected keys - n objects are created by SPL application\n # check if n objects exists and if size is not zero\n s3.validateObjects(s3_client, bucket_name, test_object_names)\n\n @unittest.skipIf(th.cos_credentials() == False, \"Missing \"+th.COS_CREDENTIALS()+\" environment variable.\")\n def test_write_n_objects_s3a(self):\n nObjects = 10 # number of objects to be created by SPL application\n self._build_launch_validate(\"test_write_n_objects_s3a\", \"com.ibm.streamsx.objectstorage.s3.test::WriteDurationTestS3aComp\", {'dataSize':100000, 'numObjects':nObjects, 'accessKeyID':self.access_key, 'secretAccessKey':self.secret_access_key, 'bucket':self.bucket_name}, 1, 'performance/com.ibm.streamsx.objectstorage.s3.test')\n self._check_created_objects(nObjects, self.s3_client, self.bucket_name)\n\n @unittest.skipIf(th.cos_credentials() == False, \"Missing \"+th.COS_CREDENTIALS()+\" environment variable.\")\n def test_write_n_objects_cos(self):\n nObjects = 10 # number of objects to be created by SPL application\n self._build_launch_validate(\"test_write_n_objects_cos\", \"com.ibm.streamsx.objectstorage.s3.test::WriteDurationTestCosComp\", {'dataSize':100000, 'numObjects':nObjects, 'accessKeyID':self.access_key, 'secretAccessKey':self.secret_access_key, 'bucket':self.bucket_name}, 1, 'performance/com.ibm.streamsx.objectstorage.s3.test')\n self._check_created_objects(nObjects, self.s3_client, self.bucket_name)\n\n @unittest.skipIf(th.iam_credentials() == False, \"Missing \"+th.COS_IAM_CREDENTIALS()+\" environment variable.\")\n def test_write_n_objects_s3a_iam(self):\n nObjects = 10 # number of objects to be created by SPL application\n self._build_launch_validate(\"test_write_n_objects_s3a_iam\", 
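The file-watcher record above is a small observer pattern: `Publisher` keeps a dict of channels, each mapping subscriber to callback, and `dispatch` fans a message out to every callback on a channel. Two things worth flagging: `start_watching` dispatches through a module-level `pub` rather than `self.dispatch(...)`, coupling the class to a global, and since `os.stat(...).st_size` is already in bytes, the `NewSize*1024` it publishes appears to overstate the value it labels as bytes. A minimal self-contained version of the pattern:

```python
class Publisher:
    """Channel-based publish/subscribe, as in the record above."""
    def __init__(self, channels):
        self.channels = {name: {} for name in channels}

    def register(self, channel, who, callback=None):
        self.channels[channel][who] = callback or who.update

    def dispatch(self, channel, message):
        for callback in self.channels[channel].values():
            callback(message)

class Printer:
    def update(self, message):
        print('got:', message)

pub = Publisher(['up', 'down'])
pub.register('up', Printer())
pub.dispatch('up', 4096)   # -> got: 4096
```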
\"com.ibm.streamsx.objectstorage.s3.test::WriteDurationTestIAMComp\", {'dataSize':100000, 'numObjects':nObjects, 'IAMApiKey':self.iam_api_key, 'IAMServiceInstanceId':self.service_instance_id, 'objectStorageURI':self.uri_s3a}, 1, 'performance/com.ibm.streamsx.objectstorage.s3.test')\n self._check_created_objects(nObjects, self.s3_client_iam, self.bucket_name_iam)\n\n @unittest.skipIf(th.iam_credentials() == False, \"Missing \"+th.COS_IAM_CREDENTIALS()+\" environment variable.\")\n def test_write_n_objects_cos_iam(self):\n nObjects = 10 # number of objects to be created by SPL application\n self._build_launch_validate(\"test_write_n_objects_cos_iam\", \"com.ibm.streamsx.objectstorage.s3.test::WriteDurationTestIAMComp\", {'dataSize':100000, 'numObjects':nObjects, 'IAMApiKey':self.iam_api_key, 'IAMServiceInstanceId':self.service_instance_id, 'objectStorageURI':self.uri_cos}, 1, 'performance/com.ibm.streamsx.objectstorage.s3.test')\n self._check_created_objects(nObjects, self.s3_client_iam, self.bucket_name_iam)\n\nclass TestInstall(TestDistributed):\n \"\"\" Test invocations of composite operators in local Streams instance using installed toolkit \"\"\"\n\n def setUp(self):\n Tester.setup_distributed(self)\n self.streams_install = os.environ.get('STREAMS_INSTALL')\n self.object_storage_toolkit_location = self.streams_install+'/toolkits/com.ibm.streamsx.objectstorage'\n\nclass TestCloud(TestDistributed):\n \"\"\" Test invocations of composite operators in Streaming Analytics Service using local toolkit \"\"\"\n\n @classmethod\n def setUpClass(self):\n super().setUpClass()\n th.stop_streams_cloud_instance()\n th.start_streams_cloud_instance()\n\n @classmethod\n def tearDownClass(self):\n th.stop_streams_cloud_instance()\n\n def setUp(self):\n Tester.setup_streaming_analytics(self, force_remote_build=True)\n self.object_storage_toolkit_location = \"../com.ibm.streamsx.objectstorage\"\n\nclass TestCloudInstall(TestDistributed):\n \"\"\" Test invocations of composite operators in Streaming Analytics Service using remote toolkit \"\"\"\n\n @classmethod\n def setUpClass(self):\n super().setUpClass()\n th.stop_streams_cloud_instance()\n th.start_streams_cloud_instance()\n\n @classmethod\n def tearDownClass(self):\n th.stop_streams_cloud_instance()\n\n def setUp(self):\n Tester.setup_streaming_analytics(self, force_remote_build=True)\n # remote toolkit is used\n self.object_storage_toolkit_location = None\n\n\n","sub_path":"test/test_performance.py","file_name":"test_performance.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"282172776","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtilities to get details from the course catalog API.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom logging import getLogger\n\nfrom edx_rest_api_client.client import EdxRestApiClient\nfrom edx_rest_api_client.exceptions import SlumberBaseException\nfrom opaque_keys import InvalidKeyError\nfrom opaque_keys.edx.keys import CourseKey\nfrom requests.exceptions import ConnectionError, Timeout # pylint: disable=redefined-builtin\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom enterprise.utils import MultipleProgramMatchError, NotConnectedToOpenEdX, get_course_id_from_course_run_id\n\ntry:\n from openedx.core.lib.token_utils import JwtBuilder\nexcept ImportError:\n JwtBuilder = 
None\n\ntry:\n from openedx.core.djangoapps.catalog.models import CatalogIntegration\nexcept ImportError:\n CatalogIntegration = None\n\ntry:\n from openedx.core.lib.edx_api_utils import get_edx_api_data\nexcept ImportError:\n get_edx_api_data = None\n\n\nLOGGER = getLogger(__name__)\n\n\ndef course_discovery_api_client(user):\n \"\"\"\n Return a Course Discovery API client setup with authentication for the specified user.\n \"\"\"\n if JwtBuilder is None:\n raise NotConnectedToOpenEdX(\n _(\"To get a Catalog API client, this package must be \"\n \"installed in an Open edX environment.\")\n )\n\n scopes = ['email', 'profile']\n expires_in = settings.OAUTH_ID_TOKEN_EXPIRATION\n jwt = JwtBuilder(user).build_token(scopes, expires_in)\n return EdxRestApiClient(settings.COURSE_CATALOG_API_URL, jwt=jwt)\n\n\nclass CourseCatalogApiClient(object):\n \"\"\"\n Object builds an API client to make calls to the Catalog API.\n \"\"\"\n\n SEARCH_ALL_ENDPOINT = 'search/all/'\n CATALOGS_ENDPOINT = 'catalogs'\n CATALOGS_COURSES_ENDPOINT = 'catalogs/{}/courses/'\n COURSES_ENDPOINT = 'courses'\n COURSE_RUNS_ENDPOINT = 'course_runs'\n PROGRAMS_ENDPOINT = 'programs'\n PROGRAM_TYPES_ENDPOINT = 'program_types'\n\n DEFAULT_VALUE_SAFEGUARD = object()\n\n def __init__(self, user):\n \"\"\"\n Create an Course Catalog API client setup with authentication for the specified user.\n\n This method retrieves an authenticated API client that can be used\n to access the course catalog API. It raises an exception to be caught at\n a higher level if the package doesn't have OpenEdX resources available.\n \"\"\"\n if CatalogIntegration is None:\n raise NotConnectedToOpenEdX(\n _(\"To get a CatalogIntegration object, this package must be \"\n \"installed in an Open edX environment.\")\n )\n if get_edx_api_data is None:\n raise NotConnectedToOpenEdX(\n _(\"To parse a Catalog API response, this package must be \"\n \"installed in an Open edX environment.\")\n )\n\n self.user = user\n self.client = course_discovery_api_client(user)\n\n def get_paginated_search_results(self, querystring=None):\n \"\"\"\n Return paginated search result from all data.\n\n Returns:\n dict: API response with search results, as well as links to next and previous pages.\n\n \"\"\"\n return self._load_data(\n self.SEARCH_ALL_ENDPOINT,\n default=[],\n querystring=querystring,\n traverse_pagination=False,\n many=False,\n )\n\n def get_all_catalogs(self):\n \"\"\"\n Return a list of all course catalogs, including name and ID.\n\n Returns:\n list: List of catalogs available for the user.\n\n \"\"\"\n return self._load_data(\n self.CATALOGS_ENDPOINT,\n default=[]\n )\n\n def get_catalog(self, catalog_id):\n \"\"\"\n Return specified course catalog.\n\n Returns:\n dict: catalog details if it is available for the user.\n\n \"\"\"\n return self._load_data(\n self.CATALOGS_ENDPOINT,\n default=[],\n resource_id=catalog_id\n )\n\n def get_paginated_catalog_courses(self, catalog_id, querystring=None):\n \"\"\"\n Return paginated response for all catalog courses.\n\n Returns:\n dict: API response with links to next and previous pages.\n\n \"\"\"\n return self._load_data(\n self.CATALOGS_COURSES_ENDPOINT.format(catalog_id),\n default=[],\n querystring=querystring,\n traverse_pagination=False,\n many=False,\n )\n\n def get_paginated_catalogs(self, querystring=None):\n \"\"\"\n Return a paginated list of course catalogs, including name and ID.\n\n Returns:\n dict: Paginated response containing catalogs available for the user.\n\n \"\"\"\n return self._load_data(\n 
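The `try`/`except ImportError` blocks above are the standard optional-dependency guard: import at module scope, fall back to `None`, and raise a descriptive error only when the missing piece is actually used (here, `NotConnectedToOpenEdX` inside the client constructors). A generic sketch of the idiom; the module and class names below are hypothetical:

```python
# Optional-dependency guard: degrade at import time, fail loudly at use time.
try:
    from some_optional_package import FancyClient  # hypothetical dependency
except ImportError:
    FancyClient = None

def get_client():
    if FancyClient is None:
        raise RuntimeError(
            'This feature requires some_optional_package to be installed.')
    return FancyClient()
```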
self.CATALOGS_ENDPOINT,\n default=[],\n querystring=querystring,\n traverse_pagination=False,\n many=False\n )\n\n def get_catalog_courses(self, catalog_id):\n \"\"\"\n Return the courses included in a single course catalog by ID.\n\n Args:\n catalog_id (int): The catalog ID we want to retrieve.\n\n Returns:\n list: Courses of the catalog in question\n\n \"\"\"\n return self._load_data(\n self.CATALOGS_COURSES_ENDPOINT.format(catalog_id),\n default=[]\n )\n\n def get_course_and_course_run(self, course_run_id):\n \"\"\"\n Return the course and course run metadata for the given course run ID.\n\n Arguments:\n course_run_id (str): The course run ID.\n\n Returns:\n tuple: The course metadata and the course run metadata.\n \"\"\"\n # Parse the course ID from the course run ID.\n course_id = get_course_id_from_course_run_id(course_run_id)\n # Retrieve the course metadata from the catalog service.\n course = self.get_course_details(course_id)\n\n course_run = None\n if course:\n # Find the specified course run.\n course_run = None\n course_runs = [course_run for course_run in course['course_runs'] if course_run['key'] == course_run_id]\n if course_runs:\n course_run = course_runs[0]\n\n return course, course_run\n\n def get_course_details(self, course_id):\n \"\"\"\n Return the details of a single course by id - not a course run id.\n\n Args:\n course_id (str): The unique id for the course in question.\n\n Returns:\n dict: Details of the course in question.\n\n \"\"\"\n return self._load_data(\n self.COURSES_ENDPOINT,\n resource_id=course_id,\n many=False\n )\n\n def get_course_run(self, course_run_id):\n \"\"\"\n Return course_run data, including name, ID and seats.\n\n Args:\n course_run_id(string): Course run ID (aka Course Key) in string format.\n\n Returns:\n dict: Course run data provided by Course Catalog API.\n\n \"\"\"\n return self._load_data(\n self.COURSE_RUNS_ENDPOINT,\n resource_id=course_run_id\n )\n\n def get_program_by_title(self, program_title):\n \"\"\"\n Return single program by name, or None if not found.\n\n Arguments:\n program_title(string): Program title as seen by students and in Course Catalog Admin\n\n Returns:\n dict: Program data provided by Course Catalog API\n\n \"\"\"\n all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[])\n matching_programs = [program for program in all_programs if program.get('title') == program_title]\n if len(matching_programs) > 1:\n raise MultipleProgramMatchError(len(matching_programs))\n elif len(matching_programs) == 1:\n return matching_programs[0]\n else:\n return None\n\n def get_program_by_uuid(self, program_uuid):\n \"\"\"\n Return single program by UUID, or None if not found.\n\n Arguments:\n program_uuid(string): Program UUID in string form\n\n Returns:\n dict: Program data provided by Course Catalog API\n\n \"\"\"\n return self._load_data(\n self.PROGRAMS_ENDPOINT,\n resource_id=program_uuid,\n default=None\n )\n\n def get_program_course_keys(self, program_uuid):\n \"\"\"\n Get a list of the course IDs (not course run IDs) contained in the program.\n\n Arguments:\n program_uuid (str): Program UUID in string form\n\n Returns:\n list(str): List of course keys in string form that are included in the program\n\n \"\"\"\n program_details = self.get_program_by_uuid(program_uuid)\n if not program_details:\n return []\n return [course['key'] for course in program_details.get('courses', [])]\n\n def get_program_type_by_slug(self, slug):\n \"\"\"\n Get a program type by its slug.\n\n Arguments:\n slug (str): The slug to 
identify the program type.\n\n Returns:\n dict: A program type object.\n\n \"\"\"\n return self._load_data(\n self.PROGRAM_TYPES_ENDPOINT,\n resource_id=slug,\n default=None,\n )\n\n def get_common_course_modes(self, course_run_ids):\n \"\"\"\n Find common course modes for a set of course runs.\n\n This function essentially returns an intersection of types of seats available\n for each course run.\n\n Arguments:\n course_run_ids(Iterable[str]): Target Course run IDs.\n\n Returns:\n set: course modes found in all given course runs\n\n Examples:\n # run1 has prof and audit, run 2 has the same\n get_common_course_modes(['course-v1:run1', 'course-v1:run2'])\n {'prof', 'audit'}\n\n # run1 has prof and audit, run 2 has only prof\n get_common_course_modes(['course-v1:run1', 'course-v1:run2'])\n {'prof'}\n\n # run1 has prof and audit, run 2 honor\n get_common_course_modes(['course-v1:run1', 'course-v1:run2'])\n {}\n\n # run1 has nothing, run2 has prof\n get_common_course_modes(['course-v1:run1', 'course-v1:run2'])\n {}\n\n # run1 has prof and audit, run 2 prof, run3 has audit\n get_common_course_modes(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])\n {}\n\n # run1 has nothing, run 2 prof, run3 has prof\n get_common_course_modes(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])\n {}\n\n \"\"\"\n available_course_modes = None\n for course_run_id in course_run_ids:\n course_run = self.get_course_run(course_run_id) or {}\n course_run_modes = {seat.get('type') for seat in course_run.get('seats', [])}\n\n if available_course_modes is None:\n available_course_modes = course_run_modes\n else:\n available_course_modes &= course_run_modes\n\n if not available_course_modes:\n return available_course_modes\n\n return available_course_modes\n\n def is_course_in_catalog(self, catalog_id, course_id):\n \"\"\"\n Determine if the given course or course run ID is contained in the catalog with the given ID.\n\n Args:\n catalog_id (int): The ID of the catalog\n course_id (str): The ID of the course or course run\n\n Returns:\n bool: Whether the course or course run is contained in the given catalog\n \"\"\"\n try:\n # Determine if we have a course run ID, rather than a plain course ID\n course_run_id = str(CourseKey.from_string(course_id))\n except InvalidKeyError:\n course_run_id = None\n\n endpoint = self.client.catalogs(catalog_id).contains\n\n if course_run_id:\n resp = endpoint.get(course_run_id=course_run_id)\n else:\n resp = endpoint.get(course_id=course_id)\n\n return resp.get('courses', {}).get(course_id, False)\n\n def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):\n \"\"\"\n Load data from API client.\n\n Arguments:\n resource(string): type of resource to load\n default(any): value to return if API query returned empty result. 
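`get_common_course_modes` above computes a running set intersection: starting from the first run's seat types, each further run can only narrow the result, and the early `return` once the set is empty saves the remaining API calls. The core logic in isolation:

```python
def common_modes(runs):
    """Seat types present in every course run (set intersection)."""
    available = None
    for run in runs:
        modes = {seat['type'] for seat in run.get('seats', [])}
        available = modes if available is None else available & modes
        if not available:
            break   # an empty intersection can never grow again
    return available or set()

runs = [{'seats': [{'type': 'audit'}, {'type': 'verified'}]},
        {'seats': [{'type': 'verified'}]}]
print(common_modes(runs))   # {'verified'}
```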
Sensible values: [], {}, None etc.\n\n Returns:\n dict: Deserialized response from Course Catalog API\n\n \"\"\"\n default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}\n try:\n return get_edx_api_data(\n api_config=CatalogIntegration.current(),\n resource=resource,\n api=self.client,\n **kwargs\n ) or default_val\n except (SlumberBaseException, ConnectionError, Timeout) as exc:\n LOGGER.exception(\n 'Failed to load data from resource %s with kwargs %s due to: %s',\n resource, kwargs, str(exc)\n )\n return default_val\n\n\nclass CourseCatalogApiServiceClient(CourseCatalogApiClient):\n \"\"\"\n Catalog API client which uses the configured Catalog service user.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create an Course Catalog API client setup with authentication for the\n configured catalog service user.\n \"\"\"\n if CatalogIntegration is None:\n raise NotConnectedToOpenEdX(\n _(\"To get a CatalogIntegration object, this package must be \"\n \"installed in an Open edX environment.\")\n )\n\n catalog_integration = CatalogIntegration.current()\n if catalog_integration.enabled:\n try:\n user = catalog_integration.get_service_user()\n super(CourseCatalogApiServiceClient, self).__init__(user)\n except ObjectDoesNotExist:\n raise ImproperlyConfigured(_(\"The configured CatalogIntegration service user does not exist.\"))\n else:\n raise ImproperlyConfigured(_(\"There is no active CatalogIntegration.\"))\n\n @classmethod\n def program_exists(cls, program_uuid):\n \"\"\"\n Get whether the program exists or not.\n \"\"\"\n try:\n return bool(cls().get_program_by_uuid(program_uuid))\n except ImproperlyConfigured:\n return False\n","sub_path":"edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages/enterprise/api_client/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":14584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15363111","text":"secret_word = 'apple'\nprint(len(secret_word) * '*')\nletters = []\n\nwhile True:\n\tguess = input('Enter a letter: ')\n\tif guess == 'quit': # end guess\n\t\tbreak\n\telif guess in secret_word:\n\t\tletters.append(guess)\n\t\tword = ''\n\t\tfor char in secret_word:\n\t\t\tif char in letters:\n\t\t\t\tword += char\n\t\t\telse:\n\t\t\t\tword += '*'\n\t\tprint(word)\t\t\n\telse:\n\t\tprint('Guess wrong!')\n\t\t\n\n\n\n\n\n\n","sub_path":"others/guess_word.py","file_name":"guess_word.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591248794","text":"from obspy import UTCDateTime\nfrom obspy.fdsn import Client\nfrom obspy.earthworm import Client as EWClient\nfrom obspy.core.trace import Trace\nfrom obspy.core.stream import Stream\nfrom obspy.signal.trigger import classicSTALTA, triggerOnset\nimport numpy as np\nfrom scipy import stats\nfrom scipy.fftpack import fft\n\ndef getData(date, opt):\n\n \"\"\"\n Download data from IRIS or Earthworm waveserver with padding and filter it.\n\n date: UTCDateTime of beginning of period of interest\n opt: Options object describing station/run parameters\n \n Returns ObsPy stream object\n \"\"\" \n \n # Choose where data are downloaded automatically via options\n # Download data with padding to account for triggering algorithm\n # Make overlap symmetric\n \n if opt.server == \"IRIS\":\n client = Client(\"IRIS\")\n st = client.get_waveforms(opt.network, opt.station, opt.location, opt.channel,\n date - opt.atrig, date + opt.nsec + 
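`DEFAULT_VALUE_SAFEGUARD = object()` in the record above is a sentinel: a unique object that lets `_load_data` tell "caller passed no default" apart from legitimately falsy defaults such as `[]`, `{}`, or `None`. (Idiomatically the comparison would use `is` rather than `!=`, since a sentinel check is about identity.) A stripped-down version of the idiom:

```python
_MISSING = object()   # unique sentinel, as with DEFAULT_VALUE_SAFEGUARD

def load_data(fetch, default=_MISSING):
    """Distinguish 'no default passed' from falsy defaults like [] or None."""
    default_val = {} if default is _MISSING else default
    try:
        return fetch() or default_val
    except ConnectionError:
        return default_val

print(load_data(lambda: None))               # {}  (safeguard default)
print(load_data(lambda: None, default=[]))   # []
```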
opt.atrig)\n else:\n client = EWClient(opt.server, opt.port)\n st = client.getWaveform(opt.network, opt.station, opt.location, opt.channel,\n date - opt.atrig, date + opt.nsec + opt.atrig)\n\n st = st.detrend() # can create noise artifacts??\n st = st.merge(method=1, fill_value='interpolate')\n st = st.filter(\"highpass\", freq=opt.fhigh, corners=2,\n zerophase=True)\n\n return st\n\n\ndef getCatData(date, opt):\n\n \"\"\"\n Download data from IRIS or Earthworm waveserver with padding and filter it. This is\n a specialized version getData() for catalog events, pulling a smaller amount of time\n around a known event.\n\n date: UTCDateTime of known catalog event\n opt: Options object describing station/run parameters\n \n Returns ObsPy stream object\n \"\"\" \n \n # Choose where data are downloaded automatically via options\n # Download data with padding to account for triggering algorithm\n # Make overlap symmetric\n \n if opt.server == \"IRIS\":\n client = Client(\"IRIS\")\n st = client.get_waveforms(opt.network, opt.station, opt.location, opt.channel,\n date - opt.atrig, date + 3*opt.atrig)\n else:\n client = EWClient(opt.server, opt.port)\n st = client.getWaveform(opt.network, opt.station, opt.location, opt.channel,\n date - opt.atrig, date + 3*opt.atrig)\n\n st = st.detrend() # can create noise artifacts??\n st = st.merge(method=1, fill_value='interpolate')\n st = st.filter(\"highpass\", freq=opt.fhigh, corners=2,\n zerophase=True)\n\n return st\n\n\ndef trigger(st, rtable, opt):\n\n \"\"\"\n Run triggering algorithm on a stream of data.\n\n st: OBSPy stream of data\n rtable: Repeater table contains reference time of previous trigger in samples\n opt: Options object describing station/run parameters\n\n Returns triggered traces as OBSPy trace object updates ptime for next run \n \"\"\"\n\n # Filter the data for triggering\n\n st_f = st.copy()\n st_f = st_f.filter(\"bandpass\", freqmin=opt.fmin, freqmax=opt.fmax, corners=2,\n zerophase=True)\n tr = st[0]\n tr_f = st_f[0]\n t = tr.stats.starttime\n\n cft = classicSTALTA(tr_f.data, opt.swin*opt.samprate, opt.lwin*opt.samprate)\n on_off = triggerOnset(cft, opt.trigon, opt.trigoff)\n \n if len(on_off) > 0:\n \n pick = on_off[:,0] \n ind = 0\n \n # Slice out the raw data (not filtered except for highpass to reduce long period\n # drift) and save the maximum STA/LTA ratio value for\n # use in orphan expiration\n \n # Convert ptime from time of last trigger to samples before start time \n if rtable.attrs.ptime:\n ptime = (UTCDateTime(rtable.attrs.ptime) - t)*opt.samprate\n else:\n ptime = -opt.mintrig*opt.samprate\n \n for n in range(len(pick)):\n \n ttime = pick[n]\n \n if (ttime >= opt.atrig*opt.samprate) and (ttime >= ptime +\n opt.mintrig*opt.samprate) and (ttime < len(tr.data) -\n 2*opt.atrig*opt.samprate):\n \n ptime = ttime\n if ind is 0:\n # Slice and save as first trace \n trigs = st.slice(t - opt.ptrig + ttime/opt.samprate,\n t + opt.atrig + ttime/opt.samprate)\n trigs[ind].stats.maxratio = np.amax(cft[on_off[n,0]:(on_off[n,1]+1)])\n ind = ind+1\n else:\n # Slice and append to previous traces\n trigs = trigs.append(tr.slice(\n t - opt.ptrig + ttime/opt.samprate,\n t + opt.atrig + ttime/opt.samprate))\n trigs[ind].stats.maxratio = np.amax(cft[on_off[n,0]:(on_off[n,1]+1)])\n ind = ind+1\n \n if ind is 0:\n rtable.attrs.ptime = (t + len(tr.data)/opt.samprate -\n opt.mintrig*opt.samprate).isoformat()\n return []\n else:\n rtable.attrs.ptime = (t + ptime/opt.samprate).isoformat()\n return trigs\n else:\n rtable.attrs.ptime = (t + 
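The `trigger` function above runs a classic STA/LTA detector: the data is bandpass filtered, a short-window/long-window average ratio (the characteristic function) is computed, and on/off threshold crossings mark candidate events. A self-contained demo on synthetic data; note that newer ObsPy versions renamed `classicSTALTA`/`triggerOnset`, used above, to the snake_case forms used here:

```python
import numpy as np
from obspy.signal.trigger import classic_sta_lta, trigger_onset

sr = 100.0                        # sampling rate in Hz (assumed)
rng = np.random.default_rng(0)
data = rng.normal(size=6000)      # 60 s of noise
data[3000:3100] += 8.0            # synthetic "event" at t = 30 s

cft = classic_sta_lta(data, int(0.7 * sr), int(8.0 * sr))  # STA/LTA ratio
on_off = trigger_onset(cft, 3.0, 1.5)   # trigger-on / trigger-off thresholds
print(on_off)   # [[on_sample, off_sample]] pairs, roughly around 3000
```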
len(tr.data)/opt.samprate -\n opt.mintrig*opt.samprate).isoformat()\n return []\n\n\ndef dataclean(alltrigs, opt, flag=1):\n\n \"\"\"\n Examine triggers and weed out spikes and calibration pulses using kurtosis and\n outlier ratios\n \n alltrigs: triggers output from triggering\n opt: opt from config\n flag: 1 if defining window to check, 0 if want to check whole waveform for spikes\n (note that different threshold values should be used for different window lengths)\n \n Returns good trigs (trigs) and junk (junk)\n \"\"\"\n \n trigs=Stream()\n junk=Stream()\n for i in range(len(alltrigs)):\n #define data\n dat=alltrigs[i].data\n if flag==0:\n datcut=dat\n else:\n datcut=alltrigs[i].data[range(int((opt.ptrig-opt.kurtwin/2)*opt.samprate),\n int((opt.ptrig+opt.kurtwin/2)*opt.samprate))]\n \n #calculate kurtosis in window\n k = stats.kurtosis(datcut)\n #compute kurtosis of frequency amplitude spectrum next\n \n datf = np.absolute(fft(dat))\n kf = stats.kurtosis(datf)\n #calculate outlier ratio using z ((data-median)/mad), outliers have z>4.45\n mad = np.median(np.absolute(dat - np.median(dat)))\n z=(dat-np.median(dat))/mad\n orm = len(z[z>4.45])/len(z)\n if k= 0:\n if(delta == -1):\n print(\"gets muted\")\n volume.mute = True\n if(delta == 0):\n volume.mute = False\n print(\"volume.GetMasterVolume(): %s\" % volume.GetMasterVolume())\n volume.SetMasterVolume(volume.GetMasterVolume() + delta, None)\n\nif __name__ == \"__main__\":\n main()","sub_path":"ControleComputer.py","file_name":"ControleComputer.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"262682549","text":"import os\nimport csv\nimport argparse\nimport time\n\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom mscn.util import *\nfrom mscn.data import get_train_datasets, load_sample, load_dicts, make_dataset\nfrom mscn.model import SetConv\nfrom mscn.datasets import LoadForest\nfrom mscn.queries import LoadForestQueries\n\ndef error_metric(est_card, card):\n # both + 1 in case est_card or card being 0\n if est_card > card:\n return (est_card + 1) / (card + 1)\n else:\n return (card + 1) / (est_card + 1)\n\ndef unnormalize_torch(vals, min_val, max_val):\n vals = (vals * (max_val - min_val)) + min_val\n return torch.exp(vals) - 1\n\ndef qerror_loss(preds, targets, min_val, max_val):\n qerror = []\n preds = unnormalize_torch(preds, min_val, max_val)\n targets = unnormalize_torch(targets, min_val, max_val)\n\n for i in range(len(targets)):\n qerror.append(error_metric(preds[i], targets[i]))\n return torch.mean(torch.cat(qerror))\n\ndef print_qerror(preds_unnorm, labels_unnorm):\n qerror = []\n for i in range(len(preds_unnorm)):\n qerror.append(error_metric(float(preds_unnorm[i]), float(labels_unnorm[i])))\n\n print(\"Median: {}\".format(np.median(qerror)))\n print(\"90th percentile: {}\".format(np.percentile(qerror, 90)))\n print(\"95th percentile: {}\".format(np.percentile(qerror, 95)))\n print(\"99th percentile: {}\".format(np.percentile(qerror, 99)))\n print(\"Max: {}\".format(np.max(qerror)))\n print(\"Mean: {}\".format(np.mean(qerror)))\n return np.array(qerror)\n\ndef predict(model, data_loader, cuda):\n preds = []\n t_total = 0.\n\n model.eval()\n for batch_idx, data_batch in enumerate(data_loader):\n\n samples, predicates, targets, sample_masks, predicate_masks = data_batch\n\n if cuda:\n samples, predicates, targets = samples.cuda(), predicates.cuda(), targets.cuda()\n sample_masks, 
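`dataclean` above screens triggers with two statistics: kurtosis of the waveform (and of its amplitude spectrum) to flag spikes, and an outlier ratio built from MAD-scaled z-scores. One caution: under Python 2, which this module appears to target (note the old `obspy.fdsn` import), `len(z[z>4.45])/len(z)` is integer division and floors to 0 for any ratio below 1, so true division is needed for the ratio to be meaningful. A standalone version of the metrics:

```python
import numpy as np
from scipy import stats

def spike_metrics(trace):
    """Kurtosis plus a MAD-based outlier ratio, as in dataclean above."""
    k = stats.kurtosis(trace)
    med = np.median(trace)
    mad = np.median(np.abs(trace - med))
    z = (trace - med) / mad
    outlier_ratio = np.count_nonzero(z > 4.45) / len(z)   # true division
    return k, outlier_ratio

rng = np.random.default_rng(1)
clean = rng.normal(size=2000)
spiky = clean.copy()
spiky[1000] = 60.0                # one large spike
print(spike_metrics(clean))       # near-zero kurtosis, ~0 outlier ratio
print(spike_metrics(spiky))       # large kurtosis from the single spike
```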
predicate_masks = sample_masks.cuda(), predicate_masks.cuda()\n samples, predicates, targets = Variable(samples), Variable(predicates), Variable(\n targets)\n sample_masks, predicate_masks = Variable(sample_masks), Variable(predicate_masks)\n\n t = time.time()\n outputs = model(samples, predicates, sample_masks, predicate_masks)\n t_total += time.time() - t\n\n for i in range(outputs.shape[0]):\n preds.append(outputs[i].cpu().item())\n\n return preds, t_total\n\ndef train(query, num_samples, num_epochs, batch_size, hid_units, cuda, seed):\n\n # load queires\n queries, labels = LoadForestQueries(query, split_close_range=True)\n\n # Load training and validation data\n dicts, column_min_max_vals, min_val, max_val, labels_train, labels_test, max_num_predicates, train_data, test_data = get_train_datasets(\n queries, labels, num_samples, seed)\n column2vec, op2vec = dicts\n\n # Train model\n predicate_feats = len(column2vec) + len(op2vec) + 1\n state = {\n 'min_val': min_val,\n 'max_val': max_val\n }\n\n model = SetConv(num_samples, predicate_feats, hid_units)\n model_size = model.size()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n best_valid_loss = float('inf')\n\n if cuda:\n model.cuda()\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n model.train()\n for epoch in range(num_epochs):\n loss_total = 0.\n\n for batch_idx, data_batch in enumerate(train_data_loader):\n samples, predicates, targets, sample_masks, predicate_masks = data_batch\n\n if cuda:\n samples, predicates, targets = samples.cuda(), predicates.cuda(), targets.cuda()\n sample_masks, predicate_masks = sample_masks.cuda(), predicate_masks.cuda()\n samples, predicates, targets = Variable(samples), Variable(predicates), Variable(\n targets)\n sample_masks, predicate_masks = Variable(sample_masks), Variable(predicate_masks)\n\n optimizer.zero_grad()\n outputs = model(samples, predicates, sample_masks, predicate_masks)\n loss = qerror_loss(outputs, targets.float().reshape(-1, 1), min_val, max_val)\n loss_total += loss.item()\n loss.backward()\n optimizer.step()\n\n print(\"Epoch {}, loss: {}\".format(epoch, loss_total / len(train_data_loader)))\n\n # Get final training and validation set predictions\n # preds_train, t_total = predict(model, train_data_loader, cuda)\n # print(\"Prediction time per training sample: {}\".format(t_total / len(labels_train) * 1000))\n\n preds_test, t_total = predict(model, test_data_loader, cuda)\n print(\"Prediction time per validation sample: {}\".format(t_total / len(labels_test) * 1000))\n\n # Unnormalize\n # preds_train_unnorm = unnormalize_labels(preds_train, min_val, max_val)\n # labels_train_unnorm = unnormalize_labels(labels_train, min_val, max_val)\n preds_test_unnorm = unnormalize_labels(preds_test, min_val, max_val)\n labels_test_unnorm = unnormalize_labels(labels_test, min_val, max_val)\n\n # Print metrics\n # print(\"\\nQ-Error training set:\")\n # train_qerror = print_qerror(preds_train_unnorm, labels_train_unnorm)\n\n print(\"\\nQ-Error validation set:\")\n test_qerror = print_qerror(preds_test_unnorm, labels_test_unnorm)\n print(\"\")\n\n valid_loss = test_qerror.mean()\n if valid_loss < best_valid_loss:\n print('best valid loss for now!', valid_loss)\n best_valid_loss = valid_loss\n state['model_state_dict'] = model.state_dict()\n torch.save(state, os.path.join('model', '{}_{}_{}_{:.2f}.pt'\n .format(num_samples, hid_units, seed, model_size)))\n\ndef test(query, num_samples, 
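The error measure in the MSCN record above is the q-error: the symmetric ratio max(est/true, true/est), with +1 added to both sides so it stays defined when a cardinality is 0. It is the standard metric for cardinality estimation because it penalizes under- and over-estimates alike on a multiplicative scale. A vectorized version with a worked example:

```python
import numpy as np

def q_error(est, true):
    """Symmetric ratio error; +1 guards against zero cardinalities."""
    est, true = est + 1.0, true + 1.0
    return np.maximum(est / true, true / est)

preds = np.array([90.0, 1000.0, 3.0])
labels = np.array([100.0, 100.0, 300.0])
err = q_error(preds, labels)
print(err)                                   # [ 1.1099  9.9109 75.25  ]
print(np.median(err), np.percentile(err, 95))
```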
model_name, batch_size, hid_units, cuda, seed):\n # load queires\n queries, labels = LoadForestQueries(query, split_close_range=True)\n\n # load sample\n sample = load_sample(num_samples, seed)\n\n # load dicts from data\n table = LoadForest()\n column2vec, op2vec, column_min_max_vals = load_dicts(table)\n\n # load model\n predicate_feats = len(column2vec) + len(op2vec) + 1\n model = SetConv(num_samples, predicate_feats, hid_units)\n model_size = model.size()\n state = torch.load(os.path.join('model', '{}_{}_{}_{:.2f}.pt'\n .format(num_samples, hid_units, seed, model_size)))\n model.load_state_dict(state['model_state_dict'])\n\n # load min max label from model dict\n min_val = state['min_val']\n max_val = state['max_val']\n print('min val: {}, max_val: {}'.format(min_val, max_val))\n\n # Get feature encoding and proper normalization\n samples_enc = get_sample_bitmap(sample, queries)\n predicates_enc = encode_data(queries, column_min_max_vals, column2vec, op2vec)\n labels_test, _, _ = normalize_labels(labels, min_val, max_val)\n\n print(\"Number of test samples: {}\".format(len(labels_test)))\n\n max_num_predicates = max([len(p) for p in predicates_enc])\n\n # Get test set predictions\n test_data = make_dataset(samples_enc, predicates_enc, labels_test, max_num_predicates)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n preds_test, t_total = predict(model, test_data_loader, cuda)\n print(\"Prediction time per test sample: {}\".format(t_total / len(labels_test) * 1000))\n\n # Unnormalize\n preds_test_unnorm = unnormalize_labels(preds_test, min_val, max_val)\n\n # Print metrics\n print(\"\\nQ-Error:\")\n test_error = print_qerror(preds_test_unnorm, labels)\n\n # Write predictions\n file_name = os.path.join('results', '{}_{}.csv'.format(query, model_name))\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, \"w\") as f:\n writer = csv.writer(f)\n for i in range(len(preds_test_unnorm)):\n writer.writerow((test_error[i], preds_test_unnorm[i], labels[i]))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cmd\", help=\"train / test\", type=str, default='train')\n parser.add_argument(\"--query\", help=\"query name\", type=str, default='q20k')\n parser.add_argument(\"--model\", help=\"model name\", type=str, default='')\n parser.add_argument(\"--seed\", help=\"random seed\", type=int, default=123)\n parser.add_argument(\"--samples\", help=\"number of materialized samples\", type=int, default=1000)\n parser.add_argument(\"--epochs\", help=\"number of epochs (default: 500)\", type=int, default=500)\n parser.add_argument(\"--batch\", help=\"batch size (default: 1024)\", type=int, default=1024)\n parser.add_argument(\"--hid\", help=\"number of hidden units (default: 28)\", type=int, default=28)\n parser.add_argument(\"--cuda\", help=\"use CUDA\", action=\"store_true\")\n args = parser.parse_args()\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.cmd == 'train':\n train(args.query, args.samples, args.epochs, args.batch, args.hid, args.cuda, args.seed)\n elif args.cmd == 'test':\n test(args.query, args.samples, args.model, args.batch, args.hid, args.cuda, args.seed)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"54926112","text":"#!/usr/bin/env python3\nfrom readline import parse_and_bind, set_completer\nfrom itertools 
import count\nfrom sys import stdin\nfrom re import compile\n\nS_EXP = compile(r'\\(|\\)|\"([^\"]*)\"|\\d+|[^)\\s]+')\n\n\ndef get_env():\n    return {\n        'car': lambda arg, **kw: arg[0], 'cdr': lambda arg, **kw: arg[1:],\n        '+': lambda *a, **kw: sum(a), '-': lambda a, *b, **kw: a - sum(b),\n        '>': lambda a, b, **kw: a > b, 'eq': lambda a, b, **kw: a == b,\n        'print': lambda f, *args, **kw: print(f.format(*args)),\n        'atom': lambda arg, **kw: type(arg) in [str, int], }\n\n\ndef tokenize(raw):\n    return [a.group(1) or a.group(0) for a in S_EXP.finditer(raw)]\n\n\ndef parse(raw):\n    return _parse(tokenize(raw))\n\n\ndef _parse(tks):\n    if type(tks) == list:\n        return parse_list(tks)\n    else:\n        return int(tks) if tks.isnumeric() else tks\n\n\ndef parse_eval(code, env):\n    return [eval(e, env) for e in parse(code)]\n\n\ndef repl(code, env):\n    return [r and print(r) for r in parse_eval(code, env)]\n\n\ndef parse_list(tks):\n    def _parse_list():\n        while tks:\n            if tks[0] == ')':\n                tks.pop(0)\n                break\n            elif tks[0] == '(':\n                tks.pop(0)\n                yield _parse(tks)\n            else:\n                yield _parse(tks.pop(0))\n    return list(_parse_list())\n\n\ndef eval_procedure(name, args, env={}):\n    def define():\n        if type(args[0]) == str: # (define a (+ 1 2))\n            def_name, def_body = args[0], eval(args[1], env)\n        else: # define (a 2) (+ 2 3) (- 2 3)\n            def_name, def_body = args[0][0], lambda *c_args, env={\n            }: env.update(zip(args[0][1:], c_args)) or [\n                eval(expr, env) for expr in args[1:]][-1]\n        env[def_name] = def_body\n    try:\n        return {\n            'define': lambda: define(), 'quote': lambda: args[0],\n            'eval': lambda: eval(env[args[0]], env), 'cond': lambda: [\n                eval(c[1], dict(env)) for c in args if eval(c[0], env)][-1],\n        }[name]()\n    except KeyError:\n        return env[name](*[eval(arg, env=env) for arg in args], env=dict(env))\n\n\ndef eval(code, env):\n    try:\n        return {int: lambda: code, str: lambda: env[code], list: lambda:\n                eval_procedure(code[0], code[1:], env=env), }[type(code)]()\n    except KeyError:\n        return \"Unbound variable: {}\".format(\n            code[0] if isinstance(code, list) else code)\n    except TypeError:\n        return \"Call of non-procedure: {}\".format(code[0])\n\n\nif __name__ == \"__main__\":\n    env = get_env()\n    if stdin.isatty():\n        parse_and_bind(\"tab: complete\")\n        set_completer(\n            lambda x, y: [a for a in env.keys() if a.startswith(x)][y])\n        try:\n            [repl(input('> '), env) for _ in count()]\n        except (EOFError, KeyboardInterrupt):\n            print(\"\\nGood bye\")\n    else:\n        repl(stdin.read(), env)\n","sub_path":"crispy/crispy.py","file_name":"crispy.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"19747570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFeature selection functions:\nthis module contains functions that automate the feature selection process\n\n@author: orimo\n\"\"\"\n\n# import packages\nimport os\nimport numpy as np\nimport pandas as pd \n\nos.chdir('C:/Users/orimo/Documents/python_ml/python_ml_train_orimorhaim/pre_process')\nfrom utils import *\n\n\n\n\n\n\ndef var_threshold(train, precentile = 50, norm = True):\n    '''\n    This function finds the variance value that corresponds to the specified percentile across all features.\n    @param train pandas data frame for which the threshold will be calculated\n    @param precentile the percentage rank\n    @param norm - boolean flag indicating whether the dataset is already normalized\n    @return threshold - the variance value which corresponds to the specified percentile \n    '''\n    # adjustments \n    train_ = train if norm else normal(train)\n    precentile = precentile if 0 <= precentile <= 100 else 50 \n    \n    # extract threshold of variance\n    threshold = train_.describe()\n    threshold = np.percentile(threshold.loc['std'].values **2, precentile) # var = std^2\n    return threshold\n\n\n\n\n\ndef clr_low_var(train, threshold, norm = True):\n    '''\n    This function finds the features whose variance is lower than the given threshold and returns\n    the names of the features whose variance is higher than the threshold\n    @param train - pandas data frame\n    @param threshold - the variance value; every feature whose variance falls below it is excluded\n    @param norm - boolean flag indicating whether the dataset is already normalized\n    @return list of the features with higher variance than the threshold\n    '''\n    # adjustments \n    train_ = train if norm else normal(train)\n    cols = train_.select_dtypes([np.number]).columns.tolist()\n    \n    # exclude features\n    cols = [col for col in cols if (train_[col].std())**2 > threshold]\n    return cols\n\n\n\n\n\ndef exclude_low_variance(train, precentile, norm = True):\n    '''\n    This function uses the 'var_threshold' function to find the threshold corresponding to the specified percentile,\n    and the 'clr_low_var' function to return the names of the columns with higher variance than the threshold\n    '''\n    # adjustments \n    train_ = train if norm else normal(train)\n    \n    # extract threshold\n    threshold = var_threshold(train_, precentile = precentile, norm = True)\n    \n    # extract features names\n    cols = clr_low_var(train_, threshold, norm = True)\n    \n    return cols\n","sub_path":"pre_process/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"495157443","text":"import pdb\nfrom models.member import Member\nfrom models.session import Session\nfrom models.booking import Booking\n\nimport repositories.member_repository as member_repository\nimport repositories.session_repository as session_repository\nimport repositories.booking_repository as booking_repository\n\nbooking_repository.delete_all()\nsession_repository.delete_all()\nmember_repository.delete_all()\n\nmember1 = Member('Charlie', 8, 'Hound')\nmember_repository.save(member1)\n\nmember2 = Member('Cooper', 2, 'Spaniel')\nmember_repository.save(member2)\n\nmember3 = Member('Jess', 12, 'Retriever')\nmember_repository.save(member3)\n\nmember4 = Member('Archie', 1, 'Retriever')\nmember_repository.save(member4)\n\nsession1 = Session('Obedience', 'Monday 26th', 30, 'All')\nsession_repository.save(session1)\n\nsession2 = Session('Agility', 'Tuesday 27th', 60, 'All')\nsession_repository.save(session2)\n\nsession3 = Session('Puppy', 'Wednesday 28th', 30, 'All')\nsession_repository.save(session3)\n\nsession4 = Session('Scent work', 'Thursday 29th', 60, 'Hound')\nsession_repository.save(session4)\n\nbooking1 = Booking(member1, session4)\nbooking_repository.save(booking1)\n\nbooking2 = Booking(member4, session3)\nbooking_repository.save(booking2)\n\nbooking3 = Booking(member3, session2)\nbooking_repository.save(booking3)\n\nbooking4 = Booking(member2, session1)\nbooking_repository.save(booking4)\n\n\npdb.set_trace()\n\n\n","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"599133341","text":"from datetime import timedelta, datetime, tzinfo,timezone\nclass GMT12(tzinfo):\n    def 
utcoffset(self,dt):\n return timedelta(hours=12) + self.dst(dt)\n def dst(self,dt):\n #DST stats from last Sunday 2 am in Sep\n #and ends at the first Sunday 3 am in April\n d=datetime(dt.year,10,1,2)\n self.dston = d-timedelta(days=d.weekday()+1)\n d=datetime(dt.year,4,1,3)\n self.dstoff=d+timedelta(days=6-d.weekday())\n if self.dston >dt.replace(tzinfo=None) >=self.dstoff:\n return timedelta(0)\n else:\n return timedelta(hours=1)\n def tzname(self,dt):\n return \"NZST GMT+12\"\n\ngmt12=GMT12()\ndt1=datetime(2015,9,27,1, tzinfo=gmt12)\ndt2=datetime(2015,9,27,2, tzinfo=gmt12)\ndt3=datetime(2016,4,3,2, tzinfo=gmt12)\ndt4=datetime(2016,4,3,3, tzinfo=gmt12)\ndt5=datetime(2015,5,3,3, tzinfo=gmt12)\ndt6=datetime(2016,2,3,3, tzinfo=gmt12)\nprint (dt1,dt2,dt3,dt4,dt5,dt6, sep='\\n')\nprint(dt1.dst(),dt2.dst(),dt3.dst(),dt4.dst(),dt5.dst(),dt6.dst(), sep='\\n')\nprint ('*'*20)\nprint(dt5.utcoffset())\nprint(dt6.utcoffset())\nprint(gmt12.dst(dt6))\nprint (gmt12.tzname(dt6))\nprint (dt5.ctime())\nprint (\"NZ local time is {0}, UTC time is {1}\".\n format(dt6.isoformat(), dt6.astimezone(tz=timezone.utc)))\nprint (\"NZ local time is {0}, UTC time is {1}\".\n format(dt6.isoformat(), dt6.astimezone(tz=timezone(timedelta(hours=8)))))\n","sub_path":"Python/NZdt.py","file_name":"NZdt.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94317432","text":"import datetime\nimport sys\n\nfrom peewee import *\n\nDATABASE = SqliteDatabase('journal.db')\n\nclass Entry(Model):\n entry_id = AutoField()\n title = CharField()\n date = DateField(default=datetime.datetime.now)\n time_spent = CharField(max_length=50)\n learned = TextField()\n resources = TextField()\n\n \n class Meta:\n database = DATABASE\n order_by = ('-date',)\n\n \n @classmethod\n def create_entry(cls, title, date, time_spent, learned, resources):\n try:\n cls.create(\n title=title,\n date=date,\n time_spent=time_spent,\n learned=learned,\n resources=resources,\n ).save()\n except IntegrityError:\n raise ValueError(\"Journal entry already exists.\")\n\ndef initialize():\n DATABASE.connect()\n DATABASE.create_tables([Entry], safe=True)\n DATABASE.close()\n\nif __name__ == '__main__':\n initialize()\n ","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"58280930","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('vent.views',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', include('vcalendar.urls'), name='home'),\n url(r'^login/$','login'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^home/$','home'),\n url(r'^about/$','about'),\n url(r'^support-us/$','supportUs'),\n url(r'^faq/$','faq'),\n url(r'^contact-us/$','contact'),\n url(r'^logout/$', 'logout', name='logout'),\n url(r'^calendar/$', include('vcalendar.urls'), name='vcalendar'),\n url(r'^event/$', include('event.urls'), name='event'),\n url(r'^org/$', include('org.urls'), name='org'),\n url(r'^sponsors/$', include('sponsors.urls'), name='sponsors'),\n)\n","sub_path":"vent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"434484510","text":"import time\nimport unittest\nfrom selenium import webdriver\nfrom 
selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\nclass TestBase(unittest.TestCase):\n    driver = None\n    base_url = \"http://opennxos.cisco.com\"\n    time_out = 10 # timeout in seconds\n\n    def setUp(self):\n        if TestBase.driver is None:\n            TestBase.driver = webdriver.Firefox()\n            TestBase.driver.implicitly_wait(self.time_out)\n            TestBase.driver.maximize_window()\n        return TestBase.driver\n\n    def goto_link(self, link):\n        TestBase.driver.get(link)\n\n    def wait(self, time_out=5):\n        time.sleep(time_out)\n\n    def wait_for_presence_of_element(self, by, value):\n        element = WebDriverWait(TestBase.driver, self.time_out).until(\n            EC.presence_of_element_located((by, value))\n        )\n        return element\n\n    def get_element(self, by, value):\n        try:\n            return TestBase.driver.find_element(by, value)\n        except NoSuchElementException:\n            return None\n\n    def click_element(self, by, value):\n        element = self.get_element(by, value)\n        element.click()\n\n    def is_element_present(self, by, value):\n        try:\n            TestBase.driver.find_element(by, value)\n            return True\n        except NoSuchElementException:\n            return False\n\n    def is_result_found(self, result):\n        return result in TestBase.driver.page_source\n\n    def enter_text(self, element, value):\n        element.clear()\n        element.send_keys(value)\n\n    def select_option(self, element, value, select_by=\"text\"):\n        select = Select(element)\n        if select_by == \"text\":\n            select.select_by_visible_text(value)\n        elif select_by == \"index\":\n            select.select_by_index(value)\n        elif select_by == \"value\":\n            select.select_by_value(value)\n\n    def fill_form(self, form_data):\n        for item in form_data:\n            element_type = item.get(\"element_type\")\n            element = self.get_element(item.get(\"find_element_by\"), item.get(\"key\"))\n            value = item.get(\"value\")\n            if element_type == \"text\":\n                self.enter_text(element, value)\n            elif element_type == \"select\":\n                self.select_option(element, value, select_by=item.get(\"select_by\"))\n","sub_path":"suite/testbase.py","file_name":"testbase.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"576675965","text":"import random\nimport matplotlib.pyplot as plt\nimport operator\n\n\n# Create the data set\ndef createDataSet():\n    dataset = []\n    for line in open(\"data.csv\"):\n        x, y = line.split(\",\")\n        dataset.append([int(x), int(y)])\n    return dataset\n    # return [[1, 1], [1, 2], [2, 1], [6, 4], [6, 3], [5, 4]]\n\n\n# k-means algorithm\ndef kmeans(dataSet, k):\n    center_cores = random.sample(dataSet, k)\n    # center_cores = [[1,2], [33,18], [50,70]]\n    print(\"Initial random centroids:\", center_cores)\n    changed, new_center_core = updateCenterCore2(dataSet, center_cores, k)\n    print(\"Change after iteration 1:\", changed)\n    print(\"New centroids after iteration 1:\", new_center_core)\n    c = 2\n    while changed != 0:\n        print(\"--------------------------------\")\n        changed, new_center_core = updateCenterCore2(dataSet, new_center_core, k)\n        print(c, \"change:\", changed)\n        print(c, \"new centroids:\", new_center_core)\n        c = c + 1\n\n    return changed, new_center_core\n\n\n# Update the centroids\ndef updateCenterCore(dataSet, centerCores, k):\n    # compute the distance from each point to the centroids\n    calc_dis_list = calcDis(dataSet, centerCores, k)\n    new_center_cores = []\n    print(calc_dis_list)\n    # compute the new centroids\n    l = len(dataSet)\n    '''\n    Classify each point under its corresponding centroid:\n    compare each point's distance to every centroid, assign the point to that centroid's cluster, then update the centroids\n    '''\n    classfiy = []\n    x = []\n    for j in range(k):\n        classfiy.append([])\n    for i in range(l):\n        min_val = calc_dis_list[i]\n        min_indx = i\n        t = i\n        for j in range(k - 1):\n            if min_val > calc_dis_list[t + l]:\n                min_val = calc_dis_list[t + l]\n                min_indx = t + l\n            t = t + l\n        classfiy[min_indx // l].append(dataSet[i])\n    print(classfiy)\n    print(\"----------------------------------\")\n    for i in range(k):\n        minX = 0\n        minY = 0\n        for j in range(len(classfiy[i])):\n            minX = minX + classfiy[i][j][0]\n            minY = minY + classfiy[i][j][1]\n        # print(minX / len(classfiy[i]), minY / len(classfiy[i]))\n        if len(classfiy[i]) == 0:\n            new_center_cores.append(centerCores[i])\n        else:\n            new_center_cores.append([minX / len(classfiy[i]), minY / len(classfiy[i])])\n    print(\"The new centroids are:\", new_center_cores)\n    changed = 0\n    for i in range(k):\n        changed = changed + (((new_center_cores[i][0] - centerCores[i][0]) ** 2 + (\n                new_center_cores[i][1] - centerCores[i][1]) ** 2) ** 0.5)\n    return changed, new_center_cores\n\n\n# Update the centroids\ndef updateCenterCore2(dataSet, centerCores, k):\n    # compute the distance from each point to the centroids\n    calc_dis_list = calc_point2centers(dataSet, centerCores, k)\n    new_center_cores = []\n    print(calc_dis_list)\n    # compute the new centroids\n    l = len(dataSet)\n    '''\n    Classify each point under its corresponding centroid:\n    compare each point's distance to every centroid, assign the point to that centroid's cluster, then update the centroids\n    '''\n    classfiy = []\n    x = []\n    for j in range(k):\n        classfiy.append([])\n    for i in range(l):\n        min_val = calc_dis_list[i][0]\n        class_index = 0\n        for j in range(1, k):\n            if min_val > calc_dis_list[i][j]:\n                min_val = calc_dis_list[i][j]\n                class_index = j\n        classfiy[class_index].append(dataSet[i])\n    print(classfiy)\n    print(\"----------------------------------\")\n    for i in range(k):\n        minX = 0\n        minY = 0\n        for j in range(len(classfiy[i])):\n            minX = minX + classfiy[i][j][0]\n            minY = minY + classfiy[i][j][1]\n        # print(minX / len(classfiy[i]), minY / len(classfiy[i]))\n        if len(classfiy[i]) == 0:\n            new_center_cores.append(centerCores[i])\n        else:\n            new_center_cores.append([minX / len(classfiy[i]), minY / len(classfiy[i])])\n    print(\"The new centroids are:\", new_center_cores)\n    changed = 0\n    for i in range(k):\n        changed = changed + (((new_center_cores[i][0] - centerCores[i][0]) ** 2 + (\n                new_center_cores[i][1] - centerCores[i][1]) ** 2) ** 0.5)\n    return changed, new_center_cores\n\n# Compute the distance from every point to each centroid\ndef calcDis(dataSet, centerCores, k):\n    calcList = []\n    for j in range(k):\n        for i in range(len(dataSet)):\n            calcList.append(\n                (((centerCores[j][0] - dataSet[i][0]) ** 2 + (centerCores[j][1] - dataSet[i][1]) ** 2) ** 0.5))\n    # print(\"Distances from each point to the centroids:\", calcList)\n    return calcList\n\n\n# Compute Euclidean distance\ndef calc_dis(point, center):\n    return (((center[0] - point[0]) ** 2 + (center[1] - point[1]) ** 2) ** 0.5)\n\n\n# Compute the distance from every point to every centroid and collect the results in calcList\ndef calc_point2centers(dataset, centerCores, k):\n    calcList = []\n    for i in range(len(dataset)):\n        calcList.append([])\n        for j in range(k):\n            calcList[i].append((calc_dis(dataset[i], centerCores[j])))\n    return calcList\n\n\nif __name__ == '__main__':\n    startList = createDataSet()\n    for i in range(len(startList)):\n        plt.scatter(startList[i][0], startList[i][1], marker='o', color='green', s=40, label='original points')\n    print(\"Original data points:\", startList)\n    changed, new_center_cores = kmeans(startList, 3)\n    for i in range(len(new_center_cores)):\n        plt.scatter(new_center_cores[i][0], new_center_cores[i][1], marker='x', color='red', s=50, label='centroids')\n    plt.show()\n","sub_path":"Python/机器学习/k-means/k-means的python实现优化02.py","file_name":"k-means的python实现优化02.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"180737826","text":"import numpy as np\nimport 
math\n\nclass cross_validation_score:\n \"\"\"Does Scoring Cross Validation\"\"\"\n\n def score(self, model, X, y, cv=5, scaler=None):\n \"\"\"Given model and X, y returns cross val scores of number of folds cv\"\"\"\n\n totalSize = X.shape[0]\n\n sizes = self.getSegmentSizes(X, cv)\n scores = list()\n counter = 0\n for size in sizes:\n lowerBound = int(counter)\n upperBound = int(counter + size)\n X_test = np.array(X[lowerBound:upperBound,:])\n y_test = np.array(y[lowerBound:upperBound])\n X_train = np.vstack((np.array(X[:lowerBound,:]), np.array(X[upperBound:,:])))\n y_train = np.hstack((np.array(y[:lowerBound]), np.array(y[upperBound:])))\n\n if scaler != None:\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n model.fit(X_train, y_train)\n\n scores.append(model.score(X_test, y_test))\n\n counter = counter + size\n return scores\n\n def getSegmentSizes(self, X, cv):\n \"\"\"Returns the number of samples which should be in each fold\"\"\"\n\n totalSize = X.shape[0]\n if cv > totalSize:\n raise Exception(\"Set too small\")\n segment = int(np.floor(totalSize/cv))\n sizes = np.empty([cv])\n sizes.fill(int(segment))\n leftOver = totalSize - (segment * cv)\n\n sizes[:leftOver] = sizes[:leftOver] + 1\n\n return sizes\n\n\nclass confusion_matrix:\n\n def __init__(self):\n self.truePositives = 0\n self.falsePositives = 0\n self.trueNegatives = 0\n self.falseNegatives = 0\n\n def generate(self, y_pred, y_test):\n \"\"\"Create confusion matrix\"\"\"\n\n self.truePositives = 0\n self.falsePositives = 0\n self.trueNegatives = 0\n self.falseNegatives = 0\n\n for i in np.arange(y_test.shape[0]):\n if y_test[i] == 1 and y_pred[i] == 1:\n self.truePositives += 1\n\n if y_test[i] == 0 and y_pred[i] == 1:\n self.falsePositives += 1\n\n if y_test[i] == 0 and y_pred[i] == 0:\n self.trueNegatives += 1\n\n if y_test[i] == 1 and y_pred[i] == 0:\n self.falseNegatives += 1\n\n return np.array([[self.truePositives, self.falsePositives], [self.falseNegatives, self.trueNegatives]])\n\n def sensitivity(self):\n \"\"\"Get sensitivity score\"\"\"\n\n return self.truePositives/(self.truePositives + self.falseNegatives)\n\n def specificity(self):\n \"\"\"Get specificity score\"\"\"\n\n return self.trueNegatives/(self.trueNegatives + self.falsePositives)\n\n def precision(self):\n \"\"\"Get precision score\"\"\"\n\n return self.truePositives/(self.truePositives + self.falsePositives)\n\n def accuracy(self):\n \"\"\"Get accuracy score\"\"\"\n\n return (self.truePositives + self.trueNegatives)/(self.truePositives + self.trueNegatives + self.falsePositives + self.falseNegatives)\n\n\n","sub_path":"scoring/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"23170282","text":"import threading\n\n\nclass SpellCheckService:\n w_last = closest_to_last_word = None\n lock = threading.Lock()\n\n @staticmethod\n def service(req, resp):\n w = req.extract_word_to_check_from_request()\n result = None\n\n with SpellCheckService.lock:\n if w == SpellCheckService.w_last:\n result = SpellCheckService.closest_to_last_word.copy()\n\n if result is None:\n result = closest_in_dictionary(w)\n with SpellCheckService.lock:\n SpellCheckService.w_last = w\n SpellCheckService.closest_to_last_word = result\n\n 
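# reply with the suggestions, whether they came from the cache or a fresh dictionary lookup\n        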
resp.encode_into_response(result)\n","sub_path":"elements-of-programming-interviews/19-parallel-computing/19.1-multithreaded-dictionary/spell_check_service.py","file_name":"spell_check_service.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"15947221","text":"from django.urls import path\nfrom cbv_app import views\n\napp_name = 'cbv_app'\n\nurlpatterns = [\n    path('',views.BookListView.as_view(),name='list'),\n    path('create/',views.BookCreateView.as_view(),name='create'),\n    path('books/<int:pk>',views.BookDetailView.as_view(),name='detail'),\n    path('update/<int:pk>',views.BookUpdateView.as_view(),name='update'),\n    path('delete/<int:pk>',views.BookDeleteView.as_view(),name='delete'),\n\n]\n","sub_path":"cbv/cbv_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"518395523","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\n\n# This Dataset loads saved torch Tensors. These Tensors must all be saved in the same directory in the format\n# <dataset_dir>/<image_id>.pt (where there is a dict, labels, from the image_id to the correct classification).\nclass StandardDataset(Dataset):\n    def __init__(self, ids, labels, dataset_dir: str, tsfm=None):\n        self.ids = ids\n        self.labels = labels\n        self.dataset_dir = dataset_dir\n        self.tsfm = tsfm\n\n    def __len__(self):\n        return len(self.ids)\n\n    def __getitem__(self, dex):\n        if torch.is_tensor(dex):\n            dex = dex.tolist()\n        image_id = self.ids[dex]\n\n        filename = os.path.join(self.dataset_dir, image_id + \".pt\")\n        X = torch.load(filename)\n\n        if self.tsfm:\n            X = self.tsfm(X)\n\n        y = self.labels[image_id]\n\n        return X, y\n","sub_path":"models/util/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"237124595","text":"# Use the \"math\" library, which provides some useful mathematical functions\nimport math\n\n# input(t) prints t to the console and returns the following user input as a string\n# int(x) converts an arbitrary data object into an integer, in this case a string\nn = int(input(\"n: \"))\n\n# Build φ''(n) as the set of all integers between...\nphi_dd = set(range(\n    # ... n/3, rounded up to an integer, and ...\n    math.ceil(\n        n / 3\n    ),\n    # ... 2 * n / 3, rounded down to an integer, + 1 exclusive [i.e. without the +1, inclusive]\n    math.floor(\n        2 * n / 3\n    ) + 1\n))\n\n# Filter the set φ''(n) down to the set φ'(n)\nphi_d = set(filter(\n    lambda element: (n * element) % (3 * element - n) == 0,\n    phi_dd\n))\n\n# φ(n) is the size of this set\nphi = len(phi_d)\n\n# Check whether it has the size we are looking for, ...\nif phi == 2021:\n    # ... report that it worked ...\n    print(str(n)+\" can be represented in 2021 ways as a sum of unit fractions, namely:\")\n\n    # ... and now iterate over all x in ascending order, and for each element:\n    for x in sorted(phi_d):\n        # compute y\n        y = int((n * x) / (3 * x - n))\n\n        # and print this pair\n        print(\"x=\"+str(x)+\", y=\"+str(y))\n\nelse:\n    # and also report in how many ways n can actually be represented\n    print(str(n)+\" can unfortunately only be represented in \"+str(phi)+\" possible ways as a sum of two unit fractions\")\n\n","sub_path":"round 1/final/A2_14.py","file_name":"A2_14.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
\n+{"seq_id":"440399639","text":"from ToonBaseGlobal import *\nfrom IntervalGlobal import *\nfrom OrthoDrive import *\n\nclass OrthoWalk:\n    __module__ = __name__\n    notify = DirectNotifyGlobal.directNotify.newCategory('OrthoWalk')\n    BROADCAST_POS_TASK = 'OrthoWalkBroadcastPos'\n\n    def __init__(self, orthoDrive, collisions=1, broadcast=1, broadcastPeriod=0.1):\n        self.orthoDrive = orthoDrive\n        self.collisions = collisions\n        self.broadcast = broadcast\n        self.broadcastPeriod = broadcastPeriod\n        self.priority = self.orthoDrive.priority + 1\n        self.lt = toonbase.localToon\n\n    def destroy(self):\n        self.orthoDrive.destroy()\n        del self.orthoDrive\n\n    def start(self):\n        self.notify.debug('start')\n        self.orthoDrive.start()\n        if self.collisions:\n            self.__initCollisions()\n        if self.broadcast:\n            self.__initBroadcast()\n\n    def stop(self):\n        self.notify.debug('stop')\n        self.__shutdownCollisions()\n        self.__shutdownBroadcast()\n        self.orthoDrive.stop()\n\n    def __initCollisions(self):\n        self.notify.debug('initCollisions')\n        lt = toonbase.localToon\n        lt.collisionsOn()\n        lt.pusher.clearColliders()\n        lt.pusher.addColliderNode(lt.cSphereNode, lt.node())\n        self.__collisionsOn = 1\n\n    def __shutdownCollisions(self):\n        if not hasattr(self, '_OrthoWalk__collisionsOn'):\n            return\n        del self.__collisionsOn\n        self.notify.debug('shutdownCollisions')\n        lt = toonbase.localToon\n        lt.collisionsOff()\n        lt.pusher.clearColliders()\n        lt.pusher.addColliderDrive(lt.cSphereNode, base.drive.node())\n\n    def __initBroadcast(self):\n        self.notify.debug('initBroadcast')\n        self.__timeSinceLastPosBroadcast = 0.0\n        self.__lastPosBroadcast = self.lt.getPos()\n        self.__lastHprBroadcast = self.lt.getHpr()\n        self.__storeStop = 0\n        lt = self.lt\n        lt.d_clearSmoothing()\n        lt.d_setSmPosHpr(lt.getX(), lt.getY(), lt.getZ(), lt.getH(), lt.getP(), lt.getR())\n        taskMgr.add(self.__doBroadcast, self.BROADCAST_POS_TASK, priority=self.priority)\n\n    def __shutdownBroadcast(self):\n        self.notify.debug('shutdownBroadcast')\n        taskMgr.remove(self.BROADCAST_POS_TASK)\n\n    def __doBroadcast(self, task):\n        dt = globalClock.getDt()\n        self.__timeSinceLastPosBroadcast += dt\n        if self.__timeSinceLastPosBroadcast >= self.broadcastPeriod:\n            self.__timeSinceLastPosBroadcast = 0\n            pos = self.lt.getPos()\n            hpr = self.lt.getHpr()\n            if self.orthoDrive.setHeading and (pos[0] != self.__lastPosBroadcast[0] or pos[1] != self.__lastPosBroadcast[1] or hpr[0] != self.__lastHprBroadcast[0]):\n                self.lt.d_setSmXYH(pos[0], pos[1], hpr[0])\n                self.__lastPosBroadcast = pos\n                self.__lastHprBroadcast = hpr\n                self.__storeStop = 0\n            else:\n                if pos[0] != self.__lastPosBroadcast[0] or pos[1] != self.__lastPosBroadcast[1]:\n                    self.lt.d_setSmXY(pos[0], pos[1])\n                    self.__lastPosBroadcast = pos\n                    self.__storeStop = 0\n                else:\n                    if not self.__storeStop:\n                        self.__storeStop = 1\n                        self.lt.d_setSmStop()\n        return Task.cont","sub_path":"Bytecode 
Decompile/OrthoWalk.py","file_name":"OrthoWalk.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"450557183","text":"from utils.haystack.highlighting import SummaryHighlighter\nfrom django import template\n\n\nclass SummaryHighlightNode(template.Node):\n def __init__(self,\n text_block,\n query,\n html_tag=None,\n css_class=None,\n min_length=None,\n max_length=None):\n self.text_block = template.Variable(text_block)\n self.query = template.Variable(query)\n self.html_tag = html_tag\n self.css_class = css_class\n self.max_length = max_length\n self.min_length = min_length\n\n if html_tag is not None:\n self.html_tag = template.Variable(html_tag)\n\n if css_class is not None:\n self.css_class = template.Variable(css_class)\n\n if max_length is not None:\n self.max_length = template.Variable(max_length)\n\n if min_length is not None:\n self.max_length = template.Variable(min_length)\n\n def render(self, context):\n text_block = self.text_block.resolve(context)\n query = self.query.resolve(context)\n kwargs = {}\n\n if self.html_tag is not None:\n kwargs['html_tag'] = self.html_tag.resolve(context)\n\n if self.css_class is not None:\n kwargs['css_class'] = self.css_class.resolve(context)\n\n if self.max_length is not None:\n kwargs['max_length'] = self.max_length.resolve(context)\n\n if self.min_length is not None:\n kwargs['min_length'] = self.max_length.resolve(context)\n\n highlighter = SummaryHighlighter(query, **kwargs)\n highlighted_text = highlighter.highlight(text_block)\n\n return highlighted_text\n\n\nregister = template.Library()\n\n\n@register.tag\ndef summary_highlight(parser, token):\n \"\"\"\n Takes a block of text and highlights words from a provided query within\n that block of text. 
Optionally accepts arguments to provide the HTML tag\n to wrap highlighted word in, a CSS class to use with the tag and a maximum\n length of the blurb in characters.\n\n Syntax::\n\n {% summary_highlight with [css_class \"class_name\"] [html_tag \"span\"] [max_length 200] %}\n\n Example::\n\n # Highlight summary with default behavior.\n {% summary_highlight result.summary with request.query %}\n\n # Highlight summary but wrap highlighted words with a div and the\n # following CSS class.\n {% summary_highlight result.summary with request.query html_tag \"div\" css_class \"highlight_me_please\" %}\n\n # Highlight summary but only show 40 characters.\n {% summary_highlight result.summary with request.query max_length 40 %}\n \"\"\"\n bits = token.split_contents()\n tag_name = bits[0]\n\n if not len(bits) % 2 == 0:\n raise template.TemplateSyntaxError(\n u\"'%s' tag requires valid pairings arguments.\" % tag_name)\n\n text_block = bits[1]\n\n if len(bits) < 4:\n raise template.TemplateSyntaxError(\n u\"'%s' tag requires an object and a query provided by 'with'.\"\n % tag_name)\n\n if bits[2] != 'with':\n raise template.TemplateSyntaxError(\n u\"'%s' tag's second argument should be 'with'.\" % tag_name)\n\n query = bits[3]\n\n arg_bits = iter(bits[4:])\n kwargs = {}\n\n for bit in arg_bits:\n if bit == 'css_class':\n kwargs['css_class'] = arg_bits.next()\n\n if bit == 'html_tag':\n kwargs['html_tag'] = arg_bits.next()\n\n if bit == 'max_length':\n kwargs['max_length'] = arg_bits.next()\n\n if bit == 'min_length':\n kwargs['min_length'] = arg_bits.next()\n\n return SummaryHighlightNode(text_block, query, **kwargs)\n","sub_path":"templatetags/utils_highlight.py","file_name":"utils_highlight.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481846837","text":"__author__ = 'Pawel'\n\nimport tkinter as tk\nfrom src import O_X as ox\n\n\nclass Game(object):\n def __init__(self):\n self.root = tk.Tk()\n self.game = ox.GameO_X()\n self.cursor_x, self.cursor_y = 0, 0\n self.old_cursor_x, self.old_cursor_y = 0, 0\n self.old_value = \" \"\n self.currPlayer = 0\n self.introduction()\n self.setRound()\n\n def introduction(self):\n print(\"GAME O and X\\nMoving WSAD Place Spacebar \\nFirst Player is O\\nSecound Player is X\\nHave Fun! 
!\")\n\n def gameLoop(self):\n while not self.game.isGameOver():\n self.root.bind_all('', self.keyPress)\n self.root.withdraw()\n self.root.mainloop()\n print(\"END GAME\")\n\n def setRound(self):\n self.old_value = self.game.getArrayValue(0, 0)\n self.set_new_position()\n self.moveCursor()\n\n def moveCursor(self):\n self.clean_old_cursor()\n self.move_cursor()\n self.game.printGameArray(self.currPlayer)\n\n def clean_old_cursor(self):\n self.game.setToArrayOn(self.old_cursor_y, self.old_cursor_x, self.old_value, \"move\")\n\n def move_cursor(self):\n self.old_value = self.game.getArrayValue(self.cursor_y, self.cursor_x)\n self.game.setToArrayOn(self.cursor_y, self.cursor_x, \"_\", \"move\")\n\n def set_old_position(self):\n self.old_cursor_y = self.cursor_y\n self.old_cursor_x = self.cursor_x\n\n def set_new_position(self):\n self.cursor_x = self.cursor_y = 0\n self.old_cursor_x = self.old_cursor_y = 0\n\n def place_value(self):\n self.game.setToArrayOn(self.cursor_y, self.cursor_x, self.currPlayer, \"place\")\n self.check_end_game()\n self.currPlayer = (self.currPlayer+1) % 2\n\n def check_end_game(self):\n if self.game.checkWin() == 1:\n self.game.printGameArray(self.currPlayer)\n print(\"WIN PLAYER \" + str(self.currPlayer))\n self.root.destroy()\n elif self.game.checkWin() == -1:\n self.game.printGameArray(self.currPlayer)\n print(\"REMIS\")\n self.root.destroy()\n\n def keyPress(self, event):\n self.set_old_position()\n x = event.char\n move = True\n if event.keysym == 'Escape':\n self.game.gameOver = True\n self.root.destroy()\n elif event.keysym == 'space' and self.old_value == \" \":\n move = False\n self.place_value()\n elif x == \"w\":\n if self.cursor_y - 1 >= 0:\n self.cursor_y -= 1\n elif x == \"a\":\n if self.cursor_x - 1 >= 0:\n self.cursor_x -= 1\n elif x == \"s\":\n if self.cursor_y + 1 < 3:\n self.cursor_y += 1\n elif x == \"d\":\n if self.cursor_x + 1 < 3:\n self.cursor_x += 1\n\n if move and not self.game.gameOver:\n self.moveCursor()\n elif not self.game.gameOver:\n self.setRound()\n\n\nif __name__ == '__main__':\n game = Game()\n game.gameLoop()","sub_path":"src/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"86327002","text":"import os\n\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding='utf-8') as rfile:\n long_description = rfile.read()\n\nwith open(os.path.join(\"featurize_haunter\", \"__about__.py\")) as rfile:\n v_dict = {}\n exec(rfile.read(), v_dict)\n version = v_dict['__version__']\n\n\nsetuptools.setup(\n name=\"featurize_hanuter\",\n version=version,\n author=\"Dongsheng Lin\",\n description=\"command line tool for init and config\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/LinDong123a/featurize-haunter\",\n packages=setuptools.find_packages(),\n entry_points={\n \"console_scripts\": [\n \"featurize-haunter = featurize_haunter.featurize_haunter:main\",\n ],\n },\n package_dir={\"featurize_haunter\": \"featurize_haunter\"},\n include_package_data=True,\n python_requires='>=3.6',\n install_requires=[\n \"featurize==0.0.12\",\n \"playsound==1.2.2\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python 
:: 3 :: Only\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ],\n project_urls={\n \"Source\": \"https://github.com/LinDong123a/featurize-haunter\",\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"226192564","text":"\"\"\"Merge data from oss1 to oss2\"\"\"\nimport logging\n\nfrom django.db.models import Q\n\nfrom website.datacenter import models as oss\nfrom website.test_report import models as oss_report\n\nfrom . import models_oss1 as oss1\nfrom . import db_utils\n\n\ndef sync_cycling_cfg(proj_id=9):\n \"\"\"Script entry\"\"\"\n logger = logging.getLogger('oss')\n # Common tables for all projects\n tables_common = [\n (oss1.CyclingConfigurationTypeSys, oss.CyclingConfigurationTypeSys),\n (oss1.ReportCyclingCaseCategory, oss_report.ReportCyclingCaseCategory),\n ]\n for table1, table2 in tables_common:\n logger.info('Sync data from %s to %s', str(table1), str(table2))\n if table2.objects.using('oss').all():\n continue\n db_utils.copy_all_data(\n table1, table2, query=None,\n solve_pk_conflict=True\n )\n\n query = Q(proj_id=proj_id)\n tables = [\n # Cycling case config\n (\n oss1.ReportCyclingCaseConfig,\n oss_report.ReportCyclingCaseConfig, query,\n (\n {\n 'name': 'category',\n 'identifies': ['name'],\n 'foreign_table': oss_report.ReportCyclingCaseCategory\n },\n )\n ),\n # HW configuration\n # ConfigurationTypeSys\n (\n oss1.CyclingConfigurationType,\n oss.CyclingConfigurationType,\n query, None\n ),\n (\n oss1.CyclingConfigurationItem,\n oss.CyclingConfigurationItem,\n Q(cfg_type__proj_id=proj_id),\n (\n {\n 'name': 'cfg_type',\n 'foreign_table': oss.CyclingConfigurationType\n },\n )\n ),\n (\n oss1.CyclingConfigurationSet,\n oss.CyclingConfigurationSet, query,\n None\n ),\n (\n oss1.CyclingConfiguration, oss.CyclingConfiguration,\n Q(cfg_set__proj_id=proj_id),\n (\n {\n 'name': 'cfg_set',\n 'foreign_table': oss.CyclingConfigurationSet\n },\n {\n 'name': 'cfg_item',\n 'foreign_table': oss.CyclingConfigurationItem\n },\n )\n ),\n # Sync report usefule info\n (\n oss1.NewReportUsefulInfo, oss_report.NewReportUsefulInfo,\n query, None\n ),\n # Sync mail settings\n (\n oss1.ReportMail, oss_report.ReportMail,\n query, None\n )\n ]\n\n for table1, table2, query_str, foreign_keys in tables:\n logger.info('Sync data from %s to %s', str(table1), str(table2))\n db_utils.copy_all_data(\n table1, table2, query=query_str,\n foreign_keys=foreign_keys,\n solve_pk_conflict=True\n )\n\n\nif __name__ == '__main__':\n sync_cycling_cfg(9)\n","sub_path":"oss/website/oss1db/cycling_cfg.py","file_name":"cycling_cfg.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"628315065","text":"bl_info = {\n \"name\": \"Animation Tweaks\",\n \"author\": \"Christophe Seux, Manuel Rais\",\n \"version\": (0, 1),\n \"blender\": (2, 77, 0),\n \"location\": \"\",\n \"description\": \"\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"Learnbgame\",\n}\n\nif \"bpy\" in locals():\n import imp\n imp.reload(operators)\n imp.reload(panels)\n\nelse:\n from . import operators\n from . 
import panels\n\nimport bpy\n\ndef storedPoseLib(scene, context):\n items = []\n\n ob = context.object\n\n if ob is not None:\n if ob.type == 'ARMATURE':\n poseLibs = [a.name for a in bpy.data.actions if len(a.pose_markers)>0]\n if ob.PoseLibCustom.filtered == True :\n\n idName = ob.name.split('_')[0]\n poseLibs = [p for p in poseLibs if p.split('_')[0] == idName]\n\n for i,poseLib in enumerate(sorted(poseLibs, reverse=True)):\n items.append((poseLib,poseLib,\"\",'ACTION',i))\n else :\n for i,poseLib in enumerate(sorted(poseLibs, reverse=True)):\n items.append((poseLib,poseLib,\"\",'ACTION',i))\n return items\n\n\nclass PoseLibCustomSettings(bpy.types.PropertyGroup) :\n filtered = bpy.props.BoolProperty(default = True,description='Filter by base Name')\n poseLibs = bpy.props.EnumProperty(items =storedPoseLib,name= 'Pose Library')\n\n\naddon_keymaps = []\ndef register():\n bpy.utils.register_module(__name__)\n bpy.types.Object.PoseLibCustom = bpy.props.PointerProperty(type = PoseLibCustomSettings)\n bpy.types.Armature.DefaultValues = bpy.props.PointerProperty(type= bpy.types.PropertyGroup)\n\n addon = bpy.context.window_manager.keyconfigs.addon\n if addon:\n km = addon.keymaps.new(name = \"3D View\", space_type = \"VIEW_3D\")\n km.keymap_items.new(\"pose.insert_keyframe\", type = \"K\", value = \"PRESS\")\n km.keymap_items.new(\"pose.reset_props\", type = \"X\", value = \"PRESS\")\n addon_keymaps.append(km)\n\n\ndef unregister():\n del bpy.types.Armature.DefaultValues\n wm = bpy.context.window_manager\n for km in addon_keymaps:\n for kmi in km.keymap_items:\n km.keymap_items.remove(kmi)\n wm.keyconfigs.addon.keymaps.remove(km)\n addon_keymaps.clear()\n\n bpy.utils.unregister_module(__name__)\n","sub_path":"All_In_One/addons/animTweak/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271678964","text":"'''\nGiven: N > 1, 1 <= k < 2^N\nFind the number of good sequences of 0 and 1 without consecutive 1's length N\n\nPrint k-th sequence (lexicograph sorting) len = N\n'''\n\n\ndef solution(n, k):\n def out(ans, i, k):\n if i < 0:\n return\n\n if i == 0:\n if k == 1:\n print('0', end='')\n else:\n print('1', end='')\n return\n\n if k <= ans[i-1]:\n print('0', end='')\n out(ans, i-1, k)\n else:\n print('10', end='')\n out(ans, i-2, k - ans[i-1])\n\n\n\n ans = [0] * n\n ans[0] = 2\n ans[1] = 3\n for i in range(2, n):\n ans[i] = ans[i-1] + ans[i-2]\n\n out(ans, n-1, k)\n\n\nfor i in range(1, 9):\n solution(4, i)\n print()\n","sub_path":"ProblemsSolving/DP/binary-sequence/seq_02.py","file_name":"seq_02.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"454130678","text":"# -*- coding: utf-8 -*-\nimport copy\nimport json\nimport logging\nimport os\nimport time\nimport unittest\n\nimport dateutil\nimport requests\nimport requests_mock\nfrom bson import ObjectId\nfrom configparser import ConfigParser\nfrom datetime import datetime, timedelta\nfrom mock import MagicMock\nfrom mongoengine import ValidationError\nfrom typing import Dict, List\nfrom unittest.mock import patch\n\nfrom execution_engine2.SDKMethodRunner import SDKMethodRunner\nfrom execution_engine2.db.MongoUtil import MongoUtil\nfrom execution_engine2.db.models.models import (\n Job,\n JobInput,\n Meta,\n Status,\n JobLog,\n TerminatedCode,\n)\nfrom execution_engine2.exceptions import AuthError\nfrom 
execution_engine2.exceptions import InvalidStatusTransitionException\nfrom execution_engine2.utils.Condor import submission_info\nfrom test.mongo_test_helper import MongoTestHelper\nfrom test.test_utils import bootstrap, get_example_job, validate_job_state\n\nlogging.basicConfig(level=logging.INFO)\nbootstrap()\n\n\ndef _run_job_adapter(\n ws_perms_info: Dict = None,\n ws_perms_global: List = [],\n client_groups_info: Dict = None,\n module_versions: Dict = None,\n user_roles: List = None,\n):\n \"\"\"\n Mocks POST calls to:\n Workspace.get_permissions_mass,\n Catalog.list_client_group_configs,\n Catalog.get_module_version\n Mocks GET calls to:\n Auth (/api/V2/me)\n Auth (/api/V2/token)\n\n Returns an Adapter for requests_mock that deals with mocking workspace permissions.\n :param ws_perms_info: dict - keys user_id, and ws_perms\n user_id: str - the user id\n ws_perms: dict of permissions, keys are ws ids, values are permission. Example:\n {123: \"a\", 456: \"w\"} means workspace id 123 has admin permissions, and 456 has\n write permission\n :param ws_perms_global: list - list of global workspaces - gives those workspaces a global (user \"*\") permission of \"r\"\n :param client_groups_info: dict - keys client_groups (list), function_name, module_name\n :param module_versions: dict - key git_commit_hash (str), others aren't used\n :return: an adapter function to be passed to request_mock\n \"\"\"\n\n def perm_adapter(request):\n response = requests.Response()\n response.status_code = 200\n rq_method = request.method.upper()\n if rq_method == \"POST\":\n params = request.json().get(\"params\")\n method = request.json().get(\"method\")\n\n result = []\n if method == \"Workspace.get_permissions_mass\":\n perms_req = params[0].get(\"workspaces\")\n ret_perms = []\n user_id = ws_perms_info.get(\"user_id\")\n ws_perms = ws_perms_info.get(\"ws_perms\", {})\n for ws in perms_req:\n perms = {user_id: ws_perms.get(ws[\"id\"], \"n\")}\n if ws[\"id\"] in ws_perms_global:\n perms[\"*\"] = \"r\"\n ret_perms.append(perms)\n result = [{\"perms\": ret_perms}]\n print(result)\n elif method == \"Catalog.list_client_group_configs\":\n result = []\n if client_groups_info is not None:\n result = [client_groups_info]\n elif method == \"Catalog.get_module_version\":\n result = [{\"git_commit_hash\": \"some_commit_hash\"}]\n if module_versions is not None:\n result = [module_versions]\n response._content = bytes(\n json.dumps({\"result\": result, \"version\": \"1.1\"}), \"UTF-8\"\n )\n elif rq_method == \"GET\":\n if request.url.endswith(\"/api/V2/me\"):\n response._content = bytes(\n json.dumps({\"customroles\": user_roles}), \"UTF-8\"\n )\n return response\n\n return perm_adapter\n\n\nclass ee2_SDKMethodRunner_test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n config_file = os.environ.get(\"KB_DEPLOYMENT_CONFIG\", \"test/deploy.cfg\")\n config_parser = ConfigParser()\n config_parser.read(config_file)\n\n cls.cfg = {}\n for nameval in config_parser.items(\"execution_engine2\"):\n cls.cfg[nameval[0]] = nameval[1]\n\n mongo_in_docker = cls.cfg.get(\"mongo-in-docker-compose\", None)\n if mongo_in_docker is not None:\n cls.cfg[\"mongo-host\"] = cls.cfg[\"mongo-in-docker-compose\"]\n\n cls.user_id = \"wsadmin\"\n cls.ws_id = 9999\n cls.token = \"token\"\n\n cls.method_runner = SDKMethodRunner(\n cls.cfg, user_id=cls.user_id, token=cls.token\n )\n cls.mongo_util = MongoUtil(cls.cfg)\n cls.mongo_helper = MongoTestHelper(cls.cfg)\n\n cls.test_collection = cls.mongo_helper.create_test_db(\n 
db=cls.cfg[\"mongo-database\"], col=cls.cfg[\"mongo-jobs-collection\"]\n )\n\n def getRunner(self) -> SDKMethodRunner:\n return copy.deepcopy(self.__class__.method_runner)\n\n def create_job_rec(self):\n job = Job()\n\n inputs = JobInput()\n\n job.user = self.user_id\n job.authstrat = \"kbaseworkspace\"\n job.wsid = self.ws_id\n job.status = \"created\"\n\n job_params = {\n \"wsid\": self.ws_id,\n \"method\": \"MEGAHIT.run_megahit\",\n \"app_id\": \"MEGAHIT/run_megahit\",\n \"service_ver\": \"2.2.1\",\n \"params\": [\n {\n \"k_list\": [],\n \"k_max\": None,\n \"output_contigset_name\": \"MEGAHIT.contigs\",\n }\n ],\n \"source_ws_objects\": [\"a/b/c\", \"e/d\"],\n \"parent_job_id\": \"9998\",\n }\n\n inputs.wsid = job.wsid\n inputs.method = job_params.get(\"method\")\n inputs.params = job_params.get(\"params\")\n inputs.service_ver = job_params.get(\"service_ver\")\n inputs.app_id = job_params.get(\"app_id\")\n inputs.source_ws_objects = job_params.get(\"source_ws_objects\")\n inputs.parent_job_id = job_params.get(\"parent_job_id\")\n\n inputs.narrative_cell_info = Meta()\n\n job.job_input = inputs\n job.job_output = None\n\n with self.mongo_util.mongo_engine_connection():\n job.save()\n\n return str(job.id)\n\n def test_init_ok(self):\n class_attri = [\"config\", \"catalog\", \"workspace\", \"mongo_util\", \"condor\"]\n runner = self.getRunner()\n self.assertTrue(set(class_attri) <= set(runner.__dict__.keys()))\n\n # TODO Think about what we want to do here, as this is an integration test and not a unit test\n # def test_get_client_groups(self):\n # runner = self.getRunner()\n #\n # client_groups = runner._get_client_groups(\n # \"kb_uploadmethods.import_sra_from_staging\"\n # )\n #\n # expected_groups = \"kb_upload\" # expected to fail if CI catalog is updated\n # self.assertCountEqual(expected_groups, client_groups)\n # client_groups = runner._get_client_groups(\"MEGAHIT.run_megahit\")\n # self.assertEqual(0, len(client_groups))\n #\n # with self.assertRaises(ValueError) as context:\n # runner._get_client_groups(\"kb_uploadmethods\")\n #\n # self.assertIn(\"unrecognized method:\", str(context.exception.args))\n #\n # def test_get_module_git_commit(self):\n #\n # runner = self.getRunner()\n #\n # git_commit_1 = runner._get_module_git_commit(\"MEGAHIT.run_megahit\", \"2.2.1\")\n # self.assertEqual(\n # \"048baf3c2b76cb923b3b4c52008ed77dbe20292d\", git_commit_1\n # ) # TODO: works only in CI\n #\n # git_commit_2 = runner._get_module_git_commit(\"MEGAHIT.run_megahit\")\n # self.assertTrue(isinstance(git_commit_2, str))\n # self.assertEqual(len(git_commit_1), len(git_commit_2))\n # self.assertNotEqual(git_commit_1, git_commit_2)\n\n def test_init_job_rec(self):\n with self.mongo_util.mongo_engine_connection():\n ori_job_count = Job.objects.count()\n runner = self.getRunner()\n\n job_params = {\n \"wsid\": self.ws_id,\n \"method\": \"MEGAHIT.run_megahit\",\n \"app_id\": \"MEGAHIT/run_megahit\",\n \"service_ver\": \"2.2.1\",\n \"params\": [\n {\n \"workspace_name\": \"wjriehl:1475006266615\",\n \"read_library_refs\": [\"18836/5/1\"],\n \"output_contigset_name\": \"rhodo_contigs\",\n \"recipe\": \"auto\",\n \"assembler\": None,\n \"pipeline\": None,\n \"min_contig_len\": None,\n }\n ],\n \"source_ws_objects\": [\"a/b/c\", \"e/d\"],\n \"parent_job_id\": \"9998\",\n \"meta\": {\"tag\": \"dev\", \"token_id\": \"12345\"},\n }\n\n job_id = runner._init_job_rec(self.user_id, job_params)\n\n self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n job = Job.objects.get(id=job_id)\n\n 
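# check the top-level fields recorded on the new job document\n        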
self.assertEqual(job.user, self.user_id)\n self.assertEqual(job.authstrat, \"kbaseworkspace\")\n self.assertEqual(job.wsid, self.ws_id)\n\n job_input = job.job_input\n\n self.assertEqual(job_input.wsid, self.ws_id)\n self.assertEqual(job_input.method, \"MEGAHIT.run_megahit\")\n self.assertEqual(job_input.app_id, \"MEGAHIT/run_megahit\")\n self.assertEqual(job_input.service_ver, \"2.2.1\")\n self.assertCountEqual(job_input.source_ws_objects, [\"a/b/c\", \"e/d\"])\n self.assertEqual(job_input.parent_job_id, \"9998\")\n\n narrative_cell_info = job_input.narrative_cell_info\n self.assertEqual(narrative_cell_info.tag, \"dev\")\n self.assertEqual(narrative_cell_info.token_id, \"12345\")\n self.assertFalse(narrative_cell_info.status)\n\n self.assertFalse(job.job_output)\n\n self.mongo_util.get_job(job_id=job_id).delete()\n self.assertEqual(ori_job_count, Job.objects.count())\n\n @patch(\"execution_engine2.SDKMethodRunner.SDKMethodRunner\", autospec=True)\n def test_cancel_job(self, runner):\n logging.info(\"\\n\\n Test cancel job\")\n sdk = copy.deepcopy(self.getRunner())\n\n with sdk.get_mongo_util().mongo_engine_connection():\n job = get_example_job()\n job.user = self.user_id\n job.wsid = self.ws_id\n job_id = job.save().id\n\n logging.info(\n f\"Created job {job_id} in {job.wsid} status {job.status}. About to cancel\"\n )\n\n sdk.cancel_job(job_id=job_id)\n\n self.assertEqual(\n Status(sdk.get_mongo_util().get_job(job_id=job_id).status),\n Status.terminated,\n )\n self.assertEqual(\n TerminatedCode(sdk.get_mongo_util().get_job(job_id=job_id).terminated_code),\n TerminatedCode.terminated_by_user,\n )\n\n with sdk.get_mongo_util().mongo_engine_connection():\n job = get_example_job()\n job.user = self.user_id\n job.wsid = self.ws_id\n job_id = job.save().id\n\n logging.info(\n f\"Created job {job_id} in {job.wsid} status {job.status}. 
About to cancel\"\n )\n\n sdk.cancel_job(\n job_id=job_id, terminated_code=TerminatedCode.terminated_by_automation.value\n )\n\n self.assertEqual(\n Status(sdk.get_mongo_util().get_job(job_id=job_id).status),\n Status.terminated,\n )\n self.assertEqual(\n TerminatedCode(sdk.get_mongo_util().get_job(job_id=job_id).terminated_code),\n TerminatedCode.terminated_by_automation,\n )\n\n @patch(\"execution_engine2.db.MongoUtil.MongoUtil\", autospec=True)\n def test_check_job_canceled(self, mongo_util):\n def generateJob(job_id):\n j = Job()\n j.status = job_id\n return j\n\n runner = self.getRunner()\n runner.get_mongo_util = MagicMock(return_value=mongo_util)\n mongo_util.get_job = MagicMock(side_effect=generateJob)\n\n call_count = 0\n rv = runner.check_job_canceled(\"created\")\n self.assertFalse(rv[\"canceled\"])\n self.assertFalse(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"estimating\")\n self.assertFalse(rv[\"canceled\"])\n self.assertFalse(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"queued\")\n self.assertFalse(rv[\"canceled\"])\n self.assertFalse(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"running\")\n self.assertFalse(rv[\"canceled\"])\n self.assertFalse(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"completed\")\n self.assertFalse(rv[\"canceled\"])\n self.assertTrue(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"error\")\n self.assertFalse(rv[\"canceled\"])\n self.assertTrue(rv[\"finished\"])\n call_count += 1\n\n rv = runner.check_job_canceled(\"terminated\")\n self.assertTrue(rv[\"canceled\"])\n self.assertTrue(rv[\"finished\"])\n call_count += 1\n\n self.assertEqual(call_count, mongo_util.get_job.call_count)\n self.assertEqual(call_count, runner.get_mongo_util.call_count)\n\n @requests_mock.Mocker()\n @patch(\"lib.execution_engine2.utils.Condor.Condor\", autospec=True)\n def test_run_job(self, rq_mock, condor_mock):\n rq_mock.add_matcher(\n _run_job_adapter(\n ws_perms_info={\"user_id\": self.user_id, \"ws_perms\": {self.ws_id: \"a\"}}\n )\n )\n runner = self.getRunner()\n runner.get_condor = MagicMock(return_value=condor_mock)\n job = get_example_job(user=self.user_id, wsid=self.ws_id).to_mongo().to_dict()\n job[\"method\"] = job[\"job_input\"][\"app_id\"]\n job[\"app_id\"] = job[\"job_input\"][\"app_id\"]\n\n si = submission_info(clusterid=\"test\", submit=job, error=None)\n condor_mock.run_job = MagicMock(return_value=si)\n\n job_id = runner.run_job(params=job)\n print(f\"Job id is {job_id} \")\n\n @requests_mock.Mocker()\n @patch(\"lib.execution_engine2.utils.Condor.Condor\", autospec=True)\n def test_run_job_and_add_log(self, rq_mock, condor_mock):\n \"\"\"\n This test runs a job and then adds logs\n\n :param condor_mock:\n :return:\n \"\"\"\n runner = self.getRunner()\n rq_mock.add_matcher(\n _run_job_adapter(\n ws_perms_info={\"user_id\": self.user_id, \"ws_perms\": {self.ws_id: \"a\"}}\n )\n )\n runner.get_condor = MagicMock(return_value=condor_mock)\n job = get_example_job(user=self.user_id, wsid=self.ws_id).to_mongo().to_dict()\n job[\"method\"] = job[\"job_input\"][\"app_id\"]\n job[\"app_id\"] = job[\"job_input\"][\"app_id\"]\n\n si = submission_info(clusterid=\"test\", submit=job, error=None)\n condor_mock.run_job = MagicMock(return_value=si)\n\n job_id = runner.run_job(params=job)\n logging.info(f\"Job id is {job_id} \")\n\n lines = []\n for item in [\"this\", \"is\", \"a\", \"test\"]:\n line = {\"error\": False, \"line\": item}\n 
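# each log line is a dict holding the text plus an error flag\n            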
lines.append(line)\n\n        log_pos_1 = runner.add_job_logs(job_id=job_id, log_lines=lines)\n        logging.info(f\"After insert log position is now {log_pos_1}\")\n        log = runner.view_job_logs(job_id=job_id, skip_lines=None)\n\n        log_lines = log[\"lines\"]\n        for i, inserted_line in enumerate(log_lines):\n            self.assertEqual(inserted_line[\"line\"], lines[i][\"line\"])\n\n        line1 = {\n            \"error\": False,\n            \"line\": \"This is the real deal\",\n            \"ts\": str(datetime.now()),\n        }\n        line2 = {\n            \"error\": False,\n            \"line\": \"This is the real deal2\",\n            \"ts\": int(datetime.now().timestamp() * 1000),\n        }\n        line3 = {\n            \"error\": False,\n            \"line\": \"This is the real deal3\",\n            \"ts\": datetime.now().timestamp(),\n        }\n        line4 = {\n            \"error\": False,\n            \"line\": \"This is the real deal4\",\n            \"ts\": str(datetime.now().timestamp()),\n        }\n        input_lines2 = [line1, line2, line3, line4]\n\n        for line in input_lines2:\n            print(line)\n\n        log_pos2 = runner.add_job_logs(job_id=job_id, log_lines=input_lines2)\n        logging.info(\n            f\"After inserting timestamped logs, log position is now {log_pos2}\"\n        )\n\n        log = runner.view_job_logs(job_id=job_id, skip_lines=None)\n        log_lines = log[\"lines\"]\n\n        print(\"About to dump log\")\n        print(json.dumps(log))\n        for i, inserted_line in enumerate(log_lines):\n            if i < log_pos_1:\n                continue\n\n            self.assertEqual(inserted_line[\"line\"], input_lines2[i - log_pos_1][\"line\"])\n\n            time_input = input_lines2[i - log_pos_1][\"ts\"]\n            if isinstance(time_input, str):\n                if time_input.replace(\".\", \"\", 1).isdigit():\n                    time_input = (\n                        float(time_input)\n                        if \".\" in time_input\n                        else int(time_input) / 1000.0\n                    )\n                else:\n                    time_input = dateutil.parser.parse(time_input).timestamp()\n            elif isinstance(time_input, int):\n                time_input = time_input / 1000.0\n\n            self.assertEqual(inserted_line[\"ts\"], time_input)\n\n            # compare the error flag of the line under iteration, not the loop leftover\n            error1 = inserted_line[\"error\"]\n            error2 = input_lines2[i - log_pos_1][\"error\"]\n            self.assertEqual(error1, error2)\n\n        # TODO IMPLEMENT SKIPLINES AND TEST\n\n        log = runner.view_job_logs(job_id=job_id, skip_lines=1)\n        self.assertEqual(log[\"lines\"][0][\"linepos\"], 2)\n\n        log = runner.view_job_logs(job_id=job_id, skip_lines=8)\n        self.assertEqual(log, {\"lines\": [], \"last_line_number\": 8})\n\n    @requests_mock.Mocker()\n    def test_add_job_logs_ok(self, rq_mock):\n        rq_mock.add_matcher(\n            _run_job_adapter(\n                ws_perms_info={\"user_id\": self.user_id, \"ws_perms\": {self.ws_id: \"a\"}},\n                user_roles=[],\n            )\n        )\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_log_count = JobLog.objects.count()\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            runner = self.getRunner()\n\n            # create new log\n            lines = [{\"line\": \"Hello world\"}]\n            runner.add_job_logs(job_id=job_id, log_lines=lines)\n\n            updated_job_log_count = JobLog.objects.count()\n            self.assertEqual(ori_job_log_count, updated_job_log_count - 1)\n\n            log = self.mongo_util.get_job_log(job_id=job_id)\n            ori_updated_time = log.updated\n            self.assertTrue(ori_updated_time)\n            self.assertEqual(log.original_line_count, 1)\n            self.assertEqual(log.stored_line_count, 1)\n            ori_lines = log.lines\n            self.assertEqual(len(ori_lines), 1)\n\n            test_line = ori_lines[0]\n\n            self.assertEqual(test_line.line, \"Hello world\")\n            self.assertEqual(test_line.linepos, 1)\n            self.assertFalse(test_line.error)\n\n            # add job log\n            lines = [\n                {\"error\": True, \"line\": \"Hello Kbase\"},\n                {\"line\": \"Hello World Kbase\"},\n            ]\n\n            runner.add_job_logs(job_id=job_id, log_lines=lines)\n\n            log = 
self.mongo_util.get_job_log(job_id=job_id)\n            self.assertTrue(log.updated)\n            self.assertTrue(ori_updated_time < log.updated)\n            self.assertEqual(log.original_line_count, 3)\n            self.assertEqual(log.stored_line_count, 3)\n            ori_lines = log.lines\n            self.assertEqual(len(ori_lines), 3)\n\n            # original line\n            test_line = ori_lines[0]\n\n            self.assertEqual(test_line.line, \"Hello world\")\n            self.assertEqual(test_line.linepos, 1)\n            self.assertFalse(test_line.error)\n\n            # new line\n            test_line = ori_lines[1]\n\n            self.assertEqual(test_line.line, \"Hello Kbase\")\n            self.assertEqual(test_line.linepos, 2)\n            self.assertTrue(test_line.error)\n\n            test_line = ori_lines[2]\n\n            self.assertEqual(test_line.line, \"Hello World Kbase\")\n            self.assertEqual(test_line.linepos, 3)\n            self.assertFalse(test_line.error)\n\n            self.mongo_util.get_job(job_id=job_id).delete()\n            self.assertEqual(ori_job_count, Job.objects.count())\n\n            self.mongo_util.get_job_log(job_id=job_id).delete()\n            self.assertEqual(ori_job_log_count, JobLog.objects.count())\n\n    def test_get_job_params(self):\n\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            runner = self.getRunner()\n            runner._test_job_permissions = MagicMock(return_value=True)\n            params = runner.get_job_params(job_id)\n\n            expected_params_keys = [\n                \"wsid\",\n                \"method\",\n                \"params\",\n                \"service_ver\",\n                \"app_id\",\n                \"source_ws_objects\",\n                \"parent_job_id\",\n            ]\n            self.assertCountEqual(params.keys(), expected_params_keys)\n            self.assertEqual(params[\"wsid\"], self.ws_id)\n            self.assertEqual(params[\"method\"], \"MEGAHIT.run_megahit\")\n            self.assertEqual(params[\"app_id\"], \"MEGAHIT/run_megahit\")\n            self.assertEqual(params[\"service_ver\"], \"2.2.1\")\n            self.assertCountEqual(params[\"source_ws_objects\"], [\"a/b/c\", \"e/d\"])\n            self.assertEqual(params[\"parent_job_id\"], \"9998\")\n\n            self.mongo_util.get_job(job_id=job_id).delete()\n            self.assertEqual(ori_job_count, Job.objects.count())\n\n    def test_update_job_status(self):\n\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            runner = self.getRunner()\n            runner._test_job_permissions = MagicMock(return_value=True)\n\n            # test missing status; assert outside the with-block so it actually runs\n            with self.assertRaises(ValueError) as context:\n                runner.update_job_status(None, \"invalid_status\")\n            self.assertEqual(\n                \"Please provide both job_id and status\", str(context.exception)\n            )\n\n            # test invalid status\n            with self.assertRaises(ValidationError) as context:\n                runner.update_job_status(job_id, \"invalid_status\")\n            self.assertIn(\"is not a valid status\", str(context.exception))\n\n            ori_job = Job.objects(id=job_id)[0]\n            ori_updated_time = ori_job.updated\n\n            # test update job status\n            job_id = runner.update_job_status(job_id, \"estimating\")\n            updated_job = Job.objects(id=job_id)[0]\n            self.assertEqual(updated_job.status, \"estimating\")\n            updated_time = updated_job.updated\n\n            self.assertTrue(ori_updated_time < updated_time)\n\n            self.mongo_util.get_job(job_id=job_id).delete()\n            self.assertEqual(ori_job_count, Job.objects.count())\n\n    def test_get_job_status(self):\n\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            runner = self.getRunner()\n            runner._test_job_permissions = 
MagicMock(return_value=True)\n\n # test missing job_id input\n with self.assertRaises(ValueError) as context:\n runner.get_job_status(None)\n self.assertEqual(\"Please provide valid job_id\", str(context.exception))\n\n returnVal = runner.get_job_status(job_id)\n\n self.assertTrue(\"status\" in returnVal)\n self.assertEqual(returnVal[\"status\"], \"created\")\n\n self.mongo_util.get_job(job_id=job_id).delete()\n self.assertEqual(ori_job_count, Job.objects.count())\n\n def test_finish_job(self):\n\n with self.mongo_util.mongo_engine_connection():\n ori_job_count = Job.objects.count()\n job_id = self.create_job_rec()\n self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n job = self.mongo_util.get_job(job_id=job_id)\n self.assertEqual(job.status, \"created\")\n self.assertFalse(job.finished)\n\n runner = self.getRunner()\n runner._test_job_permissions = MagicMock(return_value=True)\n runner.catalog.log_exec_stats = MagicMock(return_value=True)\n\n # test missing job_id input\n with self.assertRaises(ValueError) as context:\n logging.info(\"Finish Job Case 0 Raises Error\")\n runner.finish_job(None)\n self.assertEqual(\"Please provide valid job_id\", str(context.exception))\n\n # test finish job with invalid status\n with self.assertRaises(ValueError) as context:\n logging.info(\"Finish Job Case 1 Raises Error\")\n runner.finish_job(job_id)\n self.assertIn(\"Unexpected job status\", str(context.exception))\n\n # update job status to running\n\n runner.start_job(job_id=job_id, skip_estimation=True)\n\n # self.mongo_util.update_job_status(job_id=job_id, status=Status.running.value)\n # job.running = datetime.datetime.utcnow()\n # job.save()\n\n # test finish job without error\n job_output = dict()\n job_output[\"version\"] = \"1\"\n job_output[\"id\"] = \"5d54bdcb9b402d15271b3208\" # A valid objectid\n job_output[\"result\"] = {\"output\": \"output\"}\n logging.info(\"Case2 : Finish a running job\")\n\n print(f\"About to finish job {job_id}. 
The job status is currently\")\n        print(runner.get_job_status(job_id))\n        runner.finish_job(job_id, job_output=job_output)\n        print(\"Job is now finished, status is\")\n        print(runner.get_job_status(job_id))\n\n        job = self.mongo_util.get_job(job_id=job_id)\n        self.assertEqual(job.status, Status.completed.value)\n        self.assertFalse(job.errormsg)\n        self.assertTrue(job.finished)\n        # if job_output not a dict#\n        # job_output2 = job.job_output.to_mongo().to_dict()\n        job_output2 = job.job_output\n        self.assertEqual(job_output2[\"version\"], \"1\")\n        self.assertEqual(str(job_output2[\"id\"]), job_output[\"id\"])\n\n        # update finished status to running\n        with self.assertRaises(InvalidStatusTransitionException):\n            self.mongo_util.update_job_status(\n                job_id=job_id, status=Status.running.value\n            )\n\n    def test_finish_job_with_error_message(self):\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            job = self.mongo_util.get_job(job_id=job_id)\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            runner = self.getRunner()\n            runner._send_exec_stats_to_catalog = MagicMock(return_value=True)\n            runner.catalog.log_exec_stats = MagicMock(return_value=True)\n            runner._test_job_permissions = MagicMock(return_value=True)\n\n            with self.assertRaises(InvalidStatusTransitionException):\n                runner.finish_job(job_id, error_message=\"error message\")\n\n            runner.start_job(job_id=job_id, skip_estimation=True)\n            runner.finish_job(job_id, error_message=\"error message\")\n\n            job = self.mongo_util.get_job(job_id=job_id)\n\n            self.assertEqual(job.status, \"error\")\n            self.assertEqual(job.errormsg, \"error message\")\n            self.assertEqual(job.error_code, 1)\n            self.assertIsNone(job.error)\n            self.assertTrue(job.finished)\n\n            with self.mongo_util.mongo_engine_connection():\n                job_id = runner.update_job_status(\n                    job_id, \"running\"\n                )  # put job back to running status\n\n            error = {\n                \"message\": \"error message\",\n                \"code\": -32000,\n                \"name\": \"Server error\",\n                \"error\": \"\"\"Traceback (most recent call last):\\n  File \"/kb/module/bin/../lib/simpleapp/simpleappServer.py\"\"\",\n            }\n\n            runner.finish_job(\n                job_id, error_message=\"error message\", error=error, error_code=0\n            )\n\n            job = self.mongo_util.get_job(job_id=job_id)\n\n            self.assertEqual(job.status, \"error\")\n            self.assertEqual(job.errormsg, \"error message\")\n            self.assertEqual(job.error_code, 0)\n            self.assertCountEqual(job.error, error)\n\n            self.mongo_util.get_job(job_id=job_id).delete()\n            self.assertEqual(ori_job_count, Job.objects.count())\n\n    def test_start_job(self):\n\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n            job = self.mongo_util.get_job(job_id=job_id)\n            self.assertEqual(job.status, \"created\")\n            self.assertFalse(job.finished)\n            self.assertFalse(job.running)\n            self.assertFalse(job.estimating)\n\n            runner = self.getRunner()\n            runner._test_job_permissions = MagicMock(return_value=True)\n\n            # test missing job_id input\n            with self.assertRaises(ValueError) as context:\n                runner.start_job(None)\n            self.assertEqual(\"Please provide valid job_id\", str(context.exception))\n\n            # start a created job, set job to estimation status\n            runner.start_job(job_id, skip_estimation=False)\n\n            job = self.mongo_util.get_job(job_id=job_id)\n            self.assertEqual(job.status, \"estimating\")\n            self.assertFalse(job.running)\n            self.assertTrue(job.estimating)\n\n            # start an estimating 
job, set job to running status\n runner.start_job(job_id)\n\n job = self.mongo_util.get_job(job_id=job_id)\n self.assertEqual(job.status, \"running\")\n self.assertTrue(job.running)\n self.assertTrue(job.estimating)\n\n # test start a job with invalid status\n with self.assertRaises(ValueError) as context:\n runner.start_job(job_id)\n self.assertIn(\"Unexpected job status\", str(context.exception))\n\n self.mongo_util.get_job(job_id=job_id).delete()\n self.assertEqual(ori_job_count, Job.objects.count())\n\n @requests_mock.Mocker()\n def test_check_job_global_perm(self, rq_mock):\n rq_mock.add_matcher(\n _run_job_adapter(\n ws_perms_info={\"user_id\": self.user_id, \"ws_perms\": {self.ws_id: \"n\"}},\n ws_perms_global=[self.ws_id],\n user_roles=[],\n )\n )\n with self.mongo_util.mongo_engine_connection():\n ori_job_count = Job.objects.count()\n job_id = self.create_job_rec()\n self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n job = self.mongo_util.get_job(job_id=job_id)\n self.assertEqual(job.status, \"created\")\n self.assertFalse(job.finished)\n self.assertFalse(job.running)\n self.assertFalse(job.estimating)\n\n # test check_job\n runner = self.getRunner()\n job_state = runner.check_job(job_id)\n json.dumps(job_state) # make sure it's JSON serializable\n self.assertTrue(validate_job_state(job_state))\n self.assertEqual(job_state[\"status\"], \"created\")\n self.assertEqual(job_state[\"wsid\"], self.ws_id)\n\n # test globally\n job_states = runner.check_workspace_jobs(self.ws_id)\n self.assertTrue(job_id in job_states)\n self.assertEqual(job_states[job_id][\"status\"], \"created\")\n\n # now test with a different user\n other_method_runner = SDKMethodRunner(\n self.cfg, user_id=\"some_other_user\", token=\"other_token\"\n )\n job_states = other_method_runner.check_workspace_jobs(self.ws_id)\n self.assertTrue(job_id in job_states)\n self.assertEqual(job_states[job_id][\"status\"], \"created\")\n\n @requests_mock.Mocker()\n def test_check_job_ok(self, rq_mock):\n rq_mock.add_matcher(\n _run_job_adapter(\n ws_perms_info={\"user_id\": self.user_id, \"ws_perms\": {self.ws_id: \"a\"}},\n user_roles=[],\n )\n )\n with self.mongo_util.mongo_engine_connection():\n ori_job_count = Job.objects.count()\n job_id = self.create_job_rec()\n self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n job = self.mongo_util.get_job(job_id=job_id)\n self.assertEqual(job.status, \"created\")\n self.assertFalse(job.finished)\n self.assertFalse(job.running)\n self.assertFalse(job.estimating)\n\n runner = self.getRunner()\n runner._test_job_permissions = MagicMock(return_value=True)\n\n # test missing job_id input\n with self.assertRaises(ValueError) as context:\n runner.check_job(None)\n self.assertEqual(\"Please provide valid job_id\", str(context.exception))\n\n # test check_job\n job_state = runner.check_job(job_id)\n json.dumps(job_state) # make sure it's JSON serializable\n self.assertTrue(validate_job_state(job_state))\n self.assertEqual(job_state[\"status\"], \"created\")\n self.assertEqual(job_state[\"wsid\"], self.ws_id)\n\n # test check_job with projection\n job_state = runner.check_job(job_id, projection=[\"status\"])\n self.assertFalse(\"status\" in job_state.keys())\n self.assertEqual(job_state[\"wsid\"], self.ws_id)\n\n # test check_jobs\n job_states = runner.check_jobs([job_id])\n json.dumps(job_states) # make sure it's JSON serializable\n self.assertTrue(validate_job_state(job_states[job_id]))\n self.assertTrue(job_id in job_states)\n self.assertEqual(job_states[job_id][\"status\"], 
\"created\")\n self.assertEqual(job_states[job_id][\"wsid\"], self.ws_id)\n\n # test check_jobs with projection\n job_states = runner.check_jobs([job_id], projection=[\"wsid\"])\n self.assertTrue(job_id in job_states)\n self.assertFalse(\"wsid\" in job_states[job_id].keys())\n self.assertEqual(job_states[job_id][\"status\"], \"created\")\n\n # test check_workspace_jobs\n job_states = runner.check_workspace_jobs(self.ws_id)\n for job_id in job_states:\n self.assertTrue(job_states[job_id])\n json.dumps(job_states) # make sure it's JSON serializable\n self.assertTrue(job_id in job_states)\n self.assertEqual(job_states[job_id][\"status\"], \"created\")\n self.assertEqual(job_states[job_id][\"wsid\"], self.ws_id)\n\n # test check_workspace_jobs with projection\n job_states = runner.check_workspace_jobs(self.ws_id, projection=[\"wsid\"])\n self.assertTrue(job_id in job_states)\n self.assertFalse(\"wsid\" in job_states[job_id].keys())\n self.assertEqual(job_states[job_id][\"status\"], \"created\")\n\n with self.assertRaises(PermissionError) as e:\n job_states = runner.check_workspace_jobs(1234)\n self.assertIn(\n f\"User {self.user_id} does not have permission to read jobs in workspace {1234}\",\n str(e.exception),\n )\n\n self.mongo_util.get_job(job_id=job_id).delete()\n self.assertEqual(ori_job_count, Job.objects.count())\n\n @staticmethod\n def create_job_from_job(job, new_job_id):\n j = Job()\n j.id = new_job_id\n j.wsid = job.wsid\n j.user = job.user\n j.authstrat = job.authstrat\n j.status = job.status\n j.finished = new_job_id.generation_time.timestamp()\n j.job_input = job.job_input\n return j\n\n def replace_job_id(self, job1, new_id):\n with self.mongo_util.mongo_engine_connection():\n job2 = self.create_job_from_job(job1, new_id)\n job2.save()\n print(\"Saved job with id\", job2.id, job2.id.generation_time)\n job1.delete()\n\n # flake8: noqa: C901\n @patch(\"lib.execution_engine2.utils.Condor.Condor\", autospec=True)\n def test_check_jobs_date_range(self, condor_mock):\n user_name = \"wsadmin\"\n\n runner = self.getRunner()\n runner.workspace_auth = MagicMock()\n runner.auth.get_user = MagicMock(return_value=user_name)\n runner.is_admin = True\n runner._is_admin = MagicMock(return_value=True)\n\n runner.workspace_auth.can_read = MagicMock(return_value=True)\n runner.get_permissions_for_workspace = MagicMock(return_value=True)\n runner._get_module_git_commit = MagicMock(return_value=\"git_commit_goes_here\")\n runner.get_condor = MagicMock(return_value=condor_mock)\n # ctx = {\"user_id\": self.user_id, \"wsid\": self.ws_id, \"token\": self.token}\n job = get_example_job().to_mongo().to_dict()\n job[\"method\"] = job[\"job_input\"][\"app_id\"]\n job[\"app_id\"] = job[\"job_input\"][\"app_id\"]\n\n si = submission_info(clusterid=\"test\", submit=job, error=None)\n condor_mock.run_job = MagicMock(return_value=si)\n\n job_id1 = runner.run_job(params=job)\n job_id2 = runner.run_job(params=job)\n job_id3 = runner.run_job(params=job)\n job_id4 = runner.run_job(params=job)\n job_id5 = runner.run_job(params=job)\n job_id6 = runner.run_job(params=job)\n time.sleep(1)\n\n new_job_ids = []\n\n now = datetime.utcnow()\n last_month = now - timedelta(days=30)\n last_month_and_1_hour = now - timedelta(days=30) - timedelta(hours=1)\n\n last_week = now - timedelta(days=7)\n yesterday = now - timedelta(days=1)\n tomorrow = now + timedelta(days=1)\n day_after = now + timedelta(days=2)\n\n with self.mongo_util.mongo_engine_connection():\n # Last Month\n job = Job.objects.with_id(job_id1) # type : Job\n 
new_id_last_month = ObjectId.from_datetime(last_month)\n            print(last_month, new_id_last_month, new_id_last_month.generation_time)\n\n            print(\"About to replace job id\")\n            print(job)\n            print(new_id_last_month)\n            self.replace_job_id(job, new_id_last_month)\n            new_job_ids.append(str(new_id_last_month))\n\n            # Last week\n            job = Job.objects.with_id(job_id2)  # type : Job\n            new_id_last_week = ObjectId.from_datetime(last_week)\n            self.replace_job_id(job, new_id_last_week)\n            new_job_ids.append(str(new_id_last_week))\n\n            # Yesterday\n            job = Job.objects.with_id(job_id3)  # type : Job\n            new_id_yesterday = ObjectId.from_datetime(yesterday)\n            self.replace_job_id(job, new_id_yesterday)\n            new_job_ids.append(str(new_id_yesterday))\n\n            # Now\n            job = Job.objects.with_id(job_id4)  # type : Job\n            new_id_now = ObjectId.from_datetime(now)\n            self.replace_job_id(job, new_id_now)\n            new_job_ids.append(str(new_id_now))\n\n            # Tomorrow\n            job = Job.objects.with_id(job_id5)  # type : Job\n            new_id_tomorrow = ObjectId.from_datetime(tomorrow)\n            self.replace_job_id(job, new_id_tomorrow)\n            new_job_ids.append(str(new_id_tomorrow))\n\n            # Day After\n            job = Job.objects.with_id(job_id6)  # type : Job\n            new_id_day_after = ObjectId.from_datetime(day_after)\n            self.replace_job_id(job, new_id_day_after)\n            new_job_ids.append(str(new_id_day_after))\n\n        # JOB ID GETS GENERATED HERE\n        with self.mongo_util.mongo_engine_connection():\n            ori_job_count = Job.objects.count()\n            job_id = self.create_job_rec()\n            self.assertEqual(ori_job_count, Job.objects.count() - 1)\n\n        job = self.mongo_util.get_job(job_id=job_id)\n        self.assertEqual(job.status, \"created\")\n        self.assertFalse(job.finished)\n        self.assertFalse(job.running)\n        self.assertFalse(job.estimating)\n\n        runner.check_permission_for_job = MagicMock(return_value=True)\n        # runner.get_permissions_for_workspace = MagicMock(\n        #     return_value=SDKMethodRunner.WorkspacePermissions.ADMINISTRATOR\n        # )\n        runner.is_admin = MagicMock(return_value=True)\n\n        print(\n            \"Test case 1. Retrieving Jobs from last_week and tomorrow_max (yesterday and now jobs) \"\n        )\n        job_state = runner.check_jobs_date_range_for_user(\n            creation_end_time=str(tomorrow),\n            creation_start_time=last_week.timestamp(),  # test timestamp input\n            user=\"ALL\",\n        )\n        count = 0\n        for js in job_state[\"jobs\"]:\n            job_id = js[\"job_id\"]\n            print(\"Job is id\", job_id)\n            if job_id in new_job_ids:\n                count += 1\n                self.assertEqual(js[\"status\"], \"created\")\n                print(js[\"created\"])\n                print(type(js[\"created\"]))\n                date = SDKMethodRunner._check_and_convert_time(js[\"created\"])\n                ts = date\n                print(\n                    f\"Creation date {date}, LastWeek:{last_week}, Tomorrow:{tomorrow}\"\n                )\n                print(ts, last_week.timestamp())\n                self.assertTrue(float(ts) >= last_week.timestamp())\n                print(ts, tomorrow.timestamp())\n                self.assertTrue(float(ts) <= tomorrow.timestamp())\n        self.assertEqual(2, count)\n\n        print(\n            \"Test case 2A. 
Retrieving Jobs from last_month and tomorrow_max (last_month, last_week, yesterday and now jobs) \"\n )\n\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(\n tomorrow.timestamp()\n ), # test timestamp string input\n creation_start_time=last_month_and_1_hour, # test datetime input\n user=\"ALL\",\n )\n\n count = 0\n for js in job_state[\"jobs\"]:\n job_id = js[\"job_id\"]\n print(\"Job is id\", job_id)\n if job_id in new_job_ids:\n count += 1\n self.assertEqual(js[\"status\"], \"created\")\n date = SDKMethodRunner._check_and_convert_time(js[\"created\"])\n ts = date\n print(date, last_week, tomorrow)\n print(ts, last_week.timestamp(), tomorrow.timestamp())\n self.assertTrue(ts > last_month_and_1_hour.timestamp())\n self.assertTrue(ts < tomorrow.timestamp())\n self.assertEqual(4, count)\n\n print(\"Found all of the jobs\", len(new_job_ids))\n\n with self.assertRaises(Exception) as context:\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(yesterday),\n creation_start_time=str(tomorrow),\n user=\"ALL\",\n )\n self.assertEqual(\n \"The start date cannot be greater than the end date.\",\n str(context.exception),\n )\n\n print(\"Test case 2B. Same as above but with FAKE user (NO ADMIN) \")\n runner.is_admin = False\n runner._is_admin = MagicMock(return_value=False)\n with self.assertRaisesRegex(\n AuthError,\n \"You are not authorized to view all records or records for others.\",\n ) as error:\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=\"FAKE\",\n )\n print(\"Exception raised is\", error)\n\n print(\"Test case 2C. Same as above but with FAKE_TEST_USER + ADMIN) \")\n runner.is_admin = True\n runner._is_admin = MagicMock(return_value=True)\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=user_name,\n )\n\n count = 0\n for js in job_state[\"jobs\"]:\n job_id = js[\"job_id\"]\n print(\"Job is id\", job_id)\n if job_id in new_job_ids:\n count += 1\n self.assertEqual(js[\"status\"], \"created\")\n date = SDKMethodRunner._check_and_convert_time(js[\"created\"])\n ts = date\n print(date, last_week, tomorrow)\n print(ts, last_week.timestamp(), tomorrow.timestamp())\n self.assertTrue(ts > last_month_and_1_hour.timestamp())\n self.assertTrue(ts < tomorrow.timestamp())\n\n # May need to change this if other db entries get added\n self.assertEqual(4, count)\n\n print(\"Found all of the jobs\", len(new_job_ids))\n\n print(\"Test case 3. 
Assert Raises error\")\n\n with self.assertRaises(Exception) as context:\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(yesterday),\n creation_start_time=str(tomorrow),\n user=\"ALL\",\n )\n self.assertEqual(\n \"The start date cannot be greater than the end date.\",\n str(context.exception),\n )\n\n print(\"Test case 4, find the original job\")\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=user_name,\n )\n self.assertTrue(len(job_state[\"jobs\"][0].keys()) > 0)\n print(f\"Checking {job_id}\")\n\n found = False\n for job in job_state[\"jobs\"]:\n if job_id == job[\"job_id\"]:\n found = True\n\n if found is False:\n raise Exception(\"Didn't find the original job\")\n\n print(job_state)\n\n print(\"Test 5, find the original job, but with projections\")\n job_states = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=user_name,\n job_projection=[\"wsid\"],\n )\n job_state_with_proj = None\n for job in job_states[\"jobs\"]:\n if job_id == job[\"job_id\"]:\n job_state_with_proj = job\n\n example_job_stat = {\n \"_id\": \"5d892ede9ea3d7d3b824dbff\",\n \"authstrat\": \"kbaseworkspace\",\n \"wsid\": 9999,\n \"updated\": \"2019-09-23 20:45:19.468032\",\n \"job_id\": \"5d892ede9ea3d7d3b824dbff\",\n \"created\": \"2019-09-23 20:45:18+00:00\",\n }\n\n required_headers = list(example_job_stat.keys())\n required_headers.append(\"wsid\")\n\n for member in required_headers:\n self.assertIn(member, job_state_with_proj)\n self.assertNotIn(\"status\", job_state_with_proj)\n\n print(\"Test 6a, find the original job, but with projections and filters\")\n job_state = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=\"ALL\",\n job_projection=[\"wsid\", \"status\"],\n job_filter={\"wsid\": 9999},\n )\n\n for record in job_state[\"jobs\"]:\n\n print(record)\n if record[\"wsid\"] != 9999:\n raise Exception(\"Only records with wsid 9999 should be allowed\")\n self.assertIn(\"wsid\", record)\n self.assertIn(\"status\", record)\n self.assertNotIn(\"service_ver\", record)\n print(\"job state is\", \"len is\", len(job_state[\"jobs\"]))\n\n self.assertTrue(len(job_state[\"jobs\"]) >= 1)\n\n print(\"Test 6b, find the original job, but with projections and filters\")\n job_state2 = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=\"ALL\",\n job_projection=[\"wsid\", \"status\"],\n job_filter=[\"wsid=123\"],\n )\n\n for record in job_state2[\"jobs\"]:\n\n if record[\"wsid\"] != 123:\n print(record)\n print(\"ID IS\", record[\"wsid\"])\n raise Exception(\"Only records with wsid 123 should be allowed\")\n self.assertIn(\"wsid\", record)\n self.assertIn(\"status\", record)\n self.assertNotIn(\"service_ver\", record)\n\n print(len(job_state2[\"jobs\"]))\n self.assertTrue(4 >= len(job_state2[\"jobs\"]) > 0)\n\n print(\n \"Test 7, find same jobs as test 2 or 3, but also filter, project, and limit\"\n )\n job_state_limit = runner.check_jobs_date_range_for_user(\n creation_end_time=str(tomorrow),\n creation_start_time=str(last_month_and_1_hour),\n user=\"ALL\",\n job_projection=[\"wsid\", \"status\"],\n job_filter=[\"wsid=123\"],\n limit=2,\n )\n\n self.assertTrue(2 >= len(job_state_limit[\"jobs\"]) > 0)\n\n print(\n \"Test 8, ascending and descending (maybe should 
verify jobs count > 2)\"\n        )\n        job_state_limit_asc = runner.check_jobs_date_range_for_user(\n            creation_end_time=str(tomorrow),\n            creation_start_time=str(last_month_and_1_hour),\n            user=\"ALL\",\n            job_projection=[\"wsid\", \"status\"],\n            ascending=\"True\",\n        )\n\n        epoch = datetime.utcfromtimestamp(0)\n\n        job_id_temp = str(ObjectId.from_datetime(epoch))\n        for item in job_state_limit_asc[\"jobs\"]:\n            job_id = item[\"job_id\"]\n            if ObjectId(job_id) > ObjectId(job_id_temp):\n                job_id_temp = job_id\n            else:\n                raise Exception(\n                    \"Not ascending\"\n                    + \"JobIdPrev:\"\n                    + str(job_id_temp)\n                    + \"JobIdNext:\"\n                    + str(job_id)\n                )\n\n        job_state_limit_desc = runner.check_jobs_date_range_for_user(\n            creation_end_time=str(tomorrow),\n            creation_start_time=str(last_month_and_1_hour),\n            user=\"ALL\",\n            job_projection=[\"wsid\", \"status\"],\n            ascending=\"False\",\n        )\n\n        # TimeDelta Over 9999 days\n        job_id_temp = str(ObjectId.from_datetime(now + timedelta(days=9999)))\n\n        for item in job_state_limit_desc[\"jobs\"]:\n            job_id = item[\"job_id\"]\n            if ObjectId(job_id) < ObjectId(job_id_temp):\n                job_id_temp = job_id\n            else:\n                raise Exception(\n                    \"Not Descending\"\n                    + \"JobIdPrev:\"\n                    + str(job_id_temp)\n                    + \"JobIdNext:\"\n                    + str(job_id)\n                )\n\n        for key in job_state_limit_desc.keys():\n            print(key)\n            print(job_state_limit_desc[key])\n","sub_path":"test/ee2_SDKMethodRunner_test.py","file_name":"ee2_SDKMethodRunner_test.py","file_ext":"py","file_size_in_byte":52928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"405758079","text":"# largest item in a list\n'''\nmyl = [10, 3, 17, 8, 1]\nnum = 0\n\nfor x in myl:\n    if x > num:\n        num = x\nprint(num) \n'''\n\n# 2D list\n\nmat = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\n\n# items are accessed on a row and item number basis\n# 1 will be row 0, item 0 and so on...\n# we can modify a matrix value using same reference\n'''\nprint(mat[0][0]) # This prints 1\nprint(mat[2][2]) # This prints 9\n'''\n# To print a whole list\n'''\nfor row in mat:\n    for x in row:\n        print(x)\n'''\n\n# List Methods/ List functions\n'''\n.append\n.insert(index, value)\n.remove(value)\n.clear # removes all items from the list\n.pop # removes last item\n.index(value) # returns index of an item in the list\n.count(value) # returns number of times an item exists in the list\n.sort()\n.reverse() # reverses the list\n.copy() # copies a list\n'''\n# remove duplicates in a list\n\nls = [1, 5, 3, 8, 5, 6, 7]\n\n'''\n#### Bad logic\nnumc = 0\nfor x in ls:\n    num = ls.count(x)\n    if num > 1:\n        for i in num:\n            ls.remove(num) \n'''\n\n# Good logic\n\nuniques = []\n\nfor x in ls:\n    if x not in uniques:\n        uniques.append(x)\nprint(uniques)\n\n","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"211951813","text":"# Modified from original in https://www.py4e.com/code3.zip according to\n# problem12.1.py and for experimentation\nimport socket\n\nmysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nmysock.connect(('data.pr4e.org', 80))\ncmd = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\\r\\n\\r\\n'.encode()\nmysock.send(cmd)\n# counter = 0\nwhile True:\n    data = mysock.recv(512)\n    if len(data) < 1:\n        break\n#    print(\"pass\", counter, data.decode(),end='')\n    print(data.decode(),end='')\n#    counter += 
1\nmysock.close()\n","sub_path":"3of5_python_to_access_web_data/socket1.py","file_name":"socket1.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"580298367","text":"# no input\n\ndef countdown(n):\n    if n > 0:\n        for i in range(n, 0, -1):\n            print(i)\n    elif n == 0:\n        print(\"An input greater than 0 is required to count down.\")\n\ncountdown(0)\ncountdown(10)","sub_path":"Python/SWEA/Learn-Course/2_ProgrammingBeginner/Python Programming Beginner(1)/6329.py","file_name":"6329.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"278682560","text":"\"\"\"\nCompute the query coverage of an alignment file; the coverage\npercentage is reported through the process exit code.\n\"\"\"\nimport sys\n\nAlignmentFile = sys.argv[1]\n\nfile = open(AlignmentFile,'r')\n\ndata = file.readlines()\n\nallAlign = []\n\nfor x in data:\n    temp = x\n    if not(temp.startswith('#') or temp.startswith(\"//\") or temp.startswith(\"\\n\")):\n        allAlign.append(temp.rstrip())\n\ntemp = allAlign[0].split()\nlength = int(temp[0])\nalignment = temp[1]\nfor x in allAlign[1:]:\n    temp = x.split()\n    alignment+=temp[1]\ntargetLen = len(alignment)\nqueryLen = 0\nfor c in alignment:\n    if c.isupper():\n        queryLen+=1.0\ncoverage = queryLen/length\nfile.close()\n\nsys.exit(int(coverage*100))\n","sub_path":"PipelineScripts/queryCoverageCalculator.py","file_name":"queryCoverageCalculator.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"281064387","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\"\"\"\r\n# @Time : 2019/9/9 9:55\r\n# @Author : peng.wang\r\n# @Email : WangPeng4@sfmail.sf-express.com\r\n# @FileName: model.py\r\n# @ProjectName :Facility_Location_FangTai\r\n\"\"\"\r\n\r\nfrom gurobipy import *\r\nfrom utils.misc import Logger\r\nimport pandas as pd\r\nfrom collections import defaultdict\r\n# define the log file\r\nlog = Logger(log_path='../log').logger\r\n\r\n# define the facility location problem\r\n\r\nYEAR_DAY = 365\r\n\r\n\r\nclass FacilityLocation(object):\r\n    \"\"\"\r\n    this class consists of the attributes for problem construction,\r\n    some utility functions for post-processing,\r\n    and one key function that builds the detailed model\r\n    \"\"\"\r\n    def __init__(self, data, config):\r\n        \"\"\"\r\n\r\n        :param data: class of data providing all data used\r\n        :param config: class of config providing all configuration used\r\n        (self.model will hold the optimized facility location model)\r\n\r\n        \"\"\"\r\n        self.data_class = data\r\n        self.config = config\r\n        self.model = None\r\n        self.rdc_open = None\r\n        self.f_c = None\r\n        self.cdc_rdc_category = None\r\n\r\n    def facility_location(self):\r\n        \"\"\"\r\n        the function builds a facility location optimization model which takes self.data_class as input\r\n        and returns the optimized model\r\n        :return: model\r\n        \"\"\"\r\n        \"\"\"######## list the parameters ######### \"\"\"\r\n        # the current CDCs and the CDCs that must be used\r\n        cdc_use = self.data_class.cdc_use\r\n        # the current RDCs and the RDCs that must be used\r\n        rdc_current = self.data_class.rdc_current\r\n        rdc_use = self.data_class.rdc_use\r\n        # candidate list of potential locations of CDC, RDC and customer list\r\n        customer = self.data_class.customer\r\n        cdc_cand = self.data_class.cdc_cand\r\n        cdc_category_capacity = self.data_class.cdc_category_capacity  # cdc capacity\r\n        rdc_capacity = self.data_class.rdc_capacity\r\n        rdc_cand = self.data_class.rdc_cand\r\n        
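# (added overview, hedged, inferred from the code below rather than a spec):\r\n        # the MIP roughly reads\r\n        #   min   trunk_ratio * cdc_ship + dist_discount * rdc_ship + storage + handling   (priority 2)\r\n        #   then  min   total rdc -> customer distance                                     (priority 1)\r\n        #   s.t.  every customer is assigned to exactly one open RDC,\r\n        #         cdc -> rdc category flows equal the demand assigned to each RDC,\r\n        #         and every CDC respects its per-category capacity.\r\n        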
category_info = self.data_class.category_info\r\n        # parameters\r\n        trunk_price = self.data_class.trunk_price\r\n        distribution_price = self.data_class.distribution_price\r\n        demand = self.data_class.demand\r\n        category_list = self.config.category_list  # SKU category\r\n        num_rdc = self.config.num_rdc\r\n        \"\"\"######## declare the model ######### \"\"\"\r\n        model = Model('facility_location_WL')\r\n\r\n        \"\"\"######## define the decision variables ######### \"\"\"\r\n        # the indicator of open status of CDC\r\n        cdc_open = model.addVars(cdc_cand, vtype=GRB.BINARY, name='cdc_open')\r\n        # the indicator of open status of RDC\r\n        rdc_open = model.addVars(rdc_cand, vtype=GRB.BINARY, name='rdc_open')\r\n        # indicator that a customer's demand is supplied by a given RDC\r\n        f_c = model.addVars(rdc_cand, customer, vtype=GRB.BINARY, name='rdc_customer_category')\r\n        # quantity of specific SKU category of demand of RDC supplied by CDC\r\n        cdc_rdc_category = model.addVars(cdc_cand, rdc_cand, category_list, vtype=GRB.CONTINUOUS,\r\n                                         name='cdc_rdc_category')\r\n        # the indicator of reachability of SLA\r\n        # sla_b = model.addVars(customer_2b, vtype=GRB.BINARY, name='sla_b')\r\n        # sla = model.addVars(customer, vtype=GRB.BINARY, name='sla_c')\r\n        \"\"\"######## define the constraints ######### \"\"\"\r\n        # the current cdc and rdc should be used constraints\r\n        if self.config.use_location_constr_open:\r\n            model.addConstrs((cdc_open[cdc_name] == 1 for cdc_name in cdc_use), name='cdc_current_constr')\r\n            model.addConstrs((rdc_open[rdc_name] == 1 for rdc_name in rdc_use), name='rdc_current_constr')\r\n        # if self.config.use_location_constr_open:\r\n        #     # rdc_use = ['20', '22', '23', '28', '29', '351', '378', '411', '431',\r\n        #     '451', '471','539','574', '594', '714', '7311',\r\n        # #     '772','791', '851', '871', '991']\r\n        #     rdc_use = ['28', '311','572','760', '711','24','371','7311','29','531']\r\n        #rdc_use = ['28', '311','572','760', '711','24','371'] # 7 warehouses\r\n        #rdc_use = ['28', '311','571','760'] # 4 warehouses\r\n        # rdc_use = [\"22\", \"24\", \"572\", \"769\", \"27\", \"28\" ]\r\n        # NOTE: this hard-coded list overrides the rdc_use loaded from data above\r\n        rdc_use = [\"24\", \"711\", \"531\", \"29\", \"572\", \"28\",\"760\",\"311\",\"7311\",\"371\"]\r\n        model.addConstrs((rdc_open[rdc_name] == 1 for rdc_name in rdc_use), name='rdc_use_constr')\r\n\r\n        # constraint on the number of open RDC locations\r\n        model.addConstr(sum(rdc_open[rdc_name] for rdc_name in rdc_cand) == num_rdc, name='num_rdc')\r\n        # the demand of each customer must be met (assigned to exactly one RDC)\r\n        model.addConstrs((f_c.sum('*', name) == 1 for name in customer), name='demand')\r\n        for rdc_name in rdc_cand:\r\n            model.addConstrs((f_c[rdc_name, name] <= rdc_open[rdc_name] for name in customer), name='f_c_cons')\r\n\r\n        customer_category_demand = dict()\r\n        for rdc_name in rdc_cand:\r\n            # rdc_demand_category = 0\r\n\r\n            # customer2 = 0\r\n            # customer3 = 0\r\n            # customer4 = 0\r\n            # customer5 = 0\r\n            # customer6 = 0\r\n            for category in category_list:\r\n                customer1 = 0\r\n                for name in customer:\r\n                    customer1 += f_c[rdc_name, name] * demand[name][category + \"_weight\"]\r\n                    # customer6 += f_c[rdc_name, name] * demand[name]['SKU6']\r\n                customer_category_demand[rdc_name, category] = customer1\r\n\r\n        for rdc_name in rdc_cand:\r\n            for category in category_list:\r\n                model.addConstr(cdc_rdc_category.sum('*', rdc_name, category) -\r\n                                customer_category_demand[rdc_name, category] == 0, name='demand_cons')\r\n\r\n        # for rdc_name in rdc_cand:\r\n        #     customer_demand = 0\r\n        #     for name in customer:\r\n        #         for 
category in category_list:\r\n # customer_demand += f_c[rdc_name, name] * demand[name][category]\r\n # model.addConstr(cdc_rdc_category.sum('*', rdc_name, category) - customer_demand == 0, name='demand_cons')\r\n\r\n # model.addConstrs((f_b.sum('*', name) == 1 for name in customer_2b), name='2b_demand')\r\n # model.addConstrs((f_c.sum('*', name) == 1 for name in customer), name='demand')\r\n for cdc_name in cdc_cand:\r\n for category in category_list:\r\n model.addConstr(cdc_rdc_category.sum(cdc_name, \"*\", category)\r\n <= cdc_open[cdc_name] * cdc_category_capacity[cdc_name][category], name='cdc_capacity')\r\n\r\n\r\n\r\n # model.update()\r\n # model.addConstrs((f_b[rdc_name, name] <= rdc_open[rdc_name] for name in customer_2b), name='f_b_cons')\r\n # # the capacity constrains\r\n # rr = self.config.rr\r\n # rdc_capacity = self.data_class.rdc_capacity\r\n # if self.config.capacity_constrs_open:\r\n # for rdc_name in rdc_cand:\r\n # model.addConstr(sum((f_c[rdc_name, name] * demand[name]['demand_sum'] * self.config.turnover_days_c\r\n # for name in customer)) +\r\n # sum((f_c[rdc_name, name] * demand[name]['demand_sum'] for name in\r\n # customer)) * rr * self.config.return_turnover_days\r\n # + sum((f_b[rdc_name, name] * demand_2b[name]['demand_sum'] * self.config.turnover_days_b\r\n # for name in customer_2b))\r\n # <= rdc_open[rdc_name] * rdc_capacity[rdc_name], name='capacity_constr')\r\n # # model.addConstr(sum((f_c[rdc_name, name] * demand[name]['demand_sum'] * self.config.turnover_days_c\r\n # # for name in customer)) +\r\n # # sum((f_c[rdc_name, name] * demand[name]['demand_sum'] for name in\r\n # # customer)) * rr * self.config.return_turnover_days\r\n # # + sum((f_b[rdc_name, name] * demand_2b[name]['demand_sum'] * self.config.turnover_days_b\r\n # # for name in customer_2b))\r\n # # >= rdc_capacity[rdc_name], name='capacity_constr_1')\r\n #\r\n # # the toC sla constrains\r\n # SLA_NUM = 200\r\n # if self.config.sla_2c_constrs_open:\r\n #\r\n # for name in customer:\r\n # model.addConstr(sum(f_c[rdc_name, name] * distribution_price[rdc_name, name]['time_median_toC']\r\n # for rdc_name in rdc_cand)\r\n # - sum(f_c[rdc_name, name] * distribution_price[rdc_name, name]['sla_toC']\r\n # for rdc_name in rdc_cand) <= SLA_NUM * (1 - sla[name]), name='sla_constr')\r\n # model.addConstr(sum(f_c[rdc_name, name] * distribution_price[rdc_name, name]['time_median_toC']\r\n # for rdc_name in rdc_cand)\r\n # - sum(f_c[rdc_name, name] * distribution_price[rdc_name, name]['sla_toC']\r\n # for rdc_name in rdc_cand) >= -SLA_NUM * sla[name], name='sla_constr_1')\r\n # model.addConstr(sum(sla[name] * demand[name]['demand_sum'] for name in customer) >=\r\n # self.config.P_c * sum(demand[name]['demand_sum'] for name in customer),\r\n # name='sla_constr_2')\r\n # # toB sla constrains\r\n # if self.config.sla_2b_constrs_open:\r\n # for name in customer_2b:\r\n # model.addConstr(sum(f_b[rdc_name, name] * distribution_price[rdc_name, name]['time_median_toB']\r\n # for rdc_name in rdc_cand)\r\n # - sum(f_b[rdc_name, name] * distribution_price[rdc_name, name]['sla_toB']\r\n # for rdc_name in rdc_cand) <= SLA_NUM * (1 - sla_b[name]), name='sla_constr_2b')\r\n # model.addConstr(sum(f_b[rdc_name, name] * distribution_price[rdc_name, name]['time_median_toB']\r\n # for rdc_name in rdc_cand)\r\n # - - sum(f_b[rdc_name, name] * distribution_price[rdc_name, name]['sla_toB']\r\n # for rdc_name in rdc_cand) >= -SLA_NUM * sla_b[name], name='sla_constr_2b_1')\r\n # model.addConstr(sum(sla_b[name] * 
demand_2b[name]['demand_sum'] for name in customer_2b) >=\r\n        #                     self.config.P_b * sum(demand_2b[name]['demand_sum'] for name in customer_2b),\r\n        #                     name='sla_constr_2b_2')\r\n\r\n\r\n        \"\"\"######## define the objective function ######### \"\"\"\r\n        # define the shipping cost of cdc\r\n        # intermediate variables\r\n        # v_cdc_rdc, v_cdc = self.cdc_rdc_temp_calc(cdc_rdc_category)\r\n        # i_rdc_C, q_rdc_C, w_rdc_C, i_rdc_c, q_rdc_c, w_rdc_c = self.rdc_c_temp_calc(rdc_customer_category)\r\n        # i_rdc_B, q_rdc_B, w_rdc_B, i_rdc_b, q_rdc_b, w_rdc_b = self.rdc_b_temp_calc(f_b)\r\n        # i_rdc_c, i_rdc, q_rdc_c, d_rdc_c, v_rdc = self.rdc_c_temp_calc_use(f_c)\r\n        cdc_shipping_cost, cdc_shipping_cost_d = self.cdc_shipping_cost(cdc_rdc_category)\r\n        rdc_shipping_cost, rdc_shipping_cost_d, rdc_shipping_distance = self.rdc_shipping_cost(f_c)\r\n        rdc_storage_cost, rdc_storage_cost_d = self.rdc_storage_cost(f_c)\r\n        rdc_inbound, rdc_outbound, rdc_inbound_cost, rdc_outbound_cost = self.rdc_handling_cost(f_c)\r\n        # rdc_capital_cost, rdc_capital_cost_d = self.capital_cost(i_rdc_c)\r\n        # cost = cdc_shipping_cost + cdc_shipping_cost_r + rdc_shipping_cost + rdc_storage_cost\\\r\n        #     + rdc_inbound + rdc_outbound + rdc_r_outbound + rdc_capital_cost\r\n        distance_penalty = 0.1\r\n        # time_penalty = 0.1\r\n        # time_used = self.rdc_time_calc(f_c, f_b)\r\n        cost = self.config.trunk_ratio * cdc_shipping_cost + self.config.dist_discount * rdc_shipping_cost \\\r\n            + rdc_storage_cost + rdc_inbound + rdc_outbound\r\n        distance = rdc_shipping_distance * distance_penalty\r\n        # set the objective functions (hierarchical: cost first, then distance)\r\n        model.setObjectiveN(cost, priority=2, index=0)\r\n        model.setObjectiveN(distance, priority=1, index=1)\r\n\r\n        \"\"\"######## solve the model ######### \"\"\"\r\n        model.optimize()\r\n\r\n        \"\"\"######## check the model ######### \"\"\"\r\n        if model.Status == GRB.OPTIMAL:\r\n            log.info('the facility location optimized successfully !')\r\n            log.info('the objective of the model is {}'.format(model.objVal))\r\n            self.model = model\r\n            log.info('dump model to file:\\n')\r\n            model.write('facility location ft.lp')\r\n\r\n            # get all solved variables\r\n            self.cdc_open = model.getAttr('x', cdc_open)\r\n            self.rdc_open = model.getAttr('x', rdc_open)\r\n            self.cdc_rdc_category = model.getAttr('x', cdc_rdc_category)\r\n            self.f_c = model.getAttr('x', f_c)\r\n\r\n            return model\r\n        else:\r\n            log.info('the model is infeasible!!!')\r\n            # diagnose the conflicting constraints via IIS before giving up\r\n            model.computeIIS()\r\n            if model.IISMinimal:\r\n                print('IIS is minimal\\n')\r\n            else:\r\n                print('IIS is not minimal \\n')\r\n            log.warn('the following constraints cannot be satisfied')\r\n            for c in model.getConstrs():\r\n                if c.IISConstr:\r\n                    log.info('%s' % c.constrName)\r\n            return 0\r\n\r\n    def cdc_shipping_cost(self, cdc_rdc_category):\r\n        \"\"\"\r\n        calculate the shipping cost from cdc to rdc\r\n        (reverse shipping from rdc back to cdc is not modeled here);\r\n        the cost is linear in shipped weight:\r\n            cost = sum over (rdc, category) of cdc_rdc_category[cdc, rdc, category] * weight_price[cdc, rdc]\r\n        (an earlier TL/FTL split based on truck volume is no longer used)\r\n\r\n        :param cdc_rdc_category: the weight of each category shipped from cdc to rdc\r\n        :return:\r\n            shipping_cost: total shipping cost from cdc to rdc\r\n            shipping_cost_d: shipping cost from cdc to rdc for each cdc\r\n        \"\"\"\r\n\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        trunk_price = self.data_class.trunk_price\r\n        cdc_cand = self.data_class.cdc_cand\r\n        category_list = self.config.category_list\r\n        category_info = self.data_class.category_info\r\n        shipping_cost = 0\r\n        shipping_cost_d = {}\r\n        for cdc_name in cdc_cand:\r\n            
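# (added example, hedged, with made-up numbers): 30000 kg/yr of one category\r\n            # from this cdc to an rdc at a 'weight_price' of 0.35 yuan/kg contributes\r\n            #     30000 * 0.35 = 10500 yuan/yr to ship_cdc_rdc;\r\n            # real figures come from self.data_class.trunk_price, not from this comment.\r\n            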
ship_cdc_rdc = 0\r\n            for rdc_name in rdc_cand:\r\n                for category in category_list:\r\n                    ship_cdc_rdc += cdc_rdc_category[cdc_name, rdc_name, category] \\\r\n                                    * trunk_price[cdc_name, rdc_name]['weight_price']\r\n\r\n            shipping_cost_d[cdc_name] = ship_cdc_rdc\r\n            shipping_cost += ship_cdc_rdc\r\n\r\n        return shipping_cost, shipping_cost_d\r\n\r\n\r\n    def rdc_shipping_cost(self, f_c):\r\n        \"\"\"\r\n        calculate the shipping cost from rdc to customer\r\n        (reverse shipping from customer back to rdc is omitted for now)\r\n        :param f_c: assignment of customers to RDCs\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        demand = self.data_class.demand\r\n        category_list = self.config.category_list\r\n        distribution_price = self.data_class.distribution_price\r\n        category_info = self.data_class.category_info\r\n        # declare variables\r\n        shipping_cost_d = {}\r\n        shipping_cost_c = 0\r\n        shipping_distance = 0\r\n\r\n        for rdc_name in rdc_cand:\r\n            ship_c = 0\r\n            distance = 0\r\n            for name in customer:\r\n                ship_c += f_c[rdc_name, name] * demand[name]['demand_weight_sum'] / self.config.weight_avg * \\\r\n                          ((self.config.weight_avg - distribution_price[rdc_name, name]['base_weight_qty'])\r\n                           * distribution_price[rdc_name, name]['weight_price_qty']\r\n                           + distribution_price[rdc_name, name]['base_price'])\r\n                distance += f_c[rdc_name, name]*distribution_price[rdc_name, name]['distance']\r\n            shipping_cost_d[rdc_name] = ship_c\r\n            # all RDC\r\n            shipping_cost_c += ship_c\r\n            shipping_distance += distance\r\n\r\n        return shipping_cost_c, shipping_cost_d, shipping_distance\r\n\r\n    # TODO: confirm the inventory turnover formula: turnover days = average inventory / average demand\r\n    def rdc_storage_cost(self, f_c):\r\n        \"\"\"\r\n        calculate the storage cost of rdc\r\n\r\n        :param f_c: assignment of customers to RDCs\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        demand = self.data_class.demand\r\n        category_list = self.config.category_list\r\n        rdc_cost_loc = self.data_class.rdc_cost_loc\r\n        category_info = self.data_class.category_info\r\n        # declare variables\r\n        rdc_storage_cost = 0\r\n        rdc_storage_cost_d = {}\r\n\r\n        for rdc_name in rdc_cand:\r\n            rdc_storage_c_sum = 0\r\n            for name in customer:\r\n                customer_area = 0\r\n                for category in category_list:\r\n                    customer_area += (demand[name][category] /\r\n                                      YEAR_DAY * category_info[category]['turn_over_day'] +\r\n                                      category_info[category]['safety_inventory']) * category_info[category]['area']\r\n\r\n                rdc_storage_c_sum += f_c[rdc_name, name] * (1 + self.config.area_ratio) * customer_area * 12 \\\r\n                                     * rdc_cost_loc[rdc_name]['monthly_rental_price']\r\n            rdc_storage_cost += rdc_storage_c_sum\r\n            rdc_storage_cost_d[rdc_name] = rdc_storage_c_sum\r\n\r\n        return rdc_storage_cost, rdc_storage_cost_d\r\n\r\n    def rdc_handling_cost(self, f_c):\r\n        \"\"\"\r\n        calculate the handling cost:\r\n        inbound cost and outbound cost\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        demand = self.data_class.demand\r\n        category_list = self.config.category_list\r\n        rdc_cost_loc = self.data_class.rdc_cost_loc\r\n        category_info = self.data_class.category_info\r\n        # declare\r\n        rdc_inbound = 0\r\n        rdc_outbound = 0\r\n\r\n        rdc_inbound_cost = {}\r\n        rdc_outbound_cost = {}\r\n\r\n        for rdc_name in rdc_cand:\r\n            inbound_c = 0\r\n            outbound_c = 0\r\n            for name in customer:\r\n                customer_weight = f_c[rdc_name, name] * demand[name]['demand_weight_sum']\r\n                inbound_c += customer_weight * 
rdc_cost_loc[rdc_name]['in_handling_cost']\r\n                outbound_c += customer_weight * rdc_cost_loc[rdc_name]['out_handling_cost']\r\n\r\n            rdc_inbound_cost[rdc_name] = inbound_c\r\n            rdc_outbound_cost[rdc_name] = outbound_c\r\n            rdc_inbound += inbound_c\r\n            rdc_outbound += outbound_c\r\n        return rdc_inbound, rdc_outbound, rdc_inbound_cost, rdc_outbound_cost\r\n\r\n    def capital_cost(self, i_rdc_c):\r\n        \"\"\"\r\n        calculate the capital cost tied up in the average inventory held at each rdc\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        uap = self.config.uap\r\n        rdc_capital_cost_d = {}\r\n        i_sum = 0\r\n        for rdc_name in rdc_cand:\r\n            i_rdc_sum = 0\r\n            for name in customer:\r\n                i_rdc_sum += i_rdc_c[rdc_name, name]\r\n            i_sum += i_rdc_sum\r\n            rdc_capital_cost_d[rdc_name] = i_rdc_sum * uap * self.config.annual_rate\r\n        rdc_capital_cost = i_sum * uap * self.config.annual_rate\r\n\r\n        return rdc_capital_cost, rdc_capital_cost_d\r\n\r\n    def rdc_time_calc(self, f_c, f_b):\r\n        \"\"\"\r\n        calculate the total median delivery time implied by the f_c / f_b assignments\r\n        :return:\r\n        \"\"\"\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        customer_2b = self.data_class.customer_2b\r\n        distribution_price = self.data_class.distribution_price\r\n        time_used = 0\r\n        for rdc_name in rdc_cand:\r\n            time_used_temp = 0\r\n            for name in customer:\r\n                time_used_temp += f_c[rdc_name, name] * distribution_price[name, rdc_name]['time_median_toC']\r\n            for name_b in customer_2b:\r\n                time_used_temp += f_b[rdc_name, name_b] * distribution_price[name_b, rdc_name]['time_median_toB']\r\n            time_used += time_used_temp\r\n        return time_used\r\n\r\n    # the intermediate variables should include v_cdc_rdc, w_rdc_c, i_rdc_c, q_rdc_c\r\n    # TODO: there are still problems here (the totals below do not differ across CDCs)\r\n    def cdc_rdc_temp_calc(self, q_rdc_C, w_rdc_C, q_rdc_B, w_rdc_B):\r\n        \"\"\"\r\n        calculate the intermediate variables from cdc to rdc\r\n        :return: q_cdc, w_cdc: yearly demand quantity/weight totals per cdc;\r\n            q_cdc_rdc, w_cdc_rdc: the same totals broken down per (cdc, rdc) pair\r\n        \"\"\"\r\n        q_cdc_rdc = {}\r\n        q_cdc = {}\r\n        w_cdc_rdc = {}\r\n        w_cdc = {}\r\n\r\n        # parameters\r\n\r\n        rdc_cand = self.data_class.rdc_cand\r\n        cdc_cand = self.data_class.cdc_cand\r\n\r\n        for cdc_name in cdc_cand:\r\n            q_cdc_tmp = 0\r\n            w_cdc_tmp = 0\r\n            for rdc_name in rdc_cand:\r\n                q_cdc_tmp += q_rdc_C[rdc_name]['less'] + q_rdc_C[rdc_name]['more'] + q_rdc_B[rdc_name]\r\n                w_cdc_tmp += w_rdc_C[rdc_name] + w_rdc_B[rdc_name]\r\n\r\n                q_cdc_rdc[cdc_name, rdc_name] = q_rdc_C[rdc_name]['less'] + q_rdc_C[rdc_name]['more'] + q_rdc_B[rdc_name]\r\n                w_cdc_rdc[cdc_name, rdc_name] = w_rdc_C[rdc_name] + w_rdc_B[rdc_name]\r\n            q_cdc[cdc_name] = q_cdc_tmp\r\n            w_cdc[cdc_name] = w_cdc_tmp\r\n\r\n        return q_cdc, w_cdc, q_cdc_rdc, w_cdc_rdc\r\n\r\n    def rdc_c_temp_calc(self, f_c):\r\n        \"\"\"\r\n        calculate the intermediate variables from rdc to c\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer\r\n        demand = self.data_class.demand\r\n        turnover_days_c = self.config.turnover_days_c\r\n        total_days = 365\r\n        # declare variables\r\n        q_rdc = {}\r\n        w_rdc_c = {}\r\n        w_rdc = {}\r\n        i_rdc_c = {}\r\n        i_rdc = {}\r\n        q_rdc_c = {}\r\n\r\n        for rdc_name in rdc_cand:\r\n            q_rdc_less = 0\r\n            q_rdc_more = 0\r\n            w_rdc_less = 0\r\n            w_rdc_more = 0\r\n            i_rdc_sum = 0\r\n\r\n            # toC customer\r\n            for name in customer:\r\n\r\n                q_rdc_less += f_c[rdc_name, name] * demand[name]['demand_L']\r\n                q_rdc_more += f_c[rdc_name, name] * demand[name]['demand_M']\r\n\r\n                w_rdc_less += f_c[rdc_name, name] * demand[name]['demand_L_weight']\r\n                w_rdc_more += f_c[rdc_name, name] * 
demand[name]['demand_M_weight']\r\n\r\n                i_rdc_tmp = (f_c[rdc_name, name] * demand[name]['demand_sum']) / total_days * turnover_days_c\r\n                q_rdc_c[rdc_name, name] = {'less': f_c[rdc_name, name] * demand[name]['demand_L'],\r\n                                           'more': f_c[rdc_name, name] * demand[name]['demand_M']\r\n                                           }\r\n                # TODO: confirm\r\n                w_rdc_c[rdc_name, name] = f_c[rdc_name, name] * demand[name]['demand_M_avg_weight']\r\n\r\n                i_rdc_sum += i_rdc_tmp\r\n                i_rdc_c[rdc_name, name] = i_rdc_tmp\r\n            i_rdc[rdc_name] = i_rdc_sum\r\n            q_rdc[rdc_name] = {'less': q_rdc_less,\r\n                               'more': q_rdc_more,\r\n                               }\r\n            w_rdc[rdc_name] = w_rdc_less + w_rdc_more\r\n\r\n        return i_rdc, q_rdc, w_rdc, i_rdc_c, q_rdc_c, w_rdc_c\r\n\r\n    def rdc_b_temp_calc(self, f_b):\r\n        \"\"\"\r\n        calculate the intermediate variables from rdc to B\r\n\r\n        :return:\r\n        \"\"\"\r\n        # parameters\r\n        total_days = 365\r\n        rdc_cand = self.data_class.rdc_cand\r\n        customer = self.data_class.customer_2b\r\n        demand = self.data_class.demand_2b\r\n        turnover_days_b = self.config.turnover_days_b\r\n\r\n        # declare variables\r\n        q_rdc = {}\r\n        q_rdc_b = {}\r\n        w_rdc_b = {}\r\n        w_rdc = {}\r\n        i_rdc_b = {}\r\n        i_rdc = {}\r\n        for rdc_name in rdc_cand:\r\n            q_rdc_tmp = 0\r\n            w_rdc_tmp = 0\r\n            i_rdc_tmp = 0\r\n            for name in customer:\r\n                q_rdc_tmp += f_b[rdc_name, name] * demand[name]['demand_sum']\r\n                w_rdc_tmp += f_b[rdc_name, name] * demand[name]['demand_weight_sum']\r\n                i_rdc_tmp += (f_b[rdc_name, name] * demand[name]['demand_sum']) / total_days * turnover_days_b\r\n\r\n                w_rdc_b[rdc_name, name] = f_b[rdc_name, name] * demand[name]['demand_weight_sum']\r\n                i_rdc_b[rdc_name, name] = (f_b[rdc_name, name] * demand[name]['demand_sum']) / total_days * turnover_days_b\r\n                q_rdc_b[rdc_name, name] = f_b[rdc_name, name] * demand[name]['demand_sum']\r\n\r\n            q_rdc[rdc_name] = q_rdc_tmp\r\n            w_rdc[rdc_name] = w_rdc_tmp\r\n            i_rdc[rdc_name] = i_rdc_tmp\r\n\r\n        return i_rdc, q_rdc, w_rdc, i_rdc_b, q_rdc_b, w_rdc_b\r\n\r\n    # post-process of cdc\r\n\r\n    def cdc_post_process(self):\r\n        \"\"\"\r\n        do the post-processing of cdc and output the detailed quantity of each item\r\n        :return:\r\n\r\n        \"\"\"\r\n        cdc_output = {}\r\n        df = pd.DataFrame()\r\n        cdc_cand = self.data_class.cdc_cand\r\n        rdc_cand = self.data_class.rdc_cand\r\n        cdc_rdc_category = self.cdc_rdc_category\r\n        category_list = self.config.category_list\r\n        # i_rdc_C, q_rdc_C, w_rdc_C, i_rdc_c, q_rdc_c, w_rdc_c = self.rdc_c_temp_calc(f_c)\r\n        # i_rdc_B, q_rdc_B, w_rdc_B, i_rdc_b, q_rdc_b, w_rdc_b = self.rdc_b_temp_calc(f_b)\r\n        # q_cdc, w_cdc, q_cdc_rdc, w_cdc_rdc = self.cdc_rdc_temp_calc(q_rdc_C, w_rdc_C, q_rdc_B, w_rdc_B)\r\n        cdc_shipping_cost, cdc_shipping_cost_d = self.cdc_shipping_cost(cdc_rdc_category)\r\n        m = 0\r\n        for cdc_name in cdc_cand:\r\n            cdc_output['CDC'] = cdc_name\r\n            cdc_output['shipping_cost'] = cdc_shipping_cost_d[cdc_name]\r\n            cdc_output['Quantity'] = sum(cdc_rdc_category[cdc_name, rdc_name, category]\r\n                                         for rdc_name in rdc_cand for category in category_list)\r\n            cdc_output['Quantity_SKU1'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU1']\r\n                                              for rdc_name in rdc_cand)\r\n            cdc_output['Quantity_SKU2'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU2']\r\n                                              for rdc_name in rdc_cand)\r\n            cdc_output['Quantity_SKU3'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU3']\r\n                                              for rdc_name in rdc_cand)\r\n            cdc_output['Quantity_SKU4'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU4']\r\n                                              for rdc_name in rdc_cand)\r\n            cdc_output['Quantity_SKU5'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU5']\r\n                                              for 
rdc_name in rdc_cand)\r\n # cdc_output['Quantity_SKU6'] = sum(cdc_rdc_category[cdc_name, rdc_name, 'SKU6']\r\n # for rdc_name in rdc_cand)\r\n df = df.append(pd.DataFrame(cdc_output, index=[m]))\r\n m = m+1\r\n\r\n return df, cdc_shipping_cost\r\n\r\n def rdc_post_process(self):\r\n \"\"\"\r\n do the post-processing of the RDCs\r\n :return:\r\n \"\"\"\r\n rdc_output = {}\r\n df = pd.DataFrame()\r\n # rdc_cand = self.data_class.rdc_cand\r\n rdc_cost_loc = self.data_class.rdc_cost_loc\r\n customer = self.data_class.customer\r\n demand = self.data_class.demand\r\n city_add = self.data_class.city_add\r\n category_list = self.config.category_list\r\n category_info = self.data_class.category_info\r\n f_c = self.f_c\r\n\r\n rdc_open = self.rdc_open\r\n # filter all items with rdc_open = 1 to get the valid RDCs\r\n rdc_open_valid = {k:v for k,v in rdc_open.items() if v==1}\r\n\r\n # v_cdc_rdc, v_cdc = self.cdc_rdc_temp_calc(f_c)\r\n rdc_shipping_cost, rdc_shipping_cost_d, rdc_shipping_distance = self.rdc_shipping_cost(f_c)\r\n rdc_storage_cost, rdc_storage_cost_d = self.rdc_storage_cost(f_c)\r\n rdc_inbound, rdc_outbound, rdc_inbound_cost, rdc_outbound_cost = self.rdc_handling_cost(f_c)\r\n # rdc_capital_cost, rdc_capital_cost_d = self.capital_cost(i_rdc_c)\r\n\r\n m = 0\r\n for rdc_name in rdc_open_valid:\r\n rdc_output['RDC'] = rdc_name\r\n rdc_output['City_name'] = city_add[rdc_name]['city']\r\n rdc_output['shipping_cost'] = rdc_shipping_cost_d[rdc_name]\r\n rdc_output['storage_cost'] = rdc_storage_cost_d[rdc_name]\r\n rdc_output['Inbound_Handling_Cost'] = rdc_inbound_cost[rdc_name]\r\n rdc_output['Outbound_Handling_Cost'] = rdc_outbound_cost[rdc_name]\r\n # rdc_output['Reverse_Handling_Cost'] = rdc_r_outbound_cost[rdc_name]\r\n rdc_output['Inventory'] = sum(f_c[rdc_name, name] * demand[name][category]/YEAR_DAY\r\n * category_info[category]['turn_over_day']\r\n + category_info[category]['safety_inventory']\r\n for name in customer for category in category_list)\r\n rdc_output['Area'] = sum((f_c[rdc_name, name]*demand[name][category]/YEAR_DAY\r\n * category_info[category]['turn_over_day']\r\n + category_info[category]['safety_inventory']) * category_info[category]['area']\r\n *(1+self.config.area_ratio)\r\n for name in customer for category in category_list)\r\n # rdc_output['Capital_Cost'] = rdc_capital_cost_d[rdc_name]\r\n rdc_output['Quantity'] = sum(f_c[rdc_name, name] * demand[name]['demand_sum']\r\n for name in customer)\r\n rdc_output['Total_Cost'] = rdc_output['shipping_cost'] + rdc_output['storage_cost']\r\n if rdc_output['Quantity'] > 0:\r\n rdc_output['Price_avg'] = rdc_output['Total_Cost'] / rdc_output['Quantity']\r\n rdc_output['shipment_avg'] = rdc_output['shipping_cost'] / rdc_output['Quantity']\r\n # rdc_output['Volume'] = v_rdc[rdc_name]\r\n\r\n df = df.append(pd.DataFrame(rdc_output, index=[m]))\r\n m = m+1\r\n\r\n # TODO:\r\n # rdc_inbound = 0\r\n # rdc_outbound = 0\r\n # rdc_r_outbound = 0\r\n # rdc_capital_cost = 0\r\n return df, rdc_shipping_cost, rdc_storage_cost, rdc_inbound, rdc_outbound\r\n\r\n def c_end_network(self):\r\n \"\"\"\r\n output the C-end (toC) network:\r\n customer, attributes of customer, rdc, attributes of rdc, service type,\r\n quantity, weight, inventory_quantity, sla, time\r\n :return:\r\n \"\"\"\r\n # parameters\r\n customer = self.data_class.customer\r\n demand = self.data_class.demand\r\n distribution_price = self.data_class.distribution_price\r\n city_add = self.data_class.city_add\r\n category_list = self.config.category_list\r\n # sla = self.data_class.c_sla\r\n
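# Worked example (hypothetical numbers, not model data) of the turnover-day
# inventory rule used in rdc_post_process above: with flow fraction f_c = 1.0,
# an annual demand of 73000 units, a 7-day turnover and 50 safety units,
#   inventory = 1.0 * 73000 / 365 * 7 + 50 = 1450 units,
# and with 0.002 area per unit and area_ratio = 0.25,
#   area = 1450 * 0.002 * (1 + 0.25) = 3.625.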
# define the network dict\r\n c_network = {}\r\n # get the rdc_open and f_c solution values\r\n rdc_open = self.rdc_open\r\n # filter all items with rdc_open = 1 to get the valid RDCs\r\n rdc_open_valid = {k: v for k, v in rdc_open.items() if v == 1}\r\n f_c = self.f_c\r\n category_info = self.data_class.category_info\r\n # loop\r\n k = 0\r\n df_c = pd.DataFrame()\r\n\r\n for rdc_name in rdc_open_valid.keys():\r\n c_network['RDC'] = rdc_name\r\n c_network['RDC_NAME'] = city_add[rdc_name]['city']\r\n c_network['RDC_LAT'] = city_add[rdc_name]['lat']\r\n c_network['RDC_LGT'] = city_add[rdc_name]['lgt']\r\n for c_name in customer:\r\n if f_c[rdc_name, c_name] >= 0.90:\r\n c_network['CUSTOMER'] = c_name\r\n c_network['QUANTITY'] = f_c[rdc_name, c_name] * demand[c_name]['demand_sum']\r\n\r\n c_network['WEIGHT'] = f_c[rdc_name, c_name] * demand[c_name]['demand_weight_sum']\r\n # c_network['SLA'] = f_c[rdc_name, c_name] * distribution_price[rdc_name, c_name]['sla_toC']\r\n c_network['CUSTOMER_LGT'] = city_add[c_name]['lgt']\r\n c_network['CUSTOMER_LAT'] = city_add[c_name]['lat']\r\n c_network['CUSTOMER_NAME'] = city_add[c_name]['city']\r\n # c_network['TIME'] = distribution_price[rdc_name, c_name]['time_median_toC']\r\n df_c = df_c.append(pd.DataFrame(c_network, index=[k]))\r\n k = k+1\r\n\r\n return df_c\r\n\r\n def cdc_rdc_network(self):\r\n \"\"\"\r\n output the CDC-to-RDC network:\r\n cdc, attributes of cdc, rdc, attributes of rdc,\r\n and the shipped quantity per SKU category\r\n :return:\r\n \"\"\"\r\n # parameters\r\n customer = self.data_class.customer\r\n cdc_cand = self.data_class.cdc_cand\r\n demand = self.data_class.demand\r\n distribution_price = self.data_class.distribution_price\r\n city_add = self.data_class.city_add\r\n category_list = self.config.category_list\r\n # sla = self.data_class.c_sla\r\n # define the network dict\r\n cdc_rdc_network = {}\r\n # get the rdc_open solution values\r\n rdc_open = self.rdc_open\r\n # filter all items with rdc_open > 0.5 to get the valid RDCs\r\n rdc_open_valid = {k: v for k, v in rdc_open.items() if v > 0.5}\r\n cdc_rdc_category = self.cdc_rdc_category\r\n category_info = self.data_class.category_info\r\n # loop\r\n k = 0\r\n df_c = pd.DataFrame()\r\n for cdc_name in cdc_cand:\r\n for rdc_name in rdc_open_valid.keys():\r\n # cdc_rdc_network['CDC'] = cdc_name\r\n # cdc_rdc_network['CDC_NAME'] = city_add[cdc_name]['city_name']\r\n # cdc_rdc_network['CDC_LAT'] = city_add[cdc_name]['lat']\r\n # cdc_rdc_network['CDC_LGT'] = city_add[cdc_name]['lgt']\r\n # cdc_rdc_network['RDC'] = rdc_name\r\n # cdc_rdc_network['RDC_NAME'] = city_add[rdc_name]['city_name']\r\n # cdc_rdc_network['RDC_LAT'] = city_add[rdc_name]['lat']\r\n # cdc_rdc_network['RDC_LGT'] = city_add[rdc_name]['lgt']\r\n # cdc_rdc_network[\"WEIGHT\"] = sum(cdc_rdc_category[cdc_name, rdc_name, category]\r\n # for category in category_list)\r\n category_demand_list = []\r\n for category in category_list:\r\n if cdc_rdc_category[cdc_name, rdc_name, category] >= 1:\r\n category_demand_list.append(cdc_rdc_category[cdc_name, rdc_name, category])\r\n else:\r\n category_demand_list.append(0)\r\n if sum(category_demand_list)>0:\r\n category_demand_list = [cdc_name, city_add[cdc_name]['lat'], city_add[cdc_name]['lgt'],\r\n rdc_name, city_add[rdc_name]['lat'], city_add[rdc_name]['lgt']]\\\r\n + category_demand_list\r\n df_c = df_c.append([category_demand_list])\r\n else:\r\n continue\r\n df_c.columns = ['CDC_Name', 'CDC_LAT', 'CDC_LGT', 'RDC_Name','RDC_LAT', 'RDC_LGT', 'SKU1','SKU2','SKU3','SKU4','SKU5']\r\n return 
df_c\r\n # def b_end_network(self):\r\n # \"\"\"\r\n # output the b end network\r\n # customer, attributes of customer, cdc, attributes of cdc, service type,\r\n # quantity, weight, inventory_quantity, sla, time\r\n # :return:\r\n # \"\"\"\r\n # # parameters\r\n # customer = self.data_class.customer_2b\r\n # demand = self.data_class.demand_2b\r\n # distribution_price = self.data_class.distribution_price\r\n # city_add = self.data_class.city_add\r\n # # sla = self.data_class.c_sla\r\n # # define the network dict\r\n # c_network = {}\r\n # # get the cdc_open, f_c\r\n # rdc_open = self.rdc_open\r\n # # filter all items that cdc_open = 1, get the valid\r\n # rdc_open_valid = {k: v for k, v in rdc_open.items() if v == 1}\r\n # cdc_rdc_category = self.cdc_rdc_category\r\n #\r\n # # loop\r\n # k = 0\r\n # df_b = pd.DataFrame()\r\n #\r\n # for rdc_name in rdc_open_valid.keys():\r\n # c_network['RDC'] = rdc_name\r\n # c_network['RDC_NAME'] = city_add[rdc_name]['city_name']\r\n # c_network['RDC_LAT'] = city_add[rdc_name]['lat']\r\n # c_network['RDC_LGT'] = city_add[rdc_name]['lgt']\r\n # for c_name in customer:\r\n # if f_b[rdc_name, c_name] >= 0.90:\r\n # c_network['CUSTOMER'] = c_name\r\n # c_network['QUANTITY'] = f_b[rdc_name, c_name] * demand[c_name]['demand_sum']\r\n # c_network['WEIGHT'] = f_b[rdc_name, c_name] * demand[c_name]['demand_weight_sum']\r\n # c_network['SLA'] = f_b[rdc_name, c_name] * distribution_price[rdc_name, c_name]['sla_toB']\r\n # c_network['CUSTOMER_LGT'] = city_add[c_name]['lgt']\r\n # c_network['CUSTOMER_LAT'] = city_add[c_name]['lat']\r\n # c_network['CUSTOMER_NAME'] = city_add[c_name]['city_name']\r\n # c_network['TIME'] = distribution_price[rdc_name, c_name]['time_median_toB']\r\n # df_b = df_b.append(pd.DataFrame(c_network, index=[k]))\r\n # k = k+1\r\n # else:\r\n # continue\r\n # # log.info('the df_c: \\n {}'.format(df_c))\r\n # return df_b\r\n\r\n\r\n\r\n\r\n","sub_path":"Facility_Location_WL/core/model_3.py","file_name":"model_3.py","file_ext":"py","file_size_in_byte":38404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"260413090","text":"from app.models import Collection\nfrom app.extends import db\n\n\nclass CollectionService:\n\n @staticmethod\n def add_link(title, link, category):\n\n collection = Collection(title=title, link=link, category=category)\n collection.save()\n\n @staticmethod\n def query_links():\n collections = Collection.query.order_by('id').all()\n\n return [collection.to_dict() for collection in collections]\n\n @staticmethod\n def query_links_page(pagenum, offset):\n\n collections = Collection.query.order_by('id')\n\n page_collection = collections.limit(pagenum).offset(offset).all()\n\n total = Collection.query.count()\n\n return [collection.to_dict() for collection in page_collection], total\n\n\n @staticmethod\n def delete_link_byid(link_id):\n\n link = Collection.query.get(link_id)\n link.delete()\n\n @staticmethod\n def query_link_byid(link_id):\n\n link = Collection.query.get(link_id)\n return link.to_dict()\n\n @staticmethod\n def update_link_byid(link_id, data):\n\n link = Collection.query.get(link_id)\n for k, v in data.items():\n if v is not None:\n setattr(link, k, v)\n\n db.session.add(link)\n db.session.commit()\n\n","sub_path":"app/services/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323398431","text":"import argparse\nimport 
os\nimport socket\nimport sys\nimport time\n\nimport sqlalchemy\n\ndef get_executor(dsn):\n engine = sqlalchemy.create_engine(dsn)\n connection = engine.raw_connection()\n cursor = connection.cursor()\n return cursor.execute\n\ndef get_rows(results):\n\n while not results.rowcount < 0:\n results.nextset()\n return results.fetchall()\n\ndef get_info():\n parser = argparse.ArgumentParser(description='Send SQL results to Graphite')\n parser.add_argument('--graphite-host', metavar='graphite-host', type=str, default=None, help='Host to send metrics to')\n parser.add_argument('--graphite-port', metavar='graphite-port', type=int, default=2003, help='Graphite port to send metrics to')\n parser.add_argument('--graphite-prefix', metavar='graphite-prefix', type=str, default='db', help='Prefix for metrics')\n parser.add_argument('--dsn', type=str, default=os.environ.get('S2G_DSN'), help='SQLAlchemy DSN for database connection')\n parser.add_argument('--timestamped-metric', action='store_true', help='Use 3rd column in query containing timestamp values instead of current timestamp')\n return parser.parse_args()\n\ndef run(graphite_host, graphite_port, graphite_prefix, timestamped, queries, executor):\n data = []\n now = time.time()\n sock = _socket_for_host_port(graphite_host, graphite_port)\n data = [get_rows(executor(q)) for q in queries]\n for result in data:\n for line in result:\n if timestamped:\n metric, value, timestamp = line[:3]\n metric = '{}.{} {} {:0.0f}\\n'.format(graphite_prefix, metric, value, timestamp)\n else:\n metric, value = line[:2]\n metric = '{}.{} {} {:0.0f}\\n'.format(graphite_prefix, metric, value, now)\n print (metric)\n sock.sendall(metric.encode())\n sock.close()\n\ndef _socket_for_host_port(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(10)\n sock.connect((host, port))\n sock.settimeout(None)\n return sock\n\n\ndef main():\n args = get_info()\n if args.dsn is None:\n print ('You must set your DSN in the environment variable `S2G_DSN` or the --dsn argument')\n sys.exit(1)\n else:\n print ('Using DSN: {}'.format(args.dsn))\n\n queries = sys.stdin.readlines()\n\n run(\n args.graphite_host,\n args.graphite_port,\n args.graphite_prefix,\n args.timestamped_metric,\n queries,\n get_executor(args.dsn),\n )\n\nif __name__ == '__main__':\n main()\n","sub_path":"sql_to_graphite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"578311948","text":"#!/usr/bin/env python3\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# Class holds default settings for json requests to Ghost -\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nimport ast\nimport os\nimport sys\n\nif sys.version_info[0] != 3:\n print(\"This script requires Python 3\")\n exit()\n\nimport requests\n\nimport jwt\nfrom datetime import datetime as date\nimport json\nimport subprocess\nfrom scp import SCPClient\nimport paramiko\nfrom GrafanaRequest import GrafanaRequest\nimport time\nfrom collections import Counter\n\n\nclass CSVReader:\n def read_csv(self,\n file,\n sep=','):\n df = open(file).read().split('\\n')\n rows = list()\n for x in df:\n if len(x) > 0:\n rows.append(x.split(sep))\n length = list(range(0, len(df[0])))\n columns = dict(zip(df[0], length))\n return rows\n\n def get_column(self,\n df,\n value):\n index = df[0].index(value)\n values = []\n for row in df[1:]:\n values.append(row[index])\n 
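# Example (hypothetical data): with df = [['name', 'score'], ['a', '1'], ['b', '2']],
# get_column(df, 'score') gathers ['1', '2'] in the loop above and returns it below.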
return values\n\n def get_columns(self, df, targets):\n target_index = []\n for item in targets:\n target_index.append(df[0].index(item))\n results = []\n for row in df:\n row_data = []\n for x in target_index:\n row_data.append(row[x])\n results.append(row_data)\n return results\n\n def to_html(self, df):\n html = ''\n html = html + ('<table>')\n for row in df:\n html = html + ('<tr>')\n for item in row:\n html = html + ('<td>%s</td>' % item)\n html = html + ('</tr>\n')\n html = html + ('</table>')\n return html\n\n def filter_df(self, df, column, expression, target):\n target_index = df[0].index(column)\n counter = 0\n targets = [0]\n for row in df:\n try:\n if expression == 'less than':\n if float(row[target_index]) <= target:\n targets.append(counter)\n counter += 1\n else:\n counter += 1\n if expression == 'greater than':\n if float(row[target_index]) >= target:\n targets.append(counter)\n counter += 1\n else:\n counter += 1\n except:\n counter += 1\n return list(map(df.__getitem__, targets))\n\n\nclass GhostRequest:\n def __init__(self,\n _ghost_json_host,\n _ghost_json_port,\n _api_token=None,\n _overwrite='false',\n debug_=False,\n die_on_error_=False):\n self.debug = debug_\n self.die_on_error = die_on_error_\n self.ghost_json_host = _ghost_json_host\n self.ghost_json_port = _ghost_json_port\n self.ghost_json_url = \"http://%s:%s/ghost/api/v3\" % (_ghost_json_host, _ghost_json_port)\n self.data = dict()\n self.data['overwrite'] = _overwrite\n self.ghost_json_login = self.ghost_json_url + '/admin/session/'\n self.api_token = _api_token\n self.images = list()\n self.pdfs = list()\n\n def encode_token(self):\n\n # Split the key into ID and SECRET\n key_id, secret = self.api_token.split(':')\n\n # Prepare header and payload\n iat = int(date.now().timestamp())\n\n header = {'alg': 'HS256', 'typ': 'JWT', 'kid': key_id}\n payload = {\n 'iat': iat,\n 'exp': iat + 5 * 60,\n 'aud': '/v3/admin/'\n }\n token = jwt.encode(payload, bytes.fromhex(secret), algorithm='HS256', headers=header)\n return token\n\n def create_post(self,\n title=None,\n text=None,\n tags=None,\n authors=None,\n status=\"published\"):\n ghost_json_url = self.ghost_json_url + '/admin/posts/?source=html'\n post = dict()\n posts = list()\n datastore = dict()\n datastore['html'] = text\n datastore['title'] = title\n datastore['status'] = status\n posts.append(datastore)\n post['posts'] = posts\n\n headers = dict()\n\n token = self.encode_token()\n headers['Authorization'] = 'Ghost {}'.format(token)\n response = requests.post(ghost_json_url, json=post, headers=headers)\n if self.debug:\n print(datastore)\n print(ghost_json_url)\n print('\n')\n print(post)\n print('\n')\n print(headers)\n print(response.headers)\n\n def upload_image(self,\n image):\n print(image)\n ghost_json_url = self.ghost_json_url + '/admin/images/upload/'\n\n token = self.encode_token()\n bashCommand = \"curl -X POST -F 'file=@%s' -H \\\"Authorization: Ghost %s\\\" %s\" % (image, token, ghost_json_url)\n\n proc = subprocess.Popen(bashCommand, shell=True, stdout=subprocess.PIPE)\n output = proc.stdout.read().decode('utf-8')\n print(output)\n self.images.append(json.loads(output)['images'][0]['url'])\n\n def upload_images(self,\n folder):\n for image in os.listdir(folder):\n if 'kpi' in image:\n if 'png' in image:\n self.upload_image(folder + '/' + image)\n print('images %s' % self.images)\n\n def custom_post(self,\n folder,\n authors,\n title='custom'):\n self.upload_images(folder)\n head = '''<p>This is a custom post created via a script</p>'''\n for picture in self.images:\n head = head + '<img src=\"%s\">' % picture\n head = head + '''<p>This is the end of the example</p>'''\n self.create_post(title=title,\n text=head,\n tags='custom',\n authors=authors)\n\n def wifi_capacity_to_ghost(self,\n authors,\n folders,\n title=None,\n server_pull=None,\n ghost_host=None,\n port='22',\n user_pull='lanforge',\n password_pull='lanforge',\n user_push=None,\n password_push=None,\n customer=None,\n testbed='Unknown Testbed',\n test_run=None,\n target_folders=list(),\n grafana_dashboard=None,\n grafana_token=None,\n grafana_host=None,\n grafana_port=3000):\n text = ''\n csvreader = CSVReader()\n if grafana_token is not None:\n grafana = GrafanaRequest(grafana_token,\n grafana_host,\n grafanajson_port=grafana_port\n )\n if test_run is None:\n test_run = sorted(folders)[0].split('/')[-1].strip('/')\n print(folders)\n for folder in folders:\n print(folder)\n ssh_pull = paramiko.SSHClient()\n ssh_pull.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)\n ssh_pull.connect(server_pull,\n port,\n username=user_pull,\n password=password_pull,\n allow_agent=False,\n look_for_keys=False)\n scp_pull = SCPClient(ssh_pull.get_transport())\n scp_pull.get(folder, recursive=True)\n target_folder = str(folder).rstrip('/').split('/')[-1]\n target_folders.append(target_folder)\n print('Target folder: %s' % target_folder)\n try:\n target_file = '%s/kpi.csv' % target_folder\n print('target file %s' % target_file)\n df = csvreader.read_csv(file=target_file, sep='\t')\n csv_testbed = csvreader.get_column(df, 'test-rig')[0]\n pass_fail = Counter(csvreader.get_column(df, 'pass/fail'))\n if pass_fail['PASS'] + pass_fail['FAIL'] > 0:\n text = text + 'Tests passed: %s<br>
' % pass_fail['PASS']\n text = text + 'Tests failed: %s<br>' % pass_fail['FAIL']\n text = text + 'Percentage of tests passed: %s<br>' % (\n pass_fail['PASS'] / (pass_fail['PASS'] + pass_fail['FAIL']))\n\n print(csv_testbed)\n except:\n pass\n if len(csv_testbed) > 2:\n testbed = csv_testbed\n text = text + 'Testbed: %s<br>' % testbed\n if testbed == 'Unknown Testbed':\n raise UserWarning('Please define your testbed')\n print('testbed %s' % testbed)\n\n ssh_push = paramiko.SSHClient()\n ssh_push.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)\n ssh_push.connect(ghost_host,\n port,\n username=user_push,\n password=password_push,\n allow_agent=False,\n look_for_keys=False)\n scp_push = SCPClient(ssh_push.get_transport())\n local_path = '/home/%s/%s/%s' % (user_push, customer, testbed)\n transport = paramiko.Transport((ghost_host, port))\n transport.connect(None, user_push, password_push)\n sftp = paramiko.sftp_client.SFTPClient.from_transport(transport)\n print('Local Path: %s' % local_path)\n try:\n sftp.mkdir(local_path)\n except:\n print('folder %s already exists' % local_path)\n scp_push.put(target_folder, recursive=True, remote_path=local_path)\n print(local_path + '/' + target_folder)\n files = sftp.listdir(local_path + '/' + target_folder)\n # print('Files: %s' % files)\n for file in files:\n if 'pdf' in file:\n url = 'http://%s/%s/%s/%s/%s/%s' % (\n ghost_host, customer.strip('/'), testbed, test_run, target_folder, file)\n text = text + 'PDF of results: <a href=\"%s\">%s</a><br>' % (url, file)\n print(url)\n scp_pull.close()\n scp_push.close()\n self.upload_images(target_folder)\n for image in self.images:\n if 'kpi-' in image:\n if '-print' not in image:\n text = text + '<img src=\"%s\">' % image\n self.images = []\n\n if grafana_token is not None:\n # get the details of the dashboard through the API, and set the end date to the youngest KPI\n grafana.list_dashboards()\n\n grafana.create_snapshot(title=grafana_dashboard)\n time.sleep(3)\n snapshot = grafana.list_snapshots()[-1]\n print(snapshot)\n text = text + '<iframe src=\"http://%s:3000/dashboard/snapshot/%s\" width=\"100%s\" height=\"600\"></iframe>' % (\n grafana_host, snapshot['key'], '%')\n\n results = csvreader.get_columns(df,['short-description','numeric-score','test details','test-priority'])\n\n low_priority = csvreader.to_html(csvreader.filter_df(results, 'test-priority', 'less than', 94))\n high_priority = csvreader.to_html(csvreader.filter_df(results, 'test-priority', 'greater than', 95))\n\n text = text + 'High priority results: %s' % high_priority\n\n text = text + 'Low priority results: %s' % low_priority\n\n now = date.now()\n\n if title is None:\n title = \"%s %s %s %s:%s report\" % (now.day, now.month, now.year, now.hour, now.minute)\n\n # create Grafana Dashboard\n target_files = []\n for folder in folders:\n print(folder)\n target_files.append(folder.split('/')[-1] + '/kpi.csv')\n grafana.create_custom_dashboard(target_csvs=target_files,\n title=title)\n\n self.create_post(title=title,\n text=text,\n tags='custom',\n authors=authors)\n","sub_path":"py-dashboard/GhostRequest.py","file_name":"GhostRequest.py","file_ext":"py","file_size_in_byte":12801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"558051419","text":"import os\nimport cv2\nimport util\nimport math\nimport queue\nimport random\nimport numpy as np\nfrom tkinter import *\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nimport matplotlib.pyplot as plt\n\nclass Painter:\n def __init__(self, use_model, rgb, reconstruct_iter, update_freq, input_size, show_area, model):\n self.USE_MODEL = use_model\n self.RGB = rgb\n self.RECONSTRUCT_ITER = reconstruct_iter\n self.update_freq = update_freq\n self.INPUT_SIZE = input_size\n self.SHOW_AREA = show_area\n self.model = model\n PADDINGS = [2, 4, 8, 16, 32]\n DRAWING_AREA = (512, 512, 3)\n\n if self.RGB:\n TOOLBOX_AREA = (DRAWING_AREA[0]+self.SHOW_AREA[0]+256+26, 32)\n else:\n TOOLBOX_AREA = (DRAWING_AREA[0]+self.SHOW_AREA[0]+26, 32)\n\n self.WINDOW_AREA = (TOOLBOX_AREA[0], TOOLBOX_AREA[1]+DRAWING_AREA[1]+PADDINGS[2]*2)\n self.save_dir = 'save/'\n\n # main window\n self.root = Tk()\n self.root.title('Doodle Master')\n self.root.geometry(str(self.WINDOW_AREA[0])+'x'+str(self.WINDOW_AREA[1]))\n self.root.resizable(width=False, height=False)\n self.root.columnconfigure(1, weight=1)\n self.root.rowconfigure(1, weight=1)\n\n # drawing area\n self.drawing_panel = Label(self.root, width=DRAWING_AREA[0], height=DRAWING_AREA[1], borderwidth=2, relief='groove')\n self.drawing_panel.place(x=PADDINGS[1]+PADDINGS[2]+self.SHOW_AREA[0], y=TOOLBOX_AREA[1]+PADDINGS[2]+PADDINGS[1])\n\n # show area\n self.show_panel_1 = Label(self.root, width=self.SHOW_AREA[0], height=self.SHOW_AREA[1], borderwidth=2, relief='groove')\n self.show_panel_1.place(x=PADDINGS[1], y=TOOLBOX_AREA[1]+PADDINGS[2]+PADDINGS[1])\n\n # palette\n if self.RGB:\n self.palette_panel = Label(self.root, width=256, height=256, borderwidth=2, relief='groove')\n self.palette_panel.place(x=PADDINGS[1]+PADDINGS[2]*2+self.SHOW_AREA[0]+DRAWING_AREA[0], y=TOOLBOX_AREA[1]+PADDINGS[2]+PADDINGS[1])\n\n self.palette_square = Label(self.root, width=128, height=128, borderwidth=0, relief='groove')\n self.palette_square.place(x=PADDINGS[1]+PADDINGS[2]*2+self.SHOW_AREA[0]+DRAWING_AREA[0]+65, y=TOOLBOX_AREA[1]+PADDINGS[2]+PADDINGS[1]+65)\n\n ############################################################\n # Tool Box #\n ############################################################\n self.stencils = {'pen': 0, 'eraser':1, 'paint_bucket':2}\n\n self.minus_image = PhotoImage(file='resource/minus.png')\n self.minus_button = 
Button(self.root, image=self.minus_image, width=32, height=32, command=self.minus_button_click)\n self.minus_button.place(x=PADDINGS[1], y=PADDINGS[1])\n\n self.plus_image = PhotoImage(file='resource/plus.png')\n self.plus_button = Button(self.root, image=self.plus_image, width=32, height=32, command=self.plus_button_click)\n self.plus_button.place(x=PADDINGS[1]+PADDINGS[2]*2+64, y=PADDINGS[1])\n\n self.iter_label_text = StringVar()\n self.iter_label_text.set('3')\n self.iter_label = Label(self.root, textvariable=self.iter_label_text, font=('Arial', 20))\n self.iter_label.place(x=PADDINGS[1]+PADDINGS[2]+32+10, y=PADDINGS[1])\n\n self.pen_image = PhotoImage(file='resource/pen.png')\n self.pen_button = Button(self.root, width=32, height=32, image=self.pen_image, command=self.pen_button_cllick)\n self.pen_button.place(x=PADDINGS[1]+PADDINGS[2]+self.SHOW_AREA[0], y=PADDINGS[1])\n self.pen_button.configure(background='#CCCCCC')\n\n self.eraser_image = PhotoImage(file='resource/eraser.png')\n self.eraser_button = Button(self.root, image=self.eraser_image, width=32, height=32, command=self.eraser_button_click)\n self.eraser_button.place(x=PADDINGS[1]+PADDINGS[2]*2+self.SHOW_AREA[0]+32, y=PADDINGS[1])\n\n self.paint_bucket_button_image = PhotoImage(file='resource/PaintBucket.png')\n self.paint_bucket_button = Button(self.root, image=self.paint_bucket_button_image, width=32, height=32, command=self.paint_bucket_button_click)\n self.paint_bucket_button.place(x=PADDINGS[1]+PADDINGS[2]*3+self.SHOW_AREA[0]+32*2, y=PADDINGS[1])\n\n self.load_button_image = PhotoImage(file='resource/load.png')\n self.load_button = Button(self.root, image=self.load_button_image, width=32, height=32, command=self.load_button_click)\n self.load_button.place(x=PADDINGS[1]+PADDINGS[2]*4+self.SHOW_AREA[0]+32*3, y=PADDINGS[1])\n\n self.cat_button_image = PhotoImage(file='resource/cat.png')\n self.cat_button = Button(self.root, image=self.cat_button_image, width=32, height=32, command=self.cat_button_click)\n self.cat_button.place(x=PADDINGS[1]+PADDINGS[2]*5+self.SHOW_AREA[0]+32*4, y=PADDINGS[1])\n\n self.stencil_buttons = [self.pen_button, self.eraser_button, self.paint_bucket_button]\n ############################################################\n\n ###################### Drawing ######################\n self.img = np.ones((512, 512, 3), np.uint8) * 255\n self.img_re = np.ones(self.SHOW_AREA, np.uint8) * 255\n self.palette_img = np.ones((256, 256, 3)) * 240\n self.palette_square_image = np.zeros((128, 128, 3))\n self.palette_weighting_1 = np.zeros((128, 128, 3))\n self.palette_weighting_2 = np.zeros((128, 128, 3))\n self.stencil_size = np.ones(len(self.stencils), np.int) * 5\n self.stencil_id = 0\n self.mouse_x = self.mouse_y = self.pre_x = self.pre_y = 0\n self.palette_circle_x, self.palette_circle_y, self.palette_square_x, self.palette_square_y = 195, 33, 0, 127\n self.palette_pressing = False\n self.redo_stack = []\n\n ### Button events ###\n def minus_button_click(self):\n self.RECONSTRUCT_ITER -= 1\n self.RECONSTRUCT_ITER = max(self.RECONSTRUCT_ITER, 1)\n self.iter_label_text.set(str(self.RECONSTRUCT_ITER))\n\n def plus_button_click(self):\n self.RECONSTRUCT_ITER += 1\n # RECONSTRUCT_ITER = min(RECONSTRUCT_ITER, 10)\n self.iter_label_text.set(str(self.RECONSTRUCT_ITER))\n\n def highlight_button(self, id):\n for button in self.stencil_buttons:\n button.configure(background='white')\n self.stencil_buttons[id].configure(background='#CCCCCC')\n\n def pen_button_cllick(self):\n self.stencil_id = self.stencils['pen']\n 
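# Illustration: self.stencils maps tool names to indices ({'pen': 0,
# 'eraser': 1, 'paint_bucket': 2}), so after the assignment above
# stencil_size[0] is the active brush width and the highlight_button(0)
# call below shades the pen button as the selected tool.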
self.highlight_button(self.stencil_id)\n\n def eraser_button_click(self):\n self.stencil_id = self.stencils['eraser']\n self.highlight_button(self.stencil_id)\n\n def paint_bucket_button_click(self):\n self.stencil_id = self.stencils['paint_bucket']\n self.highlight_button(self.stencil_id)\n\n def load_button_click(self):\n # resize to the 512x512 drawing area and keep the RGB channels only\n self.img = np.copy(np.array(Image.open('save/' + os.listdir('save/')[0]).resize((512, 512), Image.ANTIALIAS))[:,:,0:3])\n\n def cat_button_click(self):\n self.img = np.copy(np.array(Image.open('resource/big_cat.png'))[:,:,0:3])\n\n #########################################\n\n def insert_stack(self, img):\n self.redo_stack.insert(0, np.copy(img))\n if(len(self.redo_stack) > 10):\n self.redo_stack.pop()\n\n def redo(self):\n if(len(self.redo_stack)>0):\n print(\"Redo\")\n self.img = np.copy(self.redo_stack[0])\n self.redo_stack.pop(0)\n else:\n print(\"Can't Redo\")\n\n def reImage(self, img):\n if not self.RGB:\n return np.array(Image.fromarray(img).resize(self.INPUT_SIZE, Image.ANTIALIAS))[:,:,0] / 255.\n return np.array(Image.fromarray(img).resize(self.INPUT_SIZE, Image.ANTIALIAS)) / 255.\n\n def search_color(self, color, circle_color):\n for i in range(-120, 120):\n for j in range(-120, 120):\n if abs(i) <= 66 and abs(j) <= 66:\n continue\n if np.linalg.norm((i, j)) < 116 and np.linalg.norm((i, j)) > 114 and np.linalg.norm(np.array(circle_color-self.palette_img[i+127][j+127])) < 2:\n # print(palette_img[i+127][j+127])\n self.palette_circle_x = int(j/np.linalg.norm((i, j))*115+127)\n self.palette_circle_y = int(i/np.linalg.norm((i, j))*115+127)\n self.update_palette_square()\n for n in range(128):\n for m in range(128):\n if np.linalg.norm(np.array(self.palette_square_image[n][m]- color)) < 1:\n self.palette_square_x = m\n self.palette_square_y = n\n self.update_palette()\n self.update_palette_square()\n return\n\n def get_color(self, color):\n # print(color)\n tmp = color + 255 - max(color)\n # print(tmp)\n if min(tmp) == 255:\n self.search_color(color, (255, 255, 0))\n return\n # print('!')\n if tmp[0] == 255:\n if tmp[1] > tmp[2]:\n tmp[1] -= (255-tmp[1])*(tmp[2]/(255-tmp[2]))\n self.search_color(color, (tmp[0], tmp[1], 0))\n else:\n tmp[2] -= (255-tmp[2])*(tmp[1]/(255-tmp[1]))\n self.search_color(color, (tmp[0], 0, tmp[2]))\n elif tmp[1] == 255:\n if tmp[0] > tmp[2]:\n tmp[0] -= (255-tmp[0])*(tmp[2]/(255-tmp[2]))\n self.search_color(color, (tmp[0], tmp[1], 0))\n else:\n tmp[2] -= (255-tmp[2])*(tmp[0]/(255-tmp[0]))\n self.search_color(color, (0, tmp[1], tmp[2]))\n else:\n if tmp[0] > tmp[1]:\n tmp[0] -= (255-tmp[0])*(tmp[1]/(255-tmp[1]))\n self.search_color(color, (tmp[0], 0, tmp[2]))\n else:\n tmp[1] -= (255-tmp[1])*(tmp[0]/(255-tmp[0]))\n self.search_color(color, (0, tmp[1], tmp[2]))\n # print(tmp)\n\n def Filtering(self, img, k=5):\n blur = cv2.GaussianBlur(img,(k,k),0)\n return blur\n\n def fill_area(self, p, color):\n p = (p[1], p[0])\n m = np.zeros((self.img.shape[0], self.img.shape[1]))\n c = np.copy(self.img[p[0]][p[1]])\n s = set([])\n s.add(p)\n dir = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n cnt = 0\n while len(s) != 0:\n cnt += 1\n cur = s.pop()\n self.img[cur[0]][cur[1]] = np.copy(color)\n for d in dir:\n if cur[0]+d[0] >= 0 and cur[0]+d[0] < self.img.shape[0] and cur[1]+d[1] >= 0 and cur[1]+d[1] < self.img.shape[1] and m[cur[0]+d[0]][cur[1]+d[1]] == 0 and np.array_equal(self.img[cur[0]+d[0]][cur[1]+d[1]], c):\n s.add((cur[0]+d[0], cur[1]+d[1]))\n m[cur[0]+d[0]][cur[1]+d[1]] = 1\n print(cnt)\n\n def draw(self, p1, p2):\n if self.stencil_id == 
self.stencils['eraser']:\n cv2.line(self.img,p1,p2,(255, 255, 255),self.stencil_size[self.stencil_id])\n elif self.stencil_id == self.stencils['pen']:\n if self.RGB:\n cv2.line(self.img,p1,p2,tuple(self.palette_square_image[self.palette_square_y][self.palette_square_x]),self.stencil_size[self.stencil_id])\n else:\n cv2.line(self.img,p1,p2,(0, 0, 0),self.stencil_size[self.stencil_id])\n elif self.stencil_id == self.stencils['paint_bucket']:\n self.fill_area(p2, self.palette_square_image[self.palette_square_y][self.palette_square_x])\n\n def update_drawing_panel(self):\n img_out = np.copy(self.img)\n cv2.circle(img_out,(self.mouse_x,self.mouse_y),int(self.stencil_size[self.stencil_id]/2+1),(0,0,0),1)\n cv2.circle(img_out,(self.mouse_x,self.mouse_y),int(self.stencil_size[self.stencil_id]/2),(255,255,255),1)\n photo = ImageTk.PhotoImage(image=Image.fromarray(img_out))\n self.drawing_panel.configure(image=photo)\n self.drawing_panel.image = photo\n\n def update_showing_panel(self):\n photo = ImageTk.PhotoImage(image=Image.fromarray(self.img_re))\n self.show_panel_1.configure(image=photo)\n self.show_panel_1.image = photo\n\n def get_circle_color(self, x, y):\n theta = math.atan2(x, y)\n color = np.zeros(3)\n k = math.pi/3\n if theta >= k*2:\n color[2] = 1\n color[1] = (k*3-theta)/k\n elif theta >= k:\n color[1] = 1\n color[2] = (theta-k)/k\n elif theta >= 0:\n color[1] = 1\n color[0] = (k-theta)/k\n elif theta >= -k:\n color[0] = 1\n color[1] = (theta+k)/k\n elif theta >= -2*k:\n color[0] = 1\n color[2] = (-k-theta)/k\n else:\n color[2] = 1\n color[0] = (theta+3*k)/k\n return color*255\n\n def change_stencil_size(self, n):\n if self.stencil_id == self.stencils['paint_bucket']:\n return\n\n self.stencil_size[self.stencil_id] += n\n self.stencil_size[self.stencil_id] = max(2, self.stencil_size[self.stencil_id])\n\n def create_palette(self):\n for i in range(128):\n for j in range(128):\n self.palette_weighting_1[i][j] = (127-j)/127\n self.palette_weighting_2[i][j] = (i)/127*255\n\n for i in range(-127, 128):\n for j in range(-127, 128):\n if (np.linalg.norm((i, j)) > 125 and np.linalg.norm((i, j)) < 126.5) or (np.linalg.norm((i, j)) > 104 and np.linalg.norm((i, j)) < 105.5):\n self.palette_img[i+127][j+127] = 255\n elif np.linalg.norm((i, j)) < 125 and np.linalg.norm((i, j)) > 105.5:\n self.palette_img[i+127][j+127] = self.get_circle_color(i, j)\n elif (np.linalg.norm((i, j)) > 126.5 and np.linalg.norm((i, j)) < 128) or (np.linalg.norm((i, j)) > 102.5 and np.linalg.norm((i, j)) < 104):\n self.palette_img[i+127][j+127] = 200\n\n if (i== -66 or i == 66) and (j >= -66 and j <= 66):\n self.palette_img[i+127][j+127] = 150\n if (j== -66 or j == 66) and (i >= -66 and i <= 66):\n self.palette_img[i+127][j+127] = 150\n\n def update_palette(self):\n img_out = np.copy(self.palette_img).astype(np.uint8)\n cv2.circle(img_out,(self.palette_circle_x,self.palette_circle_y),4,(0,0,0),2)\n cv2.circle(img_out,(self.palette_square_x+63,self.palette_square_y+65),4,(0,0,0),2)\n photo = ImageTk.PhotoImage(image=Image.fromarray(img_out))\n self.palette_panel.configure(image=photo)\n self.palette_panel.image = photo\n\n def update_palette_square(self):\n self.palette_square_image = (-self.palette_img[self.palette_circle_y][self.palette_circle_x]+255)*self.palette_weighting_1 + self.palette_img[self.palette_circle_y][self.palette_circle_x]\n self.palette_square_image[self.palette_square_image>255] = 255\n self.palette_square_image -= self.palette_weighting_2\n self.palette_square_image[self.palette_square_image<0] = 0\n\n 
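# Worked example of the blend above (illustrative values): for a pure red
# hue C = (255, 0, 0), a pixel with weighting_1 = 0.5 and weighting_2 = 127.5
# first becomes (255 - C) * 0.5 + C = (255, 127.5, 127.5) (fading toward
# white on the left of the square), then subtracting 127.5 and clipping to
# [0, 255] gives (127.5, 0, 0) (fading toward black at the bottom).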
img_out = np.copy(self.palette_square_image).astype(np.uint8)\n if max(self.palette_square_image[self.palette_square_y][self.palette_square_x]) > 128:\n cv2.circle(img_out,(self.palette_square_x,self.palette_square_y),4,(0,0,0),2)\n else:\n cv2.circle(img_out,(self.palette_square_x,self.palette_square_y),4,(256,256,256),2)\n photo = ImageTk.PhotoImage(image=Image.fromarray(img_out))\n self.palette_square.configure(image=photo)\n self.palette_square.image = photo\n\n def save_result(self, iter=5):\n print('Save...')\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n\n img_out = self.reImage(self.img)\n for i in range(iter+1):\n if not self.RGB:\n plt.imsave(self.save_dir + str(i) + '.png',img_out.reshape(img_out.shape[0], img_out.shape[1]),cmap='Greys_r')\n else:\n plt.imsave(self.save_dir + str(i) + '.png',img_out)\n img_out = self.model.AutoDraw(img_out)\n\n def set_root_events(self):\n def key(event):\n if event.char == 's' or event.char == 'S':\n self.save_result()\n elif event.char == 'c' or event.char == 'C':\n self.insert_stack(self.img)\n self.img = np.ones((512, 512, 3), np.uint8) * 255\n elif event.char == 'p' or event.char == 'P':\n self.pen_button_cllick()\n elif event.char == 'e' or event.char == 'E':\n self.eraser_button_click()\n elif event.char == '1':\n self.change_stencil_size(-2)\n elif event.char == '2':\n self.change_stencil_size(2)\n elif event.char == 'r' or event.char == 'R':\n self.redo()\n\n self.root.bind('', key)\n\n def set_drawing_panel_events(self):\n def left_button_click(event):\n self.insert_stack(self.img)\n self.pre_x = event.x\n self.pre_y = event.y\n mouse_motion(event)\n if self.stencil_id == self.stencils['paint_bucket']:\n self.draw((self.pre_x, self.pre_y), (event.x, event.y))\n\n\n def right_button_click(event):\n self.get_color(self.img[event.y][event.x])\n\n def left_button_move(event):\n if self.stencil_id != self.stencils['paint_bucket']:\n self.draw((self.pre_x, self.pre_y), (event.x, event.y)) \n self.pre_x = event.x\n self.pre_y = event.y\n mouse_motion(event)\n\n def wheel(event):\n if event.delta > 0:\n self.change_stencil_size(2)\n else:\n self.change_stencil_size(-2)\n\n def mouse_motion(event):\n self.mouse_x, self.mouse_y = event.x, event.y\n\n self.drawing_panel.bind('', left_button_click)\n self.drawing_panel.bind('', right_button_click)\n self.drawing_panel.bind('', left_button_move)\n self.drawing_panel.bind('', wheel)\n self.drawing_panel.bind('', mouse_motion)\n\n def set_palette_event(self):\n def update_palate_position(event):\n if np.linalg.norm((event.x-127, event.y-127)) < 125 and np.linalg.norm((event.x-127, event.y-127)) > 105.5:\n self.palette_pressing = True\n\n if not self.palette_pressing:\n return\n\n self.palette_circle_x, self.palette_circle_y = event.x, event.y\n t = np.linalg.norm((self.palette_circle_x-127, self.palette_circle_y-127))\n self.palette_circle_x = int((self.palette_circle_x-127) / t * 115) + 127\n self.palette_circle_y = int((self.palette_circle_y-127) / t * 115) + 127\n self.update_palette()\n self.update_palette_square()\n \n\n def left_button_click(event):\n update_palate_position(event)\n\n def left_button_release(event):\n self.palette_pressing = False\n \n def left_button_move(event):\n update_palate_position(event)\n self.palette_panel.bind('', left_button_click)\n self.palette_panel.bind('', left_button_move)\n self.palette_panel.bind('', left_button_release)\n\n def set_palette_square_event(self):\n def update_palate_position(event):\n self.palette_square_x, 
self.palette_square_y = min(max(event.x,0),127), min(max(event.y,0),127)\n self.update_palette_square()\n self.update_palette()\n \n def left_button_click(event):\n update_palate_position(event)\n\n \n def left_button_move(event):\n update_palate_position(event)\n\n self.palette_square.bind('', left_button_click)\n self.palette_square.bind('', left_button_move)\n\n def main(self):\n self.create_palette()\n self.update_palette()\n self.update_palette_square()\n self.set_root_events()\n self.set_palette_event()\n self.set_drawing_panel_events()\n self.set_palette_square_event()\n cnt = 0\n while True:\n if cnt > self.update_freq:\n if self.USE_MODEL:\n self.img_re = self.reImage(self.img)\n self.img_re = self.model.AutoDraw(self.img_re, iter=self.RECONSTRUCT_ITER)*255\n if self.RGB:\n self.img_re = np.reshape(self.img_re, (self.INPUT_SIZE[0],self.INPUT_SIZE[1],3)).astype(np.uint8)\n self.img_re = np.array(Image.fromarray(self.img_re).resize((self.SHOW_AREA[0], self.SHOW_AREA[1]), Image.ANTIALIAS))\n else:\n self.img_re = np.reshape(self.img_re, self.INPUT_SIZE).astype(np.uint8)\n self.img_re = np.array(Image.fromarray(self.img_re, 'L').resize((self.SHOW_AREA[0], self.SHOW_AREA[1]), Image.ANTIALIAS))\n cnt = 0\n cnt += 1\n\n self.update_drawing_panel()\n self.update_showing_panel()\n self.root.update()\n self.root.mainloop()","sub_path":"Painter.py","file_name":"Painter.py","file_ext":"py","file_size_in_byte":21436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121760031","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.rcParams['text.usetex'] = True\r\n\r\n# plt.style.use('seaborn')\r\n\r\n\r\n# creating arrays of numbers\r\n# this is for a zoomed in section of the mandelbrot set.\r\nxstart = -.25\r\nxstop = -.19\r\nystart = .78\r\nystop = .84\r\ncount = 10000\r\nxpoints = np.linspace(xstart, xstop, count)\r\nypoints = np.linspace(ystart, ystop, count)\r\n\r\nxx, yy = np.meshgrid(xpoints, ypoints)\r\n# xx_initial = xx\r\n# yy_initial = yy\r\ncomplex_points_original = xx + yy * 1j\r\nin_set = np.abs(complex_points_original) < 2\r\n# print(in_set)\r\ncomplex_points_squared = complex_points_original[in_set]\r\ncomplex_points_original = complex_points_original[in_set]\r\n# print(xx)\r\n# print(yy)\r\n# print(complex_points)\r\n\r\n\r\n\r\n# perform the iteration step of the mandelbrot set\r\ntotal_iterations = 50\r\nfor iter in range(total_iterations):\r\n print(iter)\r\n complex_points_squared = np.square(complex_points_squared) + complex_points_original\r\n in_set = np.abs(complex_points_squared) < 2\r\n complex_points_squared = complex_points_squared[in_set]\r\n complex_points_original = complex_points_original[in_set]\r\n # xx = np.square(xx) - np.square(yy)\r\n # yy = 2*xx*yy\r\n\r\n\r\nin_set = np.abs(complex_points_squared) < 2\r\n# print(complex_points[in_set])\r\n# print(complex_points[np.logical_not(in_set)])\r\n\r\nxx_inset = complex_points_original[in_set].real\r\nyy_inset = complex_points_original[in_set].imag\r\n\r\n# xx_notinset = complex_points_original[np.logical_not(in_set)].real\r\n# yy_notinset = complex_points_original[np.logical_not(in_set)].imag\r\n\r\n\r\n\r\nfig, ax = plt.subplots()\r\n\r\n\r\nax.scatter(xx_inset, yy_inset, c = \"black\", s = .01, marker = \".\", linewidths = 0)\r\n\r\n\r\n\r\n# ax.legend()\r\naxis_fontsize = 10\r\nax.set_title(r\"Mandelbrot Set: $z_{n+1} = z_{n}^2 + c$\", fontsize = axis_fontsize + 2)\r\nax.set_xlabel(\"Real Axis\", fontsize = 
axis_fontsize)\r\nax.set_ylabel(\"Imaginary Axis\", fontsize = axis_fontsize)\r\n\r\n\r\naxis_label_size = 10\r\nax.tick_params(axis='x', labelsize = axis_label_size)\r\nax.tick_params(axis='y', labelsize = axis_label_size)\r\n\r\n# set background color to light blue - using hex color code\r\nax.set_facecolor('#EBF5FB')\r\nax.margins(0)\r\n\r\n# set ratio of unit on x and y axis to 1\r\nax.set_aspect(1)\r\n\r\n# fig.set_dpi(1200)\r\n\r\n# ax.grid(True)\r\n\r\nplt.tight_layout()\r\n\r\n# uncomment the next line to save the figure in high resolution\r\n# plt.savefig(\"mandelbrot_section3.png\", dpi = 1200)\r\n\r\nplt.show()","sub_path":"Python/mandelbrot_section.py","file_name":"mandelbrot_section.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101168277","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2022 Accenture Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\n\nfrom mercury.platform import Platform\nfrom mercury.system.object_stream import ObjectStreamIO, ObjectStreamWriter, ObjectStreamReader\n\n\ndef main():\n platform = Platform()\n log = platform.get_logger()\n # connect to the network\n platform.connect_to_cloud()\n # wait until connected\n while not platform.cloud_ready():\n try:\n time.sleep(0.1)\n except KeyboardInterrupt:\n # this allows us to stop the application while waiting for cloud connection\n platform.stop()\n return\n\n #\n # You can create a new I/O stream using ObjectStreamIO.\n # This requires a live connection to the language connector.\n #\n stream = ObjectStreamIO(10)\n\n in_stream_id = stream.get_input_stream()\n out_stream_id = stream.get_output_stream()\n\n output_stream = ObjectStreamWriter(out_stream_id)\n input_stream = ObjectStreamReader(in_stream_id)\n\n for i in range(100):\n output_stream.write(f'hello world {i}')\n\n #\n # if output stream is not closed, input will expire\n # Therefore, please use try-except for TimeoutError in the iterator for-loop below.\n #\n output_stream.close()\n\n for block in input_stream.read(5.0):\n if block is None:\n log.info(\"EOF\")\n else:\n log.info(block)\n\n input_stream.close()\n #\n # Stop platform after demo\n #\n platform.stop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/stream-demo.py","file_name":"stream-demo.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561436185","text":"# -*- coding: UTF-8 -*-\nfrom django.shortcuts import render_to_response,HttpResponse,Http404\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom cart.models import Cart_goods, Cart_customer\nfrom shop.models import Goods_no, Goods_shoe, Shop\nfrom django.views.decorators.http import require_POST\nfrom accounts.decorators import login_needed,active_required\nfrom cart.util import checkSameShop\nimport 
json\n\n@ensure_csrf_cookie\n@login_needed\n@active_required\ndef index(request):\n uid = request.session.get('uid', None)\n assert uid\n try:\n cart = Cart_customer.objects.get(uid = uid)\n except Cart_customer.DoesNotExist:\n print(\" Cart_customer.DoesNotExist\")\n cart = Cart_customer(uid=uid)\n cart.save()\n try:\n goods_list = Cart_goods.objects.filter(cart_id = cart.id,goods_cart_type = 0)\n except Cart_goods.DoesNotExist:\n print(\" Cart_goods.DoesNotExist\")\n return render_to_response('cart/cart.html',{'step':1,'shop_info':[]},context_instance=RequestContext(request))\n shop_info = []\n # shop_goods_info = []\n for goods in goods_list:\n goods_tmp = {}\n goods_tmp['cart_goods_id'] = goods.cart_goods_id\n goods_tmp['goods_num'] = goods.goods_num\n goods_no = Goods_no.objects.get(goods_no = goods.goods_no)\n goods_tmp['goods_color'] = goods_no.goods_color\n goods_tmp['goods_size'] = goods_no.goods_size\n goods_tmp['goods_unit_price'] = goods_no.goods_price\n goods_shoe = Goods_shoe.objects.get(goods_id = goods_no.goods_id)\n goods_tmp['goods_title'] = goods_shoe.goods_title\n goods_tmp['goods_pic'] = goods_shoe.goods_pic_url\n shop_id = goods_shoe.shop_id\n goods_tmp['original_price'] = goods_shoe.goods_price\n goods_tmp['price'] = int(goods_tmp['goods_unit_price']) * int(goods_tmp['goods_num'])\n shop_goods_info = checkSameShop(shop_info,shop_id)\n if shop_goods_info == None:\n shop = Shop.objects.get(shop_id = goods_shoe.shop_id)\n shop_info.append({'shop_id':shop.shop_id ,'shop_name': shop.shop_name,'goods':[goods_tmp]})\n else:\n shop_goods_info['goods'].append(goods_tmp)\n\n # print(shop_info)\n\n return render_to_response('cart/cart.html',{'step':1,'shop_info':shop_info},context_instance=RequestContext(request))\n\n\n@require_POST\ndef add_goods_to_cart(request):\n goods_no = request.POST.get('goods_no', '')\n goods_num = request.POST.get('goods_num', '')\n try:\n goods_no = int(goods_no)\n goods_num = int(goods_num)\n except ValueError:\n raise Http404\n\n # goods_no=10003\n # goods_num=3\n goods_cart_type=0\n uid = request.session.get('uid', None)\n if not uid:\n result={'status':'NEED_LOGIN'}\n return HttpResponse(json.dumps(result,ensure_ascii = False))\n # uid=10\n\n cart=Cart_customer.objects.filter(uid=uid)\n if cart and len(cart) == 1:\n cart_id=cart[0].id\n else:\n new_cart=Cart_customer(uid=uid)\n new_cart.save()\n cart_id=new_cart.id\n goods=Cart_goods.objects.filter(cart_id=cart_id,goods_no=goods_no,goods_cart_type=goods_cart_type)\n if goods:\n goods[0].goods_num=goods[0].goods_num+goods_num\n goods[0].save()\n else:\n new_goods=Cart_goods(cart_id=cart_id,goods_no=goods_no,goods_num=goods_num,goods_cart_type=goods_cart_type)\n new_goods.save()\n result={'status':'SUCCESS'}\n return HttpResponse(json.dumps(result,ensure_ascii = False))\n\n\n@require_POST\ndef setGoodsNum(request):\n cart_goods_id = request.POST.get('cart_goods_id', '')\n goods_num = request.POST.get('goods_num', '')\n try:\n cart_goods_id = int(cart_goods_id)\n goods_num = int(goods_num)\n except ValueError:\n raise Http404\n # uid=10\n uid = request.session.get('uid', None)\n if not uid:\n result={'status':'NEED_LOGIN'}\n return HttpResponse(json.dumps(result,ensure_ascii = False))\n\n cart=Cart_customer.objects.filter(uid=uid)\n if cart:\n cart_id=cart[0].id\n else:\n print(\"no matching cart found\")\n return HttpResponse(json.dumps({'status':'NO_SUCH_GOODS'},ensure_ascii = False))\n\n goods=Cart_goods.objects.get(cart_id=cart_id,cart_goods_id = cart_goods_id,goods_cart_type=0)\n if goods:\n goods.goods_num = goods_num\n goods.save()\n else:\n pass\n return HttpResponse(json.dumps({'status':'SUCCESS'},ensure_ascii = False))\n\n\n@require_POST\ndef delete_goods(request):\n cart_goods_id = request.POST.get('cart_goods_id', '')\n try:\n cart_goods_id = int(cart_goods_id)\n except ValueError:\n raise Http404\n # uid=10\n uid = request.session.get('uid', None)\n if not uid:\n result={'status':'NEED_LOGIN'}\n return HttpResponse(json.dumps(result,ensure_ascii = False))\n\n cart=Cart_customer.objects.filter(uid=uid)\n if cart:\n cart_id=cart[0].id\n else:\n print(\"no matching cart found\")\n return HttpResponse(json.dumps({'status':'NO_SUCH_GOODS'},ensure_ascii = False))\n\n goods=Cart_goods.objects.get(cart_id=cart_id,cart_goods_id = cart_goods_id,goods_cart_type=0)\n if goods:\n # mark the cart item as deleted\n goods.goods_cart_type = 2\n goods.save()\n else:\n pass\n return HttpResponse(json.dumps({'status':'SUCCESS'},ensure_ascii = False))","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"599236545","text":"import sys\ndir_str = \"../../ml-scratch/utils\"\nif (dir_str not in sys.path):\n sys.path.append(dir_str)\n\nimport numpy as np\nfrom collections import OrderedDict\nfrom get_mini_batch import GetMiniBatch\nfrom change_shape import get_output_size\nfrom optimizer_2 import SGD, Momentum, Nesterov, AdaGrad, RMSprop, Adam\nfrom activator_2 import Relu, Softmax\nfrom layer import Conv1D, MaxPooling1D, Flatten, Affine\n\nclass Scratch1dCNNClassifier():\n def __init__(\n self, conv_param={'n_filters': 30, 'filter_size': 3, 'stride': 1, 'pad': 0},\n pool_param={'pool_size': 2},\n n_epochs=5, batch_size=100, optimizer='Adam',\n optimizer_param={'lr': 0.001},\n layer_nodes = {'hidden': 100, 'output': 10},\n weight_init_std=0.01,\n verbose=True\n ):\n self.conv_param = conv_param\n self.pool_param = pool_param\n self.layer_nodes = layer_nodes\n self.weight_init_std = weight_init_std\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.verbose = verbose\n\n optimizer_class_dict = {'sgd': SGD, 'momentum': Momentum, 'nesterov': Nesterov,\n 'adagrad': AdaGrad, 'rmsprop': RMSprop, 'adam': Adam}\n # the optimizer receives its arguments bundled in a dict via **kwargs\n self.optimizer = optimizer_class_dict[optimizer.lower()](**optimizer_param)\n\n self.train_loss_list =[]\n self.train_acc_list = []\n self.val_loss_list = []\n self.val_acc_list = []\n\n def fit(self, x_train, y_train, x_val=None, y_val=None):\n self.x_train = x_train\n self.y_train = y_train\n self.x_val = x_val\n self.y_val = y_val\n # generate the layers\n self._gen_layers()\n # train for n_epochs epochs\n for epoch in range(self.n_epochs):\n self._train()\n print(\"epoch: \" + str(epoch))\n # if verbose=True, compute and display the loss and accuracy during training\n if (self.verbose):\n self._calc_loss_acc()\n print(\"train_acc: \" + str(self.train_acc_list[epoch]) + \", val_acc\" + str(self.val_acc_list[epoch]))\n print(\"train loss: \" + str(self.train_loss_list[epoch]) + \", val_loss\" + str(self.val_loss_list[epoch]) )\n return self.train_loss_list, self.val_loss_list\n\n def predict(self, x):\n proba = self._propagate_forward(x)\n return np.argmax(proba, axis=1)\n\n def _gen_layers(self):\n \"\"\"\n x_train: ndarray of shape(n_samples, n_channels, n_features)\n \"\"\"\n self.n_train_samples, n_channels, n_features = self.x_train.shape\n n_filters = self.conv_param['n_filters']\n filter_size = self.conv_param['filter_size']\n filter_stride = self.conv_param['stride']\n filter_pad = self.conv_param['pad']\n pool_size = 
self.pool_param['pool_size']\n\n conv_output_size = get_output_size(n_features, filter_size, filter_stride, filter_pad)\n pool_output_size = int(n_filters * conv_output_size/ pool_size)\n\n #initialize hyper parameters\n self.params ={}\n self.params['W1'] = self.weight_init_std * np.random.randn(n_filters, n_channels, filter_size)\n self.params['b1'] = np.zeros(n_filters)\n self.params['W2'] = self.weight_init_std * np.random.randn(pool_output_size, self.layer_nodes['hidden'])\n self.params['b2'] = np.zeros(self.layer_nodes['hidden'])\n self.params['W3'] = self.weight_init_std * np.random.randn(self.layer_nodes['hidden'], self.layer_nodes['output'])\n self.params['b3'] = np.zeros(self.layer_nodes['output'])\n\n #generate layers\n self.layers = OrderedDict()\n self.layers['Conv1'] = Conv1D(self.params['W1'], self.params['b1'], filter_stride, filter_pad)\n self.layers['Relu1'] = Relu()\n self.layers['Pool1'] = MaxPooling1D(pool_size=pool_size, stride=pool_size)\n self.layers['Flatten1'] = Flatten()\n self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])\n self.layers['Relu2'] = Relu()\n self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])\n self.layers['Last'] = Softmax()\n\n #gradients\n self.grads = {}\n\n def _train(self):\n mini_batch = GetMiniBatch(X=self.x_train, y=self.y_train, batch_size=self.batch_size, seed=0)\n\n for mini_x, mini_y in mini_batch:\n #forward\n z = self._propagate_forward(mini_x)\n #backward\n self._propagate_backward(z - mini_y)\n #gradient更新\n self.optimizer.update(self.params, self.grads)\n return\n\n def _loss(self, y_actual, pred_proba):\n return -(y_actual * np.log(pred_proba + 1e-7)).sum() / y_actual.shape[0]\n\n def _accuracy(self, y_actual, pred_proba):\n y_actual = np.argmax(y_actual, axis=1)\n pred = np.argmax(pred_proba, axis=1)\n acc = np.sum( y_actual == pred) / y_actual.shape[0]\n return acc\n\n def _propagate_forward(self, x):\n #forward\n for layer in self.layers.values():\n x = layer.forward(x)\n return x\n\n def _propagate_backward(self, dout):\n #backward\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n #ここはupdate weightsとして切り出したほうがわかりやすいかも\n self.grads['W1'], self.grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db\n self.grads['W2'], self.grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db\n self.grads['W3'], self.grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db\n #返却値なし\n return\n def _calc_loss_acc(self):\n proba = self._propagate_forward(self.x_train)\n #loss計算\n loss = self._loss(self.y_train, proba)\n self.train_loss_list.append(loss)\n #accuracy計算\n train_acc = self._accuracy(self.y_train, proba)\n self.train_acc_list.append(train_acc)\n\n if((self.x_val is not None) & (self.y_val is not None)):\n proba = self._propagate_forward(self.x_val)\n #loss計算\n val_loss = self._loss(self.y_val, proba)\n self.val_loss_list.append(loss)\n #accuracy計算\n val_acc = self._accuracy(self.y_val, proba)\n self.val_acc_list.append(val_acc)\n #返却値なし\n return\n","sub_path":"diveintocode-term2/ml-scratch/model/scratch_cnn1d_classifier.py","file_name":"scratch_cnn1d_classifier.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"139050635","text":"import sys\nimport aspell\n\nLANG = 'bg';\n\ndef main(filename):\n speller = aspell.Speller('lang', LANG)\n buffersize = 2**16\n with open(filename) as f:\n while True:\n lines_buffer 
= f.readlines(buffersize)\n            if not lines_buffer:\n                break\n            for line in lines_buffer:\n                word = line.strip()\n                if speller.check(word):\n                    print(word)\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1])\n","sub_path":"sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"539950847","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate  # needed for interpolate.interp1d below; this import was missing\nimport time\n\n\n#######################################################################################\n\n\ndef function(x):\n    return 1.0 / (1.0 + x ** 2.0)\n\n\ndef approximation(n, count=500):\n    results = []\n    x = np.linspace(-5, 5, n + 1)\n    vander_x = np.vander(x, n + 1, True)\n    coefficients = np.linalg.solve(vander_x, list(map(function, x)))\n\n    for x in np.linspace(-5, 5, count):\n        result = 0\n        for index, coefficient in enumerate(coefficients):\n            result += coefficient * (x ** index)\n        results.append(result)\n\n    return results\n\n\nplt.plot(np.linspace(-5, 5, 500), approximation(5))\nplt.plot(np.linspace(-5, 5, 500), approximation(10))\nplt.plot(np.linspace(-5, 5, 500), approximation(15))\n\nreal_function = []\nfor x in np.linspace(-5, 5, 500):\n    real_function.append(function(x))\n\nplt.plot(np.linspace(-5, 5, 500), real_function)\nplt.show()\n\n\n#######################################################################################\n\n\ndef calculate_difference(n):\n    results = approximation(n, 30)\n    x = np.linspace(-5, 5, 30)\n\n    results = list(map(lambda x, y: x - y, results, list(map(function, x))))\n\n    return results\n\n\nplt.plot(np.linspace(-5, 5, 30), calculate_difference(5))\nplt.plot(np.linspace(-5, 5, 30), calculate_difference(10))\nplt.plot(np.linspace(-5, 5, 30), calculate_difference(15))\nplt.show()\n\n\n#######################################################################################\n\ndef count_chebyshev_x(n, k, a, b):\n    return ((1.0 / 2.0) * (a + b)) + ((1.0 / 2.0) * (b - a) * np.cos(((2.0 * k - 1.0) * np.pi) / (2.0 * n)))\n\n\ndef chebyshev_approximation(n):\n    results = []\n    x = []\n    for i in range(1, n+1):\n        x.append(count_chebyshev_x(n, i, -5, 5))\n\n    vander_x = np.vander(x, n, True)\n    coefficients = np.linalg.solve(vander_x, list(map(function, x)))\n\n    for x in np.linspace(-5, 5, 500):\n        result = 0\n        for index, coefficient in enumerate(coefficients):\n            result += coefficient * (x ** index)\n        results.append(result)\n\n    return results\n\n\nplt.plot(np.linspace(-5, 5, 500), chebyshev_approximation(15))\nplt.plot(np.linspace(-5, 5, 500), real_function)\nplt.show()\n\n#######################################################################################\n\n\n\na = 10\nb = 5\n\n\ndef function_x(t):\n    return a * np.cos(t)\n\n\ndef function_y(t):\n    return b * np.sin(t)\n\n\ndef interpolate_elipse(n):\n    points = np.linspace(0, 2 * np.pi, n)\n\n    spline_x = interpolate.interp1d(points, list(map(function_x, points)), kind='cubic')\n    spline_y = interpolate.interp1d(points, list(map(function_y, points)), kind='cubic')\n\n    xs = []\n    ys = []\n\n    for t in np.linspace(0, 2 * np.pi, 30):\n        xs.append(spline_x(t))\n        ys.append(spline_y(t))\n\n    plt.plot(xs, ys)\n\n\ninterpolate_elipse(10)\nplt.show()\n","sub_path":"lab-02/task-1.py","file_name":"task-1.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"131291135","text":"\n# JeromeJGuay January 2020\nimport sys\nimport numpy as np\nimport xarray as xr\nimport 
matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nsys.path.append(\"../\")\nfrom Processing.FindTransect.FindTransect import haversine\n\n\ndef add_echosounder(dataset, axe, tide_dataset, Transducer='Transducer1', alpha=1,\n                    vmin=-0.1, vmax=0.0, cmap='seismic'):\n    \"\"\"\n    \"\"\"\n\n    # use the Transducer argument (hard-coding Transducer1 ignored the parameter)\n    echogram = dataset[Transducer].T\n\n    # pcolormesh vectors are set to the corner\n    echo_position = dataset.position - np.gradient(dataset.position)/2\n\n    TideHeight = tide_dataset.interp(time=dataset.time.mean()).height\n\n    echo_depth = dataset.depth - TideHeight - np.gradient(dataset.depth)[0]/2\n\n    echo_pcolor = axe.pcolormesh(echo_position,\n                                 echo_depth,\n                                 echogram,\n                                 cmap=cmap,\n                                 vmin=vmin,  # -0.1\n                                 vmax=vmax,  # -0.0\n                                 alpha=alpha,\n                                 edgecolors=\"Face\")\n\n    return echo_pcolor\n\n\ndef add_ADCP_quiver(dataset, axe, scale=30, width=0.005,\n                    headwidth=3, headlength=5,\n                    headaxislength=4.5,\n                    position=None, color='c',\n                    zorder=0):\n    \"\"\"\n    size : arrow size\n    \"\"\"\n    if position is not None:\n        dataset = dataset.interp(position=position)\n\n    X, Y = dataset.position.data, dataset.depth.data\n\n    U = dataset.VelocityAlong.data.T\n    # subtracting the mean z velocity.\n    V = dataset.VelocityZ.data.T - np.nanmean(dataset.VelocityZ.data.T)\n\n    adcp_quivers = axe.quiver(X, Y,\n                              U, -V,\n                              units='width',\n                              angles='xy',\n                              pivot='middle',\n                              scale=scale,\n                              width=width,\n                              headwidth=headwidth,\n                              headlength=headlength,\n                              headaxislength=headaxislength,\n                              alpha=1,\n                              edgecolor='w',\n                              facecolor='k',\n                              linewidth=1,\n                              zorder=zorder)\n\n    return adcp_quivers\n\n\ndef add_grouped_ADCP_quiver(dataset, axe, scale=30, width=0.005,\n                            headwidth=3, headlength=5,\n                            headaxislength=4.5,\n                            position=None, color='c',\n                            zorder=0,\n                            groups=[0, 0.2, 0.5, 0.8, 1.0]):  # ascending speed bin edges; the original 0.1 made the last bin empty\n    \"\"\"\n    size : arrow size\n    \"\"\"\n    if position is not None:\n        dataset = dataset.interp(position=position)\n\n    X, Y = dataset.position.data, dataset.depth.data\n\n    U = dataset.VelocityAlong.data.T\n    # subtracting the mean z velocity.\n    V = dataset.VelocityZ.data.T - np.nanmean(dataset.VelocityZ.data.T)\n\n    UV = np.sqrt(U**2+V**2)\n\n    for i in range(len(groups)-1):\n        Indx = np.where((UV > groups[i]) & (UV <= groups[i+1]))\n        ratio = UV[Indx]/groups[i+1]\n        U[Indx] = U[Indx]/ratio\n        V[Indx] = V[Indx]/ratio\n\n    adcp_quivers = axe.quiver(X, Y,\n                              U, -V,\n                              units='width',\n                              angles='xy',\n                              pivot='middle',\n                              scale=scale,\n                              width=width,\n                              headwidth=headwidth,\n                              headlength=headlength,\n                              headaxislength=headaxislength,\n                              alpha=1,\n                              edgecolor='w',\n                              facecolor='k',\n                              linewidth=1,\n                              zorder=zorder)\n\n    return adcp_quivers\n\n\ndef add_ADCP_cmap(dataset, dim, axe, cmap, levels,\n                  zorder=0):\n    \"\"\"\n    \"\"\"\n    adcp_contourf = axe.contourf(dataset.position,\n                                 dataset.depth,\n                                 dataset[dim].T,\n                                 cmap=cmap,\n                                 levels=levels,\n                                 zorder=zorder)\n    return adcp_contourf\n\n\ndef add_ctd(dataset, dtype, axe, iso, cmap, size,\n            color, marker,\n            linewidth, edgecolor, zorder=0,\n            change_color=False,\n            change_marker=False):\n    \"\"\"\n    iso : (list) isotherms, isohalines, isopycnals\n    \"\"\"\n\n    marker_list = [\"o\", \"s\",\n                   \"v\", \"D\", \"p\", \"*\", \"h\", \"d\"]\n    color_list = ['red', 'orange', 'blue',\n                  'green', 'yellow', 'black']\n\n    if change_marker is True:\n        marker = marker_list[:len(iso)]\n    else:\n        marker = [marker for i in range(len(iso))]\n\n    if change_color is True:\n        #color = color_list[:len(iso)]\n        color = cm.get_cmap(cmap)(np.linspace(0, 1, len(iso))).tolist()\n    else:\n        color = [color for i in range(len(iso))]\n\n    for i in range(len(iso)):\n        Indx_0 = 
np.argwhere(np.diff(np.sign(dataset[dtype].data\n                                            - iso[i])) != 0).ravel()\n        Indx = Indx_0[np.argwhere(abs(dataset[dtype].data[Indx_0]\n                                      - iso[i]) < 0.2).ravel()]\n\n        axe.scatter(dataset.ajusted_position.data[Indx],\n                    dataset.depth.data[Indx],\n                    color=color[i],\n                    marker=marker[i],\n                    linewidth=linewidth,\n                    edgecolor=edgecolor,\n                    s=size,\n                    zorder=zorder)\n\n    return Indx\n\n\ndef plot_cast(dataset, axe, dtype, cmap, linewidth):\n    \"\"\"\n    \"\"\"\n\n    Indx = np.argwhere(np.abs(np.diff(dataset.depth.data)) > 10).ravel() + 1\n\n    color = cm.get_cmap(cmap)(np.linspace(0, 1, len(Indx)-1)).tolist()\n\n    if (dataset.ajusted_position.data[-1] - dataset.ajusted_position.data[0]) < 0:\n        color.reverse()\n    # concatenate as lists (adding a plain list to an ndarray broadcasts instead of concatenating)\n    Indx = [0] + Indx.tolist() + [-1]\n\n    position = []\n\n    for i in range(len(Indx)-1):\n        axe.plot(dataset[dtype].data[Indx[i]+1:Indx[i+1]],\n                 dataset.depth.data[Indx[i]+1:Indx[i+1]],\n                 c=color[i], linewidth=linewidth)\n        position.append(np.mean(dataset.ajusted_position.data[Indx[i]:Indx[i+1]]))\n\n    return color, position\n\n\ndef overlay_ctd(dataset, dtype, xlim, ylim, f_L, f_W, figure, BWH, dtype_lim,\n                linewidth=1, ticks_position=1, iso=[], axe_color='gray',\n                cmap=\"jet\"):\n    \"\"\"\n    \"\"\"\n    if dtype == 'density':\n        dtype_name = \"$\sigma_t$\"\n    else:\n        dtype_name = dtype\n    color = cm.get_cmap(cmap)(np.linspace(0, 1, len(iso)))\n\n    ctd_cmap = cm.get_cmap(cmap).from_list('ctd_cmap', color, len(iso))\n\n    color = color[0:len(iso)]\n\n    cut_Indx = np.argwhere(np.abs(np.diff(dataset.depth.data)) > 5).ravel() + 1\n\n    cut_Indx = [0] + cut_Indx.tolist() + [len(dataset.time)]\n\n    cut_Indx_list = [[cut_Indx[i], cut_Indx[i+1]] for i in range(len(cut_Indx)-1)]\n\n    if dataset.ajusted_position[-1] - dataset.ajusted_position[0] < 0:\n        direction = 0\n    else:\n        direction = 1\n\n    positions = []\n\n    for c_i in cut_Indx_list:\n        ds_tmp = dataset.isel(time=range(c_i[0], c_i[1]))\n        positions.append(np.mean(ds_tmp.ajusted_position.data))\n\n    positions = np.array(positions)\n    # This selects casts by position; infinity is appended so the last value is kept\n    position_indx = np.intersect1d(np.where(np.abs(np.diff(np.append(positions, np.inf))) > 200)[0],\n                                   np.intersect1d(np.where(positions < xlim[1])[0],\n                                                  np.where(positions > xlim[0])[0]))\n\n    #Exclude bad positions\n    cut_Indx_list = np.array(cut_Indx_list)[position_indx]\n    positions = positions[position_indx]\n\n    if len(positions) > 0:  # comparing an ndarray to [] is ambiguous, so test the length\n        sorted_positions = np.sort(positions)\n        mid_position = sorted_positions[round(len(sorted_positions)/2)-1]\n\n\n    for c_i, position in zip(cut_Indx_list, positions):\n        ds_tmp = dataset.isel(time=range(c_i[0], c_i[1]))\n        \"\"\"--- label axis ---\"\"\"\n        axe = add_cast_axe(position,\n                           xlim[0],\n                           xlim[1],\n                           f_L,\n                           f_W,\n                           figure,\n                           BWH)\n\n        depth_vector = ds_tmp.depth.data\n\n        mid_x = np.mean(dtype_lim)\n\n        axe.set_ylim(ylim[0], ylim[1])\n        axe.set_xlim(dtype_lim[0]-1, dtype_lim[1]+1)\n        axe.patch.set_alpha(0)\n        axe.xaxis.tick_top()\n        plt.setp(axe.get_yticklabels(), visible=False)\n        plt.setp(axe.get_yticklines(), visible=False)\n        axe.spines[\"bottom\"].set_visible(False)\n        axe.spines[\"right\"].set_visible(False)\n        axe.spines['left'].set_visible(False)\n        axe.spines[\"top\"].set_position((\"axes\", ticks_position))\n        axe.spines[\"top\"].set_linewidth(2.5)\n        axe.spines[\"top\"].set_color(axe_color)\n        axe.xaxis.label.set_color(axe_color)\n        axe.tick_params(axis='x', labelsize=11)\n        axe.xaxis.set_ticks([int(dtype_lim[0]+1),\n                             int(mid_x),\n                             int(dtype_lim[1])])\n        axe.plot([mid_x, mid_x],\n                 [0, depth_vector.max()],\n                 linewidth=1.5,\n                 c=axe_color)\n\n        
if position == mid_position:\n axe.set_xlabel(str(dtype_name)+\" [\"+ds_tmp[dtype].units+']',\n fontdict={'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 12},\n labelpad=10)\n axe.xaxis.set_label_position('top')\n else:\n plt.setp(axe.get_xticklabels(), visible=False)\n\n \"\"\"---Cast---\"\"\"\n axe.plot(ds_tmp[dtype],\n depth_vector,\n c='k', linewidth=linewidth)\n\n \"\"\"---Colored Value---\"\"\"\n for i in range(len(iso)):\n iso_Indx = np.argwhere(np.diff(np.sign(ds_tmp[dtype].data\n - iso[i])) != 0).ravel()\n # I don,t remember why I putted this here.\n #iso_Indx = iso_Indx[np.argwhere(abs(ds_tmp[dtype].data[iso_Indx]\n # - iso[i]) < 0.2).ravel()]\n\n if len(iso_Indx) > 1:\n diff_indx = np.where(np.diff(ds_tmp.depth.data[iso_Indx]) > 0.001)[0].tolist()\n \n diff_indx = [0] + diff_indx + [len(iso_Indx)-1]\n\n depths = [np.mean(ds_tmp.depth.data[iso_Indx[i]:iso_Indx[i+1]])\n for i in diff_indx[0:-1]]\n else:\n depths = ds_tmp.depth[iso_Indx].data\n\n if len(depths) > 0:\n \n axe.scatter(np.ones(len(depths)) * iso[i],\n depths,\n s=35*linewidth,\n c=color[i],\n linewidth=linewidth-0.5,\n edgecolor='k',\n zorder=3)\n \n return ctd_cmap\n\n\ndef add_cast_axe(position_x, min_x, max_x,\n f_L, f_W, figure, BWH):\n \"\"\"\n \"\"\"\n Left = (f_W)*(position_x - min_x)/(np.abs(max_x - min_x)) + f_L\n axe = figure.add_axes([Left-BWH[1]/2, BWH[0], BWH[1], BWH[2]])\n\n return axe\n\n\ndef make_bath(dataset, P0, P1, node=1000):\n \"\"\"\n \"\"\"\n lon_transect = xr.DataArray(np.linspace(P0[0], P1[0], node),\n dims='position')\n lat_transect = xr.DataArray(np.linspace(P0[1], P1[1], node),\n dims='position')\n\n dataset = dataset.interp(longitude=lon_transect,\n latitude=lat_transect)\n dataarray = dataset.assign_coords(position=haversine(P0[0],\n P0[1],\n dataset.longitude,\n dataset.latitude).data).Bathymetry\n return dataarray\n\n\ndef plot_bath(dataarray, axe, color='wheat', alpha=0.6):\n \"\"\"\n \"\"\"\n axe.plot(dataarray.position, dataarray, c='black')\n axe.fill_between(dataarray.position,\n (1.05*dataarray.max()),\n dataarray,\n facecolor=color,\n alpha=alpha)\n","sub_path":"Visualization/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":12715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80493076","text":"from django.urls import path\nfrom . 
import views\nfrom surveys.views import *\n\nurlpatterns=[\n path('stations/add', views.station, name=\"stations\"),\n path('profiles/add', views.profile, name=\"profiles\"),\n path('surveys/add', views.survey, name=\"surveys\"),\n path('surveys/view', views.survey_details, name='survey_details'),\n path('surveys/calc', views.survey_calc, name='survey_calc'),\n path('surveys/survey_delete', views.survey_delete, name='survey_delete'),\n path('surveys/profile_delete', views.profile_delete, name='profile_delete'),\n path('surveys/station_delete', views.station_delete, name='station_delete'),\n path('surveys/delete', views.delete, name='delete'),\n path('surveys/surveys_edit', views.surveys_edit, name='surveys_edit'),\n path('surveys/profiles_edit', views.profiles_edit, name='profiles_edit'),\n path('surveys/stations_edit', views.stations_edit, name='stations_edit'),\n path('', views.index, name='index') #Create an index?\n]","sub_path":"surveys/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"12054442","text":"#!/usr/bin/env python\n\nimport unittest\nfrom tests import build_testsuite\n\nfrom db import *\n\nclass CountObserver(object):\n def __init__(self):\n self.call_count = 0\n def __call__(self, *args):\n self.call_count += 1\n\nclass Notifier(Subscriptable):\n def __call__(self, *args):\n self._notify(*args)\n\n\nclass SubscriptableTestCase(unittest.TestCase):\n def setUp(self):\n self.observers = [ CountObserver() for i in range(10) ]\n self.notifier = Notifier()\n\n def tearDown(self):\n self.observers = None\n self.notifier = None\n\n def callCounts(self):\n return [ o.call_count for o in self.observers ]\n\n def testSingleCallAll(self):\n for observer in self.observers:\n self.notifier.subscribe(observer)\n\n self.notifier()\n\n self.assertEqual([], [ o for o in self.observers if o.call_count < 1 ], \"listener not called\")\n self.assertEqual([], [ o for o in self.observers if o.call_count > 1 ], \"listener called more than once\")\n\n def testSingleCallHalf(self):\n for observer in self.observers[::2]:\n self.notifier.subscribe(observer)\n\n self.notifier()\n\n self.assertEqual(self.observers[::2], [ o for o in self.observers if o.call_count == 1 ], \"inconsistent listeners\")\n\n def testMultipleCallAll(self):\n for observer in self.observers:\n self.notifier.subscribe(observer)\n\n for i in range(10):\n self.notifier()\n\n self.assertEqual([], [ o for o in self.observers if o.call_count != i+1 ], \"inconsistent listeners\")\n\n def testMultipleSubscribe(self):\n for i in range(10):\n for observer in self.observers:\n self.notifier.subscribe(observer)\n\n self.notifier()\n\n self.assertEqual([], [ o for o in self.observers if o.call_count != i+1 ], \"inconsistent listeners\")\n\n def testMultipleUnsubscribe(self):\n for observer in self.observers:\n self.notifier.subscribe(observer)\n\n for i, observer in enumerate(self.observers):\n self.notifier()\n self.notifier.unsubscribe(observer)\n\n self.assertEqual(self.observers[i:], [ o for o in self.observers if o.call_count > i ], \"inconsistent listeners\")\n\n before = self.callCounts()\n self.notifier()\n after = self.callCounts()\n\n self.assertEqual(before, after, \"inconsistent listeners\")\n\n\nclass NodeTestCase(unittest.TestCase):\n def get(self): return 1\n def set(self, value):\n if value != 1:\n raise ValueError\n\n def build_property(self):\n return SubscriptableProperty(self.get, self.set)\n\n def 
setUp(self):\n self.listener = CountObserver()\n\n def testNodes(self):\n common_property_object = self.build_property()\n\n nodes = []\n for i in range(10):\n property_object = self.build_property()\n nodes.append( Node({'bla':property_object, 'common_bla':common_property_object}) )\n\n for node in nodes:\n self.assertRaises(AttributeError, node._subscribe, 'nobla', self.listener)\n node._subscribe('bla', self.listener)\n\n for node in nodes:\n self.assertEquals(1, node.bla)\n self.assertRaises(ValueError, setattr, node, 'bla', 2)\n node.bla = 1\n\n self.assertEqual(self.listener.call_count, len(nodes), 'invalid call_count: %d' % self.listener.call_count)\n\n nodes[0]._subscribe('common_bla', self.listener)\n nodes[-1].common_bla = 1\n\n self.assertEqual(self.listener.call_count, len(nodes)+1, 'invalid call_count: %d' % self.listener.call_count)\n\n def testNodeCopies(self):\n property_object = self.build_property()\n\n node = Node({'bla':property_object})\n node._subscribe('bla', self.listener)\n\n for i in range(10):\n node = node._copy()\n\n node.bla = 1\n self.assertEqual(1, self.listener.call_count, 'invalid call_count: %d' % self.listener.call_count)\n\n\nclass TransformerTestCase(unittest.TestCase):\n @staticmethod\n def const_transform(value): return 10\n\n @staticmethod\n def add10_transform(value): return value+10\n\n @staticmethod\n def add50_transform(value): return value+50\n\n class pback(object):\n def __init__(self, value): self.set(value)\n def set(self, value): self.val = value\n def get(self): return self.val\n\n def get(self): return 1\n def set(self, value):\n if value != 1:\n raise ValueError\n\n def testPropertyTransformer(self):\n\n value = self.pback(5)\n property_object = SubscriptableProperty(value.get, value.set)\n\n node1 = Node({'bla':property_object})\n\n self.assertEqual( 5, node1.bla)\n\n transform = PropertyTransformer(self.const_transform, None)\n transformed_property = transform(property_object)\n node2 = Node({'bla':transformed_property})\n\n self.assertEqual( 5, node1.bla)\n self.assertEqual(10, node2.bla)\n\n transform = PropertyTransformer(self.add10_transform, self.add50_transform)\n transformed_property = transform(property_object)\n\n node3 = Node({'bla':transformed_property})\n\n self.assertEqual( 5, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(15, node3.bla)\n\n transform = PropertyTransformer(self.add10_transform, self.add50_transform)\n transformed_property = transform(node2._getProperty('bla'))\n\n node4 = Node({'bla':transformed_property})\n\n self.assertEqual( 5, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(15, node3.bla)\n self.assertEqual(20, node4.bla)\n\n node4.bla = 1\n\n self.assertEqual(51, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(61, node3.bla)\n self.assertEqual(20, node4.bla)\n\n node3.bla = 2\n\n self.assertEqual(52, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(62, node3.bla)\n self.assertEqual(20, node4.bla)\n\n node2.bla = 3\n\n self.assertEqual( 3, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(13, node3.bla)\n self.assertEqual(20, node4.bla)\n\n node1.bla = 4\n\n self.assertEqual( 4, node1.bla)\n self.assertEqual(10, node2.bla)\n self.assertEqual(14, node3.bla)\n self.assertEqual(20, node4.bla)\n\n\n def testNodeTransformer(self):\n pass\n\n\nif __name__ == \"__main__\":\n suite = build_testsuite( globals().values() )\n 
unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"trunk/tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"49523964","text":"\"\"\"\nRead the test cases from Excel and use unittest to make the assertions\n\"\"\"\nimport unittest\nimport paramunittest\nimport urllib.parse\n\nfrom testFile.geturlParams import geturlParams\nfrom testFile.readConfig import ReadConfig\nfrom testFile.readExcel import readExcel\nfrom common.configHttp import RunMain\nfrom common.util import Utility\n\nreadconfig = ReadConfig()\nprojectInfo_xls = readExcel().get_xls('userCase.xlsx', 'projectInfo')\nutil = Utility()\n\n\n# call our geturlParams to get the URL we assembled\n# url = geturlParams().get_url(readConfig.get_http('baseurl_first'),'')\n\n@paramunittest.parametrized(*projectInfo_xls)\nclass TestProjectInfo(unittest.TestCase):\n    def setParameters(self, case_no, case_name, path, parameter, method, expect_result, expect_content):\n        \"\"\"\n        :param case_no:\n        :param case_name:\n        :param parameter:\n        :param method:\n        :param expect_result:\n        :param path:\n        :param expect_content:\n        :return:\n        \"\"\"\n        # assign the variable values\n        self.case_no = str(case_no)\n        self.case_name = str(case_name)\n        self.request_path = str(path)\n        self.request_data = str(parameter)\n        self.request_method = str(method)\n        self.expect_code = int(expect_result)\n        self.expect_content = str(expect_content)\n\n    def description(self):\n        \"\"\"\n        test report description\n        :return:\n        \"\"\"\n        # get the test name\n        self.case_name\n\n    def setUp(self):\n        \"\"\"\n\n        :return:\n        \"\"\"\n        print('preparation before the test starts')\n\n    def test_projectInfo(self):\n        self.checkResult()\n\n    def checkResult(self):  # assertions\n        url = geturlParams().get_url(readconfig.get_http('baseurl_first'),\n                                     self.request_path)  # call our geturlParams to get the URL we assembled\n        new_url = url + self.request_data\n        data = dict(urllib.parse.parse_qsl(\n            urllib.parse.urlsplit(\n                new_url).query))  # convert name=&password= in a full URL into {'username':'xxx','password':'bbb'}\n        # call util to encrypt\n        # userid = util.md5_join_b64(data[\"userid\"])\n        #\n        # # substitute the encrypted value back into the corresponding place\n        # data[\"userid\"] = userid\n        info = RunMain().run_main(self.request_method, url, data,\n                                  files=None)  # make the requests call with the method from Excel and get the response\n        ss = info.json()  # make the requests call with the method from Excel and get the response\n        if self.case_name == 'projectInfo_success':  # a valid case, so the returned code should be 200\n            self.assertEqual(self.expect_code, ss['code'])\n\n        if self.case_name == 'projectInfo_user_id_null':  # same as above\n            self.assertEqual(self.expect_code, ss['code'])\n\n    def tearDown(self):\n        \"\"\"\n\n        :return:\n        \"\"\"\n        print('test finished, log written, done\\n\\n')\n\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)\n","sub_path":"babTest/testcase/testProjectInfo.py","file_name":"testProjectInfo.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"28624713","text":"# coding=utf-8\nfrom test.base.basecase import BaseCase\nimport unittest\nimport sys\n'''login test cases simplified by introducing BaseCase'''\nsys.path.append('../..')  # uniformly lift the package search path up to the project root\nfrom config.config import *\nclass TestUserReg(unittest.TestCase):\n    def test_user_reg_normal(self):\n        \"\"\"level1: normal login\"\"\"\n        case = BaseCase().get_case_data(\"test_user_data.xlsx\", \"TestUserReg\", 'test_user_reg_normal')\n        BaseCase().send_request(case)\n        if not case:\n            print(\"test case data does not exist\")\n\n    def test_user_reg_exist(self):\n        case = BaseCase().get_case_data(\"test_user_data.xlsx\", \"TestUserReg\", 'test_user_reg_exist')\n        BaseCase().send_request(case)\n        if not case:\n            print(\"test case data does not exist\")\nif __name__=='__main__':\n    unittest.main(verbosity=2)\n","sub_path":"test/user/test_user_reg2.py","file_name":"test_user_reg2.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"630380739","text":"import speech_recognition as sr\n\nrecognizer = sr.Recognizer()\n\n# Recording the sound\n\nwith sr.Microphone() as source:\n    # print(\"Adjusting noise\")\n    recognizer.adjust_for_ambient_noise(source, duration=1)\n    print(\"\\n\\nRecording.....\")\n    recorded_audio = recognizer.listen(source, timeout=10)\n    print(\"Done recording\\n\\n\")\n\n\n# Recognizing the Audio\n\ntry:\n    print(\"Recognizing the text\")\n    text = recognizer.recognize_google(\n        recorded_audio,\n        language=\"en-US\"\n    )\n    print(\"\\nDecoded Text : {}\\n\\n\".format(text))\n\nexcept Exception as ex:\n    print(ex)","sub_path":"Jasleen_minhas/Projects_Jasleen/voice_to_text/voice_to_text.py","file_name":"voice_to_text.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"614378137","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Summary, TypeOfStudy, DeviceType, DiseaseType, PatientGroup, ProcedureType\nfrom ml_files.nlp import generate_summary\nfrom .forms import GenerateSummaryForm, ConvertCSVForm\nfrom django.views.generic import TemplateView, ListView, DetailView\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nimport csv\nfrom statistics import mode\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import sent_tokenize\nimport PyPDF2\nfrom PyPDF2 import PdfFileReader\nimport io\nimport pandas as pd\nimport xlrd\nimport time\nimport os\n\nclass RootView(TemplateView):\n    template_name=\"summaryapp/landing.html\"\n    \nclass SummaryListView(LoginRequiredMixin, ListView):\n\tmodel = Summary\n\ttemplate_name = \"summaryapp/summary_list.html\"\n\tcontext_object_name = 'summarys'\n\tordering = ['-created_on']\n\n@login_required\ndef generate_summary(request):\n\tif request.method == 'POST':\n\t\tform = GenerateSummaryForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tfile = io.BytesIO(request.FILES['files'].read())\n\t\t\tpdf_reader = PdfFileReader(file)\n\t\t\tpdf_len = pdf_reader.getNumPages()\n\t\t\tstrr = ' '\n\t\t\tfor i in range(0,pdf_len):\n\t\t\t\tpage = pdf_reader.getPage(i)\n\t\t\t\tstrr += page.extractText()\n\t\t\tfile.close()  # close the buffer once the whole PDF has been read, not inside the loop\n\n\t\t\ttypes_of_study=list(TypeOfStudy.objects.all().values('stemmed'))\n\t\t\tpatient_group =list(PatientGroup.objects.all().values('stemmed'))\n\t\t\tprocedure_type = list(ProcedureType.objects.all().values('stemmed'))\n\t\t\tdevice_type = list(DeviceType.objects.all().values('device_type_stemmed'))\n\t\t\tdevice_maker = list(DeviceType.objects.all().values('device_maker_stemmed'))\n\t\t\tdisease_type = list(DiseaseType.objects.all().values('stemmed'))\n\n\t\t\t\n\t\t\tstrr = strr.replace(',', ' ')\n\t\t\tstrr = strr.replace('\\n', ' ')\n\t\t\tstrr=strr.lower()\n\t\t\tarticle_tokens=word_tokenize(strr)\n\t\t\t\n\n\t\t\tstudy_type=\"STUDY NOT FOUND\"\n\t\t\t\n\t\t\t# study_tokens=article_tokens\n\t\t \n\t\t\t# for word in study_tokens:\n\t\t\t# \tif word in types_of_study:\n\t\t\t# \t\tstudy_type=word\n\t\t\t# 
\t\tbreak\n\t\t\tli=[]\n\t\t\tfor q in types_of_study:\n\t\t\t\tfor key,value in q.items():\n\t\t\t\t\tif value in strr:\n\t\t\t\t\t\tli.append(value)\n\t\t\tif len(li)>0:\n\t\t\t\tans=max(li,key=len)\n\t\t\t\tstudy_type=ans\n\t\t\telse:\n\t\t\t\tstudy_type=\"STUDY TYPE NOT FOUND\"\n\n\t\t\tpatient_token=article_tokens\n\t\t\tpatients_list = []\n\t\t\ttotal_patient = 0\n\t\t\tl = len(patient_token)\n\n\t\t\tfor i in range(0,l-1):\n\t\t\t\tnum=patient_token[i].strip()\n\t\t\t\tif num.isdigit() and (patient_token[i+1]==\"patients\"):\n\t\t\t\t\tpatients_list.append(num)\n\t\t\tif(len(patients_list)>0):\n\t\t\t\ttotal_patient=mode(patients_list)\n\t\t\telse:\n\t\t\t\ttotal_patient=\"PATIENT NOT FOUND\"\n\t\t\tages_token = sent_tokenize(strr)\n\t\t\tage_value = \"\"\n\t\t\tfor w in ages_token:\n\t\t\t\tword_tok=word_tokenize(w)\n\t\t\t\tflag1=0\n\t\t\t\tflag2=0\n\t\t\t\tfor wo in word_tok:\n\t\t\t\t\tif wo==\"mean\":\n\t\t\t\t\t\tflag1=1\n\t\t\t\t\tif wo==\"age\":\n\t\t\t\t\t\tflag2=1\n\t\t\t\tif (flag1==1 and flag2==1):\n\t\t\t\t\tage_value = w\n\t\t\t\t\tbreak\n\t\t\tif age_value == \"\":\n\t\t\t\tage_value = \"NO MEAN AGE FOUND\"\n\n\t\t\t### Getting Device Type ###\n\n\t\t\tdevice_type_ans = \"\"\n\t\t\tdevice_maker_ans = \"\"\n\t\t\tdevice_token = word_tokenize(strr)\n\t\t\tdevice_type_stemmed_list = []\n\t\t\tindex=None\n\t\t\tfor q in device_type:\n\t\t\t\tfor key, value in q.items():\n\t\t\t\t\tfor stem in value:\n\t\t\t\t\t\tdevice_type_stemmed_list.append(stem)\n\t\t\tfor w in device_token:\n\t\t\t\tif w in device_type_stemmed_list:\n\t\t\t\t\tdevice_type_ans = w\n\t\t\t\t\tbreak\n\n\t\t\tif device_type_ans == \"\":\n\t\t\t\tdevice_type_ans = \"DEVICE TYPE NOT FOUND\"\n\t\t\telse:\n\t\t\t\t# look up the index before appending the \" device\" suffix, otherwise the lookup fails\n\t\t\t\tindex = device_type_stemmed_list.index(device_type_ans)\n\t\t\t\tdevice_type_ans = device_type_ans + \" device\"\n\t\t\t\t### Getting Device Maker ###\n\n\t\t\t\tdevice_maker_stemmed_list = []\n\n\t\t\t\tfor q in device_maker:\n\t\t\t\t\tfor key, value in q.items():\n\t\t\t\t\t\tfor stem in value:\n\t\t\t\t\t\t\tdevice_maker_stemmed_list.append(stem)\n\n\t\t\t\tdevice_maker_ans = device_maker_stemmed_list[index]\n\n\t\t\tif device_maker_ans == \"\":\n\t\t\t\tdevice_maker_ans = \"DEVICE MAKER NOT FOUND\"\n\t\t\telse:\n\t\t\t\tdevice_maker_ans = device_maker_ans + \" device maker\"\n\n\n\t\t\t### Getting The Indication ###\n\n\t\t\tindication_type_token = sent_tokenize(strr)\n\t\t\tindication_type_ans = \"\"\n\t\t\tfor w in indication_type_token:\n\t\t\t\tif \"indicate\" in w:\n\t\t\t\t\tindication_type_ans = indication_type_ans + w\n\t\t\tif indication_type_ans == \"\":\n\t\t\t\tindication_type_ans = \"NO INDICATION FOUND\"\n\n\n\t\t\t### Patient Group ###\n\t\t\tpatient_grp=\"NO PATIENT GROUP FOUND\"\n\t\t\tpat_grp_word_token=word_tokenize(strr)\n\t\t\tpatient_group_stemmed_list = []\n\n\t\t\tfor q in patient_group:\n\t\t\t\tfor key, value in q.items():\n\t\t\t\t\tfor stem in value:\n\t\t\t\t\t\tpatient_group_stemmed_list.append(stem)\n\n\t\t\tfor w in pat_grp_word_token:\n\t\t\t\tif w in patient_group_stemmed_list:\n\t\t\t\t\tpatient_grp=w\n\t\t\t\t\tbreak\n\n\n\t\t\t### Disease Type ###\n\t\t\tdisease_type_=\"NO DISEASE FOUND\"\n\t\t\tdisease_type_word_token=word_tokenize(strr)\n\t\t\tdisease_type_stemmed_list = []\n\n\t\t\tfor q in disease_type:\n\t\t\t\tfor key, value in q.items():\n\t\t\t\t\tfor stem in value:\n\t\t\t\t\t\tdisease_type_stemmed_list.append(stem)\n\n\t\t\tfor w in disease_type_word_token:\n\t\t\t\tif w in disease_type_stemmed_list:\n\t\t\t\t\tdisease_type_=w\n\t\t\t\t\tbreak\n\n\t\t\t# build the summary with plain concatenation (the commas in the original produced a tuple, not a string)\n\t\t\tanswer = (\" Summary: This \" + study_type + \" study involves a total of \" + total_patient + \" patients. \" + age_value +\n\t\t\t\t\" who presented with \" + disease_type_ + \". All of these patients were treated with \" + device_type_ans + \" from \" + device_maker_ans +\n\t\t\t\t\". While the \" + device_type_ans + \" is an excellent tool for these indications: \" + indication_type_ans + \" Patient Group: \" + patient_grp + \"....\")\n\n\t\t\ttittle = request.POST.get('tittle')\n\n\t\t\tsummary = Summary(tittle=tittle, detail=answer, author=request.user)\n\t\t\tsummary.save()\n\n\t\t\treturn redirect('summary-list')\n\telse:\n\t\tform = GenerateSummaryForm()\n\treturn render(request, 'summaryapp/generate_summary.html', {'form': form})\n\n\n@login_required\ndef convert_csv(request):\n\tif request.method == 'POST':\n\t\tform = ConvertCSVForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tresponse = HttpResponse(content_type='text/csv')\n\t\t\tresponse['Content-Disposition'] = 'attachment; filename=\"converted.csv\"'\n\t\t\tdataset=pd.read_excel(request.FILES['file'])\n\t\t\tdf=pd.DataFrame({'Combined':dataset['Title'].str.cat(dataset['Author Names'],sep=\". \")})\n\t\t\tdf['Combined']=df['Combined'].str.cat(dataset['Source'],sep=\" \")\n\t\t\tsub=pd.DataFrame({\"Combined Info for Embase Abstracts\":df['Combined']})\n\t\t\tsub.to_csv(response, index=False)\n\t\t\tprint(\"File Converted Successfully !!\")\n\t\t\ttime.sleep(4)\n\t\t\treturn response  # return the CSV; the original fell through and re-rendered the form instead\n\telse:\n\t\tform = ConvertCSVForm()\n\treturn render(request, 'summaryapp/convert_csv.html', {'form': form})","sub_path":"aisummarytool-master/summaryapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"132458232","text":"from opentrons import labware, instruments, modules, robot\n\n\nsample_number=38\nethanol_well='A11'\n\n\ndef magbead(\n        sample_number,\n        ethanol_well,\n        elution_buffer_well,\n        sample_volume=30,\n        bead_ratio=1.8,\n        elution_buffer_volume=40,\n        incubation_time=5,\n        settling_time=2,\n        drying_time=5,\n        elution_time=2,\n        sample_offset=0,\n        tiprack_type=\"opentrons_96_tiprack_300ul\"):\n    \"\"\"Implements magbead purification reactions for BASIC assembly using an opentrons OT-2.\n\n    Selected args:\n        ethanol_well (str): well in reagent container containing ethanol.\n        elution_buffer_well (str): well in reagent container containing elution buffer.\n        sample_offset (int): offset the initial sample column by the specified value.\n\n    \"\"\"\n\n    # Constants\n    PIPETTE_ASPIRATE_RATE = 25\n    PIPETTE_DISPENSE_RATE = 150\n    TIPS_PER_SAMPLE = 9\n    CANDIDATE_TIPRACK_SLOTS = ['3', '6', '9', '2', '5']\n    MAGDECK_POSITION = '1'\n    MIX_PLATE_TYPE = '4ti-0960_FrameStar'\n    MIX_PLATE_POSITION = '4'\n    REAGENT_CONTAINER_TYPE = '4ti0131_trough-12'\n    REAGENT_CONTAINER_POSITION = '7'\n    BEAD_CONTAINER_TYPE = '4ti0136_96_deep-well'\n    BEAD_CONTAINER_POSITION = '8'\n    LIQUID_WASTE_WELL = 'A12'\n    BEADS_WELL = 'A1'\n    DEAD_TOTAL_VOL = 5\n    SLOW_HEAD_SPEEDS = {'x': 600 // 4, 'y': 400 // 4,\n                        'z': 125 // 10, 'a': 125 // 10}\n    DEFAULT_HEAD_SPEEDS = {'x': 400, 'y': 400, 'z': 125, 'a': 100}\n    IMMOBILISE_MIX_REPS = 10\n    MAGDECK_HEIGHT = 20\n    AIR_VOL_COEFF = 0.1\n    ETHANOL_VOL = 150\n    WASH_TIME = 0.5\n    ETHANOL_DEAD_VOL = 50\n    ELUTION_MIX_REPS = 20\n    ELUTANT_SEP_TIME = 1\n    ELUTION_DEAD_VOL = 2\n\n    # Errors\n    if sample_number > 48:\n        raise ValueError('sample number cannot exceed 48')\n\n    # Tips and 
pipette\n total_tips = sample_number * TIPS_PER_SAMPLE\n tiprack_num = total_tips // 96 + (1 if total_tips % 96 > 0 else 0)\n slots = CANDIDATE_TIPRACK_SLOTS[:tiprack_num]\n tipracks = [labware.load(tiprack_type, slot)\n for slot in slots]\n pipette = instruments.P300_Multi(\n mount=\"left\",\n tip_racks=tipracks,\n aspirate_flow_rate=PIPETTE_ASPIRATE_RATE,\n dispense_flow_rate=PIPETTE_DISPENSE_RATE)\n\n # Define labware\n MAGDECK = modules.load('magdeck', MAGDECK_POSITION)\n MAGDECK.disengage()\n mag_plate = labware.load(MIX_PLATE_TYPE, MAGDECK_POSITION, share=True)\n mix_plate = labware.load(MIX_PLATE_TYPE, MIX_PLATE_POSITION)\n reagent_container = labware.load(\n REAGENT_CONTAINER_TYPE, REAGENT_CONTAINER_POSITION)\n bead_container = labware.load(BEAD_CONTAINER_TYPE, BEAD_CONTAINER_POSITION)\n col_num = sample_number // 8 + (1 if sample_number % 8 > 0 else 0)\n samples = [col for col in mag_plate.cols(\n )[0 + sample_offset:col_num + sample_offset]]\n output = [col for col in mag_plate.cols(\n )[6 + sample_offset:col_num + 6 + sample_offset]]\n mixing = [col for col in mix_plate.cols(\n )[0 + sample_offset:col_num + sample_offset]]\n\n # Define reagents and liquid waste\n liquid_waste = reagent_container.wells(LIQUID_WASTE_WELL)\n beads = bead_container.wells(BEADS_WELL)\n ethanol = reagent_container.wells(ethanol_well)\n elution_buffer = reagent_container.wells(elution_buffer_well)\n\n # Define bead and mix volume\n bead_volume = sample_volume * bead_ratio\n if bead_volume / 2 > pipette.max_volume:\n mix_vol = pipette.max_volume\n else:\n mix_vol = bead_volume / 2\n total_vol = bead_volume + sample_volume + DEAD_TOTAL_VOL\n\n # Mix beads and PCR samples and incubate\n for target in range(int(len(samples))):\n # Aspirate beads\n pipette.pick_up_tip()\n pipette.aspirate(bead_volume, beads)\n robot.head_speed(**SLOW_HEAD_SPEEDS, combined_speed=max(SLOW_HEAD_SPEEDS.values()))\n\n # Transfer and mix on mix_plate\n pipette.aspirate(sample_volume + DEAD_TOTAL_VOL, samples[target])\n pipette.dispense(total_vol, mixing[target])\n pipette.mix(IMMOBILISE_MIX_REPS, mix_vol, mixing[target])\n pipette.blow_out()\n\n # Dispose of tip\n robot.head_speed(**DEFAULT_HEAD_SPEEDS, combined_speed=max(DEFAULT_HEAD_SPEEDS.values()))\n pipette.drop_tip()\n\n # Immobilise sample\n pipette.delay(minutes=incubation_time)\n\n # Transfer sample back to magdeck\n for target in range(int(len(samples))):\n pipette.transfer(total_vol, mixing[target], samples[target],\n blow_out=True)\n\n # Engagae MagDeck and incubate\n MAGDECK.engage(height=MAGDECK_HEIGHT)\n pipette.delay(minutes=settling_time)\n\n # Remove supernatant from magnetic beads\n for target in samples:\n pipette.transfer(total_vol, target, liquid_waste, blow_out=True)\n\n # Wash beads twice with 70% ethanol\n air_vol = pipette.max_volume * AIR_VOL_COEFF\n for cycle in range(2):\n for target in samples:\n pipette.transfer(ETHANOL_VOL, ethanol, target, air_gap=air_vol)\n pipette.delay(minutes=WASH_TIME)\n for target in samples:\n pipette.transfer(ETHANOL_VOL + ETHANOL_DEAD_VOL, target, liquid_waste,\n air_gap=air_vol)\n\n # Dry at RT\n pipette.delay(minutes=drying_time)\n\n # Disengage MagDeck\n MAGDECK.disengage()\n\n # Mix beads with elution buffer\n if elution_buffer_volume / 2 > pipette.max_volume:\n mix_vol = pipette.max_volume\n else:\n mix_vol = elution_buffer_volume / 2\n for target in samples:\n pipette.transfer(elution_buffer_volume, elution_buffer,\n target, mix_after=(ELUTION_MIX_REPS, mix_vol))\n\n # Incubate at RT for \"elution_time\" minutes\n 
pipette.delay(minutes=elution_time)\n\n # Engagae MagDeck for 1 minute and remain engaged for DNA elution\n MAGDECK.engage(height=MAGDECK_HEIGHT)\n pipette.delay(minutes=ELUTANT_SEP_TIME)\n\n # Transfer clean PCR product to a new well\n for target, dest in zip(samples, output):\n pipette.transfer(elution_buffer_volume - ELUTION_DEAD_VOL, target,\n dest, blow_out=False)\n\n # Disengage MagDeck\n MAGDECK.disengage()\n\n\nmagbead(sample_number=sample_number,\n ethanol_well=ethanol_well, elution_buffer_well='A1')\n\nfor c in robot.commands():\n print(c)\n","sub_path":"examples/construct_csvs/storch_et_al_cons/2_purification.ot2.py","file_name":"2_purification.ot2.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"649321791","text":"import numpy as np\nimport pandas as pd\nimport os\nimport cv2\nimport sklearn\nimport tensorflow as tf\nimport itertools\nimport csv\nimport matplotlib\nimport matplotlib.pyplot as plt\nget_ipython().magic(u'matplotlib inline')\n\nfrom sklearn.model_selection import train_test_split\npath = os.getcwd()\nos.getcwd()\n\n\nthreshold = 0.15 #Angle threshold, adjust here and generate data\nin_data_1 = pd.read_csv('./data/driving_log.csv')\nin_data_2 = pd.read_csv('./data/driving_log_add_t2_d.csv') #Track2 proper driving\n\nin_data = pd.concat([in_data_1,in_data_2])\nin_data_strait = in_data[abs(in_data['steering'])<=threshold]\nin_data_recovery = in_data[abs(in_data['steering'])>threshold]\n\n\n# In[7]:\n\n#Adjust right and left camera images with correction of 0.2\n\nimgs_strait = list(pd.concat([in_data_strait['center'],in_data_strait['left'],in_data_strait['right']]))\nsteer_angle_strait = list(pd.concat([in_data_strait['steering'],in_data_strait['steering']+0.2,in_data_strait['steering']-0.20]))\n\nimgs_recovery = list(pd.concat([in_data_recovery['center'],in_data_recovery['left'],in_data_recovery['right']]))\nsteer_angle_recovery = list(pd.concat([in_data_recovery['steering'],in_data_recovery['steering']+0.2,in_data_recovery['steering']-0.2]))\n\n\n# In[8]:\n\ndf_strait = pd.DataFrame(steer_angle_strait,imgs_strait)\ndf_strait = df_strait.reset_index()\ndf_strait.columns = ['Image','steering']\ndf_strait.head()\ndf_strait = df_strait.iloc[np.random.permutation(len(df_strait))]\ndf_strait.to_csv(\"./data/img_steer_strait.csv\",index=False,header=False)\n\n\ndf_recovery = pd.DataFrame(steer_angle_recovery,imgs_recovery)\ndf_recovery = df_recovery.reset_index()\ndf_recovery.columns = ['Image','steering']\ndf_recovery.head()\ndf_recovery = df_recovery.iloc[np.random.permutation(len(df_recovery))]\ndf_recovery.to_csv(\"./data/img_steer_recovery.csv\",index=False,header=False)\n\n#idx = int(0.15*len(df_strait))\n#df_strait = df_strait[0:idx]\n\n#df_merged = pd.concat([df_strait,df_strait])\n#df_merged.to_csv(\"./data/img_steer_new.csv\",index=False,header=False)\n\n\n# In[ ]:\n\nlen(df_recovery)\n\n\n# In[ ]:\n\nlen(df_strait)\n\n\n# In[ ]:\n\n\n\n","sub_path":"dataPreparation.py","file_name":"dataPreparation.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"305686379","text":"#!/usr/bin/env python\n\"\"\"Group-related views.\"\"\"\n\nimport datetime\nimport hashlib\nimport helpers\nimport libunison.geometry as geometry\nimport libunison.predict as predict\nimport random\nimport time\nimport re\nimport math\n\nfrom constants import errors, events\nfrom flask import Blueprint, 
request, g, jsonify\nfrom libentry_views import set_rating\nfrom libunison.models import User, UserTags, Group, Track, LibEntry, GroupEvent, Cluster\nfrom operator import itemgetter\nfrom storm.expr import Desc, In\nfrom storm.locals import AutoReload\n\n# Maximal number of groups returned when listing groups.\nMAX_GROUPS = 10\n\n# Maximal number of tracks returned when asking for the next tracks.\nMAX_TRACKS = 5\n\n# Interval during which we don't play the same song again.\nACTIVITY_INTERVAL = 60 * 60 * 5  # In seconds.\n\n# Minimum size of a cluster so that we make a suggestion.\nMIN_SUGGESTION_SIZE = 2\n\n# Number of users of a newly created group; for now the group is created empty.\nNB_USERS_IN_NEW_GROUP = 0\n\ngroup_views = Blueprint('group_views', __name__)\n\n\n@group_views.route('', methods=['GET'])\n@helpers.authenticate()\ndef list_groups():\n    \"\"\"Get a list of groups.\"\"\"\n    userloc = None\n    try:\n        lat = float(request.values['lat'])\n        lon = float(request.values['lon'])\n    except (KeyError, ValueError):\n        # Sort by descending ID - new groups come first.\n        key_fct = lambda r: -1 * r.id\n    else:\n        # Sort the rows according to the distance from the user's location.\n        userloc = geometry.Point(lat, lon)\n        key_fct = lambda r: geometry.distance(userloc, r.coordinates)\n    groups = list()\n    rows = sorted(g.store.find(Group, (Group.is_active) & (Group.is_automatic == False)), key=key_fct)  # \"not\" doesn't work...\n    for group in rows[:MAX_GROUPS]:\n        groups.append({\n            'gid': group.id,\n            'name': group.name,\n            'nb_users': group.users.count(),\n            'distance': (geometry.distance(userloc, group.coordinates)\n                         if userloc is not None else None),\n            'password': group.password is not None\n        })\n    return jsonify(groups=groups)\n\n\n@group_views.route('', methods=['POST'])\n@helpers.authenticate()\ndef create_group():\n    \"\"\"Create a new group.\"\"\"\n    try:\n        name = request.form['name']\n        lat = float(request.form['lat'])\n        lon = float(request.form['lon'])\n    except (KeyError, ValueError):\n        raise helpers.BadRequest(errors.MISSING_FIELD,\n                \"group name, latitude or longitude is missing or invalid\")\n    # Added by Vincent:\n\n    group = Group(name, is_active=True)\n    group.coordinates = geometry.Point(lat, lon)\n    group = g.store.add(group)\n\n\n    askList = False\n    if 'list' in request.form:\n        askList = bool(request.form['list'])\n\n    if askList:\n        return list_groups()\n    else:\n        # the user asked only for the newly created group to be returned.\n        group.id = AutoReload\n        groupDict = {\n            'gid': group.id,\n            'name': group.name,\n            'nb_users': NB_USERS_IN_NEW_GROUP,  # the group has just been created\n            'distance': 0.0,  # this won't be displayed anyway\n#            'distance': (geometry.distance(userloc, group.coordinates)\n#                if userloc is not None else None),  # this should be either 0 or None\n            'password': False  # the group has just been created\n        }\n        # TODO: understand why we cannot give the json object a name\n        return jsonify(groupDict)\n\n\n# Added by Louis for group password handling\n@group_views.route('/<int:gid>', methods=['PUT'])\n@helpers.authenticate(with_user=True)\ndef put_new_password(user, gid):\n    \"\"\"Set the group's password, or change it if there already is one.\n\n    We must decide if the users already in the group should be prompted for the new password.\n\n    \"\"\"\n\n    try:\n        password = request.form['password']\n    except (KeyError, ValueError):\n        raise helpers.BadRequest(errors.MISSING_FIELD,\n                \"group password is missing\")\n    group = g.store.get(Group, gid)\n\n    if group is None:\n        raise 
helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n\n if user.id != group.master_id or group.is_automatic:\n raise helpers.BadRequest(errors.UNAUTHORIZED,\n \"not allowed to change group password unless DJ\")\n\t\n group.password = password if (password != '') else None\n\t#event = GroupEvent(user, user, events.PASSWORD, password)\n\t#TODO check if this is correct\n\t#g.store.add(event)\n\t\n return helpers.success()\n\n\n@group_views.route('/', methods=['GET'])\n@helpers.authenticate()\ndef get_group_info(gid):\n \"\"\"Get infos about the specified group.\n\n Includes:\n - participants in the group (ID, nickname & stats)\n - current DJ (ID & nickname)\n - info about last track\n \"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n userdict = dict()\n for user in group.users:\n userdict[user.id] = {'nickname': user.nickname}\n # Search for the last track that was played.\n results = g.store.find(GroupEvent, (GroupEvent.event_type == events.PLAY)\n & (GroupEvent.group == group))\n track = None\n play_event = results.order_by(Desc(GroupEvent.created)).first()\n if play_event is not None:\n artist = play_event.payload.get('artist')\n title = play_event.payload.get('title')\n row = g.store.find(Track, (Track.artist == artist)\n & (Track.title == title)).one()\n image = row.image if row is not None else None\n track = {\n 'artist': artist,\n 'title': title,\n 'image': image,\n }\n for entry in play_event.payload.get('stats', []):\n if entry.get('uid') in userdict:\n uid = entry['uid']\n userdict[uid]['score'] = entry.get('score')\n userdict[uid]['predicted'] = entry.get('predicted', True)\n users = list()\n for key, val in userdict.iteritems():\n users.append({\n 'uid': key,\n 'nickname': val.get('nickname'),\n 'score': val.get('score'),\n 'predicted': val.get('predicted', True)\n })\n master = None\n if group.master is not None:\n master = {\n 'uid': group.master.id,\n 'nickname': group.master.nickname\n }\n return jsonify(name=group.name, track=track, master=master, users=users)\n\n\ndef get_played_filter(group):\n played = set()\n threshold = datetime.datetime.fromtimestamp(\n time.time() - ACTIVITY_INTERVAL)\n events = g.store.find(GroupEvent, (GroupEvent.group == group)\n & (GroupEvent.event_type == u'play') & (GroupEvent.created > threshold))\n for event in events:\n info = (event.payload.get('artist'), event.payload.get('title'))\n played.add(info)\n def played_filter(entry):\n info = (entry.track.artist, entry.track.title)\n return info not in played\n return played_filter\n\n\ndef get_playlist_id(group):\n # Find last event in the group that could have changed the playlist\n events = g.store.find(GroupEvent, (GroupEvent.group == group)\n & In(GroupEvent.event_type, [u'join', u'leave', u'master']))\n last = events.order_by(Desc(GroupEvent.created)).first()\n if last is not None:\n when = last.created\n else:\n when = datetime.datetime.utcnow()\n return unicode(hashlib.sha1(when.strftime('%s')).hexdigest())\n\n\n@group_views.route('//playlist', methods=['GET'])\n@helpers.authenticate(with_user=True)\ndef get_playlist(master, gid):\n \"\"\"Get the playlist id.\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n id = get_playlist_id(group)\n return jsonify(playlist_id=id)\n\n\n@group_views.route('//tracks', methods=['GET'])\n@helpers.authenticate(with_user=True)\ndef get_tracks(master, gid):\n 
\"\"\"Get the next tracks.\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n if group.master != master:\n raise helpers.Unauthorized(\"you are not the DJ\")\n # Get all the tracks in the master's library that haven't been played.\n played_filter = get_played_filter(group)\n remaining = filter(played_filter, g.store.find(LibEntry,\n (LibEntry.user == master) & (LibEntry.is_valid == True)\n & (LibEntry.is_local == True)))\n if not remaining: # http://stackoverflow.com/questions/53513/python-what-is-the-best-way-to-check-if-a-list-is-empty\n # Instead of removing the read tracks, reload all the tracks\n remaining = g.store.find(LibEntry,\n (LibEntry.user == master) & (LibEntry.is_valid == True)\n & (LibEntry.is_local == True))\n if not remaining:\n raise helpers.NotFound(errors.TRACKS_DEPLETED, 'no tracks to play')\n # Partition tracks based on whether we can embed them in the latent space.\n with_feats = list()\n points = list()\n no_feats = list()\n for entry in remaining:\n point = predict.get_point(entry.track)\n if point is not None:\n with_feats.append(entry)\n points.append(point)\n else:\n no_feats.append(entry)\n\n #@author: Hieu\n # Get users' current preferences\n pref_users = [user.id for user in group.users if user is not None]\n prefs = [usertags.preference for usertags in [g.store.get(UserTags,u) for u in pref_users] if usertags is not None and usertags.preference]\n prefs_features = [predict.get_tag_point(tag) for tag in prefs]\n \n # The effect of current preferences \n # calculate sum of dot products of every point with every tag/pref and group by point\n prefs_ratings_agg = [sum([predict.score_by_tag(ppoint,ppref) for ppref in prefs_features if ppref is not None]) for ppoint in points]\n \n # construct the playlist, decreasing order of preference scores\n playlist_by_pref = [entry for entry, score in sorted(\n zip(with_feats, prefs_ratings_agg), key=itemgetter(1), reverse=True)]\n \n # For the users that can be modelled: predict their ratings.\n models = filter(lambda model: model.is_nontrivial(),\n [predict.Model(user) for user in group.users])\n playlist_model = list()\n \n if len(models) > 0:\n ratings = [model.score(points) for model in models]\n # obsoleted\n # agg = predict.aggregate(ratings)\n \n sorted_item_list_asc = [x for x in range(0,len(points))]\n ranked_ratings = [[entry for entry, score in sorted(zip(sorted_item_list_asc,r), key=itemgetter(1), reverse=True)] for r in ratings]\n \n final_rank = list()\n iter = 0\n stop = False\n while not stop and iter<10000:\n transition_matrix = predict.transition_matrix(ranked_ratings,sorted_item_list_asc)\n stationary = predict.markovchain4(transition_matrix)\n addition = [entry for entry, score in sorted(zip(sorted_item_list_asc, stationary), key=itemgetter(1), reverse=True) if score>0]\n final_rank = final_rank+addition\n sorted_item_list_asc = [x for x in sorted_item_list_asc if x not in addition]\n if not sorted_item_list_asc:\n stop = True\n iter = iter+1\n ranked_ratings = [[x for x in r if x in sorted_item_list_asc] for r in ranked_ratings]\n \n if len(final_rank) < len(points):\n final_rank = final_rank + sorted_item_list_asc\n playlist_model = [with_feats[i] for i in final_rank]\n else:\n # Not a single user can be modelled! 
just order the songs randomly.\n agg = range(len(with_feats))\n random.shuffle(agg)\n # Construct the playlist, decreasing order of scores.\n playlist_model = [entry for entry, score in sorted(zip(with_feats, agg), key=itemgetter(1), reverse=True)]\n \n #@author: Hieu\n # merge two playlists of preferences and models\n entry_dict = dict()\n weight = [0.75, 0.25]; #weight(preference,models)\n \n for i in range (0,len(playlist_model)):\n entry_dict[playlist_model[i]] = weight[1]*(i+1)\n for i in range (0,len(playlist_by_pref)):\n entry_dict[playlist_by_pref[i]] += weight[0]*(i+1)\n \n playlist = [k[0] for k in sorted(entry_dict.iteritems(), key=itemgetter(1), reverse=False)]\n \n #@end-author: Hieu \n\n # Randomize songs for which we don't have features.\n random.shuffle(no_feats)\n playlist.extend(no_feats)\n # Craft the JSON response.\n tracks = list()\n for entry in playlist[:MAX_TRACKS]:\n tracks.append({\n 'artist': entry.track.artist,\n 'title': entry.track.title,\n 'local_id': entry.local_id,\n })\n return jsonify(playlist_id=get_playlist_id(group), tracks=tracks)\n\n\n@group_views.route('//current', methods=['PUT'])\n@helpers.authenticate(with_user=True)\ndef play_track(user, gid):\n \"\"\"Register the track that is currently playing.\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n if group.master != user:\n raise helpers.Unauthorized(\"you are not the master\")\n try:\n artist = request.form['artist']\n title = request.form['title']\n except KeyError:\n raise helpers.BadRequest(errors.MISSING_FIELD,\n \"missing artist and / or title\")\n track = g.store.find(Track,\n (Track.artist == artist) & (Track.title == title)).one()\n if track is None:\n raise helpers.BadRequest(errors.INVALID_TRACK,\n \"track not found\")\n payload = {\n 'artist': track.artist,\n 'title': track.title,\n 'master': {'uid': user.id, 'nickname': user.nickname},\n }\n payload['stats'] = list()\n # TODO Something better than random scores :)\n for resident in group.users:\n payload['stats'].append({\n 'uid': resident.id,\n 'nickname': resident.nickname,\n 'score': int(random.random() * 100),\n 'predicted': True #if random.random() > 0.2 else False\n })\n event = GroupEvent(group, user, events.PLAY, payload)\n g.store.add(event)\n return helpers.success()\n\n\n@group_views.route('//current', methods=['DELETE'])\n@helpers.authenticate(with_user=True)\ndef skip_track(user, gid):\n \"\"\"Skip the track that is currently being played.\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n if group.master != user:\n raise helpers.Unauthorized(\"you are not the master\")\n results = g.store.find(GroupEvent, (GroupEvent.event_type == events.PLAY)\n & (GroupEvent.group == group))\n play_event = results.order_by(Desc(GroupEvent.created)).first()\n if play_event is None:\n raise helpers.BadRequest(errors.NO_CURRENT_TRACK,\n \"no track to skip\")\n payload = {\n 'artist': play_event.payload.get('artist'),\n 'title': play_event.payload.get('title'),\n 'master': {'uid': user.id, 'nickname': user.nickname},\n }\n event = GroupEvent(group, user, events.SKIP, payload)\n g.store.add(event)\n return helpers.success()\n\n\n@group_views.route('//ratings', methods=['POST'])\n@helpers.authenticate(with_user=True)\ndef add_rating(user, gid):\n \"\"\"Take the DJ spot (if it is available).\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise 
helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n try:\n artist = request.form['artist']\n title = request.form['title']\n rating = max(1, min(5, int(request.form['rating'])))\n except KeyError:\n raise helpers.BadRequest(errors.MISSING_FIELD,\n \"missing artist, title or rating\")\n except ValueError:\n raise helpers.BadRequest(errors.INVALID_RATING,\n \"rating is invalid\")\n if user.group != group:\n raise helpers.Unauthorized(\"you are not in this group\")\n track = g.store.find(Track,\n (Track.artist == artist) & (Track.title == title)).one()\n if track is None:\n raise helpers.BadRequest(errors.INVALID_TRACK,\n \"track not found\")\n # Add a group event.\n event = GroupEvent(group, user, events.RATING)\n event.payload = {\n 'artist': track.artist,\n 'title': track.title,\n 'rating': rating,\n }\n g.store.add(event)\n # Add a library entry.\n set_rating(user, track.artist, track.title, rating)\n return helpers.success()\n\n\n@group_views.route('//master', methods=['PUT'])\n@helpers.authenticate(with_user=True)\ndef set_master(user, gid):\n \"\"\"Take the DJ spot (if it is available).\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n try:\n uid = int(request.form['uid'])\n except (KeyError, ValueError):\n raise helpers.BadRequest(errors.MISSING_FIELD,\n \"cannot parse uid\")\n if user.id != uid or user.group != group:\n raise helpers.Unauthorized(\"user not self or not in group\")\n if group.master != None and group.master != user:\n raise helpers.Unauthorized(\"someone else is already here\")\n group.master = user\n event = GroupEvent(group, user, events.MASTER, None)\n g.store.add(event)\n return helpers.success()\n\n\n@group_views.route('//master', methods=['DELETE'])\n@helpers.authenticate(with_user=True)\ndef leave_master(user, gid):\n \"\"\"Leave the DJ spot.\"\"\"\n group = g.store.get(Group, gid)\n if group is None:\n raise helpers.BadRequest(errors.INVALID_GROUP,\n \"group does not exist\")\n if group.master != None and group.master != user:\n raise helpers.Unauthorized(\"you are not the master\")\n group.master = None\n return helpers.success()\n\n\n# added by Vincent and Louis\n@group_views.route('/suggestion', methods=['GET'])\n@helpers.authenticate(with_user=True)\ndef send_suggest(user):\n try:\n lat = float(request.args['lat'])\n lon = float(request.args['lon'])\n except (KeyError, ValueError):\n raise helpers.BadRequest(errors.MISSING_FIELD,\n \"cannot parse lat and lon\")\n\n #TODO: only remove and add if necessary\n \n\n # Get user's location to put him in a cluster.\n user_loc = geometry.Point(lat, lon)\n cluster_loc = geometry.map_location_on_grid(user_loc)\n clusterRequest = g.store.execute(\"SELECT * FROM \\\"cluster\\\" WHERE position ~= CAST ('(\"+str(cluster_loc.lat)+\",\"+str(cluster_loc.lon)+\")' AS point)\")\n clusterResult = clusterRequest.get_one()\n clusterRequest.close() # close the cursor in DB\n\n\n if clusterResult is None:\n cluster = Cluster(cluster_loc)\n cluster = g.store.add(cluster)\n cluster.id = AutoReload\n else:\n coordinatesList = re.split('[\\(,\\)]', clusterResult[1])\n #CAUTION: format is ('', 'lat', 'lon', '')\n# cluster = Cluster(geometry.Point(float(coordinatesList[1]), float(coordinatesList[2])))\n# cluster.id = clusterResult[0]\n# cluster.group_id = clusterResult[2]\n\n #now we get the cluster by its ID because otherwise the store doesn't seem to be properly set for this cluster\n cluster = g.store.get(Cluster, 
clusterResult[0])\n\n    user.cluster_id = cluster.id\n    usersInCluster = cluster.users\n    size = usersInCluster.count()\n    if size < MIN_SUGGESTION_SIZE:\n        return jsonify(suggestion=False, clusterId=cluster.id)\n    else:\n        # Create a group for the cluster if needed.\n        if cluster.group_id is None:\n            groupName = u''\n            clusterGroup = Group(groupName, is_active=True)\n            clusterGroup.is_automatic = True\n            # We need some values added by the database, like the ID.\n            clusterGroup = g.store.add(clusterGroup)\n            # The coordinates cannot be null when inserted into the DB.\n            clusterGroup.coordinates = geometry.Point(cluster_loc.lat, cluster_loc.lon)\n            clusterGroup.id = AutoReload\n            clusterGroup.name = u'AutoGroup ' + str(clusterGroup.id)  # result type is unicode\n            # Tie the group to the cluster.\n            cluster.group_id = clusterGroup.id\n        else:\n            clusterGroup = g.store.get(Group, cluster.group_id)\n        # Retrieve the nicknames of the users already in the cluster. A separate\n        # loop variable avoids clobbering the authenticated `user`.\n        users = list()\n        for member in usersInCluster:\n            users.append(member.nickname)\n\n        # Create a dictionary representing the group as in list_groups. TODO: modularize.\n        groupDict = {\n            'gid': clusterGroup.id,\n            'name': clusterGroup.name,\n            'nb_users': clusterGroup.users.count(),\n            'distance': None,\n            'lat': clusterGroup.coordinates.lat,\n            'lon': clusterGroup.coordinates.lon,\n            'automatic': True\n        }\n        # Create a dictionary representing the cluster.\n        clusterDict = {\n            'cid': cluster.id,\n            'lat': cluster.coordinates.lat,\n            'lon': cluster.coordinates.lon,\n            'gid': cluster.group_id\n        }\n        return jsonify(suggestion=True, cluster=clusterDict, group=groupDict, users=users)\n\n","sub_path":"api/unison/group_views.py","file_name":"group_views.py","file_ext":"py","file_size_in_byte":21594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"148312319","text":"\"import rules\"\n\nload(\"@io_bazel_rules_dotnet//dotnet/private:context.bzl\", \"dotnet_context\")\nload(\"@io_bazel_rules_dotnet//dotnet/private:common.bzl\", \"as_iterable\")\nload(\"@io_bazel_rules_dotnet//dotnet/private:providers.bzl\", \"DotnetLibraryInfo\")\nload(\"@io_bazel_rules_dotnet//dotnet/private:rules/common.bzl\", \"collect_transitive_info\", \"wrap_binary\")\nload(\"@io_bazel_rules_dotnet//dotnet/private:rules/versions.bzl\", \"parse_version\")\n\ndef _import_library_impl(ctx):\n    name = ctx.label.name\n\n    deps = ctx.attr.deps\n    src = ctx.attr.src\n    result = src.files.to_list()[0]\n\n    transitive = collect_transitive_info(deps)\n\n    direct_runfiles = []\n    direct_runfiles.append(result)\n\n    if ctx.attr.data:\n        data_l = [f for t in ctx.attr.data for f in as_iterable(t.files)]\n        direct_runfiles += data_l\n\n    runfiles = depset(direct = direct_runfiles)\n\n    library = DotnetLibraryInfo(\n        name = name,\n        deps = deps,\n        transitive = transitive,\n        runfiles = runfiles,\n        result = result,\n        version = parse_version(ctx.attr.version),\n        ref = ctx.attr.ref.files.to_list()[0] if ctx.attr.ref != None else result,\n    )\n\n    return [\n        library,\n        DefaultInfo(\n            files = depset([library.result]),\n            runfiles = ctx.runfiles(files = library.runfiles.to_list(), transitive_files = depset(transitive = [t.runfiles for t in library.transitive])),\n        ),\n    ]\n\ndef _import_binary_internal_impl(ctx):\n    dotnet = dotnet_context(ctx, \"csharp\")\n\n    name = dotnet._ctx.label.name\n    subdir = name + \"/\"\n\n    srcname = ctx.attr.src.files.to_list()[0].basename\n\n    deps = ctx.attr.deps\n    src = ctx.attr.src\n\n    # Binary import needs to be copied to be usable\n    
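# A Bazel rule can only declare outputs that its own actions generate, so\n    # the imported assembly is materialized with the _copy tool below rather\n    # than used in place from its source repository; this also appears to be\n    # what keeps the launcher's runfiles layout self-contained.\n    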
result = ctx.actions.declare_file(subdir + srcname)\n    ctx.actions.run(\n        outputs = [result],\n        inputs = src.files.to_list(),\n        executable = ctx.attr._copy.files.to_list()[0],\n        arguments = [result.path, src.files.to_list()[0].path],\n        mnemonic = \"CopySrc\",\n    )\n\n    transitive = collect_transitive_info(deps)\n\n    direct_runfiles = []\n    direct_runfiles.append(result)\n\n    if ctx.attr.data:\n        data_l = [f for t in ctx.attr.data for f in as_iterable(t.files)]\n        direct_runfiles += data_l\n\n    runfiles = depset(direct = direct_runfiles)\n\n    executable = DotnetLibraryInfo(\n        name = srcname,\n        deps = deps,\n        transitive = transitive,\n        runfiles = runfiles,\n        result = result,\n        version = parse_version(ctx.attr.version),\n        ref = ctx.attr.ref.files.to_list()[0] if ctx.attr.ref != None else result,\n    )\n\n    return wrap_binary(executable, dotnet)\n\ncore_import_library = rule(\n    _import_library_impl,\n    attrs = {\n        \"deps\": attr.label_list(providers = [DotnetLibraryInfo], doc = \"The direct dependencies of this dll. These must be rules that provide the [DotnetLibraryInfo](api.md#dotnetlibraryinfo) provider.\"),\n        \"src\": attr.label(allow_files = [\".dll\", \".exe\"], mandatory = True, doc = \"The file to be wrapped in a [DotnetLibraryInfo](api.md#dotnetlibraryinfo) provider.\"),\n        \"data\": attr.label_list(allow_files = True, doc = \"Additional files to copy with the target assembly.\"),\n        \"version\": attr.string(mandatory = True, doc = \"Version of the imported assembly.\"),\n        \"ref\": attr.label(allow_files = True, mandatory = False, doc = \"[Reference assembly](https://docs.microsoft.com/en-us/dotnet/standard/assembly/reference-assemblies) for the given library.\"),\n    },\n    provides = [DotnetLibraryInfo],\n    executable = False,\n    doc = \"This imports an external dll and wraps it in a [DotnetLibraryInfo](api.md#dotnetlibraryinfo) provider so it can be referenced as a dependency by other rules.\",\n)\n\ncore_import_binary = rule(\n    _import_binary_internal_impl,\n    attrs = {\n        \"deps\": attr.label_list(providers = [DotnetLibraryInfo], doc = \"The direct dependencies of this dll. These must be rules that provide the [DotnetLibraryInfo](api.md#dotnetlibraryinfo) provider.\"),\n        \"src\": attr.label(allow_files = [\".dll\", \".exe\"], mandatory = True, doc = \"The file to be wrapped in a [DotnetLibraryInfo](api.md#dotnetlibraryinfo) provider.\"),\n        \"data\": attr.label_list(allow_files = True, doc = \"Additional files to copy with the target assembly.\"),\n        \"version\": attr.string(mandatory = True, doc = \"Version of the imported assembly.\"),\n        \"ref\": attr.label(allow_files = True, mandatory = False, doc = \"[Reference assembly](https://docs.microsoft.com/en-us/dotnet/standard/assembly/reference-assemblies) for the given library.\"),\n        \"_launcher\": attr.label(default = Label(\"@io_bazel_rules_dotnet//dotnet/tools/launcher_core:launcher_core.exe\")),\n        \"_copy\": attr.label(default = Label(\"@io_bazel_rules_dotnet//dotnet/tools/copy\")),\n        \"_symlink\": attr.label(default = Label(\"@io_bazel_rules_dotnet//dotnet/tools/symlink\")),\n        \"data_with_dirs\": attr.label_keyed_string_dict(allow_files = True, doc = \"Dictionary of {label:folder}. Files specified by